bin: compile lttng-sessiond as C++
author Simon Marchi <simon.marchi@efficios.com>
Wed, 6 Oct 2021 16:14:41 +0000 (12:14 -0400)
committer Jérémie Galarneau <jeremie.galarneau@efficios.com>
Wed, 17 Nov 2021 23:26:59 +0000 (18:26 -0500)
Same as commit 48a400056134 ("bin: compile lttng as C++"), but change
lttng-sessiond to be a C++ program. In addition to the categories of
changes already mentioned in that commit's message, here are some
interesting changes:

 - Add an include in trigger.h, an exported header, to fix:

      CXX      notification-thread.lo
    In file included from /home/simark/src/lttng-tools/src/bin/lttng-sessiond/notification-thread.cpp:9:
    /home/simark/src/lttng-tools/include/lttng/trigger/trigger.h:142:13: error: use of enum ‘lttng_error_code’ without previous declaration
      142 | extern enum lttng_error_code lttng_register_trigger_with_name(
          |             ^~~~~~~~~~~~~~~~
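
   The fix amounts to pulling in the enum's definition before it is
   referenced (the exact include is visible in the diff below):

      #include <lttng/lttng-error.h>   /* defines enum lttng_error_code */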

 - We get this with clang:

      CXX      lttng-conf.o
    In file included from /home/simark/src/lttng-tools/src/bin/lttng/conf.cpp:18:
    In file included from /home/simark/src/lttng-tools/src/common/common.h:14:
    In file included from /home/simark/src/lttng-tools/src/common/runas.h:17:
    In file included from /home/simark/src/lttng-tools/src/common/sessiond-comm/sessiond-comm.h:38:
    In file included from /home/simark/src/lttng-tools/src/common/unix.h:17:
    /home/simark/src/lttng-tools/src/common/payload-view.h:82:27: error: 'lttng_payload_view_from_payload' has C-linkage specified, but returns user-defined type 'struct lttng_payload_view' which is incompatible with C [-Werror,-Wreturn-type-c-linkage]
    struct lttng_payload_view lttng_payload_view_from_payload(
                              ^

    Turns out that, because of the "const" field in lttng_payload_view,
    clang doesn't consider that type compatible with C. I don't
    really want to remove the "const" for C code using that API, so
    conditionally remove it when compiling as C++ with clang.
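
   For illustration, a conditional along these lines would keep the
   const for C users and drop it only for C++ builds with clang; the
   macro name is invented for this sketch, and it assumes the const
   member is the embedded buffer view:

      /* Sketch only: hide the const qualifier from clang's C++ mode to
       * avoid -Wreturn-type-c-linkage on functions returning this type. */
      #if defined(__cplusplus) && defined(__clang__)
      #define LTTNG_PAYLOAD_VIEW_CONST
      #else
      #define LTTNG_PAYLOAD_VIEW_CONST const
      #endif

      struct lttng_payload_view {
              LTTNG_PAYLOAD_VIEW_CONST struct lttng_buffer_view buffer;
              /* ... */
      };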

 - clang gives:

      CXX      event.lo
    In file included from /home/simark/src/lttng-tools/src/bin/lttng-sessiond/event.cpp:19:
    /home/simark/src/lttng-tools/src/common/bytecode/bytecode.h:50:1: error: struct has size 0 in C, size 1 in C++ [-Werror,-Wextern-c-compat]
    struct literal_string {
    ^

   It looks like that type isn't even used anywhere, so remove it.

 - It's not possible to initialize some union members, for example with
   lttcomm_consumer_msg in consumer.cpp. Initialize them in a separate
   statement instead.
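
   For instance, something along these lines, where the command constant
   and union field names are only illustrative:

      struct lttcomm_consumer_msg msg;

      memset(&msg, 0, sizeof(msg));
      msg.cmd_type = LTTNG_CONSUMER_ADD_RELAYD_SOCKET;
      /* The union member is assigned in its own statement instead of in
       * the braced initializer. */
      msg.u.relayd_sock.net_index = net_index;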

 - It's not possible to use the transparent union trick when calling a
   urcu function, for example in thread_application_registration, in
   register.cpp. We need to instantiate a cds_wfcq_head_ptr_t object,
   assign the appropriate field, and pass that object to the function.
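
   Roughly, assuming liburcu declares the head parameter as a
   cds_wfcq_head_ptr_t (and with `queue` standing in for the real
   wait-free queue):

      struct cds_wfcq_node *node;
      cds_wfcq_head_ptr_t head;

      /* Explicitly select the cds_wfcq_head variant of the union; in C,
       * the transparent union lets &queue.head be passed directly. */
      head.h = &queue.head;
      node = cds_wfcq_dequeue_blocking(head, &queue.tail);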

 - the ALIGNED_CONST_PTR trick does not work in C++:

      CXX      consumer.lo
    In file included from /home/simark/src/lttng-tools/src/common/error.h:19,
                     from /home/simark/src/lttng-tools/src/common/common.h:12,
                     from /home/simark/src/lttng-tools/src/bin/lttng-sessiond/consumer.cpp:19:
    /home/simark/src/lttng-tools/src/bin/lttng-sessiond/consumer.cpp: In function ‘int consumer_send_relayd_socket(consumer_socket*, lttcomm_relayd_sock*, consumer_output*, lttng_stream_type, uint64_t, const char*, const char*, const char*, int, const uint64_t*, time_t, bool)’:
    /home/simark/src/lttng-tools/src/common/macros.h:116:58: error: expected primary-expression before ‘]’ token
      116 | #define ALIGNED_CONST_PTR(value) (((const typeof(value) []) { value }))
          |                                                          ^
    /home/simark/src/lttng-tools/src/bin/lttng-sessiond/consumer.cpp:1192:48: note: in expansion of macro ‘ALIGNED_CONST_PTR’
     1192 |         ret = consumer_send_fds(consumer_sock, ALIGNED_CONST_PTR(rsock->sock.fd), 1);
          |                                                ^~~~~~~~~~~~~~~~~

   Replace its uses by copying the data into a local variable (which is
   properly aligned) and passing the address of that variable to the
   function.
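
   For example, the call shown above becomes something like this (the
   name of the local variable is arbitrary):

      const int relayd_fd = rsock->sock.fd;

      /* The local copy is properly aligned; pass its address rather than
       * a pointer to a compound literal. */
      ret = consumer_send_fds(consumer_sock, &relayd_fd, 1);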

 - In consumer.h, the size of an array field in a structure is computed
   using the max macro. It can't be replaced with std::max, since
   std::max isn't constexpr in C++11. Define a max_constexpr function
   locally and use it.
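
   A minimal version of such a helper, with a made-up array declaration
   to show where a constant expression is required:

      template <typename T>
      constexpr T max_constexpr(T lhs, T rhs)
      {
              return lhs > rhs ? lhs : rhs;
      }

      /* e.g. sizing a buffer that must hold either of two payloads. */
      char payload[max_constexpr(sizeof(long), sizeof(double))];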

 - g++ 7 doesn't support non-trivial designated initializers, leading to
   errors like:

         CXX      globals.lo
      /home/smarchi/src/lttng-tools/src/bin/lttng-sessiond/globals.cpp:44:1: sorry, unimplemented: non-trivial designated initializers not supported
      };
      ^

   Change consumer_data to have a constructor instead. Change
   initializations of some structures, such as lttcomm_lttng_msg, to
   initialize the fields separately from the variable declaration. This
   requires making these variables non-const, which is not ideal. But
   once everything is C++, these types could get a fancy constructor,
   and then they could be made const again.
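
   For lttcomm_lttng_msg, that ends up looking roughly like this (field
   and variable names are only indicative):

      /* Previously a const, designated-initialized local; built up field
       * by field for now, so it can no longer be const. */
      struct lttcomm_lttng_msg llm;

      memset(&llm, 0, sizeof(llm));
      llm.cmd_type = cmd_type;
      llm.data_size = payload_size;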

 - When compiling without UST support, the stub versions of functions
   ust_app_rotate_session & co, in ust-app.h, are used. Some of them
   have the return type "enum lttng_error_code", but return 0, an invalid
   value, causing:

        CXX      main.o
      In file included from /home/smarchi/src/lttng-tools/src/bin/lttng-sessiond/lttng-sessiond.h:22:0,
                       from /home/smarchi/src/lttng-tools/src/bin/lttng-sessiond/main.cpp:45:
      /home/smarchi/src/lttng-tools/src/bin/lttng-sessiond/ust-app.h: In function ‘lttng_error_code ust_app_snapshot_record(ltt_ust_session*, const consumer_output*, int, uint64_t)’:
      /home/smarchi/src/lttng-tools/src/bin/lttng-sessiond/ust-app.h:575:9: error: invalid conversion from ‘int’ to ‘lttng_error_code’ [-fpermissive]
        return 0;
               ^

   Change these functions to return LTTNG_ERR_UNK. These functions are
   not supposed to be called if UST support is not included. But even
   if they were: all their callers check that the return value is not
   LTTNG_OK. The value 0 would be considered an error, just as
   LTTNG_ERR_UNK will be.
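
   The resulting stubs look along these lines; the signature is taken
   from the error above, with invented parameter names:

      static inline enum lttng_error_code ust_app_snapshot_record(
                      struct ltt_ust_session *usess,
                      const struct consumer_output *output,
                      int wait, uint64_t nb_packets_per_stream)
      {
              /* Never reached without UST support; callers treat anything
               * other than LTTNG_OK as an error, and LTTNG_ERR_UNK is a
               * valid enumerator while 0 is not. */
              return LTTNG_ERR_UNK;
      }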

Change-Id: I2cdd34459a54b1943087b43843ef20b35b7bf7d8
Signed-off-by: Simon Marchi <simon.marchi@efficios.com>
Signed-off-by: Jérémie Galarneau <jeremie.galarneau@efficios.com>
167 files changed:
include/lttng/action/action-internal.h
include/lttng/action/list-internal.h
include/lttng/condition/buffer-usage-internal.h
include/lttng/condition/event-rule-matches-internal.h
include/lttng/condition/session-consumed-size-internal.h
include/lttng/condition/session-rotation-internal.h
include/lttng/error-query-internal.h
include/lttng/event-internal.h
include/lttng/health-internal.h
include/lttng/location-internal.h
include/lttng/log-level-rule-internal.h
include/lttng/notification/notification-internal.h
include/lttng/session-descriptor-internal.h
include/lttng/trigger/trigger.h
include/lttng/userspace-probe-internal.h
src/bin/lttng-sessiond/Makefile.am
src/bin/lttng-sessiond/action-executor.c [deleted file]
src/bin/lttng-sessiond/action-executor.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/agent-thread.c [deleted file]
src/bin/lttng-sessiond/agent-thread.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/agent.c [deleted file]
src/bin/lttng-sessiond/agent.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/buffer-registry.c [deleted file]
src/bin/lttng-sessiond/buffer-registry.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/channel.c [deleted file]
src/bin/lttng-sessiond/channel.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/clear.c [deleted file]
src/bin/lttng-sessiond/clear.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/client.c [deleted file]
src/bin/lttng-sessiond/client.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/cmd.c [deleted file]
src/bin/lttng-sessiond/cmd.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/condition-internal.c [deleted file]
src/bin/lttng-sessiond/condition-internal.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/condition-internal.h
src/bin/lttng-sessiond/consumer.c [deleted file]
src/bin/lttng-sessiond/consumer.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/consumer.h
src/bin/lttng-sessiond/context.c [deleted file]
src/bin/lttng-sessiond/context.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/dispatch.c [deleted file]
src/bin/lttng-sessiond/dispatch.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/event-notifier-error-accounting.c [deleted file]
src/bin/lttng-sessiond/event-notifier-error-accounting.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/event.c [deleted file]
src/bin/lttng-sessiond/event.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/fd-limit.c [deleted file]
src/bin/lttng-sessiond/fd-limit.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/globals.c [deleted file]
src/bin/lttng-sessiond/globals.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/health.c [deleted file]
src/bin/lttng-sessiond/health.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/ht-cleanup.c [deleted file]
src/bin/lttng-sessiond/ht-cleanup.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/kernel-consumer.c [deleted file]
src/bin/lttng-sessiond/kernel-consumer.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/kernel.c [deleted file]
src/bin/lttng-sessiond/kernel.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/lttng-syscall.c [deleted file]
src/bin/lttng-sessiond/lttng-syscall.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/main.c [deleted file]
src/bin/lttng-sessiond/main.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/manage-apps.c [deleted file]
src/bin/lttng-sessiond/manage-apps.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/manage-consumer.c [deleted file]
src/bin/lttng-sessiond/manage-consumer.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/manage-kernel.c [deleted file]
src/bin/lttng-sessiond/manage-kernel.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/modprobe.c [deleted file]
src/bin/lttng-sessiond/modprobe.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/notification-thread-commands.c [deleted file]
src/bin/lttng-sessiond/notification-thread-commands.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/notification-thread-events.c [deleted file]
src/bin/lttng-sessiond/notification-thread-events.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/notification-thread.c [deleted file]
src/bin/lttng-sessiond/notification-thread.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/notify-apps.c [deleted file]
src/bin/lttng-sessiond/notify-apps.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/process-utils.c [deleted file]
src/bin/lttng-sessiond/process-utils.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/register.c [deleted file]
src/bin/lttng-sessiond/register.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/rotate.c [deleted file]
src/bin/lttng-sessiond/rotate.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/rotation-thread.c [deleted file]
src/bin/lttng-sessiond/rotation-thread.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/save.c [deleted file]
src/bin/lttng-sessiond/save.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/session.c [deleted file]
src/bin/lttng-sessiond/session.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/sessiond-config.c [deleted file]
src/bin/lttng-sessiond/sessiond-config.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/snapshot.c [deleted file]
src/bin/lttng-sessiond/snapshot.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/thread-utils.c [deleted file]
src/bin/lttng-sessiond/thread-utils.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/thread.c [deleted file]
src/bin/lttng-sessiond/thread.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/timer.c [deleted file]
src/bin/lttng-sessiond/timer.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/trace-kernel.c [deleted file]
src/bin/lttng-sessiond/trace-kernel.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/trace-ust.c [deleted file]
src/bin/lttng-sessiond/trace-ust.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/tracker.c [deleted file]
src/bin/lttng-sessiond/tracker.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/trigger-error-query.c [deleted file]
src/bin/lttng-sessiond/trigger-error-query.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/ust-app.c [deleted file]
src/bin/lttng-sessiond/ust-app.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/ust-app.h
src/bin/lttng-sessiond/ust-consumer.c [deleted file]
src/bin/lttng-sessiond/ust-consumer.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/ust-ctl-internal.h
src/bin/lttng-sessiond/ust-field-utils.c [deleted file]
src/bin/lttng-sessiond/ust-field-utils.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/ust-metadata.c [deleted file]
src/bin/lttng-sessiond/ust-metadata.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/ust-registry.c [deleted file]
src/bin/lttng-sessiond/ust-registry.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/ust-sigbus.c [deleted file]
src/bin/lttng-sessiond/ust-sigbus.cpp [new file with mode: 0644]
src/bin/lttng-sessiond/utils.c [deleted file]
src/bin/lttng-sessiond/utils.cpp [new file with mode: 0644]
src/common/buffer-view.h
src/common/bytecode/bytecode.h
src/common/compat/directory-handle.h
src/common/compat/poll.h
src/common/config/session-config.h
src/common/context.h
src/common/credentials.h
src/common/daemonize.h
src/common/defaults.h
src/common/dynamic-buffer.h
src/common/fd-handle.h
src/common/filter.h
src/common/futex.h
src/common/hashtable/hashtable.h
src/common/hashtable/utils.h
src/common/index-allocator.h
src/common/kernel-ctl/kernel-ctl.h
src/common/macros.h
src/common/optional.h
src/common/payload-view.h
src/common/payload.h
src/common/pipe.h
src/common/readwrite.h
src/common/relayd/relayd.h
src/common/runas.h
src/common/sessiond-comm/inet.c
src/common/sessiond-comm/inet.h
src/common/sessiond-comm/inet6.c
src/common/sessiond-comm/sessiond-comm.h
src/common/shm.h
src/common/testpoint/testpoint.h
src/common/trace-chunk.h
src/common/unix.h
src/common/uuid.h
src/common/waiter.h
tests/unit/Makefile.am
tests/unit/test_kernel_data.c [deleted file]
tests/unit/test_kernel_data.cpp [new file with mode: 0644]
tests/unit/test_session.c [deleted file]
tests/unit/test_session.cpp [new file with mode: 0644]
tests/unit/test_ust_data.c [deleted file]
tests/unit/test_ust_data.cpp [new file with mode: 0644]
tests/utils/tap/tap.h

index 71270a835cfd95a9d2d93141ef1d0d6c4542c66e..b55a6d59bc2dcde627f517b51436c163afe3d911 100644 (file)
 #include <sys/types.h>
 #include <urcu/ref.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_rate_policy;
 struct mi_writer;
 struct mi_lttng_error_query_callbacks;
@@ -128,4 +132,8 @@ enum lttng_error_code lttng_action_mi_serialize(const struct lttng_trigger *trig
                                *error_query_callbacks,
                struct lttng_dynamic_array *action_path_indexes);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_ACTION_INTERNAL_H */
index 231755fd6795b55d88eb2562115526aabb97ab51..3d7a5ba60f03d9b604a726347a1500259f0b166d 100644 (file)
 
 #include <common/macros.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_action;
 struct lttng_payload_view;
 struct mi_writer;
@@ -39,4 +43,8 @@ enum lttng_error_code lttng_action_list_mi_serialize(const struct lttng_trigger
                                *error_query_callbacks,
                struct lttng_dynamic_array *action_path_indexes);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_ACTION_LIST_INTERNAL_H */
index 1e6d5a509c40b8c518dc90e634c0cc76322dd0bc..6ea19e065f7e3fec613e60f4ce7f38fd1ebfdedb 100644 (file)
 #include "common/buffer-view.h"
 #include <common/macros.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_condition_buffer_usage {
        struct lttng_condition parent;
        struct {
@@ -77,4 +81,8 @@ ssize_t lttng_evaluation_buffer_usage_high_create_from_payload(
                struct lttng_payload_view *view,
                struct lttng_evaluation **evaluation);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_CONDITION_BUFFER_USAGE_INTERNAL_H */
index ef39f4f04f86b2a57d2e15a75c5609cc796495d8..4aac9f991e3517df7eac0395ce77dd19c2e63108 100644 (file)
 #include <common/dynamic-array.h>
 #include <lttng/event-field-value.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_capture_descriptor {
        struct lttng_event_expr *event_expression;
        struct lttng_bytecode *bytecode;
@@ -86,4 +90,8 @@ const struct lttng_bytecode *
 lttng_condition_event_rule_matches_get_capture_bytecode_at_index(
                const struct lttng_condition *condition, unsigned int index);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_CONDITION_EVENT_RULE_MATCHES_INTERNAL_H */
index 9340a5f233f6cb5566b0defac1edc57c2878cb70..07c5953e1d014965bd933ba6bbd821b918b095c8 100644 (file)
 #include <common/buffer-view.h>
 #include <common/macros.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_payload;
 struct lttng_payload_view;
 
@@ -53,4 +57,8 @@ ssize_t lttng_evaluation_session_consumed_size_create_from_payload(
                struct lttng_payload_view *view,
                struct lttng_evaluation **evaluation);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_CONDITION_SESSION_CONSUMED_SIZE_INTERNAL_H */
index c723c6d6a74638ac22452b5db71204ddc8574b15..e5993e9609b44f7a27712dcc1d0cc862c11d9077 100644 (file)
 #include <lttng/location.h>
 #include <common/macros.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_condition_session_rotation {
        struct lttng_condition parent;
        char *session_name;
@@ -61,4 +65,8 @@ ssize_t lttng_evaluation_session_rotation_completed_create_from_payload(
                struct lttng_payload_view *view,
                struct lttng_evaluation **evaluation);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_CONDITION_SESSION_ROTATION_INTERNAL_H */
index 07e2280d7486a283ebab703435f61a03cfd5338f..d3072f54b378a7ae862cccb0e4b15dcb449e4bd9 100644 (file)
 #include <common/payload.h>
 #include <common/payload-view.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct mi_writer;
 
 enum lttng_error_query_target_type {
@@ -78,4 +82,8 @@ enum lttng_error_code lttng_error_query_results_mi_serialize(
                const struct lttng_error_query_results *results,
                struct mi_writer *writer);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_ERROR_QUERY_INTERNAL_H */
index 8d03fbdec5680c10fd861b02dfe92bac8e5fd379..7370d0510730eb77d4f039bd62c828c911c5c47c 100644 (file)
 #include <common/macros.h>
 #include <lttng/event.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_userspace_probe_location;
 
 struct lttng_event_extended {
@@ -35,4 +39,8 @@ struct lttng_event_extended {
 
 struct lttng_event *lttng_event_copy(const struct lttng_event *event);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_EVENT_INTERNAL_H */
index ceda0a8dd5b027321c514885ea6d4124e717b710..875cc960b329a015de2af81962a4c4a6fba2563a 100644 (file)
 #include <lttng/health.h>
 #include <common/macros.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * These are the value added to the current state depending of the position in
  * the thread where is either waiting on a poll() or running in the code.
@@ -116,4 +120,8 @@ int health_check_state(struct health_app *ha, int type);
 void health_register(struct health_app *ha, int type);
 void health_unregister(struct health_app *ha);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* HEALTH_INTERNAL_H */
index 33498c90d5c0482b6663fdff5cb6ea68f547aa12..9eafd471a2d5273cb4aa2273cfc304ce8f9f0b02 100644 (file)
 #include <sys/types.h>
 #include <urcu/ref.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * The public API assumes that trace archive locations are always
  * provided as "constant". This means that the user of liblttng-ctl never
@@ -97,4 +101,8 @@ void lttng_trace_archive_location_get(
 void lttng_trace_archive_location_put(
                struct lttng_trace_archive_location *location);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_LOCATION_INTERNAL_H */
index c17e590b7264fa26db4c8fc584d3a393e59975eb..b02afafe0d8d563c86bfeca1c192df1c42a5d029 100644 (file)
 #include <lttng/event.h>
 #include <lttng/log-level-rule.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct mi_writer;
 
 /*
@@ -60,4 +64,8 @@ enum lttng_error_code lttng_log_level_rule_mi_serialize(
                const struct lttng_log_level_rule *rule,
                struct mi_writer *writer);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_LOG_LEVEL_RULE_INTERNAL_H */
index 7e601b0c7de82d5340b8686c932bff42e6b5b63d..1d337e07db6b18e62f2ab2bd58a89c3f40a351af 100644 (file)
 #include <stdbool.h>
 #include <sys/types.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_payload;
 struct lttng_payload_view;
 
@@ -40,4 +44,8 @@ ssize_t lttng_notification_create_from_payload(
                struct lttng_payload_view *view,
                struct lttng_notification **notification);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_NOTIFICATION_INTERNAL_H */
index f438d4812ff66a643d365626a383d2eae40eb789..97628bb881c4c2f2a21491f81216323af7006108 100644 (file)
 #include <common/buffer-view.h>
 #include <stdbool.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /* Note that these enums are used as part of the lttnctl protocol. */
 enum lttng_session_descriptor_type {
        LTTNG_SESSION_DESCRIPTOR_TYPE_UNKNOWN = -1,
@@ -81,4 +85,8 @@ int lttng_session_descriptor_assign(
                struct lttng_session_descriptor *dst_descriptor,
                const struct lttng_session_descriptor *src_descriptor);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_SESSION_DESCRIPTOR_INTERNAL_H */
index c4899f936540ee970164a6091dd3b397e9b28974..842203ace0f145f17ecbee0389762671acbeea83 100644 (file)
@@ -11,6 +11,7 @@
 #include <sys/types.h>
 #include <lttng/constant.h>
 #include <inttypes.h>
+#include <lttng/lttng-error.h>
 
 struct lttng_action;
 struct lttng_condition;
index 5e960c106db1b853224d12c4da59f8c6f1695e18..fe96a4ea3b2b926fac04b232d562f68c6b641e6f 100644 (file)
 #include <common/fd-handle.h>
 #include <stdbool.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_payload;
 struct lttng_payload_view;
 struct lttng_dynamic_buffer;
@@ -159,4 +163,8 @@ enum lttng_error_code lttng_userspace_probe_location_mi_serialize(
                const struct lttng_userspace_probe_location *location,
                struct mi_writer *writer);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_USERSPACE_PROBE_INTERNAL_H */
index 11fb85e0b0968d69feca5682258ae18a5a002393..2de44a492d427564650fd0d5c4ec8cf181e88519 100644 (file)
@@ -9,62 +9,62 @@ endif
 
 noinst_LTLIBRARIES = liblttng-sessiond-common.la
 
-liblttng_sessiond_common_la_SOURCES = utils.c utils.h \
-                       trace-kernel.c trace-kernel.h \
-                       kernel.c kernel.h \
+liblttng_sessiond_common_la_SOURCES = utils.cpp utils.h \
+                       trace-kernel.cpp trace-kernel.h \
+                       kernel.cpp kernel.h \
                        ust-app.h ust-sigbus.h trace-ust.h notify-apps.h \
                        lttng-ust-ctl.h lttng-ust-abi.h lttng-ust-error.h \
                        ust-ctl-internal.h ust-abi-internal.h ust-error-internal.h \
                        ust-registry.h \
-                       condition-internal.c condition-internal.h \
-                       context.c context.h \
-                       channel.c channel.h \
-                       event.c event.h \
-                       consumer.c consumer.h \
-                       session.c session.h \
-                       modprobe.c modprobe.h kern-modules.h \
-                       fd-limit.c fd-limit.h \
-                       kernel-consumer.c kernel-consumer.h \
+                       condition-internal.cpp condition-internal.h \
+                       context.cpp context.h \
+                       channel.cpp channel.h \
+                       event.cpp event.h \
+                       consumer.cpp consumer.h \
+                       session.cpp session.h \
+                       modprobe.cpp modprobe.h kern-modules.h \
+                       fd-limit.cpp fd-limit.h \
+                       kernel-consumer.cpp kernel-consumer.h \
                        consumer.h \
                        health-sessiond.h \
-                       cmd.c cmd.h \
-                       buffer-registry.c buffer-registry.h \
-                       testpoint.h ht-cleanup.c ht-cleanup.h \
-                       snapshot.c snapshot.h \
-                       agent.c agent.h \
-                       save.h save.c \
-                       lttng-syscall.h lttng-syscall.c \
-                       notification-thread.h notification-thread.c \
+                       cmd.cpp cmd.h \
+                       buffer-registry.cpp buffer-registry.h \
+                       testpoint.h ht-cleanup.cpp ht-cleanup.h \
+                       snapshot.cpp snapshot.h \
+                       agent.cpp agent.h \
+                       save.h save.cpp \
+                       lttng-syscall.h lttng-syscall.cpp \
+                       notification-thread.h notification-thread.cpp \
                        notification-thread-internal.h \
-                       notification-thread-commands.h notification-thread-commands.c \
-                       notification-thread-events.h notification-thread-events.c \
-                       sessiond-config.h sessiond-config.c \
-                       rotate.h rotate.c \
-                       rotation-thread.h rotation-thread.c \
-                       timer.c timer.h \
-                       globals.c \
-                       thread-utils.c \
-                       process-utils.c \
-                       thread.c thread.h \
-                       health.c \
-                       client.c client.h \
-                       dispatch.c dispatch.h \
-                       register.c register.h \
-                       manage-apps.c manage-apps.h \
-                       manage-kernel.c manage-kernel.h \
-                       manage-consumer.c manage-consumer.h \
-                       clear.c clear.h \
-                       tracker.c tracker.h \
-                       event-notifier-error-accounting.c event-notifier-error-accounting.h \
-                       action-executor.c action-executor.h\
-                       trigger-error-query.c
+                       notification-thread-commands.h notification-thread-commands.cpp \
+                       notification-thread-events.h notification-thread-events.cpp \
+                       sessiond-config.h sessiond-config.cpp \
+                       rotate.h rotate.cpp \
+                       rotation-thread.h rotation-thread.cpp \
+                       timer.cpp timer.h \
+                       globals.cpp \
+                       thread-utils.cpp \
+                       process-utils.cpp \
+                       thread.cpp thread.h \
+                       health.cpp \
+                       client.cpp client.h \
+                       dispatch.cpp dispatch.h \
+                       register.cpp register.h \
+                       manage-apps.cpp manage-apps.h \
+                       manage-kernel.cpp manage-kernel.h \
+                       manage-consumer.cpp manage-consumer.h \
+                       clear.cpp clear.h \
+                       tracker.cpp tracker.h \
+                       event-notifier-error-accounting.cpp event-notifier-error-accounting.h \
+                       action-executor.cpp action-executor.h\
+                       trigger-error-query.cpp
 
 if HAVE_LIBLTTNG_UST_CTL
-liblttng_sessiond_common_la_SOURCES += trace-ust.c ust-registry.c ust-app.c \
-                       ust-consumer.c ust-consumer.h notify-apps.c \
-                       ust-metadata.c ust-clock.h agent-thread.c agent-thread.h \
-                       ust-field-utils.h ust-field-utils.c \
-                       ust-sigbus.c
+liblttng_sessiond_common_la_SOURCES += trace-ust.cpp ust-registry.cpp ust-app.cpp \
+                       ust-consumer.cpp ust-consumer.h notify-apps.cpp \
+                       ust-metadata.cpp ust-clock.h agent-thread.cpp agent-thread.h \
+                       ust-field-utils.h ust-field-utils.cpp \
+                       ust-sigbus.cpp
 endif
 
 # link on liblttngctl for check if sessiond is already alive.
@@ -88,7 +88,7 @@ endif
 
 bin_PROGRAMS = lttng-sessiond
 
-lttng_sessiond_SOURCES = lttng-sessiond.h main.c
+lttng_sessiond_SOURCES = lttng-sessiond.h main.cpp
 
 lttng_sessiond_LDFLAGS = -rdynamic
 
diff --git a/src/bin/lttng-sessiond/action-executor.c b/src/bin/lttng-sessiond/action-executor.c
deleted file mode 100644 (file)
index 94b819c..0000000
+++ /dev/null
@@ -1,1113 +0,0 @@
-/*
- * Copyright (C) 2020 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include "action-executor.h"
-#include "cmd.h"
-#include "health-sessiond.h"
-#include "lttng-sessiond.h"
-#include "notification-thread-internal.h"
-#include "session.h"
-#include "thread.h"
-#include <common/dynamic-array.h>
-#include <common/macros.h>
-#include <common/optional.h>
-#include <lttng/action/action-internal.h>
-#include <lttng/action/list-internal.h>
-#include <lttng/action/list.h>
-#include <lttng/action/notify-internal.h>
-#include <lttng/action/notify.h>
-#include <lttng/action/rotate-session.h>
-#include <lttng/action/snapshot-session.h>
-#include <lttng/action/start-session.h>
-#include <lttng/action/stop-session.h>
-#include <lttng/condition/evaluation.h>
-#include <lttng/condition/event-rule-matches-internal.h>
-#include <lttng/lttng-error.h>
-#include <lttng/trigger/trigger-internal.h>
-#include <pthread.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <urcu/list.h>
-
-#define THREAD_NAME "Action Executor"
-#define MAX_QUEUED_WORK_COUNT 8192
-
-/*
- * A work item is composed of a dynamic array of sub-items which
- * represent a flattened, and augmented, version of a trigger's actions.
- *
- * We cannot rely solely on the trigger's actions since each action can have an
- * execution context we need to comply with.
- *
- * The notion of execution context is required since for some actions the
- * associated object are referenced by name and not by id. This can lead to
- * a number of ambiguities when executing an action work item.
- *
- * For example, let's take a simple trigger such as:
- *   - condition: ust event a
- *   - action: start session S
- *
- * At time T, session S exists.
- * At T + 1, the event A is hit.
- * At T + 2, the tracer event notification is received and the work item is
- * queued. Here session S have an id of 1.
- * At T + 3, the session S is destroyed and a new session S is created, with a
- * resulting id of 200.
- * At T +4, the work item is popped from the queue and begin execution and will
- * start session S with an id of 200 instead of the session S id 1 that was
- * present at the queuing phase.
- *
- * The context to be respected is the one when the work item is queued. If the
- * execution context is not the same at the moment of execution, we skip the
- * execution of that sub-item.
- *
- * It is the same policy in regards to the validity of the associated
- * trigger object at the moment of execution, if the trigger is found to be
- * unregistered, the execution is skipped.
- */
-
-struct action_work_item {
-       uint64_t id;
-
-       /*
-        * The actions to be executed with their respective execution context.
-        * See struct `action_work_subitem`.
-        */
-       struct lttng_dynamic_array subitems;
-
-       /* Execution context data */
-       struct lttng_trigger *trigger;
-       struct lttng_evaluation *evaluation;
-       struct notification_client_list *client_list;
-       LTTNG_OPTIONAL(struct lttng_credentials) object_creds;
-       struct cds_list_head list_node;
-};
-
-struct action_work_subitem {
-       struct lttng_action *action;
-       struct {
-               /* Used by actions targeting a session. */
-               LTTNG_OPTIONAL(uint64_t) session_id;
-       } context;
-};
-
-struct action_executor {
-       struct lttng_thread *thread;
-       struct notification_thread_handle *notification_thread_handle;
-       struct {
-               uint64_t pending_count;
-               struct cds_list_head list;
-               pthread_cond_t cond;
-               pthread_mutex_t lock;
-       } work;
-       bool should_quit;
-       uint64_t next_work_item_id;
-};
-
-/*
- * Only return non-zero on a fatal error that should shut down the action
- * executor.
- */
-typedef int (*action_executor_handler)(struct action_executor *executor,
-               const struct action_work_item *,
-               struct action_work_subitem *item);
-
-static int action_executor_notify_handler(struct action_executor *executor,
-               const struct action_work_item *,
-               struct action_work_subitem *);
-static int action_executor_start_session_handler(
-               struct action_executor *executor,
-               const struct action_work_item *,
-               struct action_work_subitem *);
-static int action_executor_stop_session_handler(
-               struct action_executor *executor,
-               const struct action_work_item *,
-               struct action_work_subitem *);
-static int action_executor_rotate_session_handler(
-               struct action_executor *executor,
-               const struct action_work_item *,
-               struct action_work_subitem *);
-static int action_executor_snapshot_session_handler(
-               struct action_executor *executor,
-               const struct action_work_item *,
-               struct action_work_subitem *);
-static int action_executor_list_handler(struct action_executor *executor,
-               const struct action_work_item *,
-               struct action_work_subitem *);
-static int action_executor_generic_handler(struct action_executor *executor,
-               const struct action_work_item *,
-               struct action_work_subitem *);
-
-static const action_executor_handler action_executors[] = {
-       [LTTNG_ACTION_TYPE_NOTIFY] = action_executor_notify_handler,
-       [LTTNG_ACTION_TYPE_START_SESSION] = action_executor_start_session_handler,
-       [LTTNG_ACTION_TYPE_STOP_SESSION] = action_executor_stop_session_handler,
-       [LTTNG_ACTION_TYPE_ROTATE_SESSION] = action_executor_rotate_session_handler,
-       [LTTNG_ACTION_TYPE_SNAPSHOT_SESSION] = action_executor_snapshot_session_handler,
-       [LTTNG_ACTION_TYPE_LIST] = action_executor_list_handler,
-};
-
-/* Forward declaration */
-static int add_action_to_subitem_array(struct lttng_action *action,
-               struct lttng_dynamic_array *subitems);
-
-static int populate_subitem_array_from_trigger(struct lttng_trigger *trigger,
-               struct lttng_dynamic_array *subitems);
-
-static void action_work_subitem_destructor(void *element)
-{
-       struct action_work_subitem *subitem = element;
-
-       lttng_action_put(subitem->action);
-}
-
-static const char *get_action_name(const struct lttng_action *action)
-{
-       const enum lttng_action_type action_type = lttng_action_get_type(action);
-
-       LTTNG_ASSERT(action_type != LTTNG_ACTION_TYPE_UNKNOWN);
-
-       return lttng_action_type_string(action_type);
-}
-
-/* Check if this trigger allowed to interect with a given session. */
-static bool is_trigger_allowed_for_session(const struct lttng_trigger *trigger,
-               struct ltt_session *session)
-{
-       bool is_allowed = false;
-       const struct lttng_credentials session_creds = {
-               .uid = LTTNG_OPTIONAL_INIT_VALUE(session->uid),
-               .gid = LTTNG_OPTIONAL_INIT_VALUE(session->gid),
-       };
-       /* Can never be NULL. */
-       const struct lttng_credentials *trigger_creds =
-                       lttng_trigger_get_credentials(trigger);
-
-       is_allowed = (lttng_credentials_is_equal_uid(trigger_creds, &session_creds)) ||
-                       (lttng_credentials_get_uid(trigger_creds) == 0);
-       if (!is_allowed) {
-               WARN("Trigger is not allowed to interact with session `%s`: session uid = %ld, session gid = %ld, trigger uid = %ld",
-                               session->name,
-                               (long int) session->uid,
-                               (long int) session->gid,
-                               (long int) lttng_credentials_get_uid(trigger_creds));
-       }
-
-       return is_allowed;
-}
-
-static const char *get_trigger_name(const struct lttng_trigger *trigger)
-{
-       const char *trigger_name;
-       enum lttng_trigger_status trigger_status;
-
-       trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
-       switch (trigger_status) {
-       case LTTNG_TRIGGER_STATUS_OK:
-               break;
-       case LTTNG_TRIGGER_STATUS_UNSET:
-               trigger_name = "(anonymous)";
-               break;
-       default:
-               trigger_name = "(failed to get name)";
-               break;
-       }
-
-       return trigger_name;
-}
-
-static int client_handle_transmission_status(
-               struct notification_client *client,
-               enum client_transmission_status status,
-               void *user_data)
-{
-       int ret = 0;
-       struct action_executor *executor = user_data;
-       bool update_communication = true;
-
-       switch (status) {
-       case CLIENT_TRANSMISSION_STATUS_COMPLETE:
-               DBG("Successfully sent full notification to client, client_id = %" PRIu64,
-                               client->id);
-               update_communication = false;
-               break;
-       case CLIENT_TRANSMISSION_STATUS_QUEUED:
-               DBG("Queued notification in client outgoing buffer, client_id = %" PRIu64,
-                               client->id);
-               break;
-       case CLIENT_TRANSMISSION_STATUS_FAIL:
-               DBG("Communication error occurred while sending notification to client, client_id = %" PRIu64,
-                               client->id);
-               break;
-       default:
-               ERR("Fatal error encoutered while sending notification to client, client_id = %" PRIu64,
-                               client->id);
-               ret = -1;
-               goto end;
-       }
-
-       if (!update_communication) {
-               goto end;
-       }
-
-       /* Safe to read client's id without locking as it is immutable. */
-       ret = notification_thread_client_communication_update(
-                       executor->notification_thread_handle, client->id,
-                       status);
-end:
-       return ret;
-}
-
-static int action_executor_notify_handler(struct action_executor *executor,
-               const struct action_work_item *work_item,
-               struct action_work_subitem *item)
-{
-       return notification_client_list_send_evaluation(work_item->client_list,
-                       work_item->trigger,
-                       work_item->evaluation,
-                       work_item->object_creds.is_set ?
-                                       &(work_item->object_creds.value) :
-                                       NULL,
-                       client_handle_transmission_status, executor);
-}
-
-static int action_executor_start_session_handler(
-               struct action_executor *executor,
-               const struct action_work_item *work_item,
-               struct action_work_subitem *item)
-{
-       int ret = 0;
-       const char *session_name;
-       enum lttng_action_status action_status;
-       struct ltt_session *session;
-       enum lttng_error_code cmd_ret;
-       struct lttng_action *action = item->action;
-
-       action_status = lttng_action_start_session_get_session_name(
-                       action, &session_name);
-       if (action_status != LTTNG_ACTION_STATUS_OK) {
-               ERR("Failed to get session name from `%s` action",
-                               get_action_name(action));
-               ret = -1;
-               goto end;
-       }
-
-       /*
-        * Validate if at the moment of the action was queued the session
-        * existed. If not skip the action altogether.
-        */
-       if (!item->context.session_id.is_set) {
-               DBG("Session `%s` was not present at the moment the work item was enqueued for %s` action of trigger `%s`",
-                               session_name, get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               lttng_action_increase_execution_failure_count(action);
-               ret = 0;
-               goto end;
-       }
-
-       session_lock_list();
-       session = session_find_by_name(session_name);
-       if (!session) {
-               DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
-                               session_name, get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               goto error_unlock_list;
-       }
-
-       /*
-        * Check if the session id is the same as when the work item was
-        * enqueued.
-        */
-       if (session->id != LTTNG_OPTIONAL_GET(item->context.session_id)) {
-               DBG("Session id for session `%s` (id: %" PRIu64
-                               " is not the same that was sampled (id: %" PRIu64
-                               " at the moment the work item was enqueued for %s` action of trigger `%s`",
-                               session_name, session->id,
-                               LTTNG_OPTIONAL_GET(item->context.session_id),
-                               get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               ret = 0;
-               goto error_unlock_list;
-       }
-
-       session_lock(session);
-       if (!is_trigger_allowed_for_session(work_item->trigger, session)) {
-               goto error_dispose_session;
-       }
-
-       cmd_ret = cmd_start_trace(session);
-       switch (cmd_ret) {
-       case LTTNG_OK:
-               DBG("Successfully started session `%s` on behalf of trigger `%s`",
-                               session_name, get_trigger_name(work_item->trigger));
-               break;
-       case LTTNG_ERR_TRACE_ALREADY_STARTED:
-               DBG("Attempted to start session `%s` on behalf of trigger `%s` but it was already started",
-                               session_name, get_trigger_name(work_item->trigger));
-               break;
-       default:
-               WARN("Failed to start session `%s` on behalf of trigger `%s`: %s",
-                               session_name, get_trigger_name(work_item->trigger),
-                               lttng_strerror(-cmd_ret));
-               lttng_action_increase_execution_failure_count(action);
-               break;
-       }
-
-error_dispose_session:
-       session_unlock(session);
-       session_put(session);
-error_unlock_list:
-       session_unlock_list();
-end:
-       return ret;
-}
-
-static int action_executor_stop_session_handler(
-               struct action_executor *executor,
-               const struct action_work_item *work_item,
-               struct action_work_subitem *item)
-{
-       int ret = 0;
-       const char *session_name;
-       enum lttng_action_status action_status;
-       struct ltt_session *session;
-       enum lttng_error_code cmd_ret;
-       struct lttng_action *action = item->action;
-
-       action_status = lttng_action_stop_session_get_session_name(
-                       action, &session_name);
-       if (action_status != LTTNG_ACTION_STATUS_OK) {
-               ERR("Failed to get session name from `%s` action",
-                               get_action_name(action));
-               ret = -1;
-               goto end;
-       }
-
-       /*
-        * Validate if, at the moment the action was queued, the target session
-        * existed. If not, skip the action altogether.
-        */
-       if (!item->context.session_id.is_set) {
-               DBG("Session `%s` was not present at the moment the work item was enqueued for %s` action of trigger `%s`",
-                               session_name, get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               lttng_action_increase_execution_failure_count(action);
-               ret = 0;
-               goto end;
-       }
-
-       session_lock_list();
-       session = session_find_by_name(session_name);
-       if (!session) {
-               DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
-                               session_name, get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               lttng_action_increase_execution_failure_count(action);
-               goto error_unlock_list;
-       }
-
-       /*
-        * Check if the session id is the same as when the work item was
-        * enqueued
-        */
-       if (session->id != LTTNG_OPTIONAL_GET(item->context.session_id)) {
-               DBG("Session id for session `%s` (id: %" PRIu64
-                               " is not the same that was sampled (id: %" PRIu64
-                               " at the moment the work item was enqueued for %s` action of trigger `%s`",
-                               session_name, session->id,
-                               LTTNG_OPTIONAL_GET(item->context.session_id),
-                               get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               ret = 0;
-               goto error_unlock_list;
-       }
-
-       session_lock(session);
-       if (!is_trigger_allowed_for_session(work_item->trigger, session)) {
-               goto error_dispose_session;
-       }
-
-       cmd_ret = cmd_stop_trace(session);
-       switch (cmd_ret) {
-       case LTTNG_OK:
-               DBG("Successfully stopped session `%s` on behalf of trigger `%s`",
-                               session_name, get_trigger_name(work_item->trigger));
-               break;
-       case LTTNG_ERR_TRACE_ALREADY_STOPPED:
-               DBG("Attempted to stop session `%s` on behalf of trigger `%s` but it was already stopped",
-                               session_name, get_trigger_name(work_item->trigger));
-               break;
-       default:
-               WARN("Failed to stop session `%s` on behalf of trigger `%s`: %s",
-                               session_name, get_trigger_name(work_item->trigger),
-                               lttng_strerror(-cmd_ret));
-               lttng_action_increase_execution_failure_count(action);
-               break;
-       }
-
-error_dispose_session:
-       session_unlock(session);
-       session_put(session);
-error_unlock_list:
-       session_unlock_list();
-end:
-       return ret;
-}
-
-static int action_executor_rotate_session_handler(
-               struct action_executor *executor,
-               const struct action_work_item *work_item,
-               struct action_work_subitem *item)
-{
-       int ret = 0;
-       const char *session_name;
-       enum lttng_action_status action_status;
-       struct ltt_session *session;
-       enum lttng_error_code cmd_ret;
-       struct lttng_action *action = item->action;
-
-       action_status = lttng_action_rotate_session_get_session_name(
-                       action, &session_name);
-       if (action_status != LTTNG_ACTION_STATUS_OK) {
-               ERR("Failed to get session name from `%s` action",
-                               get_action_name(action));
-               ret = -1;
-               goto end;
-       }
-
-       /*
-        * Validate if, at the moment the action was queued, the target session
-        * existed. If not, skip the action altogether.
-        */
-       if (!item->context.session_id.is_set) {
-               DBG("Session `%s` was not present at the moment the work item was enqueued for %s` action of trigger `%s`",
-                               session_name, get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               lttng_action_increase_execution_failure_count(action);
-               ret = 0;
-               goto end;
-       }
-
-       session_lock_list();
-       session = session_find_by_name(session_name);
-       if (!session) {
-               DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
-                               session_name, get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               lttng_action_increase_execution_failure_count(action);
-               goto error_unlock_list;
-       }
-
-       /*
-        * Check if the session id is the same as when the work item was
-        * enqueued.
-        */
-       if (session->id != LTTNG_OPTIONAL_GET(item->context.session_id)) {
-               DBG("Session id for session `%s` (id: %" PRIu64
-                   " is not the same that was sampled (id: %" PRIu64
-                   " at the moment the work item was enqueued for %s` action of trigger `%s`",
-                               session_name, session->id,
-                               LTTNG_OPTIONAL_GET(item->context.session_id),
-                               get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               ret = 0;
-               goto error_unlock_list;
-       }
-
-       session_lock(session);
-       if (!is_trigger_allowed_for_session(work_item->trigger, session)) {
-               goto error_dispose_session;
-       }
-
-       cmd_ret = cmd_rotate_session(session, NULL, false,
-                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
-       switch (cmd_ret) {
-       case LTTNG_OK:
-               DBG("Successfully started rotation of session `%s` on behalf of trigger `%s`",
-                               session_name, get_trigger_name(work_item->trigger));
-               break;
-       case LTTNG_ERR_ROTATION_PENDING:
-               DBG("Attempted to start a rotation of session `%s` on behalf of trigger `%s` but a rotation is already ongoing",
-                               session_name, get_trigger_name(work_item->trigger));
-               lttng_action_increase_execution_failure_count(action);
-               break;
-       case LTTNG_ERR_ROTATION_MULTIPLE_AFTER_STOP:
-       case LTTNG_ERR_ROTATION_AFTER_STOP_CLEAR:
-               DBG("Attempted to start a rotation of session `%s` on behalf of trigger `%s` but a rotation has already been completed since the last stop or clear",
-                               session_name, get_trigger_name(work_item->trigger));
-               break;
-       default:
-               WARN("Failed to start a rotation of session `%s` on behalf of trigger `%s`: %s",
-                               session_name, get_trigger_name(work_item->trigger),
-                               lttng_strerror(-cmd_ret));
-               lttng_action_increase_execution_failure_count(action);
-               break;
-       }
-
-error_dispose_session:
-       session_unlock(session);
-       session_put(session);
-error_unlock_list:
-       session_unlock_list();
-end:
-       return ret;
-}
-
-static int action_executor_snapshot_session_handler(
-               struct action_executor *executor,
-               const struct action_work_item *work_item,
-               struct action_work_subitem *item)
-{
-       int ret = 0;
-       const char *session_name;
-       enum lttng_action_status action_status;
-       struct ltt_session *session;
-       const struct lttng_snapshot_output default_snapshot_output = {
-               .max_size = UINT64_MAX,
-       };
-       const struct lttng_snapshot_output *snapshot_output =
-                       &default_snapshot_output;
-       enum lttng_error_code cmd_ret;
-       struct lttng_action *action = item->action;
-
-       /*
-        * Validate if, at the moment the action was queued, the target session
-        * existed. If not, skip the action altogether.
-        */
-       if (!item->context.session_id.is_set) {
-               DBG("Session was not present at the moment the work item was enqueued for %s` action of trigger `%s`",
-                               get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               lttng_action_increase_execution_failure_count(action);
-               ret = 0;
-               goto end;
-       }
-
-       action_status = lttng_action_snapshot_session_get_session_name(
-                       action, &session_name);
-       if (action_status != LTTNG_ACTION_STATUS_OK) {
-               ERR("Failed to get session name from `%s` action",
-                               get_action_name(action));
-               ret = -1;
-               goto end;
-       }
-
-       action_status = lttng_action_snapshot_session_get_output(
-                       action, &snapshot_output);
-       if (action_status != LTTNG_ACTION_STATUS_OK &&
-                       action_status != LTTNG_ACTION_STATUS_UNSET) {
-               ERR("Failed to get output from `%s` action",
-                               get_action_name(action));
-               ret = -1;
-               goto end;
-       }
-
-       session_lock_list();
-       session = session_find_by_name(session_name);
-       if (!session) {
-               DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
-                               session_name, get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               lttng_action_increase_execution_failure_count(action);
-               goto error_unlock_list;
-       }
-
-       /*
-        * Check if the session id is the same as when the work item was
-        * enqueued.
-        */
-       if (session->id != LTTNG_OPTIONAL_GET(item->context.session_id)) {
-               DBG("Session id for session `%s` (id: %" PRIu64
-                   " is not the same that was sampled (id: %" PRIu64
-                   " at the moment the work item was enqueued for %s` action of trigger `%s`",
-                               session_name, session->id,
-                               LTTNG_OPTIONAL_GET(item->context.session_id),
-                               get_action_name(action),
-                               get_trigger_name(work_item->trigger));
-               ret = 0;
-               goto error_unlock_list;
-       }
-
-       session_lock(session);
-       if (!is_trigger_allowed_for_session(work_item->trigger, session)) {
-               goto error_dispose_session;
-       }
-
-       cmd_ret = cmd_snapshot_record(session, snapshot_output, 0);
-       switch (cmd_ret) {
-       case LTTNG_OK:
-               DBG("Successfully recorded snapshot of session `%s` on behalf of trigger `%s`",
-                               session_name, get_trigger_name(work_item->trigger));
-               break;
-       default:
-               WARN("Failed to record snapshot of session `%s` on behalf of trigger `%s`: %s",
-                               session_name, get_trigger_name(work_item->trigger),
-                               lttng_strerror(-cmd_ret));
-               lttng_action_increase_execution_failure_count(action);
-               break;
-       }
-
-error_dispose_session:
-       session_unlock(session);
-       session_put(session);
-error_unlock_list:
-       session_unlock_list();
-end:
-       return ret;
-}
-
-static int action_executor_list_handler(struct action_executor *executor,
-               const struct action_work_item *work_item,
-               struct action_work_subitem *item)
-{
-       ERR("Execution of a list action by the action executor should never occur");
-       abort();
-}
-
-static int action_executor_generic_handler(struct action_executor *executor,
-               const struct action_work_item *work_item,
-               struct action_work_subitem *item)
-{
-       int ret;
-       struct lttng_action *action = item->action;
-       const enum lttng_action_type action_type = lttng_action_get_type(action);
-
-       LTTNG_ASSERT(action_type != LTTNG_ACTION_TYPE_UNKNOWN);
-
-       lttng_action_increase_execution_request_count(action);
-       if (!lttng_action_should_execute(action)) {
-               DBG("Policy prevented execution of action `%s` of trigger `%s` action work item %" PRIu64,
-                               get_action_name(action),
-                               get_trigger_name(work_item->trigger),
-                               work_item->id);
-               ret = 0;
-               goto end;
-       }
-
-       lttng_action_increase_execution_count(action);
-       DBG("Executing action `%s` of trigger `%s` action work item %" PRIu64,
-                       get_action_name(action),
-                       get_trigger_name(work_item->trigger),
-                       work_item->id);
-       ret = action_executors[action_type](executor, work_item, item);
-end:
-       return ret;
-}
-
-static int action_work_item_execute(struct action_executor *executor,
-               struct action_work_item *work_item)
-{
-       int ret;
-       size_t count, i;
-
-       DBG("Starting execution of action work item %" PRIu64 " of trigger `%s`",
-                       work_item->id, get_trigger_name(work_item->trigger));
-
-       count = lttng_dynamic_array_get_count(&work_item->subitems);
-       for (i = 0; i < count; i++) {
-               struct action_work_subitem *item;
-
-               item = lttng_dynamic_array_get_element(&work_item->subitems, i);
-               ret = action_executor_generic_handler(
-                               executor, work_item, item);
-               if (ret) {
-                       goto end;
-               }
-       }
-end:
-       DBG("Completed execution of action work item %" PRIu64 " of trigger `%s`",
-                       work_item->id, get_trigger_name(work_item->trigger));
-       return ret;
-}
-
-static void action_work_item_destroy(struct action_work_item *work_item)
-{
-       lttng_trigger_put(work_item->trigger);
-       lttng_evaluation_destroy(work_item->evaluation);
-       notification_client_list_put(work_item->client_list);
-       lttng_dynamic_array_reset(&work_item->subitems);
-       free(work_item);
-}
-
-static void *action_executor_thread(void *_data)
-{
-       struct action_executor *executor = _data;
-
-       LTTNG_ASSERT(executor);
-
-       health_register(the_health_sessiond,
-                       HEALTH_SESSIOND_TYPE_ACTION_EXECUTOR);
-
-       rcu_register_thread();
-       rcu_thread_online();
-
-       DBG("Entering work execution loop");
-       pthread_mutex_lock(&executor->work.lock);
-       while (!executor->should_quit) {
-               int ret = 0;
-               struct action_work_item *work_item;
-
-               health_code_update();
-               if (executor->work.pending_count == 0) {
-                       health_poll_entry();
-                       DBG("No work items enqueued, entering wait");
-                       pthread_cond_wait(&executor->work.cond,
-                                       &executor->work.lock);
-                       DBG("Woke-up from wait");
-                       health_poll_exit();
-                       continue;
-               }
-
-               /* Pop item from front of the list with work lock held. */
-               work_item = cds_list_first_entry(&executor->work.list,
-                               struct action_work_item, list_node);
-               cds_list_del(&work_item->list_node);
-               executor->work.pending_count--;
-
-               /*
-                * Work can be performed without holding the work lock,
-                * allowing new items to be queued.
-                */
-               pthread_mutex_unlock(&executor->work.lock);
-
-               /* Execute item only if a trigger is registered. */
-               lttng_trigger_lock(work_item->trigger);
-               if (!lttng_trigger_is_registered(work_item->trigger)) {
-                       const char *trigger_name = NULL;
-                       uid_t trigger_owner_uid;
-                       enum lttng_trigger_status trigger_status;
-
-                       trigger_name = get_trigger_name(work_item->trigger);
-
-                       trigger_status = lttng_trigger_get_owner_uid(
-                                       work_item->trigger, &trigger_owner_uid);
-                       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
-
-                       DBG("Work item skipped since the associated trigger is no longer registered: work item id = %" PRIu64 ", trigger name = '%s', trigger owner uid = %d",
-                                       work_item->id, trigger_name,
-                                       (int) trigger_owner_uid);
-                       ret = 0;
-                       goto skip_execute;
-               }
-
-               ret = action_work_item_execute(executor, work_item);
-
-       skip_execute:
-               lttng_trigger_unlock(work_item->trigger);
-               action_work_item_destroy(work_item);
-               if (ret) {
-                       /* Fatal error. */
-                       break;
-               }
-
-               health_code_update();
-               pthread_mutex_lock(&executor->work.lock);
-       }
-
-       if (executor->should_quit) {
-               pthread_mutex_unlock(&executor->work.lock);
-       }
-       DBG("Left work execution loop");
-
-       health_code_update();
-
-       rcu_thread_offline();
-       rcu_unregister_thread();
-       health_unregister(the_health_sessiond);
-
-       return NULL;
-}
-
-static bool shutdown_action_executor_thread(void *_data)
-{
-       struct action_executor *executor = _data;
-
-       pthread_mutex_lock(&executor->work.lock);
-       executor->should_quit = true;
-       pthread_cond_signal(&executor->work.cond);
-       pthread_mutex_unlock(&executor->work.lock);
-       return true;
-}
-
-static void clean_up_action_executor_thread(void *_data)
-{
-       struct action_executor *executor = _data;
-
-       LTTNG_ASSERT(cds_list_empty(&executor->work.list));
-
-       pthread_mutex_destroy(&executor->work.lock);
-       pthread_cond_destroy(&executor->work.cond);
-       free(executor);
-}
-
-struct action_executor *action_executor_create(
-               struct notification_thread_handle *handle)
-{
-       struct action_executor *executor = zmalloc(sizeof(*executor));
-
-       if (!executor) {
-               goto end;
-       }
-
-       CDS_INIT_LIST_HEAD(&executor->work.list);
-       pthread_cond_init(&executor->work.cond, NULL);
-       pthread_mutex_init(&executor->work.lock, NULL);
-       executor->notification_thread_handle = handle;
-
-       executor->thread = lttng_thread_create(THREAD_NAME,
-                       action_executor_thread, shutdown_action_executor_thread,
-                       clean_up_action_executor_thread, executor);
-end:
-       return executor;
-}
-
-void action_executor_destroy(struct action_executor *executor)
-{
-       struct action_work_item *work_item, *tmp;
-
-       /* TODO Wait for work list to drain? */
-       lttng_thread_shutdown(executor->thread);
-       pthread_mutex_lock(&executor->work.lock);
-       if (executor->work.pending_count != 0) {
-               WARN("%" PRIu64
-                       " trigger action%s still queued for execution and will be discarded",
-                               executor->work.pending_count,
-                               executor->work.pending_count == 1 ? " is" :
-                                                                   "s are");
-       }
-
-       cds_list_for_each_entry_safe (
-                       work_item, tmp, &executor->work.list, list_node) {
-               WARN("Discarding action work item %" PRIu64
-                               " associated to trigger `%s`",
-                               work_item->id, get_trigger_name(work_item->trigger));
-               cds_list_del(&work_item->list_node);
-               action_work_item_destroy(work_item);
-       }
-       pthread_mutex_unlock(&executor->work.lock);
-       lttng_thread_put(executor->thread);
-}
-
-/* RCU read-lock must be held by the caller. */
-enum action_executor_status action_executor_enqueue_trigger(
-               struct action_executor *executor,
-               struct lttng_trigger *trigger,
-               struct lttng_evaluation *evaluation,
-               const struct lttng_credentials *object_creds,
-               struct notification_client_list *client_list)
-{
-       int ret;
-       enum action_executor_status executor_status = ACTION_EXECUTOR_STATUS_OK;
-       const uint64_t work_item_id = executor->next_work_item_id++;
-       struct action_work_item *work_item;
-       bool signal = false;
-
-       LTTNG_ASSERT(trigger);
-
-       pthread_mutex_lock(&executor->work.lock);
-       /* Check for queue overflow. */
-       if (executor->work.pending_count >= MAX_QUEUED_WORK_COUNT) {
-               /* Most likely spammy, remove if it is the case. */
-               DBG("Refusing to enqueue action for trigger (overflow): trigger name = `%s`, work item id = %" PRIu64,
-                               get_trigger_name(trigger), work_item_id);
-               executor_status = ACTION_EXECUTOR_STATUS_OVERFLOW;
-               goto error_unlock;
-       }
-
-       work_item = zmalloc(sizeof(*work_item));
-       if (!work_item) {
-               PERROR("Failed to allocate action executor work item: trigger name = '%s'",
-                               get_trigger_name(trigger));
-               executor_status = ACTION_EXECUTOR_STATUS_ERROR;
-               goto error_unlock;
-       }
-
-       lttng_trigger_get(trigger);
-       if (client_list) {
-               const bool reference_acquired =
-                               notification_client_list_get(client_list);
-
-               LTTNG_ASSERT(reference_acquired);
-       }
-
-       *work_item = (typeof(*work_item)){
-                       .id = work_item_id,
-                       .trigger = trigger,
-                       /* Ownership transferred to the work item. */
-                       .evaluation = evaluation,
-                       .object_creds = {
-                               .is_set = !!object_creds,
-                               .value = object_creds ? *object_creds :
-                                       (typeof(work_item->object_creds.value)) {},
-                       },
-                       .client_list = client_list,
-                       .list_node = CDS_LIST_HEAD_INIT(work_item->list_node),
-       };
-
-       evaluation = NULL;
-
-       /* Build the array of action work subitems for the passed trigger. */
-       lttng_dynamic_array_init(&work_item->subitems,
-                       sizeof(struct action_work_subitem),
-                       action_work_subitem_destructor);
-
-       ret = populate_subitem_array_from_trigger(
-                       trigger, &work_item->subitems);
-       if (ret) {
-               ERR("Failed to populate work item sub items on behalf of trigger: trigger name = `%s`",
-                               get_trigger_name(trigger));
-               executor_status = ACTION_EXECUTOR_STATUS_ERROR;
-               goto error_unlock;
-       }
-
-       cds_list_add_tail(&work_item->list_node, &executor->work.list);
-       executor->work.pending_count++;
-       DBG("Enqueued action for trigger: trigger name = `%s`, work item id = %" PRIu64,
-                       get_trigger_name(trigger), work_item_id);
-       signal = true;
-
-error_unlock:
-       if (signal) {
-               pthread_cond_signal(&executor->work.cond);
-       }
-
-       pthread_mutex_unlock(&executor->work.lock);
-       lttng_evaluation_destroy(evaluation);
-       return executor_status;
-}
-
-static int add_action_to_subitem_array(struct lttng_action *action,
-               struct lttng_dynamic_array *subitems)
-{
-       int ret = 0;
-       enum lttng_action_type type = lttng_action_get_type(action);
-       const char *session_name = NULL;
-       enum lttng_action_status status;
-       struct action_work_subitem subitem = {
-               .action = NULL,
-               .context = {
-                       .session_id = LTTNG_OPTIONAL_INIT_UNSET,
-               },
-       };
-
-       LTTNG_ASSERT(action);
-       LTTNG_ASSERT(subitems);
-
-       if (type == LTTNG_ACTION_TYPE_LIST) {
-               unsigned int count, i;
-
-               status = lttng_action_list_get_count(action, &count);
-               LTTNG_ASSERT(status == LTTNG_ACTION_STATUS_OK);
-
-               for (i = 0; i < count; i++) {
-                       struct lttng_action *inner_action = NULL;
-
-                       inner_action = lttng_action_list_borrow_mutable_at_index(
-                                       action, i);
-                       LTTNG_ASSERT(inner_action);
-                       ret = add_action_to_subitem_array(
-                                       inner_action, subitems);
-                       if (ret) {
-                               goto end;
-                       }
-               }
-
-               /*
-                * Go directly to the end since there is no need to add the
-                * list action by itself to the subitems array.
-                */
-               goto end;
-       }
-
-       /* Gather execution context. */
-       switch (type) {
-       case LTTNG_ACTION_TYPE_NOTIFY:
-               break;
-       case LTTNG_ACTION_TYPE_START_SESSION:
-               status = lttng_action_start_session_get_session_name(
-                               action, &session_name);
-               LTTNG_ASSERT(status == LTTNG_ACTION_STATUS_OK);
-               break;
-       case LTTNG_ACTION_TYPE_STOP_SESSION:
-               status = lttng_action_stop_session_get_session_name(
-                               action, &session_name);
-               LTTNG_ASSERT(status == LTTNG_ACTION_STATUS_OK);
-               break;
-       case LTTNG_ACTION_TYPE_ROTATE_SESSION:
-               status = lttng_action_rotate_session_get_session_name(
-                               action, &session_name);
-               LTTNG_ASSERT(status == LTTNG_ACTION_STATUS_OK);
-               break;
-       case LTTNG_ACTION_TYPE_SNAPSHOT_SESSION:
-               status = lttng_action_snapshot_session_get_session_name(
-                               action, &session_name);
-               LTTNG_ASSERT(status == LTTNG_ACTION_STATUS_OK);
-               break;
-       case LTTNG_ACTION_TYPE_LIST:
-       case LTTNG_ACTION_TYPE_UNKNOWN:
-               /* Fallthrough */
-       default:
-               abort();
-               break;
-       }
-
-       /*
-        * Fetch the session execution context info as needed.
-        * Note that we could decide to not add an action for which we know the
-        * execution will not happen (i.e no session exists for that name). For
-        * now we leave the decision to skip to the action executor for sake of
-        * simplicity and consistency.
-        */
-       if (session_name != NULL) {
-               uint64_t session_id;
-
-               /*
-                * Instantaneous sampling of the session id if present.
-                *
-                * This method is preferred over `sessiond_find_by_name` then
-                * fetching the session'd id since `sessiond_find_by_name`
-                * requires the session list lock to be taken.
-                *
-                * Taking the session list lock can lead to a deadlock
-                * between the action executor and the notification thread
-                * (caller of add_action_to_subitem_array). It is okay if the
-                * session state changes between the enqueuing time and the
-                * execution time. The execution context is validated at
-                * execution time.
-                */
-               if (sample_session_id_by_name(session_name, &session_id)) {
-                       LTTNG_OPTIONAL_SET(&subitem.context.session_id,
-                                       session_id);
-               }
-       }
-
-       /* Get a reference to the action. */
-       lttng_action_get(action);
-       subitem.action = action;
-
-       ret = lttng_dynamic_array_add_element(subitems, &subitem);
-       if (ret) {
-               ERR("Failed to add work subitem to the subitem array");
-               lttng_action_put(action);
-               ret = -1;
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-static int populate_subitem_array_from_trigger(struct lttng_trigger *trigger,
-               struct lttng_dynamic_array *subitems)
-{
-       struct lttng_action *action;
-
-       action = lttng_trigger_get_action(trigger);
-       LTTNG_ASSERT(action);
-
-       return add_action_to_subitem_array(action, subitems);
-}
diff --git a/src/bin/lttng-sessiond/action-executor.cpp b/src/bin/lttng-sessiond/action-executor.cpp
new file mode 100644 (file)
index 0000000..cc7834f
--- /dev/null
@@ -0,0 +1,1112 @@
+/*
+ * Copyright (C) 2020 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include "action-executor.h"
+#include "cmd.h"
+#include "health-sessiond.h"
+#include "lttng-sessiond.h"
+#include "notification-thread-internal.h"
+#include "session.h"
+#include "thread.h"
+#include <common/dynamic-array.h>
+#include <common/macros.h>
+#include <common/optional.h>
+#include <lttng/action/action-internal.h>
+#include <lttng/action/list-internal.h>
+#include <lttng/action/list.h>
+#include <lttng/action/notify-internal.h>
+#include <lttng/action/notify.h>
+#include <lttng/action/rotate-session.h>
+#include <lttng/action/snapshot-session.h>
+#include <lttng/action/start-session.h>
+#include <lttng/action/stop-session.h>
+#include <lttng/condition/evaluation.h>
+#include <lttng/condition/event-rule-matches-internal.h>
+#include <lttng/lttng-error.h>
+#include <lttng/trigger/trigger-internal.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <urcu/list.h>
+
+#define THREAD_NAME "Action Executor"
+#define MAX_QUEUED_WORK_COUNT 8192
+
+/*
+ * A work item is composed of a dynamic array of sub-items which
+ * represent a flattened, and augmented, version of a trigger's actions.
+ *
+ * We cannot rely solely on the trigger's actions since each action can have an
+ * execution context we need to comply with.
+ *
+ * The notion of execution context is required since, for some actions, the
+ * associated objects are referenced by name and not by id. This can lead to
+ * a number of ambiguities when executing an action work item.
+ *
+ * For example, let's take a simple trigger such as:
+ *   - condition: ust event a
+ *   - action: start session S
+ *
+ * At time T, session S exists.
+ * At T + 1, the event A is hit.
+ * At T + 2, the tracer event notification is received and the work item is
+ * queued. At this point, session S has an id of 1.
+ * At T + 3, session S is destroyed and a new session S is created, with a
+ * resulting id of 200.
+ * At T + 4, the work item is popped from the queue and begins execution; it
+ * will start the session S with id 200 instead of the session S with id 1
+ * that existed at the time of queuing.
+ *
+ * The context to be respected is the one when the work item is queued. If the
+ * execution context is not the same at the moment of execution, we skip the
+ * execution of that sub-item.
+ *
+ * The same policy applies to the validity of the associated trigger object
+ * at the moment of execution: if the trigger is found to be unregistered,
+ * the execution is skipped.
+ */
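
The design described in the comment above boils down to sampling an identifier at enqueue time and re-checking it at execution time. A minimal sketch of that pattern, using hypothetical names (`work_context`, `context_still_valid`) rather than the actual sessiond types defined below:

    #include <cstdint>

    struct work_context {
            bool id_is_set;
            /* Sampled when the work item is enqueued. */
            uint64_t sampled_session_id;
    };

    /* Returns true only if the session seen at execution time is the one sampled. */
    static bool context_still_valid(const struct work_context *ctx,
                    uint64_t current_session_id)
    {
            return ctx->id_is_set && ctx->sampled_session_id == current_session_id;
    }
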
+
+struct action_work_item {
+       uint64_t id;
+
+       /*
+        * The actions to be executed with their respective execution context.
+        * See struct `action_work_subitem`.
+        */
+       struct lttng_dynamic_array subitems;
+
+       /* Execution context data */
+       struct lttng_trigger *trigger;
+       struct lttng_evaluation *evaluation;
+       struct notification_client_list *client_list;
+       LTTNG_OPTIONAL(struct lttng_credentials) object_creds;
+       struct cds_list_head list_node;
+};
+
+struct action_work_subitem {
+       struct lttng_action *action;
+       struct {
+               /* Used by actions targeting a session. */
+               LTTNG_OPTIONAL(uint64_t) session_id;
+       } context;
+};
+
+struct action_executor {
+       struct lttng_thread *thread;
+       struct notification_thread_handle *notification_thread_handle;
+       struct {
+               uint64_t pending_count;
+               struct cds_list_head list;
+               pthread_cond_t cond;
+               pthread_mutex_t lock;
+       } work;
+       bool should_quit;
+       uint64_t next_work_item_id;
+};
+
+/*
+ * Only return non-zero on a fatal error that should shut down the action
+ * executor.
+ */
+typedef int (*action_executor_handler)(struct action_executor *executor,
+               const struct action_work_item *,
+               struct action_work_subitem *item);
+
+static int action_executor_notify_handler(struct action_executor *executor,
+               const struct action_work_item *,
+               struct action_work_subitem *);
+static int action_executor_start_session_handler(
+               struct action_executor *executor,
+               const struct action_work_item *,
+               struct action_work_subitem *);
+static int action_executor_stop_session_handler(
+               struct action_executor *executor,
+               const struct action_work_item *,
+               struct action_work_subitem *);
+static int action_executor_rotate_session_handler(
+               struct action_executor *executor,
+               const struct action_work_item *,
+               struct action_work_subitem *);
+static int action_executor_snapshot_session_handler(
+               struct action_executor *executor,
+               const struct action_work_item *,
+               struct action_work_subitem *);
+static int action_executor_list_handler(struct action_executor *executor,
+               const struct action_work_item *,
+               struct action_work_subitem *);
+static int action_executor_generic_handler(struct action_executor *executor,
+               const struct action_work_item *,
+               struct action_work_subitem *);
+
+static const action_executor_handler action_executors[] = {
+       action_executor_notify_handler,
+       action_executor_start_session_handler,
+       action_executor_stop_session_handler,
+       action_executor_rotate_session_handler,
+       action_executor_snapshot_session_handler,
+       action_executor_list_handler,
+};
+
+/* Forward declaration */
+static int add_action_to_subitem_array(struct lttng_action *action,
+               struct lttng_dynamic_array *subitems);
+
+static int populate_subitem_array_from_trigger(struct lttng_trigger *trigger,
+               struct lttng_dynamic_array *subitems);
+
+static void action_work_subitem_destructor(void *element)
+{
+       struct action_work_subitem *subitem = (action_work_subitem *) element;
+
+       lttng_action_put(subitem->action);
+}
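
Note the `(action_work_subitem *)` cast above: C++ does not implicitly convert `void *` to other object pointer types the way C does, which is why the callbacks and `zmalloc()` results throughout this file gain explicit casts. A stand-alone illustration of the rule, using a hypothetical `item` type rather than sessiond code:

    #include <cstdlib>

    struct item {
            int value;
    };

    static void item_destructor(void *element)
    {
            /* Implicit in C, but requires an explicit cast in C++. */
            struct item *item_ptr = (struct item *) element;

            free(item_ptr);
    }
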
+
+static const char *get_action_name(const struct lttng_action *action)
+{
+       const enum lttng_action_type action_type = lttng_action_get_type(action);
+
+       LTTNG_ASSERT(action_type != LTTNG_ACTION_TYPE_UNKNOWN);
+
+       return lttng_action_type_string(action_type);
+}
+
+/* Check if this trigger is allowed to interact with a given session. */
+static bool is_trigger_allowed_for_session(const struct lttng_trigger *trigger,
+               struct ltt_session *session)
+{
+       bool is_allowed = false;
+       const struct lttng_credentials session_creds = {
+               .uid = LTTNG_OPTIONAL_INIT_VALUE(session->uid),
+               .gid = LTTNG_OPTIONAL_INIT_VALUE(session->gid),
+       };
+       /* Can never be NULL. */
+       const struct lttng_credentials *trigger_creds =
+                       lttng_trigger_get_credentials(trigger);
+
+       is_allowed = (lttng_credentials_is_equal_uid(trigger_creds, &session_creds)) ||
+                       (lttng_credentials_get_uid(trigger_creds) == 0);
+       if (!is_allowed) {
+               WARN("Trigger is not allowed to interact with session `%s`: session uid = %ld, session gid = %ld, trigger uid = %ld",
+                               session->name,
+                               (long int) session->uid,
+                               (long int) session->gid,
+                               (long int) lttng_credentials_get_uid(trigger_creds));
+       }
+
+       return is_allowed;
+}
+
+static const char *get_trigger_name(const struct lttng_trigger *trigger)
+{
+       const char *trigger_name;
+       enum lttng_trigger_status trigger_status;
+
+       trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
+       switch (trigger_status) {
+       case LTTNG_TRIGGER_STATUS_OK:
+               break;
+       case LTTNG_TRIGGER_STATUS_UNSET:
+               trigger_name = "(anonymous)";
+               break;
+       default:
+               trigger_name = "(failed to get name)";
+               break;
+       }
+
+       return trigger_name;
+}
+
+static int client_handle_transmission_status(
+               struct notification_client *client,
+               enum client_transmission_status status,
+               void *user_data)
+{
+       int ret = 0;
+       struct action_executor *executor = (action_executor *) user_data;
+       bool update_communication = true;
+
+       switch (status) {
+       case CLIENT_TRANSMISSION_STATUS_COMPLETE:
+               DBG("Successfully sent full notification to client, client_id = %" PRIu64,
+                               client->id);
+               update_communication = false;
+               break;
+       case CLIENT_TRANSMISSION_STATUS_QUEUED:
+               DBG("Queued notification in client outgoing buffer, client_id = %" PRIu64,
+                               client->id);
+               break;
+       case CLIENT_TRANSMISSION_STATUS_FAIL:
+               DBG("Communication error occurred while sending notification to client, client_id = %" PRIu64,
+                               client->id);
+               break;
+       default:
+               ERR("Fatal error encountered while sending notification to client, client_id = %" PRIu64,
+                               client->id);
+               ret = -1;
+               goto end;
+       }
+
+       if (!update_communication) {
+               goto end;
+       }
+
+       /* Safe to read client's id without locking as it is immutable. */
+       ret = notification_thread_client_communication_update(
+                       executor->notification_thread_handle, client->id,
+                       status);
+end:
+       return ret;
+}
+
+static int action_executor_notify_handler(struct action_executor *executor,
+               const struct action_work_item *work_item,
+               struct action_work_subitem *item)
+{
+       return notification_client_list_send_evaluation(work_item->client_list,
+                       work_item->trigger,
+                       work_item->evaluation,
+                       work_item->object_creds.is_set ?
+                                       &(work_item->object_creds.value) :
+                                       NULL,
+                       client_handle_transmission_status, executor);
+}
+
+static int action_executor_start_session_handler(
+               struct action_executor *executor,
+               const struct action_work_item *work_item,
+               struct action_work_subitem *item)
+{
+       int ret = 0;
+       const char *session_name;
+       enum lttng_action_status action_status;
+       struct ltt_session *session;
+       enum lttng_error_code cmd_ret;
+       struct lttng_action *action = item->action;
+
+       action_status = lttng_action_start_session_get_session_name(
+                       action, &session_name);
+       if (action_status != LTTNG_ACTION_STATUS_OK) {
+               ERR("Failed to get session name from `%s` action",
+                               get_action_name(action));
+               ret = -1;
+               goto end;
+       }
+
+       /*
+        * Validate if, at the moment the action was queued, the target session
+        * existed. If not, skip the action altogether.
+        */
+       if (!item->context.session_id.is_set) {
+               DBG("Session `%s` was not present at the moment the work item was enqueued for `%s` action of trigger `%s`",
+                               session_name, get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               lttng_action_increase_execution_failure_count(action);
+               ret = 0;
+               goto end;
+       }
+
+       session_lock_list();
+       session = session_find_by_name(session_name);
+       if (!session) {
+               DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
+                               session_name, get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               goto error_unlock_list;
+       }
+
+       /*
+        * Check if the session id is the same as when the work item was
+        * enqueued.
+        */
+       if (session->id != LTTNG_OPTIONAL_GET(item->context.session_id)) {
+               DBG("Session id for session `%s` (id: %" PRIu64
+                               ") is not the same as the id that was sampled (id: %" PRIu64
+                               ") at the moment the work item was enqueued for `%s` action of trigger `%s`",
+                               session_name, session->id,
+                               LTTNG_OPTIONAL_GET(item->context.session_id),
+                               get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               ret = 0;
+               goto error_unlock_list;
+       }
+
+       session_lock(session);
+       if (!is_trigger_allowed_for_session(work_item->trigger, session)) {
+               goto error_dispose_session;
+       }
+
+       cmd_ret = (lttng_error_code) cmd_start_trace(session);
+       switch (cmd_ret) {
+       case LTTNG_OK:
+               DBG("Successfully started session `%s` on behalf of trigger `%s`",
+                               session_name, get_trigger_name(work_item->trigger));
+               break;
+       case LTTNG_ERR_TRACE_ALREADY_STARTED:
+               DBG("Attempted to start session `%s` on behalf of trigger `%s` but it was already started",
+                               session_name, get_trigger_name(work_item->trigger));
+               break;
+       default:
+               WARN("Failed to start session `%s` on behalf of trigger `%s`: %s",
+                               session_name, get_trigger_name(work_item->trigger),
+                               lttng_strerror(-cmd_ret));
+               lttng_action_increase_execution_failure_count(action);
+               break;
+       }
+
+error_dispose_session:
+       session_unlock(session);
+       session_put(session);
+error_unlock_list:
+       session_unlock_list();
+end:
+       return ret;
+}
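
The `(lttng_error_code)` cast on the `cmd_*()` call above, and on the similar calls in the handlers below, exists because C++ will not implicitly convert an integer to an enumeration type, while C does; the command functions presumably return a plain `int`. A minimal illustration of the rule, with hypothetical names:

    enum example_error_code { EXAMPLE_OK = 10 };

    static int example_command(void)
    {
            return EXAMPLE_OK;
    }

    static enum example_error_code example_caller(void)
    {
            /*
             * `enum example_error_code ret = example_command();` is valid C
             * but ill-formed C++; the explicit cast is required.
             */
            return (enum example_error_code) example_command();
    }
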
+
+static int action_executor_stop_session_handler(
+               struct action_executor *executor,
+               const struct action_work_item *work_item,
+               struct action_work_subitem *item)
+{
+       int ret = 0;
+       const char *session_name;
+       enum lttng_action_status action_status;
+       struct ltt_session *session;
+       enum lttng_error_code cmd_ret;
+       struct lttng_action *action = item->action;
+
+       action_status = lttng_action_stop_session_get_session_name(
+                       action, &session_name);
+       if (action_status != LTTNG_ACTION_STATUS_OK) {
+               ERR("Failed to get session name from `%s` action",
+                               get_action_name(action));
+               ret = -1;
+               goto end;
+       }
+
+       /*
+        * Validate if, at the moment the action was queued, the target session
+        * existed. If not, skip the action altogether.
+        */
+       if (!item->context.session_id.is_set) {
+               DBG("Session `%s` was not present at the moment the work item was enqueued for `%s` action of trigger `%s`",
+                               session_name, get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               lttng_action_increase_execution_failure_count(action);
+               ret = 0;
+               goto end;
+       }
+
+       session_lock_list();
+       session = session_find_by_name(session_name);
+       if (!session) {
+               DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
+                               session_name, get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               lttng_action_increase_execution_failure_count(action);
+               goto error_unlock_list;
+       }
+
+       /*
+        * Check if the session id is the same as when the work item was
+        * enqueued
+        */
+       if (session->id != LTTNG_OPTIONAL_GET(item->context.session_id)) {
+               DBG("Session id for session `%s` (id: %" PRIu64
+                               ") is not the same as the id that was sampled (id: %" PRIu64
+                               ") at the moment the work item was enqueued for `%s` action of trigger `%s`",
+                               session_name, session->id,
+                               LTTNG_OPTIONAL_GET(item->context.session_id),
+                               get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               ret = 0;
+               goto error_unlock_list;
+       }
+
+       session_lock(session);
+       if (!is_trigger_allowed_for_session(work_item->trigger, session)) {
+               goto error_dispose_session;
+       }
+
+       cmd_ret = (lttng_error_code) cmd_stop_trace(session);
+       switch (cmd_ret) {
+       case LTTNG_OK:
+               DBG("Successfully stopped session `%s` on behalf of trigger `%s`",
+                               session_name, get_trigger_name(work_item->trigger));
+               break;
+       case LTTNG_ERR_TRACE_ALREADY_STOPPED:
+               DBG("Attempted to stop session `%s` on behalf of trigger `%s` but it was already stopped",
+                               session_name, get_trigger_name(work_item->trigger));
+               break;
+       default:
+               WARN("Failed to stop session `%s` on behalf of trigger `%s`: %s",
+                               session_name, get_trigger_name(work_item->trigger),
+                               lttng_strerror(-cmd_ret));
+               lttng_action_increase_execution_failure_count(action);
+               break;
+       }
+
+error_dispose_session:
+       session_unlock(session);
+       session_put(session);
+error_unlock_list:
+       session_unlock_list();
+end:
+       return ret;
+}
+
+static int action_executor_rotate_session_handler(
+               struct action_executor *executor,
+               const struct action_work_item *work_item,
+               struct action_work_subitem *item)
+{
+       int ret = 0;
+       const char *session_name;
+       enum lttng_action_status action_status;
+       struct ltt_session *session;
+       enum lttng_error_code cmd_ret;
+       struct lttng_action *action = item->action;
+
+       action_status = lttng_action_rotate_session_get_session_name(
+                       action, &session_name);
+       if (action_status != LTTNG_ACTION_STATUS_OK) {
+               ERR("Failed to get session name from `%s` action",
+                               get_action_name(action));
+               ret = -1;
+               goto end;
+       }
+
+       /*
+        * Validate if, at the moment the action was queued, the target session
+        * existed. If not, skip the action altogether.
+        */
+       if (!item->context.session_id.is_set) {
+               DBG("Session `%s` was not present at the moment the work item was enqueued for `%s` action of trigger `%s`",
+                               session_name, get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               lttng_action_increase_execution_failure_count(action);
+               ret = 0;
+               goto end;
+       }
+
+       session_lock_list();
+       session = session_find_by_name(session_name);
+       if (!session) {
+               DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
+                               session_name, get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               lttng_action_increase_execution_failure_count(action);
+               goto error_unlock_list;
+       }
+
+       /*
+        * Check if the session id is the same as when the work item was
+        * enqueued.
+        */
+       if (session->id != LTTNG_OPTIONAL_GET(item->context.session_id)) {
+               DBG("Session id for session `%s` (id: %" PRIu64
+                   ") is not the same as the id that was sampled (id: %" PRIu64
+                   ") at the moment the work item was enqueued for `%s` action of trigger `%s`",
+                               session_name, session->id,
+                               LTTNG_OPTIONAL_GET(item->context.session_id),
+                               get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               ret = 0;
+               goto error_unlock_list;
+       }
+
+       session_lock(session);
+       if (!is_trigger_allowed_for_session(work_item->trigger, session)) {
+               goto error_dispose_session;
+       }
+
+       cmd_ret = (lttng_error_code) cmd_rotate_session(session, NULL, false,
+                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
+       switch (cmd_ret) {
+       case LTTNG_OK:
+               DBG("Successfully started rotation of session `%s` on behalf of trigger `%s`",
+                               session_name, get_trigger_name(work_item->trigger));
+               break;
+       case LTTNG_ERR_ROTATION_PENDING:
+               DBG("Attempted to start a rotation of session `%s` on behalf of trigger `%s` but a rotation is already ongoing",
+                               session_name, get_trigger_name(work_item->trigger));
+               lttng_action_increase_execution_failure_count(action);
+               break;
+       case LTTNG_ERR_ROTATION_MULTIPLE_AFTER_STOP:
+       case LTTNG_ERR_ROTATION_AFTER_STOP_CLEAR:
+               DBG("Attempted to start a rotation of session `%s` on behalf of trigger `%s` but a rotation has already been completed since the last stop or clear",
+                               session_name, get_trigger_name(work_item->trigger));
+               break;
+       default:
+               WARN("Failed to start a rotation of session `%s` on behalf of trigger `%s`: %s",
+                               session_name, get_trigger_name(work_item->trigger),
+                               lttng_strerror(-cmd_ret));
+               lttng_action_increase_execution_failure_count(action);
+               break;
+       }
+
+error_dispose_session:
+       session_unlock(session);
+       session_put(session);
+error_unlock_list:
+       session_unlock_list();
+end:
+       return ret;
+}
+
+static int action_executor_snapshot_session_handler(
+               struct action_executor *executor,
+               const struct action_work_item *work_item,
+               struct action_work_subitem *item)
+{
+       int ret = 0;
+       const char *session_name;
+       enum lttng_action_status action_status;
+       struct ltt_session *session;
+       lttng_snapshot_output default_snapshot_output;
+       const struct lttng_snapshot_output *snapshot_output =
+                       &default_snapshot_output;
+       enum lttng_error_code cmd_ret;
+       struct lttng_action *action = item->action;
+
+       default_snapshot_output.max_size = UINT64_MAX;
+
+       /*
+        * Validate if, at the moment the action was queued, the target session
+        * existed. If not, skip the action altogether.
+        */
+       if (!item->context.session_id.is_set) {
+               DBG("Session was not present at the moment the work item was enqueued for `%s` action of trigger `%s`",
+                               get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               lttng_action_increase_execution_failure_count(action);
+               ret = 0;
+               goto end;
+       }
+
+       action_status = lttng_action_snapshot_session_get_session_name(
+                       action, &session_name);
+       if (action_status != LTTNG_ACTION_STATUS_OK) {
+               ERR("Failed to get session name from `%s` action",
+                               get_action_name(action));
+               ret = -1;
+               goto end;
+       }
+
+       action_status = lttng_action_snapshot_session_get_output(
+                       action, &snapshot_output);
+       if (action_status != LTTNG_ACTION_STATUS_OK &&
+                       action_status != LTTNG_ACTION_STATUS_UNSET) {
+               ERR("Failed to get output from `%s` action",
+                               get_action_name(action));
+               ret = -1;
+               goto end;
+       }
+
+       session_lock_list();
+       session = session_find_by_name(session_name);
+       if (!session) {
+               DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
+                               session_name, get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               lttng_action_increase_execution_failure_count(action);
+               goto error_unlock_list;
+       }
+
+       /*
+        * Check if the session id is the same as when the work item was
+        * enqueued.
+        */
+       if (session->id != LTTNG_OPTIONAL_GET(item->context.session_id)) {
+               DBG("Session id for session `%s` (id: %" PRIu64
+                   ") is not the same as the id that was sampled (id: %" PRIu64
+                   ") at the moment the work item was enqueued for `%s` action of trigger `%s`",
+                               session_name, session->id,
+                               LTTNG_OPTIONAL_GET(item->context.session_id),
+                               get_action_name(action),
+                               get_trigger_name(work_item->trigger));
+               ret = 0;
+               goto error_unlock_list;
+       }
+
+       session_lock(session);
+       if (!is_trigger_allowed_for_session(work_item->trigger, session)) {
+               goto error_dispose_session;
+       }
+
+       cmd_ret = (lttng_error_code) cmd_snapshot_record(session, snapshot_output, 0);
+       switch (cmd_ret) {
+       case LTTNG_OK:
+               DBG("Successfully recorded snapshot of session `%s` on behalf of trigger `%s`",
+                               session_name, get_trigger_name(work_item->trigger));
+               break;
+       default:
+               WARN("Failed to record snapshot of session `%s` on behalf of trigger `%s`: %s",
+                               session_name, get_trigger_name(work_item->trigger),
+                               lttng_strerror(-cmd_ret));
+               lttng_action_increase_execution_failure_count(action);
+               break;
+       }
+
+error_dispose_session:
+       session_unlock(session);
+       session_put(session);
+error_unlock_list:
+       session_unlock_list();
+end:
+       return ret;
+}
+
+static int action_executor_list_handler(struct action_executor *executor,
+               const struct action_work_item *work_item,
+               struct action_work_subitem *item)
+{
+       ERR("Execution of a list action by the action executor should never occur");
+       abort();
+}
+
+static int action_executor_generic_handler(struct action_executor *executor,
+               const struct action_work_item *work_item,
+               struct action_work_subitem *item)
+{
+       int ret;
+       struct lttng_action *action = item->action;
+       const enum lttng_action_type action_type = lttng_action_get_type(action);
+
+       LTTNG_ASSERT(action_type != LTTNG_ACTION_TYPE_UNKNOWN);
+
+       lttng_action_increase_execution_request_count(action);
+       if (!lttng_action_should_execute(action)) {
+               DBG("Policy prevented execution of action `%s` of trigger `%s` action work item %" PRIu64,
+                               get_action_name(action),
+                               get_trigger_name(work_item->trigger),
+                               work_item->id);
+               ret = 0;
+               goto end;
+       }
+
+       lttng_action_increase_execution_count(action);
+       DBG("Executing action `%s` of trigger `%s` action work item %" PRIu64,
+                       get_action_name(action),
+                       get_trigger_name(work_item->trigger),
+                       work_item->id);
+       ret = action_executors[action_type](executor, work_item, item);
+end:
+       return ret;
+}
+
+static int action_work_item_execute(struct action_executor *executor,
+               struct action_work_item *work_item)
+{
+       int ret;
+       size_t count, i;
+
+       DBG("Starting execution of action work item %" PRIu64 " of trigger `%s`",
+                       work_item->id, get_trigger_name(work_item->trigger));
+
+       count = lttng_dynamic_array_get_count(&work_item->subitems);
+       for (i = 0; i < count; i++) {
+               struct action_work_subitem *item;
+
+               item = (action_work_subitem *) lttng_dynamic_array_get_element(&work_item->subitems, i);
+               ret = action_executor_generic_handler(
+                               executor, work_item, item);
+               if (ret) {
+                       goto end;
+               }
+       }
+end:
+       DBG("Completed execution of action work item %" PRIu64 " of trigger `%s`",
+                       work_item->id, get_trigger_name(work_item->trigger));
+       return ret;
+}
+
+static void action_work_item_destroy(struct action_work_item *work_item)
+{
+       lttng_trigger_put(work_item->trigger);
+       lttng_evaluation_destroy(work_item->evaluation);
+       notification_client_list_put(work_item->client_list);
+       lttng_dynamic_array_reset(&work_item->subitems);
+       free(work_item);
+}
+
+static void *action_executor_thread(void *_data)
+{
+       struct action_executor *executor = (action_executor *) _data;
+
+       LTTNG_ASSERT(executor);
+
+       health_register(the_health_sessiond,
+                       HEALTH_SESSIOND_TYPE_ACTION_EXECUTOR);
+
+       rcu_register_thread();
+       rcu_thread_online();
+
+       DBG("Entering work execution loop");
+       pthread_mutex_lock(&executor->work.lock);
+       while (!executor->should_quit) {
+               int ret = 0;
+               struct action_work_item *work_item;
+
+               health_code_update();
+               if (executor->work.pending_count == 0) {
+                       health_poll_entry();
+                       DBG("No work items enqueued, entering wait");
+                       pthread_cond_wait(&executor->work.cond,
+                                       &executor->work.lock);
+                       DBG("Woke-up from wait");
+                       health_poll_exit();
+                       continue;
+               }
+
+               /* Pop item from front of the list with work lock held. */
+               work_item = cds_list_first_entry(&executor->work.list,
+                               struct action_work_item, list_node);
+               cds_list_del(&work_item->list_node);
+               executor->work.pending_count--;
+
+               /*
+                * Work can be performed without holding the work lock,
+                * allowing new items to be queued.
+                */
+               pthread_mutex_unlock(&executor->work.lock);
+
+               /* Execute item only if a trigger is registered. */
+               lttng_trigger_lock(work_item->trigger);
+               if (!lttng_trigger_is_registered(work_item->trigger)) {
+                       const char *trigger_name = NULL;
+                       uid_t trigger_owner_uid;
+                       enum lttng_trigger_status trigger_status;
+
+                       trigger_name = get_trigger_name(work_item->trigger);
+
+                       trigger_status = lttng_trigger_get_owner_uid(
+                                       work_item->trigger, &trigger_owner_uid);
+                       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
+
+                       DBG("Work item skipped since the associated trigger is no longer registered: work item id = %" PRIu64 ", trigger name = '%s', trigger owner uid = %d",
+                                       work_item->id, trigger_name,
+                                       (int) trigger_owner_uid);
+                       ret = 0;
+                       goto skip_execute;
+               }
+
+               ret = action_work_item_execute(executor, work_item);
+
+       skip_execute:
+               lttng_trigger_unlock(work_item->trigger);
+               action_work_item_destroy(work_item);
+               if (ret) {
+                       /* Fatal error. */
+                       break;
+               }
+
+               health_code_update();
+               pthread_mutex_lock(&executor->work.lock);
+       }
+
+       if (executor->should_quit) {
+               pthread_mutex_unlock(&executor->work.lock);
+       }
+       DBG("Left work execution loop");
+
+       health_code_update();
+
+       rcu_thread_offline();
+       rcu_unregister_thread();
+       health_unregister(the_health_sessiond);
+
+       return NULL;
+}
+
+static bool shutdown_action_executor_thread(void *_data)
+{
+       struct action_executor *executor = (action_executor *) _data;
+
+       pthread_mutex_lock(&executor->work.lock);
+       executor->should_quit = true;
+       pthread_cond_signal(&executor->work.cond);
+       pthread_mutex_unlock(&executor->work.lock);
+       return true;
+}
+
+static void clean_up_action_executor_thread(void *_data)
+{
+       struct action_executor *executor = (action_executor *) _data;
+
+       LTTNG_ASSERT(cds_list_empty(&executor->work.list));
+
+       pthread_mutex_destroy(&executor->work.lock);
+       pthread_cond_destroy(&executor->work.cond);
+       free(executor);
+}
+
+struct action_executor *action_executor_create(
+               struct notification_thread_handle *handle)
+{
+       struct action_executor *executor = (action_executor *) zmalloc(sizeof(*executor));
+
+       if (!executor) {
+               goto end;
+       }
+
+       CDS_INIT_LIST_HEAD(&executor->work.list);
+       pthread_cond_init(&executor->work.cond, NULL);
+       pthread_mutex_init(&executor->work.lock, NULL);
+       executor->notification_thread_handle = handle;
+
+       executor->thread = lttng_thread_create(THREAD_NAME,
+                       action_executor_thread, shutdown_action_executor_thread,
+                       clean_up_action_executor_thread, executor);
+end:
+       return executor;
+}
+
+void action_executor_destroy(struct action_executor *executor)
+{
+       struct action_work_item *work_item, *tmp;
+
+       /* TODO Wait for work list to drain? */
+       lttng_thread_shutdown(executor->thread);
+       pthread_mutex_lock(&executor->work.lock);
+       if (executor->work.pending_count != 0) {
+               WARN("%" PRIu64
+                       " trigger action%s still queued for execution and will be discarded",
+                               executor->work.pending_count,
+                               executor->work.pending_count == 1 ? " is" :
+                                                                   "s are");
+       }
+
+       cds_list_for_each_entry_safe (
+                       work_item, tmp, &executor->work.list, list_node) {
+               WARN("Discarding action work item %" PRIu64
+                               " associated to trigger `%s`",
+                               work_item->id, get_trigger_name(work_item->trigger));
+               cds_list_del(&work_item->list_node);
+               action_work_item_destroy(work_item);
+       }
+       pthread_mutex_unlock(&executor->work.lock);
+       lttng_thread_put(executor->thread);
+}
+
+/* RCU read-lock must be held by the caller. */
+enum action_executor_status action_executor_enqueue_trigger(
+               struct action_executor *executor,
+               struct lttng_trigger *trigger,
+               struct lttng_evaluation *evaluation,
+               const struct lttng_credentials *object_creds,
+               struct notification_client_list *client_list)
+{
+       int ret;
+       enum action_executor_status executor_status = ACTION_EXECUTOR_STATUS_OK;
+       const uint64_t work_item_id = executor->next_work_item_id++;
+       struct action_work_item *work_item;
+       bool signal = false;
+
+       LTTNG_ASSERT(trigger);
+
+       pthread_mutex_lock(&executor->work.lock);
+       /* Check for queue overflow. */
+       if (executor->work.pending_count >= MAX_QUEUED_WORK_COUNT) {
+               /* Most likely spammy, remove if it is the case. */
+               DBG("Refusing to enqueue action for trigger (overflow): trigger name = `%s`, work item id = %" PRIu64,
+                               get_trigger_name(trigger), work_item_id);
+               executor_status = ACTION_EXECUTOR_STATUS_OVERFLOW;
+               goto error_unlock;
+       }
+
+       work_item = (action_work_item *) zmalloc(sizeof(*work_item));
+       if (!work_item) {
+               PERROR("Failed to allocate action executor work item: trigger name = '%s'",
+                               get_trigger_name(trigger));
+               executor_status = ACTION_EXECUTOR_STATUS_ERROR;
+               goto error_unlock;
+       }
+
+       lttng_trigger_get(trigger);
+       if (client_list) {
+               const bool reference_acquired =
+                               notification_client_list_get(client_list);
+
+               LTTNG_ASSERT(reference_acquired);
+       }
+
+       work_item->id = work_item_id;
+       work_item->trigger = trigger;
+
+       /* Ownership transferred to the work item. */
+       work_item->evaluation = evaluation;
+       evaluation = NULL;
+
+       work_item->client_list = client_list;
+       work_item->object_creds.is_set = !!object_creds;
+       if (object_creds) {
+               work_item->object_creds.value = *object_creds;
+       }
+
+       CDS_INIT_LIST_HEAD(&work_item->list_node);
+
+       /* Build the array of action work subitems for the passed trigger. */
+       lttng_dynamic_array_init(&work_item->subitems,
+                       sizeof(struct action_work_subitem),
+                       action_work_subitem_destructor);
+
+       ret = populate_subitem_array_from_trigger(
+                       trigger, &work_item->subitems);
+       if (ret) {
+               ERR("Failed to populate work item sub items on behalf of trigger: trigger name = `%s`",
+                               get_trigger_name(trigger));
+               executor_status = ACTION_EXECUTOR_STATUS_ERROR;
+               goto error_unlock;
+       }
+
+       cds_list_add_tail(&work_item->list_node, &executor->work.list);
+       executor->work.pending_count++;
+       DBG("Enqueued action for trigger: trigger name = `%s`, work item id = %" PRIu64,
+                       get_trigger_name(trigger), work_item_id);
+       signal = true;
+
+error_unlock:
+       if (signal) {
+               pthread_cond_signal(&executor->work.cond);
+       }
+
+       pthread_mutex_unlock(&executor->work.lock);
+       lttng_evaluation_destroy(evaluation);
+       return executor_status;
+}
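
Where the C version of this function filled the work item with a single `(typeof(*work_item)){ ... }` compound literal (visible in the removed lines above), the C++ version assigns the members one by one: compound literals with `typeof` and partial designated initialization of nested or union members are C idioms that do not carry over cleanly to C++, where designated initializers only arrive in C++20 and with stricter rules. A small sketch of the same conversion on a hypothetical `widget` type:

    struct widget {
            int id;
            const char *name;
    };

    static void widget_init(struct widget *w, int id, const char *name)
    {
            /*
             * C-only idiom that does not carry over to C++:
             *   *w = (typeof(*w)) { .id = id, .name = name };
             */
            w->id = id;
            w->name = name;
    }
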
+
+static int add_action_to_subitem_array(struct lttng_action *action,
+               struct lttng_dynamic_array *subitems)
+{
+       int ret = 0;
+       enum lttng_action_type type = lttng_action_get_type(action);
+       const char *session_name = NULL;
+       enum lttng_action_status status;
+       struct action_work_subitem subitem = {
+               .action = NULL,
+               .context = {
+                       .session_id = LTTNG_OPTIONAL_INIT_UNSET,
+               },
+       };
+
+       LTTNG_ASSERT(action);
+       LTTNG_ASSERT(subitems);
+
+       if (type == LTTNG_ACTION_TYPE_LIST) {
+               unsigned int count, i;
+
+               status = lttng_action_list_get_count(action, &count);
+               LTTNG_ASSERT(status == LTTNG_ACTION_STATUS_OK);
+
+               for (i = 0; i < count; i++) {
+                       struct lttng_action *inner_action = NULL;
+
+                       inner_action = lttng_action_list_borrow_mutable_at_index(
+                                       action, i);
+                       LTTNG_ASSERT(inner_action);
+                       ret = add_action_to_subitem_array(
+                                       inner_action, subitems);
+                       if (ret) {
+                               goto end;
+                       }
+               }
+
+               /*
+                * Go directly to the end since there is no need to add the
+                * list action by itself to the subitems array.
+                */
+               goto end;
+       }
+
+       /* Gather execution context. */
+       switch (type) {
+       case LTTNG_ACTION_TYPE_NOTIFY:
+               break;
+       case LTTNG_ACTION_TYPE_START_SESSION:
+               status = lttng_action_start_session_get_session_name(
+                               action, &session_name);
+               LTTNG_ASSERT(status == LTTNG_ACTION_STATUS_OK);
+               break;
+       case LTTNG_ACTION_TYPE_STOP_SESSION:
+               status = lttng_action_stop_session_get_session_name(
+                               action, &session_name);
+               LTTNG_ASSERT(status == LTTNG_ACTION_STATUS_OK);
+               break;
+       case LTTNG_ACTION_TYPE_ROTATE_SESSION:
+               status = lttng_action_rotate_session_get_session_name(
+                               action, &session_name);
+               LTTNG_ASSERT(status == LTTNG_ACTION_STATUS_OK);
+               break;
+       case LTTNG_ACTION_TYPE_SNAPSHOT_SESSION:
+               status = lttng_action_snapshot_session_get_session_name(
+                               action, &session_name);
+               LTTNG_ASSERT(status == LTTNG_ACTION_STATUS_OK);
+               break;
+       case LTTNG_ACTION_TYPE_LIST:
+       case LTTNG_ACTION_TYPE_UNKNOWN:
+               /* Fallthrough */
+       default:
+               abort();
+               break;
+       }
+
+       /*
+        * Fetch the session execution context info as needed.
+        * Note that we could decide not to add an action for which we know the
+        * execution will not happen (i.e. no session exists for that name). For
+        * now, we leave the decision to skip to the action executor for the sake
+        * of simplicity and consistency.
+        */
+       if (session_name != NULL) {
+               uint64_t session_id;
+
+               /*
+                * Instantaneous sampling of the session id if present.
+                *
+                * This method is preferred over calling `sessiond_find_by_name`
+                * and then fetching the session's id, since `sessiond_find_by_name`
+                * requires the session list lock to be taken.
+                *
+                * Taking the session list lock can lead to a deadlock
+                * between the action executor and the notification thread
+                * (caller of add_action_to_subitem_array). It is okay if the
+                * session state changes between the enqueuing time and the
+                * execution time. The execution context is validated at
+                * execution time.
+                */
+               if (sample_session_id_by_name(session_name, &session_id)) {
+                       LTTNG_OPTIONAL_SET(&subitem.context.session_id,
+                                       session_id);
+               }
+       }
+
+       /* Get a reference to the action. */
+       lttng_action_get(action);
+       subitem.action = action;
+
+       ret = lttng_dynamic_array_add_element(subitems, &subitem);
+       if (ret) {
+               ERR("Failed to add work subitem to the subitem array");
+               lttng_action_put(action);
+               ret = -1;
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+static int populate_subitem_array_from_trigger(struct lttng_trigger *trigger,
+               struct lttng_dynamic_array *subitems)
+{
+       struct lttng_action *action;
+
+       action = lttng_trigger_get_action(trigger);
+       LTTNG_ASSERT(action);
+
+       return add_action_to_subitem_array(action, subitems);
+}
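
The casts added on allocation results above, such as `(action_work_item *) zmalloc(sizeof(*work_item))`, reflect the most frequent category of change in this conversion: C implicitly converts `void *` to any object pointer type, while C++ requires an explicit conversion. A minimal sketch of the difference, using a hypothetical `work_item` type rather than any type from this patch:

    #include <cstdlib>
    #include <cstring>

    struct work_item {
            int id;
    };

    static struct work_item *alloc_work_item(void)
    {
            /*
             * malloc() (and zmalloc() in the tree) returns void *; C accepts
             * the implicit conversion, C++ requires the cast to be written.
             */
            struct work_item *item =
                            (struct work_item *) malloc(sizeof(*item));

            if (item) {
                    memset(item, 0, sizeof(*item));
            }

            return item;
    }

The same rule is behind the `(thread_notifiers *) data` casts in the thread callbacks further down, where a `void *` context pointer is converted back to its concrete type.
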
diff --git a/src/bin/lttng-sessiond/agent-thread.c b/src/bin/lttng-sessiond/agent-thread.c
deleted file mode 100644
index f2ee4c0..0000000
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-
-#include <common/common.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-#include <common/uri.h>
-#include <common/utils.h>
-
-#include <common/compat/endian.h>
-
-#include "fd-limit.h"
-#include "agent-thread.h"
-#include "agent.h"
-#include "lttng-sessiond.h"
-#include "session.h"
-#include "utils.h"
-#include "thread.h"
-
-struct thread_notifiers {
-       struct lttng_pipe *quit_pipe;
-       sem_t ready;
-};
-
-struct agent_app_id {
-       pid_t pid;
-       enum lttng_domain_type domain;
-};
-
-struct agent_protocol_version {
-       unsigned int major, minor;
-};
-
-static int agent_tracing_enabled = -1;
-
-/*
- * Note that there is not port here. It's set after this URI is parsed so we
- * can let the user define a custom one. However, localhost is ALWAYS the
- * default listening address.
- */
-static const char *default_reg_uri =
-       "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS;
-
-/*
- * Update agent application using the given socket. This is done just after
- * registration was successful.
- *
- * This will acquire the various sessions' lock; none must be held by the
- * caller.
- * The caller must hold the session list lock.
- */
-static void update_agent_app(const struct agent_app *app)
-{
-       struct ltt_session *session, *stmp;
-       struct ltt_session_list *list;
-       struct agent *trigger_agent;
-       struct lttng_ht_iter iter;
-
-       list = session_get_list();
-       LTTNG_ASSERT(list);
-
-       cds_list_for_each_entry_safe(session, stmp, &list->head, list) {
-               if (!session_get(session)) {
-                       continue;
-               }
-
-               session_lock(session);
-               if (session->ust_session) {
-                       const struct agent *agt;
-
-                       rcu_read_lock();
-                       agt = trace_ust_find_agent(session->ust_session, app->domain);
-                       if (agt) {
-                               agent_update(agt, app);
-                       }
-                       rcu_read_unlock();
-               }
-               session_unlock(session);
-               session_put(session);
-       }
-
-       rcu_read_lock();
-       /*
-        * We are protected against the addition of new events by the session
-        * list lock being held.
-        */
-       cds_lfht_for_each_entry(the_trigger_agents_ht_by_domain->ht,
-                       &iter.iter, trigger_agent, node.node) {
-               agent_update(trigger_agent, app);
-       }
-       rcu_read_unlock();
-}
-
-/*
- * Create and init socket from uri.
- */
-static struct lttcomm_sock *init_tcp_socket(void)
-{
-       int ret;
-       struct lttng_uri *uri = NULL;
-       struct lttcomm_sock *sock = NULL;
-       unsigned int port;
-       bool bind_succeeded = false;
-
-       /*
-        * This should never fail since the URI is hardcoded and the port is set
-        * before this thread is launched.
-        */
-       ret = uri_parse(default_reg_uri, &uri);
-       LTTNG_ASSERT(ret);
-       LTTNG_ASSERT(the_config.agent_tcp_port.begin > 0);
-       uri->port = the_config.agent_tcp_port.begin;
-
-       sock = lttcomm_alloc_sock_from_uri(uri);
-       uri_free(uri);
-       if (sock == NULL) {
-               ERR("agent allocating TCP socket");
-               goto error;
-       }
-
-       ret = lttcomm_create_sock(sock);
-       if (ret < 0) {
-               goto error;
-       }
-
-       for (port = the_config.agent_tcp_port.begin;
-                       port <= the_config.agent_tcp_port.end; port++) {
-               ret = lttcomm_sock_set_port(sock, (uint16_t) port);
-               if (ret) {
-                       ERR("Failed to set port %u on socket",
-                                       port);
-                       goto error;
-               }
-               DBG3("Trying to bind on port %u", port);
-               ret = sock->ops->bind(sock);
-               if (!ret) {
-                       bind_succeeded = true;
-                       break;
-               }
-
-               if (errno == EADDRINUSE) {
-                       DBG("Failed to bind to port %u since it is already in use",
-                                       port);
-               } else {
-                       PERROR("Failed to bind to port %u", port);
-                       goto error;
-               }
-       }
-
-       if (!bind_succeeded) {
-               if (the_config.agent_tcp_port.begin ==
-                               the_config.agent_tcp_port.end) {
-                       WARN("Another process is already using the agent port %i. "
-                            "Agent support will be deactivated.",
-                                       the_config.agent_tcp_port.begin);
-                       goto error;
-               } else {
-                       WARN("All ports in the range [%i, %i] are already in use. "
-                            "Agent support will be deactivated.",
-                                       the_config.agent_tcp_port.begin,
-                                       the_config.agent_tcp_port.end);
-                       goto error;
-               }
-       }
-
-       ret = sock->ops->listen(sock, -1);
-       if (ret < 0) {
-               goto error;
-       }
-
-       DBG("Listening on TCP port %u and socket %d",
-                       port, sock->fd);
-
-       return sock;
-
-error:
-       if (sock) {
-               lttcomm_destroy_sock(sock);
-       }
-       return NULL;
-}
-
-/*
- * Close and destroy the given TCP socket.
- */
-static void destroy_tcp_socket(struct lttcomm_sock *sock)
-{
-       int ret;
-       uint16_t port;
-
-       LTTNG_ASSERT(sock);
-
-       ret = lttcomm_sock_get_port(sock, &port);
-       if (ret) {
-               ERR("Failed to get port of agent TCP socket");
-               port = 0;
-       }
-
-       DBG3("Destroy TCP socket on port %" PRIu16,
-                       port);
-
-       /* This will return gracefully if fd is invalid. */
-       sock->ops->close(sock);
-       lttcomm_destroy_sock(sock);
-}
-
-static const char *domain_type_str(enum lttng_domain_type domain_type)
-{
-       switch (domain_type) {
-       case LTTNG_DOMAIN_NONE:
-               return "none";
-       case LTTNG_DOMAIN_KERNEL:
-               return "kernel";
-       case LTTNG_DOMAIN_UST:
-               return "ust";
-       case LTTNG_DOMAIN_JUL:
-               return "jul";
-       case LTTNG_DOMAIN_LOG4J:
-               return "log4j";
-       case LTTNG_DOMAIN_PYTHON:
-               return "python";
-       default:
-               return "unknown";
-       }
-}
-
-static bool is_agent_protocol_version_supported(
-               const struct agent_protocol_version *version)
-{
-       const bool is_supported = version->major == AGENT_MAJOR_VERSION &&
-                       version->minor == AGENT_MINOR_VERSION;
-
-       if (!is_supported) {
-               WARN("Refusing agent connection: unsupported protocol version %ui.%ui, expected %i.%i",
-                               version->major, version->minor,
-                               AGENT_MAJOR_VERSION, AGENT_MINOR_VERSION);
-       }
-
-       return is_supported;
-}
-
-/*
- * Handle a new agent connection on the registration socket.
- *
- * Returns 0 on success, or else a negative errno value.
- * On success, the resulting socket is returned through `agent_app_socket`
- * and the application's reported id is updated through `agent_app_id`.
- */
-static int accept_agent_connection(
-               struct lttcomm_sock *reg_sock,
-               struct agent_app_id *agent_app_id,
-               struct lttcomm_sock **agent_app_socket)
-{
-       int ret;
-       struct agent_protocol_version agent_version;
-       ssize_t size;
-       struct agent_register_msg msg;
-       struct lttcomm_sock *new_sock;
-
-       LTTNG_ASSERT(reg_sock);
-
-       new_sock = reg_sock->ops->accept(reg_sock);
-       if (!new_sock) {
-               ret = -ENOTCONN;
-               goto end;
-       }
-
-       size = new_sock->ops->recvmsg(new_sock, &msg, sizeof(msg), 0);
-       if (size < sizeof(msg)) {
-               if (size < 0) {
-                       PERROR("Failed to register new agent application");
-               } else if (size != 0) {
-                       ERR("Failed to register new agent application: invalid registration message length: expected length = %zu, message length = %zd",
-                                       sizeof(msg), size);
-               } else {
-                       DBG("Failed to register new agent application: connection closed");
-               }
-               ret = -EINVAL;
-               goto error_close_socket;
-       }
-
-       agent_version = (struct agent_protocol_version) {
-               be32toh(msg.major_version),
-               be32toh(msg.minor_version),
-       };
-
-       /* Test communication protocol version of the registering agent. */
-       if (!is_agent_protocol_version_supported(&agent_version)) {
-               ret = -EINVAL;
-               goto error_close_socket;
-       }
-
-       *agent_app_id = (struct agent_app_id) {
-               .domain = (enum lttng_domain_type) be32toh(msg.domain),
-               .pid = (pid_t) be32toh(msg.pid),
-       };
-
-       DBG2("New registration for agent application: pid = %ld, domain = %s, socket fd = %d",
-                       (long) agent_app_id->pid,
-                       domain_type_str(agent_app_id->domain), new_sock->fd);
-
-       *agent_app_socket = new_sock;
-       new_sock = NULL;
-       ret = 0;
-       goto end;
-
-error_close_socket:
-       new_sock->ops->close(new_sock);
-       lttcomm_destroy_sock(new_sock);
-end:
-       return ret;
-}
-
-bool agent_tracing_is_enabled(void)
-{
-       int enabled;
-
-       enabled = uatomic_read(&agent_tracing_enabled);
-       LTTNG_ASSERT(enabled != -1);
-       return enabled == 1;
-}
-
-/*
- * Write agent TCP port using the rundir.
- */
-static int write_agent_port(uint16_t port)
-{
-       return utils_create_pid_file(
-                       (pid_t) port, the_config.agent_port_file_path.value);
-}
-
-static
-void mark_thread_as_ready(struct thread_notifiers *notifiers)
-{
-       DBG("Marking agent management thread as ready");
-       sem_post(&notifiers->ready);
-}
-
-static
-void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
-{
-       DBG("Waiting for agent management thread to be ready");
-       sem_wait(&notifiers->ready);
-       DBG("Agent management thread is ready");
-}
-
-/*
- * This thread manage application notify communication.
- */
-static void *thread_agent_management(void *data)
-{
-       int i, ret, pollfd;
-       uint32_t revents, nb_fd;
-       struct lttng_poll_event events;
-       struct lttcomm_sock *reg_sock;
-       struct thread_notifiers *notifiers = data;
-       const int quit_pipe_read_fd = lttng_pipe_get_readfd(
-                       notifiers->quit_pipe);
-
-       DBG("Manage agent application registration.");
-
-       rcu_register_thread();
-       rcu_thread_online();
-
-       /* Agent initialization call MUST be called before starting the thread. */
-       LTTNG_ASSERT(the_agent_apps_ht_by_sock);
-
-       /* Create pollset with size 2, quit pipe and registration socket. */
-       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               goto error_poll_create;
-       }
-
-       ret = lttng_poll_add(&events, quit_pipe_read_fd,
-                       LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               goto error_tcp_socket;
-       }
-
-       reg_sock = init_tcp_socket();
-       if (reg_sock) {
-               uint16_t port;
-
-               ret = lttcomm_sock_get_port(reg_sock, &port);
-               LTTNG_ASSERT(ret == 0);
-
-               ret = write_agent_port(port);
-               if (ret) {
-                       ERR("Failed to create agent port file: agent tracing will be unavailable");
-                       /* Don't prevent the launch of the sessiond on error. */
-                       mark_thread_as_ready(notifiers);
-                       goto error;
-               }
-       } else {
-               /* Don't prevent the launch of the sessiond on error. */
-               mark_thread_as_ready(notifiers);
-               goto error_tcp_socket;
-       }
-
-       /*
-        * Signal that the agent thread is ready. The command thread
-        * may start to query whether or not agent tracing is enabled.
-        */
-       uatomic_set(&agent_tracing_enabled, 1);
-       mark_thread_as_ready(notifiers);
-
-       /* Add TCP socket to the poll set. */
-       ret = lttng_poll_add(&events, reg_sock->fd,
-                       LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
-       if (ret < 0) {
-               goto error;
-       }
-
-       while (1) {
-               DBG3("Manage agent polling");
-
-               /* Inifinite blocking call, waiting for transmission */
-restart:
-               ret = lttng_poll_wait(&events, -1);
-               DBG3("Manage agent return from poll on %d fds",
-                               LTTNG_POLL_GETNB(&events));
-               if (ret < 0) {
-                       /*
-                        * Restart interrupted system call.
-                        */
-                       if (errno == EINTR) {
-                               goto restart;
-                       }
-                       goto error;
-               }
-               nb_fd = ret;
-               DBG3("%d fd ready", nb_fd);
-
-               for (i = 0; i < nb_fd; i++) {
-                       /* Fetch once the poll data */
-                       revents = LTTNG_POLL_GETEV(&events, i);
-                       pollfd = LTTNG_POLL_GETFD(&events, i);
-
-                       /* Thread quit pipe has been closed. Killing thread. */
-                       if (pollfd == quit_pipe_read_fd) {
-                               goto exit;
-                       }
-
-                       /* Activity on the registration socket. */
-                       if (revents & LPOLLIN) {
-                               struct agent_app_id new_app_id;
-                               struct agent_app *new_app = NULL;
-                               struct lttcomm_sock *new_app_socket;
-                               int new_app_socket_fd;
-
-                               LTTNG_ASSERT(pollfd == reg_sock->fd);
-
-                               ret = accept_agent_connection(
-                                       reg_sock, &new_app_id, &new_app_socket);
-                               if (ret < 0) {
-                                       /* Errors are already logged. */
-                                       continue;
-                               }
-
-                               /*
-                                * new_app_socket's ownership has been
-                                * transferred to the new agent app.
-                                */
-                               new_app = agent_create_app(new_app_id.pid,
-                                               new_app_id.domain,
-                                               new_app_socket);
-                               if (!new_app) {
-                                       new_app_socket->ops->close(
-                                                       new_app_socket);
-                                       continue;
-                               }
-                               new_app_socket_fd = new_app_socket->fd;
-                               new_app_socket = NULL;
-
-                               /*
-                                * Since this is a command socket (write then
-                                * read), only add poll error event to only
-                                * detect shutdown.
-                                */
-                               ret = lttng_poll_add(&events, new_app_socket_fd,
-                                               LPOLLERR | LPOLLHUP | LPOLLRDHUP);
-                               if (ret < 0) {
-                                       agent_destroy_app(new_app);
-                                       continue;
-                               }
-
-                               /*
-                                * Prevent sessions from being modified while
-                                * the agent application's configuration is
-                                * updated.
-                                */
-                               session_lock_list();
-
-                               /*
-                                * Update the newly registered applications's
-                                * configuration.
-                                */
-                               update_agent_app(new_app);
-
-                               ret = agent_send_registration_done(new_app);
-                               if (ret < 0) {
-                                       agent_destroy_app(new_app);
-                                       /* Removing from the poll set. */
-                                       ret = lttng_poll_del(&events,
-                                                       new_app_socket_fd);
-                                       if (ret < 0) {
-                                               session_unlock_list();
-                                               goto error;
-                                       }
-                                       continue;
-                               }
-
-                               /* Publish the new agent app. */
-                               agent_add_app(new_app);
-
-                               session_unlock_list();
-                       } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                               /* Removing from the poll set */
-                               ret = lttng_poll_del(&events, pollfd);
-                               if (ret < 0) {
-                                       goto error;
-                               }
-                               agent_destroy_app_by_sock(pollfd);
-                       } else {
-                               ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                               goto error;
-                       }
-               }
-       }
-
-exit:
-       /* Whatever happens, try to delete it and exit. */
-       (void) lttng_poll_del(&events, reg_sock->fd);
-error:
-       destroy_tcp_socket(reg_sock);
-error_tcp_socket:
-       lttng_poll_clean(&events);
-error_poll_create:
-       uatomic_set(&agent_tracing_enabled, 0);
-       DBG("Cleaning up and stopping.");
-       rcu_thread_offline();
-       rcu_unregister_thread();
-       return NULL;
-}
-
-static bool shutdown_agent_management_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
-
-       return notify_thread_pipe(write_fd) == 1;
-}
-
-static void cleanup_agent_management_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-
-       lttng_pipe_destroy(notifiers->quit_pipe);
-       sem_destroy(&notifiers->ready);
-       free(notifiers);
-}
-
-bool launch_agent_management_thread(void)
-{
-       struct thread_notifiers *notifiers;
-       struct lttng_thread *thread;
-
-       notifiers = zmalloc(sizeof(*notifiers));
-       if (!notifiers) {
-               goto error_alloc;
-       }
-
-       sem_init(&notifiers->ready, 0, 0);
-       notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
-       if (!notifiers->quit_pipe) {
-               goto error;
-       }
-       thread = lttng_thread_create("Agent management",
-                       thread_agent_management,
-                       shutdown_agent_management_thread,
-                       cleanup_agent_management_thread,
-                       notifiers);
-       if (!thread) {
-               goto error;
-       }
-       wait_until_thread_is_ready(notifiers);
-       lttng_thread_put(thread);
-       return true;
-error:
-       cleanup_agent_management_thread(notifiers);
-error_alloc:
-       return false;
-}
diff --git a/src/bin/lttng-sessiond/agent-thread.cpp b/src/bin/lttng-sessiond/agent-thread.cpp
new file mode 100644
index 0000000..5e158b4
--- /dev/null
@@ -0,0 +1,598 @@
+/*
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+
+#include <common/common.h>
+#include <common/sessiond-comm/sessiond-comm.h>
+#include <common/uri.h>
+#include <common/utils.h>
+
+#include <common/compat/endian.h>
+
+#include "fd-limit.h"
+#include "agent-thread.h"
+#include "agent.h"
+#include "lttng-sessiond.h"
+#include "session.h"
+#include "utils.h"
+#include "thread.h"
+
+struct thread_notifiers {
+       struct lttng_pipe *quit_pipe;
+       sem_t ready;
+};
+
+struct agent_app_id {
+       pid_t pid;
+       enum lttng_domain_type domain;
+};
+
+struct agent_protocol_version {
+       unsigned int major, minor;
+};
+
+static int agent_tracing_enabled = -1;
+
+/*
+ * Note that there is no port here. It's set after this URI is parsed so we
+ * can let the user define a custom one. However, localhost is ALWAYS the
+ * default listening address.
+ */
+static const char *default_reg_uri =
+       "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS;
+
+/*
+ * Update agent application using the given socket. This is done just after
+ * registration was successful.
+ *
+ * This will acquire the various sessions' lock; none must be held by the
+ * caller.
+ * The caller must hold the session list lock.
+ */
+static void update_agent_app(const struct agent_app *app)
+{
+       struct ltt_session *session, *stmp;
+       struct ltt_session_list *list;
+       struct agent *trigger_agent;
+       struct lttng_ht_iter iter;
+
+       list = session_get_list();
+       LTTNG_ASSERT(list);
+
+       cds_list_for_each_entry_safe(session, stmp, &list->head, list) {
+               if (!session_get(session)) {
+                       continue;
+               }
+
+               session_lock(session);
+               if (session->ust_session) {
+                       const struct agent *agt;
+
+                       rcu_read_lock();
+                       agt = trace_ust_find_agent(session->ust_session, app->domain);
+                       if (agt) {
+                               agent_update(agt, app);
+                       }
+                       rcu_read_unlock();
+               }
+               session_unlock(session);
+               session_put(session);
+       }
+
+       rcu_read_lock();
+       /*
+        * We are protected against the addition of new events by the session
+        * list lock being held.
+        */
+       cds_lfht_for_each_entry(the_trigger_agents_ht_by_domain->ht,
+                       &iter.iter, trigger_agent, node.node) {
+               agent_update(trigger_agent, app);
+       }
+       rcu_read_unlock();
+}
+
+/*
+ * Create and init socket from uri.
+ */
+static struct lttcomm_sock *init_tcp_socket(void)
+{
+       int ret;
+       struct lttng_uri *uri = NULL;
+       struct lttcomm_sock *sock = NULL;
+       unsigned int port;
+       bool bind_succeeded = false;
+
+       /*
+        * This should never fail since the URI is hardcoded and the port is set
+        * before this thread is launched.
+        */
+       ret = uri_parse(default_reg_uri, &uri);
+       LTTNG_ASSERT(ret);
+       LTTNG_ASSERT(the_config.agent_tcp_port.begin > 0);
+       uri->port = the_config.agent_tcp_port.begin;
+
+       sock = lttcomm_alloc_sock_from_uri(uri);
+       uri_free(uri);
+       if (sock == NULL) {
+               ERR("agent allocating TCP socket");
+               goto error;
+       }
+
+       ret = lttcomm_create_sock(sock);
+       if (ret < 0) {
+               goto error;
+       }
+
+       for (port = the_config.agent_tcp_port.begin;
+                       port <= the_config.agent_tcp_port.end; port++) {
+               ret = lttcomm_sock_set_port(sock, (uint16_t) port);
+               if (ret) {
+                       ERR("Failed to set port %u on socket",
+                                       port);
+                       goto error;
+               }
+               DBG3("Trying to bind on port %u", port);
+               ret = sock->ops->bind(sock);
+               if (!ret) {
+                       bind_succeeded = true;
+                       break;
+               }
+
+               if (errno == EADDRINUSE) {
+                       DBG("Failed to bind to port %u since it is already in use",
+                                       port);
+               } else {
+                       PERROR("Failed to bind to port %u", port);
+                       goto error;
+               }
+       }
+
+       if (!bind_succeeded) {
+               if (the_config.agent_tcp_port.begin ==
+                               the_config.agent_tcp_port.end) {
+                       WARN("Another process is already using the agent port %i. "
+                            "Agent support will be deactivated.",
+                                       the_config.agent_tcp_port.begin);
+                       goto error;
+               } else {
+                       WARN("All ports in the range [%i, %i] are already in use. "
+                            "Agent support will be deactivated.",
+                                       the_config.agent_tcp_port.begin,
+                                       the_config.agent_tcp_port.end);
+                       goto error;
+               }
+       }
+
+       ret = sock->ops->listen(sock, -1);
+       if (ret < 0) {
+               goto error;
+       }
+
+       DBG("Listening on TCP port %u and socket %d",
+                       port, sock->fd);
+
+       return sock;
+
+error:
+       if (sock) {
+               lttcomm_destroy_sock(sock);
+       }
+       return NULL;
+}
+
+/*
+ * Close and destroy the given TCP socket.
+ */
+static void destroy_tcp_socket(struct lttcomm_sock *sock)
+{
+       int ret;
+       uint16_t port;
+
+       LTTNG_ASSERT(sock);
+
+       ret = lttcomm_sock_get_port(sock, &port);
+       if (ret) {
+               ERR("Failed to get port of agent TCP socket");
+               port = 0;
+       }
+
+       DBG3("Destroy TCP socket on port %" PRIu16,
+                       port);
+
+       /* This will return gracefully if fd is invalid. */
+       sock->ops->close(sock);
+       lttcomm_destroy_sock(sock);
+}
+
+static const char *domain_type_str(enum lttng_domain_type domain_type)
+{
+       switch (domain_type) {
+       case LTTNG_DOMAIN_NONE:
+               return "none";
+       case LTTNG_DOMAIN_KERNEL:
+               return "kernel";
+       case LTTNG_DOMAIN_UST:
+               return "ust";
+       case LTTNG_DOMAIN_JUL:
+               return "jul";
+       case LTTNG_DOMAIN_LOG4J:
+               return "log4j";
+       case LTTNG_DOMAIN_PYTHON:
+               return "python";
+       default:
+               return "unknown";
+       }
+}
+
+static bool is_agent_protocol_version_supported(
+               const struct agent_protocol_version *version)
+{
+       const bool is_supported = version->major == AGENT_MAJOR_VERSION &&
+                       version->minor == AGENT_MINOR_VERSION;
+
+       if (!is_supported) {
+               WARN("Refusing agent connection: unsupported protocol version %ui.%ui, expected %i.%i",
+                               version->major, version->minor,
+                               AGENT_MAJOR_VERSION, AGENT_MINOR_VERSION);
+       }
+
+       return is_supported;
+}
+
+/*
+ * Handle a new agent connection on the registration socket.
+ *
+ * Returns 0 on success, or else a negative errno value.
+ * On success, the resulting socket is returned through `agent_app_socket`
+ * and the application's reported id is updated through `agent_app_id`.
+ */
+static int accept_agent_connection(
+               struct lttcomm_sock *reg_sock,
+               struct agent_app_id *agent_app_id,
+               struct lttcomm_sock **agent_app_socket)
+{
+       int ret;
+       struct agent_protocol_version agent_version;
+       ssize_t size;
+       struct agent_register_msg msg;
+       struct lttcomm_sock *new_sock;
+
+       LTTNG_ASSERT(reg_sock);
+
+       new_sock = reg_sock->ops->accept(reg_sock);
+       if (!new_sock) {
+               ret = -ENOTCONN;
+               goto end;
+       }
+
+       size = new_sock->ops->recvmsg(new_sock, &msg, sizeof(msg), 0);
+       if (size < sizeof(msg)) {
+               if (size < 0) {
+                       PERROR("Failed to register new agent application");
+               } else if (size != 0) {
+                       ERR("Failed to register new agent application: invalid registration message length: expected length = %zu, message length = %zd",
+                                       sizeof(msg), size);
+               } else {
+                       DBG("Failed to register new agent application: connection closed");
+               }
+               ret = -EINVAL;
+               goto error_close_socket;
+       }
+
+       agent_version = (struct agent_protocol_version) {
+               be32toh(msg.major_version),
+               be32toh(msg.minor_version),
+       };
+
+       /* Test communication protocol version of the registering agent. */
+       if (!is_agent_protocol_version_supported(&agent_version)) {
+               ret = -EINVAL;
+               goto error_close_socket;
+       }
+
+       *agent_app_id = (struct agent_app_id) {
+               .pid = (pid_t) be32toh(msg.pid),
+               .domain = (lttng_domain_type) be32toh(msg.domain),
+       };
+
+       DBG2("New registration for agent application: pid = %ld, domain = %s, socket fd = %d",
+                       (long) agent_app_id->pid,
+                       domain_type_str(agent_app_id->domain), new_sock->fd);
+
+       *agent_app_socket = new_sock;
+       new_sock = NULL;
+       ret = 0;
+       goto end;
+
+error_close_socket:
+       new_sock->ops->close(new_sock);
+       lttcomm_destroy_sock(new_sock);
+end:
+       return ret;
+}
+
+bool agent_tracing_is_enabled(void)
+{
+       int enabled;
+
+       enabled = uatomic_read(&agent_tracing_enabled);
+       LTTNG_ASSERT(enabled != -1);
+       return enabled == 1;
+}
+
+/*
+ * Write agent TCP port using the rundir.
+ */
+static int write_agent_port(uint16_t port)
+{
+       return utils_create_pid_file(
+                       (pid_t) port, the_config.agent_port_file_path.value);
+}
+
+static
+void mark_thread_as_ready(struct thread_notifiers *notifiers)
+{
+       DBG("Marking agent management thread as ready");
+       sem_post(&notifiers->ready);
+}
+
+static
+void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
+{
+       DBG("Waiting for agent management thread to be ready");
+       sem_wait(&notifiers->ready);
+       DBG("Agent management thread is ready");
+}
+
+/*
+ * This thread manages agent application registration.
+ */
+static void *thread_agent_management(void *data)
+{
+       int i, ret, pollfd;
+       uint32_t revents, nb_fd;
+       struct lttng_poll_event events;
+       struct lttcomm_sock *reg_sock;
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       const int quit_pipe_read_fd = lttng_pipe_get_readfd(
+                       notifiers->quit_pipe);
+
+       DBG("Manage agent application registration.");
+
+       rcu_register_thread();
+       rcu_thread_online();
+
+       /* Agent initialization MUST be performed before starting the thread. */
+       LTTNG_ASSERT(the_agent_apps_ht_by_sock);
+
+       /* Create pollset with size 2, quit pipe and registration socket. */
+       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error_poll_create;
+       }
+
+       ret = lttng_poll_add(&events, quit_pipe_read_fd,
+                       LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               goto error_tcp_socket;
+       }
+
+       reg_sock = init_tcp_socket();
+       if (reg_sock) {
+               uint16_t port;
+
+               ret = lttcomm_sock_get_port(reg_sock, &port);
+               LTTNG_ASSERT(ret == 0);
+
+               ret = write_agent_port(port);
+               if (ret) {
+                       ERR("Failed to create agent port file: agent tracing will be unavailable");
+                       /* Don't prevent the launch of the sessiond on error. */
+                       mark_thread_as_ready(notifiers);
+                       goto error;
+               }
+       } else {
+               /* Don't prevent the launch of the sessiond on error. */
+               mark_thread_as_ready(notifiers);
+               goto error_tcp_socket;
+       }
+
+       /*
+        * Signal that the agent thread is ready. The command thread
+        * may start to query whether or not agent tracing is enabled.
+        */
+       uatomic_set(&agent_tracing_enabled, 1);
+       mark_thread_as_ready(notifiers);
+
+       /* Add TCP socket to the poll set. */
+       ret = lttng_poll_add(&events, reg_sock->fd,
+                       LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
+       if (ret < 0) {
+               goto error;
+       }
+
+       while (1) {
+               DBG3("Manage agent polling");
+
+               /* Infinite blocking call, waiting for transmission */
+restart:
+               ret = lttng_poll_wait(&events, -1);
+               DBG3("Manage agent return from poll on %d fds",
+                               LTTNG_POLL_GETNB(&events));
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               goto restart;
+                       }
+                       goto error;
+               }
+               nb_fd = ret;
+               DBG3("%d fd ready", nb_fd);
+
+               for (i = 0; i < nb_fd; i++) {
+                       /* Fetch once the poll data */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
+
+                       /* Thread quit pipe has been closed. Killing thread. */
+                       if (pollfd == quit_pipe_read_fd) {
+                               goto exit;
+                       }
+
+                       /* Activity on the registration socket. */
+                       if (revents & LPOLLIN) {
+                               struct agent_app_id new_app_id;
+                               struct agent_app *new_app = NULL;
+                               struct lttcomm_sock *new_app_socket;
+                               int new_app_socket_fd;
+
+                               LTTNG_ASSERT(pollfd == reg_sock->fd);
+
+                               ret = accept_agent_connection(
+                                       reg_sock, &new_app_id, &new_app_socket);
+                               if (ret < 0) {
+                                       /* Errors are already logged. */
+                                       continue;
+                               }
+
+                               /*
+                                * new_app_socket's ownership has been
+                                * transferred to the new agent app.
+                                */
+                               new_app = agent_create_app(new_app_id.pid,
+                                               new_app_id.domain,
+                                               new_app_socket);
+                               if (!new_app) {
+                                       new_app_socket->ops->close(
+                                                       new_app_socket);
+                                       continue;
+                               }
+                               new_app_socket_fd = new_app_socket->fd;
+                               new_app_socket = NULL;
+
+                               /*
+                                * Since this is a command socket (write then
+                                * read), only add poll error events to detect
+                                * shutdown.
+                                */
+                               ret = lttng_poll_add(&events, new_app_socket_fd,
+                                               LPOLLERR | LPOLLHUP | LPOLLRDHUP);
+                               if (ret < 0) {
+                                       agent_destroy_app(new_app);
+                                       continue;
+                               }
+
+                               /*
+                                * Prevent sessions from being modified while
+                                * the agent application's configuration is
+                                * updated.
+                                */
+                               session_lock_list();
+
+                               /*
+                                * Update the newly registered application's
+                                * configuration.
+                                */
+                               update_agent_app(new_app);
+
+                               ret = agent_send_registration_done(new_app);
+                               if (ret < 0) {
+                                       agent_destroy_app(new_app);
+                                       /* Removing from the poll set. */
+                                       ret = lttng_poll_del(&events,
+                                                       new_app_socket_fd);
+                                       if (ret < 0) {
+                                               session_unlock_list();
+                                               goto error;
+                                       }
+                                       continue;
+                               }
+
+                               /* Publish the new agent app. */
+                               agent_add_app(new_app);
+
+                               session_unlock_list();
+                       } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                               /* Removing from the poll set */
+                               ret = lttng_poll_del(&events, pollfd);
+                               if (ret < 0) {
+                                       goto error;
+                               }
+                               agent_destroy_app_by_sock(pollfd);
+                       } else {
+                               ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                               goto error;
+                       }
+               }
+       }
+
+exit:
+       /* Whatever happens, try to delete it and exit. */
+       (void) lttng_poll_del(&events, reg_sock->fd);
+error:
+       destroy_tcp_socket(reg_sock);
+error_tcp_socket:
+       lttng_poll_clean(&events);
+error_poll_create:
+       uatomic_set(&agent_tracing_enabled, 0);
+       DBG("Cleaning up and stopping.");
+       rcu_thread_offline();
+       rcu_unregister_thread();
+       return NULL;
+}
+
+static bool shutdown_agent_management_thread(void *data)
+{
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
+
+       return notify_thread_pipe(write_fd) == 1;
+}
+
+static void cleanup_agent_management_thread(void *data)
+{
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+
+       lttng_pipe_destroy(notifiers->quit_pipe);
+       sem_destroy(&notifiers->ready);
+       free(notifiers);
+}
+
+bool launch_agent_management_thread(void)
+{
+       struct thread_notifiers *notifiers;
+       struct lttng_thread *thread;
+
+       notifiers = (thread_notifiers *) zmalloc(sizeof(*notifiers));
+       if (!notifiers) {
+               goto error_alloc;
+       }
+
+       sem_init(&notifiers->ready, 0, 0);
+       notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!notifiers->quit_pipe) {
+               goto error;
+       }
+       thread = lttng_thread_create("Agent management",
+                       thread_agent_management,
+                       shutdown_agent_management_thread,
+                       cleanup_agent_management_thread,
+                       notifiers);
+       if (!thread) {
+               goto error;
+       }
+       wait_until_thread_is_ready(notifiers);
+       lttng_thread_put(thread);
+       return true;
+error:
+       cleanup_agent_management_thread(notifiers);
+error_alloc:
+       return false;
+}
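
Two smaller C/C++ differences show up in the converted accept_agent_connection() above: the designated initializers that fill `agent_app_id` now follow the member declaration order (`.pid` before `.domain`), since C++ rejects out-of-order designators, and the integer returned by be32toh() is explicitly converted to the enum type, since C++ does not implicitly convert integers to enumerations. A minimal sketch of both rules, with hypothetical `domain_kind` and `app_id` types standing in for the real ones:

    #include <cstdint>

    enum domain_kind {
            DOMAIN_NONE = 0,
            DOMAIN_JUL = 1,
    };

    struct app_id {
            int pid;
            enum domain_kind domain;
    };

    static struct app_id make_app_id(uint32_t wire_pid, uint32_t wire_domain)
    {
            /*
             * C++ requires the designators in declaration order (.pid before
             * .domain) and an explicit conversion to the enum type; written
             * this way, the initializer also remains valid C.
             */
            struct app_id id = {
                    .pid = (int) wire_pid,
                    .domain = (enum domain_kind) wire_domain,
            };

            return id;
    }
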
diff --git a/src/bin/lttng-sessiond/agent.c b/src/bin/lttng-sessiond/agent.c
deleted file mode 100644
index 77846b4..0000000
+++ /dev/null
@@ -1,1624 +0,0 @@
-/*
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <urcu/uatomic.h>
-#include <urcu/rculist.h>
-
-#include <lttng/event-rule/event-rule.h>
-#include <lttng/event-rule/event-rule-internal.h>
-#include <lttng/event-rule/jul-logging.h>
-#include <lttng/event-rule/log4j-logging.h>
-#include <lttng/event-rule/python-logging.h>
-#include <lttng/condition/condition.h>
-#include <lttng/condition/event-rule-matches.h>
-#include <lttng/domain-internal.h>
-#include <lttng/log-level-rule-internal.h>
-
-#include <common/common.h>
-#include <common/sessiond-comm/agent.h>
-
-#include <common/compat/endian.h>
-
-#include "agent.h"
-#include "ust-app.h"
-#include "utils.h"
-#include "common/error.h"
-
-#define AGENT_RET_CODE_INDEX(code) (code - AGENT_RET_CODE_SUCCESS)
-
-typedef enum lttng_event_rule_status (*event_rule_logging_get_name_pattern)(
-               const struct lttng_event_rule *rule, const char **pattern);
-typedef enum lttng_event_rule_status (*event_rule_logging_get_log_level_rule)(
-               const struct lttng_event_rule *rule,
-               const struct lttng_log_level_rule **log_level_rule);
-
-/*
- * Agent application context representation.
- */
-struct agent_app_ctx {
-       char *provider_name;
-       char *ctx_name;
-
-       /* agent_app_ctx are part of the agent app_ctx_list. */
-       struct cds_list_head list_node;
-
-       /* For call_rcu teardown. */
-       struct rcu_head rcu_node;
-};
-
-/*
- * Human readable agent return code.
- */
-static const char *error_string_array[] = {
-       [ AGENT_RET_CODE_INDEX(AGENT_RET_CODE_SUCCESS) ] = "Success",
-       [ AGENT_RET_CODE_INDEX(AGENT_RET_CODE_INVALID) ] = "Invalid command",
-       [ AGENT_RET_CODE_INDEX(AGENT_RET_CODE_UNKNOWN_NAME) ] = "Unknown logger name",
-
-       /* Last element */
-       [ AGENT_RET_CODE_INDEX(AGENT_RET_CODE_NR) ] = "Unknown code",
-};
-
-static
-void log_reply_code(uint32_t in_reply_ret_code)
-{
-       int level = PRINT_DBG3;
-       /*
-        * reply_ret_code and in_reply_ret_code are kept separate to have a
-        * sanitized value (used to retrieve the human readable string) and the
-        * original value which is logged as-is.
-        */
-       uint32_t reply_ret_code = in_reply_ret_code;
-
-       if (reply_ret_code < AGENT_RET_CODE_SUCCESS ||
-                       reply_ret_code >= AGENT_RET_CODE_NR) {
-               reply_ret_code = AGENT_RET_CODE_NR;
-               level = PRINT_ERR;
-       }
-
-       LOG(level, "Agent replied with retcode: %s (%"PRIu32")",
-                       error_string_array[AGENT_RET_CODE_INDEX(
-                       reply_ret_code)],
-                       in_reply_ret_code);
-}
-
-/*
- * Match function for the events hash table lookup by name.
- */
-static int ht_match_event_by_name(struct cds_lfht_node *node,
-               const void *_key)
-{
-       struct agent_event *event;
-       const struct agent_ht_key *key;
-
-       LTTNG_ASSERT(node);
-       LTTNG_ASSERT(_key);
-
-       event = caa_container_of(node, struct agent_event, node.node);
-       key = _key;
-
-       /* Match 1 elements of the key: name. */
-
-       /* Event name */
-       if (strncmp(event->name, key->name, sizeof(event->name)) != 0) {
-               goto no_match;
-       }
-       /* Match. */
-       return 1;
-
-no_match:
-       return 0;
-}
-
-/*
- * Match function for the events hash table lookup by name, log level and
- * filter expression.
- */
-static int ht_match_event(struct cds_lfht_node *node,
-               const void *_key)
-{
-       struct agent_event *event;
-       const struct agent_ht_key *key;
-       int ll_match;
-
-       LTTNG_ASSERT(node);
-       LTTNG_ASSERT(_key);
-
-       event = caa_container_of(node, struct agent_event, node.node);
-       key = _key;
-
-       /* Match 2 elements of the key: name and loglevel. */
-
-       /* Event name */
-       if (strncmp(event->name, key->name, sizeof(event->name)) != 0) {
-               goto no_match;
-       }
-
-       /* Event loglevel value and type. */
-       ll_match = loglevels_match(event->loglevel_type,
-               event->loglevel_value, key->loglevel_type,
-               key->loglevel_value, LTTNG_EVENT_LOGLEVEL_ALL);
-
-       if (!ll_match) {
-               goto no_match;
-       }
-
-       /* Filter expression */
-       if (!!event->filter_expression != !!key->filter_expression) {
-               /* One has a filter expression, the other does not */
-               goto no_match;
-       }
-
-       if (event->filter_expression) {
-               if (strncmp(event->filter_expression, key->filter_expression,
-                               strlen(event->filter_expression)) != 0) {
-                       goto no_match;
-               }
-       }
-
-       return 1;
-
-no_match:
-       return 0;
-}
-
-/*
- * Add unique agent event based on the event name and loglevel.
- */
-static void add_unique_agent_event(struct lttng_ht *ht,
-               struct agent_event *event)
-{
-       struct cds_lfht_node *node_ptr;
-       struct agent_ht_key key;
-
-       LTTNG_ASSERT(ht);
-       LTTNG_ASSERT(ht->ht);
-       LTTNG_ASSERT(event);
-
-       key.name = event->name;
-       key.loglevel_value = event->loglevel_value;
-       key.loglevel_type = event->loglevel_type;
-       key.filter_expression = event->filter_expression;
-
-       node_ptr = cds_lfht_add_unique(ht->ht,
-                       ht->hash_fct(event->node.key, lttng_ht_seed),
-                       ht_match_event, &key, &event->node.node);
-       LTTNG_ASSERT(node_ptr == &event->node.node);
-}
-
-/*
- * URCU delayed agent event reclaim.
- */
-static void destroy_event_agent_rcu(struct rcu_head *head)
-{
-       struct lttng_ht_node_str *node =
-               caa_container_of(head, struct lttng_ht_node_str, head);
-       struct agent_event *event =
-               caa_container_of(node, struct agent_event, node);
-
-       agent_destroy_event(event);
-}
-
-/*
- * URCU delayed agent app reclaim.
- */
-static void destroy_app_agent_rcu(struct rcu_head *head)
-{
-       struct lttng_ht_node_ulong *node =
-               caa_container_of(head, struct lttng_ht_node_ulong, head);
-       struct agent_app *app =
-               caa_container_of(node, struct agent_app, node);
-
-       free(app);
-}
-
-/*
- * Communication with the agent. Send the message header to the given socket in
- * big endian.
- *
- * Return 0 on success or else a negative errno message of sendmsg() op.
- */
-static int send_header(struct lttcomm_sock *sock, uint64_t data_size,
-               uint32_t cmd, uint32_t cmd_version)
-{
-       int ret;
-       ssize_t size;
-       struct lttcomm_agent_hdr msg;
-
-       LTTNG_ASSERT(sock);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.data_size = htobe64(data_size);
-       msg.cmd = htobe32(cmd);
-       msg.cmd_version = htobe32(cmd_version);
-
-       size = sock->ops->sendmsg(sock, &msg, sizeof(msg), 0);
-       if (size < sizeof(msg)) {
-               ret = -errno;
-               goto error;
-       }
-       ret = 0;
-
-error:
-       return ret;
-}
-
-/*
- * Communication call with the agent. Send the payload to the given socket. The
- * header MUST be sent prior to this call.
- *
- * Return 0 on success or else a negative errno value of sendmsg() op.
- */
-static int send_payload(struct lttcomm_sock *sock, const void *data,
-               size_t size)
-{
-       int ret;
-       ssize_t len;
-
-       LTTNG_ASSERT(sock);
-       LTTNG_ASSERT(data);
-
-       len = sock->ops->sendmsg(sock, data, size, 0);
-       if (len < size) {
-               ret = -errno;
-               goto error;
-       }
-       ret = 0;
-
-error:
-       return ret;
-}
-
-/*
- * Communication call with the agent. Receive reply from the agent using the
- * given socket.
- *
- * Return 0 on success or else a negative errno value from recvmsg() op.
- */
-static int recv_reply(struct lttcomm_sock *sock, void *buf, size_t size)
-{
-       int ret;
-       ssize_t len;
-
-       LTTNG_ASSERT(sock);
-       LTTNG_ASSERT(buf);
-
-       len = sock->ops->recvmsg(sock, buf, size, 0);
-       if (len < size) {
-               ret = -errno;
-               goto error;
-       }
-       ret = 0;
-
-error:
-       return ret;
-}
-
-/*
- * Internal event listing for a given app. Populate events.
- *
- * Return number of element in the list or else a negative LTTNG_ERR* code.
- * On success, the caller is responsible for freeing the memory
- * allocated for "events".
- */
-static ssize_t list_events(struct agent_app *app, struct lttng_event **events)
-{
-       int ret, i, len = 0, offset = 0;
-       uint32_t nb_event;
-       size_t data_size;
-       uint32_t reply_ret_code;
-       struct lttng_event *tmp_events = NULL;
-       struct lttcomm_agent_list_reply *reply = NULL;
-       struct lttcomm_agent_list_reply_hdr reply_hdr;
-
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(app->sock);
-       LTTNG_ASSERT(events);
-
-       DBG2("Agent listing events for app pid: %d and socket %d", app->pid,
-                       app->sock->fd);
-
-       ret = send_header(app->sock, 0, AGENT_CMD_LIST, 0);
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       /* Get list header so we know how much we'll receive. */
-       ret = recv_reply(app->sock, &reply_hdr, sizeof(reply_hdr));
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       reply_ret_code = be32toh(reply_hdr.ret_code);
-       log_reply_code(reply_ret_code);
-       switch (reply_ret_code) {
-       case AGENT_RET_CODE_SUCCESS:
-               data_size = be32toh(reply_hdr.data_size) + sizeof(*reply);
-               break;
-       default:
-               ret = LTTNG_ERR_UNK;
-               goto error;
-       }
-
-       reply = zmalloc(data_size);
-       if (!reply) {
-               ret = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       /* Get the list with the appropriate data size. */
-       ret = recv_reply(app->sock, reply, data_size);
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       nb_event = be32toh(reply->nb_event);
-       tmp_events = zmalloc(sizeof(*tmp_events) * nb_event);
-       if (!tmp_events) {
-               ret = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       for (i = 0; i < nb_event; i++) {
-               offset += len;
-               if (lttng_strncpy(tmp_events[i].name, reply->payload + offset,
-                               sizeof(tmp_events[i].name))) {
-                       ret = LTTNG_ERR_INVALID;
-                       goto error;
-               }
-               tmp_events[i].pid = app->pid;
-               tmp_events[i].enabled = -1;
-               len = strlen(reply->payload + offset) + 1;
-       }
-
-       *events = tmp_events;
-
-       free(reply);
-       return nb_event;
-
-error_io:
-       ret = LTTNG_ERR_UST_LIST_FAIL;
-error:
-       free(reply);
-       free(tmp_events);
-       return -ret;
-
-}
-
-/*
- * Internal enable agent event on an agent application. This function
- * communicates with the agent to enable a given event.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-static int enable_event(const struct agent_app *app, struct agent_event *event)
-{
-       int ret;
-       char *bytes_to_send;
-       uint64_t data_size;
-       size_t filter_expression_length;
-       uint32_t reply_ret_code;
-       struct lttcomm_agent_enable_event msg;
-       struct lttcomm_agent_generic_reply reply;
-
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(app->sock);
-       LTTNG_ASSERT(event);
-
-       DBG2("Agent enabling event %s for app pid: %d and socket %d", event->name,
-                       app->pid, app->sock->fd);
-
-       /*
-        * Calculate the payload's size, which is the fixed-size struct followed
-        * by the variable-length filter expression (+1 for the ending \0).
-        */
-       if (!event->filter_expression) {
-               filter_expression_length = 0;
-       } else {
-               filter_expression_length = strlen(event->filter_expression) + 1;
-       }
-       data_size = sizeof(msg) + filter_expression_length;
-
-       memset(&msg, 0, sizeof(msg));
-       msg.loglevel_value = htobe32(event->loglevel_value);
-       msg.loglevel_type = htobe32(event->loglevel_type);
-       if (lttng_strncpy(msg.name, event->name, sizeof(msg.name))) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-       msg.filter_expression_length = htobe32(filter_expression_length);
-
-       ret = send_header(app->sock, data_size, AGENT_CMD_ENABLE, 0);
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       bytes_to_send = zmalloc(data_size);
-       if (!bytes_to_send) {
-               ret = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       memcpy(bytes_to_send, &msg, sizeof(msg));
-       if (filter_expression_length > 0) {
-               memcpy(bytes_to_send + sizeof(msg), event->filter_expression,
-                               filter_expression_length);
-       }
-
-       ret = send_payload(app->sock, bytes_to_send, data_size);
-       free(bytes_to_send);
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       ret = recv_reply(app->sock, &reply, sizeof(reply));
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       reply_ret_code = be32toh(reply.ret_code);
-       log_reply_code(reply_ret_code);
-       switch (reply_ret_code) {
-       case AGENT_RET_CODE_SUCCESS:
-               break;
-       case AGENT_RET_CODE_UNKNOWN_NAME:
-               ret = LTTNG_ERR_UST_EVENT_NOT_FOUND;
-               goto error;
-       default:
-               ret = LTTNG_ERR_UNK;
-               goto error;
-       }
-
-       return LTTNG_OK;
-
-error_io:
-       ret = LTTNG_ERR_UST_ENABLE_FAIL;
-error:
-       return ret;
-}
-
-/*
- * Send Pascal-style string. Size is sent as a 32-bit big endian integer.
- */
-static
-int send_pstring(struct lttcomm_sock *sock, const char *str, uint32_t len)
-{
-       int ret;
-       uint32_t len_be;
-
-       len_be = htobe32(len);
-       ret = send_payload(sock, &len_be, sizeof(len_be));
-       if (ret) {
-               goto end;
-       }
-
-       ret = send_payload(sock, str, len);
-       if (ret) {
-               goto end;
-       }
-end:
-       return ret;
-}
-
-/*
- * Internal application context operation on an agent application. This function
- * communicates with the agent to enable or disable a given application context.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-static int app_context_op(const struct agent_app *app,
-               const struct agent_app_ctx *ctx, enum lttcomm_agent_command cmd)
-{
-       int ret;
-       uint32_t reply_ret_code;
-       struct lttcomm_agent_generic_reply reply;
-       size_t app_ctx_provider_name_len, app_ctx_name_len, data_size;
-
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(app->sock);
-       LTTNG_ASSERT(ctx);
-       LTTNG_ASSERT(cmd == AGENT_CMD_APP_CTX_ENABLE ||
-                       cmd == AGENT_CMD_APP_CTX_DISABLE);
-
-       DBG2("Agent %s application %s:%s for app pid: %d and socket %d",
-                       cmd == AGENT_CMD_APP_CTX_ENABLE ? "enabling" : "disabling",
-                       ctx->provider_name, ctx->ctx_name,
-                       app->pid, app->sock->fd);
-
-       /*
-        * Calculate the payload's size, which consists of the size (u32, BE)
-        * of the provider name, the NULL-terminated provider name string, the
-        * size (u32, BE) of the context name, followed by the NULL-terminated
-        * context name string.
-        */
-       app_ctx_provider_name_len = strlen(ctx->provider_name) + 1;
-       app_ctx_name_len = strlen(ctx->ctx_name) + 1;
-       data_size = sizeof(uint32_t) + app_ctx_provider_name_len +
-                       sizeof(uint32_t) + app_ctx_name_len;
-
-       ret = send_header(app->sock, data_size, cmd, 0);
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       if (app_ctx_provider_name_len > UINT32_MAX ||
-                       app_ctx_name_len > UINT32_MAX) {
-               ERR("Application context name > MAX_UINT32");
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       ret = send_pstring(app->sock, ctx->provider_name,
-                       (uint32_t) app_ctx_provider_name_len);
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       ret = send_pstring(app->sock, ctx->ctx_name,
-                       (uint32_t) app_ctx_name_len);
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       ret = recv_reply(app->sock, &reply, sizeof(reply));
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       reply_ret_code = be32toh(reply.ret_code);
-       log_reply_code(reply_ret_code);
-       switch (reply_ret_code) {
-       case AGENT_RET_CODE_SUCCESS:
-               break;
-       default:
-               ret = LTTNG_ERR_UNK;
-               goto error;
-       }
-
-       return LTTNG_OK;
-
-error_io:
-       ret = LTTNG_ERR_UST_ENABLE_FAIL;
-error:
-       return ret;
-}
-
-/*
- * Internal disable agent event call on an agent application. This function
- * communicates with the agent to disable a given event.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-static int disable_event(struct agent_app *app, struct agent_event *event)
-{
-       int ret;
-       uint64_t data_size;
-       uint32_t reply_ret_code;
-       struct lttcomm_agent_disable_event msg;
-       struct lttcomm_agent_generic_reply reply;
-
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(app->sock);
-       LTTNG_ASSERT(event);
-
-       DBG2("Agent disabling event %s for app pid: %d and socket %d", event->name,
-                       app->pid, app->sock->fd);
-
-       data_size = sizeof(msg);
-       memset(&msg, 0, sizeof(msg));
-       if (lttng_strncpy(msg.name, event->name, sizeof(msg.name))) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       ret = send_header(app->sock, data_size, AGENT_CMD_DISABLE, 0);
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       ret = send_payload(app->sock, &msg, sizeof(msg));
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       ret = recv_reply(app->sock, &reply, sizeof(reply));
-       if (ret < 0) {
-               goto error_io;
-       }
-
-       reply_ret_code = be32toh(reply.ret_code);
-       log_reply_code(reply_ret_code);
-       switch (reply_ret_code) {
-       case AGENT_RET_CODE_SUCCESS:
-               break;
-       case AGENT_RET_CODE_UNKNOWN_NAME:
-               ret = LTTNG_ERR_UST_EVENT_NOT_FOUND;
-               goto error;
-       default:
-               ret = LTTNG_ERR_UNK;
-               goto error;
-       }
-
-       return LTTNG_OK;
-
-error_io:
-       ret = LTTNG_ERR_UST_DISABLE_FAIL;
-error:
-       return ret;
-}
-
-/*
- * Send back the registration DONE command to a given agent application.
- *
- * Return 0 on success or else a negative value.
- */
-int agent_send_registration_done(struct agent_app *app)
-{
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(app->sock);
-
-       DBG("Agent sending registration done to app socket %d", app->sock->fd);
-
-       return send_header(app->sock, 0, AGENT_CMD_REG_DONE, 0);
-}
-
-/*
- * Enable agent event on every agent application registered with the session
- * daemon.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-int agent_enable_event(struct agent_event *event,
-               enum lttng_domain_type domain)
-{
-       int ret;
-       struct agent_app *app;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(event);
-
-       rcu_read_lock();
-
-       cds_lfht_for_each_entry(the_agent_apps_ht_by_sock->ht, &iter.iter, app,
-                       node.node) {
-               if (app->domain != domain) {
-                       continue;
-               }
-
-               /* Enable event on agent application through TCP socket. */
-               ret = enable_event(app, event);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-       }
-
-       event->enabled_count++;
-       ret = LTTNG_OK;
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-static
-void destroy_app_ctx(struct agent_app_ctx *ctx)
-{
-       free(ctx->provider_name);
-       free(ctx->ctx_name);
-       free(ctx);
-}
-
-static
-struct agent_app_ctx *create_app_ctx(const struct lttng_event_context *ctx)
-{
-       struct agent_app_ctx *agent_ctx = NULL;
-
-       if (!ctx) {
-               goto end;
-       }
-
-       LTTNG_ASSERT(ctx->ctx == LTTNG_EVENT_CONTEXT_APP_CONTEXT);
-       agent_ctx = zmalloc(sizeof(*agent_ctx));
-       if (!agent_ctx) {
-               goto end;
-       }
-
-       agent_ctx->provider_name = strdup(ctx->u.app_ctx.provider_name);
-       agent_ctx->ctx_name = strdup(ctx->u.app_ctx.ctx_name);
-       if (!agent_ctx->provider_name || !agent_ctx->ctx_name) {
-               destroy_app_ctx(agent_ctx);
-               agent_ctx = NULL;
-       }
-end:
-       return agent_ctx;
-}
-
-/*
- * Enable agent context on every agent application registered with the session
- * daemon.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-int agent_enable_context(const struct lttng_event_context *ctx,
-               enum lttng_domain_type domain)
-{
-       int ret;
-       struct agent_app *app;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(ctx);
-       if (ctx->ctx != LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       rcu_read_lock();
-
-       cds_lfht_for_each_entry(the_agent_apps_ht_by_sock->ht, &iter.iter, app,
-                       node.node) {
-               struct agent_app_ctx *agent_ctx;
-
-               if (app->domain != domain) {
-                       continue;
-               }
-
-               agent_ctx = create_app_ctx(ctx);
-               if (!agent_ctx) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error_unlock;
-               }
-
-               /* Enable context on agent application through TCP socket. */
-               ret = app_context_op(app, agent_ctx, AGENT_CMD_APP_CTX_ENABLE);
-               destroy_app_ctx(agent_ctx);
-               if (ret != LTTNG_OK) {
-                       goto error_unlock;
-               }
-       }
-
-       ret = LTTNG_OK;
-
-error_unlock:
-       rcu_read_unlock();
-error:
-       return ret;
-}
-
-/*
- * Disable agent event on every agent application registered with the session
- * daemon.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-int agent_disable_event(struct agent_event *event,
-               enum lttng_domain_type domain)
-{
-       int ret = LTTNG_OK;
-       struct agent_app *app;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(event);
-       if (!AGENT_EVENT_IS_ENABLED(event)) {
-               goto end;
-       }
-
-       if (--event->enabled_count != 0) {
-               /*
-                * Agent event still enabled. Disable the agent event only when
-                * all "users" have disabled it (event notifiers, event rules,
-                * etc.).
-                */
-               ret = LTTNG_OK;
-               goto end;
-       }
-
-       rcu_read_lock();
-
-       cds_lfht_for_each_entry(the_agent_apps_ht_by_sock->ht, &iter.iter, app,
-                       node.node) {
-               if (app->domain != domain) {
-                       continue;
-               }
-
-               /* Disable event on agent application through TCP socket. */
-               ret = disable_event(app, event);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-       }
-
-       /* event->enabled_count is now 0. */
-       LTTNG_ASSERT(!AGENT_EVENT_IS_ENABLED(event));
-
-error:
-       rcu_read_unlock();
-end:
-       return ret;
-}
-
-/*
- * Disable agent context on every agent application registered with the session
- * daemon.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-static int disable_context(struct agent_app_ctx *ctx,
-               enum lttng_domain_type domain)
-{
-       int ret = LTTNG_OK;
-       struct agent_app *app;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(ctx);
-
-       rcu_read_lock();
-       DBG2("Disabling agent application context %s:%s",
-                       ctx->provider_name, ctx->ctx_name);
-       cds_lfht_for_each_entry(the_agent_apps_ht_by_sock->ht, &iter.iter, app,
-                       node.node) {
-               if (app->domain != domain) {
-                       continue;
-               }
-
-               ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_DISABLE);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Ask every agent for the list of possible events. "events" is allocated with
- * the events of every agent application.
- *
- * Return the number of events or else a negative value.
- */
-int agent_list_events(struct lttng_event **events,
-               enum lttng_domain_type domain)
-{
-       int ret;
-       size_t nbmem, count = 0;
-       struct agent_app *app;
-       struct lttng_event *tmp_events = NULL;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(events);
-
-       DBG2("Agent listing events for domain %d", domain);
-
-       nbmem = UST_APP_EVENT_LIST_SIZE;
-       tmp_events = zmalloc(nbmem * sizeof(*tmp_events));
-       if (!tmp_events) {
-               PERROR("zmalloc agent list events");
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(the_agent_apps_ht_by_sock->ht, &iter.iter, app,
-                       node.node) {
-               ssize_t nb_ev;
-               struct lttng_event *agent_events;
-
-               /* Skip domain not asked by the list. */
-               if (app->domain != domain) {
-                       continue;
-               }
-
-               nb_ev = list_events(app, &agent_events);
-               if (nb_ev < 0) {
-                       ret = nb_ev;
-                       goto error_unlock;
-               }
-
-               if (count + nb_ev > nbmem) {
-                       /* In case the realloc fails, we free the memory */
-                       struct lttng_event *new_tmp_events;
-                       size_t new_nbmem;
-
-                       new_nbmem = max_t(size_t, count + nb_ev, nbmem << 1);
-                       DBG2("Reallocating agent event list from %zu to %zu entries",
-                                       nbmem, new_nbmem);
-                       new_tmp_events = realloc(tmp_events,
-                               new_nbmem * sizeof(*new_tmp_events));
-                       if (!new_tmp_events) {
-                               PERROR("realloc agent events");
-                               ret = -ENOMEM;
-                               free(agent_events);
-                               goto error_unlock;
-                       }
-                       /* Zero the new memory */
-                       memset(new_tmp_events + nbmem, 0,
-                               (new_nbmem - nbmem) * sizeof(*new_tmp_events));
-                       nbmem = new_nbmem;
-                       tmp_events = new_tmp_events;
-               }
-               memcpy(tmp_events + count, agent_events,
-                       nb_ev * sizeof(*tmp_events));
-               free(agent_events);
-               count += nb_ev;
-       }
-       rcu_read_unlock();
-
-       ret = count;
-       *events = tmp_events;
-       return ret;
-
-error_unlock:
-       rcu_read_unlock();
-error:
-       free(tmp_events);
-       return ret;
-}
-
-/*
- * Create an agent app object using the given PID.
- *
- * Return newly allocated object or else NULL on error.
- */
-struct agent_app *agent_create_app(pid_t pid, enum lttng_domain_type domain,
-               struct lttcomm_sock *sock)
-{
-       struct agent_app *app;
-
-       LTTNG_ASSERT(sock);
-
-       app = zmalloc(sizeof(*app));
-       if (!app) {
-               PERROR("Failed to allocate agent application instance");
-               goto error;
-       }
-
-       app->pid = pid;
-       app->domain = domain;
-       app->sock = sock;
-       lttng_ht_node_init_ulong(&app->node, (unsigned long) app->sock->fd);
-
-error:
-       return app;
-}
-
-/*
- * Lookup agent app by socket in the global hash table.
- *
- * RCU read side lock MUST be acquired.
- *
- * Return object if found else NULL.
- */
-struct agent_app *agent_find_app_by_sock(int sock)
-{
-       struct lttng_ht_node_ulong *node;
-       struct lttng_ht_iter iter;
-       struct agent_app *app;
-
-       LTTNG_ASSERT(sock >= 0);
-
-       lttng_ht_lookup(the_agent_apps_ht_by_sock,
-                       (void *) ((unsigned long) sock), &iter);
-       node = lttng_ht_iter_get_node_ulong(&iter);
-       if (node == NULL) {
-               goto error;
-       }
-       app = caa_container_of(node, struct agent_app, node);
-
-       DBG3("Agent app pid %d found by sock %d.", app->pid, sock);
-       return app;
-
-error:
-       DBG3("Agent app NOT found by sock %d.", sock);
-       return NULL;
-}
-
-/*
- * Add agent application object to the global hash table.
- */
-void agent_add_app(struct agent_app *app)
-{
-       LTTNG_ASSERT(app);
-
-       DBG3("Agent adding app sock: %d and pid: %d to ht", app->sock->fd, app->pid);
-       lttng_ht_add_unique_ulong(the_agent_apps_ht_by_sock, &app->node);
-}
-
-/*
- * Delete agent application from the global hash table.
- *
- * rcu_read_lock() must be held by the caller.
- */
-void agent_delete_app(struct agent_app *app)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(app);
-
-       DBG3("Agent deleting app pid: %d and sock: %d", app->pid, app->sock->fd);
-
-       iter.iter.node = &app->node.node;
-       ret = lttng_ht_del(the_agent_apps_ht_by_sock, &iter);
-       LTTNG_ASSERT(!ret);
-}
-
-/*
- * Destroy an agent application object by detaching it from its corresponding
- * UST app, if one is connected, by closing the socket. Finally, perform a
- * delayed memory reclaim.
- */
-void agent_destroy_app(struct agent_app *app)
-{
-       LTTNG_ASSERT(app);
-
-       if (app->sock) {
-               app->sock->ops->close(app->sock);
-               lttcomm_destroy_sock(app->sock);
-       }
-
-       call_rcu(&app->node.head, destroy_app_agent_rcu);
-}
-
-/*
- * Initialize an already allocated agent object.
- *
- * Return 0 on success or else a negative errno value.
- */
-int agent_init(struct agent *agt)
-{
-       int ret;
-
-       LTTNG_ASSERT(agt);
-
-       agt->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
-       if (!agt->events) {
-               ret = -ENOMEM;
-               goto error;
-       }
-       lttng_ht_node_init_u64(&agt->node, agt->domain);
-
-       CDS_INIT_LIST_HEAD(&agt->app_ctx_list);
-       return 0;
-
-error:
-       return ret;
-}
-
-/*
- * Add agent object to the given hash table.
- */
-void agent_add(struct agent *agt, struct lttng_ht *ht)
-{
-       LTTNG_ASSERT(agt);
-       LTTNG_ASSERT(ht);
-
-       DBG3("Agent adding from domain %d", agt->domain);
-
-       lttng_ht_add_unique_u64(ht, &agt->node);
-}
-
-/*
- * Create an agent object for the given domain.
- *
- * Return the allocated agent or NULL on error.
- */
-struct agent *agent_create(enum lttng_domain_type domain)
-{
-       int ret;
-       struct agent *agt;
-
-       agt = zmalloc(sizeof(struct agent));
-       if (!agt) {
-               goto error;
-       }
-       agt->domain = domain;
-
-       ret = agent_init(agt);
-       if (ret < 0) {
-               free(agt);
-               agt = NULL;
-               goto error;
-       }
-
-error:
-       return agt;
-}
-
-/*
- * Create a newly allocated agent event data structure.
- * Ownership of filter_expression is taken.
- *
- * Return a new object else NULL on error.
- */
-struct agent_event *agent_create_event(const char *name,
-               enum lttng_loglevel_type loglevel_type, int loglevel_value,
-               struct lttng_bytecode *filter, char *filter_expression)
-{
-       struct agent_event *event = NULL;
-
-       DBG3("Agent create new event with name %s, loglevel type %d, \
-                       loglevel value %d and filter %s",
-                       name, loglevel_type, loglevel_value,
-                       filter_expression ? filter_expression : "NULL");
-
-       if (!name) {
-               ERR("Failed to create agent event; no name provided.");
-               goto error;
-       }
-
-       event = zmalloc(sizeof(*event));
-       if (!event) {
-               goto error;
-       }
-
-       strncpy(event->name, name, sizeof(event->name));
-       event->name[sizeof(event->name) - 1] = '\0';
-       lttng_ht_node_init_str(&event->node, event->name);
-
-       event->loglevel_value = loglevel_value;
-       event->loglevel_type = loglevel_type;
-       event->filter = filter;
-       event->filter_expression = filter_expression;
-error:
-       return event;
-}
-
-/*
- * Unique add of an agent event to an agent object.
- */
-void agent_add_event(struct agent_event *event, struct agent *agt)
-{
-       LTTNG_ASSERT(event);
-       LTTNG_ASSERT(agt);
-       LTTNG_ASSERT(agt->events);
-
-       DBG3("Agent adding event %s", event->name);
-       add_unique_agent_event(agt->events, event);
-       agt->being_used = 1;
-}
-
-/*
- * Unique add of an agent context to an agent object.
- */
-int agent_add_context(const struct lttng_event_context *ctx, struct agent *agt)
-{
-       int ret = LTTNG_OK;
-       struct agent_app_ctx *agent_ctx = NULL;
-
-       LTTNG_ASSERT(ctx);
-       LTTNG_ASSERT(agt);
-       LTTNG_ASSERT(agt->events);
-       LTTNG_ASSERT(ctx->ctx == LTTNG_EVENT_CONTEXT_APP_CONTEXT);
-
-       agent_ctx = create_app_ctx(ctx);
-       if (!agent_ctx) {
-               ret = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-       DBG3("Agent adding context %s:%s", ctx->u.app_ctx.provider_name,
-                       ctx->u.app_ctx.ctx_name);
-       cds_list_add_tail_rcu(&agent_ctx->list_node, &agt->app_ctx_list);
-end:
-       return ret;
-}
-
-/*
- * Find multiple agent events sharing the given name.
- *
- * RCU read side lock MUST be acquired. It must be held for the
- * duration of the iteration.
- *
- * Sets the given iterator.
- */
-void agent_find_events_by_name(const char *name, struct agent *agt,
-               struct lttng_ht_iter* iter)
-{
-       struct lttng_ht *ht;
-       struct agent_ht_key key;
-
-       LTTNG_ASSERT(name);
-       LTTNG_ASSERT(agt);
-       LTTNG_ASSERT(agt->events);
-       LTTNG_ASSERT(iter);
-
-       ht = agt->events;
-       key.name = name;
-
-       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
-                       ht_match_event_by_name, &key, &iter->iter);
-}
-
-/*
- * Find the agent event matching a trigger.
- *
- * RCU read side lock MUST be acquired. It must be held for as long as
- * the returned agent_event is used.
- *
- * Return object if found else NULL.
- */
-struct agent_event *agent_find_event_by_trigger(
-               const struct lttng_trigger *trigger, struct agent *agt)
-{
-       enum lttng_condition_status c_status;
-       enum lttng_event_rule_status er_status;
-       enum lttng_domain_type domain;
-       const struct lttng_condition *condition;
-       const struct lttng_event_rule *rule;
-       const char *name;
-       const char *filter_expression;
-       const struct lttng_log_level_rule *log_level_rule;
-       /* Unused when loglevel_type is 'ALL'. */
-       int loglevel_value = 0;
-       enum lttng_loglevel_type loglevel_type;
-       event_rule_logging_get_name_pattern logging_get_name_pattern;
-       event_rule_logging_get_log_level_rule logging_get_log_level_rule;
-
-       LTTNG_ASSERT(agt);
-       LTTNG_ASSERT(agt->events);
-
-       condition = lttng_trigger_get_const_condition(trigger);
-
-       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
-                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
-
-       c_status = lttng_condition_event_rule_matches_get_rule(
-                       condition, &rule);
-       LTTNG_ASSERT(c_status == LTTNG_CONDITION_STATUS_OK);
-
-       switch (lttng_event_rule_get_type(rule)) {
-       case LTTNG_EVENT_RULE_TYPE_JUL_LOGGING:
-               logging_get_name_pattern =
-                               lttng_event_rule_jul_logging_get_name_pattern;
-               logging_get_log_level_rule =
-                               lttng_event_rule_jul_logging_get_log_level_rule;
-               break;
-       case LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING:
-               logging_get_name_pattern =
-                               lttng_event_rule_log4j_logging_get_name_pattern;
-               logging_get_log_level_rule =
-                               lttng_event_rule_log4j_logging_get_log_level_rule;
-               break;
-       case LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING:
-               logging_get_name_pattern =
-                               lttng_event_rule_python_logging_get_name_pattern;
-               logging_get_log_level_rule =
-                               lttng_event_rule_python_logging_get_log_level_rule;
-               break;
-       default:
-               abort();
-               break;
-       }
-
-       domain = lttng_event_rule_get_domain_type(rule);
-       LTTNG_ASSERT(domain == LTTNG_DOMAIN_JUL || domain == LTTNG_DOMAIN_LOG4J ||
-                       domain == LTTNG_DOMAIN_PYTHON);
-
-       /* Get the event's pattern name ('name' in the legacy terminology). */
-       er_status = logging_get_name_pattern(rule, &name);
-       LTTNG_ASSERT(er_status == LTTNG_EVENT_RULE_STATUS_OK);
-
-       /* Get the internal filter expression. */
-       filter_expression = lttng_event_rule_get_filter(rule);
-
-       /* Map log_level_rule to loglevel value. */
-       er_status = logging_get_log_level_rule(rule, &log_level_rule);
-       if (er_status == LTTNG_EVENT_RULE_STATUS_UNSET) {
-               loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
-               loglevel_value = 0;
-       } else if (er_status == LTTNG_EVENT_RULE_STATUS_OK) {
-               lttng_log_level_rule_to_loglevel(log_level_rule, &loglevel_type, &loglevel_value);
-       } else {
-               abort();
-       }
-
-       return agent_find_event(name, loglevel_type, loglevel_value,
-                       filter_expression, agt);
-}
-
-/*
- * Get the next agent event duplicate by name. This should be called
- * after a call to agent_find_events_by_name() to iterate on events.
- *
- * The RCU read lock must be held during the iteration and for as long
- * as the object the iterator points to remains in use.
- */
-void agent_event_next_duplicate(const char *name,
-               struct agent *agt, struct lttng_ht_iter* iter)
-{
-       struct agent_ht_key key;
-
-       key.name = name;
-
-       cds_lfht_next_duplicate(agt->events->ht, ht_match_event_by_name,
-               &key, &iter->iter);
-}
-
-/*
- * Find an agent event in the given agent using name, loglevel and filter.
- *
- * RCU read side lock MUST be acquired. It must be kept for as long as
- * the returned agent_event is used.
- *
- * Return object if found else NULL.
- */
-struct agent_event *agent_find_event(const char *name,
-               enum lttng_loglevel_type loglevel_type,
-               int loglevel_value,
-               const char *filter_expression,
-               struct agent *agt)
-{
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-       struct lttng_ht *ht;
-       struct agent_ht_key key;
-
-       LTTNG_ASSERT(name);
-       LTTNG_ASSERT(agt);
-       LTTNG_ASSERT(agt->events);
-
-       ht = agt->events;
-       key.name = name;
-       key.loglevel_value = loglevel_value;
-       key.loglevel_type = loglevel_type;
-       key.filter_expression = filter_expression;
-
-       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
-                       ht_match_event, &key, &iter.iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-       if (node == NULL) {
-               goto error;
-       }
-
-       DBG3("Agent event found %s.", name);
-       return caa_container_of(node, struct agent_event, node);
-
-error:
-       DBG3("Agent event NOT found %s.", name);
-       return NULL;
-}
-
-/*
- * Free given agent event. This event must not be globally visible at this
- * point (only expected to be used on failure just after event creation). After
- * this call, the pointer is not usable anymore.
- */
-void agent_destroy_event(struct agent_event *event)
-{
-       LTTNG_ASSERT(event);
-
-       free(event->filter);
-       free(event->filter_expression);
-       free(event->exclusion);
-       free(event);
-}
-
-static
-void destroy_app_ctx_rcu(struct rcu_head *head)
-{
-       struct agent_app_ctx *ctx =
-                       caa_container_of(head, struct agent_app_ctx, rcu_node);
-
-       destroy_app_ctx(ctx);
-}
-
-/*
- * Destroy an agent completely.
- */
-void agent_destroy(struct agent *agt)
-{
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-       struct agent_app_ctx *ctx;
-
-       LTTNG_ASSERT(agt);
-
-       DBG3("Agent destroy");
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(agt->events->ht, &iter.iter, node, node) {
-               int ret;
-               struct agent_event *event;
-
-               /*
-                * When destroying an event, we have to try to disable it on the
-                * agent side so the event stops generating data. The return
-                * value is not important since we have to continue anyway
-                * destroying the object.
-                */
-               event = caa_container_of(node, struct agent_event, node);
-               (void) agent_disable_event(event, agt->domain);
-
-               ret = lttng_ht_del(agt->events, &iter);
-               LTTNG_ASSERT(!ret);
-               call_rcu(&node->head, destroy_event_agent_rcu);
-       }
-
-       cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node) {
-               (void) disable_context(ctx, agt->domain);
-               cds_list_del(&ctx->list_node);
-               call_rcu(&ctx->rcu_node, destroy_app_ctx_rcu);
-       }
-       rcu_read_unlock();
-       ht_cleanup_push(agt->events);
-       free(agt);
-}
-
-/*
- * Allocate agent_apps_ht_by_sock.
- */
-int agent_app_ht_alloc(void)
-{
-       the_agent_apps_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-       return the_agent_apps_ht_by_sock ? 0 : -1;
-}
-
-/*
- * Destroy an agent application by socket.
- */
-void agent_destroy_app_by_sock(int sock)
-{
-       struct agent_app *app;
-
-       LTTNG_ASSERT(sock >= 0);
-
-       /*
-        * Not finding an application is a very important error that should NEVER
-        * happen. The hash table deletion is ONLY done through this call when the
-        * main sessiond thread is torn down.
-        */
-       rcu_read_lock();
-       app = agent_find_app_by_sock(sock);
-       LTTNG_ASSERT(app);
-
-       /* RCU read side lock is assumed to be held by this function. */
-       agent_delete_app(app);
-
-       /* The application is freed in a RCU call but the socket is closed here. */
-       agent_destroy_app(app);
-       rcu_read_unlock();
-}
-
-/*
- * Clean-up the agent app hash table and destroy it.
- */
-void agent_app_ht_clean(void)
-{
-       struct lttng_ht_node_ulong *node;
-       struct lttng_ht_iter iter;
-
-       if (!the_agent_apps_ht_by_sock) {
-               return;
-       }
-       rcu_read_lock();
-       cds_lfht_for_each_entry(
-                       the_agent_apps_ht_by_sock->ht, &iter.iter, node, node) {
-               struct agent_app *app;
-
-               app = caa_container_of(node, struct agent_app, node);
-               agent_destroy_app_by_sock(app->sock->fd);
-       }
-       rcu_read_unlock();
-
-       lttng_ht_destroy(the_agent_apps_ht_by_sock);
-}
-
-/*
- * Update an agent application (given socket) using the given agent.
- *
- * Note that this function is most likely to be used with a tracing session
- * thus the caller should make sure to hold the appropriate lock(s).
- */
-void agent_update(const struct agent *agt, const struct agent_app *app)
-{
-       int ret;
-       struct agent_event *event;
-       struct lttng_ht_iter iter;
-       struct agent_app_ctx *ctx;
-
-       LTTNG_ASSERT(agt);
-       LTTNG_ASSERT(app);
-
-       DBG("Agent updating app: pid = %ld", (long) app->pid);
-
-       rcu_read_lock();
-       /*
-        * We are in the registration path thus if the application is gone,
-        * there is a serious code flow error.
-        */
-
-       cds_lfht_for_each_entry(agt->events->ht, &iter.iter, event, node.node) {
-               /* Skip event if disabled. */
-               if (!AGENT_EVENT_IS_ENABLED(event)) {
-                       continue;
-               }
-
-               ret = enable_event(app, event);
-               if (ret != LTTNG_OK) {
-                       DBG2("Agent update unable to enable event %s on app pid: %d sock %d",
-                                       event->name, app->pid, app->sock->fd);
-                       /* Let's try the others here and don't assume the app is dead. */
-                       continue;
-               }
-       }
-
-       cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node) {
-               ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_ENABLE);
-               if (ret != LTTNG_OK) {
-                       DBG2("Agent update unable to add application context %s:%s on app pid: %d sock %d",
-                                       ctx->provider_name, ctx->ctx_name,
-                                       app->pid, app->sock->fd);
-                       continue;
-               }
-       }
-
-       rcu_read_unlock();
-}
-
-/*
- * Allocate the per-event notifier domain agent hash table. It is lazily
- * populated as domains are used.
- */
-int agent_by_event_notifier_domain_ht_create(void)
-{
-       the_trigger_agents_ht_by_domain = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-       return the_trigger_agents_ht_by_domain ? 0 : -1;
-}
-
-/*
- * Clean-up the per-event notifier domain agent hash table and destroy it.
- */
-void agent_by_event_notifier_domain_ht_destroy(void)
-{
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-
-       if (!the_trigger_agents_ht_by_domain) {
-               return;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(the_trigger_agents_ht_by_domain->ht,
-                       &iter.iter, node, node) {
-               struct agent *agent =
-                               caa_container_of(node, struct agent, node);
-               const int ret = lttng_ht_del(
-                               the_trigger_agents_ht_by_domain, &iter);
-
-               LTTNG_ASSERT(ret == 0);
-               agent_destroy(agent);
-       }
-
-       rcu_read_unlock();
-       lttng_ht_destroy(the_trigger_agents_ht_by_domain);
-}
-
-struct agent *agent_find_by_event_notifier_domain(
-               enum lttng_domain_type domain_type)
-{
-       struct agent *agt = NULL;
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-       const uint64_t key = (uint64_t) domain_type;
-
-       LTTNG_ASSERT(the_trigger_agents_ht_by_domain);
-
-       DBG3("Per-event notifier domain agent lookup for domain '%s'",
-                       lttng_domain_type_str(domain_type));
-
-       lttng_ht_lookup(the_trigger_agents_ht_by_domain, &key, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (!node) {
-               goto end;
-       }
-
-       agt = caa_container_of(node, struct agent, node);
-
-end:
-       return agt;
-}
diff --git a/src/bin/lttng-sessiond/agent.cpp b/src/bin/lttng-sessiond/agent.cpp
new file mode 100644 (file)
index 0000000..5be61dc
--- /dev/null
@@ -0,0 +1,1627 @@
+/*
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <urcu/uatomic.h>
+#include <urcu/rculist.h>
+
+#include <lttng/event-rule/event-rule.h>
+#include <lttng/event-rule/event-rule-internal.h>
+#include <lttng/event-rule/jul-logging.h>
+#include <lttng/event-rule/log4j-logging.h>
+#include <lttng/event-rule/python-logging.h>
+#include <lttng/condition/condition.h>
+#include <lttng/condition/event-rule-matches.h>
+#include <lttng/domain-internal.h>
+#include <lttng/log-level-rule-internal.h>
+
+#include <common/common.h>
+#include <common/sessiond-comm/agent.h>
+
+#include <common/compat/endian.h>
+
+#include "agent.h"
+#include "ust-app.h"
+#include "utils.h"
+#include "common/error.h"
+
+typedef enum lttng_event_rule_status (*event_rule_logging_get_name_pattern)(
+               const struct lttng_event_rule *rule, const char **pattern);
+typedef enum lttng_event_rule_status (*event_rule_logging_get_log_level_rule)(
+               const struct lttng_event_rule *rule,
+               const struct lttng_log_level_rule **log_level_rule);
+
+/*
+ * Agent application context representation.
+ */
+struct agent_app_ctx {
+       char *provider_name;
+       char *ctx_name;
+
+       /* agent_app_ctx are part of the agent app_ctx_list. */
+       struct cds_list_head list_node;
+
+       /* For call_rcu teardown. */
+       struct rcu_head rcu_node;
+};
+
+/*
+ * Human readable agent return code.
+ */
+static
+const char *lttcomm_agent_ret_code_str(lttcomm_agent_ret_code code)
+{
+       switch (code) {
+       case AGENT_RET_CODE_SUCCESS:
+               return "Success";
+       case AGENT_RET_CODE_INVALID:
+               return "Invalid command";
+       case AGENT_RET_CODE_UNKNOWN_NAME:
+               return "Unknown logger name";
+       default:
+               return "Unknown code";
+       }
+}
+
+static
+void log_reply_code(uint32_t in_reply_ret_code)
+{
+       int level = PRINT_DBG3;
+       /*
+        * reply_ret_code and in_reply_ret_code are kept separate to have a
+        * sanitized value (used to retrieve the human readable string) and the
+        * original value which is logged as-is.
+        */
+       uint32_t reply_ret_code = in_reply_ret_code;
+
+       if (reply_ret_code < AGENT_RET_CODE_SUCCESS ||
+                       reply_ret_code >= AGENT_RET_CODE_NR) {
+               reply_ret_code = AGENT_RET_CODE_NR;
+               level = PRINT_ERR;
+       }
+
+       LOG(level, "Agent replied with retcode: %s (%" PRIu32 ")",
+                       lttcomm_agent_ret_code_str((lttcomm_agent_ret_code) reply_ret_code),
+                       in_reply_ret_code);
+}
+
+/*
+ * Match function for the events hash table lookup by name.
+ */
+static int ht_match_event_by_name(struct cds_lfht_node *node,
+               const void *_key)
+{
+       struct agent_event *event;
+       const struct agent_ht_key *key;
+
+       LTTNG_ASSERT(node);
+       LTTNG_ASSERT(_key);
+
+       event = caa_container_of(node, struct agent_event, node.node);
+       key = (agent_ht_key *) _key;
+
+       /* Match 1 element of the key: name. */
+
+       /* Event name */
+       if (strncmp(event->name, key->name, sizeof(event->name)) != 0) {
+               goto no_match;
+       }
+       /* Match. */
+       return 1;
+
+no_match:
+       return 0;
+}
+
+/*
+ * Match function for the events hash table lookup by name, log level and
+ * filter expression.
+ */
+static int ht_match_event(struct cds_lfht_node *node,
+               const void *_key)
+{
+       struct agent_event *event;
+       const struct agent_ht_key *key;
+       int ll_match;
+
+       LTTNG_ASSERT(node);
+       LTTNG_ASSERT(_key);
+
+       event = caa_container_of(node, struct agent_event, node.node);
+       key = (agent_ht_key *) _key;
+
+       /* Match 3 elements of the key: name, loglevel and filter expression. */
+
+       /* Event name */
+       if (strncmp(event->name, key->name, sizeof(event->name)) != 0) {
+               goto no_match;
+       }
+
+       /* Event loglevel value and type. */
+       ll_match = loglevels_match(event->loglevel_type,
+               event->loglevel_value, key->loglevel_type,
+               key->loglevel_value, LTTNG_EVENT_LOGLEVEL_ALL);
+
+       if (!ll_match) {
+               goto no_match;
+       }
+
+       /* Filter expression */
+       if (!!event->filter_expression != !!key->filter_expression) {
+               /* One has a filter expression, the other does not */
+               goto no_match;
+       }
+
+       if (event->filter_expression) {
+               if (strncmp(event->filter_expression, key->filter_expression,
+                               strlen(event->filter_expression)) != 0) {
+                       goto no_match;
+               }
+       }
+
+       return 1;
+
+no_match:
+       return 0;
+}
+
+/*
+ * Add unique agent event based on the event name and loglevel.
+ */
+static void add_unique_agent_event(struct lttng_ht *ht,
+               struct agent_event *event)
+{
+       struct cds_lfht_node *node_ptr;
+       struct agent_ht_key key;
+
+       LTTNG_ASSERT(ht);
+       LTTNG_ASSERT(ht->ht);
+       LTTNG_ASSERT(event);
+
+       key.name = event->name;
+       key.loglevel_value = event->loglevel_value;
+       key.loglevel_type = event->loglevel_type;
+       key.filter_expression = event->filter_expression;
+
+       node_ptr = cds_lfht_add_unique(ht->ht,
+                       ht->hash_fct(event->node.key, lttng_ht_seed),
+                       ht_match_event, &key, &event->node.node);
+       LTTNG_ASSERT(node_ptr == &event->node.node);
+}
+
+/*
+ * URCU delayed agent event reclaim.
+ */
+static void destroy_event_agent_rcu(struct rcu_head *head)
+{
+       struct lttng_ht_node_str *node =
+               caa_container_of(head, struct lttng_ht_node_str, head);
+       struct agent_event *event =
+               caa_container_of(node, struct agent_event, node);
+
+       agent_destroy_event(event);
+}
+
+/*
+ * URCU delayed agent app reclaim.
+ */
+static void destroy_app_agent_rcu(struct rcu_head *head)
+{
+       struct lttng_ht_node_ulong *node =
+               caa_container_of(head, struct lttng_ht_node_ulong, head);
+       struct agent_app *app =
+               caa_container_of(node, struct agent_app, node);
+
+       free(app);
+}
+
+/*
+ * Communication with the agent. Send the message header to the given socket in
+ * big endian.
+ *
+ * Return 0 on success or else a negative errno value of sendmsg() op.
+ */
+static int send_header(struct lttcomm_sock *sock, uint64_t data_size,
+               uint32_t cmd, uint32_t cmd_version)
+{
+       int ret;
+       ssize_t size;
+       struct lttcomm_agent_hdr msg;
+
+       LTTNG_ASSERT(sock);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.data_size = htobe64(data_size);
+       msg.cmd = htobe32(cmd);
+       msg.cmd_version = htobe32(cmd_version);
+
+       size = sock->ops->sendmsg(sock, &msg, sizeof(msg), 0);
+       if (size < sizeof(msg)) {
+               ret = -errno;
+               goto error;
+       }
+       ret = 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Communication call with the agent. Send the payload to the given socket. The
+ * header MUST be sent prior to this call.
+ *
+ * Return 0 on success or else a negative errno value of sendmsg() op.
+ */
+static int send_payload(struct lttcomm_sock *sock, const void *data,
+               size_t size)
+{
+       int ret;
+       ssize_t len;
+
+       LTTNG_ASSERT(sock);
+       LTTNG_ASSERT(data);
+
+       len = sock->ops->sendmsg(sock, data, size, 0);
+       if (len < size) {
+               ret = -errno;
+               goto error;
+       }
+       ret = 0;
+
+error:
+       return ret;
+}
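+
+/*
+ * Usage sketch (for illustration only; `cmd', `buf' and `len' are placeholder
+ * names, not identifiers from this file): a complete agent command is framed
+ * as one send_header() call followed, when the command carries data, by one
+ * send_payload() call on the same socket:
+ *
+ *     ret = send_header(app->sock, len, cmd, 0);
+ *     if (ret < 0) { ... }
+ *     ret = send_payload(app->sock, buf, len);
+ *
+ * Commands without a payload (e.g. AGENT_CMD_REG_DONE) only send the header
+ * with a data_size of 0.
+ */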
+
+/*
+ * Communication call with the agent. Receive reply from the agent using the
+ * given socket.
+ *
+ * Return 0 on success or else a negative errno value from recvmsg() op.
+ */
+static int recv_reply(struct lttcomm_sock *sock, void *buf, size_t size)
+{
+       int ret;
+       ssize_t len;
+
+       LTTNG_ASSERT(sock);
+       LTTNG_ASSERT(buf);
+
+       len = sock->ops->recvmsg(sock, buf, size, 0);
+       if (len < size) {
+               ret = -errno;
+               goto error;
+       }
+       ret = 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Internal event listing for a given app. Populate events.
+ *
+ * Return the number of elements in the list or else a negative LTTNG_ERR* code.
+ * On success, the caller is responsible for freeing the memory
+ * allocated for "events".
+ */
+static ssize_t list_events(struct agent_app *app, struct lttng_event **events)
+{
+       int ret, i, len = 0, offset = 0;
+       uint32_t nb_event;
+       size_t data_size;
+       uint32_t reply_ret_code;
+       struct lttng_event *tmp_events = NULL;
+       struct lttcomm_agent_list_reply *reply = NULL;
+       struct lttcomm_agent_list_reply_hdr reply_hdr;
+
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(app->sock);
+       LTTNG_ASSERT(events);
+
+       DBG2("Agent listing events for app pid: %d and socket %d", app->pid,
+                       app->sock->fd);
+
+       ret = send_header(app->sock, 0, AGENT_CMD_LIST, 0);
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       /* Get list header so we know how much we'll receive. */
+       ret = recv_reply(app->sock, &reply_hdr, sizeof(reply_hdr));
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       reply_ret_code = be32toh(reply_hdr.ret_code);
+       log_reply_code(reply_ret_code);
+       switch (reply_ret_code) {
+       case AGENT_RET_CODE_SUCCESS:
+               data_size = be32toh(reply_hdr.data_size) + sizeof(*reply);
+               break;
+       default:
+               ret = LTTNG_ERR_UNK;
+               goto error;
+       }
+
+       reply = (lttcomm_agent_list_reply *) zmalloc(data_size);
+       if (!reply) {
+               ret = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       /* Get the list with the appropriate data size. */
+       ret = recv_reply(app->sock, reply, data_size);
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       nb_event = be32toh(reply->nb_event);
+       tmp_events = (lttng_event *) zmalloc(sizeof(*tmp_events) * nb_event);
+       if (!tmp_events) {
+               ret = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       for (i = 0; i < nb_event; i++) {
+               offset += len;
+               if (lttng_strncpy(tmp_events[i].name, reply->payload + offset,
+                               sizeof(tmp_events[i].name))) {
+                       ret = LTTNG_ERR_INVALID;
+                       goto error;
+               }
+               tmp_events[i].pid = app->pid;
+               tmp_events[i].enabled = -1;
+               len = strlen(reply->payload + offset) + 1;
+       }
+
+       *events = tmp_events;
+
+       free(reply);
+       return nb_event;
+
+error_io:
+       ret = LTTNG_ERR_UST_LIST_FAIL;
+error:
+       free(reply);
+       free(tmp_events);
+       return -ret;
+
+}
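+
+/*
+ * For reference, the reply read by list_events() above is laid out as follows
+ * on the wire (all integers big endian), going only by the fields the
+ * function actually accesses:
+ *
+ *     lttcomm_agent_list_reply_hdr: ret_code (u32), data_size (u32)
+ *     lttcomm_agent_list_reply:     nb_event (u32), payload (bytes)
+ *
+ * where the payload holds nb_event NUL-terminated event names packed back to
+ * back, which the loop walks using the running `offset'.
+ */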
+
+/*
+ * Internal enable agent event on an agent application. This function
+ * communicates with the agent to enable a given event.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+static int enable_event(const struct agent_app *app, struct agent_event *event)
+{
+       int ret;
+       char *bytes_to_send;
+       uint64_t data_size;
+       size_t filter_expression_length;
+       uint32_t reply_ret_code;
+       struct lttcomm_agent_enable_event msg;
+       struct lttcomm_agent_generic_reply reply;
+
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(app->sock);
+       LTTNG_ASSERT(event);
+
+       DBG2("Agent enabling event %s for app pid: %d and socket %d", event->name,
+                       app->pid, app->sock->fd);
+
+       /*
+        * Calculate the payload's size, which is the fixed-size struct followed
+        * by the variable-length filter expression (+1 for the ending \0).
+        */
+       if (!event->filter_expression) {
+               filter_expression_length = 0;
+       } else {
+               filter_expression_length = strlen(event->filter_expression) + 1;
+       }
+       data_size = sizeof(msg) + filter_expression_length;
+
+       memset(&msg, 0, sizeof(msg));
+       msg.loglevel_value = htobe32(event->loglevel_value);
+       msg.loglevel_type = htobe32(event->loglevel_type);
+       if (lttng_strncpy(msg.name, event->name, sizeof(msg.name))) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+       msg.filter_expression_length = htobe32(filter_expression_length);
+
+       ret = send_header(app->sock, data_size, AGENT_CMD_ENABLE, 0);
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       bytes_to_send = (char *) zmalloc(data_size);
+       if (!bytes_to_send) {
+               ret = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       memcpy(bytes_to_send, &msg, sizeof(msg));
+       if (filter_expression_length > 0) {
+               memcpy(bytes_to_send + sizeof(msg), event->filter_expression,
+                               filter_expression_length);
+       }
+
+       ret = send_payload(app->sock, bytes_to_send, data_size);
+       free(bytes_to_send);
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       ret = recv_reply(app->sock, &reply, sizeof(reply));
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       reply_ret_code = be32toh(reply.ret_code);
+       log_reply_code(reply_ret_code);
+       switch (reply_ret_code) {
+       case AGENT_RET_CODE_SUCCESS:
+               break;
+       case AGENT_RET_CODE_UNKNOWN_NAME:
+               ret = LTTNG_ERR_UST_EVENT_NOT_FOUND;
+               goto error;
+       default:
+               ret = LTTNG_ERR_UNK;
+               goto error;
+       }
+
+       return LTTNG_OK;
+
+error_io:
+       ret = LTTNG_ERR_UST_ENABLE_FAIL;
+error:
+       return ret;
+}
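+
+/*
+ * Sizing example (hypothetical values): enabling an event with the filter
+ * expression "intfield > 500" (14 characters) gives a
+ * filter_expression_length of 15, since the terminating NUL is counted, and a
+ * data_size of sizeof(struct lttcomm_agent_enable_event) + 15. That size is
+ * announced in the header and matches the buffer sent by send_payload().
+ */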
+
+/*
+ * Send Pascal-style string. Size is sent as a 32-bit big endian integer.
+ */
+static
+int send_pstring(struct lttcomm_sock *sock, const char *str, uint32_t len)
+{
+       int ret;
+       uint32_t len_be;
+
+       len_be = htobe32(len);
+       ret = send_payload(sock, &len_be, sizeof(len_be));
+       if (ret) {
+               goto end;
+       }
+
+       ret = send_payload(sock, str, len);
+       if (ret) {
+               goto end;
+       }
+end:
+       return ret;
+}
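+
+/*
+ * For example, send_pstring(sock, "provider", 9), where 9 counts the
+ * terminating NUL, writes 13 bytes on the wire: the 32-bit big endian length
+ * 0x00000009 followed by the bytes 'p' 'r' 'o' 'v' 'i' 'd' 'e' 'r' '\0'.
+ * ("provider" is an arbitrary example string.)
+ */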
+
+/*
+ * Internal application context operation on an agent application. This function
+ * communicates with the agent to enable or disable a given application context.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+static int app_context_op(const struct agent_app *app,
+               const struct agent_app_ctx *ctx, enum lttcomm_agent_command cmd)
+{
+       int ret;
+       uint32_t reply_ret_code;
+       struct lttcomm_agent_generic_reply reply;
+       size_t app_ctx_provider_name_len, app_ctx_name_len, data_size;
+
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(app->sock);
+       LTTNG_ASSERT(ctx);
+       LTTNG_ASSERT(cmd == AGENT_CMD_APP_CTX_ENABLE ||
+                       cmd == AGENT_CMD_APP_CTX_DISABLE);
+
+       DBG2("Agent %s application %s:%s for app pid: %d and socket %d",
+                       cmd == AGENT_CMD_APP_CTX_ENABLE ? "enabling" : "disabling",
+                       ctx->provider_name, ctx->ctx_name,
+                       app->pid, app->sock->fd);
+
+       /*
+        * Calculate the payload's size, which consists of the size (u32, BE)
+        * of the provider name, the NULL-terminated provider name string, the
+        * size (u32, BE) of the context name, followed by the NULL-terminated
+        * context name string.
+        */
+       app_ctx_provider_name_len = strlen(ctx->provider_name) + 1;
+       app_ctx_name_len = strlen(ctx->ctx_name) + 1;
+       data_size = sizeof(uint32_t) + app_ctx_provider_name_len +
+                       sizeof(uint32_t) + app_ctx_name_len;
+
+       ret = send_header(app->sock, data_size, cmd, 0);
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       if (app_ctx_provider_name_len > UINT32_MAX ||
+                       app_ctx_name_len > UINT32_MAX) {
+               ERR("Application context name > MAX_UINT32");
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       ret = send_pstring(app->sock, ctx->provider_name,
+                       (uint32_t) app_ctx_provider_name_len);
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       ret = send_pstring(app->sock, ctx->ctx_name,
+                       (uint32_t) app_ctx_name_len);
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       ret = recv_reply(app->sock, &reply, sizeof(reply));
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       reply_ret_code = be32toh(reply.ret_code);
+       log_reply_code(reply_ret_code);
+       switch (reply_ret_code) {
+       case AGENT_RET_CODE_SUCCESS:
+               break;
+       default:
+               ret = LTTNG_ERR_UNK;
+               goto error;
+       }
+
+       return LTTNG_OK;
+
+error_io:
+       ret = LTTNG_ERR_UST_ENABLE_FAIL;
+error:
+       return ret;
+}
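+
+/*
+ * As a concrete illustration of the payload built above (the context name is
+ * hypothetical): for "myprovider:mycontext", the agent receives, after the
+ * command header:
+ *
+ *     [u32 BE: 11]["myprovider\0"][u32 BE: 10]["mycontext\0"]
+ *
+ * two Pascal-style strings whose lengths include the terminating NUL, for a
+ * data_size of 4 + 11 + 4 + 10 = 29 bytes.
+ */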
+
+/*
+ * Internal disable agent event call on an agent application. This function
+ * communicates with the agent to disable a given event.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+static int disable_event(struct agent_app *app, struct agent_event *event)
+{
+       int ret;
+       uint64_t data_size;
+       uint32_t reply_ret_code;
+       struct lttcomm_agent_disable_event msg;
+       struct lttcomm_agent_generic_reply reply;
+
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(app->sock);
+       LTTNG_ASSERT(event);
+
+       DBG2("Agent disabling event %s for app pid: %d and socket %d", event->name,
+                       app->pid, app->sock->fd);
+
+       data_size = sizeof(msg);
+       memset(&msg, 0, sizeof(msg));
+       if (lttng_strncpy(msg.name, event->name, sizeof(msg.name))) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       ret = send_header(app->sock, data_size, AGENT_CMD_DISABLE, 0);
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       ret = send_payload(app->sock, &msg, sizeof(msg));
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       ret = recv_reply(app->sock, &reply, sizeof(reply));
+       if (ret < 0) {
+               goto error_io;
+       }
+
+       reply_ret_code = be32toh(reply.ret_code);
+       log_reply_code(reply_ret_code);
+       switch (reply_ret_code) {
+       case AGENT_RET_CODE_SUCCESS:
+               break;
+       case AGENT_RET_CODE_UNKNOWN_NAME:
+               ret = LTTNG_ERR_UST_EVENT_NOT_FOUND;
+               goto error;
+       default:
+               ret = LTTNG_ERR_UNK;
+               goto error;
+       }
+
+       return LTTNG_OK;
+
+error_io:
+       ret = LTTNG_ERR_UST_DISABLE_FAIL;
+error:
+       return ret;
+}
+
+/*
+ * Send back the registration DONE command to a given agent application.
+ *
+ * Return 0 on success or else a negative value.
+ */
+int agent_send_registration_done(struct agent_app *app)
+{
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(app->sock);
+
+       DBG("Agent sending registration done to app socket %d", app->sock->fd);
+
+       return send_header(app->sock, 0, AGENT_CMD_REG_DONE, 0);
+}
+
+/*
+ * Enable an agent event on every agent application registered with the session
+ * daemon.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+int agent_enable_event(struct agent_event *event,
+               enum lttng_domain_type domain)
+{
+       int ret;
+       struct agent_app *app;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(event);
+
+       rcu_read_lock();
+
+       cds_lfht_for_each_entry(the_agent_apps_ht_by_sock->ht, &iter.iter, app,
+                       node.node) {
+               if (app->domain != domain) {
+                       continue;
+               }
+
+               /* Enable event on agent application through TCP socket. */
+               ret = enable_event(app, event);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+       }
+
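+       /* One more user (event rule, event notifier, etc.) of this event. */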
+       event->enabled_count++;
+       ret = LTTNG_OK;
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+static
+void destroy_app_ctx(struct agent_app_ctx *ctx)
+{
+       free(ctx->provider_name);
+       free(ctx->ctx_name);
+       free(ctx);
+}
+
+static
+struct agent_app_ctx *create_app_ctx(const struct lttng_event_context *ctx)
+{
+       struct agent_app_ctx *agent_ctx = NULL;
+
+       if (!ctx) {
+               goto end;
+       }
+
+       LTTNG_ASSERT(ctx->ctx == LTTNG_EVENT_CONTEXT_APP_CONTEXT);
+       agent_ctx = (agent_app_ctx *) zmalloc(sizeof(*agent_ctx));
+       if (!agent_ctx) {
+               goto end;
+       }
+
+       agent_ctx->provider_name = strdup(ctx->u.app_ctx.provider_name);
+       agent_ctx->ctx_name = strdup(ctx->u.app_ctx.ctx_name);
+       if (!agent_ctx->provider_name || !agent_ctx->ctx_name) {
+               destroy_app_ctx(agent_ctx);
+               agent_ctx = NULL;
+       }
+end:
+       return agent_ctx;
+}
+
+/*
+ * Enable an agent context on every agent application registered with the session
+ * daemon.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+int agent_enable_context(const struct lttng_event_context *ctx,
+               enum lttng_domain_type domain)
+{
+       int ret;
+       struct agent_app *app;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(ctx);
+       if (ctx->ctx != LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       rcu_read_lock();
+
+       cds_lfht_for_each_entry(the_agent_apps_ht_by_sock->ht, &iter.iter, app,
+                       node.node) {
+               struct agent_app_ctx *agent_ctx;
+
+               if (app->domain != domain) {
+                       continue;
+               }
+
+               agent_ctx = create_app_ctx(ctx);
+               if (!agent_ctx) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto error_unlock;
+               }
+
+               /* Enable context on agent application through TCP socket. */
+               ret = app_context_op(app, agent_ctx, AGENT_CMD_APP_CTX_ENABLE);
+               destroy_app_ctx(agent_ctx);
+               if (ret != LTTNG_OK) {
+                       goto error_unlock;
+               }
+       }
+
+       ret = LTTNG_OK;
+
+error_unlock:
+       rcu_read_unlock();
+error:
+       return ret;
+}
+
+/*
+ * Disable agent event on every agent application registered with the session
+ * daemon.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+int agent_disable_event(struct agent_event *event,
+               enum lttng_domain_type domain)
+{
+       int ret = LTTNG_OK;
+       struct agent_app *app;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(event);
+       if (!AGENT_EVENT_IS_ENABLED(event)) {
+               goto end;
+       }
+
+       if (--event->enabled_count != 0) {
+               /*
+                * Agent event still enabled. Disable the agent event only when
+                * all "users" have disabled it (event notifiers, event rules,
+                * etc.).
+                */
+               ret = LTTNG_OK;
+               goto end;
+       }
+
+       rcu_read_lock();
+
+       cds_lfht_for_each_entry(the_agent_apps_ht_by_sock->ht, &iter.iter, app,
+                       node.node) {
+               if (app->domain != domain) {
+                       continue;
+               }
+
+               /* Disable event on agent application through TCP socket. */
+               ret = disable_event(app, event);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+       }
+
+       /* event->enabled_count is now 0. */
+       LTTNG_ASSERT(!AGENT_EVENT_IS_ENABLED(event));
+
+error:
+       rcu_read_unlock();
+end:
+       return ret;
+}
+
+/*
+ * Disable agent context on every agent application registered with the session
+ * daemon.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+static int disable_context(struct agent_app_ctx *ctx,
+               enum lttng_domain_type domain)
+{
+       int ret = LTTNG_OK;
+       struct agent_app *app;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(ctx);
+
+       rcu_read_lock();
+       DBG2("Disabling agent application context %s:%s",
+                       ctx->provider_name, ctx->ctx_name);
+       cds_lfht_for_each_entry(the_agent_apps_ht_by_sock->ht, &iter.iter, app,
+                       node.node) {
+               if (app->domain != domain) {
+                       continue;
+               }
+
+               ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_DISABLE);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Ask every agent application for its list of possible events. *events is
+ * allocated and filled with the events of every agent application.
+ *
+ * Return the number of events or else a negative value.
+ */
+int agent_list_events(struct lttng_event **events,
+               enum lttng_domain_type domain)
+{
+       int ret;
+       size_t nbmem, count = 0;
+       struct agent_app *app;
+       struct lttng_event *tmp_events = NULL;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(events);
+
+       DBG2("Agent listing events for domain %d", domain);
+
+       nbmem = UST_APP_EVENT_LIST_SIZE;
+       tmp_events = (lttng_event *) zmalloc(nbmem * sizeof(*tmp_events));
+       if (!tmp_events) {
+               PERROR("zmalloc agent list events");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(the_agent_apps_ht_by_sock->ht, &iter.iter, app,
+                       node.node) {
+               ssize_t nb_ev;
+               struct lttng_event *agent_events;
+
+               /* Skip domain not asked by the list. */
+               if (app->domain != domain) {
+                       continue;
+               }
+
+               nb_ev = list_events(app, &agent_events);
+               if (nb_ev < 0) {
+                       ret = nb_ev;
+                       goto error_unlock;
+               }
+
+               if (count + nb_ev > nbmem) {
+                       /* In case the realloc fails, we free the memory */
+                       struct lttng_event *new_tmp_events;
+                       size_t new_nbmem;
+
+                       new_nbmem = std::max(count + nb_ev, nbmem << 1);
+                       DBG2("Reallocating agent event list from %zu to %zu entries",
+                                       nbmem, new_nbmem);
+                       new_tmp_events = (lttng_event *) realloc(tmp_events,
+                               new_nbmem * sizeof(*new_tmp_events));
+                       if (!new_tmp_events) {
+                               PERROR("realloc agent events");
+                               ret = -ENOMEM;
+                               free(agent_events);
+                               goto error_unlock;
+                       }
+                       /* Zero the new memory */
+                       memset(new_tmp_events + nbmem, 0,
+                               (new_nbmem - nbmem) * sizeof(*new_tmp_events));
+                       nbmem = new_nbmem;
+                       tmp_events = new_tmp_events;
+               }
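+               /* Append this application's events to the aggregated list. */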
+               memcpy(tmp_events + count, agent_events,
+                       nb_ev * sizeof(*tmp_events));
+               free(agent_events);
+               count += nb_ev;
+       }
+       rcu_read_unlock();
+
+       ret = count;
+       *events = tmp_events;
+       return ret;
+
+error_unlock:
+       rcu_read_unlock();
+error:
+       free(tmp_events);
+       return ret;
+}
+
+/*
+ * Create an agent app object using the given PID.
+ *
+ * Return newly allocated object or else NULL on error.
+ */
+struct agent_app *agent_create_app(pid_t pid, enum lttng_domain_type domain,
+               struct lttcomm_sock *sock)
+{
+       struct agent_app *app;
+
+       LTTNG_ASSERT(sock);
+
+       app = (agent_app *) zmalloc(sizeof(*app));
+       if (!app) {
+               PERROR("Failed to allocate agent application instance");
+               goto error;
+       }
+
+       app->pid = pid;
+       app->domain = domain;
+       app->sock = sock;
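+       /* Agent applications are keyed by their socket fd in the hash table. */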
+       lttng_ht_node_init_ulong(&app->node, (unsigned long) app->sock->fd);
+
+error:
+       return app;
+}
+
+/*
+ * Lookup agent app by socket in the global hash table.
+ *
+ * RCU read side lock MUST be acquired.
+ *
+ * Return object if found else NULL.
+ */
+struct agent_app *agent_find_app_by_sock(int sock)
+{
+       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_iter iter;
+       struct agent_app *app;
+
+       LTTNG_ASSERT(sock >= 0);
+
+       lttng_ht_lookup(the_agent_apps_ht_by_sock,
+                       (void *) ((unsigned long) sock), &iter);
+       node = lttng_ht_iter_get_node_ulong(&iter);
+       if (node == NULL) {
+               goto error;
+       }
+       app = caa_container_of(node, struct agent_app, node);
+
+       DBG3("Agent app pid %d found by sock %d.", app->pid, sock);
+       return app;
+
+error:
+       DBG3("Agent app NOT found by sock %d.", sock);
+       return NULL;
+}
+
+/*
+ * Add agent application object to the global hash table.
+ */
+void agent_add_app(struct agent_app *app)
+{
+       LTTNG_ASSERT(app);
+
+       DBG3("Agent adding app sock: %d and pid: %d to ht", app->sock->fd, app->pid);
+       lttng_ht_add_unique_ulong(the_agent_apps_ht_by_sock, &app->node);
+}
+
+/*
+ * Delete agent application from the global hash table.
+ *
+ * rcu_read_lock() must be held by the caller.
+ */
+void agent_delete_app(struct agent_app *app)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(app);
+
+       DBG3("Agent deleting app pid: %d and sock: %d", app->pid, app->sock->fd);
+
+       iter.iter.node = &app->node.node;
+       ret = lttng_ht_del(the_agent_apps_ht_by_sock, &iter);
+       LTTNG_ASSERT(!ret);
+}
+
+/*
+ * Destroy an agent application object: if a socket is connected, close and
+ * destroy it, then perform a delayed (RCU) memory reclaim.
+ */
+void agent_destroy_app(struct agent_app *app)
+{
+       LTTNG_ASSERT(app);
+
+       if (app->sock) {
+               app->sock->ops->close(app->sock);
+               lttcomm_destroy_sock(app->sock);
+       }
+
+       call_rcu(&app->node.head, destroy_app_agent_rcu);
+}
+
+/*
+ * Initialize an already allocated agent object.
+ *
+ * Return 0 on success or else a negative errno value.
+ */
+int agent_init(struct agent *agt)
+{
+       int ret;
+
+       LTTNG_ASSERT(agt);
+
+       agt->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+       if (!agt->events) {
+               ret = -ENOMEM;
+               goto error;
+       }
+       lttng_ht_node_init_u64(&agt->node, agt->domain);
+
+       CDS_INIT_LIST_HEAD(&agt->app_ctx_list);
+       return 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Add agent object to the given hash table.
+ */
+void agent_add(struct agent *agt, struct lttng_ht *ht)
+{
+       LTTNG_ASSERT(agt);
+       LTTNG_ASSERT(ht);
+
+       DBG3("Agent adding from domain %d", agt->domain);
+
+       lttng_ht_add_unique_u64(ht, &agt->node);
+}
+
+/*
+ * Create an agent object for the given domain.
+ *
+ * Return the allocated agent or NULL on error.
+ */
+struct agent *agent_create(enum lttng_domain_type domain)
+{
+       int ret;
+       struct agent *agt;
+
+       agt = (agent *) zmalloc(sizeof(struct agent));
+       if (!agt) {
+               goto error;
+       }
+       agt->domain = domain;
+
+       ret = agent_init(agt);
+       if (ret < 0) {
+               free(agt);
+               agt = NULL;
+               goto error;
+       }
+
+error:
+       return agt;
+}
+
+/*
+ * Create a newly allocated agent event data structure.
+ * Ownership of filter_expression is taken.
+ *
+ * Return a new object else NULL on error.
+ */
+struct agent_event *agent_create_event(const char *name,
+               enum lttng_loglevel_type loglevel_type, int loglevel_value,
+               struct lttng_bytecode *filter, char *filter_expression)
+{
+       struct agent_event *event = NULL;
+
+       DBG3("Agent create new event with name %s, loglevel type %d, "
+                       "loglevel value %d and filter %s",
+                       name, loglevel_type, loglevel_value,
+                       filter_expression ? filter_expression : "NULL");
+
+       if (!name) {
+               ERR("Failed to create agent event; no name provided.");
+               goto error;
+       }
+
+       event = (agent_event *) zmalloc(sizeof(*event));
+       if (!event) {
+               goto error;
+       }
+
+       strncpy(event->name, name, sizeof(event->name));
+       event->name[sizeof(event->name) - 1] = '\0';
+       lttng_ht_node_init_str(&event->node, event->name);
+
+       event->loglevel_value = loglevel_value;
+       event->loglevel_type = loglevel_type;
+       event->filter = filter;
+       event->filter_expression = filter_expression;
+error:
+       return event;
+}
+
+/*
+ * Unique add of an agent event to an agent object.
+ */
+void agent_add_event(struct agent_event *event, struct agent *agt)
+{
+       LTTNG_ASSERT(event);
+       LTTNG_ASSERT(agt);
+       LTTNG_ASSERT(agt->events);
+
+       DBG3("Agent adding event %s", event->name);
+       add_unique_agent_event(agt->events, event);
+       agt->being_used = 1;
+}
+
+/*
+ * Unique add of an agent context to an agent object.
+ */
+int agent_add_context(const struct lttng_event_context *ctx, struct agent *agt)
+{
+       int ret = LTTNG_OK;
+       struct agent_app_ctx *agent_ctx = NULL;
+
+       LTTNG_ASSERT(ctx);
+       LTTNG_ASSERT(agt);
+       LTTNG_ASSERT(agt->events);
+       LTTNG_ASSERT(ctx->ctx == LTTNG_EVENT_CONTEXT_APP_CONTEXT);
+
+       agent_ctx = create_app_ctx(ctx);
+       if (!agent_ctx) {
+               ret = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       DBG3("Agent adding context %s:%s", ctx->u.app_ctx.provider_name,
+                       ctx->u.app_ctx.ctx_name);
+       cds_list_add_tail_rcu(&agent_ctx->list_node, &agt->app_ctx_list);
+end:
+       return ret;
+}
+
+/*
+ * Find multiple agent events sharing the given name.
+ *
+ * RCU read side lock MUST be acquired. It must be held for the
+ * duration of the iteration.
+ *
+ * Sets the given iterator.
+ */
+void agent_find_events_by_name(const char *name, struct agent *agt,
+               struct lttng_ht_iter* iter)
+{
+       struct lttng_ht *ht;
+       struct agent_ht_key key;
+
+       LTTNG_ASSERT(name);
+       LTTNG_ASSERT(agt);
+       LTTNG_ASSERT(agt->events);
+       LTTNG_ASSERT(iter);
+
+       ht = agt->events;
+       key.name = name;
+
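+       /*
+        * The lookup matches on the event name only; duplicates are iterated
+        * with agent_event_next_duplicate().
+        */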
+       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
+                       ht_match_event_by_name, &key, &iter->iter);
+}
+
+/*
+ * Find the agent event matching a trigger.
+ *
+ * RCU read side lock MUST be acquired. It must be held for as long as
+ * the returned agent_event is used.
+ *
+ * Return object if found else NULL.
+ */
+struct agent_event *agent_find_event_by_trigger(
+               const struct lttng_trigger *trigger, struct agent *agt)
+{
+       enum lttng_condition_status c_status;
+       enum lttng_event_rule_status er_status;
+       enum lttng_domain_type domain;
+       const struct lttng_condition *condition;
+       const struct lttng_event_rule *rule;
+       const char *name;
+       const char *filter_expression;
+       const struct lttng_log_level_rule *log_level_rule;
+       /* Unused when loglevel_type is 'ALL'. */
+       int loglevel_value = 0;
+       enum lttng_loglevel_type loglevel_type;
+       event_rule_logging_get_name_pattern logging_get_name_pattern;
+       event_rule_logging_get_log_level_rule logging_get_log_level_rule;
+
+       LTTNG_ASSERT(agt);
+       LTTNG_ASSERT(agt->events);
+
+       condition = lttng_trigger_get_const_condition(trigger);
+
+       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
+                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
+
+       c_status = lttng_condition_event_rule_matches_get_rule(
+                       condition, &rule);
+       LTTNG_ASSERT(c_status == LTTNG_CONDITION_STATUS_OK);
+
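+       /*
+        * Select the 'name pattern' and 'log level rule' accessors matching
+        * the event rule's type (JUL, log4j or Python logging).
+        */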
+       switch (lttng_event_rule_get_type(rule)) {
+       case LTTNG_EVENT_RULE_TYPE_JUL_LOGGING:
+               logging_get_name_pattern =
+                               lttng_event_rule_jul_logging_get_name_pattern;
+               logging_get_log_level_rule =
+                               lttng_event_rule_jul_logging_get_log_level_rule;
+               break;
+       case LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING:
+               logging_get_name_pattern =
+                               lttng_event_rule_log4j_logging_get_name_pattern;
+               logging_get_log_level_rule =
+                               lttng_event_rule_log4j_logging_get_log_level_rule;
+               break;
+       case LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING:
+               logging_get_name_pattern =
+                               lttng_event_rule_python_logging_get_name_pattern;
+               logging_get_log_level_rule =
+                               lttng_event_rule_python_logging_get_log_level_rule;
+               break;
+       default:
+               abort();
+               break;
+       }
+
+       domain = lttng_event_rule_get_domain_type(rule);
+       LTTNG_ASSERT(domain == LTTNG_DOMAIN_JUL || domain == LTTNG_DOMAIN_LOG4J ||
+                       domain == LTTNG_DOMAIN_PYTHON);
+
+       /* Get the event's pattern name ('name' in the legacy terminology). */
+       er_status = logging_get_name_pattern(rule, &name);
+       LTTNG_ASSERT(er_status == LTTNG_EVENT_RULE_STATUS_OK);
+
+       /* Get the internal filter expression. */
+       filter_expression = lttng_event_rule_get_filter(rule);
+
+       /* Map log_level_rule to loglevel value. */
+       er_status = logging_get_log_level_rule(rule, &log_level_rule);
+       if (er_status == LTTNG_EVENT_RULE_STATUS_UNSET) {
+               loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
+               loglevel_value = 0;
+       } else if (er_status == LTTNG_EVENT_RULE_STATUS_OK) {
+               lttng_log_level_rule_to_loglevel(log_level_rule, &loglevel_type, &loglevel_value);
+       } else {
+               abort();
+       }
+
+       return agent_find_event(name, loglevel_type, loglevel_value,
+                       filter_expression, agt);
+}
+
+/*
+ * Get the next agent event duplicate by name. This should be called
+ * after a call to agent_find_events_by_name() to iterate on events.
+ *
+ * The RCU read lock must be held during the iteration and for as long
+ * as the object the iterator points to remains in use.
+ */
+void agent_event_next_duplicate(const char *name,
+               struct agent *agt, struct lttng_ht_iter* iter)
+{
+       struct agent_ht_key key;
+
+       key.name = name;
+
+       cds_lfht_next_duplicate(agt->events->ht, ht_match_event_by_name,
+               &key, &iter->iter);
+}
+
+/*
+ * Find an agent event in the given agent using name, loglevel and filter.
+ *
+ * RCU read side lock MUST be acquired. It must be kept for as long as
+ * the returned agent_event is used.
+ *
+ * Return object if found else NULL.
+ */
+struct agent_event *agent_find_event(const char *name,
+               enum lttng_loglevel_type loglevel_type,
+               int loglevel_value,
+               const char *filter_expression,
+               struct agent *agt)
+{
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+       struct lttng_ht *ht;
+       struct agent_ht_key key;
+
+       LTTNG_ASSERT(name);
+       LTTNG_ASSERT(agt);
+       LTTNG_ASSERT(agt->events);
+
+       ht = agt->events;
+       key.name = name;
+       key.loglevel_value = loglevel_value;
+       key.loglevel_type = loglevel_type;
+       key.filter_expression = filter_expression;
+
+       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
+                       ht_match_event, &key, &iter.iter);
+       node = lttng_ht_iter_get_node_str(&iter);
+       if (node == NULL) {
+               goto error;
+       }
+
+       DBG3("Agent event found %s.", name);
+       return caa_container_of(node, struct agent_event, node);
+
+error:
+       DBG3("Agent event NOT found %s.", name);
+       return NULL;
+}
+
+/*
+ * Free given agent event. This event must not be globally visible at this
+ * point (only expected to be used on failure just after event creation). After
+ * this call, the pointer is not usable anymore.
+ */
+void agent_destroy_event(struct agent_event *event)
+{
+       LTTNG_ASSERT(event);
+
+       free(event->filter);
+       free(event->filter_expression);
+       free(event->exclusion);
+       free(event);
+}
+
+static
+void destroy_app_ctx_rcu(struct rcu_head *head)
+{
+       struct agent_app_ctx *ctx =
+                       caa_container_of(head, struct agent_app_ctx, rcu_node);
+
+       destroy_app_ctx(ctx);
+}
+
+/*
+ * Destroy an agent completely.
+ */
+void agent_destroy(struct agent *agt)
+{
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+       struct agent_app_ctx *ctx;
+
+       LTTNG_ASSERT(agt);
+
+       DBG3("Agent destroy");
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(agt->events->ht, &iter.iter, node, node) {
+               int ret;
+               struct agent_event *event;
+
+               /*
+                * When destroying an event, we have to try to disable it on the
+                * agent side so the event stops generating data. The return
+                * value is not important since we have to continue anyway
+                * destroying the object.
+                */
+               event = caa_container_of(node, struct agent_event, node);
+               (void) agent_disable_event(event, agt->domain);
+
+               ret = lttng_ht_del(agt->events, &iter);
+               LTTNG_ASSERT(!ret);
+               call_rcu(&node->head, destroy_event_agent_rcu);
+       }
+
+       cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node) {
+               (void) disable_context(ctx, agt->domain);
+               cds_list_del(&ctx->list_node);
+               call_rcu(&ctx->rcu_node, destroy_app_ctx_rcu);
+       }
+       rcu_read_unlock();
+       ht_cleanup_push(agt->events);
+       free(agt);
+}
+
+/*
+ * Allocate agent_apps_ht_by_sock.
+ */
+int agent_app_ht_alloc(void)
+{
+       the_agent_apps_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+       return the_agent_apps_ht_by_sock ? 0 : -1;
+}
+
+/*
+ * Destroy an agent application by socket.
+ */
+void agent_destroy_app_by_sock(int sock)
+{
+       struct agent_app *app;
+
+       LTTNG_ASSERT(sock >= 0);
+
+       /*
+        * Not finding an application is a very important error that should NEVER
+        * happen. The hash table deletion is ONLY done through this call when the
+        * main sessiond thread is torn down.
+        */
+       rcu_read_lock();
+       app = agent_find_app_by_sock(sock);
+       LTTNG_ASSERT(app);
+
+       /* RCU read side lock is assumed to be held by this function. */
+       agent_delete_app(app);
+
+       /* The application is freed in a RCU call but the socket is closed here. */
+       agent_destroy_app(app);
+       rcu_read_unlock();
+}
+
+/*
+ * Clean-up the agent app hash table and destroy it.
+ */
+void agent_app_ht_clean(void)
+{
+       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_iter iter;
+
+       if (!the_agent_apps_ht_by_sock) {
+               return;
+       }
+       rcu_read_lock();
+       cds_lfht_for_each_entry(
+                       the_agent_apps_ht_by_sock->ht, &iter.iter, node, node) {
+               struct agent_app *app;
+
+               app = caa_container_of(node, struct agent_app, node);
+               agent_destroy_app_by_sock(app->sock->fd);
+       }
+       rcu_read_unlock();
+
+       lttng_ht_destroy(the_agent_apps_ht_by_sock);
+}
+
+/*
+ * Update an agent application (given its socket) using the given agent.
+ *
+ * Note that this function is most likely to be used with a tracing session
+ * thus the caller should make sure to hold the appropriate lock(s).
+ */
+void agent_update(const struct agent *agt, const struct agent_app *app)
+{
+       int ret;
+       struct agent_event *event;
+       struct lttng_ht_iter iter;
+       struct agent_app_ctx *ctx;
+
+       LTTNG_ASSERT(agt);
+       LTTNG_ASSERT(app);
+
+       DBG("Agent updating app: pid = %ld", (long) app->pid);
+
+       rcu_read_lock();
+       /*
+        * We are in the registration path thus if the application is gone,
+        * there is a serious code flow error.
+        */
+
+       cds_lfht_for_each_entry(agt->events->ht, &iter.iter, event, node.node) {
+               /* Skip event if disabled. */
+               if (!AGENT_EVENT_IS_ENABLED(event)) {
+                       continue;
+               }
+
+               ret = enable_event(app, event);
+               if (ret != LTTNG_OK) {
+                       DBG2("Agent update unable to enable event %s on app pid: %d sock %d",
+                                       event->name, app->pid, app->sock->fd);
+                       /* Let's try the others here and don't assume the app is dead. */
+                       continue;
+               }
+       }
+
+       cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node) {
+               ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_ENABLE);
+               if (ret != LTTNG_OK) {
+                       DBG2("Agent update unable to add application context %s:%s on app pid: %d sock %d",
+                                       ctx->provider_name, ctx->ctx_name,
+                                       app->pid, app->sock->fd);
+                       continue;
+               }
+       }
+
+       rcu_read_unlock();
+}
+
+/*
+ * Allocate the per-event notifier domain agent hash table. It is lazily
+ * populated as domains are used.
+ */
+int agent_by_event_notifier_domain_ht_create(void)
+{
+       the_trigger_agents_ht_by_domain = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       return the_trigger_agents_ht_by_domain ? 0 : -1;
+}
+
+/*
+ * Clean-up the per-event notifier domain agent hash table and destroy it.
+ */
+void agent_by_event_notifier_domain_ht_destroy(void)
+{
+       struct lttng_ht_node_u64 *node;
+       struct lttng_ht_iter iter;
+
+       if (!the_trigger_agents_ht_by_domain) {
+               return;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(the_trigger_agents_ht_by_domain->ht,
+                       &iter.iter, node, node) {
+               struct agent *agent =
+                               caa_container_of(node, struct agent, node);
+               const int ret = lttng_ht_del(
+                               the_trigger_agents_ht_by_domain, &iter);
+
+               LTTNG_ASSERT(ret == 0);
+               agent_destroy(agent);
+       }
+
+       rcu_read_unlock();
+       lttng_ht_destroy(the_trigger_agents_ht_by_domain);
+}
+
+struct agent *agent_find_by_event_notifier_domain(
+               enum lttng_domain_type domain_type)
+{
+       struct agent *agt = NULL;
+       struct lttng_ht_node_u64 *node;
+       struct lttng_ht_iter iter;
+       const uint64_t key = (uint64_t) domain_type;
+
+       LTTNG_ASSERT(the_trigger_agents_ht_by_domain);
+
+       DBG3("Per-event notifier domain agent lookup for domain '%s'",
+                       lttng_domain_type_str(domain_type));
+
+       lttng_ht_lookup(the_trigger_agents_ht_by_domain, &key, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (!node) {
+               goto end;
+       }
+
+       agt = caa_container_of(node, struct agent, node);
+
+end:
+       return agt;
+}
diff --git a/src/bin/lttng-sessiond/buffer-registry.c b/src/bin/lttng-sessiond/buffer-registry.c
deleted file mode 100644 (file)
index 3390f87..0000000
+++ /dev/null
@@ -1,746 +0,0 @@
-/*
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <inttypes.h>
-
-#include <common/common.h>
-#include <common/hashtable/utils.h>
-
-#include "buffer-registry.h"
-#include "fd-limit.h"
-#include "ust-consumer.h"
-#include "lttng-ust-ctl.h"
-#include "lttng-ust-error.h"
-#include "utils.h"
-
-/*
- * Set in main.c during initialization process of the daemon. This contains
- * buffer_reg_uid object which are global registry for per UID buffer. Object
- * are indexed by session id and matched by the triplet
- * <session_id/bits_per_long/uid>.
- */
-static struct lttng_ht *buffer_registry_uid;
-
-/*
- * Initialized at the daemon start. This contains buffer_reg_pid object and
- * indexed by session id.
- */
-static struct lttng_ht *buffer_registry_pid;
-
-/*
- * Match function for the per UID registry hash table. It matches a registry
- * uid object with the triplet <session_id/abi/uid>.
- */
-static int ht_match_reg_uid(struct cds_lfht_node *node, const void *_key)
-{
-       struct buffer_reg_uid *reg;
-       const struct buffer_reg_uid *key;
-
-       LTTNG_ASSERT(node);
-       LTTNG_ASSERT(_key);
-
-       reg = caa_container_of(node, struct buffer_reg_uid, node.node);
-       LTTNG_ASSERT(reg);
-       key = _key;
-
-       if (key->session_id != reg->session_id ||
-                       key->bits_per_long != reg->bits_per_long ||
-                       key->uid != reg->uid) {
-               goto no_match;
-       }
-
-       /* Match */
-       return 1;
-no_match:
-       return 0;
-}
-
-/*
- * Hash function for the per UID registry hash table. This XOR the triplet
- * together.
- */
-static unsigned long ht_hash_reg_uid(const void *_key, unsigned long seed)
-{
-       uint64_t xored_key;
-       const struct buffer_reg_uid *key = _key;
-
-       LTTNG_ASSERT(key);
-
-       xored_key = (uint64_t)(key->session_id ^ key->bits_per_long ^ key->uid);
-       return hash_key_u64(&xored_key, seed);
-}
-
-/*
- * Initialize global buffer per UID registry. Should only be called ONCE!.
- */
-void buffer_reg_init_uid_registry(void)
-{
-       /* Should be called once. */
-       LTTNG_ASSERT(!buffer_registry_uid);
-       buffer_registry_uid = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-       LTTNG_ASSERT(buffer_registry_uid);
-       buffer_registry_uid->match_fct = ht_match_reg_uid;
-       buffer_registry_uid->hash_fct = ht_hash_reg_uid;
-
-       DBG3("Global buffer per UID registry initialized");
-}
-
-/*
- * Allocate and initialize object. Set regp with the object pointer.
- *
- * Return 0 on success else a negative value and regp is untouched.
- */
-int buffer_reg_uid_create(uint64_t session_id, uint32_t bits_per_long, uid_t uid,
-               enum lttng_domain_type domain, struct buffer_reg_uid **regp,
-               const char *root_shm_path, const char *shm_path)
-{
-       int ret = 0;
-       struct buffer_reg_uid *reg = NULL;
-
-       LTTNG_ASSERT(regp);
-
-       reg = zmalloc(sizeof(*reg));
-       if (!reg) {
-               PERROR("zmalloc buffer registry uid");
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       reg->registry = zmalloc(sizeof(struct buffer_reg_session));
-       if (!reg->registry) {
-               PERROR("zmalloc buffer registry uid session");
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       reg->session_id = session_id;
-       reg->bits_per_long = bits_per_long;
-       reg->uid = uid;
-       reg->domain = domain;
-       if (shm_path[0]) {
-               strncpy(reg->root_shm_path, root_shm_path, sizeof(reg->root_shm_path));
-               reg->root_shm_path[sizeof(reg->root_shm_path) - 1] = '\0';
-               strncpy(reg->shm_path, shm_path, sizeof(reg->shm_path));
-               reg->shm_path[sizeof(reg->shm_path) - 1] = '\0';
-               DBG3("shm path '%s' is assigned to uid buffer registry for session id %" PRIu64,
-                       reg->shm_path, session_id);
-       }
-       reg->registry->channels = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-       if (!reg->registry->channels) {
-               ret = -ENOMEM;
-               goto error_session;
-       }
-
-       cds_lfht_node_init(&reg->node.node);
-       *regp = reg;
-
-       DBG3("Buffer registry per UID created id: %" PRIu64 ", ABI: %u, uid: %d, domain: %d",
-                       session_id, bits_per_long, uid, domain);
-
-       return 0;
-
-error_session:
-       free(reg->registry);
-error:
-       free(reg);
-       return ret;
-}
-
-/*
- * Add a buffer registry per UID object to the global registry.
- */
-void buffer_reg_uid_add(struct buffer_reg_uid *reg)
-{
-       struct cds_lfht_node *nodep;
-       struct lttng_ht *ht = buffer_registry_uid;
-
-       LTTNG_ASSERT(reg);
-
-       DBG3("Buffer registry per UID adding to global registry with id: %" PRIu64 ,
-                       reg->session_id);
-
-       rcu_read_lock();
-       nodep = cds_lfht_add_unique(ht->ht, ht->hash_fct(reg, lttng_ht_seed),
-                       ht->match_fct, reg, &reg->node.node);
-       LTTNG_ASSERT(nodep == &reg->node.node);
-       rcu_read_unlock();
-}
-
-/*
- * Find a buffer registry per UID object with given params. RCU read side lock
- * MUST be acquired before calling this and hold on to protect the object.
- *
- * Return the object pointer or NULL on error.
- */
-struct buffer_reg_uid *buffer_reg_uid_find(uint64_t session_id,
-               uint32_t bits_per_long, uid_t uid)
-{
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-       struct buffer_reg_uid *reg = NULL, key;
-       struct lttng_ht *ht = buffer_registry_uid;
-
-       /* Setup key we are looking for. */
-       key.session_id = session_id;
-       key.bits_per_long = bits_per_long;
-       key.uid = uid;
-
-       DBG3("Buffer registry per UID find id: %" PRIu64 ", ABI: %u, uid: %d",
-                       session_id, bits_per_long, uid);
-
-       /* Custom lookup function since it's a different key. */
-       cds_lfht_lookup(ht->ht, ht->hash_fct(&key, lttng_ht_seed), ht->match_fct,
-                       &key, &iter.iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (!node) {
-               goto end;
-       }
-       reg = caa_container_of(node, struct buffer_reg_uid, node);
-
-end:
-       return reg;
-}
-
-/*
- * Initialize global buffer per PID registry. Should only be called ONCE!.
- */
-void buffer_reg_init_pid_registry(void)
-{
-       /* Should be called once. */
-       LTTNG_ASSERT(!buffer_registry_pid);
-       buffer_registry_pid = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-       LTTNG_ASSERT(buffer_registry_pid);
-
-       DBG3("Global buffer per PID registry initialized");
-}
-
-/*
- * Allocate and initialize object. Set regp with the object pointer.
- *
- * Return 0 on success else a negative value and regp is untouched.
- */
-int buffer_reg_pid_create(uint64_t session_id, struct buffer_reg_pid **regp,
-               const char *root_shm_path, const char *shm_path)
-{
-       int ret = 0;
-       struct buffer_reg_pid *reg = NULL;
-
-       LTTNG_ASSERT(regp);
-
-       reg = zmalloc(sizeof(*reg));
-       if (!reg) {
-               PERROR("zmalloc buffer registry pid");
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       reg->registry = zmalloc(sizeof(struct buffer_reg_session));
-       if (!reg->registry) {
-               PERROR("zmalloc buffer registry pid session");
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       /* A cast is done here so we can use the session ID as a u64 ht node. */
-       reg->session_id = session_id;
-       if (shm_path[0]) {
-               strncpy(reg->root_shm_path, root_shm_path, sizeof(reg->root_shm_path));
-               reg->root_shm_path[sizeof(reg->root_shm_path) - 1] = '\0';
-               strncpy(reg->shm_path, shm_path, sizeof(reg->shm_path));
-               reg->shm_path[sizeof(reg->shm_path) - 1] = '\0';
-               DBG3("shm path '%s' is assigned to pid buffer registry for session id %" PRIu64,
-                               reg->shm_path, session_id);
-       }
-       reg->registry->channels = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-       if (!reg->registry->channels) {
-               ret = -ENOMEM;
-               goto error_session;
-       }
-
-       lttng_ht_node_init_u64(&reg->node, reg->session_id);
-       *regp = reg;
-
-       DBG3("Buffer registry per PID created with session id: %" PRIu64,
-                       session_id);
-
-       return 0;
-
-error_session:
-       free(reg->registry);
-error:
-       free(reg);
-       return ret;
-}
-
-/*
- * Add a buffer registry per PID object to the global registry.
- */
-void buffer_reg_pid_add(struct buffer_reg_pid *reg)
-{
-       LTTNG_ASSERT(reg);
-
-       DBG3("Buffer registry per PID adding to global registry with id: %" PRIu64,
-                       reg->session_id);
-
-       rcu_read_lock();
-       lttng_ht_add_unique_u64(buffer_registry_pid, &reg->node);
-       rcu_read_unlock();
-}
-
-/*
- * Find a buffer registry per PID object with given params. RCU read side lock
- * MUST be acquired before calling this and hold on to protect the object.
- *
- * Return the object pointer or NULL on error.
- */
-struct buffer_reg_pid *buffer_reg_pid_find(uint64_t session_id)
-{
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-       struct buffer_reg_pid *reg = NULL;
-       struct lttng_ht *ht = buffer_registry_pid;
-
-       DBG3("Buffer registry per PID find id: %" PRIu64, session_id);
-
-       lttng_ht_lookup(ht, &session_id, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (!node) {
-               goto end;
-       }
-       reg = caa_container_of(node, struct buffer_reg_pid, node);
-
-end:
-       return reg;
-}
-
-/*
- * Find the consumer channel key from a UST session per-uid channel key.
- *
- * Return the matching key or -1 if not found.
- */
-int buffer_reg_uid_consumer_channel_key(
-               struct cds_list_head *buffer_reg_uid_list,
-               uint64_t chan_key, uint64_t *consumer_chan_key)
-{
-       struct lttng_ht_iter iter;
-       struct buffer_reg_uid *uid_reg = NULL;
-       struct buffer_reg_session *session_reg = NULL;
-       struct buffer_reg_channel *reg_chan;
-       int ret = -1;
-
-       rcu_read_lock();
-       /*
-        * For the per-uid registry, we have to iterate since we don't have the
-        * uid and bitness key.
-        */
-       cds_list_for_each_entry(uid_reg, buffer_reg_uid_list, lnode) {
-               session_reg = uid_reg->registry;
-               cds_lfht_for_each_entry(session_reg->channels->ht,
-                               &iter.iter, reg_chan, node.node) {
-                       if (reg_chan->key == chan_key) {
-                               *consumer_chan_key = reg_chan->consumer_key;
-                               ret = 0;
-                               goto end;
-                       }
-               }
-       }
-
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Allocate and initialize a buffer registry channel with the given key. Set
- * regp with the object pointer.
- *
- * Return 0 on success or else a negative value keeping regp untouched.
- */
-int buffer_reg_channel_create(uint64_t key, struct buffer_reg_channel **regp)
-{
-       struct buffer_reg_channel *reg;
-
-       LTTNG_ASSERT(regp);
-
-       DBG3("Buffer registry channel create with key: %" PRIu64, key);
-
-       reg = zmalloc(sizeof(*reg));
-       if (!reg) {
-               PERROR("zmalloc buffer registry channel");
-               return -ENOMEM;
-       }
-
-       reg->key = key;
-       CDS_INIT_LIST_HEAD(&reg->streams);
-       pthread_mutex_init(&reg->stream_list_lock, NULL);
-
-       lttng_ht_node_init_u64(&reg->node, key);
-       *regp = reg;
-
-       return 0;
-}
-
-/*
- * Allocate and initialize a buffer registry stream. Set regp with the object
- * pointer.
- *
- * Return 0 on success or else a negative value keeping regp untouched.
- */
-int buffer_reg_stream_create(struct buffer_reg_stream **regp)
-{
-       struct buffer_reg_stream *reg;
-
-       LTTNG_ASSERT(regp);
-
-       DBG3("Buffer registry creating stream");
-
-       reg = zmalloc(sizeof(*reg));
-       if (!reg) {
-               PERROR("zmalloc buffer registry stream");
-               return -ENOMEM;
-       }
-
-       *regp = reg;
-
-       return 0;
-}
-
-/*
- * Add stream to the list in the channel.
- */
-void buffer_reg_stream_add(struct buffer_reg_stream *stream,
-               struct buffer_reg_channel *channel)
-{
-       LTTNG_ASSERT(stream);
-       LTTNG_ASSERT(channel);
-
-       pthread_mutex_lock(&channel->stream_list_lock);
-       cds_list_add_tail(&stream->lnode, &channel->streams);
-       channel->stream_count++;
-       pthread_mutex_unlock(&channel->stream_list_lock);
-}
-
-/*
- * Add a buffer registry channel object to the given session.
- */
-void buffer_reg_channel_add(struct buffer_reg_session *session,
-               struct buffer_reg_channel *channel)
-{
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(channel);
-
-       rcu_read_lock();
-       lttng_ht_add_unique_u64(session->channels, &channel->node);
-       rcu_read_unlock();
-}
-
-/*
- * Find a buffer registry channel object with the given key. RCU read side lock
- * MUST be acquired and hold on until the object reference is not needed
- * anymore.
- *
- * Return the object pointer or NULL on error.
- */
-struct buffer_reg_channel *buffer_reg_channel_find(uint64_t key,
-               struct buffer_reg_uid *reg)
-{
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-       struct buffer_reg_channel *chan = NULL;
-       struct lttng_ht *ht;
-
-       LTTNG_ASSERT(reg);
-
-       switch (reg->domain) {
-       case LTTNG_DOMAIN_UST:
-               ht = reg->registry->channels;
-               break;
-       default:
-               abort();
-               goto end;
-       }
-
-       lttng_ht_lookup(ht, &key, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (!node) {
-               goto end;
-       }
-       chan = caa_container_of(node, struct buffer_reg_channel, node);
-
-end:
-       return chan;
-}
-
-/*
- * Destroy a buffer registry stream with the given domain.
- */
-void buffer_reg_stream_destroy(struct buffer_reg_stream *regp,
-               enum lttng_domain_type domain)
-{
-       if (!regp) {
-               return;
-       }
-
-       DBG3("Buffer registry stream destroy with handle %d",
-                       regp->obj.ust->handle);
-
-       switch (domain) {
-       case LTTNG_DOMAIN_UST:
-       {
-               int ret;
-
-               ret = ust_app_release_object(NULL, regp->obj.ust);
-               if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
-                       ERR("Buffer reg stream release obj handle %d failed with ret %d",
-                                       regp->obj.ust->handle, ret);
-               }
-               free(regp->obj.ust);
-               lttng_fd_put(LTTNG_FD_APPS, 2);
-               break;
-       }
-       default:
-               abort();
-       }
-
-       free(regp);
-       return;
-}
-
-/*
- * Remove buffer registry channel object from the session hash table. RCU read
- * side lock MUST be acquired before calling this.
- */
-void buffer_reg_channel_remove(struct buffer_reg_session *session,
-               struct buffer_reg_channel *regp)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(regp);
-
-       iter.iter.node = &regp->node.node;
-       ret = lttng_ht_del(session->channels, &iter);
-       LTTNG_ASSERT(!ret);
-}
-
-/*
- * Destroy a buffer registry channel with the given domain.
- */
-void buffer_reg_channel_destroy(struct buffer_reg_channel *regp,
-               enum lttng_domain_type domain)
-{
-       if (!regp) {
-               return;
-       }
-
-       DBG3("Buffer registry channel destroy with key %" PRIu32, regp->key);
-
-       switch (domain) {
-       case LTTNG_DOMAIN_UST:
-       {
-               int ret;
-               struct buffer_reg_stream *sreg, *stmp;
-               /* Wipe stream */
-               cds_list_for_each_entry_safe(sreg, stmp, &regp->streams, lnode) {
-                       cds_list_del(&sreg->lnode);
-                       regp->stream_count--;
-                       buffer_reg_stream_destroy(sreg, domain);
-               }
-
-               if (regp->obj.ust) {
-                       ret = ust_app_release_object(NULL, regp->obj.ust);
-                       if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
-                               ERR("Buffer reg channel release obj handle %d failed with ret %d",
-                                               regp->obj.ust->handle, ret);
-                       }
-                       free(regp->obj.ust);
-               }
-               lttng_fd_put(LTTNG_FD_APPS, 1);
-               break;
-       }
-       default:
-               abort();
-       }
-
-       free(regp);
-       return;
-}
-
-/*
- * Destroy a buffer registry session with the given domain.
- *
- * Should *NOT* be called with RCU read-side lock held.
- */
-static void buffer_reg_session_destroy(struct buffer_reg_session *regp,
-               enum lttng_domain_type domain)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-       struct buffer_reg_channel *reg_chan;
-
-       DBG3("Buffer registry session destroy");
-
-       /* Destroy all channels. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry(regp->channels->ht, &iter.iter, reg_chan,
-                       node.node) {
-               ret = lttng_ht_del(regp->channels, &iter);
-               LTTNG_ASSERT(!ret);
-               buffer_reg_channel_destroy(reg_chan, domain);
-       }
-       rcu_read_unlock();
-
-       ht_cleanup_push(regp->channels);
-
-       switch (domain) {
-       case LTTNG_DOMAIN_UST:
-               ust_registry_session_destroy(regp->reg.ust);
-               free(regp->reg.ust);
-               break;
-       default:
-               abort();
-       }
-
-       free(regp);
-       return;
-}
-
-/*
- * Remove buffer registry UID object from the global hash table.
- */
-void buffer_reg_uid_remove(struct buffer_reg_uid *regp)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(regp);
-
-       rcu_read_lock();
-       iter.iter.node = &regp->node.node;
-       ret = lttng_ht_del(buffer_registry_uid, &iter);
-       LTTNG_ASSERT(!ret);
-       rcu_read_unlock();
-}
-
-static void rcu_free_buffer_reg_uid(struct rcu_head *head)
-{
-       struct lttng_ht_node_u64 *node =
-               caa_container_of(head, struct lttng_ht_node_u64, head);
-       struct buffer_reg_uid *reg =
-               caa_container_of(node, struct buffer_reg_uid, node);
-
-       buffer_reg_session_destroy(reg->registry, reg->domain);
-       free(reg);
-}
-
-static void rcu_free_buffer_reg_pid(struct rcu_head *head)
-{
-       struct lttng_ht_node_u64 *node =
-               caa_container_of(head, struct lttng_ht_node_u64, head);
-       struct buffer_reg_pid *reg =
-               caa_container_of(node, struct buffer_reg_pid, node);
-
-       buffer_reg_session_destroy(reg->registry, LTTNG_DOMAIN_UST);
-       free(reg);
-}
-
-/*
- * Destroy buffer registry per UID. The given pointer is NOT removed from any
- * list or hash table. Use buffer_reg_pid_remove() before calling this function
- * for the case that the object is in the global hash table.
- */
-void buffer_reg_uid_destroy(struct buffer_reg_uid *regp,
-               struct consumer_output *consumer)
-{
-       struct consumer_socket *socket;
-
-       if (!regp) {
-               return;
-       }
-
-       DBG3("Buffer registry per UID destroy with id: %" PRIu64 ", ABI: %u, uid: %d",
-                       regp->session_id, regp->bits_per_long, regp->uid);
-
-       if (!consumer) {
-               goto destroy;
-       }
-
-       rcu_read_lock();
-       /* Get the right socket from the consumer object. */
-       socket = consumer_find_socket_by_bitness(regp->bits_per_long,
-                       consumer);
-       if (!socket) {
-               goto unlock;
-       }
-
-       switch (regp->domain) {
-       case LTTNG_DOMAIN_UST:
-               if (regp->registry->reg.ust->metadata_key) {
-                       /* Return value does not matter. This call will print errors. */
-                       (void) consumer_close_metadata(socket,
-                                       regp->registry->reg.ust->metadata_key);
-               }
-               break;
-       default:
-               abort();
-               rcu_read_unlock();
-               return;
-       }
-
-unlock:
-       rcu_read_unlock();
-destroy:
-       call_rcu(&regp->node.head, rcu_free_buffer_reg_uid);
-}
-
-/*
- * Remove buffer registry UID object from the global hash table. RCU read side
- * lock MUST be acquired before calling this.
- */
-void buffer_reg_pid_remove(struct buffer_reg_pid *regp)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(regp);
-
-       iter.iter.node = &regp->node.node;
-       ret = lttng_ht_del(buffer_registry_pid, &iter);
-       LTTNG_ASSERT(!ret);
-}
-
-/*
- * Destroy buffer registry per PID. The pointer is NOT removed from the global
- * hash table. Call buffer_reg_pid_remove() before that if the object was
- * previously added to the global hash table.
- */
-void buffer_reg_pid_destroy(struct buffer_reg_pid *regp)
-{
-       if (!regp) {
-               return;
-       }
-
-       DBG3("Buffer registry per PID destroy with id: %" PRIu64,
-                       regp->session_id);
-
-       /* This registry is only used by UST. */
-       call_rcu(&regp->node.head, rcu_free_buffer_reg_pid);
-}
-
-/*
- * Destroy per PID and UID registry hash table.
- *
- * Should *NOT* be called with RCU read-side lock held.
- */
-void buffer_reg_destroy_registries(void)
-{
-       DBG3("Buffer registry destroy all registry");
-       ht_cleanup_push(buffer_registry_uid);
-       ht_cleanup_push(buffer_registry_pid);
-}
diff --git a/src/bin/lttng-sessiond/buffer-registry.cpp b/src/bin/lttng-sessiond/buffer-registry.cpp
new file mode 100644 (file)
index 0000000..aca8ec1
--- /dev/null
@@ -0,0 +1,746 @@
+/*
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <inttypes.h>
+
+#include <common/common.h>
+#include <common/hashtable/utils.h>
+
+#include "buffer-registry.h"
+#include "fd-limit.h"
+#include "ust-consumer.h"
+#include "lttng-ust-ctl.h"
+#include "lttng-ust-error.h"
+#include "utils.h"
+
+/*
+ * Set in main.c during the daemon's initialization process. This is the
+ * global registry of buffer_reg_uid objects for per UID buffers. Objects
+ * are indexed by session id and matched by the triplet
+ * <session_id/bits_per_long/uid>.
+ */
+static struct lttng_ht *buffer_registry_uid;
+
+/*
+ * Initialized at daemon start. This contains buffer_reg_pid objects,
+ * indexed by session id.
+ */
+static struct lttng_ht *buffer_registry_pid;
+
+/*
+ * Match function for the per UID registry hash table. It matches a registry
+ * uid object with the triplet <session_id/abi/uid>.
+ */
+static int ht_match_reg_uid(struct cds_lfht_node *node, const void *_key)
+{
+       struct buffer_reg_uid *reg;
+       const struct buffer_reg_uid *key;
+
+       LTTNG_ASSERT(node);
+       LTTNG_ASSERT(_key);
+
+       reg = caa_container_of(node, struct buffer_reg_uid, node.node);
+       LTTNG_ASSERT(reg);
+       key = (buffer_reg_uid *) _key;
+
+       if (key->session_id != reg->session_id ||
+                       key->bits_per_long != reg->bits_per_long ||
+                       key->uid != reg->uid) {
+               goto no_match;
+       }
+
+       /* Match */
+       return 1;
+no_match:
+       return 0;
+}
+
+/*
+ * Hash function for the per UID registry hash table. This XORs the triplet's
+ * members together.
+ */
+static unsigned long ht_hash_reg_uid(const void *_key, unsigned long seed)
+{
+       uint64_t xored_key;
+       const struct buffer_reg_uid *key = (buffer_reg_uid *) _key;
+
+       LTTNG_ASSERT(key);
+
+       xored_key = (uint64_t)(key->session_id ^ key->bits_per_long ^ key->uid);
+       return hash_key_u64(&xored_key, seed);
+}
+
+/*
+ * Initialize global buffer per UID registry. Should only be called ONCE.
+ */
+void buffer_reg_init_uid_registry(void)
+{
+       /* Should be called once. */
+       LTTNG_ASSERT(!buffer_registry_uid);
+       buffer_registry_uid = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       LTTNG_ASSERT(buffer_registry_uid);
+       buffer_registry_uid->match_fct = ht_match_reg_uid;
+       buffer_registry_uid->hash_fct = ht_hash_reg_uid;
+
+       DBG3("Global buffer per UID registry initialized");
+}
+
+/*
+ * Allocate and initialize object. Set regp with the object pointer.
+ *
+ * Return 0 on success else a negative value and regp is untouched.
+ */
+int buffer_reg_uid_create(uint64_t session_id, uint32_t bits_per_long, uid_t uid,
+               enum lttng_domain_type domain, struct buffer_reg_uid **regp,
+               const char *root_shm_path, const char *shm_path)
+{
+       int ret = 0;
+       struct buffer_reg_uid *reg = NULL;
+
+       LTTNG_ASSERT(regp);
+
+       reg = (buffer_reg_uid *) zmalloc(sizeof(*reg));
+       if (!reg) {
+               PERROR("zmalloc buffer registry uid");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       reg->registry = (buffer_reg_session *) zmalloc(sizeof(struct buffer_reg_session));
+       if (!reg->registry) {
+               PERROR("zmalloc buffer registry uid session");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       reg->session_id = session_id;
+       reg->bits_per_long = bits_per_long;
+       reg->uid = uid;
+       reg->domain = domain;
+       if (shm_path[0]) {
+               strncpy(reg->root_shm_path, root_shm_path, sizeof(reg->root_shm_path));
+               reg->root_shm_path[sizeof(reg->root_shm_path) - 1] = '\0';
+               strncpy(reg->shm_path, shm_path, sizeof(reg->shm_path));
+               reg->shm_path[sizeof(reg->shm_path) - 1] = '\0';
+               DBG3("shm path '%s' is assigned to uid buffer registry for session id %" PRIu64,
+                       reg->shm_path, session_id);
+       }
+       reg->registry->channels = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       if (!reg->registry->channels) {
+               ret = -ENOMEM;
+               goto error_session;
+       }
+
+       cds_lfht_node_init(&reg->node.node);
+       *regp = reg;
+
+       DBG3("Buffer registry per UID created id: %" PRIu64 ", ABI: %u, uid: %d, domain: %d",
+                       session_id, bits_per_long, uid, domain);
+
+       return 0;
+
+error_session:
+       free(reg->registry);
+error:
+       free(reg);
+       return ret;
+}
+
+/*
+ * Add a buffer registry per UID object to the global registry.
+ */
+void buffer_reg_uid_add(struct buffer_reg_uid *reg)
+{
+       struct cds_lfht_node *nodep;
+       struct lttng_ht *ht = buffer_registry_uid;
+
+       LTTNG_ASSERT(reg);
+
+       DBG3("Buffer registry per UID adding to global registry with id: %" PRIu64 ,
+                       reg->session_id);
+
+       rcu_read_lock();
+       nodep = cds_lfht_add_unique(ht->ht, ht->hash_fct(reg, lttng_ht_seed),
+                       ht->match_fct, reg, &reg->node.node);
+       LTTNG_ASSERT(nodep == &reg->node.node);
+       rcu_read_unlock();
+}
+
+/*
+ * Find a buffer registry per UID object with given params. RCU read side lock
+ * MUST be acquired before calling this and held to protect the object.
+ *
+ * Return the object pointer or NULL on error.
+ */
+struct buffer_reg_uid *buffer_reg_uid_find(uint64_t session_id,
+               uint32_t bits_per_long, uid_t uid)
+{
+       struct lttng_ht_node_u64 *node;
+       struct lttng_ht_iter iter;
+       struct buffer_reg_uid *reg = NULL, key;
+       struct lttng_ht *ht = buffer_registry_uid;
+
+       /* Setup key we are looking for. */
+       key.session_id = session_id;
+       key.bits_per_long = bits_per_long;
+       key.uid = uid;
+
+       DBG3("Buffer registry per UID find id: %" PRIu64 ", ABI: %u, uid: %d",
+                       session_id, bits_per_long, uid);
+
+       /* Custom lookup function since it's a different key. */
+       cds_lfht_lookup(ht->ht, ht->hash_fct(&key, lttng_ht_seed), ht->match_fct,
+                       &key, &iter.iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (!node) {
+               goto end;
+       }
+       reg = caa_container_of(node, struct buffer_reg_uid, node);
+
+end:
+       return reg;
+}
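
Aside (not part of the patch): buffer_reg_uid_find() works because buffer_reg_init_uid_registry() installed ht_match_reg_uid()/ht_hash_reg_uid(), so a stack-allocated buffer_reg_uid can serve as the lookup key. A self-contained sketch of the same pattern directly on liburcu's cds_lfht; the entry, match_entry and hash_entry names are illustrative only:

    #include <stdio.h>
    #include <urcu.h>
    #include <urcu/compiler.h>
    #include <urcu/rculfhash.h>

    struct entry {
            unsigned long a, b;                  /* composite key */
            struct cds_lfht_node node;
    };

    static int match_entry(struct cds_lfht_node *node, const void *_key)
    {
            const struct entry *key = (const struct entry *) _key;
            struct entry *e = caa_container_of(node, struct entry, node);

            return e->a == key->a && e->b == key->b;
    }

    static unsigned long hash_entry(const struct entry *e)
    {
            return e->a ^ e->b;                  /* toy hash, like the XOR above */
    }

    int main(void)
    {
            struct cds_lfht *ht;
            struct entry item = { 1, 2 }, key = { 1, 2 };
            struct cds_lfht_iter iter;

            rcu_register_thread();
            ht = cds_lfht_new(1, 1, 0, CDS_LFHT_AUTO_RESIZE, NULL);
            if (!ht) {
                    return 1;
            }

            cds_lfht_node_init(&item.node);
            rcu_read_lock();
            cds_lfht_add(ht, hash_entry(&item), &item.node);

            /* Lookup with a stack-allocated key, as buffer_reg_uid_find() does. */
            cds_lfht_lookup(ht, hash_entry(&key), match_entry, &key, &iter);
            printf("found: %d\n", cds_lfht_iter_get_node(&iter) != NULL);
            rcu_read_unlock();

            rcu_unregister_thread();
            return 0;
    }
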
+
+/*
+ * Initialize the global per-PID buffer registry. Should only be called ONCE.
+ */
+void buffer_reg_init_pid_registry(void)
+{
+       /* Should be called once. */
+       LTTNG_ASSERT(!buffer_registry_pid);
+       buffer_registry_pid = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       LTTNG_ASSERT(buffer_registry_pid);
+
+       DBG3("Global buffer per PID registry initialized");
+}
+
+/*
+ * Allocate and initialize object. Set regp with the object pointer.
+ *
+ * Return 0 on success else a negative value and regp is untouched.
+ */
+int buffer_reg_pid_create(uint64_t session_id, struct buffer_reg_pid **regp,
+               const char *root_shm_path, const char *shm_path)
+{
+       int ret = 0;
+       struct buffer_reg_pid *reg = NULL;
+
+       LTTNG_ASSERT(regp);
+
+       reg = (buffer_reg_pid *) zmalloc(sizeof(*reg));
+       if (!reg) {
+               PERROR("zmalloc buffer registry pid");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       reg->registry = (buffer_reg_session *) zmalloc(sizeof(struct buffer_reg_session));
+       if (!reg->registry) {
+               PERROR("zmalloc buffer registry pid session");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       /* The session ID is used directly as the u64 ht node key. */
+       reg->session_id = session_id;
+       if (shm_path[0]) {
+               strncpy(reg->root_shm_path, root_shm_path, sizeof(reg->root_shm_path));
+               reg->root_shm_path[sizeof(reg->root_shm_path) - 1] = '\0';
+               strncpy(reg->shm_path, shm_path, sizeof(reg->shm_path));
+               reg->shm_path[sizeof(reg->shm_path) - 1] = '\0';
+               DBG3("shm path '%s' is assigned to pid buffer registry for session id %" PRIu64,
+                               reg->shm_path, session_id);
+       }
+       reg->registry->channels = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       if (!reg->registry->channels) {
+               ret = -ENOMEM;
+               goto error_session;
+       }
+
+       lttng_ht_node_init_u64(&reg->node, reg->session_id);
+       *regp = reg;
+
+       DBG3("Buffer registry per PID created with session id: %" PRIu64,
+                       session_id);
+
+       return 0;
+
+error_session:
+       free(reg->registry);
+error:
+       free(reg);
+       return ret;
+}
+
+/*
+ * Add a buffer registry per PID object to the global registry.
+ */
+void buffer_reg_pid_add(struct buffer_reg_pid *reg)
+{
+       LTTNG_ASSERT(reg);
+
+       DBG3("Buffer registry per PID adding to global registry with id: %" PRIu64,
+                       reg->session_id);
+
+       rcu_read_lock();
+       lttng_ht_add_unique_u64(buffer_registry_pid, &reg->node);
+       rcu_read_unlock();
+}
+
+/*
+ * Find a buffer registry per PID object with given params. RCU read side lock
+ * MUST be acquired before calling this and held to protect the object.
+ *
+ * Return the object pointer or NULL on error.
+ */
+struct buffer_reg_pid *buffer_reg_pid_find(uint64_t session_id)
+{
+       struct lttng_ht_node_u64 *node;
+       struct lttng_ht_iter iter;
+       struct buffer_reg_pid *reg = NULL;
+       struct lttng_ht *ht = buffer_registry_pid;
+
+       DBG3("Buffer registry per PID find id: %" PRIu64, session_id);
+
+       lttng_ht_lookup(ht, &session_id, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (!node) {
+               goto end;
+       }
+       reg = caa_container_of(node, struct buffer_reg_pid, node);
+
+end:
+       return reg;
+}
+
+/*
+ * Find the consumer channel key from a UST session per-uid channel key.
+ *
+ * Return the matching key or -1 if not found.
+ */
+int buffer_reg_uid_consumer_channel_key(
+               struct cds_list_head *buffer_reg_uid_list,
+               uint64_t chan_key, uint64_t *consumer_chan_key)
+{
+       struct lttng_ht_iter iter;
+       struct buffer_reg_uid *uid_reg = NULL;
+       struct buffer_reg_session *session_reg = NULL;
+       struct buffer_reg_channel *reg_chan;
+       int ret = -1;
+
+       rcu_read_lock();
+       /*
+        * For the per-uid registry, we have to iterate since we don't have the
+        * uid and bitness key.
+        */
+       cds_list_for_each_entry(uid_reg, buffer_reg_uid_list, lnode) {
+               session_reg = uid_reg->registry;
+               cds_lfht_for_each_entry(session_reg->channels->ht,
+                               &iter.iter, reg_chan, node.node) {
+                       if (reg_chan->key == chan_key) {
+                               *consumer_chan_key = reg_chan->consumer_key;
+                               ret = 0;
+                               goto end;
+                       }
+               }
+       }
+
+end:
+       rcu_read_unlock();
+       return ret;
+}
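
Aside (not part of the patch): since the per-uid registries are not keyed by channel key, the function above walks every channel hash table with cds_lfht_for_each_entry(), which hides the iterator plumbing. A minimal sketch of that iteration pattern, with illustrative chan and find_consumer_key names; the caller is assumed to hold the RCU read-side lock:

    #include <stdint.h>
    #include <urcu.h>
    #include <urcu/rculfhash.h>

    struct chan {
            uint64_t key, consumer_key;
            struct cds_lfht_node node;
    };

    /* Returns 0 and sets *out on match, -1 otherwise. */
    static int find_consumer_key(struct cds_lfht *ht, uint64_t key, uint64_t *out)
    {
            struct cds_lfht_iter iter;
            struct chan *c;

            cds_lfht_for_each_entry(ht, &iter, c, node) {
                    if (c->key == key) {
                            *out = c->consumer_key;
                            return 0;
                    }
            }

            return -1;
    }
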
+
+/*
+ * Allocate and initialize a buffer registry channel with the given key. Set
+ * regp with the object pointer.
+ *
+ * Return 0 on success or else a negative value keeping regp untouched.
+ */
+int buffer_reg_channel_create(uint64_t key, struct buffer_reg_channel **regp)
+{
+       struct buffer_reg_channel *reg;
+
+       LTTNG_ASSERT(regp);
+
+       DBG3("Buffer registry channel create with key: %" PRIu64, key);
+
+       reg = (buffer_reg_channel *) zmalloc(sizeof(*reg));
+       if (!reg) {
+               PERROR("zmalloc buffer registry channel");
+               return -ENOMEM;
+       }
+
+       reg->key = key;
+       CDS_INIT_LIST_HEAD(&reg->streams);
+       pthread_mutex_init(&reg->stream_list_lock, NULL);
+
+       lttng_ht_node_init_u64(&reg->node, key);
+       *regp = reg;
+
+       return 0;
+}
+
+/*
+ * Allocate and initialize a buffer registry stream. Set regp with the object
+ * pointer.
+ *
+ * Return 0 on success or else a negative value keeping regp untouched.
+ */
+int buffer_reg_stream_create(struct buffer_reg_stream **regp)
+{
+       struct buffer_reg_stream *reg;
+
+       LTTNG_ASSERT(regp);
+
+       DBG3("Buffer registry creating stream");
+
+       reg = (buffer_reg_stream *) zmalloc(sizeof(*reg));
+       if (!reg) {
+               PERROR("zmalloc buffer registry stream");
+               return -ENOMEM;
+       }
+
+       *regp = reg;
+
+       return 0;
+}
+
+/*
+ * Add stream to the list in the channel.
+ */
+void buffer_reg_stream_add(struct buffer_reg_stream *stream,
+               struct buffer_reg_channel *channel)
+{
+       LTTNG_ASSERT(stream);
+       LTTNG_ASSERT(channel);
+
+       pthread_mutex_lock(&channel->stream_list_lock);
+       cds_list_add_tail(&stream->lnode, &channel->streams);
+       channel->stream_count++;
+       pthread_mutex_unlock(&channel->stream_list_lock);
+}
+
+/*
+ * Add a buffer registry channel object to the given session.
+ */
+void buffer_reg_channel_add(struct buffer_reg_session *session,
+               struct buffer_reg_channel *channel)
+{
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(channel);
+
+       rcu_read_lock();
+       lttng_ht_add_unique_u64(session->channels, &channel->node);
+       rcu_read_unlock();
+}
+
+/*
+ * Find a buffer registry channel object with the given key. RCU read side lock
+ * MUST be acquired and held until the object reference is no longer
+ * needed.
+ *
+ * Return the object pointer or NULL on error.
+ */
+struct buffer_reg_channel *buffer_reg_channel_find(uint64_t key,
+               struct buffer_reg_uid *reg)
+{
+       struct lttng_ht_node_u64 *node;
+       struct lttng_ht_iter iter;
+       struct buffer_reg_channel *chan = NULL;
+       struct lttng_ht *ht;
+
+       LTTNG_ASSERT(reg);
+
+       switch (reg->domain) {
+       case LTTNG_DOMAIN_UST:
+               ht = reg->registry->channels;
+               break;
+       default:
+               abort();
+               goto end;
+       }
+
+       lttng_ht_lookup(ht, &key, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (!node) {
+               goto end;
+       }
+       chan = caa_container_of(node, struct buffer_reg_channel, node);
+
+end:
+       return chan;
+}
+
+/*
+ * Destroy a buffer registry stream with the given domain.
+ */
+void buffer_reg_stream_destroy(struct buffer_reg_stream *regp,
+               enum lttng_domain_type domain)
+{
+       if (!regp) {
+               return;
+       }
+
+       DBG3("Buffer registry stream destroy with handle %d",
+                       regp->obj.ust->handle);
+
+       switch (domain) {
+       case LTTNG_DOMAIN_UST:
+       {
+               int ret;
+
+               ret = ust_app_release_object(NULL, regp->obj.ust);
+               if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+                       ERR("Buffer reg stream release obj handle %d failed with ret %d",
+                                       regp->obj.ust->handle, ret);
+               }
+               free(regp->obj.ust);
+               lttng_fd_put(LTTNG_FD_APPS, 2);
+               break;
+       }
+       default:
+               abort();
+       }
+
+       free(regp);
+       return;
+}
+
+/*
+ * Remove buffer registry channel object from the session hash table. RCU read
+ * side lock MUST be acquired before calling this.
+ */
+void buffer_reg_channel_remove(struct buffer_reg_session *session,
+               struct buffer_reg_channel *regp)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(regp);
+
+       iter.iter.node = &regp->node.node;
+       ret = lttng_ht_del(session->channels, &iter);
+       LTTNG_ASSERT(!ret);
+}
+
+/*
+ * Destroy a buffer registry channel with the given domain.
+ */
+void buffer_reg_channel_destroy(struct buffer_reg_channel *regp,
+               enum lttng_domain_type domain)
+{
+       if (!regp) {
+               return;
+       }
+
+       DBG3("Buffer registry channel destroy with key %" PRIu32, regp->key);
+
+       switch (domain) {
+       case LTTNG_DOMAIN_UST:
+       {
+               int ret;
+               struct buffer_reg_stream *sreg, *stmp;
+               /* Wipe stream */
+               cds_list_for_each_entry_safe(sreg, stmp, &regp->streams, lnode) {
+                       cds_list_del(&sreg->lnode);
+                       regp->stream_count--;
+                       buffer_reg_stream_destroy(sreg, domain);
+               }
+
+               if (regp->obj.ust) {
+                       ret = ust_app_release_object(NULL, regp->obj.ust);
+                       if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+                               ERR("Buffer reg channel release obj handle %d failed with ret %d",
+                                               regp->obj.ust->handle, ret);
+                       }
+                       free(regp->obj.ust);
+               }
+               lttng_fd_put(LTTNG_FD_APPS, 1);
+               break;
+       }
+       default:
+               abort();
+       }
+
+       free(regp);
+       return;
+}
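
Aside (not part of the patch): the stream wipe above must use the _safe list iterator because each node is unlinked (and later freed) while the list is being walked; the plain cds_list_for_each_entry() would read the next pointer of an element that is already gone. A minimal sketch with illustrative types:

    #include <stdlib.h>
    #include <urcu/list.h>

    struct stream {
            struct cds_list_head lnode;
    };

    static void wipe_streams(struct cds_list_head *streams)
    {
            struct stream *s, *tmp;

            /* 'tmp' holds the next element so 's' can be unlinked and freed. */
            cds_list_for_each_entry_safe(s, tmp, streams, lnode) {
                    cds_list_del(&s->lnode);
                    free(s);
            }
    }
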
+
+/*
+ * Destroy a buffer registry session with the given domain.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
+ */
+static void buffer_reg_session_destroy(struct buffer_reg_session *regp,
+               enum lttng_domain_type domain)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+       struct buffer_reg_channel *reg_chan;
+
+       DBG3("Buffer registry session destroy");
+
+       /* Destroy all channels. */
+       rcu_read_lock();
+       cds_lfht_for_each_entry(regp->channels->ht, &iter.iter, reg_chan,
+                       node.node) {
+               ret = lttng_ht_del(regp->channels, &iter);
+               LTTNG_ASSERT(!ret);
+               buffer_reg_channel_destroy(reg_chan, domain);
+       }
+       rcu_read_unlock();
+
+       ht_cleanup_push(regp->channels);
+
+       switch (domain) {
+       case LTTNG_DOMAIN_UST:
+               ust_registry_session_destroy(regp->reg.ust);
+               free(regp->reg.ust);
+               break;
+       default:
+               abort();
+       }
+
+       free(regp);
+       return;
+}
+
+/*
+ * Remove buffer registry UID object from the global hash table.
+ */
+void buffer_reg_uid_remove(struct buffer_reg_uid *regp)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(regp);
+
+       rcu_read_lock();
+       iter.iter.node = &regp->node.node;
+       ret = lttng_ht_del(buffer_registry_uid, &iter);
+       LTTNG_ASSERT(!ret);
+       rcu_read_unlock();
+}
+
+static void rcu_free_buffer_reg_uid(struct rcu_head *head)
+{
+       struct lttng_ht_node_u64 *node =
+               caa_container_of(head, struct lttng_ht_node_u64, head);
+       struct buffer_reg_uid *reg =
+               caa_container_of(node, struct buffer_reg_uid, node);
+
+       buffer_reg_session_destroy(reg->registry, reg->domain);
+       free(reg);
+}
+
+static void rcu_free_buffer_reg_pid(struct rcu_head *head)
+{
+       struct lttng_ht_node_u64 *node =
+               caa_container_of(head, struct lttng_ht_node_u64, head);
+       struct buffer_reg_pid *reg =
+               caa_container_of(node, struct buffer_reg_pid, node);
+
+       buffer_reg_session_destroy(reg->registry, LTTNG_DOMAIN_UST);
+       free(reg);
+}
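
Aside (not part of the patch): these two callbacks implement the usual RCU deferred-free pattern: the registry object is first unpublished (buffer_reg_uid_remove() / buffer_reg_pid_remove()), then call_rcu() runs the actual free only after a grace period, so readers still holding the RCU read-side lock never touch freed memory. A minimal sketch with an illustrative item type:

    #include <stdlib.h>
    #include <urcu.h>
    #include <urcu/compiler.h>

    struct item {
            int value;
            struct rcu_head rcu;
    };

    static void free_item_rcu(struct rcu_head *head)
    {
            struct item *it = caa_container_of(head, struct item, rcu);

            free(it);
    }

    static void item_release(struct item *it)
    {
            /* 'it' must already be unreachable by new RCU readers. */
            call_rcu(&it->rcu, free_item_rcu);
    }
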
+
+/*
+ * Destroy buffer registry per UID. The given pointer is NOT removed from any
+ * list or hash table. Use buffer_reg_uid_remove() before calling this function
+ * for the case that the object is in the global hash table.
+ */
+void buffer_reg_uid_destroy(struct buffer_reg_uid *regp,
+               struct consumer_output *consumer)
+{
+       struct consumer_socket *socket;
+
+       if (!regp) {
+               return;
+       }
+
+       DBG3("Buffer registry per UID destroy with id: %" PRIu64 ", ABI: %u, uid: %d",
+                       regp->session_id, regp->bits_per_long, regp->uid);
+
+       if (!consumer) {
+               goto destroy;
+       }
+
+       rcu_read_lock();
+       /* Get the right socket from the consumer object. */
+       socket = consumer_find_socket_by_bitness(regp->bits_per_long,
+                       consumer);
+       if (!socket) {
+               goto unlock;
+       }
+
+       switch (regp->domain) {
+       case LTTNG_DOMAIN_UST:
+               if (regp->registry->reg.ust->metadata_key) {
+                       /* Return value does not matter. This call will print errors. */
+                       (void) consumer_close_metadata(socket,
+                                       regp->registry->reg.ust->metadata_key);
+               }
+               break;
+       default:
+               abort();
+               rcu_read_unlock();
+               return;
+       }
+
+unlock:
+       rcu_read_unlock();
+destroy:
+       call_rcu(&regp->node.head, rcu_free_buffer_reg_uid);
+}
+
+/*
+ * Remove buffer registry PID object from the global hash table. RCU read side
+ * lock MUST be acquired before calling this.
+ */
+void buffer_reg_pid_remove(struct buffer_reg_pid *regp)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(regp);
+
+       iter.iter.node = &regp->node.node;
+       ret = lttng_ht_del(buffer_registry_pid, &iter);
+       LTTNG_ASSERT(!ret);
+}
+
+/*
+ * Destroy buffer registry per PID. The pointer is NOT removed from the global
+ * hash table. Call buffer_reg_pid_remove() before that if the object was
+ * previously added to the global hash table.
+ */
+void buffer_reg_pid_destroy(struct buffer_reg_pid *regp)
+{
+       if (!regp) {
+               return;
+       }
+
+       DBG3("Buffer registry per PID destroy with id: %" PRIu64,
+                       regp->session_id);
+
+       /* This registry is only used by UST. */
+       call_rcu(&regp->node.head, rcu_free_buffer_reg_pid);
+}
+
+/*
+ * Destroy the per-PID and per-UID registry hash tables.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
+ */
+void buffer_reg_destroy_registries(void)
+{
+       DBG3("Buffer registry destroy all registry");
+       ht_cleanup_push(buffer_registry_uid);
+       ht_cleanup_push(buffer_registry_pid);
+}
diff --git a/src/bin/lttng-sessiond/channel.c b/src/bin/lttng-sessiond/channel.c
deleted file mode 100644 (file)
index e84da03..0000000
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <inttypes.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <common/common.h>
-#include <common/defaults.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-
-#include "channel.h"
-#include "lttng-sessiond.h"
-#include "kernel.h"
-#include "lttng-ust-ctl.h"
-#include "lttng-ust-error.h"
-#include "utils.h"
-#include "ust-app.h"
-#include "agent.h"
-
-/*
- * Return allocated channel attributes.
- */
-struct lttng_channel *channel_new_default_attr(int dom,
-               enum lttng_buffer_type type)
-{
-       struct lttng_channel *chan;
-       const char *channel_name = DEFAULT_CHANNEL_NAME;
-       struct lttng_channel_extended *extended_attr = NULL;
-
-       chan = zmalloc(sizeof(struct lttng_channel));
-       if (chan == NULL) {
-               PERROR("zmalloc channel init");
-               goto error_alloc;
-       }
-
-       extended_attr = zmalloc(sizeof(struct lttng_channel_extended));
-       if (!extended_attr) {
-               PERROR("zmalloc channel extended init");
-               goto error;
-       }
-
-       chan->attr.extended.ptr = extended_attr;
-
-       /* Same for all domains. */
-       chan->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
-       chan->attr.tracefile_size = DEFAULT_CHANNEL_TRACEFILE_SIZE;
-       chan->attr.tracefile_count = DEFAULT_CHANNEL_TRACEFILE_COUNT;
-
-       switch (dom) {
-       case LTTNG_DOMAIN_KERNEL:
-               LTTNG_ASSERT(type == LTTNG_BUFFER_GLOBAL);
-               chan->attr.subbuf_size =
-                       default_get_kernel_channel_subbuf_size();
-               chan->attr.num_subbuf = DEFAULT_KERNEL_CHANNEL_SUBBUF_NUM;
-               chan->attr.output = DEFAULT_KERNEL_CHANNEL_OUTPUT;
-               chan->attr.switch_timer_interval = DEFAULT_KERNEL_CHANNEL_SWITCH_TIMER;
-               chan->attr.read_timer_interval = DEFAULT_KERNEL_CHANNEL_READ_TIMER;
-               chan->attr.live_timer_interval = DEFAULT_KERNEL_CHANNEL_LIVE_TIMER;
-               extended_attr->blocking_timeout = DEFAULT_KERNEL_CHANNEL_BLOCKING_TIMEOUT;
-               extended_attr->monitor_timer_interval =
-                       DEFAULT_KERNEL_CHANNEL_MONITOR_TIMER;
-               break;
-       case LTTNG_DOMAIN_JUL:
-               channel_name = DEFAULT_JUL_CHANNEL_NAME;
-               goto common_ust;
-       case LTTNG_DOMAIN_LOG4J:
-               channel_name = DEFAULT_LOG4J_CHANNEL_NAME;
-               goto common_ust;
-       case LTTNG_DOMAIN_PYTHON:
-               channel_name = DEFAULT_PYTHON_CHANNEL_NAME;
-               goto common_ust;
-       case LTTNG_DOMAIN_UST:
-common_ust:
-               switch (type) {
-               case LTTNG_BUFFER_PER_UID:
-                       chan->attr.subbuf_size = default_get_ust_uid_channel_subbuf_size();
-                       chan->attr.num_subbuf = DEFAULT_UST_UID_CHANNEL_SUBBUF_NUM;
-                       chan->attr.output = DEFAULT_UST_UID_CHANNEL_OUTPUT;
-                       chan->attr.switch_timer_interval =
-                               DEFAULT_UST_UID_CHANNEL_SWITCH_TIMER;
-                       chan->attr.read_timer_interval =
-                               DEFAULT_UST_UID_CHANNEL_READ_TIMER;
-                       chan->attr.live_timer_interval =
-                               DEFAULT_UST_UID_CHANNEL_LIVE_TIMER;
-                       extended_attr->blocking_timeout = DEFAULT_UST_UID_CHANNEL_BLOCKING_TIMEOUT;
-                       extended_attr->monitor_timer_interval =
-                               DEFAULT_UST_UID_CHANNEL_MONITOR_TIMER;
-                       break;
-               case LTTNG_BUFFER_PER_PID:
-               default:
-                       chan->attr.subbuf_size = default_get_ust_pid_channel_subbuf_size();
-                       chan->attr.num_subbuf = DEFAULT_UST_PID_CHANNEL_SUBBUF_NUM;
-                       chan->attr.output = DEFAULT_UST_PID_CHANNEL_OUTPUT;
-                       chan->attr.switch_timer_interval =
-                               DEFAULT_UST_PID_CHANNEL_SWITCH_TIMER;
-                       chan->attr.read_timer_interval =
-                               DEFAULT_UST_PID_CHANNEL_READ_TIMER;
-                       chan->attr.live_timer_interval =
-                               DEFAULT_UST_PID_CHANNEL_LIVE_TIMER;
-                       extended_attr->blocking_timeout = DEFAULT_UST_PID_CHANNEL_BLOCKING_TIMEOUT;
-                       extended_attr->monitor_timer_interval =
-                               DEFAULT_UST_PID_CHANNEL_MONITOR_TIMER;
-                       break;
-               }
-               break;
-       default:
-               goto error;     /* Not implemented */
-       }
-
-       if (snprintf(chan->name, sizeof(chan->name), "%s",
-                       channel_name) < 0) {
-               PERROR("snprintf default channel name");
-               goto error;
-       }
-       return chan;
-
-error:
-       free(extended_attr);
-       free(chan);
-error_alloc:
-       return NULL;
-}
-
-void channel_attr_destroy(struct lttng_channel *channel)
-{
-       if (!channel) {
-               return;
-       }
-       free(channel->attr.extended.ptr);
-       free(channel);
-}
-
-/*
- * Disable kernel channel of the kernel session.
- */
-int channel_kernel_disable(struct ltt_kernel_session *ksession,
-               char *channel_name)
-{
-       int ret;
-       struct ltt_kernel_channel *kchan;
-
-       LTTNG_ASSERT(ksession);
-       LTTNG_ASSERT(channel_name);
-
-       kchan = trace_kernel_get_channel_by_name(channel_name, ksession);
-       if (kchan == NULL) {
-               ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
-               goto error;
-       }
-
-       /* Only if channel is enabled disable it. */
-       if (kchan->enabled == 1) {
-               ret = kernel_disable_channel(kchan);
-               if (ret < 0 && ret != -EEXIST) {
-                       ret = LTTNG_ERR_KERN_CHAN_DISABLE_FAIL;
-                       goto error;
-               }
-       }
-
-       ret = LTTNG_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Enable kernel channel of the kernel session.
- */
-int channel_kernel_enable(struct ltt_kernel_session *ksession,
-               struct ltt_kernel_channel *kchan)
-{
-       int ret;
-
-       LTTNG_ASSERT(ksession);
-       LTTNG_ASSERT(kchan);
-
-       if (kchan->enabled == 0) {
-               ret = kernel_enable_channel(kchan);
-               if (ret < 0) {
-                       ret = LTTNG_ERR_KERN_CHAN_ENABLE_FAIL;
-                       goto error;
-               }
-       } else {
-               ret = LTTNG_ERR_KERN_CHAN_EXIST;
-               goto error;
-       }
-
-       ret = LTTNG_OK;
-
-error:
-       return ret;
-}
-
-static int channel_validate(struct lttng_channel *attr)
-{
-       /*
-        * The ringbuffer (both in user space and kernel) behaves badly
-        * in overwrite mode and with less than 2 subbuffers so block it
-        * right away and send back an invalid attribute error.
-        */
-       if (attr->attr.overwrite && attr->attr.num_subbuf < 2) {
-               return -1;
-       }
-       return 0;
-}
-
-static int channel_validate_kernel(struct lttng_channel *attr)
-{
-       /* Kernel channels do not support blocking timeout. */
-       if (((struct lttng_channel_extended *)attr->attr.extended.ptr)->blocking_timeout) {
-               return -1;
-       }
-       return 0;
-}
-
-/*
- * Create kernel channel of the kernel session and notify kernel thread.
- */
-int channel_kernel_create(struct ltt_kernel_session *ksession,
-               struct lttng_channel *attr, int kernel_pipe)
-{
-       int ret;
-       struct lttng_channel *defattr = NULL;
-
-       LTTNG_ASSERT(ksession);
-
-       /* Creating channel attributes if needed */
-       if (attr == NULL) {
-               defattr = channel_new_default_attr(LTTNG_DOMAIN_KERNEL,
-                               LTTNG_BUFFER_GLOBAL);
-               if (defattr == NULL) {
-                       ret = LTTNG_ERR_FATAL;
-                       goto error;
-               }
-               attr = defattr;
-       }
-
-       /*
-        * Set the overwrite mode for this channel based on the session
-        * type unless the client explicitly overrides the channel mode.
-        */
-       if (attr->attr.overwrite == DEFAULT_CHANNEL_OVERWRITE) {
-               attr->attr.overwrite = !!ksession->snapshot_mode;
-       }
-
-       /* Validate common channel properties. */
-       if (channel_validate(attr) < 0) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       if (channel_validate_kernel(attr) < 0) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       /* Channel not found, creating it */
-       ret = kernel_create_channel(ksession, attr);
-       if (ret < 0) {
-               ret = LTTNG_ERR_KERN_CHAN_FAIL;
-               goto error;
-       }
-
-       /* Notify kernel thread that there is a new channel */
-       ret = notify_thread_pipe(kernel_pipe);
-       if (ret < 0) {
-               ret = LTTNG_ERR_FATAL;
-               goto error;
-       }
-
-       ret = LTTNG_OK;
-error:
-       channel_attr_destroy(defattr);
-       return ret;
-}
-
-/*
- * Enable UST channel for session and domain.
- */
-int channel_ust_enable(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan)
-{
-       int ret = LTTNG_OK;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(uchan);
-
-       /* If already enabled, everything is OK */
-       if (uchan->enabled) {
-               DBG3("Channel %s already enabled. Skipping", uchan->name);
-               ret = LTTNG_ERR_UST_CHAN_EXIST;
-               goto end;
-       } else {
-               uchan->enabled = 1;
-               DBG2("Channel %s enabled successfully", uchan->name);
-       }
-
-       if (!usess->active) {
-               /*
-                * The channel will be activated against the apps
-                * when the session is started as part of the
-                * application channel "synchronize" operation.
-                */
-               goto end;
-       }
-
-       DBG2("Channel %s being enabled in UST domain", uchan->name);
-
-       /*
-        * Enable channel for UST global domain on all applications. Ignore return
-        * value here since whatever error we got, it means that the channel was
-        * not created on one or many registered applications and we can not report
-        * this to the user yet. However, at this stage, the channel was
-        * successfully created on the session daemon side so the enable-channel
-        * command is a success.
-        */
-       (void) ust_app_enable_channel_glb(usess, uchan);
-
-
-end:
-       return ret;
-}
-
-/*
- * Create UST channel for session and domain.
- */
-int channel_ust_create(struct ltt_ust_session *usess,
-               struct lttng_channel *attr, enum lttng_buffer_type type)
-{
-       int ret = LTTNG_OK;
-       struct ltt_ust_channel *uchan = NULL;
-       struct lttng_channel *defattr = NULL;
-       enum lttng_domain_type domain = LTTNG_DOMAIN_UST;
-       bool chan_published = false;
-
-       LTTNG_ASSERT(usess);
-
-       /* Creating channel attributes if needed */
-       if (attr == NULL) {
-               defattr = channel_new_default_attr(LTTNG_DOMAIN_UST, type);
-               if (defattr == NULL) {
-                       ret = LTTNG_ERR_FATAL;
-                       goto error;
-               }
-               attr = defattr;
-       } else {
-               /*
-                * HACK: Set the channel's subdomain (JUL, Log4j, Python, etc.)
-                * based on the default name.
-                */
-               if (!strcmp(attr->name, DEFAULT_JUL_CHANNEL_NAME)) {
-                       domain = LTTNG_DOMAIN_JUL;
-               } else if (!strcmp(attr->name, DEFAULT_LOG4J_CHANNEL_NAME)) {
-                       domain = LTTNG_DOMAIN_LOG4J;
-               } else if (!strcmp(attr->name, DEFAULT_PYTHON_CHANNEL_NAME)) {
-                       domain = LTTNG_DOMAIN_PYTHON;
-               }
-       }
-
-       /*
-        * Set the overwrite mode for this channel based on the session
-        * type unless the client explicitly overrides the channel mode.
-        */
-       if (attr->attr.overwrite == DEFAULT_CHANNEL_OVERWRITE) {
-               attr->attr.overwrite = !!usess->snapshot_mode;
-       }
-
-       /* Enforce mmap output for snapshot sessions. */
-       if (usess->snapshot_mode) {
-               attr->attr.output = LTTNG_EVENT_MMAP;
-       }
-
-       /* Validate common channel properties. */
-       if (channel_validate(attr) < 0) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       /*
-        * Validate UST buffer size and number of buffers: must both be power of 2
-        * and nonzero. We validate right here for UST, because applications will
-        * not report the error to the user (unlike kernel tracing).
-        */
-       if (!attr->attr.subbuf_size ||
-                       (attr->attr.subbuf_size & (attr->attr.subbuf_size - 1))) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       /*
-        * Invalid subbuffer size if it's lower then the page size.
-        */
-       if (attr->attr.subbuf_size < the_page_size) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       if (!attr->attr.num_subbuf ||
-                       (attr->attr.num_subbuf & (attr->attr.num_subbuf - 1))) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       if (attr->attr.output != LTTNG_EVENT_MMAP) {
-               ret = LTTNG_ERR_NOT_SUPPORTED;
-               goto error;
-       }
-
-       /*
-        * The tracefile_size should not be < to the subbuf_size, otherwise
-        * we won't be able to write the packets on disk
-        */
-       if ((attr->attr.tracefile_size > 0) &&
-                       (attr->attr.tracefile_size < attr->attr.subbuf_size)) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       /* Validate buffer type. */
-       switch (type) {
-       case LTTNG_BUFFER_PER_PID:
-               break;
-       case LTTNG_BUFFER_PER_UID:
-               break;
-       default:
-               ret = LTTNG_ERR_BUFFER_NOT_SUPPORTED;
-               goto error;
-       }
-
-       /* Create UST channel */
-       uchan = trace_ust_create_channel(attr, domain);
-       if (uchan == NULL) {
-               ret = LTTNG_ERR_FATAL;
-               goto error;
-       }
-
-       uchan->enabled = 1;
-       if (trace_ust_is_max_id(usess->used_channel_id)) {
-               ret = LTTNG_ERR_UST_CHAN_FAIL;
-               goto error;
-       }
-       uchan->id = trace_ust_get_next_chan_id(usess);
-
-       DBG2("Channel %s is being created for UST with buffer %d and id %" PRIu64,
-                       uchan->name, type, uchan->id);
-
-       /* Flag session buffer type. */
-       if (!usess->buffer_type_changed) {
-               usess->buffer_type = type;
-               usess->buffer_type_changed = 1;
-       } else if (usess->buffer_type != type) {
-               /* Buffer type was already set. Refuse to create channel. */
-               ret = LTTNG_ERR_BUFFER_TYPE_MISMATCH;
-               goto error_free_chan;
-       }
-
-       /* Adding the channel to the channel hash table. */
-       rcu_read_lock();
-       if (strncmp(uchan->name, DEFAULT_METADATA_NAME,
-                               sizeof(uchan->name))) {
-               lttng_ht_add_unique_str(usess->domain_global.channels, &uchan->node);
-               chan_published = true;
-       } else {
-               /*
-                * Copy channel attribute to session if this is metadata so if NO
-                * application exists we can access that data in the shadow copy during
-                * the global update of newly registered application.
-                */
-               memcpy(&usess->metadata_attr, &uchan->attr,
-                               sizeof(usess->metadata_attr));
-       }
-       rcu_read_unlock();
-
-       DBG2("Channel %s created successfully", uchan->name);
-       if (domain != LTTNG_DOMAIN_UST) {
-               struct agent *agt = trace_ust_find_agent(usess, domain);
-
-               if (!agt) {
-                       agt = agent_create(domain);
-                       if (!agt) {
-                               ret = LTTNG_ERR_NOMEM;
-                               goto error_remove_chan;
-                       }
-                       agent_add(agt, usess->agents);
-               }
-       }
-
-       channel_attr_destroy(defattr);
-       return LTTNG_OK;
-
-error_remove_chan:
-       if (chan_published) {
-               trace_ust_delete_channel(usess->domain_global.channels, uchan);
-       }
-error_free_chan:
-       trace_ust_destroy_channel(uchan);
-error:
-       channel_attr_destroy(defattr);
-       return ret;
-}
-
-/*
- * Disable UST channel for session and domain.
- */
-int channel_ust_disable(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan)
-{
-       int ret = LTTNG_OK;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(uchan);
-
-       /* Already disabled */
-       if (uchan->enabled == 0) {
-               DBG2("Channel UST %s already disabled", uchan->name);
-               goto end;
-       }
-
-       uchan->enabled = 0;
-
-       /*
-        * If session is inactive we don't notify the tracer right away. We
-        * wait for the next synchronization.
-        */
-       if (!usess->active) {
-               goto end;
-       }
-
-       DBG2("Channel %s being disabled in UST global domain", uchan->name);
-       /* Disable channel for global domain */
-       ret = ust_app_disable_channel_glb(usess, uchan);
-       if (ret < 0 && ret != -LTTNG_UST_ERR_EXIST) {
-               ret = LTTNG_ERR_UST_CHAN_DISABLE_FAIL;
-               goto error;
-       }
-
-       DBG2("Channel %s disabled successfully", uchan->name);
-
-       return LTTNG_OK;
-
-end:
-error:
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/channel.cpp b/src/bin/lttng-sessiond/channel.cpp
new file mode 100644 (file)
index 0000000..0f15097
--- /dev/null
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <inttypes.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <common/common.h>
+#include <common/defaults.h>
+#include <common/sessiond-comm/sessiond-comm.h>
+
+#include "channel.h"
+#include "lttng-sessiond.h"
+#include "kernel.h"
+#include "lttng-ust-ctl.h"
+#include "lttng-ust-error.h"
+#include "utils.h"
+#include "ust-app.h"
+#include "agent.h"
+
+/*
+ * Return allocated channel attributes.
+ */
+struct lttng_channel *channel_new_default_attr(int dom,
+               enum lttng_buffer_type type)
+{
+       struct lttng_channel *chan;
+       const char *channel_name = DEFAULT_CHANNEL_NAME;
+       struct lttng_channel_extended *extended_attr = NULL;
+
+       chan = (lttng_channel *) zmalloc(sizeof(struct lttng_channel));
+       if (chan == NULL) {
+               PERROR("zmalloc channel init");
+               goto error_alloc;
+       }
+
+       extended_attr = (lttng_channel_extended *) zmalloc(sizeof(struct lttng_channel_extended));
+       if (!extended_attr) {
+               PERROR("zmalloc channel extended init");
+               goto error;
+       }
+
+       chan->attr.extended.ptr = extended_attr;
+
+       /* Same for all domains. */
+       chan->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
+       chan->attr.tracefile_size = DEFAULT_CHANNEL_TRACEFILE_SIZE;
+       chan->attr.tracefile_count = DEFAULT_CHANNEL_TRACEFILE_COUNT;
+
+       switch (dom) {
+       case LTTNG_DOMAIN_KERNEL:
+               LTTNG_ASSERT(type == LTTNG_BUFFER_GLOBAL);
+               chan->attr.subbuf_size =
+                       default_get_kernel_channel_subbuf_size();
+               chan->attr.num_subbuf = DEFAULT_KERNEL_CHANNEL_SUBBUF_NUM;
+               chan->attr.output = DEFAULT_KERNEL_CHANNEL_OUTPUT;
+               chan->attr.switch_timer_interval = DEFAULT_KERNEL_CHANNEL_SWITCH_TIMER;
+               chan->attr.read_timer_interval = DEFAULT_KERNEL_CHANNEL_READ_TIMER;
+               chan->attr.live_timer_interval = DEFAULT_KERNEL_CHANNEL_LIVE_TIMER;
+               extended_attr->blocking_timeout = DEFAULT_KERNEL_CHANNEL_BLOCKING_TIMEOUT;
+               extended_attr->monitor_timer_interval =
+                       DEFAULT_KERNEL_CHANNEL_MONITOR_TIMER;
+               break;
+       case LTTNG_DOMAIN_JUL:
+               channel_name = DEFAULT_JUL_CHANNEL_NAME;
+               goto common_ust;
+       case LTTNG_DOMAIN_LOG4J:
+               channel_name = DEFAULT_LOG4J_CHANNEL_NAME;
+               goto common_ust;
+       case LTTNG_DOMAIN_PYTHON:
+               channel_name = DEFAULT_PYTHON_CHANNEL_NAME;
+               goto common_ust;
+       case LTTNG_DOMAIN_UST:
+common_ust:
+               switch (type) {
+               case LTTNG_BUFFER_PER_UID:
+                       chan->attr.subbuf_size = default_get_ust_uid_channel_subbuf_size();
+                       chan->attr.num_subbuf = DEFAULT_UST_UID_CHANNEL_SUBBUF_NUM;
+                       chan->attr.output = DEFAULT_UST_UID_CHANNEL_OUTPUT;
+                       chan->attr.switch_timer_interval =
+                               DEFAULT_UST_UID_CHANNEL_SWITCH_TIMER;
+                       chan->attr.read_timer_interval =
+                               DEFAULT_UST_UID_CHANNEL_READ_TIMER;
+                       chan->attr.live_timer_interval =
+                               DEFAULT_UST_UID_CHANNEL_LIVE_TIMER;
+                       extended_attr->blocking_timeout = DEFAULT_UST_UID_CHANNEL_BLOCKING_TIMEOUT;
+                       extended_attr->monitor_timer_interval =
+                               DEFAULT_UST_UID_CHANNEL_MONITOR_TIMER;
+                       break;
+               case LTTNG_BUFFER_PER_PID:
+               default:
+                       chan->attr.subbuf_size = default_get_ust_pid_channel_subbuf_size();
+                       chan->attr.num_subbuf = DEFAULT_UST_PID_CHANNEL_SUBBUF_NUM;
+                       chan->attr.output = DEFAULT_UST_PID_CHANNEL_OUTPUT;
+                       chan->attr.switch_timer_interval =
+                               DEFAULT_UST_PID_CHANNEL_SWITCH_TIMER;
+                       chan->attr.read_timer_interval =
+                               DEFAULT_UST_PID_CHANNEL_READ_TIMER;
+                       chan->attr.live_timer_interval =
+                               DEFAULT_UST_PID_CHANNEL_LIVE_TIMER;
+                       extended_attr->blocking_timeout = DEFAULT_UST_PID_CHANNEL_BLOCKING_TIMEOUT;
+                       extended_attr->monitor_timer_interval =
+                               DEFAULT_UST_PID_CHANNEL_MONITOR_TIMER;
+                       break;
+               }
+               break;
+       default:
+               goto error;     /* Not implemented */
+       }
+
+       if (snprintf(chan->name, sizeof(chan->name), "%s",
+                       channel_name) < 0) {
+               PERROR("snprintf default channel name");
+               goto error;
+       }
+       return chan;
+
+error:
+       free(extended_attr);
+       free(chan);
+error_alloc:
+       return NULL;
+}
+
+void channel_attr_destroy(struct lttng_channel *channel)
+{
+       if (!channel) {
+               return;
+       }
+       free(channel->attr.extended.ptr);
+       free(channel);
+}
+
+/*
+ * Disable kernel channel of the kernel session.
+ */
+int channel_kernel_disable(struct ltt_kernel_session *ksession,
+               char *channel_name)
+{
+       int ret;
+       struct ltt_kernel_channel *kchan;
+
+       LTTNG_ASSERT(ksession);
+       LTTNG_ASSERT(channel_name);
+
+       kchan = trace_kernel_get_channel_by_name(channel_name, ksession);
+       if (kchan == NULL) {
+               ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
+               goto error;
+       }
+
+       /* Only disable the channel if it is currently enabled. */
+       if (kchan->enabled == 1) {
+               ret = kernel_disable_channel(kchan);
+               if (ret < 0 && ret != -EEXIST) {
+                       ret = LTTNG_ERR_KERN_CHAN_DISABLE_FAIL;
+                       goto error;
+               }
+       }
+
+       ret = LTTNG_OK;
+
+error:
+       return ret;
+}
+
+/*
+ * Enable kernel channel of the kernel session.
+ */
+int channel_kernel_enable(struct ltt_kernel_session *ksession,
+               struct ltt_kernel_channel *kchan)
+{
+       int ret;
+
+       LTTNG_ASSERT(ksession);
+       LTTNG_ASSERT(kchan);
+
+       if (kchan->enabled == 0) {
+               ret = kernel_enable_channel(kchan);
+               if (ret < 0) {
+                       ret = LTTNG_ERR_KERN_CHAN_ENABLE_FAIL;
+                       goto error;
+               }
+       } else {
+               ret = LTTNG_ERR_KERN_CHAN_EXIST;
+               goto error;
+       }
+
+       ret = LTTNG_OK;
+
+error:
+       return ret;
+}
+
+static int channel_validate(struct lttng_channel *attr)
+{
+       /*
+        * The ringbuffer (both in user space and kernel) behaves badly
+        * in overwrite mode and with fewer than 2 subbuffers, so block it
+        * right away and send back an invalid attribute error.
+        */
+       if (attr->attr.overwrite && attr->attr.num_subbuf < 2) {
+               return -1;
+       }
+       return 0;
+}
+
+static int channel_validate_kernel(struct lttng_channel *attr)
+{
+       /* Kernel channels do not support blocking timeout. */
+       if (((struct lttng_channel_extended *)attr->attr.extended.ptr)->blocking_timeout) {
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * Create kernel channel of the kernel session and notify kernel thread.
+ */
+int channel_kernel_create(struct ltt_kernel_session *ksession,
+               struct lttng_channel *attr, int kernel_pipe)
+{
+       int ret;
+       struct lttng_channel *defattr = NULL;
+
+       LTTNG_ASSERT(ksession);
+
+       /* Creating channel attributes if needed */
+       if (attr == NULL) {
+               defattr = channel_new_default_attr(LTTNG_DOMAIN_KERNEL,
+                               LTTNG_BUFFER_GLOBAL);
+               if (defattr == NULL) {
+                       ret = LTTNG_ERR_FATAL;
+                       goto error;
+               }
+               attr = defattr;
+       }
+
+       /*
+        * Set the overwrite mode for this channel based on the session
+        * type unless the client explicitly overrides the channel mode.
+        */
+       if (attr->attr.overwrite == DEFAULT_CHANNEL_OVERWRITE) {
+               attr->attr.overwrite = !!ksession->snapshot_mode;
+       }
+
+       /* Validate common channel properties. */
+       if (channel_validate(attr) < 0) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       if (channel_validate_kernel(attr) < 0) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       /* Channel not found, creating it */
+       ret = kernel_create_channel(ksession, attr);
+       if (ret < 0) {
+               ret = LTTNG_ERR_KERN_CHAN_FAIL;
+               goto error;
+       }
+
+       /* Notify kernel thread that there is a new channel */
+       ret = notify_thread_pipe(kernel_pipe);
+       if (ret < 0) {
+               ret = LTTNG_ERR_FATAL;
+               goto error;
+       }
+
+       ret = LTTNG_OK;
+error:
+       channel_attr_destroy(defattr);
+       return ret;
+}
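
Aside (not part of the patch): notify_thread_pipe() is, in essence, the classic self-pipe wake-up trick: write one byte to a pipe whose read end sits in the target thread's poll set. A minimal sketch of the idiom, with a hypothetical notify_via_pipe() name (not the sessiond implementation):

    #include <unistd.h>

    /* Wake up whichever thread polls the read end of this pipe. */
    static int notify_via_pipe(int pipe_write_fd)
    {
            const char dummy = '!';
            ssize_t ret;

            ret = write(pipe_write_fd, &dummy, sizeof(dummy));
            return ret == (ssize_t) sizeof(dummy) ? 0 : -1;
    }
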
+
+/*
+ * Enable UST channel for session and domain.
+ */
+int channel_ust_enable(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan)
+{
+       int ret = LTTNG_OK;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(uchan);
+
+       /* If already enabled, everything is OK */
+       if (uchan->enabled) {
+               DBG3("Channel %s already enabled. Skipping", uchan->name);
+               ret = LTTNG_ERR_UST_CHAN_EXIST;
+               goto end;
+       } else {
+               uchan->enabled = 1;
+               DBG2("Channel %s enabled successfully", uchan->name);
+       }
+
+       if (!usess->active) {
+               /*
+                * The channel will be activated against the apps
+                * when the session is started as part of the
+                * application channel "synchronize" operation.
+                */
+               goto end;
+       }
+
+       DBG2("Channel %s being enabled in UST domain", uchan->name);
+
+       /*
+        * Enable channel for UST global domain on all applications. Ignore return
+        * value here since whatever error we got, it means that the channel was
+        * not created on one or more registered applications and we cannot report
+        * this to the user yet. However, at this stage, the channel was
+        * successfully created on the session daemon side so the enable-channel
+        * command is a success.
+        */
+       (void) ust_app_enable_channel_glb(usess, uchan);
+
+
+end:
+       return ret;
+}
+
+/*
+ * Create UST channel for session and domain.
+ */
+int channel_ust_create(struct ltt_ust_session *usess,
+               struct lttng_channel *attr, enum lttng_buffer_type type)
+{
+       int ret = LTTNG_OK;
+       struct ltt_ust_channel *uchan = NULL;
+       struct lttng_channel *defattr = NULL;
+       enum lttng_domain_type domain = LTTNG_DOMAIN_UST;
+       bool chan_published = false;
+
+       LTTNG_ASSERT(usess);
+
+       /* Creating channel attributes if needed */
+       if (attr == NULL) {
+               defattr = channel_new_default_attr(LTTNG_DOMAIN_UST, type);
+               if (defattr == NULL) {
+                       ret = LTTNG_ERR_FATAL;
+                       goto error;
+               }
+               attr = defattr;
+       } else {
+               /*
+                * HACK: Set the channel's subdomain (JUL, Log4j, Python, etc.)
+                * based on the default name.
+                */
+               if (!strcmp(attr->name, DEFAULT_JUL_CHANNEL_NAME)) {
+                       domain = LTTNG_DOMAIN_JUL;
+               } else if (!strcmp(attr->name, DEFAULT_LOG4J_CHANNEL_NAME)) {
+                       domain = LTTNG_DOMAIN_LOG4J;
+               } else if (!strcmp(attr->name, DEFAULT_PYTHON_CHANNEL_NAME)) {
+                       domain = LTTNG_DOMAIN_PYTHON;
+               }
+       }
+
+       /*
+        * Set the overwrite mode for this channel based on the session
+        * type unless the client explicitly overrides the channel mode.
+        */
+       if (attr->attr.overwrite == DEFAULT_CHANNEL_OVERWRITE) {
+               attr->attr.overwrite = !!usess->snapshot_mode;
+       }
+
+       /* Enforce mmap output for snapshot sessions. */
+       if (usess->snapshot_mode) {
+               attr->attr.output = LTTNG_EVENT_MMAP;
+       }
+
+       /* Validate common channel properties. */
+       if (channel_validate(attr) < 0) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       /*
+        * Validate UST buffer size and number of buffers: both must be powers of 2
+        * and nonzero. We validate right here for UST, because applications will
+        * not report the error to the user (unlike kernel tracing).
+        */
+       if (!attr->attr.subbuf_size ||
+                       (attr->attr.subbuf_size & (attr->attr.subbuf_size - 1))) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       /*
+        * The subbuffer size is invalid if it is lower than the page size.
+        */
+       if (attr->attr.subbuf_size < the_page_size) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       if (!attr->attr.num_subbuf ||
+                       (attr->attr.num_subbuf & (attr->attr.num_subbuf - 1))) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       if (attr->attr.output != LTTNG_EVENT_MMAP) {
+               ret = LTTNG_ERR_NOT_SUPPORTED;
+               goto error;
+       }
+
+       /*
+        * The tracefile_size should not be smaller than the subbuf_size, otherwise
+        * we won't be able to write the packets to disk.
+        */
+       if ((attr->attr.tracefile_size > 0) &&
+                       (attr->attr.tracefile_size < attr->attr.subbuf_size)) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       /* Validate buffer type. */
+       switch (type) {
+       case LTTNG_BUFFER_PER_PID:
+               break;
+       case LTTNG_BUFFER_PER_UID:
+               break;
+       default:
+               ret = LTTNG_ERR_BUFFER_NOT_SUPPORTED;
+               goto error;
+       }
+
+       /* Create UST channel */
+       uchan = trace_ust_create_channel(attr, domain);
+       if (uchan == NULL) {
+               ret = LTTNG_ERR_FATAL;
+               goto error;
+       }
+
+       uchan->enabled = 1;
+       if (trace_ust_is_max_id(usess->used_channel_id)) {
+               ret = LTTNG_ERR_UST_CHAN_FAIL;
+               goto error;
+       }
+       uchan->id = trace_ust_get_next_chan_id(usess);
+
+       DBG2("Channel %s is being created for UST with buffer %d and id %" PRIu64,
+                       uchan->name, type, uchan->id);
+
+       /* Flag session buffer type. */
+       if (!usess->buffer_type_changed) {
+               usess->buffer_type = type;
+               usess->buffer_type_changed = 1;
+       } else if (usess->buffer_type != type) {
+               /* Buffer type was already set. Refuse to create channel. */
+               ret = LTTNG_ERR_BUFFER_TYPE_MISMATCH;
+               goto error_free_chan;
+       }
+
+       /* Adding the channel to the channel hash table. */
+       rcu_read_lock();
+       if (strncmp(uchan->name, DEFAULT_METADATA_NAME,
+                               sizeof(uchan->name))) {
+               lttng_ht_add_unique_str(usess->domain_global.channels, &uchan->node);
+               chan_published = true;
+       } else {
+               /*
+                * Copy the channel attributes to the session if this is the metadata
+                * channel, so that the shadow copy stays accessible during the global
+                * update of newly registered applications even when none exist yet.
+                */
+               memcpy(&usess->metadata_attr, &uchan->attr,
+                               sizeof(usess->metadata_attr));
+       }
+       rcu_read_unlock();
+
+       DBG2("Channel %s created successfully", uchan->name);
+       if (domain != LTTNG_DOMAIN_UST) {
+               struct agent *agt = trace_ust_find_agent(usess, domain);
+
+               if (!agt) {
+                       agt = agent_create(domain);
+                       if (!agt) {
+                               ret = LTTNG_ERR_NOMEM;
+                               goto error_remove_chan;
+                       }
+                       agent_add(agt, usess->agents);
+               }
+       }
+
+       channel_attr_destroy(defattr);
+       return LTTNG_OK;
+
+error_remove_chan:
+       if (chan_published) {
+               trace_ust_delete_channel(usess->domain_global.channels, uchan);
+       }
+error_free_chan:
+       trace_ust_destroy_channel(uchan);
+error:
+       channel_attr_destroy(defattr);
+       return ret;
+}
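
Aside (not part of the patch): the subbuf_size/num_subbuf validation in channel_ust_create() relies on the x & (x - 1) trick: for nonzero x, that expression clears the lowest set bit, so it is zero exactly when x has a single bit set, i.e. when x is a power of two. A minimal sketch:

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_power_of_two(uint64_t x)
    {
            /* 0 is rejected; otherwise x & (x - 1) clears the lowest set bit. */
            return x != 0 && (x & (x - 1)) == 0;
    }

    /* is_power_of_two(4096) -> true, is_power_of_two(4095) -> false */
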
+
+/*
+ * Disable UST channel for session and domain.
+ */
+int channel_ust_disable(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan)
+{
+       int ret = LTTNG_OK;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(uchan);
+
+       /* Already disabled */
+       if (uchan->enabled == 0) {
+               DBG2("Channel UST %s already disabled", uchan->name);
+               goto end;
+       }
+
+       uchan->enabled = 0;
+
+       /*
+        * If session is inactive we don't notify the tracer right away. We
+        * wait for the next synchronization.
+        */
+       if (!usess->active) {
+               goto end;
+       }
+
+       DBG2("Channel %s being disabled in UST global domain", uchan->name);
+       /* Disable channel for global domain */
+       ret = ust_app_disable_channel_glb(usess, uchan);
+       if (ret < 0 && ret != -LTTNG_UST_ERR_EXIST) {
+               ret = LTTNG_ERR_UST_CHAN_DISABLE_FAIL;
+               goto error;
+       }
+
+       DBG2("Channel %s disabled successfully", uchan->name);
+
+       return LTTNG_OK;
+
+end:
+error:
+       return ret;
+}
diff --git a/src/bin/lttng-sessiond/clear.c b/src/bin/lttng-sessiond/clear.c
deleted file mode 100644 (file)
index e699caf..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright (C) 2019 Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <inttypes.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <common/defaults.h>
-#include <common/error.h>
-#include <common/utils.h>
-
-#include "clear.h"
-#include "session.h"
-#include "ust-app.h"
-#include "kernel.h"
-#include "cmd.h"
-
-struct cmd_clear_session_reply_context {
-       int reply_sock_fd;
-};
-
-static
-void cmd_clear_session_reply(const struct ltt_session *session,
-               void *_reply_context)
-{
-       int ret;
-       ssize_t comm_ret;
-       const struct cmd_clear_session_reply_context *reply_context =
-                       _reply_context;
-       struct lttcomm_lttng_msg llm = {
-               .cmd_type = LTTNG_CLEAR_SESSION,
-               .ret_code = LTTNG_OK,
-               .pid = UINT32_MAX,
-               .cmd_header_size = 0,
-               .data_size = 0,
-       };
-
-       DBG("End of clear command: replying to client");
-       comm_ret = lttcomm_send_unix_sock(reply_context->reply_sock_fd,
-                       &llm, sizeof(llm));
-       if (comm_ret != (ssize_t) sizeof(llm)) {
-               ERR("Failed to send result of session \"%s\" clear to client",
-                               session->name);
-       }
-       ret = close(reply_context->reply_sock_fd);
-       if (ret) {
-               PERROR("Failed to close client socket in deferred session clear reply");
-       }
-       free(_reply_context);
-}
-
-int cmd_clear_session(struct ltt_session *session, int *sock_fd)
-{
-       int ret = LTTNG_OK;
-       struct cmd_clear_session_reply_context *reply_context = NULL;
-       bool session_was_active = false;
-       struct ltt_kernel_session *ksession;
-       struct ltt_ust_session *usess;
-
-       ksession = session->kernel_session;
-       usess = session->ust_session;
-
-       if (sock_fd) {
-               reply_context = zmalloc(sizeof(*reply_context));
-               if (!reply_context) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto end;
-               }
-               reply_context->reply_sock_fd = *sock_fd;
-       }
-
-       if (!session->has_been_started) {
-                /*
-                 * Nothing to be cleared, this is not an error: there is
-                 * indeed nothing to do, and there is no reason why we
-                 * should return an error to the user.
-                 */
-                goto end;
-       }
-
-       /* Unsupported feature in lttng-relayd before 2.11. */
-       if (session->consumer->type == CONSUMER_DST_NET &&
-                       (session->consumer->relay_major_version == 2 &&
-                       session->consumer->relay_minor_version < 12)) {
-               ret = LTTNG_ERR_CLEAR_NOT_AVAILABLE_RELAY;
-               goto end;
-       }
-       if (session->consumer->type == CONSUMER_DST_NET &&
-                       !session->consumer->relay_allows_clear) {
-               ret = LTTNG_ERR_CLEAR_NOT_AVAILABLE_RELAY;
-               goto end;
-       }
-
-       /*
-        * After a stop followed by a clear, all subsequent clear are
-        * effect-less until start is performed.
-        */
-       if (session->cleared_after_last_stop) {
-               ret = LTTNG_OK;
-               goto end;
-       }
-
-       /*
-        * After a stop followed by a rotation, all subsequent clear are effect-less
-        * until start is performed.
-        */
-       if (session->rotated_after_last_stop) {
-               ret = LTTNG_OK;
-               goto end;
-       }
-
-       session_was_active = session->active;
-       if (session_was_active) {
-               ret = stop_kernel_session(ksession);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-               if (usess && usess->active) {
-                       ret = ust_app_stop_trace_all(usess);
-                       if (ret < 0) {
-                               ret = LTTNG_ERR_UST_STOP_FAIL;
-                               goto end;
-                       }
-               }
-       }
-
-       /*
-        * Clear active kernel and UST session buffers.
-        */
-       if (session->kernel_session) {
-               ret = kernel_clear_session(session);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-       if (session->ust_session) {
-               ret = ust_app_clear_session(session);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-
-       if (session->output_traces) {
-               /*
-                * Use rotation to delete local and remote stream files.
-                */
-               if (reply_context) {
-                       ret = session_add_clear_notifier(session,
-                                       cmd_clear_session_reply,
-                                       (void *) reply_context);
-                       if (ret) {
-                               ret = LTTNG_ERR_FATAL;
-                               goto end;
-                       }
-                       /*
-                        * On success, ownership of reply_context has been
-                        * passed to session_add_clear_notifier().
-                        */
-                       reply_context = NULL;
-                       *sock_fd = -1;
-               }
-               ret = cmd_rotate_session(session, NULL, true,
-                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-       if (!session->active) {
-               session->cleared_after_last_stop = true;
-       }
-       if (session_was_active) {
-               /* Kernel tracing */
-               if (ksession != NULL) {
-                       DBG("Start kernel tracing session \"%s\"",
-                                       session->name);
-                       ret = start_kernel_session(ksession);
-                       if (ret != LTTNG_OK) {
-                               goto end;
-                       }
-               }
-
-               /* Flag session that trace should start automatically */
-               if (usess) {
-                       int int_ret = ust_app_start_trace_all(usess);
-
-                       if (int_ret < 0) {
-                               ret = LTTNG_ERR_UST_START_FAIL;
-                               goto end;
-                       }
-               }
-
-               /*
-                * Open a packet in every stream of the session to ensure that
-                * viewers can correctly identify the boundaries of the periods
-                * during which tracing was active for this session.
-                */
-               ret = session_open_packets(session);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-       ret = LTTNG_OK;
-end:
-       free(reply_context);
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/clear.cpp b/src/bin/lttng-sessiond/clear.cpp
new file mode 100644 (file)
index 0000000..1239c5f
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2019 Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <inttypes.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <common/defaults.h>
+#include <common/error.h>
+#include <common/utils.h>
+
+#include "clear.h"
+#include "session.h"
+#include "ust-app.h"
+#include "kernel.h"
+#include "cmd.h"
+
+struct cmd_clear_session_reply_context {
+       int reply_sock_fd;
+};
+
+static
+void cmd_clear_session_reply(const struct ltt_session *session,
+               void *_reply_context)
+{
+       int ret;
+       ssize_t comm_ret;
+       const struct cmd_clear_session_reply_context *reply_context =
+                       (cmd_clear_session_reply_context *) _reply_context;
+       struct lttcomm_lttng_msg llm = {
+               .cmd_type = LTTNG_CLEAR_SESSION,
+               .ret_code = LTTNG_OK,
+               .pid = UINT32_MAX,
+               .cmd_header_size = 0,
+               .data_size = 0,
+       };
+
+       DBG("End of clear command: replying to client");
+       comm_ret = lttcomm_send_unix_sock(reply_context->reply_sock_fd,
+                       &llm, sizeof(llm));
+       if (comm_ret != (ssize_t) sizeof(llm)) {
+               ERR("Failed to send result of session \"%s\" clear to client",
+                               session->name);
+       }
+       ret = close(reply_context->reply_sock_fd);
+       if (ret) {
+               PERROR("Failed to close client socket in deferred session clear reply");
+       }
+       free(_reply_context);
+}
+
+int cmd_clear_session(struct ltt_session *session, int *sock_fd)
+{
+       int ret = LTTNG_OK;
+       struct cmd_clear_session_reply_context *reply_context = NULL;
+       bool session_was_active = false;
+       struct ltt_kernel_session *ksession;
+       struct ltt_ust_session *usess;
+
+       ksession = session->kernel_session;
+       usess = session->ust_session;
+
+       if (sock_fd) {
+               reply_context = (cmd_clear_session_reply_context *) zmalloc(sizeof(*reply_context));
+               if (!reply_context) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+               reply_context->reply_sock_fd = *sock_fd;
+       }
+
+       if (!session->has_been_started) {
+               /*
+                * Nothing to be cleared; this is not an error: there is
+                * indeed nothing to do, and there is no reason to return
+                * an error to the user.
+                */
+               goto end;
+       }
+
+       /* Unsupported feature in lttng-relayd before 2.12. */
+       if (session->consumer->type == CONSUMER_DST_NET &&
+                       (session->consumer->relay_major_version == 2 &&
+                       session->consumer->relay_minor_version < 12)) {
+               ret = LTTNG_ERR_CLEAR_NOT_AVAILABLE_RELAY;
+               goto end;
+       }
+       if (session->consumer->type == CONSUMER_DST_NET &&
+                       !session->consumer->relay_allows_clear) {
+               ret = LTTNG_ERR_CLEAR_NOT_AVAILABLE_RELAY;
+               goto end;
+       }
+
+       /*
+        * After a stop followed by a clear, all subsequent clear operations
+        * have no effect until a start is performed.
+        */
+       if (session->cleared_after_last_stop) {
+               ret = LTTNG_OK;
+               goto end;
+       }
+
+       /*
+        * After a stop followed by a rotation, all subsequent clear operations
+        * have no effect until a start is performed.
+        */
+       if (session->rotated_after_last_stop) {
+               ret = LTTNG_OK;
+               goto end;
+       }
+
+       session_was_active = session->active;
+       if (session_was_active) {
+               ret = stop_kernel_session(ksession);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+               if (usess && usess->active) {
+                       ret = ust_app_stop_trace_all(usess);
+                       if (ret < 0) {
+                               ret = LTTNG_ERR_UST_STOP_FAIL;
+                               goto end;
+                       }
+               }
+       }
+
+       /*
+        * Clear active kernel and UST session buffers.
+        */
+       if (session->kernel_session) {
+               ret = kernel_clear_session(session);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+       if (session->ust_session) {
+               ret = ust_app_clear_session(session);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+
+       if (session->output_traces) {
+               /*
+                * Use rotation to delete local and remote stream files.
+                */
+               if (reply_context) {
+                       ret = session_add_clear_notifier(session,
+                                       cmd_clear_session_reply,
+                                       (void *) reply_context);
+                       if (ret) {
+                               ret = LTTNG_ERR_FATAL;
+                               goto end;
+                       }
+                       /*
+                        * On success, ownership of reply_context has been
+                        * passed to session_add_clear_notifier().
+                        */
+                       reply_context = NULL;
+                       *sock_fd = -1;
+               }
+               ret = cmd_rotate_session(session, NULL, true,
+                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+       if (!session->active) {
+               session->cleared_after_last_stop = true;
+       }
+       if (session_was_active) {
+               /* Kernel tracing */
+               if (ksession != NULL) {
+                       DBG("Start kernel tracing session \"%s\"",
+                                       session->name);
+                       ret = start_kernel_session(ksession);
+                       if (ret != LTTNG_OK) {
+                               goto end;
+                       }
+               }
+
+               /* Restart user space (UST) tracing for the session. */
+               if (usess) {
+                       int int_ret = ust_app_start_trace_all(usess);
+
+                       if (int_ret < 0) {
+                               ret = LTTNG_ERR_UST_START_FAIL;
+                               goto end;
+                       }
+               }
+
+               /*
+                * Open a packet in every stream of the session to ensure that
+                * viewers can correctly identify the boundaries of the periods
+                * during which tracing was active for this session.
+                */
+               ret = session_open_packets(session);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+       ret = LTTNG_OK;
+end:
+       free(reply_context);
+       return ret;
+}
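
Note (illustration only, not part of the applied diff): judging from the two
hunks above, the only change needed for clear.c to build as clear.cpp is the
pair of explicit casts on "void *" values, on the _reply_context callback
argument and on the zmalloc() return. C converts "void *" to any object
pointer type implicitly; C++ rejects that conversion. Below is a minimal
standalone sketch of the same situation compiled as C++, using made-up names
(reply_ctx, opaque_zalloc) rather than anything from the tree:

    #include <cstdlib>

    struct reply_ctx {
            int reply_sock_fd;
    };

    /* Stand-in for an allocator that returns zeroed, untyped memory. */
    static void *opaque_zalloc(std::size_t size)
    {
            return std::calloc(1, size);
    }

    int main()
    {
            /*
             * Valid C, but rejected by a C++ compiler (no implicit
             * conversion from void * to reply_ctx *):
             *
             *   reply_ctx *ctx = opaque_zalloc(sizeof(*ctx));
             */

            /* The form used in the converted .cpp file: an explicit cast. */
            reply_ctx *ctx = (reply_ctx *) opaque_zalloc(sizeof(*ctx));

            if (!ctx) {
                    return 1;
            }

            ctx->reply_sock_fd = -1;
            std::free(ctx);
            return 0;
    }
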
diff --git a/src/bin/lttng-sessiond/client.c b/src/bin/lttng-sessiond/client.c
deleted file mode 100644 (file)
index ed9f2af..0000000
+++ /dev/null
@@ -1,2906 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include "common/buffer-view.h"
-#include "common/compat/socket.h"
-#include "common/dynamic-array.h"
-#include "common/dynamic-buffer.h"
-#include "common/fd-handle.h"
-#include "common/payload-view.h"
-#include "common/payload.h"
-#include "common/sessiond-comm/sessiond-comm.h"
-#include "lttng/lttng-error.h"
-#include "lttng/tracker.h"
-#include <common/compat/getenv.h>
-#include <common/tracker.h>
-#include <common/unix.h>
-#include <common/utils.h>
-#include <lttng/error-query-internal.h>
-#include <lttng/event-internal.h>
-#include <lttng/session-descriptor-internal.h>
-#include <lttng/session-internal.h>
-#include <lttng/userspace-probe-internal.h>
-#include <pthread.h>
-#include <signal.h>
-#include <stddef.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include "agent-thread.h"
-#include "clear.h"
-#include "client.h"
-#include "cmd.h"
-#include "health-sessiond.h"
-#include "kernel.h"
-#include "lttng-sessiond.h"
-#include "manage-consumer.h"
-#include "save.h"
-#include "testpoint.h"
-#include "utils.h"
-
-static bool is_root;
-
-static struct thread_state {
-       sem_t ready;
-       bool running;
-       int client_sock;
-} thread_state;
-
-static void set_thread_status(bool running)
-{
-       DBG("Marking client thread's state as %s", running ? "running" : "error");
-       thread_state.running = running;
-       sem_post(&thread_state.ready);
-}
-
-static bool wait_thread_status(void)
-{
-       DBG("Waiting for client thread to be ready");
-       sem_wait(&thread_state.ready);
-       if (thread_state.running) {
-               DBG("Client thread is ready");
-       } else {
-               ERR("Initialization of client thread failed");
-       }
-
-       return thread_state.running;
-}
-
-/*
- * Setup the outgoing data buffer for the response (llm) by allocating the
- * right amount of memory and copying the original information from the lsm
- * structure.
- *
- * Return 0 on success, negative value on error.
- */
-static int setup_lttng_msg(struct command_ctx *cmd_ctx,
-       const void *payload_buf, size_t payload_len,
-       const void *cmd_header_buf, size_t cmd_header_len)
-{
-       int ret = 0;
-       const size_t header_len = sizeof(struct lttcomm_lttng_msg);
-       const size_t total_msg_size = header_len + cmd_header_len + payload_len;
-       const struct lttcomm_lttng_msg llm = {
-               .cmd_type = cmd_ctx->lsm.cmd_type,
-               .pid = cmd_ctx->lsm.domain.attr.pid,
-               .cmd_header_size = cmd_header_len,
-               .data_size = payload_len,
-       };
-
-       ret = lttng_dynamic_buffer_set_size(&cmd_ctx->reply_payload.buffer, 0);
-       if (ret) {
-               goto end;
-       }
-
-       lttng_dynamic_pointer_array_clear(&cmd_ctx->reply_payload._fd_handles);
-
-       cmd_ctx->lttng_msg_size = total_msg_size;
-
-       /* Append reply header. */
-       ret = lttng_dynamic_buffer_append(
-                       &cmd_ctx->reply_payload.buffer, &llm, sizeof(llm));
-       if (ret) {
-               goto end;
-       }
-
-       /* Append command header. */
-       if (cmd_header_len) {
-               ret = lttng_dynamic_buffer_append(
-                               &cmd_ctx->reply_payload.buffer, cmd_header_buf,
-                               cmd_header_len);
-               if (ret) {
-                       goto end;
-               }
-       }
-
-       /* Append payload. */
-       if (payload_len) {
-               ret = lttng_dynamic_buffer_append(
-                               &cmd_ctx->reply_payload.buffer, payload_buf,
-                               payload_len);
-               if (ret) {
-                       goto end;
-               }
-       }
-
-end:
-       return ret;
-}
-
-static int setup_empty_lttng_msg(struct command_ctx *cmd_ctx)
-{
-       int ret;
-       const struct lttcomm_lttng_msg llm = {};
-
-       ret = lttng_dynamic_buffer_set_size(&cmd_ctx->reply_payload.buffer, 0);
-       if (ret) {
-               goto end;
-       }
-
-       /* Append place-holder reply header. */
-       ret = lttng_dynamic_buffer_append(
-                       &cmd_ctx->reply_payload.buffer, &llm, sizeof(llm));
-       if (ret) {
-               goto end;
-       }
-
-       cmd_ctx->lttng_msg_size = sizeof(llm);
-end:
-       return ret;
-}
-
-static void update_lttng_msg(struct command_ctx *cmd_ctx, size_t cmd_header_len,
-               size_t payload_len)
-{
-       const size_t header_len = sizeof(struct lttcomm_lttng_msg);
-       const size_t total_msg_size = header_len + cmd_header_len + payload_len;
-       const struct lttcomm_lttng_msg llm = {
-               .cmd_type = cmd_ctx->lsm.cmd_type,
-               .pid = cmd_ctx->lsm.domain.attr.pid,
-               .cmd_header_size = cmd_header_len,
-               .data_size = payload_len,
-       };
-       struct lttcomm_lttng_msg *p_llm;
-
-       LTTNG_ASSERT(cmd_ctx->reply_payload.buffer.size >= sizeof(llm));
-
-       p_llm = (typeof(p_llm)) cmd_ctx->reply_payload.buffer.data;
-
-       /* Update existing header. */
-       memcpy(p_llm, &llm, sizeof(llm));
-
-       cmd_ctx->lttng_msg_size = total_msg_size;
-}
-
-/*
- * Start the thread_manage_consumer. This must be done after a lttng-consumerd
- * exec or it will fail.
- */
-static int spawn_consumer_thread(struct consumer_data *consumer_data)
-{
-       return launch_consumer_management_thread(consumer_data) ? 0 : -1;
-}
-
-/*
- * Fork and exec a consumer daemon (consumerd).
- *
- * Return pid if successful else -1.
- */
-static pid_t spawn_consumerd(struct consumer_data *consumer_data)
-{
-       int ret;
-       pid_t pid;
-       const char *consumer_to_use;
-       const char *verbosity;
-       struct stat st;
-
-       DBG("Spawning consumerd");
-
-       pid = fork();
-       if (pid == 0) {
-               /*
-                * Exec consumerd.
-                */
-               if (the_config.verbose_consumer) {
-                       verbosity = "--verbose";
-               } else if (lttng_opt_quiet) {
-                       verbosity = "--quiet";
-               } else {
-                       verbosity = "";
-               }
-
-               switch (consumer_data->type) {
-               case LTTNG_CONSUMER_KERNEL:
-                       /*
-                        * Find out which consumerd to execute. We will first try the
-                        * 64-bit path, then the sessiond's installation directory, and
-                        * fallback on the 32-bit one,
-                        */
-                       DBG3("Looking for a kernel consumer at these locations:");
-                       DBG3("  1) %s", the_config.consumerd64_bin_path.value ? : "NULL");
-                       DBG3("  2) %s/%s", INSTALL_BIN_PATH, DEFAULT_CONSUMERD_FILE);
-                       DBG3("  3) %s", the_config.consumerd32_bin_path.value ? : "NULL");
-                       if (stat(the_config.consumerd64_bin_path.value, &st) == 0) {
-                               DBG3("Found location #1");
-                               consumer_to_use = the_config.consumerd64_bin_path.value;
-                       } else if (stat(INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE, &st) == 0) {
-                               DBG3("Found location #2");
-                               consumer_to_use = INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE;
-                       } else if (the_config.consumerd32_bin_path.value &&
-                                       stat(the_config.consumerd32_bin_path.value, &st) == 0) {
-                               DBG3("Found location #3");
-                               consumer_to_use = the_config.consumerd32_bin_path.value;
-                       } else {
-                               DBG("Could not find any valid consumerd executable");
-                               ret = -EINVAL;
-                               goto error;
-                       }
-                       DBG("Using kernel consumer at: %s",  consumer_to_use);
-                       (void) execl(consumer_to_use, "lttng-consumerd",
-                                       verbosity, "-k", "--consumerd-cmd-sock",
-                                       consumer_data->cmd_unix_sock_path,
-                                       "--consumerd-err-sock",
-                                       consumer_data->err_unix_sock_path,
-                                       "--group",
-                                       the_config.tracing_group_name.value,
-                                       NULL);
-                       break;
-               case LTTNG_CONSUMER64_UST:
-               {
-                       if (the_config.consumerd64_lib_dir.value) {
-                               const char *tmp;
-                               size_t tmplen;
-                               char *tmpnew;
-
-                               tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
-                               if (!tmp) {
-                                       tmp = "";
-                               }
-                               tmplen = strlen(the_config.consumerd64_lib_dir.value) + 1 /* : */ + strlen(tmp);
-                               tmpnew = zmalloc(tmplen + 1 /* \0 */);
-                               if (!tmpnew) {
-                                       ret = -ENOMEM;
-                                       goto error;
-                               }
-                               strcat(tmpnew, the_config.consumerd64_lib_dir.value);
-                               if (tmp[0] != '\0') {
-                                       strcat(tmpnew, ":");
-                                       strcat(tmpnew, tmp);
-                               }
-                               ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
-                               free(tmpnew);
-                               if (ret) {
-                                       ret = -errno;
-                                       goto error;
-                               }
-                       }
-                       DBG("Using 64-bit UST consumer at: %s",
-                                       the_config.consumerd64_bin_path.value);
-                       (void) execl(the_config.consumerd64_bin_path.value,
-                                       "lttng-consumerd", verbosity, "-u",
-                                       "--consumerd-cmd-sock",
-                                       consumer_data->cmd_unix_sock_path,
-                                       "--consumerd-err-sock",
-                                       consumer_data->err_unix_sock_path,
-                                       "--group",
-                                       the_config.tracing_group_name.value,
-                                       NULL);
-                       break;
-               }
-               case LTTNG_CONSUMER32_UST:
-               {
-                       if (the_config.consumerd32_lib_dir.value) {
-                               const char *tmp;
-                               size_t tmplen;
-                               char *tmpnew;
-
-                               tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
-                               if (!tmp) {
-                                       tmp = "";
-                               }
-                               tmplen = strlen(the_config.consumerd32_lib_dir.value) + 1 /* : */ + strlen(tmp);
-                               tmpnew = zmalloc(tmplen + 1 /* \0 */);
-                               if (!tmpnew) {
-                                       ret = -ENOMEM;
-                                       goto error;
-                               }
-                               strcat(tmpnew, the_config.consumerd32_lib_dir.value);
-                               if (tmp[0] != '\0') {
-                                       strcat(tmpnew, ":");
-                                       strcat(tmpnew, tmp);
-                               }
-                               ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
-                               free(tmpnew);
-                               if (ret) {
-                                       ret = -errno;
-                                       goto error;
-                               }
-                       }
-                       DBG("Using 32-bit UST consumer at: %s",
-                                       the_config.consumerd32_bin_path.value);
-                       (void) execl(the_config.consumerd32_bin_path.value,
-                                       "lttng-consumerd", verbosity, "-u",
-                                       "--consumerd-cmd-sock",
-                                       consumer_data->cmd_unix_sock_path,
-                                       "--consumerd-err-sock",
-                                       consumer_data->err_unix_sock_path,
-                                       "--group",
-                                       the_config.tracing_group_name.value,
-                                       NULL);
-                       break;
-               }
-               default:
-                       ERR("unknown consumer type");
-                       errno = 0;
-               }
-               if (errno != 0) {
-                       PERROR("Consumer execl()");
-               }
-               /* Reaching this point, we got a failure on our execl(). */
-               exit(EXIT_FAILURE);
-       } else if (pid > 0) {
-               ret = pid;
-       } else {
-               PERROR("start consumer fork");
-               ret = -errno;
-       }
-error:
-       return ret;
-}
-
-/*
- * Spawn the consumerd daemon and session daemon thread.
- */
-static int start_consumerd(struct consumer_data *consumer_data)
-{
-       int ret;
-
-       /*
-        * Set the listen() state on the socket since there is a possible race
-        * between the exec() of the consumer daemon and this call if place in the
-        * consumer thread. See bug #366 for more details.
-        */
-       ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
-       if (ret < 0) {
-               goto error;
-       }
-
-       pthread_mutex_lock(&consumer_data->pid_mutex);
-       if (consumer_data->pid != 0) {
-               pthread_mutex_unlock(&consumer_data->pid_mutex);
-               goto end;
-       }
-
-       ret = spawn_consumerd(consumer_data);
-       if (ret < 0) {
-               ERR("Spawning consumerd failed");
-               pthread_mutex_unlock(&consumer_data->pid_mutex);
-               goto error;
-       }
-
-       /* Setting up the consumer_data pid */
-       consumer_data->pid = ret;
-       DBG2("Consumer pid %d", consumer_data->pid);
-       pthread_mutex_unlock(&consumer_data->pid_mutex);
-
-       DBG2("Spawning consumer control thread");
-       ret = spawn_consumer_thread(consumer_data);
-       if (ret < 0) {
-               ERR("Fatal error spawning consumer control thread");
-               goto error;
-       }
-
-end:
-       return 0;
-
-error:
-       /* Cleanup already created sockets on error. */
-       if (consumer_data->err_sock >= 0) {
-               int err;
-
-               err = close(consumer_data->err_sock);
-               if (err < 0) {
-                       PERROR("close consumer data error socket");
-               }
-       }
-       return ret;
-}
-
-/*
- * Copy consumer output from the tracing session to the domain session. The
- * function also applies the right modification on a per domain basis for the
- * trace files destination directory.
- *
- * Should *NOT* be called with RCU read-side lock held.
- */
-static int copy_session_consumer(int domain, struct ltt_session *session)
-{
-       int ret;
-       const char *dir_name;
-       struct consumer_output *consumer;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(session->consumer);
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               DBG3("Copying tracing session consumer output in kernel session");
-               /*
-                * XXX: We should audit the session creation and what this function
-                * does "extra" in order to avoid a destroy since this function is used
-                * in the domain session creation (kernel and ust) only. Same for UST
-                * domain.
-                */
-               if (session->kernel_session->consumer) {
-                       consumer_output_put(session->kernel_session->consumer);
-               }
-               session->kernel_session->consumer =
-                       consumer_copy_output(session->consumer);
-               /* Ease our life a bit for the next part */
-               consumer = session->kernel_session->consumer;
-               dir_name = DEFAULT_KERNEL_TRACE_DIR;
-               break;
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_PYTHON:
-       case LTTNG_DOMAIN_UST:
-               DBG3("Copying tracing session consumer output in UST session");
-               if (session->ust_session->consumer) {
-                       consumer_output_put(session->ust_session->consumer);
-               }
-               session->ust_session->consumer =
-                       consumer_copy_output(session->consumer);
-               /* Ease our life a bit for the next part */
-               consumer = session->ust_session->consumer;
-               dir_name = DEFAULT_UST_TRACE_DIR;
-               break;
-       default:
-               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
-               goto error;
-       }
-
-       /* Append correct directory to subdir */
-       ret = lttng_strncpy(consumer->domain_subdir, dir_name,
-                       sizeof(consumer->domain_subdir));
-       if (ret) {
-               ret = LTTNG_ERR_UNK;
-               goto error;
-       }
-       DBG3("Copy session consumer subdir %s", consumer->domain_subdir);
-       ret = LTTNG_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Create an UST session and add it to the session ust list.
- *
- * Should *NOT* be called with RCU read-side lock held.
- */
-static int create_ust_session(struct ltt_session *session,
-               const struct lttng_domain *domain)
-{
-       int ret;
-       struct ltt_ust_session *lus = NULL;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(domain);
-       LTTNG_ASSERT(session->consumer);
-
-       switch (domain->type) {
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_PYTHON:
-       case LTTNG_DOMAIN_UST:
-               break;
-       default:
-               ERR("Unknown UST domain on create session %d", domain->type);
-               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
-               goto error;
-       }
-
-       DBG("Creating UST session");
-
-       lus = trace_ust_create_session(session->id);
-       if (lus == NULL) {
-               ret = LTTNG_ERR_UST_SESS_FAIL;
-               goto error;
-       }
-
-       lus->uid = session->uid;
-       lus->gid = session->gid;
-       lus->output_traces = session->output_traces;
-       lus->snapshot_mode = session->snapshot_mode;
-       lus->live_timer_interval = session->live_timer;
-       session->ust_session = lus;
-       if (session->shm_path[0]) {
-               strncpy(lus->root_shm_path, session->shm_path,
-                       sizeof(lus->root_shm_path));
-               lus->root_shm_path[sizeof(lus->root_shm_path) - 1] = '\0';
-               strncpy(lus->shm_path, session->shm_path,
-                       sizeof(lus->shm_path));
-               lus->shm_path[sizeof(lus->shm_path) - 1] = '\0';
-               strncat(lus->shm_path, "/ust",
-                       sizeof(lus->shm_path) - strlen(lus->shm_path) - 1);
-       }
-       /* Copy session output to the newly created UST session */
-       ret = copy_session_consumer(domain->type, session);
-       if (ret != LTTNG_OK) {
-               goto error;
-       }
-
-       return LTTNG_OK;
-
-error:
-       free(lus);
-       session->ust_session = NULL;
-       return ret;
-}
-
-/*
- * Create a kernel tracer session then create the default channel.
- */
-static int create_kernel_session(struct ltt_session *session)
-{
-       int ret;
-
-       DBG("Creating kernel session");
-
-       ret = kernel_create_session(session);
-       if (ret < 0) {
-               ret = LTTNG_ERR_KERN_SESS_FAIL;
-               goto error_create;
-       }
-
-       /* Code flow safety */
-       LTTNG_ASSERT(session->kernel_session);
-
-       /* Copy session output to the newly created Kernel session */
-       ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
-       if (ret != LTTNG_OK) {
-               goto error;
-       }
-
-       session->kernel_session->uid = session->uid;
-       session->kernel_session->gid = session->gid;
-       session->kernel_session->output_traces = session->output_traces;
-       session->kernel_session->snapshot_mode = session->snapshot_mode;
-       session->kernel_session->is_live_session = session->live_timer != 0;
-
-       return LTTNG_OK;
-
-error:
-       trace_kernel_destroy_session(session->kernel_session);
-       session->kernel_session = NULL;
-error_create:
-       return ret;
-}
-
-/*
- * Count number of session permitted by uid/gid.
- */
-static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
-{
-       unsigned int i = 0;
-       struct ltt_session *session;
-       const struct ltt_session_list *session_list = session_get_list();
-
-       DBG("Counting number of available session for UID %d", uid);
-       cds_list_for_each_entry(session, &session_list->head, list) {
-               if (!session_get(session)) {
-                       continue;
-               }
-               session_lock(session);
-               /* Only count the sessions the user can control. */
-               if (session_access_ok(session, uid) &&
-                               !session->destroyed) {
-                       i++;
-               }
-               session_unlock(session);
-               session_put(session);
-       }
-       return i;
-}
-
-static int receive_userspace_probe(struct command_ctx *cmd_ctx, int sock,
-               int *sock_error, struct lttng_event *event)
-{
-       int fd = -1, ret;
-       struct lttng_userspace_probe_location *probe_location;
-       struct lttng_payload probe_location_payload;
-       struct fd_handle *handle = NULL;
-
-       /*
-        * Create a payload to store the serialized version of the probe
-        * location.
-        */
-       lttng_payload_init(&probe_location_payload);
-
-       ret = lttng_dynamic_buffer_set_size(&probe_location_payload.buffer,
-                       cmd_ctx->lsm.u.enable.userspace_probe_location_len);
-       if (ret) {
-               ret = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       /*
-        * Receive the probe location.
-        */
-       ret = lttcomm_recv_unix_sock(sock, probe_location_payload.buffer.data,
-                       probe_location_payload.buffer.size);
-       if (ret <= 0) {
-               DBG("Nothing recv() from client var len data... continuing");
-               *sock_error = 1;
-               ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
-               goto error;
-       }
-
-       /*
-        * Receive the file descriptor to the target binary from the client.
-        */
-       DBG("Receiving userspace probe target FD from client ...");
-       ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
-       if (ret <= 0) {
-               DBG("Nothing recv() from client userspace probe fd... continuing");
-               *sock_error = 1;
-               ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
-               goto error;
-       }
-
-       handle = fd_handle_create(fd);
-       if (!handle) {
-               ret = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       /* Transferred to the handle. */
-       fd = -1;
-
-       ret = lttng_payload_push_fd_handle(&probe_location_payload, handle);
-       if (ret) {
-               ERR("Failed to add userspace probe file descriptor to payload");
-               ret = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       fd_handle_put(handle);
-       handle = NULL;
-
-       {
-               struct lttng_payload_view view = lttng_payload_view_from_payload(
-                       &probe_location_payload, 0, -1);
-
-               /* Extract the probe location from the serialized version. */
-               ret = lttng_userspace_probe_location_create_from_payload(
-                               &view, &probe_location);
-       }
-       if (ret < 0) {
-               WARN("Failed to create a userspace probe location from the received buffer");
-               ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
-               goto error;
-       }
-
-       /* Attach the probe location to the event. */
-       ret = lttng_event_set_userspace_probe_location(event, probe_location);
-       if (ret) {
-               ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
-               goto error;
-       }
-
-error:
-       if (fd >= 0) {
-               if (close(fd)) {
-                       PERROR("Failed to close userspace probe location binary fd");
-               }
-       }
-
-       fd_handle_put(handle);
-       lttng_payload_reset(&probe_location_payload);
-       return ret;
-}
-
-static enum lttng_error_code receive_lttng_trigger(struct command_ctx *cmd_ctx,
-               int sock,
-               int *sock_error,
-               struct lttng_trigger **_trigger)
-{
-       int ret;
-       size_t trigger_len;
-       ssize_t sock_recv_len;
-       enum lttng_error_code ret_code;
-       struct lttng_payload trigger_payload;
-       struct lttng_trigger *trigger = NULL;
-
-       lttng_payload_init(&trigger_payload);
-       trigger_len = (size_t) cmd_ctx->lsm.u.trigger.length;
-       ret = lttng_dynamic_buffer_set_size(
-                       &trigger_payload.buffer, trigger_len);
-       if (ret) {
-               ret_code = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-       sock_recv_len = lttcomm_recv_unix_sock(
-                       sock, trigger_payload.buffer.data, trigger_len);
-       if (sock_recv_len < 0 || sock_recv_len != trigger_len) {
-               ERR("Failed to receive trigger in command payload");
-               *sock_error = 1;
-               ret_code = LTTNG_ERR_INVALID_PROTOCOL;
-               goto end;
-       }
-
-       /* Receive fds, if any. */
-       if (cmd_ctx->lsm.fd_count > 0) {
-               sock_recv_len = lttcomm_recv_payload_fds_unix_sock(
-                               sock, cmd_ctx->lsm.fd_count, &trigger_payload);
-               if (sock_recv_len > 0 &&
-                               sock_recv_len != cmd_ctx->lsm.fd_count * sizeof(int)) {
-                       ERR("Failed to receive all file descriptors for trigger in command payload: expected fd count = %u, ret = %d",
-                                       cmd_ctx->lsm.fd_count, (int) ret);
-                       ret_code = LTTNG_ERR_INVALID_PROTOCOL;
-                       *sock_error = 1;
-                       goto end;
-               } else if (sock_recv_len <= 0) {
-                       ERR("Failed to receive file descriptors for trigger in command payload: expected fd count = %u, ret = %d",
-                                       cmd_ctx->lsm.fd_count, (int) ret);
-                       ret_code = LTTNG_ERR_FATAL;
-                       *sock_error = 1;
-                       goto end;
-               }
-       }
-
-       /* Deserialize trigger. */
-       {
-               struct lttng_payload_view view =
-                               lttng_payload_view_from_payload(
-                                               &trigger_payload, 0, -1);
-
-               if (lttng_trigger_create_from_payload(&view, &trigger) !=
-                               trigger_len) {
-                       ERR("Invalid trigger received as part of command payload");
-                       ret_code = LTTNG_ERR_INVALID_TRIGGER;
-                       lttng_trigger_put(trigger);
-                       goto end;
-               }
-       }
-
-       *_trigger = trigger;
-       ret_code = LTTNG_OK;
-
-end:
-       lttng_payload_reset(&trigger_payload);
-       return ret_code;
-}
-
-static enum lttng_error_code receive_lttng_error_query(struct command_ctx *cmd_ctx,
-               int sock,
-               int *sock_error,
-               struct lttng_error_query **_query)
-{
-       int ret;
-       size_t query_len;
-       ssize_t sock_recv_len;
-       enum lttng_error_code ret_code;
-       struct lttng_payload query_payload;
-       struct lttng_error_query *query = NULL;
-
-       lttng_payload_init(&query_payload);
-       query_len = (size_t) cmd_ctx->lsm.u.error_query.length;
-       ret = lttng_dynamic_buffer_set_size(&query_payload.buffer, query_len);
-       if (ret) {
-               ret_code = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-       sock_recv_len = lttcomm_recv_unix_sock(
-                       sock, query_payload.buffer.data, query_len);
-       if (sock_recv_len < 0 || sock_recv_len != query_len) {
-               ERR("Failed to receive error query in command payload");
-               *sock_error = 1;
-               ret_code = LTTNG_ERR_INVALID_PROTOCOL;
-               goto end;
-       }
-
-       /* Receive fds, if any. */
-       if (cmd_ctx->lsm.fd_count > 0) {
-               sock_recv_len = lttcomm_recv_payload_fds_unix_sock(
-                               sock, cmd_ctx->lsm.fd_count, &query_payload);
-               if (sock_recv_len > 0 &&
-                               sock_recv_len != cmd_ctx->lsm.fd_count * sizeof(int)) {
-                       ERR("Failed to receive all file descriptors for error query in command payload: expected fd count = %u, ret = %d",
-                                       cmd_ctx->lsm.fd_count, (int) ret);
-                       ret_code = LTTNG_ERR_INVALID_PROTOCOL;
-                       *sock_error = 1;
-                       goto end;
-               } else if (sock_recv_len <= 0) {
-                       ERR("Failed to receive file descriptors for error query in command payload: expected fd count = %u, ret = %d",
-                                       cmd_ctx->lsm.fd_count, (int) ret);
-                       ret_code = LTTNG_ERR_FATAL;
-                       *sock_error = 1;
-                       goto end;
-               }
-       }
-
-       /* Deserialize error query. */
-       {
-               struct lttng_payload_view view =
-                               lttng_payload_view_from_payload(
-                                               &query_payload, 0, -1);
-
-               if (lttng_error_query_create_from_payload(&view, &query) !=
-                               query_len) {
-                       ERR("Invalid error query received as part of command payload");
-                       ret_code = LTTNG_ERR_INVALID_PROTOCOL;
-                       goto end;
-               }
-       }
-
-       *_query = query;
-       ret_code = LTTNG_OK;
-
-end:
-       lttng_payload_reset(&query_payload);
-       return ret_code;
-}
-
-/*
- * Version of setup_lttng_msg() without command header.
- */
-static int setup_lttng_msg_no_cmd_header(struct command_ctx *cmd_ctx,
-       void *payload_buf, size_t payload_len)
-{
-       return setup_lttng_msg(cmd_ctx, payload_buf, payload_len, NULL, 0);
-}
-
-/*
- * Check if the current kernel tracer supports the session rotation feature.
- * Return 1 if it does, 0 otherwise.
- */
-static int check_rotate_compatible(void)
-{
-       int ret = 1;
-
-       if (the_kernel_tracer_version.major != 2 ||
-                       the_kernel_tracer_version.minor < 11) {
-               DBG("Kernel tracer version is not compatible with the rotation feature");
-               ret = 0;
-       }
-
-       return ret;
-}
-
-/*
- * Send data on a unix socket using the liblttsessiondcomm API.
- *
- * Return lttcomm error code.
- */
-static int send_unix_sock(int sock, struct lttng_payload_view *view)
-{
-       int ret;
-       const int fd_count = lttng_payload_view_get_fd_handle_count(view);
-
-       /* Check valid length */
-       if (view->buffer.size == 0) {
-               ret = -1;
-               goto end;
-       }
-
-       ret = lttcomm_send_unix_sock(
-                       sock, view->buffer.data, view->buffer.size);
-       if (ret < 0) {
-               goto end;
-       }
-
-       if (fd_count > 0) {
-               ret = lttcomm_send_payload_view_fds_unix_sock(sock, view);
-               if (ret < 0) {
-                       goto end;
-               }
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Process the command requested by the lttng client within the command
- * context structure. This function make sure that the return structure (llm)
- * is set and ready for transmission before returning.
- *
- * Return any error encountered or 0 for success.
- *
- * "sock" is only used for special-case var. len data.
- * A command may assume the ownership of the socket, in which case its value
- * should be set to -1.
- *
- * Should *NOT* be called with RCU read-side lock held.
- */
-static int process_client_msg(struct command_ctx *cmd_ctx, int *sock,
-               int *sock_error)
-{
-       int ret = LTTNG_OK;
-       bool need_tracing_session = true;
-       bool need_domain;
-       bool need_consumerd;
-
-       DBG("Processing client command '%s\' (%d)",
-               lttcomm_sessiond_command_str(cmd_ctx->lsm.cmd_type),
-               cmd_ctx->lsm.cmd_type);
-
-       LTTNG_ASSERT(!rcu_read_ongoing());
-
-       *sock_error = 0;
-
-       switch (cmd_ctx->lsm.cmd_type) {
-       case LTTNG_CREATE_SESSION_EXT:
-       case LTTNG_DESTROY_SESSION:
-       case LTTNG_LIST_SESSIONS:
-       case LTTNG_LIST_DOMAINS:
-       case LTTNG_START_TRACE:
-       case LTTNG_STOP_TRACE:
-       case LTTNG_DATA_PENDING:
-       case LTTNG_SNAPSHOT_ADD_OUTPUT:
-       case LTTNG_SNAPSHOT_DEL_OUTPUT:
-       case LTTNG_SNAPSHOT_LIST_OUTPUT:
-       case LTTNG_SNAPSHOT_RECORD:
-       case LTTNG_SAVE_SESSION:
-       case LTTNG_SET_SESSION_SHM_PATH:
-       case LTTNG_REGENERATE_METADATA:
-       case LTTNG_REGENERATE_STATEDUMP:
-       case LTTNG_ROTATE_SESSION:
-       case LTTNG_ROTATION_GET_INFO:
-       case LTTNG_ROTATION_SET_SCHEDULE:
-       case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
-       case LTTNG_CLEAR_SESSION:
-       case LTTNG_LIST_TRIGGERS:
-       case LTTNG_EXECUTE_ERROR_QUERY:
-               need_domain = false;
-               break;
-       default:
-               need_domain = true;
-       }
-
-       /* Needs a functioning consumerd? */
-       switch (cmd_ctx->lsm.cmd_type) {
-       case LTTNG_REGISTER_TRIGGER:
-       case LTTNG_UNREGISTER_TRIGGER:
-       case LTTNG_EXECUTE_ERROR_QUERY:
-               need_consumerd = false;
-               break;
-       default:
-               need_consumerd = true;
-               break;
-       }
-
-       if (the_config.no_kernel && need_domain &&
-                       cmd_ctx->lsm.domain.type == LTTNG_DOMAIN_KERNEL) {
-               if (!is_root) {
-                       ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
-               } else {
-                       ret = LTTNG_ERR_KERN_NA;
-               }
-               goto error;
-       }
-
-       /* Deny register consumer if we already have a spawned consumer. */
-       if (cmd_ctx->lsm.cmd_type == LTTNG_REGISTER_CONSUMER) {
-               pthread_mutex_lock(&the_kconsumer_data.pid_mutex);
-               if (the_kconsumer_data.pid > 0) {
-                       ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
-                       pthread_mutex_unlock(&the_kconsumer_data.pid_mutex);
-                       goto error;
-               }
-               pthread_mutex_unlock(&the_kconsumer_data.pid_mutex);
-       }
-
-       /*
-        * Check for command that don't needs to allocate a returned payload. We do
-        * this here so we don't have to make the call for no payload at each
-        * command.
-        */
-       switch(cmd_ctx->lsm.cmd_type) {
-       case LTTNG_LIST_SESSIONS:
-       case LTTNG_LIST_TRACEPOINTS:
-       case LTTNG_LIST_TRACEPOINT_FIELDS:
-       case LTTNG_LIST_DOMAINS:
-       case LTTNG_LIST_CHANNELS:
-       case LTTNG_LIST_EVENTS:
-       case LTTNG_LIST_SYSCALLS:
-       case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
-       case LTTNG_PROCESS_ATTR_TRACKER_GET_POLICY:
-       case LTTNG_PROCESS_ATTR_TRACKER_GET_INCLUSION_SET:
-       case LTTNG_DATA_PENDING:
-       case LTTNG_ROTATE_SESSION:
-       case LTTNG_ROTATION_GET_INFO:
-       case LTTNG_REGISTER_TRIGGER:
-       case LTTNG_LIST_TRIGGERS:
-       case LTTNG_EXECUTE_ERROR_QUERY:
-               break;
-       default:
-               /* Setup lttng message with no payload */
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0);
-               if (ret < 0) {
-                       /* This label does not try to unlock the session */
-                       goto init_setup_error;
-               }
-       }
-
-       /* Commands that DO NOT need a session. */
-       switch (cmd_ctx->lsm.cmd_type) {
-       case LTTNG_CREATE_SESSION_EXT:
-       case LTTNG_LIST_SESSIONS:
-       case LTTNG_LIST_TRACEPOINTS:
-       case LTTNG_LIST_SYSCALLS:
-       case LTTNG_LIST_TRACEPOINT_FIELDS:
-       case LTTNG_SAVE_SESSION:
-       case LTTNG_REGISTER_TRIGGER:
-       case LTTNG_UNREGISTER_TRIGGER:
-       case LTTNG_LIST_TRIGGERS:
-       case LTTNG_EXECUTE_ERROR_QUERY:
-               need_tracing_session = false;
-               break;
-       default:
-               DBG("Getting session %s by name", cmd_ctx->lsm.session.name);
-               /*
-                * We keep the session list lock across _all_ commands
-                * for now, because the per-session lock does not
-                * handle teardown properly.
-                */
-               session_lock_list();
-               cmd_ctx->session = session_find_by_name(cmd_ctx->lsm.session.name);
-               if (cmd_ctx->session == NULL) {
-                       ret = LTTNG_ERR_SESS_NOT_FOUND;
-                       goto error;
-               } else {
-                       /* Acquire lock for the session */
-                       session_lock(cmd_ctx->session);
-               }
-               break;
-       }
-
-       /*
-        * Commands that need a valid session but should NOT create one if none
-        * exists. Instead of creating one and destroying it when the command is
-        * handled, process that right before so we save some round trip in useless
-        * code path.
-        */
-       switch (cmd_ctx->lsm.cmd_type) {
-       case LTTNG_DISABLE_CHANNEL:
-       case LTTNG_DISABLE_EVENT:
-               switch (cmd_ctx->lsm.domain.type) {
-               case LTTNG_DOMAIN_KERNEL:
-                       if (!cmd_ctx->session->kernel_session) {
-                               ret = LTTNG_ERR_NO_CHANNEL;
-                               goto error;
-                       }
-                       break;
-               case LTTNG_DOMAIN_JUL:
-               case LTTNG_DOMAIN_LOG4J:
-               case LTTNG_DOMAIN_PYTHON:
-               case LTTNG_DOMAIN_UST:
-                       if (!cmd_ctx->session->ust_session) {
-                               ret = LTTNG_ERR_NO_CHANNEL;
-                               goto error;
-                       }
-                       break;
-               default:
-                       ret = LTTNG_ERR_UNKNOWN_DOMAIN;
-                       goto error;
-               }
-       default:
-               break;
-       }
-
-       if (!need_domain) {
-               goto skip_domain;
-       }
-
-       /*
-        * Check domain type for specific "pre-action".
-        */
-       switch (cmd_ctx->lsm.domain.type) {
-       case LTTNG_DOMAIN_KERNEL:
-               if (!is_root) {
-                       ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
-                       goto error;
-               }
-
-               /* Kernel tracer check */
-               if (!kernel_tracer_is_initialized()) {
-                       /* Basically, load kernel tracer modules */
-                       ret = init_kernel_tracer();
-                       if (ret != 0) {
-                               goto error;
-                       }
-               }
-
-               /* Consumer is in an ERROR state. Report back to client */
-               if (need_consumerd && uatomic_read(&the_kernel_consumerd_state) ==
-                                               CONSUMER_ERROR) {
-                       ret = LTTNG_ERR_NO_KERNCONSUMERD;
-                       goto error;
-               }
-
-               /* Need a session for kernel command */
-               if (need_tracing_session) {
-                       if (cmd_ctx->session->kernel_session == NULL) {
-                               ret = create_kernel_session(cmd_ctx->session);
-                               if (ret != LTTNG_OK) {
-                                       ret = LTTNG_ERR_KERN_SESS_FAIL;
-                                       goto error;
-                               }
-                       }
-
-                       /* Start the kernel consumer daemon */
-                       pthread_mutex_lock(&the_kconsumer_data.pid_mutex);
-                       if (the_kconsumer_data.pid == 0 &&
-                                       cmd_ctx->lsm.cmd_type != LTTNG_REGISTER_CONSUMER) {
-                               pthread_mutex_unlock(&the_kconsumer_data.pid_mutex);
-                               ret = start_consumerd(&the_kconsumer_data);
-                               if (ret < 0) {
-                                       ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
-                                       goto error;
-                               }
-                               uatomic_set(&the_kernel_consumerd_state, CONSUMER_STARTED);
-                       } else {
-                               pthread_mutex_unlock(&the_kconsumer_data.pid_mutex);
-                       }
-
-                       /*
-                        * The consumer was just spawned so we need to add the socket to
-                        * the consumer output of the session if exist.
-                        */
-                       ret = consumer_create_socket(&the_kconsumer_data,
-                                       cmd_ctx->session->kernel_session->consumer);
-                       if (ret < 0) {
-                               goto error;
-                       }
-               }
-
-               break;
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_PYTHON:
-               if (!agent_tracing_is_enabled()) {
-                       ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
-                       goto error;
-               }
-               /* Fallthrough */
-       case LTTNG_DOMAIN_UST:
-       {
-               if (!ust_app_supported()) {
-                       ret = LTTNG_ERR_NO_UST;
-                       goto error;
-               }
-
-               /* Consumer is in an ERROR state. Report back to client */
-               if (need_consumerd &&
-                               uatomic_read(&the_ust_consumerd_state) ==
-                                               CONSUMER_ERROR) {
-                       ret = LTTNG_ERR_NO_USTCONSUMERD;
-                       goto error;
-               }
-
-               if (need_tracing_session) {
-                       /* Create UST session if none exist. */
-                       if (cmd_ctx->session->ust_session == NULL) {
-                               ret = create_ust_session(cmd_ctx->session,
-                                               ALIGNED_CONST_PTR(cmd_ctx->lsm.domain));
-                               if (ret != LTTNG_OK) {
-                                       goto error;
-                               }
-                       }
-
-                       /* Start the UST consumer daemons */
-                       /* 64-bit */
-                       pthread_mutex_lock(&the_ustconsumer64_data.pid_mutex);
-                       if (the_config.consumerd64_bin_path.value &&
-                                       the_ustconsumer64_data.pid == 0 &&
-                                       cmd_ctx->lsm.cmd_type != LTTNG_REGISTER_CONSUMER) {
-                               pthread_mutex_unlock(&the_ustconsumer64_data.pid_mutex);
-                               ret = start_consumerd(&the_ustconsumer64_data);
-                               if (ret < 0) {
-                                       ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
-                                       uatomic_set(&the_ust_consumerd64_fd, -EINVAL);
-                                       goto error;
-                               }
-
-                               uatomic_set(&the_ust_consumerd64_fd, the_ustconsumer64_data.cmd_sock);
-                               uatomic_set(&the_ust_consumerd_state, CONSUMER_STARTED);
-                       } else {
-                               pthread_mutex_unlock(&the_ustconsumer64_data.pid_mutex);
-                       }
-
-                       /*
-                        * Set up the socket for the 64-bit consumer. No need for atomic access
-                        * since it was set above and can ONLY be set in this thread.
-                        */
-                       ret = consumer_create_socket(&the_ustconsumer64_data,
-                                       cmd_ctx->session->ust_session->consumer);
-                       if (ret < 0) {
-                               goto error;
-                       }
-
-                       /* 32-bit */
-                       pthread_mutex_lock(&the_ustconsumer32_data.pid_mutex);
-                       if (the_config.consumerd32_bin_path.value &&
-                                       the_ustconsumer32_data.pid == 0 &&
-                                       cmd_ctx->lsm.cmd_type != LTTNG_REGISTER_CONSUMER) {
-                               pthread_mutex_unlock(&the_ustconsumer32_data.pid_mutex);
-                               ret = start_consumerd(&the_ustconsumer32_data);
-                               if (ret < 0) {
-                                       ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
-                                       uatomic_set(&the_ust_consumerd32_fd, -EINVAL);
-                                       goto error;
-                               }
-
-                               uatomic_set(&the_ust_consumerd32_fd, the_ustconsumer32_data.cmd_sock);
-                               uatomic_set(&the_ust_consumerd_state, CONSUMER_STARTED);
-                       } else {
-                               pthread_mutex_unlock(&the_ustconsumer32_data.pid_mutex);
-                       }
-
-                       /*
-                        * Set up the socket for the 32-bit consumer. No need for atomic access
-                        * since it was set above and can ONLY be set in this thread.
-                        */
-                       ret = consumer_create_socket(&the_ustconsumer32_data,
-                                       cmd_ctx->session->ust_session->consumer);
-                       if (ret < 0) {
-                               goto error;
-                       }
-               }
-               break;
-       }
-       default:
-               break;
-       }
-skip_domain:
-
-       /* Validate the consumer daemon state for the start/stop trace commands */
-       if (cmd_ctx->lsm.cmd_type == LTTNG_START_TRACE ||
-                       cmd_ctx->lsm.cmd_type == LTTNG_STOP_TRACE) {
-               switch (cmd_ctx->lsm.domain.type) {
-               case LTTNG_DOMAIN_NONE:
-                       break;
-               case LTTNG_DOMAIN_JUL:
-               case LTTNG_DOMAIN_LOG4J:
-               case LTTNG_DOMAIN_PYTHON:
-               case LTTNG_DOMAIN_UST:
-                       if (uatomic_read(&the_ust_consumerd_state) != CONSUMER_STARTED) {
-                               ret = LTTNG_ERR_NO_USTCONSUMERD;
-                               goto error;
-                       }
-                       break;
-               case LTTNG_DOMAIN_KERNEL:
-                       if (uatomic_read(&the_kernel_consumerd_state) != CONSUMER_STARTED) {
-                               ret = LTTNG_ERR_NO_KERNCONSUMERD;
-                               goto error;
-                       }
-                       break;
-               default:
-                       ret = LTTNG_ERR_UNKNOWN_DOMAIN;
-                       goto error;
-               }
-       }
-
-       /*
-        * Check that the UID matches that of the tracing session.
-        * The root user can interact with all sessions.
-        */
-       if (need_tracing_session) {
-               if (!session_access_ok(cmd_ctx->session,
-                               LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds)) ||
-                               cmd_ctx->session->destroyed) {
-                       ret = LTTNG_ERR_EPERM;
-                       goto error;
-               }
-       }
-
-       /*
-        * Send relayd information to consumer as soon as we have a domain and a
-        * session defined.
-        */
-       if (cmd_ctx->session && need_domain) {
-               /*
-                * Set up relayd if not done yet. If the relayd information was already
-                * sent to the consumer, this call will gracefully return.
-                */
-               ret = cmd_setup_relayd(cmd_ctx->session);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-       }
-
-       /* Process by command type */
-       switch (cmd_ctx->lsm.cmd_type) {
-       case LTTNG_ADD_CONTEXT:
-       {
-               /*
-                * An LTTNG_ADD_CONTEXT command might have a supplementary
-                * payload if the context being added is an application context.
-                */
-               if (cmd_ctx->lsm.u.context.ctx.ctx ==
-                               LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
-                       char *provider_name = NULL, *context_name = NULL;
-                       size_t provider_name_len =
-                                       cmd_ctx->lsm.u.context.provider_name_len;
-                       size_t context_name_len =
-                                       cmd_ctx->lsm.u.context.context_name_len;
-
-                       if (provider_name_len == 0 || context_name_len == 0) {
-                               /*
-                                * Application provider and context names MUST
-                                * be provided.
-                                */
-                               ret = -LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-
-                       provider_name = zmalloc(provider_name_len + 1);
-                       if (!provider_name) {
-                               ret = -LTTNG_ERR_NOMEM;
-                               goto error;
-                       }
-                       cmd_ctx->lsm.u.context.ctx.u.app_ctx.provider_name =
-                                       provider_name;
-
-                       context_name = zmalloc(context_name_len + 1);
-                       if (!context_name) {
-                               ret = -LTTNG_ERR_NOMEM;
-                               goto error_add_context;
-                       }
-                       cmd_ctx->lsm.u.context.ctx.u.app_ctx.ctx_name =
-                                       context_name;
-
-                       ret = lttcomm_recv_unix_sock(*sock, provider_name,
-                                       provider_name_len);
-                       if (ret < 0) {
-                               goto error_add_context;
-                       }
-
-                       ret = lttcomm_recv_unix_sock(*sock, context_name,
-                                       context_name_len);
-                       if (ret < 0) {
-                               goto error_add_context;
-                       }
-               }
-
-               /*
-                * cmd_add_context assumes ownership of the provider and context
-                * names.
-                */
-               ret = cmd_add_context(cmd_ctx->session,
-                               cmd_ctx->lsm.domain.type,
-                               cmd_ctx->lsm.u.context.channel_name,
-                               ALIGNED_CONST_PTR(cmd_ctx->lsm.u.context.ctx),
-                               the_kernel_poll_pipe[1]);
-
-               cmd_ctx->lsm.u.context.ctx.u.app_ctx.provider_name = NULL;
-               cmd_ctx->lsm.u.context.ctx.u.app_ctx.ctx_name = NULL;
-error_add_context:
-               free(cmd_ctx->lsm.u.context.ctx.u.app_ctx.provider_name);
-               free(cmd_ctx->lsm.u.context.ctx.u.app_ctx.ctx_name);
-               if (ret < 0) {
-                       goto error;
-               }
-               break;
-       }
-       case LTTNG_DISABLE_CHANNEL:
-       {
-               ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm.domain.type,
-                               cmd_ctx->lsm.u.disable.channel_name);
-               break;
-       }
-       case LTTNG_DISABLE_EVENT:
-       {
-
-               /*
-                * FIXME: handle the filter; for now we just receive the filter's
-                * bytecode along with the filter expression, both sent by
-                * liblttng-ctl, and discard them.
-                *
-                * This fixes an issue where the client may block while sending
-                * the filter payload and encounter an error because the session
-                * daemon closes the socket without ever handling this data.
-                */
-               size_t count = cmd_ctx->lsm.u.disable.expression_len +
-                       cmd_ctx->lsm.u.disable.bytecode_len;
-
-               if (count) {
-                       char data[LTTNG_FILTER_MAX_LEN];
-
-                       DBG("Discarding disable event command payload of size %zu", count);
-                       while (count) {
-                               ret = lttcomm_recv_unix_sock(*sock, data,
-                                       count > sizeof(data) ? sizeof(data) : count);
-                               if (ret < 0) {
-                                       goto error;
-                               }
-
-                               count -= (size_t) ret;
-                       }
-               }
-               ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm.domain.type,
-                               cmd_ctx->lsm.u.disable.channel_name,
-                               ALIGNED_CONST_PTR(cmd_ctx->lsm.u.disable.event));
-               break;
-       }
-       case LTTNG_ENABLE_CHANNEL:
-       {
-               cmd_ctx->lsm.u.channel.chan.attr.extended.ptr =
-                               (struct lttng_channel_extended *) &cmd_ctx->lsm.u.channel.extended;
-               ret = cmd_enable_channel(cmd_ctx->session,
-                               ALIGNED_CONST_PTR(cmd_ctx->lsm.domain),
-                               ALIGNED_CONST_PTR(cmd_ctx->lsm.u.channel.chan),
-                               the_kernel_poll_pipe[1]);
-               break;
-       }
-       case LTTNG_PROCESS_ATTR_TRACKER_ADD_INCLUDE_VALUE:
-       case LTTNG_PROCESS_ATTR_TRACKER_REMOVE_INCLUDE_VALUE:
-       {
-               struct lttng_dynamic_buffer payload;
-               struct lttng_buffer_view payload_view;
-               const bool add_value =
-                               cmd_ctx->lsm.cmd_type ==
-                               LTTNG_PROCESS_ATTR_TRACKER_ADD_INCLUDE_VALUE;
-               const size_t name_len =
-                               cmd_ctx->lsm.u.process_attr_tracker_add_remove_include_value
-                                               .name_len;
-               const enum lttng_domain_type domain_type =
-                               (enum lttng_domain_type)
-                                               cmd_ctx->lsm.domain.type;
-               const enum lttng_process_attr process_attr =
-                               (enum lttng_process_attr) cmd_ctx->lsm.u
-                                               .process_attr_tracker_add_remove_include_value
-                                               .process_attr;
-               const enum lttng_process_attr_value_type value_type =
-                               (enum lttng_process_attr_value_type) cmd_ctx
-                                               ->lsm.u
-                                               .process_attr_tracker_add_remove_include_value
-                                               .value_type;
-               struct process_attr_value *value;
-               enum lttng_error_code ret_code;
-               long login_name_max;
-
-               login_name_max = sysconf(_SC_LOGIN_NAME_MAX);
-               if (login_name_max < 0) {
-                       PERROR("Failed to get _SC_LOGIN_NAME_MAX system configuration");
-                       ret = LTTNG_ERR_INVALID;
-                       goto error;
-               }
-
-               /* Receive remaining variable length payload if applicable. */
-               if (name_len > login_name_max) {
-                       /*
-                        * POSIX mandates support for user and group names of
-                        * at least 8 characters. Note that although
-                        * shadow-utils (useradd, groupadd, etc.) use 32 chars
-                        * as their limit (from bits/utmp.h, UT_NAMESIZE),
-                        * LOGIN_NAME_MAX is defined as 256.
-                        */
-                       ERR("Rejecting process attribute tracker value %s as the provided value exceeds the maximal allowed length: argument length = %zu, maximal length = %ld",
-                                       add_value ? "addition" : "removal",
-                                       name_len, login_name_max);
-                       ret = LTTNG_ERR_INVALID;
-                       goto error;
-               }
-
-               lttng_dynamic_buffer_init(&payload);
-               if (name_len != 0) {
-                       /*
-                        * Receive variable payload for user/group name
-                        * arguments.
-                        */
-                       ret = lttng_dynamic_buffer_set_size(&payload, name_len);
-                       if (ret) {
-                               ERR("Failed to allocate buffer to receive payload of %s process attribute tracker value argument",
-                                               add_value ? "add" : "remove");
-                               ret = LTTNG_ERR_NOMEM;
-                               goto error_add_remove_tracker_value;
-                       }
-
-                       ret = lttcomm_recv_unix_sock(
-                                       *sock, payload.data, name_len);
-                       if (ret <= 0) {
-                               ERR("Failed to receive payload of %s process attribute tracker value argument",
-                                               add_value ? "add" : "remove");
-                               *sock_error = 1;
-                               ret = LTTNG_ERR_INVALID_PROTOCOL;
-                               goto error_add_remove_tracker_value;
-                       }
-               }
-
-               payload_view = lttng_buffer_view_from_dynamic_buffer(
-                               &payload, 0, name_len);
-               if (name_len > 0 && !lttng_buffer_view_is_valid(&payload_view)) {
-                       ret = LTTNG_ERR_INVALID_PROTOCOL;
-                       goto error_add_remove_tracker_value;
-               }
-
-               /*
-                * Validate the value type and domains are legal for the process
-                * attribute tracker that is specified and convert the value to
-                * add/remove to the internal sessiond representation.
-                */
-               ret_code = process_attr_value_from_comm(domain_type,
-                               process_attr, value_type,
-                               &cmd_ctx->lsm.u.process_attr_tracker_add_remove_include_value
-                                                .integral_value,
-                               &payload_view, &value);
-               if (ret_code != LTTNG_OK) {
-                       ret = ret_code;
-                       goto error_add_remove_tracker_value;
-               }
-
-               if (add_value) {
-                       ret = cmd_process_attr_tracker_inclusion_set_add_value(
-                                       cmd_ctx->session, domain_type,
-                                       process_attr, value);
-               } else {
-                       ret = cmd_process_attr_tracker_inclusion_set_remove_value(
-                                       cmd_ctx->session, domain_type,
-                                       process_attr, value);
-               }
-               process_attr_value_destroy(value);
-       error_add_remove_tracker_value:
-               lttng_dynamic_buffer_reset(&payload);
-               break;
-       }
-       case LTTNG_PROCESS_ATTR_TRACKER_GET_POLICY:
-       {
-               enum lttng_tracking_policy tracking_policy;
-               const enum lttng_domain_type domain_type =
-                               (enum lttng_domain_type)
-                                               cmd_ctx->lsm.domain.type;
-               const enum lttng_process_attr process_attr =
-                               (enum lttng_process_attr) cmd_ctx->lsm.u
-                                               .process_attr_tracker_get_tracking_policy
-                                               .process_attr;
-
-               ret = cmd_process_attr_tracker_get_tracking_policy(
-                               cmd_ctx->session, domain_type, process_attr,
-                               &tracking_policy);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx,
-                               &(uint32_t){tracking_policy}, sizeof(uint32_t));
-               if (ret < 0) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_PROCESS_ATTR_TRACKER_SET_POLICY:
-       {
-               const enum lttng_tracking_policy tracking_policy =
-                               (enum lttng_tracking_policy) cmd_ctx->lsm.u
-                                               .process_attr_tracker_set_tracking_policy
-                                               .tracking_policy;
-               const enum lttng_domain_type domain_type =
-                               (enum lttng_domain_type)
-                                               cmd_ctx->lsm.domain.type;
-               const enum lttng_process_attr process_attr =
-                               (enum lttng_process_attr) cmd_ctx->lsm.u
-                                               .process_attr_tracker_set_tracking_policy
-                                               .process_attr;
-
-               ret = cmd_process_attr_tracker_set_tracking_policy(
-                               cmd_ctx->session, domain_type, process_attr,
-                               tracking_policy);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-               break;
-       }
-       case LTTNG_PROCESS_ATTR_TRACKER_GET_INCLUSION_SET:
-       {
-               struct lttng_process_attr_values *values;
-               struct lttng_dynamic_buffer reply;
-               const enum lttng_domain_type domain_type =
-                               (enum lttng_domain_type)
-                                               cmd_ctx->lsm.domain.type;
-               const enum lttng_process_attr process_attr =
-                               (enum lttng_process_attr) cmd_ctx->lsm.u
-                                               .process_attr_tracker_get_inclusion_set
-                                               .process_attr;
-
-               ret = cmd_process_attr_tracker_get_inclusion_set(
-                               cmd_ctx->session, domain_type, process_attr,
-                               &values);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               lttng_dynamic_buffer_init(&reply);
-               ret = lttng_process_attr_values_serialize(values, &reply);
-               if (ret < 0) {
-                       goto error_tracker_get_inclusion_set;
-               }
-
-               ret = setup_lttng_msg_no_cmd_header(
-                               cmd_ctx, reply.data, reply.size);
-               if (ret < 0) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error_tracker_get_inclusion_set;
-               }
-               ret = LTTNG_OK;
-
-       error_tracker_get_inclusion_set:
-               lttng_process_attr_values_destroy(values);
-               lttng_dynamic_buffer_reset(&reply);
-               break;
-       }
-       case LTTNG_ENABLE_EVENT:
-       {
-               struct lttng_event *ev = NULL;
-               struct lttng_event_exclusion *exclusion = NULL;
-               struct lttng_bytecode *bytecode = NULL;
-               char *filter_expression = NULL;
-
-               /* Handle exclusion events and receive them from the client. */
-               if (cmd_ctx->lsm.u.enable.exclusion_count > 0) {
-                       size_t count = cmd_ctx->lsm.u.enable.exclusion_count;
-
-                       exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
-                                       (count * LTTNG_SYMBOL_NAME_LEN));
-                       if (!exclusion) {
-                               ret = LTTNG_ERR_EXCLUSION_NOMEM;
-                               goto error;
-                       }
-
-                       DBG("Receiving var len exclusion event list from client ...");
-                       exclusion->count = count;
-                       ret = lttcomm_recv_unix_sock(*sock, exclusion->names,
-                                       count * LTTNG_SYMBOL_NAME_LEN);
-                       if (ret <= 0) {
-                               DBG("Nothing recv() from client var len data... continuing");
-                               *sock_error = 1;
-                               free(exclusion);
-                               ret = LTTNG_ERR_EXCLUSION_INVAL;
-                               goto error;
-                       }
-               }
-
-               /* Get filter expression from client. */
-               if (cmd_ctx->lsm.u.enable.expression_len > 0) {
-                       size_t expression_len =
-                               cmd_ctx->lsm.u.enable.expression_len;
-
-                       if (expression_len > LTTNG_FILTER_MAX_LEN) {
-                               ret = LTTNG_ERR_FILTER_INVAL;
-                               free(exclusion);
-                               goto error;
-                       }
-
-                       filter_expression = zmalloc(expression_len);
-                       if (!filter_expression) {
-                               free(exclusion);
-                               ret = LTTNG_ERR_FILTER_NOMEM;
-                               goto error;
-                       }
-
-                       /* Receive var. len. data */
-                       DBG("Receiving var len filter's expression from client ...");
-                       ret = lttcomm_recv_unix_sock(*sock, filter_expression,
-                               expression_len);
-                       if (ret <= 0) {
-                               DBG("Nothing recv() from client var len data... continuing");
-                               *sock_error = 1;
-                               free(filter_expression);
-                               free(exclusion);
-                               ret = LTTNG_ERR_FILTER_INVAL;
-                               goto error;
-                       }
-               }
-
-               /* Handle filter and get bytecode from client. */
-               if (cmd_ctx->lsm.u.enable.bytecode_len > 0) {
-                       size_t bytecode_len = cmd_ctx->lsm.u.enable.bytecode_len;
-
-                       if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
-                               ret = LTTNG_ERR_FILTER_INVAL;
-                               free(filter_expression);
-                               free(exclusion);
-                               goto error;
-                       }
-
-                       bytecode = zmalloc(bytecode_len);
-                       if (!bytecode) {
-                               free(filter_expression);
-                               free(exclusion);
-                               ret = LTTNG_ERR_FILTER_NOMEM;
-                               goto error;
-                       }
-
-                       /* Receive var. len. data */
-                       DBG("Receiving var len filter's bytecode from client ...");
-                       ret = lttcomm_recv_unix_sock(*sock, bytecode, bytecode_len);
-                       if (ret <= 0) {
-                               DBG("Nothing recv() from client var len data... continuing");
-                               *sock_error = 1;
-                               free(filter_expression);
-                               free(bytecode);
-                               free(exclusion);
-                               ret = LTTNG_ERR_FILTER_INVAL;
-                               goto error;
-                       }
-
-                       if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
-                               free(filter_expression);
-                               free(bytecode);
-                               free(exclusion);
-                               ret = LTTNG_ERR_FILTER_INVAL;
-                               goto error;
-                       }
-               }
-
-               ev = lttng_event_copy(ALIGNED_CONST_PTR(cmd_ctx->lsm.u.enable.event));
-               if (!ev) {
-                       DBG("Failed to copy event: %s",
-                                       cmd_ctx->lsm.u.enable.event.name);
-                       free(filter_expression);
-                       free(bytecode);
-                       free(exclusion);
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-
-               if (cmd_ctx->lsm.u.enable.userspace_probe_location_len > 0) {
-                       /* Expect a userspace probe description. */
-                       ret = receive_userspace_probe(cmd_ctx, *sock, sock_error, ev);
-                       if (ret) {
-                               free(filter_expression);
-                               free(bytecode);
-                               free(exclusion);
-                               lttng_event_destroy(ev);
-                               goto error;
-                       }
-               }
-
-               ret = cmd_enable_event(cmd_ctx->session,
-                               ALIGNED_CONST_PTR(cmd_ctx->lsm.domain),
-                               cmd_ctx->lsm.u.enable.channel_name,
-                               ev,
-                               filter_expression, bytecode, exclusion,
-                               the_kernel_poll_pipe[1]);
-               lttng_event_destroy(ev);
-               break;
-       }
-       case LTTNG_LIST_TRACEPOINTS:
-       {
-               struct lttng_event *events;
-               ssize_t nb_events;
-
-               session_lock_list();
-               nb_events = cmd_list_tracepoints(cmd_ctx->lsm.domain.type, &events);
-               session_unlock_list();
-               if (nb_events < 0) {
-                       /* Return value is a negative lttng_error_code. */
-                       ret = -nb_events;
-                       goto error;
-               }
-
-               /*
-                * Set up the lttng message with the payload size set to the event list
-                * size in bytes, then copy the list into the llm payload.
-                */
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
-                       sizeof(struct lttng_event) * nb_events);
-               free(events);
-
-               if (ret < 0) {
-                       goto setup_error;
-               }
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_LIST_TRACEPOINT_FIELDS:
-       {
-               struct lttng_event_field *fields;
-               ssize_t nb_fields;
-
-               session_lock_list();
-               nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm.domain.type,
-                               &fields);
-               session_unlock_list();
-               if (nb_fields < 0) {
-                       /* Return value is a negative lttng_error_code. */
-                       ret = -nb_fields;
-                       goto error;
-               }
-
-               /*
-                * Set up the lttng message with the payload size set to the field list
-                * size in bytes, then copy the list into the llm payload.
-                */
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, fields,
-                               sizeof(struct lttng_event_field) * nb_fields);
-               free(fields);
-
-               if (ret < 0) {
-                       goto setup_error;
-               }
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_LIST_SYSCALLS:
-       {
-               struct lttng_event *events;
-               ssize_t nb_events;
-
-               nb_events = cmd_list_syscalls(&events);
-               if (nb_events < 0) {
-                       /* Return value is a negative lttng_error_code. */
-                       ret = -nb_events;
-                       goto error;
-               }
-
-               /*
-                * Set up the lttng message with the payload size set to the event list
-                * size in bytes, then copy the list into the llm payload.
-                */
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
-                       sizeof(struct lttng_event) * nb_events);
-               free(events);
-
-               if (ret < 0) {
-                       goto setup_error;
-               }
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_SET_CONSUMER_URI:
-       {
-               size_t nb_uri, len;
-               struct lttng_uri *uris;
-
-               nb_uri = cmd_ctx->lsm.u.uri.size;
-               len = nb_uri * sizeof(struct lttng_uri);
-
-               if (nb_uri == 0) {
-                       ret = LTTNG_ERR_INVALID;
-                       goto error;
-               }
-
-               uris = zmalloc(len);
-               if (uris == NULL) {
-                       ret = LTTNG_ERR_FATAL;
-                       goto error;
-               }
-
-               /* Receive variable len data */
-               DBG("Receiving %zu URI(s) from client ...", nb_uri);
-               ret = lttcomm_recv_unix_sock(*sock, uris, len);
-               if (ret <= 0) {
-                       DBG("No URIs received from client... continuing");
-                       *sock_error = 1;
-                       ret = LTTNG_ERR_SESSION_FAIL;
-                       free(uris);
-                       goto error;
-               }
-
-               ret = cmd_set_consumer_uri(cmd_ctx->session, nb_uri, uris);
-               free(uris);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               break;
-       }
-       case LTTNG_START_TRACE:
-       {
-               /*
-                * On the first start, if we have a kernel session and we have
-                * enabled time or size-based rotations, we have to make sure
-                * the kernel tracer supports the rotation feature.
-                */
-               if (!cmd_ctx->session->has_been_started &&
-                               cmd_ctx->session->kernel_session &&
-                               (cmd_ctx->session->rotate_timer_period ||
-                                       cmd_ctx->session->rotate_size) &&
-                               !check_rotate_compatible()) {
-                       DBG("Kernel tracer version is not compatible with the rotation feature");
-                       ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
-                       goto error;
-               }
-               ret = cmd_start_trace(cmd_ctx->session);
-               break;
-       }
-       case LTTNG_STOP_TRACE:
-       {
-               ret = cmd_stop_trace(cmd_ctx->session);
-               break;
-       }
-       case LTTNG_DESTROY_SESSION:
-       {
-               ret = cmd_destroy_session(cmd_ctx->session,
-                               the_notification_thread_handle, sock);
-               break;
-       }
-       case LTTNG_LIST_DOMAINS:
-       {
-               ssize_t nb_dom;
-               struct lttng_domain *domains = NULL;
-
-               nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
-               if (nb_dom < 0) {
-                       /* Return value is a negative lttng_error_code. */
-                       ret = -nb_dom;
-                       goto error;
-               }
-
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, domains,
-                       nb_dom * sizeof(struct lttng_domain));
-               free(domains);
-
-               if (ret < 0) {
-                       goto setup_error;
-               }
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_LIST_CHANNELS:
-       {
-               ssize_t payload_size;
-               struct lttng_channel *channels = NULL;
-
-               payload_size = cmd_list_channels(cmd_ctx->lsm.domain.type,
-                               cmd_ctx->session, &channels);
-               if (payload_size < 0) {
-                       /* Return value is a negative lttng_error_code. */
-                       ret = -payload_size;
-                       goto error;
-               }
-
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, channels,
-                       payload_size);
-               free(channels);
-
-               if (ret < 0) {
-                       goto setup_error;
-               }
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_LIST_EVENTS:
-       {
-               ssize_t list_ret;
-               struct lttcomm_event_command_header cmd_header = {};
-               size_t original_payload_size;
-               size_t payload_size;
-
-               ret = setup_empty_lttng_msg(cmd_ctx);
-               if (ret) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto setup_error;
-               }
-
-               original_payload_size = cmd_ctx->reply_payload.buffer.size;
-
-               /* Extended event information is included at the end of the payload. */
-               list_ret = cmd_list_events(cmd_ctx->lsm.domain.type,
-                               cmd_ctx->session,
-                               cmd_ctx->lsm.u.list.channel_name,
-                               &cmd_ctx->reply_payload);
-               if (list_ret < 0) {
-                       /* Return value is a negative lttng_error_code. */
-                       ret = -list_ret;
-                       goto error;
-               }
-
-               payload_size = cmd_ctx->reply_payload.buffer.size -
-                               sizeof(cmd_header) - original_payload_size;
-               update_lttng_msg(cmd_ctx, sizeof(cmd_header), payload_size);
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_LIST_SESSIONS:
-       {
-               unsigned int nr_sessions;
-               void *sessions_payload;
-               size_t payload_len;
-
-               session_lock_list();
-               nr_sessions = lttng_sessions_count(
-                               LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
-                               LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
-
-               payload_len = (sizeof(struct lttng_session) * nr_sessions) +
-                               (sizeof(struct lttng_session_extended) * nr_sessions);
-               sessions_payload = zmalloc(payload_len);
-
-               if (!sessions_payload) {
-                       session_unlock_list();
-                       ret = -ENOMEM;
-                       goto setup_error;
-               }
-
-               cmd_list_lttng_sessions(sessions_payload, nr_sessions,
-                       LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
-                       LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
-               session_unlock_list();
-
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, sessions_payload,
-                       payload_len);
-               free(sessions_payload);
-
-               if (ret < 0) {
-                       goto setup_error;
-               }
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_REGISTER_CONSUMER:
-       {
-               struct consumer_data *cdata;
-
-               switch (cmd_ctx->lsm.domain.type) {
-               case LTTNG_DOMAIN_KERNEL:
-                       cdata = &the_kconsumer_data;
-                       break;
-               default:
-                       ret = LTTNG_ERR_UND;
-                       goto error;
-               }
-
-               ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm.domain.type,
-                               cmd_ctx->lsm.u.reg.path, cdata);
-               break;
-       }
-       case LTTNG_DATA_PENDING:
-       {
-               int pending_ret;
-               uint8_t pending_ret_byte;
-
-               pending_ret = cmd_data_pending(cmd_ctx->session);
-
-               /*
-                * FIXME
-                *
-                * This function may return 0 or 1 to indicate whether or not
-                * there is data pending. In case of error, it should return an
-                * LTTNG_ERR code. However, some code paths may still return
-                * a nondescript error code, which we handle by returning an
-                * "unknown" error.
-                */
-               if (pending_ret == 0 || pending_ret == 1) {
-                       /*
-                        * ret will be set to LTTNG_OK at the end of
-                        * this function.
-                        */
-               } else if (pending_ret < 0) {
-                       ret = LTTNG_ERR_UNK;
-                       goto setup_error;
-               } else {
-                       ret = pending_ret;
-                       goto setup_error;
-               }
-
-               pending_ret_byte = (uint8_t) pending_ret;
-
-               /* 1 byte to return whether or not data is pending */
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx,
-                       &pending_ret_byte, 1);
-
-               if (ret < 0) {
-                       goto setup_error;
-               }
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_SNAPSHOT_ADD_OUTPUT:
-       {
-               uint32_t snapshot_id;
-               struct lttcomm_lttng_output_id reply;
-
-               ret = cmd_snapshot_add_output(cmd_ctx->session,
-                               ALIGNED_CONST_PTR(cmd_ctx->lsm.u.snapshot_output.output),
-                               &snapshot_id);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-               reply.id = snapshot_id;
-
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &reply,
-                       sizeof(reply));
-               if (ret < 0) {
-                       goto setup_error;
-               }
-
-               /* The new snapshot output id was copied into the message payload above. */
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_SNAPSHOT_DEL_OUTPUT:
-       {
-               ret = cmd_snapshot_del_output(cmd_ctx->session,
-                               ALIGNED_CONST_PTR(cmd_ctx->lsm.u.snapshot_output.output));
-               break;
-       }
-       case LTTNG_SNAPSHOT_LIST_OUTPUT:
-       {
-               ssize_t nb_output;
-               struct lttng_snapshot_output *outputs = NULL;
-
-               nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
-               if (nb_output < 0) {
-                       ret = -nb_output;
-                       goto error;
-               }
-
-               LTTNG_ASSERT((nb_output > 0 && outputs) || nb_output == 0);
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, outputs,
-                               nb_output * sizeof(struct lttng_snapshot_output));
-               free(outputs);
-
-               if (ret < 0) {
-                       goto setup_error;
-               }
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_SNAPSHOT_RECORD:
-       {
-               ret = cmd_snapshot_record(cmd_ctx->session,
-                               ALIGNED_CONST_PTR(cmd_ctx->lsm.u.snapshot_record.output),
-                               cmd_ctx->lsm.u.snapshot_record.wait);
-               break;
-       }
-       case LTTNG_CREATE_SESSION_EXT:
-       {
-               struct lttng_dynamic_buffer payload;
-               struct lttng_session_descriptor *return_descriptor = NULL;
-
-               lttng_dynamic_buffer_init(&payload);
-               ret = cmd_create_session(cmd_ctx, *sock, &return_descriptor);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               ret = lttng_session_descriptor_serialize(return_descriptor,
-                               &payload);
-               if (ret) {
-                       ERR("Failed to serialize session descriptor in reply to \"create session\" command");
-                       lttng_session_descriptor_destroy(return_descriptor);
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, payload.data,
-                               payload.size);
-               if (ret) {
-                       lttng_session_descriptor_destroy(return_descriptor);
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-               lttng_dynamic_buffer_reset(&payload);
-               lttng_session_descriptor_destroy(return_descriptor);
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_SAVE_SESSION:
-       {
-               ret = cmd_save_sessions(&cmd_ctx->lsm.u.save_session.attr,
-                       &cmd_ctx->creds);
-               break;
-       }
-       case LTTNG_SET_SESSION_SHM_PATH:
-       {
-               ret = cmd_set_session_shm_path(cmd_ctx->session,
-                               cmd_ctx->lsm.u.set_shm_path.shm_path);
-               break;
-       }
-       case LTTNG_REGENERATE_METADATA:
-       {
-               ret = cmd_regenerate_metadata(cmd_ctx->session);
-               break;
-       }
-       case LTTNG_REGENERATE_STATEDUMP:
-       {
-               ret = cmd_regenerate_statedump(cmd_ctx->session);
-               break;
-       }
-       case LTTNG_REGISTER_TRIGGER:
-       {
-               struct lttng_trigger *payload_trigger;
-               struct lttng_trigger *return_trigger;
-               size_t original_reply_payload_size;
-               size_t reply_payload_size;
-               const struct lttng_credentials cmd_creds = {
-                       .uid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.uid),
-                       .gid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.gid),
-               };
-
-               ret = setup_empty_lttng_msg(cmd_ctx);
-               if (ret) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto setup_error;
-               }
-
-               ret = receive_lttng_trigger(
-                               cmd_ctx, *sock, sock_error, &payload_trigger);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               original_reply_payload_size = cmd_ctx->reply_payload.buffer.size;
-
-               ret = cmd_register_trigger(&cmd_creds, payload_trigger,
-                               cmd_ctx->lsm.u.trigger.is_trigger_anonymous,
-                               the_notification_thread_handle,
-                               &return_trigger);
-               if (ret != LTTNG_OK) {
-                       lttng_trigger_put(payload_trigger);
-                       goto error;
-               }
-
-               ret = lttng_trigger_serialize(return_trigger, &cmd_ctx->reply_payload);
-               lttng_trigger_put(payload_trigger);
-               lttng_trigger_put(return_trigger);
-               if (ret) {
-                       ERR("Failed to serialize trigger in reply to \"register trigger\" command");
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-
-               reply_payload_size = cmd_ctx->reply_payload.buffer.size -
-                       original_reply_payload_size;
-
-               update_lttng_msg(cmd_ctx, 0, reply_payload_size);
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_UNREGISTER_TRIGGER:
-       {
-               struct lttng_trigger *payload_trigger;
-               const struct lttng_credentials cmd_creds = {
-                       .uid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.uid),
-                       .gid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.gid),
-               };
-
-               ret = receive_lttng_trigger(
-                               cmd_ctx, *sock, sock_error, &payload_trigger);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               ret = cmd_unregister_trigger(&cmd_creds, payload_trigger,
-                               the_notification_thread_handle);
-               lttng_trigger_put(payload_trigger);
-               break;
-       }
-       case LTTNG_ROTATE_SESSION:
-       {
-               struct lttng_rotate_session_return rotate_return;
-
-               DBG("Client rotate session \"%s\"", cmd_ctx->session->name);
-
-               memset(&rotate_return, 0, sizeof(rotate_return));
-               if (cmd_ctx->session->kernel_session && !check_rotate_compatible()) {
-                       DBG("Kernel tracer version is not compatible with the rotation feature");
-                       ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
-                       goto error;
-               }
-
-               ret = cmd_rotate_session(cmd_ctx->session, &rotate_return,
-                       false,
-                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
-               if (ret < 0) {
-                       ret = -ret;
-                       goto error;
-               }
-
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &rotate_return,
-                               sizeof(rotate_return));
-               if (ret < 0) {
-                       ret = -ret;
-                       goto error;
-               }
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_ROTATION_GET_INFO:
-       {
-               struct lttng_rotation_get_info_return get_info_return;
-
-               memset(&get_info_return, 0, sizeof(get_info_return));
-               ret = cmd_rotate_get_info(cmd_ctx->session, &get_info_return,
-                               cmd_ctx->lsm.u.get_rotation_info.rotation_id);
-               if (ret < 0) {
-                       ret = -ret;
-                       goto error;
-               }
-
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &get_info_return,
-                               sizeof(get_info_return));
-               if (ret < 0) {
-                       ret = -ret;
-                       goto error;
-               }
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_ROTATION_SET_SCHEDULE:
-       {
-               bool set_schedule;
-               enum lttng_rotation_schedule_type schedule_type;
-               uint64_t value;
-
-               if (cmd_ctx->session->kernel_session && !check_rotate_compatible()) {
-                       DBG("Kernel tracer version does not support session rotations");
-                       ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
-                       goto error;
-               }
-
-               set_schedule = cmd_ctx->lsm.u.rotation_set_schedule.set == 1;
-               schedule_type = (enum lttng_rotation_schedule_type) cmd_ctx->lsm.u.rotation_set_schedule.type;
-               value = cmd_ctx->lsm.u.rotation_set_schedule.value;
-
-               ret = cmd_rotation_set_schedule(cmd_ctx->session, set_schedule,
-                               schedule_type, value,
-                               the_notification_thread_handle);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               break;
-       }
-       case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
-       {
-               struct lttng_session_list_schedules_return schedules = {
-                       .periodic.set = !!cmd_ctx->session->rotate_timer_period,
-                       .periodic.value = cmd_ctx->session->rotate_timer_period,
-                       .size.set = !!cmd_ctx->session->rotate_size,
-                       .size.value = cmd_ctx->session->rotate_size,
-               };
-
-               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &schedules,
-                               sizeof(schedules));
-               if (ret < 0) {
-                       ret = -ret;
-                       goto error;
-               }
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_CLEAR_SESSION:
-       {
-               ret = cmd_clear_session(cmd_ctx->session, sock);
-               break;
-       }
-       case LTTNG_LIST_TRIGGERS:
-       {
-               struct lttng_triggers *return_triggers = NULL;
-               size_t original_payload_size;
-               size_t payload_size;
-
-               ret = setup_empty_lttng_msg(cmd_ctx);
-               if (ret) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto setup_error;
-               }
-
-               original_payload_size = cmd_ctx->reply_payload.buffer.size;
-
-               ret = cmd_list_triggers(cmd_ctx, the_notification_thread_handle,
-                               &return_triggers);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               LTTNG_ASSERT(return_triggers);
-               ret = lttng_triggers_serialize(
-                               return_triggers, &cmd_ctx->reply_payload);
-               lttng_triggers_destroy(return_triggers);
-               if (ret) {
-                       ERR("Failed to serialize triggers in reply to `list triggers` command");
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-
-               payload_size = cmd_ctx->reply_payload.buffer.size -
-                       original_payload_size;
-
-               update_lttng_msg(cmd_ctx, 0, payload_size);
-
-               ret = LTTNG_OK;
-               break;
-       }
-       case LTTNG_EXECUTE_ERROR_QUERY:
-       {
-               struct lttng_error_query *query;
-               const struct lttng_credentials cmd_creds = {
-                       .uid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.uid),
-                       .gid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.gid),
-               };
-               struct lttng_error_query_results *results = NULL;
-               size_t original_payload_size;
-               size_t payload_size;
-
-               ret = setup_empty_lttng_msg(cmd_ctx);
-               if (ret) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto setup_error;
-               }
-
-               original_payload_size = cmd_ctx->reply_payload.buffer.size;
-
-               ret = receive_lttng_error_query(
-                               cmd_ctx, *sock, sock_error, &query);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               ret = cmd_execute_error_query(&cmd_creds, query, &results,
-                               the_notification_thread_handle);
-               lttng_error_query_destroy(query);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               LTTNG_ASSERT(results);
-               ret = lttng_error_query_results_serialize(
-                               results, &cmd_ctx->reply_payload);
-               lttng_error_query_results_destroy(results);
-               if (ret) {
-                       ERR("Failed to serialize error query result set in reply to `execute error query` command");
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-
-               payload_size = cmd_ctx->reply_payload.buffer.size -
-                       original_payload_size;
-
-               update_lttng_msg(cmd_ctx, 0, payload_size);
-
-               ret = LTTNG_OK;
-
-               break;
-       }
-       default:
-               ret = LTTNG_ERR_UND;
-               break;
-       }
-
-error:
-       if (cmd_ctx->reply_payload.buffer.size == 0) {
-               DBG("Missing llm header, creating one.");
-               if (setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0) < 0) {
-                       goto setup_error;
-               }
-       }
-       /* Set return code */
-       ((struct lttcomm_lttng_msg *) (cmd_ctx->reply_payload.buffer.data))->ret_code = ret;
-setup_error:
-       if (cmd_ctx->session) {
-               session_unlock(cmd_ctx->session);
-               session_put(cmd_ctx->session);
-               cmd_ctx->session = NULL;
-       }
-       if (need_tracing_session) {
-               session_unlock_list();
-       }
-init_setup_error:
-       LTTNG_ASSERT(!rcu_read_ongoing());
-       return ret;
-}
-
-static int create_client_sock(void)
-{
-       int ret, client_sock;
-       const mode_t old_umask = umask(0);
-
-       /* Create client tool unix socket */
-       client_sock = lttcomm_create_unix_sock(
-                       the_config.client_unix_sock_path.value);
-       if (client_sock < 0) {
-               ERR("Create unix sock failed: %s",
-                               the_config.client_unix_sock_path.value);
-               ret = -1;
-               goto end;
-       }
-
-       /* Set the cloexec flag */
-       ret = utils_set_fd_cloexec(client_sock);
-       if (ret < 0) {
-               ERR("Unable to set the CLOEXEC flag on the client Unix socket (fd: %d). "
-                               "Continuing but note that the consumer daemon will have a "
-                               "reference to this socket on exec()", client_sock);
-       }
-
-       /* File permission MUST be 660 */
-       ret = chmod(the_config.client_unix_sock_path.value,
-                       S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
-       if (ret < 0) {
-               ERR("Set file permissions failed: %s",
-                               the_config.client_unix_sock_path.value);
-               PERROR("chmod");
-               (void) lttcomm_close_unix_sock(client_sock);
-               ret = -1;
-               goto end;
-       }
-       DBG("Created client socket (fd = %i)", client_sock);
-       ret = client_sock;
-end:
-       umask(old_umask);
-       return ret;
-}
-
-static void cleanup_client_thread(void *data)
-{
-       struct lttng_pipe *quit_pipe = data;
-
-       lttng_pipe_destroy(quit_pipe);
-}
-
-static void thread_init_cleanup(void *data)
-{
-       set_thread_status(false);
-}
-
-/*
- * This thread manages all client requests using the unix client socket for
- * communication.
- */
-static void *thread_manage_clients(void *data)
-{
-       int sock = -1, ret, i, pollfd, err = -1;
-       int sock_error;
-       uint32_t revents, nb_fd;
-       struct lttng_poll_event events;
-       const int client_sock = thread_state.client_sock;
-       struct lttng_pipe *quit_pipe = data;
-       const int thread_quit_pipe_fd = lttng_pipe_get_readfd(quit_pipe);
-       struct command_ctx cmd_ctx = {};
-
-       DBG("[thread] Manage client started");
-
-       lttng_payload_init(&cmd_ctx.reply_payload);
-
-       is_root = (getuid() == 0);
-
-       pthread_cleanup_push(thread_init_cleanup, NULL);
-
-       rcu_register_thread();
-
-       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_CMD);
-
-       health_code_update();
-
-       ret = lttcomm_listen_unix_sock(client_sock);
-       if (ret < 0) {
-               goto error_listen;
-       }
-
-       /*
-        * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
-        * more will be added to this poll set.
-        */
-       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               goto error_create_poll;
-       }
-
-       /* Add the application registration socket */
-       ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
-       if (ret < 0) {
-               goto error;
-       }
-
-       /* Add thread quit pipe */
-       ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               goto error;
-       }
-
-       /* Set state as running. */
-       set_thread_status(true);
-       pthread_cleanup_pop(0);
-
-       /* This testpoint is after we signal readiness to the parent. */
-       if (testpoint(sessiond_thread_manage_clients)) {
-               goto error;
-       }
-
-       if (testpoint(sessiond_thread_manage_clients_before_loop)) {
-               goto error;
-       }
-
-       health_code_update();
-
-       while (1) {
-               const struct cmd_completion_handler *cmd_completion_handler;
-
-               cmd_ctx.creds = (lttng_sock_cred) {
-                       .uid = UINT32_MAX,
-                       .gid = UINT32_MAX,
-               };
-               cmd_ctx.session = NULL;
-               lttng_payload_clear(&cmd_ctx.reply_payload);
-               cmd_ctx.lttng_msg_size = 0;
-
-               DBG("Accepting client command ...");
-
-               /* Infinite blocking call, waiting for transmission */
-       restart:
-               health_poll_entry();
-               ret = lttng_poll_wait(&events, -1);
-               health_poll_exit();
-               if (ret < 0) {
-                       /*
-                        * Restart interrupted system call.
-                        */
-                       if (errno == EINTR) {
-                               goto restart;
-                       }
-                       goto error;
-               }
-
-               nb_fd = ret;
-
-               for (i = 0; i < nb_fd; i++) {
-                       revents = LTTNG_POLL_GETEV(&events, i);
-                       pollfd = LTTNG_POLL_GETFD(&events, i);
-
-                       health_code_update();
-
-                       if (pollfd == thread_quit_pipe_fd) {
-                               err = 0;
-                               goto exit;
-                       } else {
-                               /* Event on the registration socket */
-                               if (revents & LPOLLIN) {
-                                       continue;
-                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                                       ERR("Client socket poll error");
-                                       goto error;
-                               } else {
-                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                                       goto error;
-                               }
-                       }
-               }
-
-               DBG("Wait for client response");
-
-               health_code_update();
-
-               sock = lttcomm_accept_unix_sock(client_sock);
-               if (sock < 0) {
-                       goto error;
-               }
-
-               /*
-                * Set the CLOEXEC flag. Return code is useless because either way, the
-                * show must go on.
-                */
-               (void) utils_set_fd_cloexec(sock);
-
-               /* Set socket option for credentials retrieval */
-               ret = lttcomm_setsockopt_creds_unix_sock(sock);
-               if (ret < 0) {
-                       goto error;
-               }
-
-               health_code_update();
-
-               /*
-                * Data is received from the lttng client. The struct
-                * lttcomm_session_msg (lsm) contains the command and data request of
-                * the client.
-                */
-               DBG("Receiving data from client ...");
-               ret = lttcomm_recv_creds_unix_sock(sock, &cmd_ctx.lsm,
-                               sizeof(struct lttcomm_session_msg), &cmd_ctx.creds);
-               if (ret != sizeof(struct lttcomm_session_msg)) {
-                       DBG("Incomplete recv() from client... continuing");
-                       ret = close(sock);
-                       if (ret) {
-                               PERROR("close");
-                       }
-                       sock = -1;
-                       continue;
-               }
-
-               health_code_update();
-
-               // TODO: Validate cmd_ctx, including sanity checks for
-               // security purposes.
-
-               rcu_thread_online();
-               /*
-                * This function dispatches the work to the kernel or userspace tracer
-                * libs and fills the lttcomm_lttng_msg data structure with all the
-                * information needed by the client. The command context struct contains
-                * everything this function may need.
-                */
-               ret = process_client_msg(&cmd_ctx, &sock, &sock_error);
-               rcu_thread_offline();
-               if (ret < 0) {
-                       if (sock >= 0) {
-                               ret = close(sock);
-                               if (ret) {
-                                       PERROR("close");
-                               }
-                       }
-                       sock = -1;
-                       /*
-                        * TODO: Inform client somehow of the fatal error. At
-                        * this point, ret < 0 means that a zmalloc failed
-                        * (ENOMEM). Error detected but still accept
-                        * command, unless a socket error has been
-                        * detected.
-                        */
-                       continue;
-               }
-
-               if (ret < LTTNG_OK || ret >= LTTNG_ERR_NR) {
-                       WARN("Command returned an invalid status code, returning unknown error: "
-                                       "command type = %s (%d), ret = %d",
-                                       lttcomm_sessiond_command_str(cmd_ctx.lsm.cmd_type),
-                                       cmd_ctx.lsm.cmd_type, ret);
-                       ret = LTTNG_ERR_UNK;
-               }
-
-               cmd_completion_handler = cmd_pop_completion_handler();
-               if (cmd_completion_handler) {
-                       enum lttng_error_code completion_code;
-
-                       completion_code = cmd_completion_handler->run(
-                                       cmd_completion_handler->data);
-                       if (completion_code != LTTNG_OK) {
-                               continue;
-                       }
-               }
-
-               health_code_update();
-
-               if (sock >= 0) {
-                       struct lttng_payload_view view =
-                                       lttng_payload_view_from_payload(
-                                                       &cmd_ctx.reply_payload,
-                                                       0, -1);
-                       struct lttcomm_lttng_msg *llm = (typeof(
-                                       llm)) cmd_ctx.reply_payload.buffer.data;
-
-                       LTTNG_ASSERT(cmd_ctx.reply_payload.buffer.size >= sizeof(*llm));
-                       LTTNG_ASSERT(cmd_ctx.lttng_msg_size == cmd_ctx.reply_payload.buffer.size);
-
-                       llm->fd_count = lttng_payload_view_get_fd_handle_count(&view);
-
-                       DBG("Sending response (size: %d, retcode: %s (%d))",
-                                       cmd_ctx.lttng_msg_size,
-                                       lttng_strerror(-llm->ret_code),
-                                       llm->ret_code);
-                       ret = send_unix_sock(sock, &view);
-                       if (ret < 0) {
-                               ERR("Failed to send data back to client");
-                       }
-
-                       /* End of transmission */
-                       ret = close(sock);
-                       if (ret) {
-                               PERROR("close");
-                       }
-               }
-               sock = -1;
-
-               health_code_update();
-       }
-
-exit:
-error:
-       if (sock >= 0) {
-               ret = close(sock);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-
-       lttng_poll_clean(&events);
-
-error_listen:
-error_create_poll:
-       unlink(the_config.client_unix_sock_path.value);
-       ret = close(client_sock);
-       if (ret) {
-               PERROR("close");
-       }
-
-       if (err) {
-               health_error();
-               ERR("Health error occurred in %s", __func__);
-       }
-
-       health_unregister(the_health_sessiond);
-
-       DBG("Client thread dying");
-       lttng_payload_reset(&cmd_ctx.reply_payload);
-       rcu_unregister_thread();
-       return NULL;
-}
-
-static
-bool shutdown_client_thread(void *thread_data)
-{
-       struct lttng_pipe *client_quit_pipe = thread_data;
-       const int write_fd = lttng_pipe_get_writefd(client_quit_pipe);
-
-       return notify_thread_pipe(write_fd) == 1;
-}
-
-struct lttng_thread *launch_client_thread(void)
-{
-       bool thread_running;
-       struct lttng_pipe *client_quit_pipe;
-       struct lttng_thread *thread = NULL;
-       int client_sock_fd = -1;
-
-       sem_init(&thread_state.ready, 0, 0);
-       client_quit_pipe = lttng_pipe_open(FD_CLOEXEC);
-       if (!client_quit_pipe) {
-               goto error;
-       }
-
-       client_sock_fd = create_client_sock();
-       if (client_sock_fd < 0) {
-               goto error;
-       }
-
-       thread_state.client_sock = client_sock_fd;
-       thread = lttng_thread_create("Client management",
-                       thread_manage_clients,
-                       shutdown_client_thread,
-                       cleanup_client_thread,
-                       client_quit_pipe);
-       if (!thread) {
-               goto error;
-       }
-       /* The client thread now owns the client sock fd and the quit pipe. */
-       client_sock_fd = -1;
-       client_quit_pipe = NULL;
-
-       /*
-        * This thread is part of the threads that need to be fully
-        * initialized before the session daemon is marked as "ready".
-        */
-       thread_running = wait_thread_status();
-       if (!thread_running) {
-               goto error;
-       }
-       return thread;
-error:
-       if (client_sock_fd >= 0) {
-               if (close(client_sock_fd)) {
-                       PERROR("Failed to close client socket");
-               }
-       }
-       lttng_thread_put(thread);
-       cleanup_client_thread(client_quit_pipe);
-       return NULL;
-}
diff --git a/src/bin/lttng-sessiond/client.cpp b/src/bin/lttng-sessiond/client.cpp
new file mode 100644 (file)
index 0000000..154fa4a
--- /dev/null
@@ -0,0 +1,2919 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include "common/buffer-view.h"
+#include "common/compat/socket.h"
+#include "common/dynamic-array.h"
+#include "common/dynamic-buffer.h"
+#include "common/fd-handle.h"
+#include "common/payload-view.h"
+#include "common/payload.h"
+#include "common/sessiond-comm/sessiond-comm.h"
+#include "lttng/lttng-error.h"
+#include "lttng/tracker.h"
+#include <common/compat/getenv.h>
+#include <common/tracker.h>
+#include <common/unix.h>
+#include <common/utils.h>
+#include <lttng/error-query-internal.h>
+#include <lttng/event-internal.h>
+#include <lttng/session-descriptor-internal.h>
+#include <lttng/session-internal.h>
+#include <lttng/userspace-probe-internal.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "agent-thread.h"
+#include "clear.h"
+#include "client.h"
+#include "cmd.h"
+#include "health-sessiond.h"
+#include "kernel.h"
+#include "lttng-sessiond.h"
+#include "manage-consumer.h"
+#include "save.h"
+#include "testpoint.h"
+#include "utils.h"
+
+static bool is_root;
+
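+/*
+ * Synchronization between the client thread and the thread that launches it:
+ * the client thread posts 'ready' once its initialization has either
+ * succeeded or failed, and 'running' carries the outcome.
+ */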
+static struct thread_state {
+       sem_t ready;
+       bool running;
+       int client_sock;
+} thread_state;
+
+static void set_thread_status(bool running)
+{
+       DBG("Marking client thread's state as %s", running ? "running" : "error");
+       thread_state.running = running;
+       sem_post(&thread_state.ready);
+}
+
+static bool wait_thread_status(void)
+{
+       DBG("Waiting for client thread to be ready");
+       sem_wait(&thread_state.ready);
+       if (thread_state.running) {
+               DBG("Client thread is ready");
+       } else {
+               ERR("Initialization of client thread failed");
+       }
+
+       return thread_state.running;
+}
+
+/*
+ * Set up the outgoing data buffer for the response (llm) by allocating the
+ * right amount of memory and copying the original information from the lsm
+ * structure.
+ *
+ * Return 0 on success, negative value on error.
+ */
+static int setup_lttng_msg(struct command_ctx *cmd_ctx,
+       const void *payload_buf, size_t payload_len,
+       const void *cmd_header_buf, size_t cmd_header_len)
+{
+       int ret = 0;
+       const size_t header_len = sizeof(struct lttcomm_lttng_msg);
+       const size_t total_msg_size = header_len + cmd_header_len + payload_len;
+       lttcomm_lttng_msg llm {};
+
+       llm.cmd_type = cmd_ctx->lsm.cmd_type;
+       llm.pid = (uint32_t) cmd_ctx->lsm.domain.attr.pid;
+       llm.cmd_header_size = (uint32_t) cmd_header_len;
+       llm.data_size = (uint32_t) payload_len;
+
+       ret = lttng_dynamic_buffer_set_size(&cmd_ctx->reply_payload.buffer, 0);
+       if (ret) {
+               goto end;
+       }
+
+       lttng_dynamic_pointer_array_clear(&cmd_ctx->reply_payload._fd_handles);
+
+       cmd_ctx->lttng_msg_size = total_msg_size;
+
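+       /*
+        * The reply is assembled as the lttcomm_lttng_msg header, followed by
+        * the optional command header, followed by the command-specific
+        * payload.
+        */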
+       /* Append reply header. */
+       ret = lttng_dynamic_buffer_append(
+                       &cmd_ctx->reply_payload.buffer, &llm, sizeof(llm));
+       if (ret) {
+               goto end;
+       }
+
+       /* Append command header. */
+       if (cmd_header_len) {
+               ret = lttng_dynamic_buffer_append(
+                               &cmd_ctx->reply_payload.buffer, cmd_header_buf,
+                               cmd_header_len);
+               if (ret) {
+                       goto end;
+               }
+       }
+
+       /* Append payload. */
+       if (payload_len) {
+               ret = lttng_dynamic_buffer_append(
+                               &cmd_ctx->reply_payload.buffer, payload_buf,
+                               payload_len);
+               if (ret) {
+                       goto end;
+               }
+       }
+
+end:
+       return ret;
+}
+
+static int setup_empty_lttng_msg(struct command_ctx *cmd_ctx)
+{
+       int ret;
+       const struct lttcomm_lttng_msg llm = {};
+
+       ret = lttng_dynamic_buffer_set_size(&cmd_ctx->reply_payload.buffer, 0);
+       if (ret) {
+               goto end;
+       }
+
+       /* Append place-holder reply header. */
+       ret = lttng_dynamic_buffer_append(
+                       &cmd_ctx->reply_payload.buffer, &llm, sizeof(llm));
+       if (ret) {
+               goto end;
+       }
+
+       cmd_ctx->lttng_msg_size = sizeof(llm);
+end:
+       return ret;
+}
+
+static void update_lttng_msg(struct command_ctx *cmd_ctx, size_t cmd_header_len,
+               size_t payload_len)
+{
+       const size_t header_len = sizeof(struct lttcomm_lttng_msg);
+       const size_t total_msg_size = header_len + cmd_header_len + payload_len;
+       struct lttcomm_lttng_msg *p_llm;
+       lttcomm_lttng_msg llm {};
+
+       llm.cmd_type = cmd_ctx->lsm.cmd_type;
+       llm.pid = (uint32_t) cmd_ctx->lsm.domain.attr.pid;
+       llm.cmd_header_size = (uint32_t) cmd_header_len;
+       llm.data_size = (uint32_t) payload_len;
+
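+       /*
+        * The reply header was already appended (possibly as a place-holder,
+        * see setup_empty_lttng_msg()); overwrite it in place now that the
+        * final command header and payload sizes are known.
+        */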
+       LTTNG_ASSERT(cmd_ctx->reply_payload.buffer.size >= sizeof(llm));
+
+       p_llm = (typeof(p_llm)) cmd_ctx->reply_payload.buffer.data;
+
+       /* Update existing header. */
+       memcpy(p_llm, &llm, sizeof(llm));
+
+       cmd_ctx->lttng_msg_size = total_msg_size;
+}
+
+/*
+ * Start the thread_manage_consumer. This must be done after a lttng-consumerd
+ * exec or it will fail.
+ */
+static int spawn_consumer_thread(struct consumer_data *consumer_data)
+{
+       return launch_consumer_management_thread(consumer_data) ? 0 : -1;
+}
+
+/*
+ * Fork and exec a consumer daemon (consumerd).
+ *
+ * Return the child pid on success, a negative value on error.
+ */
+static pid_t spawn_consumerd(struct consumer_data *consumer_data)
+{
+       int ret;
+       pid_t pid;
+       const char *consumer_to_use;
+       const char *verbosity;
+       struct stat st;
+
+       DBG("Spawning consumerd");
+
+       pid = fork();
+       if (pid == 0) {
+               /*
+                * Exec consumerd.
+                */
+               if (the_config.verbose_consumer) {
+                       verbosity = "--verbose";
+               } else if (lttng_opt_quiet) {
+                       verbosity = "--quiet";
+               } else {
+                       verbosity = "";
+               }
+
+               switch (consumer_data->type) {
+               case LTTNG_CONSUMER_KERNEL:
+                       /*
+                        * Find out which consumerd to execute. We will first try the
+                        * 64-bit path, then the sessiond's installation directory, and
+                        * finally fall back to the 32-bit one.
+                        */
+                       DBG3("Looking for a kernel consumer at these locations:");
+                       DBG3("  1) %s", the_config.consumerd64_bin_path.value ? : "NULL");
+                       DBG3("  2) %s/%s", INSTALL_BIN_PATH, DEFAULT_CONSUMERD_FILE);
+                       DBG3("  3) %s", the_config.consumerd32_bin_path.value ? : "NULL");
+                       if (stat(the_config.consumerd64_bin_path.value, &st) == 0) {
+                               DBG3("Found location #1");
+                               consumer_to_use = the_config.consumerd64_bin_path.value;
+                       } else if (stat(INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE, &st) == 0) {
+                               DBG3("Found location #2");
+                               consumer_to_use = INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE;
+                       } else if (the_config.consumerd32_bin_path.value &&
+                                       stat(the_config.consumerd32_bin_path.value, &st) == 0) {
+                               DBG3("Found location #3");
+                               consumer_to_use = the_config.consumerd32_bin_path.value;
+                       } else {
+                               DBG("Could not find any valid consumerd executable");
+                               ret = -EINVAL;
+                               goto error;
+                       }
+                       DBG("Using kernel consumer at: %s",  consumer_to_use);
+                       (void) execl(consumer_to_use, "lttng-consumerd",
+                                       verbosity, "-k", "--consumerd-cmd-sock",
+                                       consumer_data->cmd_unix_sock_path,
+                                       "--consumerd-err-sock",
+                                       consumer_data->err_unix_sock_path,
+                                       "--group",
+                                       the_config.tracing_group_name.value,
+                                       NULL);
+                       break;
+               case LTTNG_CONSUMER64_UST:
+               {
+                       if (the_config.consumerd64_lib_dir.value) {
+                               const char *tmp;
+                               size_t tmplen;
+                               char *tmpnew;
+
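+                               /*
+                                * Prepend the 64-bit consumer's library
+                                * directory to LD_LIBRARY_PATH before the
+                                * exec so the consumer loads the matching
+                                * libraries.
+                                */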
+                               tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
+                               if (!tmp) {
+                                       tmp = "";
+                               }
+                               tmplen = strlen(the_config.consumerd64_lib_dir.value) + 1 /* : */ + strlen(tmp);
+                               tmpnew = (char *) zmalloc(tmplen + 1 /* \0 */);
+                               if (!tmpnew) {
+                                       ret = -ENOMEM;
+                                       goto error;
+                               }
+                               strcat(tmpnew, the_config.consumerd64_lib_dir.value);
+                               if (tmp[0] != '\0') {
+                                       strcat(tmpnew, ":");
+                                       strcat(tmpnew, tmp);
+                               }
+                               ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
+                               free(tmpnew);
+                               if (ret) {
+                                       ret = -errno;
+                                       goto error;
+                               }
+                       }
+                       DBG("Using 64-bit UST consumer at: %s",
+                                       the_config.consumerd64_bin_path.value);
+                       (void) execl(the_config.consumerd64_bin_path.value,
+                                       "lttng-consumerd", verbosity, "-u",
+                                       "--consumerd-cmd-sock",
+                                       consumer_data->cmd_unix_sock_path,
+                                       "--consumerd-err-sock",
+                                       consumer_data->err_unix_sock_path,
+                                       "--group",
+                                       the_config.tracing_group_name.value,
+                                       NULL);
+                       break;
+               }
+               case LTTNG_CONSUMER32_UST:
+               {
+                       if (the_config.consumerd32_lib_dir.value) {
+                               const char *tmp;
+                               size_t tmplen;
+                               char *tmpnew;
+
+                               tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
+                               if (!tmp) {
+                                       tmp = "";
+                               }
+                               tmplen = strlen(the_config.consumerd32_lib_dir.value) + 1 /* : */ + strlen(tmp);
+                               tmpnew = (char *) zmalloc(tmplen + 1 /* \0 */);
+                               if (!tmpnew) {
+                                       ret = -ENOMEM;
+                                       goto error;
+                               }
+                               strcat(tmpnew, the_config.consumerd32_lib_dir.value);
+                               if (tmp[0] != '\0') {
+                                       strcat(tmpnew, ":");
+                                       strcat(tmpnew, tmp);
+                               }
+                               ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
+                               free(tmpnew);
+                               if (ret) {
+                                       ret = -errno;
+                                       goto error;
+                               }
+                       }
+                       DBG("Using 32-bit UST consumer at: %s",
+                                       the_config.consumerd32_bin_path.value);
+                       (void) execl(the_config.consumerd32_bin_path.value,
+                                       "lttng-consumerd", verbosity, "-u",
+                                       "--consumerd-cmd-sock",
+                                       consumer_data->cmd_unix_sock_path,
+                                       "--consumerd-err-sock",
+                                       consumer_data->err_unix_sock_path,
+                                       "--group",
+                                       the_config.tracing_group_name.value,
+                                       NULL);
+                       break;
+               }
+               default:
+                       ERR("unknown consumer type");
+                       errno = 0;
+               }
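+               /*
+                * execl() only returns on failure, in which case errno is
+                * set; the unknown consumer type case above clears errno so
+                * that no misleading error is reported.
+                */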
+               if (errno != 0) {
+                       PERROR("Consumer execl()");
+               }
+               /* Reaching this point, we got a failure on our execl(). */
+               exit(EXIT_FAILURE);
+       } else if (pid > 0) {
+               ret = pid;
+       } else {
+               PERROR("start consumer fork");
+               ret = -errno;
+       }
+error:
+       return ret;
+}
+
+/*
+ * Spawn the consumer daemon (consumerd) and the session daemon thread that manages it.
+ */
+static int start_consumerd(struct consumer_data *consumer_data)
+{
+       int ret;
+
+       /*
+        * Set the listen() state on the socket since there is a possible race
+        * between the exec() of the consumer daemon and this call if placed in the
+        * consumer thread. See bug #366 for more details.
+        */
+       ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
+       if (ret < 0) {
+               goto error;
+       }
+
+       pthread_mutex_lock(&consumer_data->pid_mutex);
+       if (consumer_data->pid != 0) {
+               pthread_mutex_unlock(&consumer_data->pid_mutex);
+               goto end;
+       }
+
+       ret = spawn_consumerd(consumer_data);
+       if (ret < 0) {
+               ERR("Spawning consumerd failed");
+               pthread_mutex_unlock(&consumer_data->pid_mutex);
+               goto error;
+       }
+
+       /* Setting up the consumer_data pid */
+       consumer_data->pid = ret;
+       DBG2("Consumer pid %d", consumer_data->pid);
+       pthread_mutex_unlock(&consumer_data->pid_mutex);
+
+       DBG2("Spawning consumer control thread");
+       ret = spawn_consumer_thread(consumer_data);
+       if (ret < 0) {
+               ERR("Fatal error spawning consumer control thread");
+               goto error;
+       }
+
+end:
+       return 0;
+
+error:
+       /* Cleanup already created sockets on error. */
+       if (consumer_data->err_sock >= 0) {
+               int err;
+
+               err = close(consumer_data->err_sock);
+               if (err < 0) {
+                       PERROR("close consumer data error socket");
+               }
+       }
+       return ret;
+}
+
+/*
+ * Copy consumer output from the tracing session to the domain session. The
+ * function also applies the per-domain adjustments to the trace files'
+ * destination directory.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
+ */
+static int copy_session_consumer(int domain, struct ltt_session *session)
+{
+       int ret;
+       const char *dir_name;
+       struct consumer_output *consumer;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(session->consumer);
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               DBG3("Copying tracing session consumer output in kernel session");
+               /*
+                * XXX: We should audit the session creation and what this function
+                * does "extra" in order to avoid a destroy since this function is used
+                * in the domain session creation (kernel and ust) only. Same for UST
+                * domain.
+                */
+               if (session->kernel_session->consumer) {
+                       consumer_output_put(session->kernel_session->consumer);
+               }
+               session->kernel_session->consumer =
+                       consumer_copy_output(session->consumer);
+               /* Ease our life a bit for the next part */
+               consumer = session->kernel_session->consumer;
+               dir_name = DEFAULT_KERNEL_TRACE_DIR;
+               break;
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_PYTHON:
+       case LTTNG_DOMAIN_UST:
+               DBG3("Copying tracing session consumer output in UST session");
+               if (session->ust_session->consumer) {
+                       consumer_output_put(session->ust_session->consumer);
+               }
+               session->ust_session->consumer =
+                       consumer_copy_output(session->consumer);
+               /* Ease our life a bit for the next part */
+               consumer = session->ust_session->consumer;
+               dir_name = DEFAULT_UST_TRACE_DIR;
+               break;
+       default:
+               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
+               goto error;
+       }
+
+       /* Append correct directory to subdir */
+       ret = lttng_strncpy(consumer->domain_subdir, dir_name,
+                       sizeof(consumer->domain_subdir));
+       if (ret) {
+               ret = LTTNG_ERR_UNK;
+               goto error;
+       }
+       DBG3("Copy session consumer subdir %s", consumer->domain_subdir);
+       ret = LTTNG_OK;
+
+error:
+       return ret;
+}
+
+/*
+ * Create a UST session and add it to the session's UST list.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
+ */
+static int create_ust_session(struct ltt_session *session,
+               const struct lttng_domain *domain)
+{
+       int ret;
+       struct ltt_ust_session *lus = NULL;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(domain);
+       LTTNG_ASSERT(session->consumer);
+
+       switch (domain->type) {
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_PYTHON:
+       case LTTNG_DOMAIN_UST:
+               break;
+       default:
+               ERR("Unknown UST domain on create session %d", domain->type);
+               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
+               goto error;
+       }
+
+       DBG("Creating UST session");
+
+       lus = trace_ust_create_session(session->id);
+       if (lus == NULL) {
+               ret = LTTNG_ERR_UST_SESS_FAIL;
+               goto error;
+       }
+
+       lus->uid = session->uid;
+       lus->gid = session->gid;
+       lus->output_traces = session->output_traces;
+       lus->snapshot_mode = session->snapshot_mode;
+       lus->live_timer_interval = session->live_timer;
+       session->ust_session = lus;
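+       /*
+        * Derive the UST-specific shm path by appending "/ust" to the
+        * session's shm path.
+        */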
+       if (session->shm_path[0]) {
+               strncpy(lus->root_shm_path, session->shm_path,
+                       sizeof(lus->root_shm_path));
+               lus->root_shm_path[sizeof(lus->root_shm_path) - 1] = '\0';
+               strncpy(lus->shm_path, session->shm_path,
+                       sizeof(lus->shm_path));
+               lus->shm_path[sizeof(lus->shm_path) - 1] = '\0';
+               strncat(lus->shm_path, "/ust",
+                       sizeof(lus->shm_path) - strlen(lus->shm_path) - 1);
+       }
+       /* Copy session output to the newly created UST session */
+       ret = copy_session_consumer(domain->type, session);
+       if (ret != LTTNG_OK) {
+               goto error;
+       }
+
+       return LTTNG_OK;
+
+error:
+       free(lus);
+       session->ust_session = NULL;
+       return ret;
+}
+
+/*
+ * Create a kernel tracer session then create the default channel.
+ */
+static int create_kernel_session(struct ltt_session *session)
+{
+       int ret;
+
+       DBG("Creating kernel session");
+
+       ret = kernel_create_session(session);
+       if (ret < 0) {
+               ret = LTTNG_ERR_KERN_SESS_FAIL;
+               goto error_create;
+       }
+
+       /* Code flow safety */
+       LTTNG_ASSERT(session->kernel_session);
+
+       /* Copy session output to the newly created Kernel session */
+       ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
+       if (ret != LTTNG_OK) {
+               goto error;
+       }
+
+       session->kernel_session->uid = session->uid;
+       session->kernel_session->gid = session->gid;
+       session->kernel_session->output_traces = session->output_traces;
+       session->kernel_session->snapshot_mode = session->snapshot_mode;
+       session->kernel_session->is_live_session = session->live_timer != 0;
+
+       return LTTNG_OK;
+
+error:
+       trace_kernel_destroy_session(session->kernel_session);
+       session->kernel_session = NULL;
+error_create:
+       return ret;
+}
+
+/*
+ * Count the number of sessions permitted for the given uid/gid.
+ */
+static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
+{
+       unsigned int i = 0;
+       struct ltt_session *session;
+       const struct ltt_session_list *session_list = session_get_list();
+
+       DBG("Counting number of available sessions for UID %d", uid);
+       cds_list_for_each_entry(session, &session_list->head, list) {
+               if (!session_get(session)) {
+                       continue;
+               }
+               session_lock(session);
+               /* Only count the sessions the user can control. */
+               if (session_access_ok(session, uid) &&
+                               !session->destroyed) {
+                       i++;
+               }
+               session_unlock(session);
+               session_put(session);
+       }
+       return i;
+}
+
+static int receive_userspace_probe(struct command_ctx *cmd_ctx, int sock,
+               int *sock_error, struct lttng_event *event)
+{
+       int fd = -1, ret;
+       struct lttng_userspace_probe_location *probe_location;
+       struct lttng_payload probe_location_payload;
+       struct fd_handle *handle = NULL;
+
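+       /*
+        * The client first sends the serialized probe location, then the
+        * file descriptor of the target binary.
+        */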
+       /*
+        * Create a payload to store the serialized version of the probe
+        * location.
+        */
+       lttng_payload_init(&probe_location_payload);
+
+       ret = lttng_dynamic_buffer_set_size(&probe_location_payload.buffer,
+                       cmd_ctx->lsm.u.enable.userspace_probe_location_len);
+       if (ret) {
+               ret = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       /*
+        * Receive the probe location.
+        */
+       ret = lttcomm_recv_unix_sock(sock, probe_location_payload.buffer.data,
+                       probe_location_payload.buffer.size);
+       if (ret <= 0) {
+               DBG("Nothing recv() from client var len data... continuing");
+               *sock_error = 1;
+               ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
+               goto error;
+       }
+
+       /*
+        * Receive the file descriptor to the target binary from the client.
+        */
+       DBG("Receiving userspace probe target FD from client ...");
+       ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
+       if (ret <= 0) {
+               DBG("Nothing recv() from client userspace probe fd... continuing");
+               *sock_error = 1;
+               ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
+               goto error;
+       }
+
+       handle = fd_handle_create(fd);
+       if (!handle) {
+               ret = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       /* Transferred to the handle. */
+       fd = -1;
+
+       ret = lttng_payload_push_fd_handle(&probe_location_payload, handle);
+       if (ret) {
+               ERR("Failed to add userspace probe file descriptor to payload");
+               ret = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       fd_handle_put(handle);
+       handle = NULL;
+
+       {
+               struct lttng_payload_view view = lttng_payload_view_from_payload(
+                       &probe_location_payload, 0, -1);
+
+               /* Extract the probe location from the serialized version. */
+               ret = lttng_userspace_probe_location_create_from_payload(
+                               &view, &probe_location);
+       }
+       if (ret < 0) {
+               WARN("Failed to create a userspace probe location from the received buffer");
+               ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
+               goto error;
+       }
+
+       /* Attach the probe location to the event. */
+       ret = lttng_event_set_userspace_probe_location(event, probe_location);
+       if (ret) {
+               ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
+               goto error;
+       }
+
+error:
+       if (fd >= 0) {
+               if (close(fd)) {
+                       PERROR("Failed to close userspace probe location binary fd");
+               }
+       }
+
+       fd_handle_put(handle);
+       lttng_payload_reset(&probe_location_payload);
+       return ret;
+}
+
+static enum lttng_error_code receive_lttng_trigger(struct command_ctx *cmd_ctx,
+               int sock,
+               int *sock_error,
+               struct lttng_trigger **_trigger)
+{
+       int ret;
+       size_t trigger_len;
+       ssize_t sock_recv_len;
+       enum lttng_error_code ret_code;
+       struct lttng_payload trigger_payload;
+       struct lttng_trigger *trigger = NULL;
+
+       lttng_payload_init(&trigger_payload);
+       trigger_len = (size_t) cmd_ctx->lsm.u.trigger.length;
+       ret = lttng_dynamic_buffer_set_size(
+                       &trigger_payload.buffer, trigger_len);
+       if (ret) {
+               ret_code = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       sock_recv_len = lttcomm_recv_unix_sock(
+                       sock, trigger_payload.buffer.data, trigger_len);
+       if (sock_recv_len < 0 || sock_recv_len != trigger_len) {
+               ERR("Failed to receive trigger in command payload");
+               *sock_error = 1;
+               ret_code = LTTNG_ERR_INVALID_PROTOCOL;
+               goto end;
+       }
+
+       /* Receive fds, if any. */
+       if (cmd_ctx->lsm.fd_count > 0) {
+               sock_recv_len = lttcomm_recv_payload_fds_unix_sock(
+                               sock, cmd_ctx->lsm.fd_count, &trigger_payload);
+               if (sock_recv_len > 0 &&
+                               sock_recv_len != cmd_ctx->lsm.fd_count * sizeof(int)) {
+                       ERR("Failed to receive all file descriptors for trigger in command payload: expected fd count = %u, ret = %d",
+                                       cmd_ctx->lsm.fd_count, (int) sock_recv_len);
+                       ret_code = LTTNG_ERR_INVALID_PROTOCOL;
+                       *sock_error = 1;
+                       goto end;
+               } else if (sock_recv_len <= 0) {
+                       ERR("Failed to receive file descriptors for trigger in command payload: expected fd count = %u, ret = %d",
+                                       cmd_ctx->lsm.fd_count, (int) sock_recv_len);
+                       ret_code = LTTNG_ERR_FATAL;
+                       *sock_error = 1;
+                       goto end;
+               }
+       }
+
+       /* Deserialize trigger. */
+       {
+               struct lttng_payload_view view =
+                               lttng_payload_view_from_payload(
+                                               &trigger_payload, 0, -1);
+
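+               /*
+                * A valid trigger must consume exactly trigger_len bytes of
+                * the payload; anything else indicates a malformed trigger.
+                */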
+               if (lttng_trigger_create_from_payload(&view, &trigger) !=
+                               trigger_len) {
+                       ERR("Invalid trigger received as part of command payload");
+                       ret_code = LTTNG_ERR_INVALID_TRIGGER;
+                       lttng_trigger_put(trigger);
+                       goto end;
+               }
+       }
+
+       *_trigger = trigger;
+       ret_code = LTTNG_OK;
+
+end:
+       lttng_payload_reset(&trigger_payload);
+       return ret_code;
+}
+
+static enum lttng_error_code receive_lttng_error_query(struct command_ctx *cmd_ctx,
+               int sock,
+               int *sock_error,
+               struct lttng_error_query **_query)
+{
+       int ret;
+       size_t query_len;
+       ssize_t sock_recv_len;
+       enum lttng_error_code ret_code;
+       struct lttng_payload query_payload;
+       struct lttng_error_query *query = NULL;
+
+       lttng_payload_init(&query_payload);
+       query_len = (size_t) cmd_ctx->lsm.u.error_query.length;
+       ret = lttng_dynamic_buffer_set_size(&query_payload.buffer, query_len);
+       if (ret) {
+               ret_code = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       sock_recv_len = lttcomm_recv_unix_sock(
+                       sock, query_payload.buffer.data, query_len);
+       if (sock_recv_len < 0 || sock_recv_len != query_len) {
+               ERR("Failed to receive error query in command payload");
+               *sock_error = 1;
+               ret_code = LTTNG_ERR_INVALID_PROTOCOL;
+               goto end;
+       }
+
+       /* Receive fds, if any. */
+       if (cmd_ctx->lsm.fd_count > 0) {
+               sock_recv_len = lttcomm_recv_payload_fds_unix_sock(
+                               sock, cmd_ctx->lsm.fd_count, &query_payload);
+               if (sock_recv_len > 0 &&
+                               sock_recv_len != cmd_ctx->lsm.fd_count * sizeof(int)) {
+                       ERR("Failed to receive all file descriptors for error query in command payload: expected fd count = %u, ret = %d",
+                                       cmd_ctx->lsm.fd_count, (int) sock_recv_len);
+                       ret_code = LTTNG_ERR_INVALID_PROTOCOL;
+                       *sock_error = 1;
+                       goto end;
+               } else if (sock_recv_len <= 0) {
+                       ERR("Failed to receive file descriptors for error query in command payload: expected fd count = %u, ret = %d",
+                                       cmd_ctx->lsm.fd_count, (int) sock_recv_len);
+                       ret_code = LTTNG_ERR_FATAL;
+                       *sock_error = 1;
+                       goto end;
+               }
+       }
+
+       /* Deserialize error query. */
+       {
+               struct lttng_payload_view view =
+                               lttng_payload_view_from_payload(
+                                               &query_payload, 0, -1);
+
+               if (lttng_error_query_create_from_payload(&view, &query) !=
+                               query_len) {
+                       ERR("Invalid error query received as part of command payload");
+                       ret_code = LTTNG_ERR_INVALID_PROTOCOL;
+                       goto end;
+               }
+       }
+
+       *_query = query;
+       ret_code = LTTNG_OK;
+
+end:
+       lttng_payload_reset(&query_payload);
+       return ret_code;
+}
+
+/*
+ * Version of setup_lttng_msg() without command header.
+ */
+static int setup_lttng_msg_no_cmd_header(struct command_ctx *cmd_ctx,
+       void *payload_buf, size_t payload_len)
+{
+       return setup_lttng_msg(cmd_ctx, payload_buf, payload_len, NULL, 0);
+}
+
+/*
+ * Check if the current kernel tracer supports the session rotation feature.
+ * Return 1 if it does, 0 otherwise.
+ */
+static int check_rotate_compatible(void)
+{
+       int ret = 1;
+
+       if (the_kernel_tracer_version.major != 2 ||
+                       the_kernel_tracer_version.minor < 11) {
+               DBG("Kernel tracer version is not compatible with the rotation feature");
+               ret = 0;
+       }
+
+       return ret;
+}
+
+/*
+ * Send data on a unix socket using the liblttsessiondcomm API.
+ *
+ * Return lttcomm error code.
+ */
+static int send_unix_sock(int sock, struct lttng_payload_view *view)
+{
+       int ret;
+       const int fd_count = lttng_payload_view_get_fd_handle_count(view);
+
+       /* Check valid length */
+       if (view->buffer.size == 0) {
+               ret = -1;
+               goto end;
+       }
+
+       ret = lttcomm_send_unix_sock(
+                       sock, view->buffer.data, view->buffer.size);
+       if (ret < 0) {
+               goto end;
+       }
+
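+       /* Send the payload's file descriptors, if any, after the data. */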
+       if (fd_count > 0) {
+               ret = lttcomm_send_payload_view_fds_unix_sock(sock, view);
+               if (ret < 0) {
+                       goto end;
+               }
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Process the command requested by the lttng client within the command
+ * context structure. This function makes sure that the return structure (llm)
+ * is set and ready for transmission before returning.
+ *
+ * Return any error encountered or 0 for success.
+ *
+ * "sock" is only used for special-case var. len data.
+ * A command may assume the ownership of the socket, in which case its value
+ * should be set to -1.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
+ */
+static int process_client_msg(struct command_ctx *cmd_ctx, int *sock,
+               int *sock_error)
+{
+       int ret = LTTNG_OK;
+       bool need_tracing_session = true;
+       bool need_domain;
+       bool need_consumerd;
+
+       DBG("Processing client command '%s\' (%d)",
+               lttcomm_sessiond_command_str((lttcomm_sessiond_command) cmd_ctx->lsm.cmd_type),
+               cmd_ctx->lsm.cmd_type);
+
+       LTTNG_ASSERT(!rcu_read_ongoing());
+
+       *sock_error = 0;
+
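+       /* Determine whether the command requires a tracing domain. */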
+       switch (cmd_ctx->lsm.cmd_type) {
+       case LTTNG_CREATE_SESSION_EXT:
+       case LTTNG_DESTROY_SESSION:
+       case LTTNG_LIST_SESSIONS:
+       case LTTNG_LIST_DOMAINS:
+       case LTTNG_START_TRACE:
+       case LTTNG_STOP_TRACE:
+       case LTTNG_DATA_PENDING:
+       case LTTNG_SNAPSHOT_ADD_OUTPUT:
+       case LTTNG_SNAPSHOT_DEL_OUTPUT:
+       case LTTNG_SNAPSHOT_LIST_OUTPUT:
+       case LTTNG_SNAPSHOT_RECORD:
+       case LTTNG_SAVE_SESSION:
+       case LTTNG_SET_SESSION_SHM_PATH:
+       case LTTNG_REGENERATE_METADATA:
+       case LTTNG_REGENERATE_STATEDUMP:
+       case LTTNG_ROTATE_SESSION:
+       case LTTNG_ROTATION_GET_INFO:
+       case LTTNG_ROTATION_SET_SCHEDULE:
+       case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
+       case LTTNG_CLEAR_SESSION:
+       case LTTNG_LIST_TRIGGERS:
+       case LTTNG_EXECUTE_ERROR_QUERY:
+               need_domain = false;
+               break;
+       default:
+               need_domain = true;
+       }
+
+       /* Needs a functioning consumerd? */
+       switch (cmd_ctx->lsm.cmd_type) {
+       case LTTNG_REGISTER_TRIGGER:
+       case LTTNG_UNREGISTER_TRIGGER:
+       case LTTNG_EXECUTE_ERROR_QUERY:
+               need_consumerd = false;
+               break;
+       default:
+               need_consumerd = true;
+               break;
+       }
+
+       if (the_config.no_kernel && need_domain &&
+                       cmd_ctx->lsm.domain.type == LTTNG_DOMAIN_KERNEL) {
+               if (!is_root) {
+                       ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
+               } else {
+                       ret = LTTNG_ERR_KERN_NA;
+               }
+               goto error;
+       }
+
+       /* Deny register consumer if we already have a spawned consumer. */
+       if (cmd_ctx->lsm.cmd_type == LTTNG_REGISTER_CONSUMER) {
+               pthread_mutex_lock(&the_kconsumer_data.pid_mutex);
+               if (the_kconsumer_data.pid > 0) {
+                       ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
+                       pthread_mutex_unlock(&the_kconsumer_data.pid_mutex);
+                       goto error;
+               }
+               pthread_mutex_unlock(&the_kconsumer_data.pid_mutex);
+       }
+
+       /*
+        * Check for commands that don't need to allocate a reply payload. We do
+        * this here so we don't have to make the call for each command that has
+        * no payload.
+        */
+       switch (cmd_ctx->lsm.cmd_type) {
+       case LTTNG_LIST_SESSIONS:
+       case LTTNG_LIST_TRACEPOINTS:
+       case LTTNG_LIST_TRACEPOINT_FIELDS:
+       case LTTNG_LIST_DOMAINS:
+       case LTTNG_LIST_CHANNELS:
+       case LTTNG_LIST_EVENTS:
+       case LTTNG_LIST_SYSCALLS:
+       case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
+       case LTTNG_PROCESS_ATTR_TRACKER_GET_POLICY:
+       case LTTNG_PROCESS_ATTR_TRACKER_GET_INCLUSION_SET:
+       case LTTNG_DATA_PENDING:
+       case LTTNG_ROTATE_SESSION:
+       case LTTNG_ROTATION_GET_INFO:
+       case LTTNG_REGISTER_TRIGGER:
+       case LTTNG_LIST_TRIGGERS:
+       case LTTNG_EXECUTE_ERROR_QUERY:
+               break;
+       default:
+               /* Setup lttng message with no payload */
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0);
+               if (ret < 0) {
+                       /* This label does not try to unlock the session */
+                       goto init_setup_error;
+               }
+       }
+
+       /* Commands that DO NOT need a session. */
+       switch (cmd_ctx->lsm.cmd_type) {
+       case LTTNG_CREATE_SESSION_EXT:
+       case LTTNG_LIST_SESSIONS:
+       case LTTNG_LIST_TRACEPOINTS:
+       case LTTNG_LIST_SYSCALLS:
+       case LTTNG_LIST_TRACEPOINT_FIELDS:
+       case LTTNG_SAVE_SESSION:
+       case LTTNG_REGISTER_TRIGGER:
+       case LTTNG_UNREGISTER_TRIGGER:
+       case LTTNG_LIST_TRIGGERS:
+       case LTTNG_EXECUTE_ERROR_QUERY:
+               need_tracing_session = false;
+               break;
+       default:
+               DBG("Getting session %s by name", cmd_ctx->lsm.session.name);
+               /*
+                * We keep the session list lock across _all_ commands
+                * for now, because the per-session lock does not
+                * handle teardown properly.
+                */
+               session_lock_list();
+               cmd_ctx->session = session_find_by_name(cmd_ctx->lsm.session.name);
+               if (cmd_ctx->session == NULL) {
+                       ret = LTTNG_ERR_SESS_NOT_FOUND;
+                       goto error;
+               } else {
+                       /* Acquire lock for the session */
+                       session_lock(cmd_ctx->session);
+               }
+               break;
+       }
+
+       /*
+        * Commands that need a valid session but should NOT create one if none
+        * exists. Instead of creating one and destroying it when the command is
+        * handled, check for its existence right away so we avoid a round trip
+        * through a useless code path.
+        */
+       switch (cmd_ctx->lsm.cmd_type) {
+       case LTTNG_DISABLE_CHANNEL:
+       case LTTNG_DISABLE_EVENT:
+               switch (cmd_ctx->lsm.domain.type) {
+               case LTTNG_DOMAIN_KERNEL:
+                       if (!cmd_ctx->session->kernel_session) {
+                               ret = LTTNG_ERR_NO_CHANNEL;
+                               goto error;
+                       }
+                       break;
+               case LTTNG_DOMAIN_JUL:
+               case LTTNG_DOMAIN_LOG4J:
+               case LTTNG_DOMAIN_PYTHON:
+               case LTTNG_DOMAIN_UST:
+                       if (!cmd_ctx->session->ust_session) {
+                               ret = LTTNG_ERR_NO_CHANNEL;
+                               goto error;
+                       }
+                       break;
+               default:
+                       ret = LTTNG_ERR_UNKNOWN_DOMAIN;
+                       goto error;
+               }
+       default:
+               break;
+       }
+
+       if (!need_domain) {
+               goto skip_domain;
+       }
+
+       /*
+        * Check domain type for specific "pre-action".
+        */
+       switch (cmd_ctx->lsm.domain.type) {
+       case LTTNG_DOMAIN_KERNEL:
+               if (!is_root) {
+                       ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
+                       goto error;
+               }
+
+               /* Kernel tracer check */
+               if (!kernel_tracer_is_initialized()) {
+                       /* Basically, load kernel tracer modules */
+                       ret = init_kernel_tracer();
+                       if (ret != 0) {
+                               goto error;
+                       }
+               }
+
+               /* Consumer is in an ERROR state. Report back to client */
+               if (need_consumerd && uatomic_read(&the_kernel_consumerd_state) ==
+                                               CONSUMER_ERROR) {
+                       ret = LTTNG_ERR_NO_KERNCONSUMERD;
+                       goto error;
+               }
+
+               /* Need a session for kernel command */
+               if (need_tracing_session) {
+                       if (cmd_ctx->session->kernel_session == NULL) {
+                               ret = create_kernel_session(cmd_ctx->session);
+                               if (ret != LTTNG_OK) {
+                                       ret = LTTNG_ERR_KERN_SESS_FAIL;
+                                       goto error;
+                               }
+                       }
+
+                       /* Start the kernel consumer daemon */
+                       pthread_mutex_lock(&the_kconsumer_data.pid_mutex);
+                       if (the_kconsumer_data.pid == 0 &&
+                                       cmd_ctx->lsm.cmd_type != LTTNG_REGISTER_CONSUMER) {
+                               pthread_mutex_unlock(&the_kconsumer_data.pid_mutex);
+                               ret = start_consumerd(&the_kconsumer_data);
+                               if (ret < 0) {
+                                       ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
+                                       goto error;
+                               }
+                               uatomic_set(&the_kernel_consumerd_state, CONSUMER_STARTED);
+                       } else {
+                               pthread_mutex_unlock(&the_kconsumer_data.pid_mutex);
+                       }
+
+                       /*
+                        * The consumer was just spawned so we need to add the socket to
+                        * the consumer output of the session, if it exists.
+                        */
+                       ret = consumer_create_socket(&the_kconsumer_data,
+                                       cmd_ctx->session->kernel_session->consumer);
+                       if (ret < 0) {
+                               goto error;
+                       }
+               }
+
+               break;
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_PYTHON:
+               if (!agent_tracing_is_enabled()) {
+                       ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
+                       goto error;
+               }
+               /* Fallthrough */
+       case LTTNG_DOMAIN_UST:
+       {
+               if (!ust_app_supported()) {
+                       ret = LTTNG_ERR_NO_UST;
+                       goto error;
+               }
+
+               /* Consumer is in an ERROR state. Report back to client */
+               if (need_consumerd &&
+                               uatomic_read(&the_ust_consumerd_state) ==
+                                               CONSUMER_ERROR) {
+                       ret = LTTNG_ERR_NO_USTCONSUMERD;
+                       goto error;
+               }
+
+               if (need_tracing_session) {
+                       /* Create UST session if none exist. */
+                       if (cmd_ctx->session->ust_session == NULL) {
+                               lttng_domain domain = cmd_ctx->lsm.domain;
+                               ret = create_ust_session(cmd_ctx->session, &domain);
+                               if (ret != LTTNG_OK) {
+                                       goto error;
+                               }
+                       }
+
+                       /* Start the UST consumer daemons */
+                       /* 64-bit */
+                       pthread_mutex_lock(&the_ustconsumer64_data.pid_mutex);
+                       if (the_config.consumerd64_bin_path.value &&
+                                       the_ustconsumer64_data.pid == 0 &&
+                                       cmd_ctx->lsm.cmd_type != LTTNG_REGISTER_CONSUMER) {
+                               pthread_mutex_unlock(&the_ustconsumer64_data.pid_mutex);
+                               ret = start_consumerd(&the_ustconsumer64_data);
+                               if (ret < 0) {
+                                       ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
+                                       uatomic_set(&the_ust_consumerd64_fd, -EINVAL);
+                                       goto error;
+                               }
+
+                               uatomic_set(&the_ust_consumerd64_fd, the_ustconsumer64_data.cmd_sock);
+                               uatomic_set(&the_ust_consumerd_state, CONSUMER_STARTED);
+                       } else {
+                               pthread_mutex_unlock(&the_ustconsumer64_data.pid_mutex);
+                       }
+
+                       /*
+                        * Set up the socket for the 64-bit consumer. No need for
+                        * atomic access since it was set above and can ONLY be
+                        * set in this thread.
+                        */
+                       ret = consumer_create_socket(&the_ustconsumer64_data,
+                                       cmd_ctx->session->ust_session->consumer);
+                       if (ret < 0) {
+                               goto error;
+                       }
+
+                       /* 32-bit */
+                       pthread_mutex_lock(&the_ustconsumer32_data.pid_mutex);
+                       if (the_config.consumerd32_bin_path.value &&
+                                       the_ustconsumer32_data.pid == 0 &&
+                                       cmd_ctx->lsm.cmd_type != LTTNG_REGISTER_CONSUMER) {
+                               pthread_mutex_unlock(&the_ustconsumer32_data.pid_mutex);
+                               ret = start_consumerd(&the_ustconsumer32_data);
+                               if (ret < 0) {
+                                       ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
+                                       uatomic_set(&the_ust_consumerd32_fd, -EINVAL);
+                                       goto error;
+                               }
+
+                               uatomic_set(&the_ust_consumerd32_fd, the_ustconsumer32_data.cmd_sock);
+                               uatomic_set(&the_ust_consumerd_state, CONSUMER_STARTED);
+                       } else {
+                               pthread_mutex_unlock(&the_ustconsumer32_data.pid_mutex);
+                       }
+
+                       /*
+                        * Set up the socket for the 32-bit consumer. No need for
+                        * atomic access since it was set above and can ONLY be
+                        * set in this thread.
+                        */
+                       ret = consumer_create_socket(&the_ustconsumer32_data,
+                                       cmd_ctx->session->ust_session->consumer);
+                       if (ret < 0) {
+                               goto error;
+                       }
+               }
+               break;
+       }
+       default:
+               break;
+       }
+skip_domain:
+
+       /* Validate the consumer daemon state for start/stop trace commands. */
+       if (cmd_ctx->lsm.cmd_type == LTTNG_START_TRACE ||
+                       cmd_ctx->lsm.cmd_type == LTTNG_STOP_TRACE) {
+               switch (cmd_ctx->lsm.domain.type) {
+               case LTTNG_DOMAIN_NONE:
+                       break;
+               case LTTNG_DOMAIN_JUL:
+               case LTTNG_DOMAIN_LOG4J:
+               case LTTNG_DOMAIN_PYTHON:
+               case LTTNG_DOMAIN_UST:
+                       if (uatomic_read(&the_ust_consumerd_state) != CONSUMER_STARTED) {
+                               ret = LTTNG_ERR_NO_USTCONSUMERD;
+                               goto error;
+                       }
+                       break;
+               case LTTNG_DOMAIN_KERNEL:
+                       if (uatomic_read(&the_kernel_consumerd_state) != CONSUMER_STARTED) {
+                               ret = LTTNG_ERR_NO_KERNCONSUMERD;
+                               goto error;
+                       }
+                       break;
+               default:
+                       ret = LTTNG_ERR_UNKNOWN_DOMAIN;
+                       goto error;
+               }
+       }
+
+       /*
+        * Check that the UID matches that of the tracing session.
+        * The root user can interact with all sessions.
+        */
+       if (need_tracing_session) {
+               if (!session_access_ok(cmd_ctx->session,
+                               LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds)) ||
+                               cmd_ctx->session->destroyed) {
+                       ret = LTTNG_ERR_EPERM;
+                       goto error;
+               }
+       }
+
+       /*
+        * Send relayd information to consumer as soon as we have a domain and a
+        * session defined.
+        */
+       if (cmd_ctx->session && need_domain) {
+               /*
+                * Set up the relayd connection if not done yet. If the relayd
+                * information was already sent to the consumer, this call returns
+                * gracefully.
+                */
+               ret = cmd_setup_relayd(cmd_ctx->session);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+       }
+
+       /* Process by command type */
+       switch (cmd_ctx->lsm.cmd_type) {
+       case LTTNG_ADD_CONTEXT:
+       {
+               lttng_event_context ctx;
+
+               /*
+                * An LTTNG_ADD_CONTEXT command might have a supplementary
+                * payload if the context being added is an application context.
+                */
+               if (cmd_ctx->lsm.u.context.ctx.ctx ==
+                               LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
+                       char *provider_name = NULL, *context_name = NULL;
+                       size_t provider_name_len =
+                                       cmd_ctx->lsm.u.context.provider_name_len;
+                       size_t context_name_len =
+                                       cmd_ctx->lsm.u.context.context_name_len;
+
+                       if (provider_name_len == 0 || context_name_len == 0) {
+                               /*
+                                * Application provider and context names MUST
+                                * be provided.
+                                */
+                               ret = -LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+
+                       provider_name = (char *) zmalloc(provider_name_len + 1);
+                       if (!provider_name) {
+                               ret = -LTTNG_ERR_NOMEM;
+                               goto error;
+                       }
+                       cmd_ctx->lsm.u.context.ctx.u.app_ctx.provider_name =
+                                       provider_name;
+
+                       context_name = (char *) zmalloc(context_name_len + 1);
+                       if (!context_name) {
+                               ret = -LTTNG_ERR_NOMEM;
+                               goto error_add_context;
+                       }
+                       cmd_ctx->lsm.u.context.ctx.u.app_ctx.ctx_name =
+                                       context_name;
+
+                       ret = lttcomm_recv_unix_sock(*sock, provider_name,
+                                       provider_name_len);
+                       if (ret < 0) {
+                               goto error_add_context;
+                       }
+
+                       ret = lttcomm_recv_unix_sock(*sock, context_name,
+                                       context_name_len);
+                       if (ret < 0) {
+                               goto error_add_context;
+                       }
+               }
+
+               /*
+                * cmd_add_context assumes ownership of the provider and context
+                * names.
+                */
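+               /* Work on a stack copy of the context received from the client. */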
+               ctx = cmd_ctx->lsm.u.context.ctx;
+               ret = cmd_add_context(cmd_ctx->session,
+                               cmd_ctx->lsm.domain.type,
+                               cmd_ctx->lsm.u.context.channel_name,
+                               &ctx,
+                               the_kernel_poll_pipe[1]);
+
+               cmd_ctx->lsm.u.context.ctx.u.app_ctx.provider_name = NULL;
+               cmd_ctx->lsm.u.context.ctx.u.app_ctx.ctx_name = NULL;
+error_add_context:
+               free(cmd_ctx->lsm.u.context.ctx.u.app_ctx.provider_name);
+               free(cmd_ctx->lsm.u.context.ctx.u.app_ctx.ctx_name);
+               if (ret < 0) {
+                       goto error;
+               }
+               break;
+       }
+       case LTTNG_DISABLE_CHANNEL:
+       {
+               ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm.domain.type,
+                               cmd_ctx->lsm.u.disable.channel_name);
+               break;
+       }
+       case LTTNG_DISABLE_EVENT:
+       {
+               lttng_event event;
+
+               /*
+                * FIXME: handle filter; for now we just receive the filter's
+                * bytecode along with the filter expression which are sent by
+                * liblttng-ctl and discard them.
+                *
+                * This fixes an issue where the client may block while sending
+                * the filter payload and encounter an error because the session
+                * daemon closes the socket without ever handling this data.
+                */
+               size_t count = cmd_ctx->lsm.u.disable.expression_len +
+                       cmd_ctx->lsm.u.disable.bytecode_len;
+
+               if (count) {
+                       char data[LTTNG_FILTER_MAX_LEN];
+
+                       DBG("Discarding disable event command payload of size %zu", count);
+                       while (count) {
+                               ret = lttcomm_recv_unix_sock(*sock, data,
+                                       count > sizeof(data) ? sizeof(data) : count);
+                               if (ret < 0) {
+                                       goto error;
+                               }
+
+                               count -= (size_t) ret;
+                       }
+               }
+               event = cmd_ctx->lsm.u.disable.event;
+               ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm.domain.type,
+                               cmd_ctx->lsm.u.disable.channel_name,
+                               &event);
+               break;
+       }
+       case LTTNG_ENABLE_CHANNEL:
+       {
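+               /*
+                * The extended channel attributes are received inline in the
+                * lsm; re-link the channel's extended pointer to that copy
+                * before the channel is passed to cmd_enable_channel().
+                */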
+               cmd_ctx->lsm.u.channel.chan.attr.extended.ptr =
+                               (struct lttng_channel_extended *) &cmd_ctx->lsm.u.channel.extended;
+               lttng_domain domain = cmd_ctx->lsm.domain;
+               lttng_channel chan = cmd_ctx->lsm.u.channel.chan;
+               ret = cmd_enable_channel(cmd_ctx->session,
+                               &domain,
+                               &chan,
+                               the_kernel_poll_pipe[1]);
+               break;
+       }
+       case LTTNG_PROCESS_ATTR_TRACKER_ADD_INCLUDE_VALUE:
+       case LTTNG_PROCESS_ATTR_TRACKER_REMOVE_INCLUDE_VALUE:
+       {
+               struct lttng_dynamic_buffer payload;
+               struct lttng_buffer_view payload_view;
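+               /* Unpack the tracker command's arguments from the received lsm. */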
+               const bool add_value =
+                               cmd_ctx->lsm.cmd_type ==
+                               LTTNG_PROCESS_ATTR_TRACKER_ADD_INCLUDE_VALUE;
+               const size_t name_len =
+                               cmd_ctx->lsm.u.process_attr_tracker_add_remove_include_value
+                                               .name_len;
+               const enum lttng_domain_type domain_type =
+                               (enum lttng_domain_type)
+                                               cmd_ctx->lsm.domain.type;
+               const enum lttng_process_attr process_attr =
+                               (enum lttng_process_attr) cmd_ctx->lsm.u
+                                               .process_attr_tracker_add_remove_include_value
+                                               .process_attr;
+               const enum lttng_process_attr_value_type value_type =
+                               (enum lttng_process_attr_value_type) cmd_ctx
+                                               ->lsm.u
+                                               .process_attr_tracker_add_remove_include_value
+                                               .value_type;
+               struct process_attr_value *value;
+               enum lttng_error_code ret_code;
+               long login_name_max;
+
+               login_name_max = sysconf(_SC_LOGIN_NAME_MAX);
+               if (login_name_max < 0) {
+                       PERROR("Failed to get _SC_LOGIN_NAME_MAX system configuration");
+                       ret = LTTNG_ERR_INVALID;
+                       goto error;
+               }
+
+               /* Receive remaining variable length payload if applicable. */
+               if (name_len > login_name_max) {
+                       /*
+                        * POSIX mandates support for user and group names of at
+                        * least 8 characters. Note that although shadow-utils
+                        * (useradd, groupadd, etc.) use 32 chars as their
+                        * limit (from bits/utmp.h, UT_NAMESIZE),
+                        * LOGIN_NAME_MAX is defined to 256.
+                        */
+                       ERR("Rejecting process attribute tracker value %s as the provided name exceeds the maximal allowed length: argument length = %zu, maximal length = %ld",
+                                       add_value ? "addition" : "removal",
+                                       name_len, login_name_max);
+                       ret = LTTNG_ERR_INVALID;
+                       goto error;
+               }
+
+               lttng_dynamic_buffer_init(&payload);
+               if (name_len != 0) {
+                       /*
+                        * Receive variable payload for user/group name
+                        * arguments.
+                        */
+                       ret = lttng_dynamic_buffer_set_size(&payload, name_len);
+                       if (ret) {
+                               ERR("Failed to allocate buffer to receive payload of %s process attribute tracker value argument",
+                                               add_value ? "add" : "remove");
+                               ret = LTTNG_ERR_NOMEM;
+                               goto error_add_remove_tracker_value;
+                       }
+
+                       ret = lttcomm_recv_unix_sock(
+                                       *sock, payload.data, name_len);
+                       if (ret <= 0) {
+                               ERR("Failed to receive payload of %s process attribute tracker value argument",
+                                               add_value ? "add" : "remove");
+                               *sock_error = 1;
+                               ret = LTTNG_ERR_INVALID_PROTOCOL;
+                               goto error_add_remove_tracker_value;
+                       }
+               }
+
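+               /*
+                * Wrap the received name (if any) in a buffer view for the
+                * validation and conversion below.
+                */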
+               payload_view = lttng_buffer_view_from_dynamic_buffer(
+                               &payload, 0, name_len);
+               if (name_len > 0 && !lttng_buffer_view_is_valid(&payload_view)) {
+                       ret = LTTNG_ERR_INVALID_PROTOCOL;
+                       goto error_add_remove_tracker_value;
+               }
+
+               /*
+                * Validate the value type and domains are legal for the process
+                * attribute tracker that is specified and convert the value to
+                * add/remove to the internal sessiond representation.
+                */
+               ret_code = process_attr_value_from_comm(domain_type,
+                               process_attr, value_type,
+                               &cmd_ctx->lsm.u.process_attr_tracker_add_remove_include_value
+                                                .integral_value,
+                               &payload_view, &value);
+               if (ret_code != LTTNG_OK) {
+                       ret = ret_code;
+                       goto error_add_remove_tracker_value;
+               }
+
+               if (add_value) {
+                       ret = cmd_process_attr_tracker_inclusion_set_add_value(
+                                       cmd_ctx->session, domain_type,
+                                       process_attr, value);
+               } else {
+                       ret = cmd_process_attr_tracker_inclusion_set_remove_value(
+                                       cmd_ctx->session, domain_type,
+                                       process_attr, value);
+               }
+               process_attr_value_destroy(value);
+       error_add_remove_tracker_value:
+               lttng_dynamic_buffer_reset(&payload);
+               break;
+       }
+       case LTTNG_PROCESS_ATTR_TRACKER_GET_POLICY:
+       {
+               enum lttng_tracking_policy tracking_policy;
+               const enum lttng_domain_type domain_type =
+                               (enum lttng_domain_type)
+                                               cmd_ctx->lsm.domain.type;
+               const enum lttng_process_attr process_attr =
+                               (enum lttng_process_attr) cmd_ctx->lsm.u
+                                               .process_attr_tracker_get_tracking_policy
+                                               .process_attr;
+
+               ret = cmd_process_attr_tracker_get_tracking_policy(
+                               cmd_ctx->session, domain_type, process_attr,
+                               &tracking_policy);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
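+               /* Reply with the tracking policy as a fixed-size 32-bit value. */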
+               uint32_t tracking_policy_u32 = tracking_policy;
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx,
+                               &tracking_policy_u32, sizeof(uint32_t));
+               if (ret < 0) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_PROCESS_ATTR_TRACKER_SET_POLICY:
+       {
+               const enum lttng_tracking_policy tracking_policy =
+                               (enum lttng_tracking_policy) cmd_ctx->lsm.u
+                                               .process_attr_tracker_set_tracking_policy
+                                               .tracking_policy;
+               const enum lttng_domain_type domain_type =
+                               (enum lttng_domain_type)
+                                               cmd_ctx->lsm.domain.type;
+               const enum lttng_process_attr process_attr =
+                               (enum lttng_process_attr) cmd_ctx->lsm.u
+                                               .process_attr_tracker_set_tracking_policy
+                                               .process_attr;
+
+               ret = cmd_process_attr_tracker_set_tracking_policy(
+                               cmd_ctx->session, domain_type, process_attr,
+                               tracking_policy);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+               break;
+       }
+       case LTTNG_PROCESS_ATTR_TRACKER_GET_INCLUSION_SET:
+       {
+               struct lttng_process_attr_values *values;
+               struct lttng_dynamic_buffer reply;
+               const enum lttng_domain_type domain_type =
+                               (enum lttng_domain_type)
+                                               cmd_ctx->lsm.domain.type;
+               const enum lttng_process_attr process_attr =
+                               (enum lttng_process_attr) cmd_ctx->lsm.u
+                                               .process_attr_tracker_get_inclusion_set
+                                               .process_attr;
+
+               ret = cmd_process_attr_tracker_get_inclusion_set(
+                               cmd_ctx->session, domain_type, process_attr,
+                               &values);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               lttng_dynamic_buffer_init(&reply);
+               ret = lttng_process_attr_values_serialize(values, &reply);
+               if (ret < 0) {
+                       goto error_tracker_get_inclusion_set;
+               }
+
+               ret = setup_lttng_msg_no_cmd_header(
+                               cmd_ctx, reply.data, reply.size);
+               if (ret < 0) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto error_tracker_get_inclusion_set;
+               }
+               ret = LTTNG_OK;
+
+       error_tracker_get_inclusion_set:
+               lttng_process_attr_values_destroy(values);
+               lttng_dynamic_buffer_reset(&reply);
+               break;
+       }
+       case LTTNG_ENABLE_EVENT:
+       {
+               struct lttng_event *ev = NULL;
+               struct lttng_event_exclusion *exclusion = NULL;
+               struct lttng_bytecode *bytecode = NULL;
+               char *filter_expression = NULL;
+               lttng_event event;
+               lttng_domain domain;
+
+               /* Handle event name exclusions; receive them from the client. */
+               if (cmd_ctx->lsm.u.enable.exclusion_count > 0) {
+                       size_t count = cmd_ctx->lsm.u.enable.exclusion_count;
+
+                       exclusion = (lttng_event_exclusion *) zmalloc(sizeof(struct lttng_event_exclusion) +
+                                       (count * LTTNG_SYMBOL_NAME_LEN));
+                       if (!exclusion) {
+                               ret = LTTNG_ERR_EXCLUSION_NOMEM;
+                               goto error;
+                       }
+
+                       DBG("Receiving var len exclusion event list from client ...");
+                       exclusion->count = count;
+                       ret = lttcomm_recv_unix_sock(*sock, exclusion->names,
+                                       count * LTTNG_SYMBOL_NAME_LEN);
+                       if (ret <= 0) {
+                               DBG("Nothing recv() from client var len data... continuing");
+                               *sock_error = 1;
+                               free(exclusion);
+                               ret = LTTNG_ERR_EXCLUSION_INVAL;
+                               goto error;
+                       }
+               }
+
+               /* Get filter expression from client. */
+               if (cmd_ctx->lsm.u.enable.expression_len > 0) {
+                       size_t expression_len =
+                               cmd_ctx->lsm.u.enable.expression_len;
+
+                       if (expression_len > LTTNG_FILTER_MAX_LEN) {
+                               ret = LTTNG_ERR_FILTER_INVAL;
+                               free(exclusion);
+                               goto error;
+                       }
+
+                       filter_expression = (char *) zmalloc(expression_len);
+                       if (!filter_expression) {
+                               free(exclusion);
+                               ret = LTTNG_ERR_FILTER_NOMEM;
+                               goto error;
+                       }
+
+                       /* Receive var. len. data */
+                       DBG("Receiving var len filter's expression from client ...");
+                       ret = lttcomm_recv_unix_sock(*sock, filter_expression,
+                               expression_len);
+                       if (ret <= 0) {
+                               DBG("Nothing recv() from client var len data... continuing");
+                               *sock_error = 1;
+                               free(filter_expression);
+                               free(exclusion);
+                               ret = LTTNG_ERR_FILTER_INVAL;
+                               goto error;
+                       }
+               }
+
+               /* Handle filter and get bytecode from client. */
+               if (cmd_ctx->lsm.u.enable.bytecode_len > 0) {
+                       size_t bytecode_len = cmd_ctx->lsm.u.enable.bytecode_len;
+
+                       if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
+                               ret = LTTNG_ERR_FILTER_INVAL;
+                               free(filter_expression);
+                               free(exclusion);
+                               goto error;
+                       }
+
+                       bytecode = (lttng_bytecode *) zmalloc(bytecode_len);
+                       if (!bytecode) {
+                               free(filter_expression);
+                               free(exclusion);
+                               ret = LTTNG_ERR_FILTER_NOMEM;
+                               goto error;
+                       }
+
+                       /* Receive var. len. data */
+                       DBG("Receiving var len filter's bytecode from client ...");
+                       ret = lttcomm_recv_unix_sock(*sock, bytecode, bytecode_len);
+                       if (ret <= 0) {
+                               DBG("Nothing recv() from client var len data... continuing");
+                               *sock_error = 1;
+                               free(filter_expression);
+                               free(bytecode);
+                               free(exclusion);
+                               ret = LTTNG_ERR_FILTER_INVAL;
+                               goto error;
+                       }
+
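+                       /*
+                        * The bytecode's advertised length must match the size
+                        * announced by the client.
+                        */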
+                       if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
+                               free(filter_expression);
+                               free(bytecode);
+                               free(exclusion);
+                               ret = LTTNG_ERR_FILTER_INVAL;
+                               goto error;
+                       }
+               }
+
+               event = cmd_ctx->lsm.u.enable.event;
+               ev = lttng_event_copy(&event);
+               if (!ev) {
+                       DBG("Failed to copy event: %s",
+                                       cmd_ctx->lsm.u.enable.event.name);
+                       free(filter_expression);
+                       free(bytecode);
+                       free(exclusion);
+                       ret = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+
+
+               if (cmd_ctx->lsm.u.enable.userspace_probe_location_len > 0) {
+                       /* Expect a userspace probe description. */
+                       ret = receive_userspace_probe(cmd_ctx, *sock, sock_error, ev);
+                       if (ret) {
+                               free(filter_expression);
+                               free(bytecode);
+                               free(exclusion);
+                               lttng_event_destroy(ev);
+                               goto error;
+                       }
+               }
+
+               domain = cmd_ctx->lsm.domain;
+               ret = cmd_enable_event(cmd_ctx->session,
+                               &domain,
+                               cmd_ctx->lsm.u.enable.channel_name,
+                               ev,
+                               filter_expression, bytecode, exclusion,
+                               the_kernel_poll_pipe[1]);
+               lttng_event_destroy(ev);
+               break;
+       }
+       case LTTNG_LIST_TRACEPOINTS:
+       {
+               struct lttng_event *events;
+               ssize_t nb_events;
+
+               session_lock_list();
+               nb_events = cmd_list_tracepoints(cmd_ctx->lsm.domain.type, &events);
+               session_unlock_list();
+               if (nb_events < 0) {
+                       /* Return value is a negative lttng_error_code. */
+                       ret = -nb_events;
+                       goto error;
+               }
+
+               /*
+                * Set up the lttng message with the payload size set to the
+                * event list size in bytes, then copy the list into the llm
+                * payload.
+                */
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
+                       sizeof(struct lttng_event) * nb_events);
+               free(events);
+
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_LIST_TRACEPOINT_FIELDS:
+       {
+               struct lttng_event_field *fields;
+               ssize_t nb_fields;
+
+               session_lock_list();
+               nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm.domain.type,
+                               &fields);
+               session_unlock_list();
+               if (nb_fields < 0) {
+                       /* Return value is a negative lttng_error_code. */
+                       ret = -nb_fields;
+                       goto error;
+               }
+
+               /*
+                * Set up the lttng message with the payload size set to the
+                * field list size in bytes, then copy the list into the llm
+                * payload.
+                */
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, fields,
+                               sizeof(struct lttng_event_field) * nb_fields);
+               free(fields);
+
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_LIST_SYSCALLS:
+       {
+               struct lttng_event *events;
+               ssize_t nb_events;
+
+               nb_events = cmd_list_syscalls(&events);
+               if (nb_events < 0) {
+                       /* Return value is a negative lttng_error_code. */
+                       ret = -nb_events;
+                       goto error;
+               }
+
+               /*
+                * Set up the lttng message with the payload size set to the
+                * event list size in bytes, then copy the list into the llm
+                * payload.
+                */
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
+                       sizeof(struct lttng_event) * nb_events);
+               free(events);
+
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_SET_CONSUMER_URI:
+       {
+               size_t nb_uri, len;
+               struct lttng_uri *uris;
+
+               nb_uri = cmd_ctx->lsm.u.uri.size;
+               len = nb_uri * sizeof(struct lttng_uri);
+
+               if (nb_uri == 0) {
+                       ret = LTTNG_ERR_INVALID;
+                       goto error;
+               }
+
+               uris = (lttng_uri *) zmalloc(len);
+               if (uris == NULL) {
+                       ret = LTTNG_ERR_FATAL;
+                       goto error;
+               }
+
+               /* Receive variable len data */
+               DBG("Receiving %zu URI(s) from client ...", nb_uri);
+               ret = lttcomm_recv_unix_sock(*sock, uris, len);
+               if (ret <= 0) {
+                       DBG("No URIs received from client... continuing");
+                       *sock_error = 1;
+                       ret = LTTNG_ERR_SESSION_FAIL;
+                       free(uris);
+                       goto error;
+               }
+
+               ret = cmd_set_consumer_uri(cmd_ctx->session, nb_uri, uris);
+               free(uris);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+
+               break;
+       }
+       case LTTNG_START_TRACE:
+       {
+               /*
+                * On the first start, if we have a kernel session and we have
+                * enabled time or size-based rotations, we have to make sure
+                * the kernel tracer supports it.
+                */
+               if (!cmd_ctx->session->has_been_started &&
+                               cmd_ctx->session->kernel_session &&
+                               (cmd_ctx->session->rotate_timer_period ||
+                                       cmd_ctx->session->rotate_size) &&
+                               !check_rotate_compatible()) {
+                       DBG("Kernel tracer version is not compatible with the rotation feature");
+                       ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
+                       goto error;
+               }
+               ret = cmd_start_trace(cmd_ctx->session);
+               break;
+       }
+       case LTTNG_STOP_TRACE:
+       {
+               ret = cmd_stop_trace(cmd_ctx->session);
+               break;
+       }
+       case LTTNG_DESTROY_SESSION:
+       {
+               ret = cmd_destroy_session(cmd_ctx->session,
+                               the_notification_thread_handle, sock);
+               break;
+       }
+       case LTTNG_LIST_DOMAINS:
+       {
+               ssize_t nb_dom;
+               struct lttng_domain *domains = NULL;
+
+               nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
+               if (nb_dom < 0) {
+                       /* Return value is a negative lttng_error_code. */
+                       ret = -nb_dom;
+                       goto error;
+               }
+
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, domains,
+                       nb_dom * sizeof(struct lttng_domain));
+               free(domains);
+
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_LIST_CHANNELS:
+       {
+               ssize_t payload_size;
+               struct lttng_channel *channels = NULL;
+
+               payload_size = cmd_list_channels(cmd_ctx->lsm.domain.type,
+                               cmd_ctx->session, &channels);
+               if (payload_size < 0) {
+                       /* Return value is a negative lttng_error_code. */
+                       ret = -payload_size;
+                       goto error;
+               }
+
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, channels,
+                       payload_size);
+               free(channels);
+
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_LIST_EVENTS:
+       {
+               ssize_t list_ret;
+               struct lttcomm_event_command_header cmd_header = {};
+               size_t original_payload_size;
+               size_t payload_size;
+
+               ret = setup_empty_lttng_msg(cmd_ctx);
+               if (ret) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto setup_error;
+               }
+
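+               /*
+                * Remember the payload size before the events are serialized
+                * so that only the newly appended data is accounted for in the
+                * reply.
+                */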
+               original_payload_size = cmd_ctx->reply_payload.buffer.size;
+
+               /* Extended infos are included at the end of the payload. */
+               list_ret = cmd_list_events(cmd_ctx->lsm.domain.type,
+                               cmd_ctx->session,
+                               cmd_ctx->lsm.u.list.channel_name,
+                               &cmd_ctx->reply_payload);
+               if (list_ret < 0) {
+                       /* Return value is a negative lttng_error_code. */
+                       ret = -list_ret;
+                       goto error;
+               }
+
+               payload_size = cmd_ctx->reply_payload.buffer.size -
+                               sizeof(cmd_header) - original_payload_size;
+               update_lttng_msg(cmd_ctx, sizeof(cmd_header), payload_size);
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_LIST_SESSIONS:
+       {
+               unsigned int nr_sessions;
+               lttng_session *sessions_payload;
+               size_t payload_len;
+
+               session_lock_list();
+               nr_sessions = lttng_sessions_count(
+                               LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
+                               LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
+
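+               /*
+                * The reply buffer holds nr_sessions lttng_session structures
+                * plus nr_sessions lttng_session_extended structures.
+                */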
+               payload_len = (sizeof(struct lttng_session) * nr_sessions) +
+                               (sizeof(struct lttng_session_extended) * nr_sessions);
+               sessions_payload = (lttng_session *) zmalloc(payload_len);
+
+               if (!sessions_payload) {
+                       session_unlock_list();
+                       ret = -ENOMEM;
+                       goto setup_error;
+               }
+
+               cmd_list_lttng_sessions(sessions_payload, nr_sessions,
+                       LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
+                       LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
+               session_unlock_list();
+
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, sessions_payload,
+                       payload_len);
+               free(sessions_payload);
+
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_REGISTER_CONSUMER:
+       {
+               struct consumer_data *cdata;
+
+               switch (cmd_ctx->lsm.domain.type) {
+               case LTTNG_DOMAIN_KERNEL:
+                       cdata = &the_kconsumer_data;
+                       break;
+               default:
+                       ret = LTTNG_ERR_UND;
+                       goto error;
+               }
+
+               ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm.domain.type,
+                               cmd_ctx->lsm.u.reg.path, cdata);
+               break;
+       }
+       case LTTNG_DATA_PENDING:
+       {
+               int pending_ret;
+               uint8_t pending_ret_byte;
+
+               pending_ret = cmd_data_pending(cmd_ctx->session);
+
+               /*
+                * FIXME
+                *
+                * This function may return 0 or 1 to indicate whether or not
+                * there is data pending. In case of error, it should return an
+                * LTTNG_ERR code. However, some code paths may still return
+                * a nondescript error code, which we handle by returning an
+                * "unknown" error.
+                */
+               if (pending_ret == 0 || pending_ret == 1) {
+                       /*
+                        * ret will be set to LTTNG_OK at the end of
+                        * this function.
+                        */
+               } else if (pending_ret < 0) {
+                       ret = LTTNG_ERR_UNK;
+                       goto setup_error;
+               } else {
+                       ret = pending_ret;
+                       goto setup_error;
+               }
+
+               pending_ret_byte = (uint8_t) pending_ret;
+
+               /* 1 byte to return whether or not data is pending */
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx,
+                       &pending_ret_byte, 1);
+
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_SNAPSHOT_ADD_OUTPUT:
+       {
+               uint32_t snapshot_id;
+               struct lttcomm_lttng_output_id reply;
+               lttng_snapshot_output output = cmd_ctx->lsm.u.snapshot_output.output;
+
+               ret = cmd_snapshot_add_output(cmd_ctx->session,
+                               &output,
+                               &snapshot_id);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+               reply.id = snapshot_id;
+
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &reply,
+                       sizeof(reply));
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               /* The new output's id was copied into the message payload above. */
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_SNAPSHOT_DEL_OUTPUT:
+       {
+               lttng_snapshot_output output = cmd_ctx->lsm.u.snapshot_output.output;
+               ret = cmd_snapshot_del_output(cmd_ctx->session, &output);
+               break;
+       }
+       case LTTNG_SNAPSHOT_LIST_OUTPUT:
+       {
+               ssize_t nb_output;
+               struct lttng_snapshot_output *outputs = NULL;
+
+               nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
+               if (nb_output < 0) {
+                       ret = -nb_output;
+                       goto error;
+               }
+
+               LTTNG_ASSERT((nb_output > 0 && outputs) || nb_output == 0);
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, outputs,
+                               nb_output * sizeof(struct lttng_snapshot_output));
+               free(outputs);
+
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_SNAPSHOT_RECORD:
+       {
+               lttng_snapshot_output output = cmd_ctx->lsm.u.snapshot_record.output;
+               ret = cmd_snapshot_record(cmd_ctx->session,
+                               &output,
+                               cmd_ctx->lsm.u.snapshot_record.wait);
+               break;
+       }
+       case LTTNG_CREATE_SESSION_EXT:
+       {
+               struct lttng_dynamic_buffer payload;
+               struct lttng_session_descriptor *return_descriptor = NULL;
+
+               lttng_dynamic_buffer_init(&payload);
+               ret = cmd_create_session(cmd_ctx, *sock, &return_descriptor);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               ret = lttng_session_descriptor_serialize(return_descriptor,
+                               &payload);
+               if (ret) {
+                       ERR("Failed to serialize session descriptor in reply to \"create session\" command");
+                       lttng_session_descriptor_destroy(return_descriptor);
+                       ret = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, payload.data,
+                               payload.size);
+               if (ret) {
+                       lttng_session_descriptor_destroy(return_descriptor);
+                       ret = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+               lttng_dynamic_buffer_reset(&payload);
+               lttng_session_descriptor_destroy(return_descriptor);
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_SAVE_SESSION:
+       {
+               ret = cmd_save_sessions(&cmd_ctx->lsm.u.save_session.attr,
+                       &cmd_ctx->creds);
+               break;
+       }
+       case LTTNG_SET_SESSION_SHM_PATH:
+       {
+               ret = cmd_set_session_shm_path(cmd_ctx->session,
+                               cmd_ctx->lsm.u.set_shm_path.shm_path);
+               break;
+       }
+       case LTTNG_REGENERATE_METADATA:
+       {
+               ret = cmd_regenerate_metadata(cmd_ctx->session);
+               break;
+       }
+       case LTTNG_REGENERATE_STATEDUMP:
+       {
+               ret = cmd_regenerate_statedump(cmd_ctx->session);
+               break;
+       }
+       case LTTNG_REGISTER_TRIGGER:
+       {
+               struct lttng_trigger *payload_trigger;
+               struct lttng_trigger *return_trigger;
+               size_t original_reply_payload_size;
+               size_t reply_payload_size;
+               const struct lttng_credentials cmd_creds = {
+                       .uid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.uid),
+                       .gid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.gid),
+               };
+
+               ret = setup_empty_lttng_msg(cmd_ctx);
+               if (ret) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto setup_error;
+               }
+
+               ret = receive_lttng_trigger(
+                               cmd_ctx, *sock, sock_error, &payload_trigger);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
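+               /*
+                * Data serialized past this point forms the reply's
+                * variable-length payload.
+                */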
+               original_reply_payload_size = cmd_ctx->reply_payload.buffer.size;
+
+               ret = cmd_register_trigger(&cmd_creds, payload_trigger,
+                               cmd_ctx->lsm.u.trigger.is_trigger_anonymous,
+                               the_notification_thread_handle,
+                               &return_trigger);
+               if (ret != LTTNG_OK) {
+                       lttng_trigger_put(payload_trigger);
+                       goto error;
+               }
+
+               ret = lttng_trigger_serialize(return_trigger, &cmd_ctx->reply_payload);
+               lttng_trigger_put(payload_trigger);
+               lttng_trigger_put(return_trigger);
+               if (ret) {
+                       ERR("Failed to serialize trigger in reply to \"register trigger\" command");
+                       ret = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+
+               reply_payload_size = cmd_ctx->reply_payload.buffer.size -
+                       original_reply_payload_size;
+
+               update_lttng_msg(cmd_ctx, 0, reply_payload_size);
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_UNREGISTER_TRIGGER:
+       {
+               struct lttng_trigger *payload_trigger;
+               const struct lttng_credentials cmd_creds = {
+                       .uid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.uid),
+                       .gid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.gid),
+               };
+
+               ret = receive_lttng_trigger(
+                               cmd_ctx, *sock, sock_error, &payload_trigger);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               ret = cmd_unregister_trigger(&cmd_creds, payload_trigger,
+                               the_notification_thread_handle);
+               lttng_trigger_put(payload_trigger);
+               break;
+       }
+       case LTTNG_ROTATE_SESSION:
+       {
+               struct lttng_rotate_session_return rotate_return;
+
+               DBG("Client rotate session \"%s\"", cmd_ctx->session->name);
+
+               memset(&rotate_return, 0, sizeof(rotate_return));
+               if (cmd_ctx->session->kernel_session && !check_rotate_compatible()) {
+                       DBG("Kernel tracer version is not compatible with the rotation feature");
+                       ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
+                       goto error;
+               }
+
+               ret = cmd_rotate_session(cmd_ctx->session, &rotate_return,
+                       false,
+                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
+               if (ret < 0) {
+                       ret = -ret;
+                       goto error;
+               }
+
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &rotate_return,
+                               sizeof(rotate_return));
+               if (ret < 0) {
+                       ret = -ret;
+                       goto error;
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_ROTATION_GET_INFO:
+       {
+               struct lttng_rotation_get_info_return get_info_return;
+
+               memset(&get_info_return, 0, sizeof(get_info_return));
+               ret = cmd_rotate_get_info(cmd_ctx->session, &get_info_return,
+                               cmd_ctx->lsm.u.get_rotation_info.rotation_id);
+               if (ret < 0) {
+                       ret = -ret;
+                       goto error;
+               }
+
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &get_info_return,
+                               sizeof(get_info_return));
+               if (ret < 0) {
+                       ret = -ret;
+                       goto error;
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_ROTATION_SET_SCHEDULE:
+       {
+               bool set_schedule;
+               enum lttng_rotation_schedule_type schedule_type;
+               uint64_t value;
+
+               if (cmd_ctx->session->kernel_session && !check_rotate_compatible()) {
+                       DBG("Kernel tracer version does not support session rotations");
+                       ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
+                       goto error;
+               }
+
+               set_schedule = cmd_ctx->lsm.u.rotation_set_schedule.set == 1;
+               schedule_type = (enum lttng_rotation_schedule_type) cmd_ctx->lsm.u.rotation_set_schedule.type;
+               value = cmd_ctx->lsm.u.rotation_set_schedule.value;
+
+               ret = cmd_rotation_set_schedule(cmd_ctx->session, set_schedule,
+                               schedule_type, value,
+                               the_notification_thread_handle);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               break;
+       }
+       case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
+       {
+               lttng_session_list_schedules_return schedules;
+
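+               /*
+                * A rotation timer period or size threshold of 0 means that
+                * the corresponding schedule is unset.
+                */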
+               schedules.periodic.set = !!cmd_ctx->session->rotate_timer_period;
+               schedules.periodic.value = cmd_ctx->session->rotate_timer_period;
+               schedules.size.set = !!cmd_ctx->session->rotate_size;
+               schedules.size.value = cmd_ctx->session->rotate_size;
+
+               ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &schedules,
+                               sizeof(schedules));
+               if (ret < 0) {
+                       ret = -ret;
+                       goto error;
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_CLEAR_SESSION:
+       {
+               ret = cmd_clear_session(cmd_ctx->session, sock);
+               break;
+       }
+       case LTTNG_LIST_TRIGGERS:
+       {
+               struct lttng_triggers *return_triggers = NULL;
+               size_t original_payload_size;
+               size_t payload_size;
+
+               ret = setup_empty_lttng_msg(cmd_ctx);
+               if (ret) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto setup_error;
+               }
+
+               original_payload_size = cmd_ctx->reply_payload.buffer.size;
+
+               ret = cmd_list_triggers(cmd_ctx, the_notification_thread_handle,
+                               &return_triggers);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               LTTNG_ASSERT(return_triggers);
+               ret = lttng_triggers_serialize(
+                               return_triggers, &cmd_ctx->reply_payload);
+               lttng_triggers_destroy(return_triggers);
+               if (ret) {
+                       ERR("Failed to serialize triggers in reply to `list triggers` command");
+                       ret = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+
+               payload_size = cmd_ctx->reply_payload.buffer.size -
+                       original_payload_size;
+
+               update_lttng_msg(cmd_ctx, 0, payload_size);
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_EXECUTE_ERROR_QUERY:
+       {
+               struct lttng_error_query *query;
+               const struct lttng_credentials cmd_creds = {
+                       .uid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.uid),
+                       .gid = LTTNG_OPTIONAL_INIT_VALUE(cmd_ctx->creds.gid),
+               };
+               struct lttng_error_query_results *results = NULL;
+               size_t original_payload_size;
+               size_t payload_size;
+
+               ret = setup_empty_lttng_msg(cmd_ctx);
+               if (ret) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto setup_error;
+               }
+
+               original_payload_size = cmd_ctx->reply_payload.buffer.size;
+
+               ret = receive_lttng_error_query(
+                               cmd_ctx, *sock, sock_error, &query);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               ret = cmd_execute_error_query(&cmd_creds, query, &results,
+                               the_notification_thread_handle);
+               lttng_error_query_destroy(query);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               LTTNG_ASSERT(results);
+               ret = lttng_error_query_results_serialize(
+                               results, &cmd_ctx->reply_payload);
+               lttng_error_query_results_destroy(results);
+               if (ret) {
+                       ERR("Failed to serialize error query result set in reply to `execute error query` command");
+                       ret = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+
+               payload_size = cmd_ctx->reply_payload.buffer.size -
+                       original_payload_size;
+
+               update_lttng_msg(cmd_ctx, 0, payload_size);
+
+               ret = LTTNG_OK;
+
+               break;
+       }
+       default:
+               ret = LTTNG_ERR_UND;
+               break;
+       }
+
+error:
+       if (cmd_ctx->reply_payload.buffer.size == 0) {
+               DBG("Missing llm header, creating one.");
+               if (setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0) < 0) {
+                       goto setup_error;
+               }
+       }
+       /* Set return code */
+       ((struct lttcomm_lttng_msg *) (cmd_ctx->reply_payload.buffer.data))->ret_code = ret;
+setup_error:
+       if (cmd_ctx->session) {
+               session_unlock(cmd_ctx->session);
+               session_put(cmd_ctx->session);
+               cmd_ctx->session = NULL;
+       }
+       if (need_tracing_session) {
+               session_unlock_list();
+       }
+init_setup_error:
+       LTTNG_ASSERT(!rcu_read_ongoing());
+       return ret;
+}
+
+static int create_client_sock(void)
+{
+       int ret, client_sock;
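+       /* Clear the umask; it is restored before returning. */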
+       const mode_t old_umask = umask(0);
+
+       /* Create client tool unix socket */
+       client_sock = lttcomm_create_unix_sock(
+                       the_config.client_unix_sock_path.value);
+       if (client_sock < 0) {
+               ERR("Create unix sock failed: %s",
+                               the_config.client_unix_sock_path.value);
+               ret = -1;
+               goto end;
+       }
+
+       /* Set the cloexec flag */
+       ret = utils_set_fd_cloexec(client_sock);
+       if (ret < 0) {
+               ERR("Unable to set the CLOEXEC flag on the client Unix socket (fd: %d). "
+                               "Continuing but note that the consumer daemon will have a "
+                               "reference to this socket on exec()", client_sock);
+       }
+
+       /* File permission MUST be 660 */
+       ret = chmod(the_config.client_unix_sock_path.value,
+                       S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+       if (ret < 0) {
+               ERR("Set file permissions failed: %s",
+                               the_config.client_unix_sock_path.value);
+               PERROR("chmod");
+               (void) lttcomm_close_unix_sock(client_sock);
+               ret = -1;
+               goto end;
+       }
+       DBG("Created client socket (fd = %i)", client_sock);
+       ret = client_sock;
+end:
+       umask(old_umask);
+       return ret;
+}
+
+static void cleanup_client_thread(void *data)
+{
+       struct lttng_pipe *quit_pipe = (lttng_pipe *) data;
+
+       lttng_pipe_destroy(quit_pipe);
+}
+
+static void thread_init_cleanup(void *data)
+{
+       set_thread_status(false);
+}
+
+/*
+ * This thread manages all client requests using the client unix socket for
+ * communication.
+ */
+static void *thread_manage_clients(void *data)
+{
+       int sock = -1, ret, i, pollfd, err = -1;
+       int sock_error;
+       uint32_t revents, nb_fd;
+       struct lttng_poll_event events;
+       const int client_sock = thread_state.client_sock;
+       struct lttng_pipe *quit_pipe = (lttng_pipe *) data;
+       const int thread_quit_pipe_fd = lttng_pipe_get_readfd(quit_pipe);
+       struct command_ctx cmd_ctx = {};
+
+       DBG("[thread] Manage client started");
+
+       lttng_payload_init(&cmd_ctx.reply_payload);
+
+       is_root = (getuid() == 0);
+
+       pthread_cleanup_push(thread_init_cleanup, NULL);
+
+       rcu_register_thread();
+
+       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_CMD);
+
+       health_code_update();
+
+       ret = lttcomm_listen_unix_sock(client_sock);
+       if (ret < 0) {
+               goto error_listen;
+       }
+
+       /*
+        * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
+        * more will be added to this poll set.
+        */
+       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error_create_poll;
+       }
+
+       /* Add the client socket to the poll set */
+       ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /* Add thread quit pipe */
+       ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /* Set state as running. */
+       set_thread_status(true);
+       pthread_cleanup_pop(0);
+
+       /* This testpoint is after we signal readiness to the parent. */
+       if (testpoint(sessiond_thread_manage_clients)) {
+               goto error;
+       }
+
+       if (testpoint(sessiond_thread_manage_clients_before_loop)) {
+               goto error;
+       }
+
+       health_code_update();
+
+       while (1) {
+               const struct cmd_completion_handler *cmd_completion_handler;
+
+               cmd_ctx.creds.uid = UINT32_MAX;
+               cmd_ctx.creds.gid = UINT32_MAX;
+               cmd_ctx.creds.pid = 0;
+               cmd_ctx.session = NULL;
+               lttng_payload_clear(&cmd_ctx.reply_payload);
+               cmd_ctx.lttng_msg_size = 0;
+
+               DBG("Accepting client command ...");
+
+               /* Infinite blocking call, waiting for transmission */
+       restart:
+               health_poll_entry();
+               ret = lttng_poll_wait(&events, -1);
+               health_poll_exit();
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               goto restart;
+                       }
+                       goto error;
+               }
+
+               nb_fd = ret;
+
+               for (i = 0; i < nb_fd; i++) {
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
+
+                       health_code_update();
+
+                       if (pollfd == thread_quit_pipe_fd) {
+                               err = 0;
+                               goto exit;
+                       } else {
+                               /* Event on the client socket */
+                               if (revents & LPOLLIN) {
+                                       continue;
+                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       ERR("Client socket poll error");
+                                       goto error;
+                               } else {
+                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                                       goto error;
+                               }
+                       }
+               }
+
+               DBG("Wait for client response");
+
+               health_code_update();
+
+               sock = lttcomm_accept_unix_sock(client_sock);
+               if (sock < 0) {
+                       goto error;
+               }
+
+               /*
+                * Set the CLOEXEC flag. Return code is useless because either way, the
+                * show must go on.
+                */
+               (void) utils_set_fd_cloexec(sock);
+
+               /* Set socket option for credentials retrieval */
+               ret = lttcomm_setsockopt_creds_unix_sock(sock);
+               if (ret < 0) {
+                       goto error;
+               }
+
+               health_code_update();
+
+               /*
+                * Data is received from the lttng client. The struct
+                * lttcomm_session_msg (lsm) contains the command and the data
+                * requested by the client.
+                */
+               DBG("Receiving data from client ...");
+               ret = lttcomm_recv_creds_unix_sock(sock, &cmd_ctx.lsm,
+                               sizeof(struct lttcomm_session_msg), &cmd_ctx.creds);
+               if (ret != sizeof(struct lttcomm_session_msg)) {
+                       DBG("Incomplete recv() from client... continuing");
+                       ret = close(sock);
+                       if (ret) {
+                               PERROR("close");
+                       }
+                       sock = -1;
+                       continue;
+               }
+
+               health_code_update();
+
+               // TODO: Validate cmd_ctx, including a sanity check for
+               // security purposes.
+
+               rcu_thread_online();
+               /*
+                * This function dispatches the work to the kernel or userspace tracer
+                * libs and fills the lttcomm_lttng_msg data structure with all the
+                * information needed by the client. The command context struct contains
+                * everything this function may need.
+                */
+               ret = process_client_msg(&cmd_ctx, &sock, &sock_error);
+               rcu_thread_offline();
+               if (ret < 0) {
+                       if (sock >= 0) {
+                               ret = close(sock);
+                               if (ret) {
+                                       PERROR("close");
+                               }
+                       }
+                       sock = -1;
+                       /*
+                        * TODO: Inform the client somehow of the fatal error.
+                        * At this point, ret < 0 means that a zmalloc failed
+                        * (ENOMEM). The error is detected, but we keep
+                        * accepting commands unless a socket error has been
+                        * detected.
+                        */
+                       continue;
+               }
+
+               if (ret < LTTNG_OK || ret >= LTTNG_ERR_NR) {
+                       WARN("Command returned an invalid status code, returning unknown error: "
+                                       "command type = %s (%d), ret = %d",
+                                       lttcomm_sessiond_command_str((lttcomm_sessiond_command) cmd_ctx.lsm.cmd_type),
+                                       cmd_ctx.lsm.cmd_type, ret);
+                       ret = LTTNG_ERR_UNK;
+               }
+
+               cmd_completion_handler = cmd_pop_completion_handler();
+               if (cmd_completion_handler) {
+                       enum lttng_error_code completion_code;
+
+                       completion_code = cmd_completion_handler->run(
+                                       cmd_completion_handler->data);
+                       if (completion_code != LTTNG_OK) {
+                               continue;
+                       }
+               }
+
+               health_code_update();
+
+               if (sock >= 0) {
+                       struct lttng_payload_view view =
+                                       lttng_payload_view_from_payload(
+                                                       &cmd_ctx.reply_payload,
+                                                       0, -1);
+                       struct lttcomm_lttng_msg *llm = (typeof(
+                                       llm)) cmd_ctx.reply_payload.buffer.data;
+
+                       LTTNG_ASSERT(cmd_ctx.reply_payload.buffer.size >= sizeof(*llm));
+                       LTTNG_ASSERT(cmd_ctx.lttng_msg_size == cmd_ctx.reply_payload.buffer.size);
+
+                       llm->fd_count = lttng_payload_view_get_fd_handle_count(&view);
+
+                       DBG("Sending response (size: %d, retcode: %s (%d))",
+                                       cmd_ctx.lttng_msg_size,
+                                       lttng_strerror(-llm->ret_code),
+                                       llm->ret_code);
+                       ret = send_unix_sock(sock, &view);
+                       if (ret < 0) {
+                               ERR("Failed to send data back to client");
+                       }
+
+                       /* End of transmission */
+                       ret = close(sock);
+                       if (ret) {
+                               PERROR("close");
+                       }
+               }
+               sock = -1;
+
+               health_code_update();
+       }
+
+exit:
+error:
+       if (sock >= 0) {
+               ret = close(sock);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+
+       lttng_poll_clean(&events);
+
+error_listen:
+error_create_poll:
+       unlink(the_config.client_unix_sock_path.value);
+       ret = close(client_sock);
+       if (ret) {
+               PERROR("close");
+       }
+
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+
+       health_unregister(the_health_sessiond);
+
+       DBG("Client thread dying");
+       lttng_payload_reset(&cmd_ctx.reply_payload);
+       rcu_unregister_thread();
+       return NULL;
+}
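
The credential handling in the loop above (lttcomm_setsockopt_creds_unix_sock() followed by lttcomm_recv_creds_unix_sock()) retrieves the peer's uid/gid/pid along with the command. On Linux this kind of retrieval is typically done with SO_PASSCRED and SCM_CREDENTIALS ancillary data; a rough standalone sketch of the receiving side, assuming glibc on Linux, a socket on which SO_PASSCRED has already been enabled, and a hypothetical recv_with_creds() helper:

    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <cstring>

    /* Receive a fixed-size message plus the peer's credentials (Linux only). */
    static ssize_t recv_with_creds(int sock, void *buf, size_t len, struct ucred *creds)
    {
        struct msghdr msg = {};
        struct iovec iov;
        union {
            char buf[CMSG_SPACE(sizeof(struct ucred))];
            struct cmsghdr align; /* Ensure proper alignment of the control buffer. */
        } control;

        iov.iov_base = buf;
        iov.iov_len = len;
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = control.buf;
        msg.msg_controllen = sizeof(control.buf);

        const ssize_t ret = recvmsg(sock, &msg, 0);
        if (ret <= 0) {
            return ret;
        }

        /* Walk the ancillary data looking for the credentials. */
        for (struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); cmsg;
                cmsg = CMSG_NXTHDR(&msg, cmsg)) {
            if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_CREDENTIALS) {
                std::memcpy(creds, CMSG_DATA(cmsg), sizeof(*creds));
                break;
            }
        }

        return ret;
    }
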
+
+static
+bool shutdown_client_thread(void *thread_data)
+{
+       struct lttng_pipe *client_quit_pipe = (lttng_pipe *) thread_data;
+       const int write_fd = lttng_pipe_get_writefd(client_quit_pipe);
+
+       return notify_thread_pipe(write_fd) == 1;
+}
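
shutdown_client_thread() relies on the quit-pipe pattern: the poll loop in thread_manage_clients() watches the read end of the pipe, and writing a single byte to the write end wakes the loop so it can tear down cleanly. A minimal sketch of the same mechanism using plain POSIX pipe()/poll() instead of the lttng_pipe and lttng_poll wrappers; all names here are hypothetical:

    #include <poll.h>
    #include <unistd.h>

    static int quit_pipe[2]; /* Assume pipe(quit_pipe) was called during setup. */

    /* Called from another thread to request a shutdown. */
    static bool request_shutdown(void)
    {
        const char dummy = 'q';

        return write(quit_pipe[1], &dummy, 1) == 1;
    }

    /* Inside the worker's loop: wait on the work fd and on the quit pipe. */
    static bool should_exit(int work_fd)
    {
        struct pollfd fds[2];

        fds[0].fd = work_fd;
        fds[0].events = POLLIN;
        fds[1].fd = quit_pipe[0];
        fds[1].events = POLLIN;

        if (poll(fds, 2, -1) < 0) {
            return true;
        }

        /* Any event on the quit pipe means "stop the loop". */
        return fds[1].revents & POLLIN;
    }
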
+
+struct lttng_thread *launch_client_thread(void)
+{
+       bool thread_running;
+       struct lttng_pipe *client_quit_pipe;
+       struct lttng_thread *thread = NULL;
+       int client_sock_fd = -1;
+
+       sem_init(&thread_state.ready, 0, 0);
+       client_quit_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!client_quit_pipe) {
+               goto error;
+       }
+
+       client_sock_fd = create_client_sock();
+       if (client_sock_fd < 0) {
+               goto error;
+       }
+
+       thread_state.client_sock = client_sock_fd;
+       thread = lttng_thread_create("Client management",
+                       thread_manage_clients,
+                       shutdown_client_thread,
+                       cleanup_client_thread,
+                       client_quit_pipe);
+       if (!thread) {
+               goto error;
+       }
+       /* The client thread now owns the client sock fd and the quit pipe. */
+       client_sock_fd = -1;
+       client_quit_pipe = NULL;
+
+       /*
+        * This thread is part of the threads that need to be fully
+        * initialized before the session daemon is marked as "ready".
+        */
+       thread_running = wait_thread_status();
+       if (!thread_running) {
+               goto error;
+       }
+       return thread;
+error:
+       if (client_sock_fd >= 0) {
+               if (close(client_sock_fd)) {
+                       PERROR("Failed to close client socket");
+               }
+       }
+       lttng_thread_put(thread);
+       cleanup_client_thread(client_quit_pipe);
+       return NULL;
+}
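
launch_client_thread() follows the hand-off convention noted in the comment above: once lttng_thread_create() succeeds, the local client_sock_fd and client_quit_pipe are reset so the shared error path no longer releases resources that now belong to the thread. A tiny sketch of that convention, with hypothetical worker/worker_create() names:

    #include <new>
    #include <unistd.h>

    namespace {
    /* Hypothetical thread handle standing in for struct lttng_thread. */
    struct worker {
        int owned_fd;
    };

    /* Takes ownership of fd on success, much like the launch helper above. */
    worker *worker_create(int fd)
    {
        return new (std::nothrow) worker{ fd };
    }

    worker *launch(int fd)
    {
        worker *w = worker_create(fd);

        if (w) {
            /* Ownership transferred: the cleanup below must not close fd. */
            fd = -1;
        }

        if (fd >= 0) {
            close(fd);
        }

        return w;
    }
    } /* anonymous namespace */
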
diff --git a/src/bin/lttng-sessiond/cmd.c b/src/bin/lttng-sessiond/cmd.c
deleted file mode 100644 (file)
index 2c0fa46..0000000
+++ /dev/null
@@ -1,5893 +0,0 @@
-/*
- * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
- * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-
-#define _LGPL_SOURCE
-#include <inttypes.h>
-#include <stdio.h>
-#include <sys/stat.h>
-#include <urcu/list.h>
-#include <urcu/uatomic.h>
-
-#include <common/buffer-view.h>
-#include <common/common.h>
-#include <common/compat/string.h>
-#include <common/defaults.h>
-#include <common/dynamic-buffer.h>
-#include <common/kernel-ctl/kernel-ctl.h>
-#include <common/payload-view.h>
-#include <common/payload.h>
-#include <common/relayd/relayd.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-#include <common/string-utils/string-utils.h>
-#include <common/trace-chunk.h>
-#include <common/utils.h>
-#include <lttng/action/action-internal.h>
-#include <lttng/action/action.h>
-#include <lttng/channel-internal.h>
-#include <lttng/channel.h>
-#include <lttng/condition/condition-internal.h>
-#include <lttng/condition/condition.h>
-#include <lttng/condition/event-rule-matches-internal.h>
-#include <lttng/condition/event-rule-matches.h>
-#include <lttng/error-query-internal.h>
-#include <lttng/event-rule/event-rule-internal.h>
-#include <lttng/event-rule/event-rule.h>
-#include <lttng/location-internal.h>
-#include <lttng/lttng-error.h>
-#include <lttng/rotate-internal.h>
-#include <lttng/session-descriptor-internal.h>
-#include <lttng/session-internal.h>
-#include <lttng/tracker.h>
-#include <lttng/trigger/trigger-internal.h>
-#include <lttng/userspace-probe-internal.h>
-
-#include "agent-thread.h"
-#include "agent.h"
-#include "buffer-registry.h"
-#include "channel.h"
-#include "cmd.h"
-#include "consumer.h"
-#include "event-notifier-error-accounting.h"
-#include "event.h"
-#include "health-sessiond.h"
-#include "kernel-consumer.h"
-#include "kernel.h"
-#include "lttng-sessiond.h"
-#include "lttng-syscall.h"
-#include "notification-thread-commands.h"
-#include "notification-thread.h"
-#include "rotate.h"
-#include "rotation-thread.h"
-#include "session.h"
-#include "timer.h"
-#include "tracker.h"
-#include "utils.h"
-
-/* Sleep for 100ms between each check for the shm path's deletion. */
-#define SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US 100000
-
-struct cmd_destroy_session_reply_context {
-       int reply_sock_fd;
-       bool implicit_rotation_on_destroy;
-       /*
-        * Indicates whether or not an error occurred while launching the
-        * destruction of a session.
-        */
-       enum lttng_error_code destruction_status;
-};
-
-static enum lttng_error_code wait_on_path(void *path);
-
-/*
- * Command completion handler that is used by the destroy command
- * when a session that has a non-default shm_path is being destroyed.
- *
- * See comment in cmd_destroy_session() for the rationale.
- */
-static struct destroy_completion_handler {
-       struct cmd_completion_handler handler;
-       char shm_path[member_sizeof(struct ltt_session, shm_path)];
-} destroy_completion_handler = {
-       .handler = {
-               .run = wait_on_path,
-               .data = destroy_completion_handler.shm_path
-       },
-       .shm_path = { 0 },
-};
-
-static struct cmd_completion_handler *current_completion_handler;
-
-/*
- * Used to keep a unique index for each relayd socket created; this value is
- * associated with streams on the consumer so it can match the right relayd to
- * send to. It must be accessed with the relayd_net_seq_idx_lock held.
- */
-static pthread_mutex_t relayd_net_seq_idx_lock = PTHREAD_MUTEX_INITIALIZER;
-static uint64_t relayd_net_seq_idx;
-
-static int validate_ust_event_name(const char *);
-static int cmd_enable_event_internal(struct ltt_session *session,
-               const struct lttng_domain *domain,
-               char *channel_name, struct lttng_event *event,
-               char *filter_expression,
-               struct lttng_bytecode *filter,
-               struct lttng_event_exclusion *exclusion,
-               int wpipe);
-
-/*
- * Create a session path used by list_lttng_sessions for the case that the
- * session consumer is on the network.
- */
-static int build_network_session_path(char *dst, size_t size,
-               struct ltt_session *session)
-{
-       int ret, kdata_port, udata_port;
-       struct lttng_uri *kuri = NULL, *uuri = NULL, *uri = NULL;
-       char tmp_uurl[PATH_MAX], tmp_urls[PATH_MAX];
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(dst);
-
-       memset(tmp_urls, 0, sizeof(tmp_urls));
-       memset(tmp_uurl, 0, sizeof(tmp_uurl));
-
-       kdata_port = udata_port = DEFAULT_NETWORK_DATA_PORT;
-
-       if (session->kernel_session && session->kernel_session->consumer) {
-               kuri = &session->kernel_session->consumer->dst.net.control;
-               kdata_port = session->kernel_session->consumer->dst.net.data.port;
-       }
-
-       if (session->ust_session && session->ust_session->consumer) {
-               uuri = &session->ust_session->consumer->dst.net.control;
-               udata_port = session->ust_session->consumer->dst.net.data.port;
-       }
-
-       if (uuri == NULL && kuri == NULL) {
-               uri = &session->consumer->dst.net.control;
-               kdata_port = session->consumer->dst.net.data.port;
-       } else if (kuri && uuri) {
-               ret = uri_compare(kuri, uuri);
-               if (ret) {
-                       /* Not Equal */
-                       uri = kuri;
-                       /* Build uuri URL string */
-                       ret = uri_to_str_url(uuri, tmp_uurl, sizeof(tmp_uurl));
-                       if (ret < 0) {
-                               goto error;
-                       }
-               } else {
-                       uri = kuri;
-               }
-       } else if (kuri && uuri == NULL) {
-               uri = kuri;
-       } else if (uuri && kuri == NULL) {
-               uri = uuri;
-       }
-
-       ret = uri_to_str_url(uri, tmp_urls, sizeof(tmp_urls));
-       if (ret < 0) {
-               goto error;
-       }
-
-       /*
-        * Do we have a UST url set. If yes, this means we have both kernel and UST
-        * to print.
-        */
-       if (*tmp_uurl != '\0') {
-               ret = snprintf(dst, size, "[K]: %s [data: %d] -- [U]: %s [data: %d]",
-                               tmp_urls, kdata_port, tmp_uurl, udata_port);
-       } else {
-               int dport;
-               if (kuri || (!kuri && !uuri)) {
-                       dport = kdata_port;
-               } else {
-                       /* No kernel URI, use the UST port. */
-                       dport = udata_port;
-               }
-               ret = snprintf(dst, size, "%s [data: %d]", tmp_urls, dport);
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Get run-time attributes if the session has been started (discarded events,
- * lost packets).
- */
-static int get_kernel_runtime_stats(struct ltt_session *session,
-               struct ltt_kernel_channel *kchan, uint64_t *discarded_events,
-               uint64_t *lost_packets)
-{
-       int ret;
-
-       if (!session->has_been_started) {
-               ret = 0;
-               *discarded_events = 0;
-               *lost_packets = 0;
-               goto end;
-       }
-
-       ret = consumer_get_discarded_events(session->id, kchan->key,
-                       session->kernel_session->consumer,
-                       discarded_events);
-       if (ret < 0) {
-               goto end;
-       }
-
-       ret = consumer_get_lost_packets(session->id, kchan->key,
-                       session->kernel_session->consumer,
-                       lost_packets);
-       if (ret < 0) {
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Get run-time attributes if the session has been started (discarded events,
- * lost packets).
- */
-static int get_ust_runtime_stats(struct ltt_session *session,
-               struct ltt_ust_channel *uchan, uint64_t *discarded_events,
-               uint64_t *lost_packets)
-{
-       int ret;
-       struct ltt_ust_session *usess;
-
-       if (!discarded_events || !lost_packets) {
-               ret = -1;
-               goto end;
-       }
-
-       usess = session->ust_session;
-       LTTNG_ASSERT(discarded_events);
-       LTTNG_ASSERT(lost_packets);
-
-       if (!usess || !session->has_been_started) {
-               *discarded_events = 0;
-               *lost_packets = 0;
-               ret = 0;
-               goto end;
-       }
-
-       if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
-               ret = ust_app_uid_get_channel_runtime_stats(usess->id,
-                               &usess->buffer_reg_uid_list,
-                               usess->consumer, uchan->id,
-                               uchan->attr.overwrite,
-                               discarded_events,
-                               lost_packets);
-       } else if (usess->buffer_type == LTTNG_BUFFER_PER_PID) {
-               ret = ust_app_pid_get_channel_runtime_stats(usess,
-                               uchan, usess->consumer,
-                               uchan->attr.overwrite,
-                               discarded_events,
-                               lost_packets);
-               if (ret < 0) {
-                       goto end;
-               }
-               *discarded_events += uchan->per_pid_closed_app_discarded;
-               *lost_packets += uchan->per_pid_closed_app_lost;
-       } else {
-               ERR("Unsupported buffer type");
-               abort();
-               ret = -1;
-               goto end;
-       }
-
-end:
-       return ret;
-}
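
For per-PID buffers, get_ust_runtime_stats() adds the counters accumulated from applications that have already exited (per_pid_closed_app_discarded and per_pid_closed_app_lost) on top of what the consumer reports for live applications. A tiny sketch of that aggregation, with hypothetical field names:

    #include <cstdint>

    /* Hypothetical per-channel statistics. */
    struct channel_stats {
        uint64_t live_discarded;
        uint64_t live_lost;
        uint64_t closed_app_discarded;
        uint64_t closed_app_lost;
    };

    static void total_stats(const channel_stats &stats, uint64_t *discarded, uint64_t *lost)
    {
        /*
         * Live counters come from the consumer; closed-app counters were
         * accumulated when the instrumented applications exited.
         */
        *discarded = stats.live_discarded + stats.closed_app_discarded;
        *lost = stats.live_lost + stats.closed_app_lost;
    }
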
-
-/*
- * Fill lttng_channel array of all channels.
- */
-static ssize_t list_lttng_channels(enum lttng_domain_type domain,
-               struct ltt_session *session, struct lttng_channel *channels,
-               struct lttng_channel_extended *chan_exts)
-{
-       int i = 0, ret = 0;
-       struct ltt_kernel_channel *kchan;
-
-       DBG("Listing channels for session %s", session->name);
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               /* Kernel channels */
-               if (session->kernel_session != NULL) {
-                       cds_list_for_each_entry(kchan,
-                                       &session->kernel_session->channel_list.head, list) {
-                               uint64_t discarded_events, lost_packets;
-                               struct lttng_channel_extended *extended;
-
-                               extended = (struct lttng_channel_extended *)
-                                               kchan->channel->attr.extended.ptr;
-
-                               ret = get_kernel_runtime_stats(session, kchan,
-                                               &discarded_events, &lost_packets);
-                               if (ret < 0) {
-                                       goto end;
-                               }
-                               /* Copy lttng_channel struct to array */
-                               memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
-                               channels[i].enabled = kchan->enabled;
-                               chan_exts[i].discarded_events =
-                                               discarded_events;
-                               chan_exts[i].lost_packets = lost_packets;
-                               chan_exts[i].monitor_timer_interval =
-                                               extended->monitor_timer_interval;
-                               chan_exts[i].blocking_timeout = 0;
-                               i++;
-                       }
-               }
-               break;
-       case LTTNG_DOMAIN_UST:
-       {
-               struct lttng_ht_iter iter;
-               struct ltt_ust_channel *uchan;
-
-               rcu_read_lock();
-               cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
-                               &iter.iter, uchan, node.node) {
-                       uint64_t discarded_events = 0, lost_packets = 0;
-
-                       if (lttng_strncpy(channels[i].name, uchan->name,
-                                       LTTNG_SYMBOL_NAME_LEN)) {
-                               break;
-                       }
-                       channels[i].attr.overwrite = uchan->attr.overwrite;
-                       channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
-                       channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
-                       channels[i].attr.switch_timer_interval =
-                               uchan->attr.switch_timer_interval;
-                       channels[i].attr.read_timer_interval =
-                               uchan->attr.read_timer_interval;
-                       channels[i].enabled = uchan->enabled;
-                       channels[i].attr.tracefile_size = uchan->tracefile_size;
-                       channels[i].attr.tracefile_count = uchan->tracefile_count;
-
-                       /*
-                        * Map enum lttng_ust_output to enum lttng_event_output.
-                        */
-                       switch (uchan->attr.output) {
-                       case LTTNG_UST_ABI_MMAP:
-                               channels[i].attr.output = LTTNG_EVENT_MMAP;
-                               break;
-                       default:
-                               /*
-                                * LTTNG_UST_MMAP is the only supported UST
-                                * output mode.
-                                */
-                               abort();
-                               break;
-                       }
-
-                       chan_exts[i].monitor_timer_interval =
-                                       uchan->monitor_timer_interval;
-                       chan_exts[i].blocking_timeout =
-                               uchan->attr.u.s.blocking_timeout;
-
-                       ret = get_ust_runtime_stats(session, uchan,
-                                       &discarded_events, &lost_packets);
-                       if (ret < 0) {
-                               break;
-                       }
-                       chan_exts[i].discarded_events = discarded_events;
-                       chan_exts[i].lost_packets = lost_packets;
-                       i++;
-               }
-               rcu_read_unlock();
-               break;
-       }
-       default:
-               break;
-       }
-
-end:
-       if (ret < 0) {
-               return -LTTNG_ERR_FATAL;
-       } else {
-               return LTTNG_OK;
-       }
-}
-
-static int append_extended_info(const char *filter_expression,
-               struct lttng_event_exclusion *exclusion,
-               struct lttng_userspace_probe_location *probe_location,
-               struct lttng_payload *payload)
-{
-       int ret = 0;
-       size_t filter_len = 0;
-       size_t nb_exclusions = 0;
-       size_t userspace_probe_location_len = 0;
-       struct lttcomm_event_extended_header extended_header = {};
-       struct lttcomm_event_extended_header *p_extended_header;
-       const size_t original_payload_size = payload->buffer.size;
-
-       ret = lttng_dynamic_buffer_append(&payload->buffer, &extended_header,
-                       sizeof(extended_header));
-       if (ret) {
-               goto end;
-       }
-
-       if (filter_expression) {
-               filter_len = strlen(filter_expression) + 1;
-               ret = lttng_dynamic_buffer_append(&payload->buffer,
-                               filter_expression, filter_len);
-               if (ret) {
-                       goto end;
-               }
-       }
-
-       if (exclusion) {
-               const size_t len = exclusion->count * LTTNG_SYMBOL_NAME_LEN;
-
-               nb_exclusions = exclusion->count;
-
-               ret = lttng_dynamic_buffer_append(
-                               &payload->buffer, &exclusion->names, len);
-               if (ret) {
-                       goto end;
-               }
-       }
-
-       if (probe_location) {
-               const size_t size_before_probe = payload->buffer.size;
-
-               ret = lttng_userspace_probe_location_serialize(probe_location,
-                               payload);
-               if (ret < 0) {
-                       ret = -1;
-                       goto end;
-               }
-
-               userspace_probe_location_len =
-                               payload->buffer.size - size_before_probe;
-       }
-
-       /* Set header fields */
-       p_extended_header = (struct lttcomm_event_extended_header *)
-                       (payload->buffer.data + original_payload_size);
-
-       p_extended_header->filter_len = filter_len;
-       p_extended_header->nb_exclusions = nb_exclusions;
-       p_extended_header->userspace_probe_location_len =
-                       userspace_probe_location_len;
-
-       ret = 0;
-end:
-       return ret;
-}
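
append_extended_info() first appends a zeroed lttcomm_event_extended_header, then the variable-length filter, exclusion and probe data, and only afterwards fills the header in place. Note that the header pointer is recomputed from the saved offset rather than kept across the appends, since the buffer may have been reallocated. A minimal sketch of this reserve-then-backfill pattern over a std::vector, with hypothetical names:

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    /* Hypothetical header, analogous to lttcomm_event_extended_header. */
    struct record_header {
        uint32_t body_len;
    };

    static void append_record(std::vector<char> &buf, const std::string &body)
    {
        const size_t header_offset = buf.size();
        const record_header placeholder = {};

        /* Reserve space for the header first. */
        buf.insert(buf.end(),
                reinterpret_cast<const char *>(&placeholder),
                reinterpret_cast<const char *>(&placeholder) + sizeof(placeholder));

        /* Append the variable-length part; this may reallocate buf. */
        buf.insert(buf.end(), body.begin(), body.end());

        /* Backfill the header through the saved offset, not a stale pointer. */
        record_header header = {};
        header.body_len = static_cast<uint32_t>(body.size());
        std::memcpy(buf.data() + header_offset, &header, sizeof(header));
    }
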
-
-/*
- * Create a list of agent domain events.
- *
- * Return number of events in list on success or else a negative value.
- */
-static int list_lttng_agent_events(struct agent *agt,
-               struct lttng_payload *payload)
-{
-       int nb_events = 0, ret = 0;
-       const struct agent_event *agent_event;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(agt);
-
-       DBG3("Listing agent events");
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry (
-                       agt->events->ht, &iter.iter, agent_event, node.node) {
-               struct lttng_event event = {
-                       .enabled = AGENT_EVENT_IS_ENABLED(agent_event),
-                       .loglevel = agent_event->loglevel_value,
-                       .loglevel_type = agent_event->loglevel_type,
-               };
-
-               ret = lttng_strncpy(event.name, agent_event->name, sizeof(event.name));
-               if (ret) {
-                       /* Internal error, invalid name. */
-                       ERR("Invalid event name while listing agent events: '%s' exceeds the maximal allowed length of %zu bytes",
-                                       agent_event->name, sizeof(event.name));
-                       ret = -LTTNG_ERR_UNK;
-                       goto end;
-               }
-
-               ret = lttng_dynamic_buffer_append(
-                               &payload->buffer, &event, sizeof(event));
-               if (ret) {
-                       ERR("Failed to append event to payload");
-                       ret = -LTTNG_ERR_NOMEM;
-                       goto end;
-               }
-
-               nb_events++;
-       }
-
-       cds_lfht_for_each_entry (
-               agt->events->ht, &iter.iter, agent_event, node.node) {
-               /* Append extended info. */
-               ret = append_extended_info(agent_event->filter_expression, NULL,
-                               NULL, payload);
-               if (ret) {
-                       ERR("Failed to append extended event info to payload");
-                       ret = -LTTNG_ERR_NOMEM;
-                       goto end;
-               }
-       }
-
-       ret = nb_events;
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Create a list of ust global domain events.
- */
-static int list_lttng_ust_global_events(char *channel_name,
-               struct ltt_ust_domain_global *ust_global,
-               struct lttng_payload *payload)
-{
-       int ret = 0;
-       unsigned int nb_events = 0;
-       struct lttng_ht_iter iter;
-       const struct lttng_ht_node_str *node;
-       const struct ltt_ust_channel *uchan;
-       const struct ltt_ust_event *uevent;
-
-       DBG("Listing UST global events for channel %s", channel_name);
-
-       rcu_read_lock();
-
-       lttng_ht_lookup(ust_global->channels, (void *) channel_name, &iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-       if (node == NULL) {
-               ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
-               goto end;
-       }
-
-       uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);
-
-       DBG3("Listing UST global events");
-
-       cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
-               struct lttng_event event = {};
-
-               if (uevent->internal) {
-                       continue;
-               }
-
-               ret = lttng_strncpy(event.name, uevent->attr.name, sizeof(event.name));
-               if (ret) {
-                       /* Internal error, invalid name. */
-                       ERR("Invalid event name while listing user space tracer events: '%s' exceeds the maximal allowed length of %zu bytes",
-                                       uevent->attr.name, sizeof(event.name));
-                       ret = -LTTNG_ERR_UNK;
-                       goto end;
-               }
-
-               event.enabled = uevent->enabled;
-
-               switch (uevent->attr.instrumentation) {
-               case LTTNG_UST_ABI_TRACEPOINT:
-                       event.type = LTTNG_EVENT_TRACEPOINT;
-                       break;
-               case LTTNG_UST_ABI_PROBE:
-                       event.type = LTTNG_EVENT_PROBE;
-                       break;
-               case LTTNG_UST_ABI_FUNCTION:
-                       event.type = LTTNG_EVENT_FUNCTION;
-                       break;
-               }
-
-               event.loglevel = uevent->attr.loglevel;
-               switch (uevent->attr.loglevel_type) {
-               case LTTNG_UST_ABI_LOGLEVEL_ALL:
-                       event.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
-                       break;
-               case LTTNG_UST_ABI_LOGLEVEL_RANGE:
-                       event.loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
-                       break;
-               case LTTNG_UST_ABI_LOGLEVEL_SINGLE:
-                       event.loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
-                       break;
-               }
-
-               if (uevent->filter) {
-                       event.filter = 1;
-               }
-
-               if (uevent->exclusion) {
-                       event.exclusion = 1;
-               }
-
-               ret = lttng_dynamic_buffer_append(&payload->buffer, &event, sizeof(event));
-               if (ret) {
-                       ERR("Failed to append event to payload");
-                       ret = -LTTNG_ERR_NOMEM;
-                       goto end;
-               }
-
-               nb_events++;
-       }
-
-       cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
-               /* Append extended info. */
-               ret = append_extended_info(uevent->filter_expression,
-                               uevent->exclusion, NULL, payload);
-               if (ret) {
-                       ERR("Failed to append extended event info to payload");
-                       ret = -LTTNG_ERR_FATAL;
-                       goto end;
-               }
-       }
-
-       ret = nb_events;
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Fill lttng_event array of all kernel events in the channel.
- */
-static int list_lttng_kernel_events(char *channel_name,
-               struct ltt_kernel_session *kernel_session,
-               struct lttng_payload *payload)
-{
-       int ret;
-       unsigned int nb_event;
-       const struct ltt_kernel_event *kevent;
-       const struct ltt_kernel_channel *kchan;
-
-       kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
-       if (kchan == NULL) {
-               ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
-               goto error;
-       }
-
-       nb_event = kchan->event_count;
-
-       DBG("Listing events for channel %s", kchan->channel->name);
-
-       /* Kernel channels */
-       cds_list_for_each_entry(kevent, &kchan->events_list.head , list) {
-               struct lttng_event event = {};
-
-               ret = lttng_strncpy(event.name, kevent->event->name, sizeof(event.name));
-               if (ret) {
-                       /* Internal error, invalid name. */
-                       ERR("Invalid event name while listing kernel events: '%s' exceeds the maximal allowed length of %zu bytes",
-                                       kevent->event->name,
-                                       sizeof(event.name));
-                       ret = -LTTNG_ERR_UNK;
-                       goto end;
-               }
-
-               event.enabled = kevent->enabled;
-               event.filter = (unsigned char) !!kevent->filter_expression;
-
-               switch (kevent->event->instrumentation) {
-               case LTTNG_KERNEL_ABI_TRACEPOINT:
-                       event.type = LTTNG_EVENT_TRACEPOINT;
-                       break;
-               case LTTNG_KERNEL_ABI_KRETPROBE:
-                       event.type = LTTNG_EVENT_FUNCTION;
-                       memcpy(&event.attr.probe, &kevent->event->u.kprobe,
-                                       sizeof(struct lttng_kernel_abi_kprobe));
-                       break;
-               case LTTNG_KERNEL_ABI_KPROBE:
-                       event.type = LTTNG_EVENT_PROBE;
-                       memcpy(&event.attr.probe, &kevent->event->u.kprobe,
-                                       sizeof(struct lttng_kernel_abi_kprobe));
-                       break;
-               case LTTNG_KERNEL_ABI_UPROBE:
-                       event.type = LTTNG_EVENT_USERSPACE_PROBE;
-                       break;
-               case LTTNG_KERNEL_ABI_FUNCTION:
-                       event.type = LTTNG_EVENT_FUNCTION;
-                       memcpy(&event.attr.ftrace, &kevent->event->u.ftrace,
-                                       sizeof(struct lttng_kernel_abi_function));
-                       break;
-               case LTTNG_KERNEL_ABI_NOOP:
-                       event.type = LTTNG_EVENT_NOOP;
-                       break;
-               case LTTNG_KERNEL_ABI_SYSCALL:
-                       event.type = LTTNG_EVENT_SYSCALL;
-                       break;
-               case LTTNG_KERNEL_ABI_ALL:
-                       /* fall-through. */
-               default:
-                       abort();
-                       break;
-               }
-
-               ret = lttng_dynamic_buffer_append(
-                               &payload->buffer, &event, sizeof(event));
-               if (ret) {
-                       ERR("Failed to append event to payload");
-                       ret = -LTTNG_ERR_NOMEM;
-                       goto end;
-               }
-       }
-
-       cds_list_for_each_entry(kevent, &kchan->events_list.head , list) {
-               /* Append extended info. */
-               ret = append_extended_info(kevent->filter_expression, NULL,
-                               kevent->userspace_probe_location, payload);
-               if (ret) {
-                       DBG("Error appending extended info message");
-                       ret = -LTTNG_ERR_FATAL;
-                       goto error;
-               }
-       }
-
-end:
-       return nb_event;
-error:
-       return ret;
-}
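
The three listing functions above all serialize in two passes over the same collection: one pass emits the fixed-size lttng_event records, and a second pass, in the same iteration order, appends the variable-length extended information, which lets the client pair record i with extended block i. A minimal sketch of that layout over a std::vector buffer, with stand-in types:

    #include <cstdint>
    #include <string>
    #include <utility>
    #include <vector>

    /* Stand-in for the fixed-size lttng_event record. */
    struct fixed_record {
        uint32_t id;
    };

    static std::vector<char> serialize(const std::vector<std::pair<uint32_t, std::string>> &events)
    {
        std::vector<char> out;

        /* Pass 1: fixed-size records, in container order. */
        for (const auto &event : events) {
            fixed_record rec = {};

            rec.id = event.first;
            out.insert(out.end(), reinterpret_cast<const char *>(&rec),
                    reinterpret_cast<const char *>(&rec) + sizeof(rec));
        }

        /* Pass 2: variable-size extended blocks, in the same order. */
        for (const auto &event : events) {
            const uint32_t len = static_cast<uint32_t>(event.second.size());

            out.insert(out.end(), reinterpret_cast<const char *>(&len),
                    reinterpret_cast<const char *>(&len) + sizeof(len));
            out.insert(out.end(), event.second.begin(), event.second.end());
        }

        return out;
    }
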
-
-/*
- * Add a URI to the consumer output object. Set the correct path depending on
- * the domain, adding the default trace directory.
- */
-static enum lttng_error_code add_uri_to_consumer(
-               const struct ltt_session *session,
-               struct consumer_output *consumer,
-               struct lttng_uri *uri, enum lttng_domain_type domain)
-{
-       int ret;
-       enum lttng_error_code ret_code = LTTNG_OK;
-
-       LTTNG_ASSERT(uri);
-
-       if (consumer == NULL) {
-               DBG("No consumer detected. Don't add URI. Stopping.");
-               ret_code = LTTNG_ERR_NO_CONSUMER;
-               goto error;
-       }
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               ret = lttng_strncpy(consumer->domain_subdir,
-                               DEFAULT_KERNEL_TRACE_DIR,
-                               sizeof(consumer->domain_subdir));
-               break;
-       case LTTNG_DOMAIN_UST:
-               ret = lttng_strncpy(consumer->domain_subdir,
-                               DEFAULT_UST_TRACE_DIR,
-                               sizeof(consumer->domain_subdir));
-               break;
-       default:
-               /*
-                * This case is possible if we try to add the URI to the global
-                * tracing session consumer object, in which case there is no
-                * subdir.
-                */
-               memset(consumer->domain_subdir, 0,
-                               sizeof(consumer->domain_subdir));
-               ret = 0;
-       }
-       if (ret) {
-               ERR("Failed to initialize consumer output domain subdirectory");
-               ret_code = LTTNG_ERR_FATAL;
-               goto error;
-       }
-
-       switch (uri->dtype) {
-       case LTTNG_DST_IPV4:
-       case LTTNG_DST_IPV6:
-               DBG2("Setting network URI to consumer");
-
-               if (consumer->type == CONSUMER_DST_NET) {
-                       if ((uri->stype == LTTNG_STREAM_CONTROL &&
-                               consumer->dst.net.control_isset) ||
-                               (uri->stype == LTTNG_STREAM_DATA &&
-                               consumer->dst.net.data_isset)) {
-                               ret_code = LTTNG_ERR_URL_EXIST;
-                               goto error;
-                       }
-               } else {
-                       memset(&consumer->dst, 0, sizeof(consumer->dst));
-               }
-
-               /* Set URI into consumer output object */
-               ret = consumer_set_network_uri(session, consumer, uri);
-               if (ret < 0) {
-                       ret_code = -ret;
-                       goto error;
-               } else if (ret == 1) {
-                       /*
-                        * URI was the same in the consumer so we do not append the subdir
-                        * again so to not duplicate output dir.
-                        */
-                       ret_code = LTTNG_OK;
-                       goto error;
-               }
-               break;
-       case LTTNG_DST_PATH:
-               if (*uri->dst.path != '/' || strstr(uri->dst.path, "../")) {
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto error;
-               }
-               DBG2("Setting trace directory path from URI to %s",
-                               uri->dst.path);
-               memset(&consumer->dst, 0, sizeof(consumer->dst));
-
-               ret = lttng_strncpy(consumer->dst.session_root_path,
-                               uri->dst.path,
-                               sizeof(consumer->dst.session_root_path));
-               if (ret) {
-                       ret_code = LTTNG_ERR_FATAL;
-                       goto error;
-               }
-               consumer->type = CONSUMER_DST_LOCAL;
-               break;
-       }
-
-       ret_code = LTTNG_OK;
-error:
-       return ret_code;
-}
-
-/*
- * Init tracing by creating the trace directory and sending fds to the kernel
- * consumer.
- */
-static int init_kernel_tracing(struct ltt_kernel_session *session)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct consumer_socket *socket;
-
-       LTTNG_ASSERT(session);
-
-       rcu_read_lock();
-
-       if (session->consumer_fds_sent == 0 && session->consumer != NULL) {
-               cds_lfht_for_each_entry(session->consumer->socks->ht, &iter.iter,
-                               socket, node.node) {
-                       pthread_mutex_lock(socket->lock);
-                       ret = kernel_consumer_send_session(socket, session);
-                       pthread_mutex_unlock(socket->lock);
-                       if (ret < 0) {
-                               ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
-                               goto error;
-                       }
-               }
-       }
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Create a socket to the relayd using the URI.
- *
- * On success, the relayd_sock pointer is set to the created socket.
- * Else, it remains untouched and an LTTng error code is returned.
- */
-static enum lttng_error_code create_connect_relayd(struct lttng_uri *uri,
-               struct lttcomm_relayd_sock **relayd_sock,
-               struct consumer_output *consumer)
-{
-       int ret;
-       enum lttng_error_code status = LTTNG_OK;
-       struct lttcomm_relayd_sock *rsock;
-
-       rsock = lttcomm_alloc_relayd_sock(uri, RELAYD_VERSION_COMM_MAJOR,
-                       RELAYD_VERSION_COMM_MINOR);
-       if (!rsock) {
-               status = LTTNG_ERR_FATAL;
-               goto error;
-       }
-
-       /*
-        * Connect to relayd so we can proceed with a session creation. This
-        * call can possibly block for an arbitrary amount of time, so the
-        * health state is set to poll execution around it.
-        */
-       health_poll_entry();
-       ret = relayd_connect(rsock);
-       health_poll_exit();
-       if (ret < 0) {
-               ERR("Unable to reach lttng-relayd");
-               status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
-               goto free_sock;
-       }
-
-       /* Create socket for control stream. */
-       if (uri->stype == LTTNG_STREAM_CONTROL) {
-               uint64_t result_flags;
-
-               DBG3("Creating relayd stream socket from URI");
-
-               /* Check relayd version */
-               ret = relayd_version_check(rsock);
-               if (ret == LTTNG_ERR_RELAYD_VERSION_FAIL) {
-                       status = LTTNG_ERR_RELAYD_VERSION_FAIL;
-                       goto close_sock;
-               } else if (ret < 0) {
-                       ERR("Unable to reach lttng-relayd");
-                       status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
-                       goto close_sock;
-               }
-               consumer->relay_major_version = rsock->major;
-               consumer->relay_minor_version = rsock->minor;
-               ret = relayd_get_configuration(rsock, 0,
-                               &result_flags);
-               if (ret < 0) {
-                       ERR("Unable to get relayd configuration");
-                       status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
-                       goto close_sock;
-               }
-               if (result_flags & LTTCOMM_RELAYD_CONFIGURATION_FLAG_CLEAR_ALLOWED) {
-                       consumer->relay_allows_clear = true;
-               }
-       } else if (uri->stype == LTTNG_STREAM_DATA) {
-               DBG3("Creating relayd data socket from URI");
-       } else {
-               /* Command is not valid */
-               ERR("Relayd invalid stream type: %d", uri->stype);
-               status = LTTNG_ERR_INVALID;
-               goto close_sock;
-       }
-
-       *relayd_sock = rsock;
-
-       return status;
-
-close_sock:
-       /* The returned value is not useful since we are on an error path. */
-       (void) relayd_close(rsock);
-free_sock:
-       free(rsock);
-error:
-       return status;
-}
-
-/*
- * Connect to the relayd using URI and send the socket to the right consumer.
- *
- * The consumer socket lock must be held by the caller.
- *
- * Returns LTTNG_OK on success or an LTTng error code on failure.
- */
-static enum lttng_error_code send_consumer_relayd_socket(
-               unsigned int session_id,
-               struct lttng_uri *relayd_uri,
-               struct consumer_output *consumer,
-               struct consumer_socket *consumer_sock,
-               const char *session_name, const char *hostname,
-               const char *base_path, int session_live_timer,
-               const uint64_t *current_chunk_id,
-               time_t session_creation_time,
-               bool session_name_contains_creation_time)
-{
-       int ret;
-       struct lttcomm_relayd_sock *rsock = NULL;
-       enum lttng_error_code status;
-
-       /* Connect to relayd and make version check if uri is the control. */
-       status = create_connect_relayd(relayd_uri, &rsock, consumer);
-       if (status != LTTNG_OK) {
-               goto relayd_comm_error;
-       }
-       LTTNG_ASSERT(rsock);
-
-       /* Set the network sequence index if not set. */
-       if (consumer->net_seq_index == (uint64_t) -1ULL) {
-               pthread_mutex_lock(&relayd_net_seq_idx_lock);
-               /*
-                * Increment net_seq_idx because we are about to transfer the
-                * new relayd socket to the consumer.
-                * Assign unique key so the consumer can match streams.
-                */
-               consumer->net_seq_index = ++relayd_net_seq_idx;
-               pthread_mutex_unlock(&relayd_net_seq_idx_lock);
-       }
-
-       /* Send relayd socket to consumer. */
-       ret = consumer_send_relayd_socket(consumer_sock, rsock, consumer,
-                       relayd_uri->stype, session_id,
-                       session_name, hostname, base_path,
-                       session_live_timer, current_chunk_id,
-                       session_creation_time, session_name_contains_creation_time);
-       if (ret < 0) {
-               status = LTTNG_ERR_ENABLE_CONSUMER_FAIL;
-               goto close_sock;
-       }
-
-       /* Flag that the corresponding socket was sent. */
-       if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
-               consumer_sock->control_sock_sent = 1;
-       } else if (relayd_uri->stype == LTTNG_STREAM_DATA) {
-               consumer_sock->data_sock_sent = 1;
-       }
-
-       /*
-        * Close the socket which was dup'd on the consumer side. The session
-        * daemon does NOT keep track of the relayd socket(s) once they have
-        * been transferred to the consumer.
-        */
-
-close_sock:
-       if (status != LTTNG_OK) {
-               /*
-                * The consumer output for this session should not be used anymore
-                * since the relayd connection failed thus making any tracing or/and
-                * streaming not usable.
-                */
-               consumer->enabled = 0;
-       }
-       (void) relayd_close(rsock);
-       free(rsock);
-
-relayd_comm_error:
-       return status;
-}
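
The net_seq_index assignment above is the classic pattern of allocating a unique identifier under a mutex: relayd_net_seq_idx is only read or incremented while relayd_net_seq_idx_lock is held, so each relayd/consumer pairing gets its own index. A minimal standalone sketch of the same idea with pthread primitives:

    #include <cstdint>
    #include <pthread.h>

    static pthread_mutex_t seq_idx_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t seq_idx;

    /* Hand out a unique, monotonically increasing index. */
    static uint64_t allocate_seq_idx(void)
    {
        uint64_t idx;

        pthread_mutex_lock(&seq_idx_lock);
        idx = ++seq_idx;
        pthread_mutex_unlock(&seq_idx_lock);

        return idx;
    }
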
-
-/*
- * Send both relayd sockets to a specific consumer and domain.  This is a
- * helper function to facilitate sending the information to the consumer for a
- * session.
- *
- * The consumer socket lock must be held by the caller.
- *
- * Returns LTTNG_OK, or an LTTng error code on failure.
- */
-static enum lttng_error_code send_consumer_relayd_sockets(
-               enum lttng_domain_type domain,
-               unsigned int session_id, struct consumer_output *consumer,
-               struct consumer_socket *sock, const char *session_name,
-               const char *hostname, const char *base_path, int session_live_timer,
-               const uint64_t *current_chunk_id, time_t session_creation_time,
-               bool session_name_contains_creation_time)
-{
-       enum lttng_error_code status = LTTNG_OK;
-
-       LTTNG_ASSERT(consumer);
-       LTTNG_ASSERT(sock);
-
-       /* Sending control relayd socket. */
-       if (!sock->control_sock_sent) {
-               status = send_consumer_relayd_socket(session_id,
-                               &consumer->dst.net.control, consumer, sock,
-                               session_name, hostname, base_path, session_live_timer,
-                               current_chunk_id, session_creation_time,
-                               session_name_contains_creation_time);
-               if (status != LTTNG_OK) {
-                       goto error;
-               }
-       }
-
-       /* Sending data relayd socket. */
-       if (!sock->data_sock_sent) {
-               status = send_consumer_relayd_socket(session_id,
-                               &consumer->dst.net.data, consumer, sock,
-                               session_name, hostname, base_path, session_live_timer,
-                               current_chunk_id, session_creation_time,
-                               session_name_contains_creation_time);
-               if (status != LTTNG_OK) {
-                       goto error;
-               }
-       }
-
-error:
-       return status;
-}
-
-/*
- * Set up relayd connections for a tracing session. First create the sockets to
- * the relayd and send them to the right domain consumer. The consumer type
- * MUST be network.
- */
-int cmd_setup_relayd(struct ltt_session *session)
-{
-       int ret = LTTNG_OK;
-       struct ltt_ust_session *usess;
-       struct ltt_kernel_session *ksess;
-       struct consumer_socket *socket;
-       struct lttng_ht_iter iter;
-       LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
-
-       LTTNG_ASSERT(session);
-
-       usess = session->ust_session;
-       ksess = session->kernel_session;
-
-       DBG("Setting relayd for session %s", session->name);
-
-       rcu_read_lock();
-       if (session->current_trace_chunk) {
-               enum lttng_trace_chunk_status status = lttng_trace_chunk_get_id(
-                               session->current_trace_chunk, &current_chunk_id.value);
-
-               if (status == LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       current_chunk_id.is_set = true;
-               } else {
-                       ERR("Failed to get current trace chunk id");
-                       ret = LTTNG_ERR_UNK;
-                       goto error;
-               }
-       }
-
-       if (usess && usess->consumer && usess->consumer->type == CONSUMER_DST_NET
-                       && usess->consumer->enabled) {
-               /* For each consumer socket, send relayd sockets */
-               cds_lfht_for_each_entry(usess->consumer->socks->ht, &iter.iter,
-                               socket, node.node) {
-                       pthread_mutex_lock(socket->lock);
-                       ret = send_consumer_relayd_sockets(LTTNG_DOMAIN_UST, session->id,
-                                       usess->consumer, socket,
-                                       session->name, session->hostname,
-                                       session->base_path,
-                                       session->live_timer,
-                                       current_chunk_id.is_set ? &current_chunk_id.value : NULL,
-                                       session->creation_time,
-                                       session->name_contains_creation_time);
-                       pthread_mutex_unlock(socket->lock);
-                       if (ret != LTTNG_OK) {
-                               goto error;
-                       }
-                       /* Session is now ready for network streaming. */
-                       session->net_handle = 1;
-               }
-               session->consumer->relay_major_version =
-                       usess->consumer->relay_major_version;
-               session->consumer->relay_minor_version =
-                       usess->consumer->relay_minor_version;
-               session->consumer->relay_allows_clear =
-                       usess->consumer->relay_allows_clear;
-       }
-
-       if (ksess && ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET
-                       && ksess->consumer->enabled) {
-               cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
-                               socket, node.node) {
-                       pthread_mutex_lock(socket->lock);
-                       ret = send_consumer_relayd_sockets(LTTNG_DOMAIN_KERNEL, session->id,
-                                       ksess->consumer, socket,
-                                       session->name, session->hostname,
-                                       session->base_path,
-                                       session->live_timer,
-                                       current_chunk_id.is_set ? &current_chunk_id.value : NULL,
-                                       session->creation_time,
-                                       session->name_contains_creation_time);
-                       pthread_mutex_unlock(socket->lock);
-                       if (ret != LTTNG_OK) {
-                               goto error;
-                       }
-                       /* Session is now ready for network streaming. */
-                       session->net_handle = 1;
-               }
-               session->consumer->relay_major_version =
-                       ksess->consumer->relay_major_version;
-               session->consumer->relay_minor_version =
-                       ksess->consumer->relay_minor_version;
-               session->consumer->relay_allows_clear =
-                       ksess->consumer->relay_allows_clear;
-       }
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
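The loops above use the userspace-RCU lock-free hash table: the whole traversal happens between rcu_read_lock() and rcu_read_unlock(), and cds_lfht_for_each_entry() walks the entries. A minimal, self-contained sketch of that pattern follows; the element type and the visit_sockets() helper are illustrative assumptions, not lttng-tools code.

#include <urcu.h>              /* rcu_read_lock() / rcu_read_unlock() */
#include <urcu/rculfhash.h>    /* struct cds_lfht, cds_lfht_for_each_entry() */

/* Illustrative element type; lttng-tools wraps the node in its own lttng_ht types. */
struct socket_entry {
        int fd;
        struct cds_lfht_node node;      /* linkage into the hash table */
};

static void visit_sockets(struct cds_lfht *ht)
{
        struct cds_lfht_iter iter;
        struct socket_entry *entry;

        rcu_read_lock();        /* readers hold the RCU read-side lock for the traversal */
        cds_lfht_for_each_entry(ht, &iter, entry, node) {
                /* e.g. send the relayd sockets tied to this consumer socket */
                (void) entry->fd;
        }
        rcu_read_unlock();
}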
-/*
- * Start a kernel session by opening all necessary streams.
- */
-int start_kernel_session(struct ltt_kernel_session *ksess)
-{
-       int ret;
-       struct ltt_kernel_channel *kchan;
-
-       /* Open kernel metadata */
-       if (ksess->metadata == NULL && ksess->output_traces) {
-               ret = kernel_open_metadata(ksess);
-               if (ret < 0) {
-                       ret = LTTNG_ERR_KERN_META_FAIL;
-                       goto error;
-               }
-       }
-
-       /* Open kernel metadata stream */
-       if (ksess->metadata && ksess->metadata_stream_fd < 0) {
-               ret = kernel_open_metadata_stream(ksess);
-               if (ret < 0) {
-                       ERR("Kernel create metadata stream failed");
-                       ret = LTTNG_ERR_KERN_STREAM_FAIL;
-                       goto error;
-               }
-       }
-
-       /* For each channel */
-       cds_list_for_each_entry(kchan, &ksess->channel_list.head, list) {
-               if (kchan->stream_count == 0) {
-                       ret = kernel_open_channel_stream(kchan);
-                       if (ret < 0) {
-                               ret = LTTNG_ERR_KERN_STREAM_FAIL;
-                               goto error;
-                       }
-                       /* Update the stream global counter */
-                       ksess->stream_count_global += ret;
-               }
-       }
-
-       /* Setup kernel consumer socket and send fds to it */
-       ret = init_kernel_tracing(ksess);
-       if (ret != 0) {
-               ret = LTTNG_ERR_KERN_START_FAIL;
-               goto error;
-       }
-
-       /* This starts the kernel tracing */
-       ret = kernel_start_session(ksess);
-       if (ret < 0) {
-               ret = LTTNG_ERR_KERN_START_FAIL;
-               goto error;
-       }
-
-       /* Quiescent wait after starting trace */
-       kernel_wait_quiescent();
-
-       ksess->active = 1;
-
-       ret = LTTNG_OK;
-
-error:
-       return ret;
-}
-
-int stop_kernel_session(struct ltt_kernel_session *ksess)
-{
-       struct ltt_kernel_channel *kchan;
-       bool error_occurred = false;
-       int ret;
-
-       if (!ksess || !ksess->active) {
-               return LTTNG_OK;
-       }
-       DBG("Stopping kernel tracing");
-
-       ret = kernel_stop_session(ksess);
-       if (ret < 0) {
-               ret = LTTNG_ERR_KERN_STOP_FAIL;
-               goto error;
-       }
-
-       kernel_wait_quiescent();
-
-       /* Flush metadata after stopping (if it exists) */
-       if (ksess->metadata_stream_fd >= 0) {
-               ret = kernel_metadata_flush_buffer(ksess->metadata_stream_fd);
-               if (ret < 0) {
-                       ERR("Kernel metadata flush failed");
-                       error_occurred = true;
-               }
-       }
-
-       /* Flush all buffers after stopping */
-       cds_list_for_each_entry(kchan, &ksess->channel_list.head, list) {
-               ret = kernel_flush_buffer(kchan);
-               if (ret < 0) {
-                       ERR("Kernel flush buffer error");
-                       error_occurred = true;
-               }
-       }
-
-       ksess->active = 0;
-       if (error_occurred) {
-               ret = LTTNG_ERR_UNK;
-       } else {
-               ret = LTTNG_OK;
-       }
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
- */
-int cmd_disable_channel(struct ltt_session *session,
-               enum lttng_domain_type domain, char *channel_name)
-{
-       int ret;
-       struct ltt_ust_session *usess;
-
-       usess = session->ust_session;
-
-       rcu_read_lock();
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               ret = channel_kernel_disable(session->kernel_session,
-                               channel_name);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               kernel_wait_quiescent();
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-       {
-               struct ltt_ust_channel *uchan;
-               struct lttng_ht *chan_ht;
-
-               chan_ht = usess->domain_global.channels;
-
-               uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
-               if (uchan == NULL) {
-                       ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
-                       goto error;
-               }
-
-               ret = channel_ust_disable(usess, uchan);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-               break;
-       }
-       default:
-               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
-               goto error;
-       }
-
-       ret = LTTNG_OK;
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
- *
- * The wpipe argument is used as a notifier for the kernel thread.
- */
-int cmd_enable_channel(struct ltt_session *session,
-               const struct lttng_domain *domain, const struct lttng_channel *_attr, int wpipe)
-{
-       int ret;
-       struct ltt_ust_session *usess = session->ust_session;
-       struct lttng_ht *chan_ht;
-       size_t len;
-       struct lttng_channel attr;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(_attr);
-       LTTNG_ASSERT(domain);
-
-       attr = *_attr;
-       len = lttng_strnlen(attr.name, sizeof(attr.name));
-
-       /* Validate channel name */
-       if (attr.name[0] == '.' ||
-               memchr(attr.name, '/', len) != NULL) {
-               ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
-               goto end;
-       }
-
-       DBG("Enabling channel %s for session %s", attr.name, session->name);
-
-       rcu_read_lock();
-
-       /*
-        * If the session is a live session, remove the switch timer: the
-        * live timer does the same thing, but also sends synchronisation
-        * beacons for inactive streams.
-        */
-       if (session->live_timer > 0) {
-               attr.attr.live_timer_interval = session->live_timer;
-               attr.attr.switch_timer_interval = 0;
-       }
-
-       /* Check for feature support */
-       switch (domain->type) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               if (kernel_supports_ring_buffer_snapshot_sample_positions() != 1) {
-                       /* Sampling position of buffer is not supported */
-                       WARN("Kernel tracer does not support buffer monitoring. "
-                                       "Setting the monitor interval timer to 0 "
-                                       "(disabled) for channel '%s' of session '%s'",
-                                       attr.name, session->name);
-                       lttng_channel_set_monitor_timer_interval(&attr, 0);
-               }
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-               break;
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_PYTHON:
-               if (!agent_tracing_is_enabled()) {
-                       DBG("Attempted to enable a channel in an agent domain but the agent thread is not running");
-                       ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
-                       goto error;
-               }
-               break;
-       default:
-               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
-               goto error;
-       }
-
-       switch (domain->type) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               struct ltt_kernel_channel *kchan;
-
-               kchan = trace_kernel_get_channel_by_name(attr.name,
-                               session->kernel_session);
-               if (kchan == NULL) {
-                       /*
-                        * Don't try to create a channel if the session has been started at
-                        * some point in time before. The tracer does not allow it.
-                        */
-                       if (session->has_been_started) {
-                               ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
-                               goto error;
-                       }
-
-                       if (session->snapshot.nb_output > 0 ||
-                                       session->snapshot_mode) {
-                               /* Enforce mmap output for snapshot sessions. */
-                               attr.attr.output = LTTNG_EVENT_MMAP;
-                       }
-                       ret = channel_kernel_create(session->kernel_session, &attr, wpipe);
-                       if (attr.name[0] != '\0') {
-                               session->kernel_session->has_non_default_channel = 1;
-                       }
-               } else {
-                       ret = channel_kernel_enable(session->kernel_session, kchan);
-               }
-
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               kernel_wait_quiescent();
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_PYTHON:
-       {
-               struct ltt_ust_channel *uchan;
-
-               /*
-                * FIXME
-                *
-                * Current agent implementation limitations force us to allow
-                * only one channel at once in "agent" subdomains. Each
-                * subdomain has a default channel name which must be strictly
-                * adhered to.
-                */
-               if (domain->type == LTTNG_DOMAIN_JUL) {
-                       if (strncmp(attr.name, DEFAULT_JUL_CHANNEL_NAME,
-                                       LTTNG_SYMBOL_NAME_LEN)) {
-                               ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
-                               goto error;
-                       }
-               } else if (domain->type == LTTNG_DOMAIN_LOG4J) {
-                       if (strncmp(attr.name, DEFAULT_LOG4J_CHANNEL_NAME,
-                                       LTTNG_SYMBOL_NAME_LEN)) {
-                               ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
-                               goto error;
-                       }
-               } else if (domain->type == LTTNG_DOMAIN_PYTHON) {
-                       if (strncmp(attr.name, DEFAULT_PYTHON_CHANNEL_NAME,
-                                       LTTNG_SYMBOL_NAME_LEN)) {
-                               ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
-                               goto error;
-                       }
-               }
-
-               chan_ht = usess->domain_global.channels;
-
-               uchan = trace_ust_find_channel_by_name(chan_ht, attr.name);
-               if (uchan == NULL) {
-                       /*
-                        * Don't try to create a channel if the session has been started at
-                        * some point in time before. The tracer does not allow it.
-                        */
-                       if (session->has_been_started) {
-                               ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
-                               goto error;
-                       }
-
-                       ret = channel_ust_create(usess, &attr, domain->buf_type);
-                       if (attr.name[0] != '\0') {
-                               usess->has_non_default_channel = 1;
-                       }
-               } else {
-                       ret = channel_ust_enable(usess, uchan);
-               }
-               break;
-       }
-       default:
-               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
-               goto error;
-       }
-
-       if (ret == LTTNG_OK && attr.attr.output != LTTNG_EVENT_MMAP) {
-               session->has_non_mmap_channel = true;
-       }
-error:
-       rcu_read_unlock();
-end:
-       return ret;
-}
-
-enum lttng_error_code cmd_process_attr_tracker_get_tracking_policy(
-               struct ltt_session *session,
-               enum lttng_domain_type domain,
-               enum lttng_process_attr process_attr,
-               enum lttng_tracking_policy *policy)
-{
-       enum lttng_error_code ret_code = LTTNG_OK;
-       const struct process_attr_tracker *tracker;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               if (!session->kernel_session) {
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-               tracker = kernel_get_process_attr_tracker(
-                               session->kernel_session, process_attr);
-               break;
-       case LTTNG_DOMAIN_UST:
-               if (!session->ust_session) {
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-               tracker = trace_ust_get_process_attr_tracker(
-                               session->ust_session, process_attr);
-               break;
-       default:
-               ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
-               goto end;
-       }
-       if (tracker) {
-               *policy = process_attr_tracker_get_tracking_policy(tracker);
-       } else {
-               ret_code = LTTNG_ERR_INVALID;
-       }
-end:
-       return ret_code;
-}
-
-enum lttng_error_code cmd_process_attr_tracker_set_tracking_policy(
-               struct ltt_session *session,
-               enum lttng_domain_type domain,
-               enum lttng_process_attr process_attr,
-               enum lttng_tracking_policy policy)
-{
-       enum lttng_error_code ret_code = LTTNG_OK;
-
-       switch (policy) {
-       case LTTNG_TRACKING_POLICY_INCLUDE_SET:
-       case LTTNG_TRACKING_POLICY_EXCLUDE_ALL:
-       case LTTNG_TRACKING_POLICY_INCLUDE_ALL:
-               break;
-       default:
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               if (!session->kernel_session) {
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-               ret_code = kernel_process_attr_tracker_set_tracking_policy(
-                               session->kernel_session, process_attr, policy);
-               break;
-       case LTTNG_DOMAIN_UST:
-               if (!session->ust_session) {
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-               ret_code = trace_ust_process_attr_tracker_set_tracking_policy(
-                               session->ust_session, process_attr, policy);
-               break;
-       default:
-               ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
-               break;
-       }
-end:
-       return ret_code;
-}
-
-enum lttng_error_code cmd_process_attr_tracker_inclusion_set_add_value(
-               struct ltt_session *session,
-               enum lttng_domain_type domain,
-               enum lttng_process_attr process_attr,
-               const struct process_attr_value *value)
-{
-       enum lttng_error_code ret_code = LTTNG_OK;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               if (!session->kernel_session) {
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-               ret_code = kernel_process_attr_tracker_inclusion_set_add_value(
-                               session->kernel_session, process_attr, value);
-               break;
-       case LTTNG_DOMAIN_UST:
-               if (!session->ust_session) {
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-               ret_code = trace_ust_process_attr_tracker_inclusion_set_add_value(
-                               session->ust_session, process_attr, value);
-               break;
-       default:
-               ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
-               break;
-       }
-end:
-       return ret_code;
-}
-
-enum lttng_error_code cmd_process_attr_tracker_inclusion_set_remove_value(
-               struct ltt_session *session,
-               enum lttng_domain_type domain,
-               enum lttng_process_attr process_attr,
-               const struct process_attr_value *value)
-{
-       enum lttng_error_code ret_code = LTTNG_OK;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               if (!session->kernel_session) {
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-               ret_code = kernel_process_attr_tracker_inclusion_set_remove_value(
-                               session->kernel_session, process_attr, value);
-               break;
-       case LTTNG_DOMAIN_UST:
-               if (!session->ust_session) {
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-               ret_code = trace_ust_process_attr_tracker_inclusion_set_remove_value(
-                               session->ust_session, process_attr, value);
-               break;
-       default:
-               ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
-               break;
-       }
-end:
-       return ret_code;
-}
-
-enum lttng_error_code cmd_process_attr_tracker_get_inclusion_set(
-               struct ltt_session *session,
-               enum lttng_domain_type domain,
-               enum lttng_process_attr process_attr,
-               struct lttng_process_attr_values **values)
-{
-       enum lttng_error_code ret_code = LTTNG_OK;
-       const struct process_attr_tracker *tracker;
-       enum process_attr_tracker_status status;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               if (!session->kernel_session) {
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-               tracker = kernel_get_process_attr_tracker(
-                               session->kernel_session, process_attr);
-               break;
-       case LTTNG_DOMAIN_UST:
-               if (!session->ust_session) {
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-               tracker = trace_ust_get_process_attr_tracker(
-                               session->ust_session, process_attr);
-               break;
-       default:
-               ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
-               goto end;
-       }
-
-       if (!tracker) {
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       status = process_attr_tracker_get_inclusion_set(tracker, values);
-       switch (status) {
-       case PROCESS_ATTR_TRACKER_STATUS_OK:
-               ret_code = LTTNG_OK;
-               break;
-       case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
-               ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
-               break;
-       case PROCESS_ATTR_TRACKER_STATUS_ERROR:
-               ret_code = LTTNG_ERR_NOMEM;
-               break;
-       default:
-               ret_code = LTTNG_ERR_UNK;
-               break;
-       }
-
-end:
-       return ret_code;
-}
-
-/*
- * Command LTTNG_DISABLE_EVENT processed by the client thread.
- */
-int cmd_disable_event(struct ltt_session *session,
-               enum lttng_domain_type domain, const char *channel_name,
-               const struct lttng_event *event)
-{
-       int ret;
-       const char *event_name;
-
-       DBG("Disable event command for event \'%s\'", event->name);
-
-       event_name = event->name;
-
-       /* Error out on unhandled search criteria */
-       if (event->loglevel_type || event->loglevel != -1 || event->enabled
-                       || event->pid || event->filter || event->exclusion) {
-               ret = LTTNG_ERR_UNK;
-               goto error;
-       }
-
-       rcu_read_lock();
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               struct ltt_kernel_channel *kchan;
-               struct ltt_kernel_session *ksess;
-
-               ksess = session->kernel_session;
-
-               /*
-                * If a non-default channel has been created in the
-                * session, explicitly require that -c chan_name be
-                * provided.
-                */
-               if (ksess->has_non_default_channel && channel_name[0] == '\0') {
-                       ret = LTTNG_ERR_NEED_CHANNEL_NAME;
-                       goto error_unlock;
-               }
-
-               kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
-               if (kchan == NULL) {
-                       ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
-                       goto error_unlock;
-               }
-
-               switch (event->type) {
-               case LTTNG_EVENT_ALL:
-               case LTTNG_EVENT_TRACEPOINT:
-               case LTTNG_EVENT_SYSCALL:
-               case LTTNG_EVENT_PROBE:
-               case LTTNG_EVENT_FUNCTION:
-               case LTTNG_EVENT_FUNCTION_ENTRY:        /* fall-through */
-                       if (event_name[0] == '\0') {
-                               ret = event_kernel_disable_event(kchan,
-                                       NULL, event->type);
-                       } else {
-                               ret = event_kernel_disable_event(kchan,
-                                       event_name, event->type);
-                       }
-                       if (ret != LTTNG_OK) {
-                               goto error_unlock;
-                       }
-                       break;
-               default:
-                       ret = LTTNG_ERR_UNK;
-                       goto error_unlock;
-               }
-
-               kernel_wait_quiescent();
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-       {
-               struct ltt_ust_channel *uchan;
-               struct ltt_ust_session *usess;
-
-               usess = session->ust_session;
-
-               if (validate_ust_event_name(event_name)) {
-                       ret = LTTNG_ERR_INVALID_EVENT_NAME;
-                       goto error_unlock;
-               }
-
-               /*
-                * If a non-default channel has been created in the
-                * session, explicitly require that -c chan_name be
-                * provided.
-                */
-               if (usess->has_non_default_channel && channel_name[0] == '\0') {
-                       ret = LTTNG_ERR_NEED_CHANNEL_NAME;
-                       goto error_unlock;
-               }
-
-               uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
-                               channel_name);
-               if (uchan == NULL) {
-                       ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
-                       goto error_unlock;
-               }
-
-               switch (event->type) {
-               case LTTNG_EVENT_ALL:
-                       /*
-                        * An empty event name means that everything
-                        * should be disabled.
-                        */
-                       if (event->name[0] == '\0') {
-                               ret = event_ust_disable_all_tracepoints(usess, uchan);
-                       } else {
-                               ret = event_ust_disable_tracepoint(usess, uchan,
-                                               event_name);
-                       }
-                       if (ret != LTTNG_OK) {
-                               goto error_unlock;
-                       }
-                       break;
-               default:
-                       ret = LTTNG_ERR_UNK;
-                       goto error_unlock;
-               }
-
-               DBG3("Disable UST event %s in channel %s completed", event_name,
-                               channel_name);
-               break;
-       }
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_PYTHON:
-       {
-               struct agent *agt;
-               struct ltt_ust_session *usess = session->ust_session;
-
-               LTTNG_ASSERT(usess);
-
-               switch (event->type) {
-               case LTTNG_EVENT_ALL:
-                       break;
-               default:
-                       ret = LTTNG_ERR_UNK;
-                       goto error_unlock;
-               }
-
-               agt = trace_ust_find_agent(usess, domain);
-               if (!agt) {
-                       ret = -LTTNG_ERR_UST_EVENT_NOT_FOUND;
-                       goto error_unlock;
-               }
-               /*
-                * An empty event name means that everything
-                * should be disabled.
-                */
-               if (event->name[0] == '\0') {
-                       ret = event_agent_disable_all(usess, agt);
-               } else {
-                       ret = event_agent_disable(usess, agt, event_name);
-               }
-               if (ret != LTTNG_OK) {
-                       goto error_unlock;
-               }
-
-               break;
-       }
-       default:
-               ret = LTTNG_ERR_UND;
-               goto error_unlock;
-       }
-
-       ret = LTTNG_OK;
-
-error_unlock:
-       rcu_read_unlock();
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_ADD_CONTEXT processed by the client thread.
- */
-int cmd_add_context(struct ltt_session *session, enum lttng_domain_type domain,
-               char *channel_name, const struct lttng_event_context *ctx, int kwpipe)
-{
-       int ret, chan_kern_created = 0, chan_ust_created = 0;
-       char *app_ctx_provider_name = NULL, *app_ctx_name = NULL;
-
-       /*
-        * Don't try to add a context if the session has been started at
-        * some point in time before. The tracer does not allow it and would
-        * result in a corrupted trace.
-        */
-       if (session->has_been_started) {
-               ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
-               goto end;
-       }
-
-       if (ctx->ctx == LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
-               app_ctx_provider_name = ctx->u.app_ctx.provider_name;
-               app_ctx_name = ctx->u.app_ctx.ctx_name;
-       }
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               LTTNG_ASSERT(session->kernel_session);
-
-               if (session->kernel_session->channel_count == 0) {
-                       /* Create default channel */
-                       ret = channel_kernel_create(session->kernel_session, NULL, kwpipe);
-                       if (ret != LTTNG_OK) {
-                               goto error;
-                       }
-                       chan_kern_created = 1;
-               }
-               /* Add kernel context to kernel tracer */
-               ret = context_kernel_add(session->kernel_session, ctx, channel_name);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-               break;
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-       {
-               /*
-                * Validate channel name.
-                * If no channel name is given and the domain is JUL or LOG4J,
-                * set it to the appropriate domain-specific channel name. If
-                * a name is provided but does not match the expected channel
-                * name, return an error.
-                */
-               if (domain == LTTNG_DOMAIN_JUL && *channel_name &&
-                               strcmp(channel_name,
-                               DEFAULT_JUL_CHANNEL_NAME)) {
-                       ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
-                       goto error;
-               } else if (domain == LTTNG_DOMAIN_LOG4J && *channel_name &&
-                               strcmp(channel_name,
-                               DEFAULT_LOG4J_CHANNEL_NAME)) {
-                       ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
-                       goto error;
-               }
-               /* break is _not_ missing here. */
-       }
-       case LTTNG_DOMAIN_UST:
-       {
-               struct ltt_ust_session *usess = session->ust_session;
-               unsigned int chan_count;
-
-               LTTNG_ASSERT(usess);
-
-               chan_count = lttng_ht_get_count(usess->domain_global.channels);
-               if (chan_count == 0) {
-                       struct lttng_channel *attr;
-                       /* Create default channel */
-                       attr = channel_new_default_attr(domain, usess->buffer_type);
-                       if (attr == NULL) {
-                               ret = LTTNG_ERR_FATAL;
-                               goto error;
-                       }
-
-                       ret = channel_ust_create(usess, attr, usess->buffer_type);
-                       if (ret != LTTNG_OK) {
-                               free(attr);
-                               goto error;
-                       }
-                       channel_attr_destroy(attr);
-                       chan_ust_created = 1;
-               }
-
-               ret = context_ust_add(usess, domain, ctx, channel_name);
-               free(app_ctx_provider_name);
-               free(app_ctx_name);
-               app_ctx_name = NULL;
-               app_ctx_provider_name = NULL;
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-               break;
-       }
-       default:
-               ret = LTTNG_ERR_UND;
-               goto error;
-       }
-
-       ret = LTTNG_OK;
-       goto end;
-
-error:
-       if (chan_kern_created) {
-               struct ltt_kernel_channel *kchan =
-                       trace_kernel_get_channel_by_name(DEFAULT_CHANNEL_NAME,
-                                       session->kernel_session);
-               /* Created previously, this should NOT fail. */
-               LTTNG_ASSERT(kchan);
-               kernel_destroy_channel(kchan);
-       }
-
-       if (chan_ust_created) {
-               struct ltt_ust_channel *uchan =
-                       trace_ust_find_channel_by_name(
-                                       session->ust_session->domain_global.channels,
-                                       DEFAULT_CHANNEL_NAME);
-               /* Created previously, this should NOT fail. */
-               LTTNG_ASSERT(uchan);
-               /* Remove from the channel list of the session. */
-               trace_ust_delete_channel(session->ust_session->domain_global.channels,
-                               uchan);
-               trace_ust_destroy_channel(uchan);
-       }
-end:
-       free(app_ctx_provider_name);
-       free(app_ctx_name);
-       return ret;
-}
-
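For reference, the LTTNG_EVENT_CONTEXT_APP_CONTEXT branch above takes ownership of the provider and context name strings carried in the lttng_event_context union and frees them before returning. A hedged sketch of what such a context looks like on entry; the provider and context names are made-up values:

/* Illustrative only: an application context as a client could submit it. */
struct lttng_event_context app_ctx;

memset(&app_ctx, 0, sizeof(app_ctx));
app_ctx.ctx = LTTNG_EVENT_CONTEXT_APP_CONTEXT;
app_ctx.u.app_ctx.provider_name = strdup("my_provider");  /* hypothetical value */
app_ctx.u.app_ctx.ctx_name = strdup("my_context");        /* hypothetical value */

/*
 * As written above, cmd_add_context() saves these two pointers and frees
 * them on every return path, so the submitter must not free them again.
 */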
-static inline bool name_starts_with(const char *name, const char *prefix)
-{
-       const size_t max_cmp_len = min(strlen(prefix), LTTNG_SYMBOL_NAME_LEN);
-
-       return !strncmp(name, prefix, max_cmp_len);
-}
-
-/* Perform userspace-specific event name validation */
-static int validate_ust_event_name(const char *name)
-{
-       int ret = 0;
-
-       if (!name) {
-               ret = -1;
-               goto end;
-       }
-
-       /*
-        * Check name against all internal UST event component namespaces used
-        * by the agents.
-        */
-       if (name_starts_with(name, DEFAULT_JUL_EVENT_COMPONENT) ||
-               name_starts_with(name, DEFAULT_LOG4J_EVENT_COMPONENT) ||
-               name_starts_with(name, DEFAULT_PYTHON_EVENT_COMPONENT)) {
-               ret = -1;
-       }
-
-end:
-       return ret;
-}
-
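Concretely, validate_ust_event_name() refuses any name that begins with one of the agent component prefixes, so a plain userspace event cannot shadow agent-internal events. A hedged illustration, assuming the DEFAULT_*_EVENT_COMPONENT strings are prefixes of the "lttng_jul" form (the exact values live in the defaults header):

/* Illustrative calls; the return value is 0 when the name is allowed. */
validate_ust_event_name(NULL);                 /* -1: no name given */
validate_ust_event_name("my_app:my_event");    /* 0: allowed */
validate_ust_event_name("lttng_jul:event");    /* -1: reserved agent prefix, assuming
                                                  that component name */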
-/*
- * Internal version of cmd_enable_event() with a supplemental
- * "internal_event" flag which is used to enable internal events which should
- * be hidden from clients. Such events are used in the agent implementation to
- * enable the events through which all "agent" events are funneled.
- */
-static int _cmd_enable_event(struct ltt_session *session,
-               const struct lttng_domain *domain,
-               char *channel_name, struct lttng_event *event,
-               char *filter_expression,
-               struct lttng_bytecode *filter,
-               struct lttng_event_exclusion *exclusion,
-               int wpipe, bool internal_event)
-{
-       int ret = 0, channel_created = 0;
-       struct lttng_channel *attr = NULL;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(event);
-       LTTNG_ASSERT(channel_name);
-
-       /* If we have a filter, we must have its filter expression */
-       LTTNG_ASSERT(!(!!filter_expression ^ !!filter));
-
-       /* Normalize event name as a globbing pattern */
-       strutils_normalize_star_glob_pattern(event->name);
-
-       /* Normalize exclusion names as globbing patterns */
-       if (exclusion) {
-               size_t i;
-
-               for (i = 0; i < exclusion->count; i++) {
-                       char *name = LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, i);
-
-                       strutils_normalize_star_glob_pattern(name);
-               }
-       }
-
-       DBG("Enable event command for event \'%s\'", event->name);
-
-       rcu_read_lock();
-
-       switch (domain->type) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               struct ltt_kernel_channel *kchan;
-
-               /*
-                * If a non-default channel has been created in the
-                * session, explicitly require that -c chan_name be
-                * provided.
-                */
-               if (session->kernel_session->has_non_default_channel
-                               && channel_name[0] == '\0') {
-                       ret = LTTNG_ERR_NEED_CHANNEL_NAME;
-                       goto error;
-               }
-
-               kchan = trace_kernel_get_channel_by_name(channel_name,
-                               session->kernel_session);
-               if (kchan == NULL) {
-                       attr = channel_new_default_attr(LTTNG_DOMAIN_KERNEL,
-                                       LTTNG_BUFFER_GLOBAL);
-                       if (attr == NULL) {
-                               ret = LTTNG_ERR_FATAL;
-                               goto error;
-                       }
-                       if (lttng_strncpy(attr->name, channel_name,
-                                       sizeof(attr->name))) {
-                               ret = LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-
-                       ret = cmd_enable_channel(session, domain, attr, wpipe);
-                       if (ret != LTTNG_OK) {
-                               goto error;
-                       }
-                       channel_created = 1;
-               }
-
-               /* Get the newly created kernel channel pointer */
-               kchan = trace_kernel_get_channel_by_name(channel_name,
-                               session->kernel_session);
-               if (kchan == NULL) {
-                       /* This should not happen... */
-                       ret = LTTNG_ERR_FATAL;
-                       goto error;
-               }
-
-               switch (event->type) {
-               case LTTNG_EVENT_ALL:
-               {
-                       char *filter_expression_a = NULL;
-                       struct lttng_bytecode *filter_a = NULL;
-
-                       /*
-                        * We need to duplicate filter_expression and filter,
-                        * because ownership is passed to first enable
-                        * event.
-                        */
-                       if (filter_expression) {
-                               filter_expression_a = strdup(filter_expression);
-                               if (!filter_expression_a) {
-                                       ret = LTTNG_ERR_FATAL;
-                                       goto error;
-                               }
-                       }
-                       if (filter) {
-                               filter_a = zmalloc(sizeof(*filter_a) + filter->len);
-                               if (!filter_a) {
-                                       free(filter_expression_a);
-                                       ret = LTTNG_ERR_FATAL;
-                                       goto error;
-                               }
-                               memcpy(filter_a, filter, sizeof(*filter_a) + filter->len);
-                       }
-                       event->type = LTTNG_EVENT_TRACEPOINT;   /* Hack */
-                       ret = event_kernel_enable_event(kchan, event,
-                               filter_expression, filter);
-                       /* We have passed ownership */
-                       filter_expression = NULL;
-                       filter = NULL;
-                       if (ret != LTTNG_OK) {
-                               if (channel_created) {
-                                       /* Let's not leak a useless channel. */
-                                       kernel_destroy_channel(kchan);
-                               }
-                               free(filter_expression_a);
-                               free(filter_a);
-                               goto error;
-                       }
-                       event->type = LTTNG_EVENT_SYSCALL;      /* Hack */
-                       ret = event_kernel_enable_event(kchan, event,
-                               filter_expression_a, filter_a);
-                       /* We have passed ownership */
-                       filter_expression_a = NULL;
-                       filter_a = NULL;
-                       if (ret != LTTNG_OK) {
-                               goto error;
-                       }
-                       break;
-               }
-               case LTTNG_EVENT_PROBE:
-               case LTTNG_EVENT_USERSPACE_PROBE:
-               case LTTNG_EVENT_FUNCTION:
-               case LTTNG_EVENT_FUNCTION_ENTRY:
-               case LTTNG_EVENT_TRACEPOINT:
-                       ret = event_kernel_enable_event(kchan, event,
-                               filter_expression, filter);
-                       /* We have passed ownership */
-                       filter_expression = NULL;
-                       filter = NULL;
-                       if (ret != LTTNG_OK) {
-                               if (channel_created) {
-                                       /* Let's not leak a useless channel. */
-                                       kernel_destroy_channel(kchan);
-                               }
-                               goto error;
-                       }
-                       break;
-               case LTTNG_EVENT_SYSCALL:
-                       ret = event_kernel_enable_event(kchan, event,
-                               filter_expression, filter);
-                       /* We have passed ownership */
-                       filter_expression = NULL;
-                       filter = NULL;
-                       if (ret != LTTNG_OK) {
-                               goto error;
-                       }
-                       break;
-               default:
-                       ret = LTTNG_ERR_UNK;
-                       goto error;
-               }
-
-               kernel_wait_quiescent();
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-       {
-               struct ltt_ust_channel *uchan;
-               struct ltt_ust_session *usess = session->ust_session;
-
-               LTTNG_ASSERT(usess);
-
-               /*
-                * If a non-default channel has been created in the
-                * session, explicitly require that -c chan_name be
-                * provided.
-                */
-               if (usess->has_non_default_channel && channel_name[0] == '\0') {
-                       ret = LTTNG_ERR_NEED_CHANNEL_NAME;
-                       goto error;
-               }
-
-               /* Get channel from global UST domain */
-               uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
-                               channel_name);
-               if (uchan == NULL) {
-                       /* Create default channel */
-                       attr = channel_new_default_attr(LTTNG_DOMAIN_UST,
-                                       usess->buffer_type);
-                       if (attr == NULL) {
-                               ret = LTTNG_ERR_FATAL;
-                               goto error;
-                       }
-                       if (lttng_strncpy(attr->name, channel_name,
-                                       sizeof(attr->name))) {
-                               ret = LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-
-                       ret = cmd_enable_channel(session, domain, attr, wpipe);
-                       if (ret != LTTNG_OK) {
-                               goto error;
-                       }
-
-                       /* Get the newly created channel reference back */
-                       uchan = trace_ust_find_channel_by_name(
-                                       usess->domain_global.channels, channel_name);
-                       LTTNG_ASSERT(uchan);
-               }
-
-               if (uchan->domain != LTTNG_DOMAIN_UST && !internal_event) {
-                       /*
-                        * Don't allow users to add UST events to channels which
-                        * are assigned to a userspace subdomain (JUL, Log4J,
-                        * Python, etc.).
-                        */
-                       ret = LTTNG_ERR_INVALID_CHANNEL_DOMAIN;
-                       goto error;
-               }
-
-               if (!internal_event) {
-                       /*
-                        * Ensure the event name is not reserved for internal
-                        * use.
-                        */
-                       ret = validate_ust_event_name(event->name);
-                       if (ret) {
-                               WARN("Userspace event name %s failed validation.",
-                                               event->name);
-                               ret = LTTNG_ERR_INVALID_EVENT_NAME;
-                               goto error;
-                       }
-               }
-
-               /* At this point, the session and channel exist on the tracer */
-               ret = event_ust_enable_tracepoint(usess, uchan, event,
-                               filter_expression, filter, exclusion,
-                               internal_event);
-               /* We have passed ownership */
-               filter_expression = NULL;
-               filter = NULL;
-               exclusion = NULL;
-               if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
-                       goto already_enabled;
-               } else if (ret != LTTNG_OK) {
-                       goto error;
-               }
-               break;
-       }
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_PYTHON:
-       {
-               const char *default_event_name, *default_chan_name;
-               struct agent *agt;
-               struct lttng_event uevent;
-               struct lttng_domain tmp_dom;
-               struct ltt_ust_session *usess = session->ust_session;
-
-               LTTNG_ASSERT(usess);
-
-               if (!agent_tracing_is_enabled()) {
-                       DBG("Attempted to enable an event in an agent domain but the agent thread is not running");
-                       ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
-                       goto error;
-               }
-
-               agt = trace_ust_find_agent(usess, domain->type);
-               if (!agt) {
-                       agt = agent_create(domain->type);
-                       if (!agt) {
-                               ret = LTTNG_ERR_NOMEM;
-                               goto error;
-                       }
-                       agent_add(agt, usess->agents);
-               }
-
-               /* Create the default tracepoint. */
-               memset(&uevent, 0, sizeof(uevent));
-               uevent.type = LTTNG_EVENT_TRACEPOINT;
-               uevent.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
-               default_event_name = event_get_default_agent_ust_name(
-                               domain->type);
-               if (!default_event_name) {
-                       ret = LTTNG_ERR_FATAL;
-                       goto error;
-               }
-               strncpy(uevent.name, default_event_name, sizeof(uevent.name));
-               uevent.name[sizeof(uevent.name) - 1] = '\0';
-
-               /*
-                * The domain type is changed because we are about to enable the
-                * default channel and event for the JUL domain that are hardcoded.
-                * This happens in the UST domain.
-                */
-               memcpy(&tmp_dom, domain, sizeof(tmp_dom));
-               tmp_dom.type = LTTNG_DOMAIN_UST;
-
-               switch (domain->type) {
-               case LTTNG_DOMAIN_LOG4J:
-                       default_chan_name = DEFAULT_LOG4J_CHANNEL_NAME;
-                       break;
-               case LTTNG_DOMAIN_JUL:
-                       default_chan_name = DEFAULT_JUL_CHANNEL_NAME;
-                       break;
-               case LTTNG_DOMAIN_PYTHON:
-                       default_chan_name = DEFAULT_PYTHON_CHANNEL_NAME;
-                       break;
-               default:
-                       /* The switch/case we are in makes this impossible */
-                       abort();
-               }
-
-               {
-                       char *filter_expression_copy = NULL;
-                       struct lttng_bytecode *filter_copy = NULL;
-
-                       if (filter) {
-                               const size_t filter_size = sizeof(
-                                               struct lttng_bytecode)
-                                               + filter->len;
-
-                               filter_copy = zmalloc(filter_size);
-                               if (!filter_copy) {
-                                       ret = LTTNG_ERR_NOMEM;
-                                       goto error;
-                               }
-                               memcpy(filter_copy, filter, filter_size);
-
-                               filter_expression_copy =
-                                               strdup(filter_expression);
-                               if (!filter_expression_copy) {
-                                       ret = LTTNG_ERR_NOMEM;
-                               }
-
-                               if (!filter_expression_copy || !filter_copy) {
-                                       free(filter_expression_copy);
-                                       free(filter_copy);
-                                       goto error;
-                               }
-                       }
-
-                       ret = cmd_enable_event_internal(session, &tmp_dom,
-                                       (char *) default_chan_name,
-                                       &uevent, filter_expression_copy,
-                                       filter_copy, NULL, wpipe);
-               }
-
-               if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
-                       goto already_enabled;
-               } else if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               /* The wild card * means that everything should be enabled. */
-               if (strncmp(event->name, "*", 1) == 0 && strlen(event->name) == 1) {
-                       ret = event_agent_enable_all(usess, agt, event, filter,
-                                       filter_expression);
-               } else {
-                       ret = event_agent_enable(usess, agt, event, filter,
-                                       filter_expression);
-               }
-               filter = NULL;
-               filter_expression = NULL;
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               break;
-       }
-       default:
-               ret = LTTNG_ERR_UND;
-               goto error;
-       }
-
-       ret = LTTNG_OK;
-
-already_enabled:
-error:
-       free(filter_expression);
-       free(filter);
-       free(exclusion);
-       channel_attr_destroy(attr);
-       rcu_read_unlock();
-       return ret;
-}
-
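In the LTTNG_EVENT_ALL kernel branch above, the filter expression and bytecode are duplicated before the first enable because ownership is handed to event_kernel_enable_event(). The bytecode copy works because lttng_bytecode carries its payload length in len and the payload follows the header in memory; a minimal sketch of that deep-copy idiom, with a hypothetical helper name:

/* Hypothetical helper: duplicate a bytecode object and its trailing payload. */
static struct lttng_bytecode *copy_filter_bytecode(const struct lttng_bytecode *src)
{
        const size_t sz = sizeof(*src) + src->len;
        struct lttng_bytecode *copy = zmalloc(sz);

        if (!copy) {
                return NULL;    /* caller maps this to LTTNG_ERR_FATAL or LTTNG_ERR_NOMEM */
        }
        memcpy(copy, src, sz);
        return copy;
}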
-/*
- * Command LTTNG_ENABLE_EVENT processed by the client thread.
- * We own filter, exclusion, and filter_expression.
- */
-int cmd_enable_event(struct ltt_session *session,
-               const struct lttng_domain *domain,
-               char *channel_name, struct lttng_event *event,
-               char *filter_expression,
-               struct lttng_bytecode *filter,
-               struct lttng_event_exclusion *exclusion,
-               int wpipe)
-{
-       return _cmd_enable_event(session, domain, channel_name, event,
-                       filter_expression, filter, exclusion, wpipe, false);
-}
-
-/*
- * Enable an event which is internal to LTTng. An internal event should
- * never be made visible to clients and is immune to checks such as
- * reserved names.
- */
-static int cmd_enable_event_internal(struct ltt_session *session,
-               const struct lttng_domain *domain,
-               char *channel_name, struct lttng_event *event,
-               char *filter_expression,
-               struct lttng_bytecode *filter,
-               struct lttng_event_exclusion *exclusion,
-               int wpipe)
-{
-       return _cmd_enable_event(session, domain, channel_name, event,
-                       filter_expression, filter, exclusion, wpipe, true);
-}
-
-/*
- * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
- */
-ssize_t cmd_list_tracepoints(enum lttng_domain_type domain,
-               struct lttng_event **events)
-{
-       int ret;
-       ssize_t nb_events = 0;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               nb_events = kernel_list_events(events);
-               if (nb_events < 0) {
-                       ret = LTTNG_ERR_KERN_LIST_FAIL;
-                       goto error;
-               }
-               break;
-       case LTTNG_DOMAIN_UST:
-               nb_events = ust_app_list_events(events);
-               if (nb_events < 0) {
-                       ret = LTTNG_ERR_UST_LIST_FAIL;
-                       goto error;
-               }
-               break;
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_PYTHON:
-               nb_events = agent_list_events(events, domain);
-               if (nb_events < 0) {
-                       ret = LTTNG_ERR_UST_LIST_FAIL;
-                       goto error;
-               }
-               break;
-       default:
-               ret = LTTNG_ERR_UND;
-               goto error;
-       }
-
-       return nb_events;
-
-error:
-       /* Return negative value to differentiate return code */
-       return -ret;
-}
-
-/*
- * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
- */
-ssize_t cmd_list_tracepoint_fields(enum lttng_domain_type domain,
-               struct lttng_event_field **fields)
-{
-       int ret;
-       ssize_t nb_fields = 0;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_UST:
-               nb_fields = ust_app_list_event_fields(fields);
-               if (nb_fields < 0) {
-                       ret = LTTNG_ERR_UST_LIST_FAIL;
-                       goto error;
-               }
-               break;
-       case LTTNG_DOMAIN_KERNEL:       /* fall-through */
-       default:
-               ret = LTTNG_ERR_UND;
-               goto error;
-       }
-
-       return nb_fields;
-
-error:
-       /* Return negative value to differentiate return code */
-       return -ret;
-}
-
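Both listing commands above overload their return value: a non-negative result is the number of entries written to the output array, while a negative result is the negated lttng_error_code chosen at the error label. A hedged caller-side sketch; the reporting step is left abstract:

struct lttng_event *events = NULL;
const ssize_t nb_events = cmd_list_tracepoints(LTTNG_DOMAIN_UST, &events);

if (nb_events < 0) {
        /* Negated error code, e.g. -LTTNG_ERR_UST_LIST_FAIL. */
        const enum lttng_error_code err = (enum lttng_error_code) -nb_events;
        /* ... report err to the client ... */
} else {
        /* nb_events entries were written; the caller owns and releases 'events'. */
}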
-ssize_t cmd_list_syscalls(struct lttng_event **events)
-{
-       return syscall_table_list(events);
-}
-
-/*
- * Command LTTNG_START_TRACE processed by the client thread.
- *
- * Called with session mutex held.
- */
-int cmd_start_trace(struct ltt_session *session)
-{
-       enum lttng_error_code ret;
-       unsigned long nb_chan = 0;
-       struct ltt_kernel_session *ksession;
-       struct ltt_ust_session *usess;
-       const bool session_rotated_after_last_stop =
-                       session->rotated_after_last_stop;
-       const bool session_cleared_after_last_stop =
-                       session->cleared_after_last_stop;
-
-       LTTNG_ASSERT(session);
-
-       /* Ease our life a bit ;) */
-       ksession = session->kernel_session;
-       usess = session->ust_session;
-
-       /* Is the session already started? */
-       if (session->active) {
-               ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
-               /* Perform nothing */
-               goto end;
-       }
-
-       if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING &&
-                       !session->current_trace_chunk) {
-               /*
-                * A rotation was launched while the session was stopped and
-                * it has not been completed yet. It is not possible to start
-                * the session since starting the session here would require a
-                * rotation from "NULL" to a new trace chunk. That rotation
-                * would overlap with the ongoing rotation, which is not
-                * supported.
-                */
-               WARN("Refusing to start session \"%s\" as a rotation launched after the last \"stop\" is still ongoing",
-                               session->name);
-               ret = LTTNG_ERR_ROTATION_PENDING;
-               goto error;
-       }
-
-       /*
-        * Starting a session without any channel is useless since it is not
-        * possible to enable a channel afterwards; inform the client.
-        */
-       if (usess && usess->domain_global.channels) {
-               nb_chan += lttng_ht_get_count(usess->domain_global.channels);
-       }
-       if (ksession) {
-               nb_chan += ksession->channel_count;
-       }
-       if (!nb_chan) {
-               ret = LTTNG_ERR_NO_CHANNEL;
-               goto error;
-       }
-
-       session->active = 1;
-       session->rotated_after_last_stop = false;
-       session->cleared_after_last_stop = false;
-       if (session->output_traces && !session->current_trace_chunk) {
-               if (!session->has_been_started) {
-                       struct lttng_trace_chunk *trace_chunk;
-
-                       DBG("Creating initial trace chunk of session \"%s\"",
-                                       session->name);
-                       trace_chunk = session_create_new_trace_chunk(
-                                       session, NULL, NULL, NULL);
-                       if (!trace_chunk) {
-                               ret = LTTNG_ERR_CREATE_DIR_FAIL;
-                               goto error;
-                       }
-                       LTTNG_ASSERT(!session->current_trace_chunk);
-                       ret = session_set_trace_chunk(session, trace_chunk,
-                                       NULL);
-                       lttng_trace_chunk_put(trace_chunk);
-                       if (ret) {
-                               ret = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
-                               goto error;
-                       }
-               } else {
-                       DBG("Rotating session \"%s\" from its current \"NULL\" trace chunk to a new chunk",
-                                       session->name);
-                       /*
-                        * Rotate existing streams into the new chunk.
-                        * This is a "quiet" rotation as no client has
-                        * explicitly requested this operation.
-                        *
-                        * There is also no need to wait for the rotation
-                        * to complete as it will happen immediately. No data
-                        * was produced as the session was stopped, so the
-                        * rotation should happen on reception of the command.
-                        */
-                       ret = cmd_rotate_session(session, NULL, true,
-                                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION);
-                       if (ret != LTTNG_OK) {
-                               goto error;
-                       }
-               }
-       }
-
-       /* Kernel tracing */
-       if (ksession != NULL) {
-               DBG("Start kernel tracing session %s", session->name);
-               ret = start_kernel_session(ksession);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-       }
-
-       /* Flag session that trace should start automatically */
-       if (usess) {
-               int int_ret = ust_app_start_trace_all(usess);
-
-               if (int_ret < 0) {
-                       ret = LTTNG_ERR_UST_START_FAIL;
-                       goto error;
-               }
-       }
-
-       /*
-        * Open a packet in every stream of the session to ensure that viewers
-        * can correctly identify the boundaries of the periods during which
-        * tracing was active for this session.
-        */
-       ret = session_open_packets(session);
-       if (ret != LTTNG_OK) {
-               goto error;
-       }
-
-       /*
-        * Clear the flag that indicates that a rotation was done while the
-        * session was stopped.
-        */
-       session->rotated_after_last_stop = false;
-
-       if (session->rotate_timer_period) {
-               int int_ret = timer_session_rotation_schedule_timer_start(
-                               session, session->rotate_timer_period);
-
-               if (int_ret < 0) {
-                       ERR("Failed to enable rotate timer");
-                       ret = LTTNG_ERR_UNK;
-                       goto error;
-               }
-       }
-
-       ret = LTTNG_OK;
-
-error:
-       if (ret == LTTNG_OK) {
-               /* Flag this after a successful start. */
-               session->has_been_started |= 1;
-       } else {
-               session->active = 0;
-               /* Restore initial state on error. */
-               session->rotated_after_last_stop =
-                               session_rotated_after_last_stop;
-               session->cleared_after_last_stop =
-                               session_cleared_after_last_stop;
-       }
-end:
-       return ret;
-}
-
-/*
- * Command LTTNG_STOP_TRACE processed by the client thread.
- */
-int cmd_stop_trace(struct ltt_session *session)
-{
-       int ret;
-       struct ltt_kernel_session *ksession;
-       struct ltt_ust_session *usess;
-
-       LTTNG_ASSERT(session);
-
-       DBG("Begin stop session \"%s\" (id %" PRIu64 ")", session->name, session->id);
-       /* Short cut */
-       ksession = session->kernel_session;
-       usess = session->ust_session;
-
-       /* Session is not active. Skip everything and inform the client. */
-       if (!session->active) {
-               ret = LTTNG_ERR_TRACE_ALREADY_STOPPED;
-               goto error;
-       }
-
-       ret = stop_kernel_session(ksession);
-       if (ret != LTTNG_OK) {
-               goto error;
-       }
-
-       if (usess && usess->active) {
-               ret = ust_app_stop_trace_all(usess);
-               if (ret < 0) {
-                       ret = LTTNG_ERR_UST_STOP_FAIL;
-                       goto error;
-               }
-       }
-
-       DBG("Completed stop session \"%s\" (id %" PRIu64 ")", session->name,
-                       session->id);
-       /* Flag inactive after a successful stop. */
-       session->active = 0;
-       ret = LTTNG_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Set the base_path of the session only if the subdir of a control URI is set.
- * Return LTTNG_OK on success, otherwise LTTNG_ERR_*.
- */
-static int set_session_base_path_from_uris(struct ltt_session *session,
-               size_t nb_uri,
-               struct lttng_uri *uris)
-{
-       int ret;
-       size_t i;
-
-       for (i = 0; i < nb_uri; i++) {
-               if (uris[i].stype != LTTNG_STREAM_CONTROL ||
-                               uris[i].subdir[0] == '\0') {
-                       /* Not interested in these URIs */
-                       continue;
-               }
-
-               if (session->base_path != NULL) {
-                       free(session->base_path);
-                       session->base_path = NULL;
-               }
-
-               /* Set session base_path */
-               session->base_path = strdup(uris[i].subdir);
-               if (!session->base_path) {
-                       PERROR("Failed to copy base path \"%s\" to session \"%s\"",
-                                       uris[i].subdir, session->name);
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-               DBG2("Setting base path \"%s\" for session \"%s\"",
-                               session->base_path, session->name);
-       }
-       ret = LTTNG_OK;
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
- */
-int cmd_set_consumer_uri(struct ltt_session *session, size_t nb_uri,
-               struct lttng_uri *uris)
-{
-       int ret, i;
-       struct ltt_kernel_session *ksess = session->kernel_session;
-       struct ltt_ust_session *usess = session->ust_session;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(uris);
-       LTTNG_ASSERT(nb_uri > 0);
-
-       /* Can't set consumer URI if the session is active. */
-       if (session->active) {
-               ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
-               goto error;
-       }
-
-       /*
-        * Set the session base path if any. This is done inside
-        * cmd_set_consumer_uri to preserve backward compatibility between the
-        * previous session creation API and the session descriptor API.
-        */
-       ret = set_session_base_path_from_uris(session, nb_uri, uris);
-       if (ret != LTTNG_OK) {
-               goto error;
-       }
-
-       /* Set the "global" consumer URIs */
-       for (i = 0; i < nb_uri; i++) {
-               ret = add_uri_to_consumer(session, session->consumer, &uris[i],
-                               LTTNG_DOMAIN_NONE);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-       }
-
-       /* Set UST session URIs */
-       if (session->ust_session) {
-               for (i = 0; i < nb_uri; i++) {
-                       ret = add_uri_to_consumer(session,
-                                       session->ust_session->consumer,
-                                       &uris[i], LTTNG_DOMAIN_UST);
-                       if (ret != LTTNG_OK) {
-                               goto error;
-                       }
-               }
-       }
-
-       /* Set kernel session URIs */
-       if (session->kernel_session) {
-               for (i = 0; i < nb_uri; i++) {
-                       ret = add_uri_to_consumer(session,
-                                       session->kernel_session->consumer,
-                                       &uris[i], LTTNG_DOMAIN_KERNEL);
-                       if (ret != LTTNG_OK) {
-                               goto error;
-                       }
-               }
-       }
-
-       /*
-        * Make sure to set the session in output mode after we set the URI,
-        * since a session can be created without a URL (and is thus flagged as
-        * having no output).
-        */
-       session->output_traces = 1;
-       if (ksess) {
-               ksess->output_traces = 1;
-       }
-
-       if (usess) {
-               usess->output_traces = 1;
-       }
-
-       /* All good! */
-       ret = LTTNG_OK;
-
-error:
-       return ret;
-}
-
-static
-enum lttng_error_code set_session_output_from_descriptor(
-               struct ltt_session *session,
-               const struct lttng_session_descriptor *descriptor)
-{
-       int ret;
-       enum lttng_error_code ret_code = LTTNG_OK;
-       enum lttng_session_descriptor_type session_type =
-                       lttng_session_descriptor_get_type(descriptor);
-       enum lttng_session_descriptor_output_type output_type =
-                       lttng_session_descriptor_get_output_type(descriptor);
-       struct lttng_uri uris[2] = {};
-       size_t uri_count = 0;
-
-       switch (output_type) {
-       case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_NONE:
-               goto end;
-       case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_LOCAL:
-               lttng_session_descriptor_get_local_output_uri(descriptor,
-                               &uris[0]);
-               uri_count = 1;
-               break;
-       case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_NETWORK:
-               lttng_session_descriptor_get_network_output_uris(descriptor,
-                               &uris[0], &uris[1]);
-               uri_count = 2;
-               break;
-       default:
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       switch (session_type) {
-       case LTTNG_SESSION_DESCRIPTOR_TYPE_SNAPSHOT:
-       {
-               struct snapshot_output *new_output = NULL;
-
-               new_output = snapshot_output_alloc();
-               if (!new_output) {
-                       ret_code = LTTNG_ERR_NOMEM;
-                       goto end;
-               }
-
-               ret = snapshot_output_init_with_uri(session,
-                               DEFAULT_SNAPSHOT_MAX_SIZE,
-                               NULL, uris, uri_count, session->consumer,
-                               new_output, &session->snapshot);
-               if (ret < 0) {
-                       ret_code = (ret == -ENOMEM) ?
-                                       LTTNG_ERR_NOMEM : LTTNG_ERR_INVALID;
-                       snapshot_output_destroy(new_output);
-                       goto end;
-               }
-               snapshot_add_output(&session->snapshot, new_output);
-               break;
-       }
-       case LTTNG_SESSION_DESCRIPTOR_TYPE_REGULAR:
-       case LTTNG_SESSION_DESCRIPTOR_TYPE_LIVE:
-       {
-               ret_code = cmd_set_consumer_uri(session, uri_count, uris);
-               break;
-       }
-       default:
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-end:
-       return ret_code;
-}
-
-static
-enum lttng_error_code cmd_create_session_from_descriptor(
-               struct lttng_session_descriptor *descriptor,
-               const lttng_sock_cred *creds,
-               const char *home_path)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       const char *session_name;
-       struct ltt_session *new_session = NULL;
-       enum lttng_session_descriptor_status descriptor_status;
-
-       session_lock_list();
-       if (home_path) {
-               if (*home_path != '/') {
-                       ERR("Home path provided by client is not absolute");
-                       ret_code = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-       }
-
-       descriptor_status = lttng_session_descriptor_get_session_name(
-                       descriptor, &session_name);
-       switch (descriptor_status) {
-       case LTTNG_SESSION_DESCRIPTOR_STATUS_OK:
-               break;
-       case LTTNG_SESSION_DESCRIPTOR_STATUS_UNSET:
-               session_name = NULL;
-               break;
-       default:
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       ret_code = session_create(session_name, creds->uid, creds->gid,
-                       &new_session);
-       if (ret_code != LTTNG_OK) {
-               goto end;
-       }
-
-       if (!session_name) {
-               ret = lttng_session_descriptor_set_session_name(descriptor,
-                               new_session->name);
-               if (ret) {
-                       ret_code = LTTNG_ERR_SESSION_FAIL;
-                       goto end;
-               }
-       }
-
-       if (!lttng_session_descriptor_is_output_destination_initialized(
-                       descriptor)) {
-               /*
-                * Only include the session's creation time in the output
-                * destination if the name of the session itself was
-                * not auto-generated.
-                */
-               ret_code = lttng_session_descriptor_set_default_output(
-                               descriptor,
-                               session_name ? &new_session->creation_time : NULL,
-                               home_path);
-               if (ret_code != LTTNG_OK) {
-                       goto end;
-               }
-       } else {
-               new_session->has_user_specified_directory =
-                               lttng_session_descriptor_has_output_directory(
-                                       descriptor);
-       }
-
-       switch (lttng_session_descriptor_get_type(descriptor)) {
-       case LTTNG_SESSION_DESCRIPTOR_TYPE_SNAPSHOT:
-               new_session->snapshot_mode = 1;
-               break;
-       case LTTNG_SESSION_DESCRIPTOR_TYPE_LIVE:
-               new_session->live_timer =
-                               lttng_session_descriptor_live_get_timer_interval(
-                                       descriptor);
-               break;
-       default:
-               break;
-       }
-
-       ret_code = set_session_output_from_descriptor(new_session, descriptor);
-       if (ret_code != LTTNG_OK) {
-               goto end;
-       }
-       new_session->consumer->enabled = 1;
-       ret_code = LTTNG_OK;
-end:
-       /* Release reference provided by the session_create function. */
-       session_put(new_session);
-       if (ret_code != LTTNG_OK && new_session) {
-               /* Release the global reference on error. */
-               session_destroy(new_session);
-       }
-       session_unlock_list();
-       return ret_code;
-}
-
-enum lttng_error_code cmd_create_session(struct command_ctx *cmd_ctx, int sock,
-               struct lttng_session_descriptor **return_descriptor)
-{
-       int ret;
-       size_t payload_size;
-       struct lttng_dynamic_buffer payload;
-       struct lttng_buffer_view home_dir_view;
-       struct lttng_buffer_view session_descriptor_view;
-       struct lttng_session_descriptor *session_descriptor = NULL;
-       enum lttng_error_code ret_code;
-
-       lttng_dynamic_buffer_init(&payload);
-       if (cmd_ctx->lsm.u.create_session.home_dir_size >=
-                       LTTNG_PATH_MAX) {
-               ret_code = LTTNG_ERR_INVALID;
-               goto error;
-       }
-       if (cmd_ctx->lsm.u.create_session.session_descriptor_size >
-                       LTTNG_SESSION_DESCRIPTOR_MAX_LEN) {
-               ret_code = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       payload_size = cmd_ctx->lsm.u.create_session.home_dir_size +
-                       cmd_ctx->lsm.u.create_session.session_descriptor_size;
-       ret = lttng_dynamic_buffer_set_size(&payload, payload_size);
-       if (ret) {
-               ret_code = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       ret = lttcomm_recv_unix_sock(sock, payload.data, payload.size);
-       if (ret <= 0) {
-               ERR("Reception of session descriptor failed, aborting.");
-               ret_code = LTTNG_ERR_SESSION_FAIL;
-               goto error;
-       }
-
-       home_dir_view = lttng_buffer_view_from_dynamic_buffer(
-                       &payload,
-                       0,
-                       cmd_ctx->lsm.u.create_session.home_dir_size);
-       if (cmd_ctx->lsm.u.create_session.home_dir_size > 0 &&
-                       !lttng_buffer_view_is_valid(&home_dir_view)) {
-               ERR("Invalid payload in \"create session\" command: buffer too short to contain home directory");
-               ret_code = LTTNG_ERR_INVALID_PROTOCOL;
-               goto error;
-       }
-
-       session_descriptor_view = lttng_buffer_view_from_dynamic_buffer(
-                       &payload,
-                       cmd_ctx->lsm.u.create_session.home_dir_size,
-                       cmd_ctx->lsm.u.create_session.session_descriptor_size);
-       if (!lttng_buffer_view_is_valid(&session_descriptor_view)) {
-               ERR("Invalid payload in \"create session\" command: buffer too short to contain session descriptor");
-               ret_code = LTTNG_ERR_INVALID_PROTOCOL;
-               goto error;
-       }
-
-       ret = lttng_session_descriptor_create_from_buffer(
-                       &session_descriptor_view, &session_descriptor);
-       if (ret < 0) {
-               ERR("Failed to create session descriptor from payload of \"create session\" command");
-               ret_code = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       /*
-        * Sets the descriptor's auto-generated properties (name, output) if
-        * needed.
-        */
-       ret_code = cmd_create_session_from_descriptor(session_descriptor,
-                       &cmd_ctx->creds,
-                       home_dir_view.size ? home_dir_view.data : NULL);
-       if (ret_code != LTTNG_OK) {
-               goto error;
-       }
-
-       ret_code = LTTNG_OK;
-       *return_descriptor = session_descriptor;
-       session_descriptor = NULL;
-error:
-       lttng_dynamic_buffer_reset(&payload);
-       lttng_session_descriptor_destroy(session_descriptor);
-       return ret_code;
-}
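
As an aside on the command's wire format: the payload received above is a single buffer that the fixed-size command header describes as two back-to-back regions, an optional home directory string followed by the serialized session descriptor, and the two "views" are simply offset/length slices of that buffer. Below is a minimal, self-contained sketch of that slicing pattern; buffer_view and view_from_buffer are illustrative stand-ins, not the lttng_buffer_view API.

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for a read-only view into an existing buffer. */
    struct buffer_view {
            const char *data;
            size_t size;
    };

    /* Return a view of [offset, offset + len), or an invalid (NULL) view. */
    static struct buffer_view view_from_buffer(const char *buf,
                    size_t buf_size, size_t offset, size_t len)
    {
            struct buffer_view view = { .data = NULL, .size = 0 };

            if (offset <= buf_size && len <= buf_size - offset) {
                    view.data = buf + offset;
                    view.size = len;
            }

            return view;
    }

    int main(void)
    {
            /* Payload layout: home directory string, then descriptor bytes. */
            const char payload[] = "/home/user\0DESCRIPTOR-BYTES";
            const size_t home_dir_size = sizeof("/home/user");
            const size_t descriptor_size = sizeof(payload) - home_dir_size;
            const struct buffer_view home_view = view_from_buffer(
                            payload, sizeof(payload), 0, home_dir_size);
            const struct buffer_view descriptor_view = view_from_buffer(
                            payload, sizeof(payload), home_dir_size,
                            descriptor_size);

            if (!home_view.data || !descriptor_view.data) {
                    fprintf(stderr, "Payload too short\n");
                    return 1;
            }

            printf("home dir: \"%s\" (%zu bytes), descriptor: %zu bytes\n",
                            home_view.data, home_view.size,
                            descriptor_view.size);
            return 0;
    }
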
-
-static
-void cmd_destroy_session_reply(const struct ltt_session *session,
-               void *_reply_context)
-{
-       int ret;
-       ssize_t comm_ret;
-       const struct cmd_destroy_session_reply_context *reply_context =
-                       _reply_context;
-       struct lttng_dynamic_buffer payload;
-       struct lttcomm_session_destroy_command_header cmd_header;
-       struct lttng_trace_archive_location *location = NULL;
-       struct lttcomm_lttng_msg llm = {
-               .cmd_type = LTTNG_DESTROY_SESSION,
-               .ret_code = reply_context->destruction_status,
-               .pid = UINT32_MAX,
-               .cmd_header_size =
-                       sizeof(struct lttcomm_session_destroy_command_header),
-               .data_size = 0,
-       };
-       size_t payload_size_before_location;
-
-       lttng_dynamic_buffer_init(&payload);
-
-       ret = lttng_dynamic_buffer_append(&payload, &llm, sizeof(llm));
-       if (ret) {
-               ERR("Failed to append session destruction message");
-               goto error;
-       }
-
-       cmd_header.rotation_state =
-                       (int32_t) (reply_context->implicit_rotation_on_destroy ?
-                               session->rotation_state :
-                               LTTNG_ROTATION_STATE_NO_ROTATION);
-       ret = lttng_dynamic_buffer_append(&payload, &cmd_header,
-                       sizeof(cmd_header));
-       if (ret) {
-               ERR("Failed to append session destruction command header");
-               goto error;
-       }
-
-       if (!reply_context->implicit_rotation_on_destroy) {
-               DBG("No implicit rotation performed during the destruction of session \"%s\", sending reply",
-                               session->name);
-               goto send_reply;
-       }
-       if (session->rotation_state != LTTNG_ROTATION_STATE_COMPLETED) {
-               DBG("Rotation state of session \"%s\" is not \"completed\", sending session destruction reply",
-                               session->name);
-               goto send_reply;
-       }
-
-       location = session_get_trace_archive_location(session);
-       if (!location) {
-               ERR("Failed to get the location of the trace archive produced during the destruction of session \"%s\"",
-                               session->name);
-               goto error;
-       }
-
-       payload_size_before_location = payload.size;
-       comm_ret = lttng_trace_archive_location_serialize(location,
-                       &payload);
-       lttng_trace_archive_location_put(location);
-       if (comm_ret < 0) {
-               ERR("Failed to serialize the location of the trace archive produced during the destruction of session \"%s\"",
-                               session->name);
-               goto error;
-       }
-       /* Update the message to indicate the location's length. */
-       ((struct lttcomm_lttng_msg *) payload.data)->data_size =
-                       payload.size - payload_size_before_location;
-send_reply:
-       comm_ret = lttcomm_send_unix_sock(reply_context->reply_sock_fd,
-                       payload.data, payload.size);
-       if (comm_ret != (ssize_t) payload.size) {
-               ERR("Failed to send result of the destruction of session \"%s\" to client",
-                               session->name);
-       }
-error:
-       ret = close(reply_context->reply_sock_fd);
-       if (ret) {
-               PERROR("Failed to close client socket in deferred session destroy reply");
-       }
-       lttng_dynamic_buffer_reset(&payload);
-       free(_reply_context);
-}
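
The reply assembled above follows a pattern used throughout this file: append a fixed-size message header whose data_size is not yet known, append the variable-length payload (here, the serialized trace archive location), then patch the header's data_size in place before sending. Here is a minimal sketch of that back-patching pattern; msg_header and buffer_append are simplified stand-ins rather than the real lttcomm/lttng_dynamic_buffer API.

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical fixed-size reply header; the real one has more fields. */
    struct msg_header {
            uint32_t cmd_type;
            uint32_t data_size;     /* Patched once the payload is appended. */
    };

    /* Append 'len' bytes to a growable buffer; return 0 on success. */
    static int buffer_append(char **buf, size_t *size, const void *data,
                    size_t len)
    {
            char *new_buf = realloc(*buf, *size + len);

            if (!new_buf) {
                    return -1;
            }

            memcpy(new_buf + *size, data, len);
            *buf = new_buf;
            *size += len;
            return 0;
    }

    int main(void)
    {
            char *buf = NULL;
            size_t size = 0;
            const char location[] = "serialized trace archive location";
            const struct msg_header header = { .cmd_type = 42, .data_size = 0 };
            const size_t header_offset = size;

            /* Append the header first; its data_size is not known yet. */
            if (buffer_append(&buf, &size, &header, sizeof(header)) ||
                            buffer_append(&buf, &size, location,
                                    sizeof(location))) {
                    free(buf);
                    return 1;
            }

            /* Back-patch data_size now that the payload length is known. */
            ((struct msg_header *) (buf + header_offset))->data_size =
                            (uint32_t) (size - header_offset - sizeof(header));

            printf("message: %zu bytes, data_size = %" PRIu32 "\n", size,
                            ((const struct msg_header *) buf)->data_size);
            free(buf);
            return 0;
    }
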
-
-/*
- * Command LTTNG_DESTROY_SESSION processed by the client thread.
- *
- * Called with session lock held.
- */
-int cmd_destroy_session(struct ltt_session *session,
-               struct notification_thread_handle *notification_thread_handle,
-               int *sock_fd)
-{
-       int ret;
-       enum lttng_error_code destruction_last_error = LTTNG_OK;
-       struct cmd_destroy_session_reply_context *reply_context = NULL;
-
-       if (sock_fd) {
-               reply_context = zmalloc(sizeof(*reply_context));
-               if (!reply_context) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto end;
-               }
-               reply_context->reply_sock_fd = *sock_fd;
-       }
-
-       /* Safety net */
-       LTTNG_ASSERT(session);
-
-       DBG("Begin destroy session %s (id %" PRIu64 ")", session->name,
-                       session->id);
-       if (session->active) {
-               DBG("Session \"%s\" is active, attempting to stop it before destroying it",
-                               session->name);
-               ret = cmd_stop_trace(session);
-               if (ret != LTTNG_OK && ret != LTTNG_ERR_TRACE_ALREADY_STOPPED) {
-                       /* Carry on with the destruction of the session. */
-                       ERR("Failed to stop session \"%s\" as part of its destruction: %s",
-                                       session->name, lttng_strerror(-ret));
-                       destruction_last_error = ret;
-               }
-       }
-
-       if (session->rotation_schedule_timer_enabled) {
-               if (timer_session_rotation_schedule_timer_stop(
-                               session)) {
-                       ERR("Failed to stop the \"rotation schedule\" timer of session %s",
-                                       session->name);
-                       destruction_last_error = LTTNG_ERR_TIMER_STOP_ERROR;
-               }
-       }
-
-       if (session->rotate_size) {
-               unsubscribe_session_consumed_size_rotation(session, notification_thread_handle);
-               session->rotate_size = 0;
-       }
-
-       if (session->rotated && session->current_trace_chunk && session->output_traces) {
-               /*
-                * Perform a last rotation on destruction if rotations have
-                * occurred during the session's lifetime.
-                */
-               ret = cmd_rotate_session(session, NULL, false,
-                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
-               if (ret != LTTNG_OK) {
-                       ERR("Failed to perform an implicit rotation as part of the destruction of session \"%s\": %s",
-                                       session->name, lttng_strerror(-ret));
-                       destruction_last_error = -ret;
-               }
-               if (reply_context) {
-                       reply_context->implicit_rotation_on_destroy = true;
-               }
-       } else if (session->has_been_started && session->current_trace_chunk) {
-               /*
-                * The user has not triggered a session rotation. However, to
-                * ensure all data has been consumed, the session is rotated
-                * to a 'null' trace chunk before it is destroyed.
-                *
-                * This is a "quiet" rotation meaning that no notification is
-                * emitted and no renaming of the current trace chunk takes
-                * place.
-                */
-               ret = cmd_rotate_session(session, NULL, true,
-                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION);
-               /*
-                * Rotation operations may not be supported by the kernel
-                * tracer. Hence, do not consider this implicit rotation as
-                * a session destruction error. The library has already stopped
-                * the session and waited for pending data; there is nothing
-                * left to do but complete the destruction of the session.
-                */
-               if (ret != LTTNG_OK &&
-                               ret != -LTTNG_ERR_ROTATION_NOT_AVAILABLE_KERNEL) {
-                       ERR("Failed to perform a quiet rotation as part of the destruction of session \"%s\": %s",
-                           session->name, lttng_strerror(ret));
-                       destruction_last_error = -ret;
-               }
-       }
-
-       if (session->shm_path[0]) {
-               /*
-                * When a session is created with an explicit shm_path,
-                * the consumer daemon will create its shared memory files
-                * at that location and will *not* unlink them. This is normal
-                * as the intention of that feature is to make it possible
-                * to retrieve the content of those files should a crash occur.
-                *
-                * To ensure the content of those files can be used, the
-                * sessiond daemon will replicate the content of the metadata
-                * cache in a metadata file.
-                *
-                * On clean-up, it is expected that the consumer daemon will
-                * unlink the shared memory files and that the session daemon
-                * will unlink the metadata file. Then, the session's directory
-                * in the shm path can be removed.
-                *
-                * Unfortunately, a flaw in the design of the sessiond's and
-                * consumerd's tear down of channels makes it impossible to
-                * determine when the sessiond _and_ the consumerd have both
-                * destroyed their representation of a channel. For one, the
-                * unlinking, close, and rmdir happen in deferred 'call_rcu'
-                * callbacks in both daemons.
-                *
-                * However, it is also impossible for the sessiond to know when
-                * the consumer daemon is done destroying its channel(s) since
-                * it occurs as a reaction to the closing of the channel's file
-                * descriptor. There is no resulting communication initiated
-                * from the consumerd to the sessiond to confirm that the
-                * operation is completed (and was successful).
-                *
-                * Until this is all fixed, the session daemon checks for the
-                * removal of the session's shm path which makes it possible
-                * to safely advertise a session as having been destroyed.
-                *
-                * Prior to this fix, it was not possible to reliably save
-                * a session making use of the --shm-path option, destroy it,
-                * and load it again. This is because the creation of the
-                * session would fail upon seeing the session's shm path
-                * already in existence.
-                *
-                * Note that none of the error paths in the check for the
-                * directory's existence return an error. This is normal
-                * as there isn't much that can be done. The session will
-                * be destroyed properly, except that we can't offer the
-                * guarantee that the same session can be re-created.
-                */
-               current_completion_handler = &destroy_completion_handler.handler;
-               ret = lttng_strncpy(destroy_completion_handler.shm_path,
-                               session->shm_path,
-                               sizeof(destroy_completion_handler.shm_path));
-               LTTNG_ASSERT(!ret);
-       }
-
-       /*
-        * The session is destroyed. However, note that the command context
-        * still holds a reference to the session, thus delaying its destruction
-        * _at least_ up to the point when that reference is released.
-        */
-       session_destroy(session);
-       if (reply_context) {
-               reply_context->destruction_status = destruction_last_error;
-               ret = session_add_destroy_notifier(session,
-                               cmd_destroy_session_reply,
-                               (void *) reply_context);
-               if (ret) {
-                       ret = LTTNG_ERR_FATAL;
-                       goto end;
-               } else {
-                       *sock_fd = -1;
-               }
-       }
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/*
- * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
- */
-int cmd_register_consumer(struct ltt_session *session,
-               enum lttng_domain_type domain, const char *sock_path,
-               struct consumer_data *cdata)
-{
-       int ret, sock;
-       struct consumer_socket *socket = NULL;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(cdata);
-       LTTNG_ASSERT(sock_path);
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               struct ltt_kernel_session *ksess = session->kernel_session;
-
-               LTTNG_ASSERT(ksess);
-
-               /* Can't register a consumer if there is already one */
-               if (ksess->consumer_fds_sent != 0) {
-                       ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
-                       goto error;
-               }
-
-               sock = lttcomm_connect_unix_sock(sock_path);
-               if (sock < 0) {
-                       ret = LTTNG_ERR_CONNECT_FAIL;
-                       goto error;
-               }
-               cdata->cmd_sock = sock;
-
-               socket = consumer_allocate_socket(&cdata->cmd_sock);
-               if (socket == NULL) {
-                       ret = close(sock);
-                       if (ret < 0) {
-                               PERROR("close register consumer");
-                       }
-                       cdata->cmd_sock = -1;
-                       ret = LTTNG_ERR_FATAL;
-                       goto error;
-               }
-
-               socket->lock = zmalloc(sizeof(pthread_mutex_t));
-               if (socket->lock == NULL) {
-                       PERROR("zmalloc pthread mutex");
-                       ret = LTTNG_ERR_FATAL;
-                       goto error;
-               }
-               pthread_mutex_init(socket->lock, NULL);
-               socket->registered = 1;
-
-               rcu_read_lock();
-               consumer_add_socket(socket, ksess->consumer);
-               rcu_read_unlock();
-
-               pthread_mutex_lock(&cdata->pid_mutex);
-               cdata->pid = -1;
-               pthread_mutex_unlock(&cdata->pid_mutex);
-
-               break;
-       }
-       default:
-               /* TODO: Userspace tracing */
-               ret = LTTNG_ERR_UND;
-               goto error;
-       }
-
-       return LTTNG_OK;
-
-error:
-       if (socket) {
-               consumer_destroy_socket(socket);
-       }
-       return ret;
-}
-
-/*
- * Command LTTNG_LIST_DOMAINS processed by the client thread.
- */
-ssize_t cmd_list_domains(struct ltt_session *session,
-               struct lttng_domain **domains)
-{
-       int ret, index = 0;
-       ssize_t nb_dom = 0;
-       struct agent *agt;
-       struct lttng_ht_iter iter;
-
-       if (session->kernel_session != NULL) {
-               DBG3("Listing domains found kernel domain");
-               nb_dom++;
-       }
-
-       if (session->ust_session != NULL) {
-               DBG3("Listing domains found UST global domain");
-               nb_dom++;
-
-               rcu_read_lock();
-               cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
-                               agt, node.node) {
-                       if (agt->being_used) {
-                               nb_dom++;
-                       }
-               }
-               rcu_read_unlock();
-       }
-
-       if (!nb_dom) {
-               goto end;
-       }
-
-       *domains = zmalloc(nb_dom * sizeof(struct lttng_domain));
-       if (*domains == NULL) {
-               ret = LTTNG_ERR_FATAL;
-               goto error;
-       }
-
-       if (session->kernel_session != NULL) {
-               (*domains)[index].type = LTTNG_DOMAIN_KERNEL;
-
-               /* Kernel session buffer type is always GLOBAL */
-               (*domains)[index].buf_type = LTTNG_BUFFER_GLOBAL;
-
-               index++;
-       }
-
-       if (session->ust_session != NULL) {
-               (*domains)[index].type = LTTNG_DOMAIN_UST;
-               (*domains)[index].buf_type = session->ust_session->buffer_type;
-               index++;
-
-               rcu_read_lock();
-               cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
-                               agt, node.node) {
-                       if (agt->being_used) {
-                               (*domains)[index].type = agt->domain;
-                               (*domains)[index].buf_type = session->ust_session->buffer_type;
-                               index++;
-                       }
-               }
-               rcu_read_unlock();
-       }
-end:
-       return nb_dom;
-
-error:
-       /* Return a negative value to differentiate an error from a valid count. */
-       return -ret;
-}
-
-
-/*
- * Command LTTNG_LIST_CHANNELS processed by the client thread.
- */
-ssize_t cmd_list_channels(enum lttng_domain_type domain,
-               struct ltt_session *session, struct lttng_channel **channels)
-{
-       ssize_t nb_chan = 0, payload_size = 0, ret;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               if (session->kernel_session != NULL) {
-                       nb_chan = session->kernel_session->channel_count;
-               }
-               DBG3("Number of kernel channels %zd", nb_chan);
-               if (nb_chan <= 0) {
-                       ret = -LTTNG_ERR_KERN_CHAN_NOT_FOUND;
-                       goto end;
-               }
-               break;
-       case LTTNG_DOMAIN_UST:
-               if (session->ust_session != NULL) {
-                       rcu_read_lock();
-                       nb_chan = lttng_ht_get_count(
-                               session->ust_session->domain_global.channels);
-                       rcu_read_unlock();
-               }
-               DBG3("Number of UST global channels %zd", nb_chan);
-               if (nb_chan < 0) {
-                       ret = -LTTNG_ERR_UST_CHAN_NOT_FOUND;
-                       goto end;
-               }
-               break;
-       default:
-               ret = -LTTNG_ERR_UND;
-               goto end;
-       }
-
-       if (nb_chan > 0) {
-               const size_t channel_size = sizeof(struct lttng_channel) +
-                       sizeof(struct lttng_channel_extended);
-               struct lttng_channel_extended *channel_exts;
-
-               payload_size = nb_chan * channel_size;
-               *channels = zmalloc(payload_size);
-               if (*channels == NULL) {
-                       ret = -LTTNG_ERR_FATAL;
-                       goto end;
-               }
-
-               channel_exts = ((void *) *channels) +
-                               (nb_chan * sizeof(struct lttng_channel));
-               ret = list_lttng_channels(domain, session, *channels, channel_exts);
-               if (ret != LTTNG_OK) {
-                       free(*channels);
-                       *channels = NULL;
-                       goto end;
-               }
-       } else {
-               *channels = NULL;
-       }
-
-       ret = payload_size;
-end:
-       return ret;
-}
-
-/*
- * Command LTTNG_LIST_EVENTS processed by the client thread.
- */
-ssize_t cmd_list_events(enum lttng_domain_type domain,
-               struct ltt_session *session, char *channel_name,
-               struct lttng_payload *payload)
-{
-       int ret = 0;
-       ssize_t nb_events = 0;
-       struct lttcomm_event_command_header cmd_header = {};
-       const size_t cmd_header_offset = payload->buffer.size;
-
-       ret = lttng_dynamic_buffer_append(
-                       &payload->buffer, &cmd_header, sizeof(cmd_header));
-       if (ret) {
-               ret = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               if (session->kernel_session != NULL) {
-                       nb_events = list_lttng_kernel_events(channel_name,
-                                       session->kernel_session, payload);
-               }
-               break;
-       case LTTNG_DOMAIN_UST:
-       {
-               if (session->ust_session != NULL) {
-                       nb_events = list_lttng_ust_global_events(channel_name,
-                                       &session->ust_session->domain_global,
-                                       payload);
-               }
-               break;
-       }
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_PYTHON:
-               if (session->ust_session) {
-                       struct lttng_ht_iter iter;
-                       struct agent *agt;
-
-                       rcu_read_lock();
-                       cds_lfht_for_each_entry(session->ust_session->agents->ht,
-                                       &iter.iter, agt, node.node) {
-                               if (agt->domain == domain) {
-                                       nb_events = list_lttng_agent_events(
-                                                       agt, payload);
-                                       break;
-                               }
-                       }
-                       rcu_read_unlock();
-               }
-               break;
-       default:
-               ret = LTTNG_ERR_UND;
-               goto error;
-       }
-
-       ((struct lttcomm_event_command_header *) (payload->buffer.data +
-                        cmd_header_offset))->nb_events = (uint32_t) nb_events;
-
-       return nb_events;
-
-error:
-       /* Return a negative value to differentiate an error from a valid count. */
-       return -ret;
-}
-
-/*
- * Using the session list, fill an lttng_session array to send back to the
- * client for session listing.
- *
- * The session list lock MUST be acquired before calling this function. Use
- * session_lock_list() and session_unlock_list().
- */
-void cmd_list_lttng_sessions(struct lttng_session *sessions,
-               size_t session_count, uid_t uid, gid_t gid)
-{
-       int ret;
-       unsigned int i = 0;
-       struct ltt_session *session;
-       struct ltt_session_list *list = session_get_list();
-       struct lttng_session_extended *extended =
-                       (typeof(extended)) (&sessions[session_count]);
-
-       DBG("Getting all available sessions for UID %d GID %d",
-                       uid, gid);
-       /*
-        * Iterate over session list and append data after the control struct in
-        * the buffer.
-        */
-       cds_list_for_each_entry(session, &list->head, list) {
-               if (!session_get(session)) {
-                       continue;
-               }
-               /*
-                * Only list the sessions the user can control.
-                */
-               if (!session_access_ok(session, uid) ||
-                               session->destroyed) {
-                       session_put(session);
-                       continue;
-               }
-
-               struct ltt_kernel_session *ksess = session->kernel_session;
-               struct ltt_ust_session *usess = session->ust_session;
-
-               if (session->consumer->type == CONSUMER_DST_NET ||
-                               (ksess && ksess->consumer->type == CONSUMER_DST_NET) ||
-                               (usess && usess->consumer->type == CONSUMER_DST_NET)) {
-                       ret = build_network_session_path(sessions[i].path,
-                                       sizeof(sessions[i].path), session);
-               } else {
-                       ret = snprintf(sessions[i].path, sizeof(sessions[i].path), "%s",
-                                       session->consumer->dst.session_root_path);
-               }
-               if (ret < 0) {
-                       PERROR("snprintf session path");
-                       session_put(session);
-                       continue;
-               }
-
-               strncpy(sessions[i].name, session->name, NAME_MAX);
-               sessions[i].name[NAME_MAX - 1] = '\0';
-               sessions[i].enabled = session->active;
-               sessions[i].snapshot_mode = session->snapshot_mode;
-               sessions[i].live_timer_interval = session->live_timer;
-               extended[i].creation_time.value = (uint64_t) session->creation_time;
-               extended[i].creation_time.is_set = 1;
-               i++;
-               session_put(session);
-       }
-}
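
The listing above depends on the layout of the reply buffer: session_count lttng_session entries followed immediately by session_count "extended" entries, which is why &sessions[session_count] is the start of the extended array. A minimal, standalone illustration of that trailing-array layout follows; session_entry and session_extended are simplified stand-ins for the lttng-ctl types.

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins for the base and extended per-session entries. */
    struct session_entry {
            char name[32];
            int32_t enabled;
            int32_t snapshot_mode;
    };

    struct session_extended {
            uint64_t creation_time;
    };

    int main(void)
    {
            const size_t count = 3;
            /* One allocation: 'count' base entries, then 'count' extended ones. */
            struct session_entry *sessions = calloc(count,
                            sizeof(*sessions) + sizeof(struct session_extended));
            struct session_extended *extended;
            size_t i;

            if (!sessions) {
                    return 1;
            }

            /* The extended array starts right after the last base entry. */
            extended = (struct session_extended *) &sessions[count];

            for (i = 0; i < count; i++) {
                    snprintf(sessions[i].name, sizeof(sessions[i].name),
                                    "session-%zu", i);
                    sessions[i].enabled = 1;
                    extended[i].creation_time = 1600000000 + i;
            }

            for (i = 0; i < count; i++) {
                    printf("%s: enabled=%" PRId32 ", created=%" PRIu64 "\n",
                                    sessions[i].name, sessions[i].enabled,
                                    extended[i].creation_time);
            }

            free(sessions);
            return 0;
    }
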
-
-/*
- * Command LTTNG_DATA_PENDING. Return 0 if the data is NOT pending, meaning it
- * is ready for trace analysis (or any kind of reader), or 1 if data is still
- * pending.
- */
-int cmd_data_pending(struct ltt_session *session)
-{
-       int ret;
-       struct ltt_kernel_session *ksess = session->kernel_session;
-       struct ltt_ust_session *usess = session->ust_session;
-
-       LTTNG_ASSERT(session);
-
-       DBG("Data pending for session %s", session->name);
-
-       /* Session MUST be stopped to ask for data availability. */
-       if (session->active) {
-               ret = LTTNG_ERR_SESSION_STARTED;
-               goto error;
-       } else {
-               /*
-                * If stopped, make sure the session has been started at least
-                * once; otherwise the consumer checks below would always
-                * report pending data.
-                *
-                * The consumer assumes that when the data pending command is
-                * received, the trace has been started beforehand; otherwise
-                * no output data is written by the streams, which is a
-                * condition for data pending. So it is *VERY* important not
-                * to ask the consumer before the trace has been started.
-                */
-               if (!session->has_been_started) {
-                       ret = 0;
-                       goto error;
-               }
-       }
-
-       /* A rotation is still pending, we have to wait. */
-       if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
-               DBG("Rotate still pending for session %s", session->name);
-               ret = 1;
-               goto error;
-       }
-
-       if (ksess && ksess->consumer) {
-               ret = consumer_is_data_pending(ksess->id, ksess->consumer);
-               if (ret == 1) {
-                       /* Data is still being extracted for the kernel. */
-                       goto error;
-               }
-       }
-
-       if (usess && usess->consumer) {
-               ret = consumer_is_data_pending(usess->id, usess->consumer);
-               if (ret == 1) {
-                       /* Data is still being extracted for the user space tracer. */
-                       goto error;
-               }
-       }
-
-       /* Data is ready to be read by a viewer */
-       ret = 0;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_SNAPSHOT_ADD_OUTPUT from the lttng ctl library.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR code.
- */
-int cmd_snapshot_add_output(struct ltt_session *session,
-               const struct lttng_snapshot_output *output, uint32_t *id)
-{
-       int ret;
-       struct snapshot_output *new_output;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(output);
-
-       DBG("Cmd snapshot add output for session %s", session->name);
-
-       /*
-        * Can't create an output if the session is not set in no-output mode.
-        */
-       if (session->output_traces) {
-               ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
-               goto error;
-       }
-
-       if (session->has_non_mmap_channel) {
-               ret = LTTNG_ERR_SNAPSHOT_UNSUPPORTED;
-               goto error;
-       }
-
-       /* Only one output is allowed until we have the "tee" feature. */
-       if (session->snapshot.nb_output == 1) {
-               ret = LTTNG_ERR_SNAPSHOT_OUTPUT_EXIST;
-               goto error;
-       }
-
-       new_output = snapshot_output_alloc();
-       if (!new_output) {
-               ret = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       ret = snapshot_output_init(session, output->max_size, output->name,
-                       output->ctrl_url, output->data_url, session->consumer, new_output,
-                       &session->snapshot);
-       if (ret < 0) {
-               if (ret == -ENOMEM) {
-                       ret = LTTNG_ERR_NOMEM;
-               } else {
-                       ret = LTTNG_ERR_INVALID;
-               }
-               goto free_error;
-       }
-
-       rcu_read_lock();
-       snapshot_add_output(&session->snapshot, new_output);
-       if (id) {
-               *id = new_output->id;
-       }
-       rcu_read_unlock();
-
-       return LTTNG_OK;
-
-free_error:
-       snapshot_output_destroy(new_output);
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_SNAPSHOT_DEL_OUTPUT from lib lttng ctl.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR code.
- */
-int cmd_snapshot_del_output(struct ltt_session *session,
-               const struct lttng_snapshot_output *output)
-{
-       int ret;
-       struct snapshot_output *sout = NULL;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(output);
-
-       rcu_read_lock();
-
-       /*
-        * Permission denied to delete an output if the session is not
-        * set in no-output mode.
-        */
-       if (session->output_traces) {
-               ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
-               goto error;
-       }
-
-       if (output->id) {
-               DBG("Cmd snapshot del output id %" PRIu32 " for session %s", output->id,
-                               session->name);
-               sout = snapshot_find_output_by_id(output->id, &session->snapshot);
-       } else if (*output->name != '\0') {
-               DBG("Cmd snapshot del output name %s for session %s", output->name,
-                               session->name);
-               sout = snapshot_find_output_by_name(output->name, &session->snapshot);
-       }
-       if (!sout) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       snapshot_delete_output(&session->snapshot, sout);
-       snapshot_output_destroy(sout);
-       ret = LTTNG_OK;
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Command LTTNG_SNAPSHOT_LIST_OUTPUT from lib lttng ctl.
- *
- * If no output is available, outputs is untouched and 0 is returned.
- *
- * Return the size of the newly allocated outputs or a negative LTTNG_ERR code.
- */
-ssize_t cmd_snapshot_list_outputs(struct ltt_session *session,
-               struct lttng_snapshot_output **outputs)
-{
-       int ret, idx = 0;
-       struct lttng_snapshot_output *list = NULL;
-       struct lttng_ht_iter iter;
-       struct snapshot_output *output;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(outputs);
-
-       DBG("Cmd snapshot list outputs for session %s", session->name);
-
-       /*
-        * Permission denied to list the outputs if the session is not
-        * set in no-output mode.
-        */
-       if (session->output_traces) {
-               ret = -LTTNG_ERR_NOT_SNAPSHOT_SESSION;
-               goto end;
-       }
-
-       if (session->snapshot.nb_output == 0) {
-               ret = 0;
-               goto end;
-       }
-
-       list = zmalloc(session->snapshot.nb_output * sizeof(*list));
-       if (!list) {
-               ret = -LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-       /* Copy list from session to the new list object. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry(session->snapshot.output_ht->ht, &iter.iter,
-                       output, node.node) {
-               LTTNG_ASSERT(output->consumer);
-               list[idx].id = output->id;
-               list[idx].max_size = output->max_size;
-               if (lttng_strncpy(list[idx].name, output->name,
-                               sizeof(list[idx].name))) {
-                       ret = -LTTNG_ERR_INVALID;
-                       goto error;
-               }
-               if (output->consumer->type == CONSUMER_DST_LOCAL) {
-                       if (lttng_strncpy(list[idx].ctrl_url,
-                                       output->consumer->dst.session_root_path,
-                                       sizeof(list[idx].ctrl_url))) {
-                               ret = -LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-               } else {
-                       /* Control URI. */
-                       ret = uri_to_str_url(&output->consumer->dst.net.control,
-                                       list[idx].ctrl_url, sizeof(list[idx].ctrl_url));
-                       if (ret < 0) {
-                               ret = -LTTNG_ERR_NOMEM;
-                               goto error;
-                       }
-
-                       /* Data URI. */
-                       ret = uri_to_str_url(&output->consumer->dst.net.data,
-                                       list[idx].data_url, sizeof(list[idx].data_url));
-                       if (ret < 0) {
-                               ret = -LTTNG_ERR_NOMEM;
-                               goto error;
-                       }
-               }
-               idx++;
-       }
-
-       *outputs = list;
-       list = NULL;
-       ret = session->snapshot.nb_output;
-error:
-       rcu_read_unlock();
-       free(list);
-end:
-       return ret;
-}
-
-/*
- * Check if we can regenerate the metadata for this session.
- * Only kernel, UST per-uid and non-live sessions are supported.
- *
- * Return 0 if the metadata can be generated, a LTTNG_ERR code otherwise.
- */
-static
-int check_regenerate_metadata_support(struct ltt_session *session)
-{
-       int ret;
-
-       LTTNG_ASSERT(session);
-
-       if (session->live_timer != 0) {
-               ret = LTTNG_ERR_LIVE_SESSION;
-               goto end;
-       }
-       if (!session->active) {
-               ret = LTTNG_ERR_SESSION_NOT_STARTED;
-               goto end;
-       }
-       if (session->ust_session) {
-               switch (session->ust_session->buffer_type) {
-               case LTTNG_BUFFER_PER_UID:
-                       break;
-               case LTTNG_BUFFER_PER_PID:
-                       ret = LTTNG_ERR_PER_PID_SESSION;
-                       goto end;
-               default:
-                       abort();
-                       ret = LTTNG_ERR_UNK;
-                       goto end;
-               }
-       }
-       if (session->consumer->type == CONSUMER_DST_NET &&
-                       session->consumer->relay_minor_version < 8) {
-               ret = LTTNG_ERR_RELAYD_VERSION_FAIL;
-               goto end;
-       }
-       ret = 0;
-
-end:
-       return ret;
-}
-
-static
-int clear_metadata_file(int fd)
-{
-       int ret;
-       off_t lseek_ret;
-
-       lseek_ret = lseek(fd, 0, SEEK_SET);
-       if (lseek_ret < 0) {
-               PERROR("lseek");
-               ret = -1;
-               goto end;
-       }
-
-       ret = ftruncate(fd, 0);
-       if (ret < 0) {
-               PERROR("ftruncate");
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-static
-int ust_regenerate_metadata(struct ltt_ust_session *usess)
-{
-       int ret = 0;
-       struct buffer_reg_uid *uid_reg = NULL;
-       struct buffer_reg_session *session_reg = NULL;
-
-       rcu_read_lock();
-       cds_list_for_each_entry(uid_reg, &usess->buffer_reg_uid_list, lnode) {
-               struct ust_registry_session *registry;
-               struct ust_registry_channel *chan;
-               struct lttng_ht_iter iter_chan;
-
-               session_reg = uid_reg->registry;
-               registry = session_reg->reg.ust;
-
-               pthread_mutex_lock(&registry->lock);
-               registry->metadata_len_sent = 0;
-               memset(registry->metadata, 0, registry->metadata_alloc_len);
-               registry->metadata_len = 0;
-               registry->metadata_version++;
-               if (registry->metadata_fd > 0) {
-                       /* Clear the metadata file's content. */
-                       ret = clear_metadata_file(registry->metadata_fd);
-                       if (ret) {
-                               pthread_mutex_unlock(&registry->lock);
-                               goto end;
-                       }
-               }
-
-               ret = ust_metadata_session_statedump(registry, NULL,
-                               registry->major, registry->minor);
-               if (ret) {
-                       pthread_mutex_unlock(&registry->lock);
-                       ERR("Failed to generate session metadata (err = %d)",
-                                       ret);
-                       goto end;
-               }
-               cds_lfht_for_each_entry(registry->channels->ht, &iter_chan.iter,
-                               chan, node.node) {
-                       struct ust_registry_event *event;
-                       struct lttng_ht_iter iter_event;
-
-                       ret = ust_metadata_channel_statedump(registry, chan);
-                       if (ret) {
-                               pthread_mutex_unlock(&registry->lock);
-                               ERR("Failed to generate channel metadata "
-                                               "(err = %d)", ret);
-                               goto end;
-                       }
-                       cds_lfht_for_each_entry(chan->ht->ht, &iter_event.iter,
-                                       event, node.node) {
-                               ret = ust_metadata_event_statedump(registry,
-                                               chan, event);
-                               if (ret) {
-                                       pthread_mutex_unlock(&registry->lock);
-                                       ERR("Failed to generate event metadata "
-                                                       "(err = %d)", ret);
-                                       goto end;
-                               }
-                       }
-               }
-               pthread_mutex_unlock(&registry->lock);
-       }
-
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Command LTTNG_REGENERATE_METADATA from the lttng-ctl library.
- *
- * Ask the consumer to truncate the existing metadata file(s) and
- * then regenerate the metadata. Live and per-pid sessions are not
- * supported and return an error.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR code.
- */
-int cmd_regenerate_metadata(struct ltt_session *session)
-{
-       int ret;
-
-       LTTNG_ASSERT(session);
-
-       ret = check_regenerate_metadata_support(session);
-       if (ret) {
-               goto end;
-       }
-
-       if (session->kernel_session) {
-               ret = kernctl_session_regenerate_metadata(
-                               session->kernel_session->fd);
-               if (ret < 0) {
-                       ERR("Failed to regenerate the kernel metadata");
-                       goto end;
-               }
-       }
-
-       if (session->ust_session) {
-               ret = ust_regenerate_metadata(session->ust_session);
-               if (ret < 0) {
-                       ERR("Failed to regenerate the UST metadata");
-                       goto end;
-               }
-       }
-       DBG("Cmd metadata regenerate for session %s", session->name);
-       ret = LTTNG_OK;
-
-end:
-       return ret;
-}
-
-/*
- * Command LTTNG_REGENERATE_STATEDUMP from the lttng-ctl library.
- *
- * Ask the tracer to regenerate a new statedump.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR code.
- */
-int cmd_regenerate_statedump(struct ltt_session *session)
-{
-       int ret;
-
-       LTTNG_ASSERT(session);
-
-       if (!session->active) {
-               ret = LTTNG_ERR_SESSION_NOT_STARTED;
-               goto end;
-       }
-
-       if (session->kernel_session) {
-               ret = kernctl_session_regenerate_statedump(
-                               session->kernel_session->fd);
-               /*
-                * Currently, the statedump in kernel can only fail if out
-                * of memory.
-                */
-               if (ret < 0) {
-                       if (ret == -ENOMEM) {
-                               ret = LTTNG_ERR_REGEN_STATEDUMP_NOMEM;
-                       } else {
-                               ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
-                       }
-                       ERR("Failed to regenerate the kernel statedump");
-                       goto end;
-               }
-       }
-
-       if (session->ust_session) {
-               ret = ust_app_regenerate_statedump_all(session->ust_session);
-               /*
-                * Currently, the statedump in UST always returns 0.
-                */
-               if (ret < 0) {
-                       ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
-                       ERR("Failed to regenerate the UST statedump");
-                       goto end;
-               }
-       }
-       DBG("Cmd regenerate statedump for session %s", session->name);
-       ret = LTTNG_OK;
-
-end:
-       return ret;
-}
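
For context, a minimal client-side sketch of how the two regenerate commands above are reached through liblttng-ctl; the lttng_regenerate_metadata()/lttng_regenerate_statedump() calls and the simplified error handling are assumptions for illustration, not part of this patch.

#include <stdio.h>
#include <lttng/lttng.h>

/* Hedged sketch: run both regenerate commands for a given session. */
static int regenerate_all(const char *session_name)
{
        int ret;

        /* Served by cmd_regenerate_metadata() in the session daemon. */
        ret = lttng_regenerate_metadata(session_name);
        if (ret < 0) {
                fprintf(stderr, "metadata: %s\n", lttng_strerror(ret));
                return ret;
        }

        /* Served by cmd_regenerate_statedump() in the session daemon. */
        ret = lttng_regenerate_statedump(session_name);
        if (ret < 0) {
                fprintf(stderr, "statedump: %s\n", lttng_strerror(ret));
        }

        return ret;
}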
-
-static
-enum lttng_error_code synchronize_tracer_notifier_register(
-               struct notification_thread_handle *notification_thread,
-               struct lttng_trigger *trigger, const struct lttng_credentials *cmd_creds)
-{
-       enum lttng_error_code ret_code;
-       const struct lttng_condition *condition =
-                       lttng_trigger_get_const_condition(trigger);
-       const char *trigger_name;
-       uid_t trigger_owner;
-       enum lttng_trigger_status trigger_status;
-       const enum lttng_domain_type trigger_domain =
-                       lttng_trigger_get_underlying_domain_type_restriction(
-                                       trigger);
-
-       trigger_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner);
-       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
-
-       LTTNG_ASSERT(condition);
-       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
-                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
-
-       trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
-       trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ?
-                       trigger_name : "(anonymous)";
-
-       session_lock_list();
-       switch (trigger_domain) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               ret_code = kernel_register_event_notifier(trigger, cmd_creds);
-               if (ret_code != LTTNG_OK) {
-                       enum lttng_error_code notif_thread_unregister_ret;
-
-                       notif_thread_unregister_ret =
-                                       notification_thread_command_unregister_trigger(
-                                               notification_thread, trigger);
-
-                       if (notif_thread_unregister_ret != LTTNG_OK) {
-                               /* Return the original error code. */
-                               ERR("Failed to unregister trigger from notification thread during error recovery: trigger name = '%s', trigger owner uid = %d, error code = %d",
-                                               trigger_name,
-                                               (int) trigger_owner,
-                                               ret_code);
-                       }
-               }
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-               ust_app_global_update_all_event_notifier_rules();
-               break;
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_PYTHON:
-       {
-               /* Agent domains. */
-               struct agent *agt = agent_find_by_event_notifier_domain(
-                               trigger_domain);
-
-               if (!agt) {
-                       agt = agent_create(trigger_domain);
-                       if (!agt) {
-                               ret_code = LTTNG_ERR_NOMEM;
-                               goto end_unlock_session_list;
-                       }
-
-                       agent_add(agt, the_trigger_agents_ht_by_domain);
-               }
-
-               ret_code = trigger_agent_enable(trigger, agt);
-               if (ret_code != LTTNG_OK) {
-                       goto end_unlock_session_list;
-               }
-
-               break;
-       }
-       case LTTNG_DOMAIN_NONE:
-       default:
-               abort();
-       }
-
-       ret_code = LTTNG_OK;
-end_unlock_session_list:
-       session_unlock_list();
-       return ret_code;
-}
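
The kernel branch above follows a common roll-back pattern: when the second registration step fails, the earlier notification-thread registration is undone, but the original error code is the one reported. A self-contained sketch of that pattern, with invented step names, purely for illustration:

#include <stdio.h>

enum status { STATUS_OK = 0, STATUS_ERROR = -1 };

static enum status first_step_register(void) { return STATUS_OK; }
static enum status first_step_unregister(void) { return STATUS_OK; }
static enum status second_step_register(void) { return STATUS_ERROR; }

static enum status register_both(void)
{
        enum status status = first_step_register();

        if (status != STATUS_OK) {
                return status;
        }

        status = second_step_register();
        if (status != STATUS_OK) {
                /* Undo the first step, but report the original failure. */
                if (first_step_unregister() != STATUS_OK) {
                        fprintf(stderr, "roll-back of first step failed\n");
                }
        }

        return status;
}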
-
-enum lttng_error_code cmd_register_trigger(const struct lttng_credentials *cmd_creds,
-               struct lttng_trigger *trigger,
-               bool is_trigger_anonymous,
-               struct notification_thread_handle *notification_thread,
-               struct lttng_trigger **return_trigger)
-{
-       enum lttng_error_code ret_code;
-       const char *trigger_name;
-       uid_t trigger_owner;
-       enum lttng_trigger_status trigger_status;
-
-       trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
-       trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ?
-                       trigger_name : "(anonymous)";
-
-       trigger_status = lttng_trigger_get_owner_uid(
-               trigger, &trigger_owner);
-       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
-
-       DBG("Running register trigger command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
-                       trigger_name, (int) trigger_owner,
-                       (int) lttng_credentials_get_uid(cmd_creds));
-
-       /*
-        * Validate the trigger credentials against the command credentials.
-        * Only the root user can register a trigger with non-matching
-        * credentials.
-        */
-       if (!lttng_credentials_is_equal_uid(
-                       lttng_trigger_get_credentials(trigger),
-                       cmd_creds)) {
-               if (lttng_credentials_get_uid(cmd_creds) != 0) {
-                       ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
-                                       trigger_name, (int) trigger_owner,
-                                       (int) lttng_credentials_get_uid(cmd_creds));
-                       ret_code = LTTNG_ERR_INVALID_TRIGGER;
-                       goto end;
-               }
-       }
-
-       /*
-        * The bytecode generation also serves as a validation step for the
-        * bytecode expressions.
-        */
-       ret_code = lttng_trigger_generate_bytecode(trigger, cmd_creds);
-       if (ret_code != LTTNG_OK) {
-               ERR("Failed to generate bytecode of trigger: trigger name = '%s', trigger owner uid = %d, error code = %d",
-                               trigger_name, (int) trigger_owner, ret_code);
-               goto end;
-       }
-
-       /*
-        * A reference to the trigger is acquired by the notification thread.
-        * It is safe to return the same trigger to the caller since the
-        * other user holds a reference.
-        *
-        * The trigger is modified during the execution of the
-        * "register trigger" command. However, by the time the command returns,
-        * it is safe to use without any locking as its properties are
-        * immutable.
-        */
-       ret_code = notification_thread_command_register_trigger(
-                       notification_thread, trigger, is_trigger_anonymous);
-       if (ret_code != LTTNG_OK) {
-               DBG("Failed to register trigger to notification thread: trigger name = '%s', trigger owner uid = %d, error code = %d",
-                               trigger_name, (int) trigger_owner, ret_code);
-               goto end;
-       }
-
-       trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
-       trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ?
-                       trigger_name : "(anonymous)";
-
-       /*
-        * Synchronize tracers if the trigger adds an event notifier.
-        */
-       if (lttng_trigger_needs_tracer_notifier(trigger)) {
-               ret_code = synchronize_tracer_notifier_register(notification_thread,
-                               trigger, cmd_creds);
-               if (ret_code != LTTNG_OK) {
-                       ERR("Error registering tracer notifier: %s",
-                                       lttng_strerror(-ret_code));
-                       goto end;
-               }
-       }
-
-       /*
-        * Return an updated trigger to the client.
-        *
-        * Since a modified version of the same trigger is returned, acquire a
-        * reference to the trigger so the caller doesn't have to care if those
-        * are distinct instances or not.
-        */
-       if (ret_code == LTTNG_OK) {
-               lttng_trigger_get(trigger);
-               *return_trigger = trigger;
-               /* Ownership of trigger was transferred to caller. */
-               trigger = NULL;
-       }
-end:
-       return ret_code;
-}
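
cmd_register_trigger(), cmd_unregister_trigger() and cmd_execute_error_query() below all apply the same access rule: the command's uid must match the trigger owner's uid unless the command comes from root. A stand-alone restatement of that check, with hypothetical names, for illustration only:

#include <stdbool.h>
#include <sys/types.h>

/* Hypothetical mirror of the credential checks performed by these commands. */
static bool trigger_operation_allowed(uid_t trigger_owner_uid, uid_t cmd_uid)
{
        /* Root may act on any trigger; other users only on their own. */
        return cmd_uid == 0 || cmd_uid == trigger_owner_uid;
}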
-
-static
-enum lttng_error_code synchronize_tracer_notifier_unregister(
-               const struct lttng_trigger *trigger)
-{
-       enum lttng_error_code ret_code;
-       const struct lttng_condition *condition =
-                       lttng_trigger_get_const_condition(trigger);
-       const enum lttng_domain_type trigger_domain =
-                       lttng_trigger_get_underlying_domain_type_restriction(
-                                       trigger);
-
-       LTTNG_ASSERT(condition);
-       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
-                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
-
-       session_lock_list();
-       switch (trigger_domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               ret_code = kernel_unregister_event_notifier(trigger);
-               if (ret_code != LTTNG_OK) {
-                       goto end_unlock_session_list;
-               }
-
-               break;
-       case LTTNG_DOMAIN_UST:
-               ust_app_global_update_all_event_notifier_rules();
-               break;
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_PYTHON:
-       {
-               /* Agent domains. */
-               struct agent *agt = agent_find_by_event_notifier_domain(
-                               trigger_domain);
-
-               /*
-                * If the agent is not found, this trigger was never registered
-                * in the first place. Calling this function under those
-                * circumstances is an internal error.
-                */
-               LTTNG_ASSERT(agt);
-               ret_code = trigger_agent_disable(trigger, agt);
-               if (ret_code != LTTNG_OK) {
-                       goto end_unlock_session_list;
-               }
-
-               break;
-       }
-       case LTTNG_DOMAIN_NONE:
-       default:
-               abort();
-       }
-
-       ret_code = LTTNG_OK;
-
-end_unlock_session_list:
-       session_unlock_list();
-       return ret_code;
-}
-
-enum lttng_error_code cmd_unregister_trigger(const struct lttng_credentials *cmd_creds,
-               const struct lttng_trigger *trigger,
-               struct notification_thread_handle *notification_thread)
-{
-       enum lttng_error_code ret_code;
-       const char *trigger_name;
-       uid_t trigger_owner;
-       enum lttng_trigger_status trigger_status;
-       struct lttng_trigger *sessiond_trigger = NULL;
-
-       trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
-       trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
-       trigger_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner);
-       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
-
-       DBG("Running unregister trigger command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
-                       trigger_name, (int) trigger_owner,
-                       (int) lttng_credentials_get_uid(cmd_creds));
-
-       /*
-        * Validate the trigger credentials against the command credentials.
-        * Only the root user can unregister a trigger with non-matching
-        * credentials.
-        */
-       if (!lttng_credentials_is_equal_uid(
-                       lttng_trigger_get_credentials(trigger),
-                       cmd_creds)) {
-               if (lttng_credentials_get_uid(cmd_creds) != 0) {
-                       ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
-                                       trigger_name, (int) trigger_owner,
-                                       (int) lttng_credentials_get_uid(cmd_creds));
-                       ret_code = LTTNG_ERR_INVALID_TRIGGER;
-                       goto end;
-               }
-       }
-
-       /* Fetch the sessiond side trigger object. */
-       ret_code = notification_thread_command_get_trigger(
-                       notification_thread, trigger, &sessiond_trigger);
-       if (ret_code != LTTNG_OK) {
-               DBG("Failed to get trigger from notification thread during unregister: trigger name = '%s', trigger owner uid = %d, error code = %d",
-                               trigger_name, (int) trigger_owner, ret_code);
-               goto end;
-       }
-
-       LTTNG_ASSERT(sessiond_trigger);
-
-       /*
-        * From this point on, no matter what, consider the trigger
-        * unregistered.
-        *
-        * We set the unregistered state of the sessiond side trigger object in
-        * the client thread since we want to minimize the possibility of the
-        * notification thread being stalled due to a long execution of an
-        * action that required the trigger lock.
-        */
-       lttng_trigger_set_as_unregistered(sessiond_trigger);
-
-       ret_code = notification_thread_command_unregister_trigger(notification_thread,
-                                                                 trigger);
-       if (ret_code != LTTNG_OK) {
-               DBG("Failed to unregister trigger from notification thread: trigger name = '%s', trigger owner uid = %d, error code = %d",
-                               trigger_name, (int) trigger_owner, ret_code);
-               goto end;
-       }
-
-       /*
-        * Synchronize tracers if the trigger removes an event notifier.
-        * Do this even if the trigger unregistration failed, so as to at
-        * least stop the tracers from producing notifications associated
-        * with this event notifier.
-        */
-       if (lttng_trigger_needs_tracer_notifier(trigger)) {
-               ret_code = synchronize_tracer_notifier_unregister(trigger);
-               if (ret_code != LTTNG_OK) {
-                       ERR("Error unregistering trigger from the tracer.");
-                       goto end;
-               }
-       }
-
-end:
-       lttng_trigger_put(sessiond_trigger);
-       return ret_code;
-}
-
-enum lttng_error_code cmd_list_triggers(struct command_ctx *cmd_ctx,
-               struct notification_thread_handle *notification_thread,
-               struct lttng_triggers **return_triggers)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct lttng_triggers *triggers = NULL;
-
-       /* Get the set of triggers from the notification thread. */
-       ret_code = notification_thread_command_list_triggers(
-                       notification_thread, cmd_ctx->creds.uid, &triggers);
-       if (ret_code != LTTNG_OK) {
-               goto end;
-       }
-
-       ret = lttng_triggers_remove_hidden_triggers(triggers);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-
-       *return_triggers = triggers;
-       triggers = NULL;
-       ret_code = LTTNG_OK;
-end:
-       lttng_triggers_destroy(triggers);
-       return ret_code;
-}
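
A hedged sketch of the client side of cmd_list_triggers(); lttng_list_triggers(), lttng_triggers_get_count() and lttng_triggers_get_at_index() are assumed from the public trigger API and their exact signatures may differ.

#include <stdio.h>
#include <lttng/lttng.h>

/* Hedged sketch: list the names of the triggers visible to this user. */
static int print_trigger_names(void)
{
        int ret = 0;
        unsigned int i, count = 0;
        struct lttng_triggers *triggers = NULL;

        if (lttng_list_triggers(&triggers) != LTTNG_OK) {
                return -1;
        }

        if (lttng_triggers_get_count(triggers, &count) !=
                        LTTNG_TRIGGER_STATUS_OK) {
                ret = -1;
                goto end;
        }

        for (i = 0; i < count; i++) {
                const struct lttng_trigger *trigger =
                                lttng_triggers_get_at_index(triggers, i);
                const char *name = "(anonymous)";

                (void) lttng_trigger_get_name(trigger, &name);
                printf("trigger: %s\n", name);
        }
end:
        lttng_triggers_destroy(triggers);
        return ret;
}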
-
-enum lttng_error_code cmd_execute_error_query(const struct lttng_credentials *cmd_creds,
-               const struct lttng_error_query *query,
-               struct lttng_error_query_results **_results,
-               struct notification_thread_handle *notification_thread)
-{
-       enum lttng_error_code ret_code;
-       const struct lttng_trigger *query_target_trigger;
-       const struct lttng_action *query_target_action = NULL;
-       struct lttng_trigger *matching_trigger = NULL;
-       const char *trigger_name;
-       uid_t trigger_owner;
-       enum lttng_trigger_status trigger_status;
-       struct lttng_error_query_results *results = NULL;
-
-       switch (lttng_error_query_get_target_type(query)) {
-       case LTTNG_ERROR_QUERY_TARGET_TYPE_TRIGGER:
-               query_target_trigger = lttng_error_query_trigger_borrow_target(query);
-               break;
-       case LTTNG_ERROR_QUERY_TARGET_TYPE_CONDITION:
-               query_target_trigger =
-                               lttng_error_query_condition_borrow_target(query);
-               break;
-       case LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION:
-               query_target_trigger = lttng_error_query_action_borrow_trigger_target(
-                               query);
-               break;
-       default:
-               abort();
-       }
-
-       LTTNG_ASSERT(query_target_trigger);
-
-       ret_code = notification_thread_command_get_trigger(notification_thread,
-                       query_target_trigger, &matching_trigger);
-       if (ret_code != LTTNG_OK) {
-               goto end;
-       }
-
-       /* No longer needed. */
-       query_target_trigger = NULL;
-
-       if (lttng_error_query_get_target_type(query) ==
-                       LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION) {
-               /* Get the sessiond-side version of the target action. */
-               query_target_action =
-                               lttng_error_query_action_borrow_action_target(
-                                               query, matching_trigger);
-       }
-
-       trigger_status = lttng_trigger_get_name(matching_trigger, &trigger_name);
-       trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ?
-                       trigger_name : "(anonymous)";
-       trigger_status = lttng_trigger_get_owner_uid(matching_trigger,
-                       &trigger_owner);
-       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
-
-       results = lttng_error_query_results_create();
-       if (!results) {
-               ret_code = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-       DBG("Running \"execute error query\" command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
-                       trigger_name, (int) trigger_owner,
-                       (int) lttng_credentials_get_uid(cmd_creds));
-
-       /*
-        * Validate the trigger credentials against the command credentials.
-        * Only the root user can target a trigger with non-matching
-        * credentials.
-        */
-       if (!lttng_credentials_is_equal_uid(
-                       lttng_trigger_get_credentials(matching_trigger),
-                       cmd_creds)) {
-               if (lttng_credentials_get_uid(cmd_creds) != 0) {
-                       ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
-                                       trigger_name, (int) trigger_owner,
-                                       (int) lttng_credentials_get_uid(cmd_creds));
-                       ret_code = LTTNG_ERR_INVALID_TRIGGER;
-                       goto end;
-               }
-       }
-
-       switch (lttng_error_query_get_target_type(query)) {
-       case LTTNG_ERROR_QUERY_TARGET_TYPE_TRIGGER:
-               trigger_status = lttng_trigger_add_error_results(
-                               matching_trigger, results);
-
-               switch (trigger_status) {
-               case LTTNG_TRIGGER_STATUS_OK:
-                       break;
-               default:
-                       ret_code = LTTNG_ERR_UNK;
-                       goto end;
-               }
-
-               break;
-       case LTTNG_ERROR_QUERY_TARGET_TYPE_CONDITION:
-       {
-               trigger_status = lttng_trigger_condition_add_error_results(
-                               matching_trigger, results);
-
-               switch (trigger_status) {
-               case LTTNG_TRIGGER_STATUS_OK:
-                       break;
-               default:
-                       ret_code = LTTNG_ERR_UNK;
-                       goto end;
-               }
-
-               break;
-       }
-       case LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION:
-       {
-               const enum lttng_action_status action_status =
-                               lttng_action_add_error_query_results(
-                                               query_target_action, results);
-
-               switch (action_status) {
-               case LTTNG_ACTION_STATUS_OK:
-                       break;
-               default:
-                       ret_code = LTTNG_ERR_UNK;
-                       goto end;
-               }
-
-               break;
-       }
-       default:
-               abort();
-               break;
-       }
-
-       *_results = results;
-       results = NULL;
-       ret_code = LTTNG_OK;
-end:
-       lttng_trigger_put(matching_trigger);
-       lttng_error_query_results_destroy(results);
-       return ret_code;
-}
-
-/*
- * Send the relayd sockets from the snapshot output to the consumer. Ignore the
- * request if the snapshot output is *not* set to a remote destination.
- *
- * Return LTTNG_OK on success or a LTTNG_ERR code.
- */
-static enum lttng_error_code set_relayd_for_snapshot(
-               struct consumer_output *output,
-               const struct ltt_session *session)
-{
-       enum lttng_error_code status = LTTNG_OK;
-       struct lttng_ht_iter iter;
-       struct consumer_socket *socket;
-       LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
-       const char *base_path;
-
-       LTTNG_ASSERT(output);
-       LTTNG_ASSERT(session);
-
-       DBG2("Set relayd object from snapshot output");
-
-       if (session->current_trace_chunk) {
-               enum lttng_trace_chunk_status chunk_status =
-                               lttng_trace_chunk_get_id(
-                                               session->current_trace_chunk,
-                                               &current_chunk_id.value);
-
-               if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       current_chunk_id.is_set = true;
-               } else {
-                       ERR("Failed to get current trace chunk id");
-                       status = LTTNG_ERR_UNK;
-                       goto error;
-               }
-       }
-
-       /* Ignore if snapshot consumer output is not network. */
-       if (output->type != CONSUMER_DST_NET) {
-               goto error;
-       }
-
-       /*
-        * The snapshot record URI base path overrides the session
-        * base path.
-        */
-       if (output->dst.net.control.subdir[0] != '\0') {
-               base_path = output->dst.net.control.subdir;
-       } else {
-               base_path = session->base_path;
-       }
-
-       /*
-        * For each consumer socket, create and send the relayd object of the
-        * snapshot output.
-        */
-       rcu_read_lock();
-       cds_lfht_for_each_entry(output->socks->ht, &iter.iter,
-                       socket, node.node) {
-               pthread_mutex_lock(socket->lock);
-               status = send_consumer_relayd_sockets(0, session->id,
-                               output, socket,
-                               session->name, session->hostname,
-                               base_path,
-                               session->live_timer,
-                               current_chunk_id.is_set ? &current_chunk_id.value : NULL,
-                               session->creation_time,
-                               session->name_contains_creation_time);
-               pthread_mutex_unlock(socket->lock);
-               if (status != LTTNG_OK) {
-                       rcu_read_unlock();
-                       goto error;
-               }
-       }
-       rcu_read_unlock();
-
-error:
-       return status;
-}
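
set_relayd_for_snapshot() uses the LTTNG_OPTIONAL() helper to carry an "is it set?" flag along with the current chunk id. A minimal sketch of that idiom, assuming (as its use above suggests) that it boils down to an {is_set, value} pair:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the LTTNG_OPTIONAL(uint64_t) used above. */
struct optional_u64 {
        bool is_set;
        uint64_t value;
};

static void print_current_chunk_id(const struct optional_u64 *chunk_id)
{
        if (chunk_id->is_set) {
                printf("current chunk id = %" PRIu64 "\n", chunk_id->value);
        } else {
                printf("no current trace chunk\n");
        }
}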
-
-/*
- * Record a kernel snapshot.
- *
- * Return LTTNG_OK on success or a LTTNG_ERR code.
- */
-static enum lttng_error_code record_kernel_snapshot(
-               struct ltt_kernel_session *ksess,
-               const struct consumer_output *output,
-               const struct ltt_session *session,
-               int wait, uint64_t nb_packets_per_stream)
-{
-       enum lttng_error_code status;
-
-       LTTNG_ASSERT(ksess);
-       LTTNG_ASSERT(output);
-       LTTNG_ASSERT(session);
-
-       status = kernel_snapshot_record(
-                       ksess, output, wait, nb_packets_per_stream);
-       return status;
-}
-
-/*
- * Record a UST snapshot.
- *
- * Returns LTTNG_OK on success or a LTTNG_ERR error code.
- */
-static enum lttng_error_code record_ust_snapshot(struct ltt_ust_session *usess,
-               const struct consumer_output *output,
-               const struct ltt_session *session,
-               int wait, uint64_t nb_packets_per_stream)
-{
-       enum lttng_error_code status;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(output);
-       LTTNG_ASSERT(session);
-
-       status = ust_app_snapshot_record(
-                       usess, output, wait, nb_packets_per_stream);
-       return status;
-}
-
-static
-uint64_t get_session_size_one_more_packet_per_stream(
-               const struct ltt_session *session, uint64_t cur_nr_packets)
-{
-       uint64_t tot_size = 0;
-
-       if (session->kernel_session) {
-               struct ltt_kernel_channel *chan;
-               const struct ltt_kernel_session *ksess =
-                               session->kernel_session;
-
-               cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
-                       if (cur_nr_packets >= chan->channel->attr.num_subbuf) {
-                               /*
-                                * Don't take the channel into account if we
-                                * are already grabbing all of its packets.
-                                */
-                               continue;
-                       }
-                       tot_size += chan->channel->attr.subbuf_size
-                               * chan->stream_count;
-               }
-       }
-
-       if (session->ust_session) {
-               const struct ltt_ust_session *usess = session->ust_session;
-
-               tot_size += ust_app_get_size_one_more_packet_per_stream(usess,
-                               cur_nr_packets);
-       }
-
-       return tot_size;
-}
-
-/*
- * Calculate the number of packets we can grab from each stream so that
- * the total fits within the overall snapshot max size.
- *
- * Returns -1 on error, 0 for an unlimited number of packets, or a
- * value > 0 that is the number of packets per stream.
- *
- * TODO: this approach is not perfect: we consider the worst case
- * (packets filling the sub-buffers) as an upper bound, but we could do
- * better if we did this calculation while actually grabbing the packet
- * content: we would then know how much padding we do not actually store
- * in the file.
- *
- * This algorithm is currently bounded by the number of packets per
- * stream.
- *
- * Since we call this algorithm before actually grabbing the data, it's
- * an approximation: for instance, applications could appear/disappear
- * in between this call and actually grabbing data.
- */
-static
-int64_t get_session_nb_packets_per_stream(const struct ltt_session *session,
-               uint64_t max_size)
-{
-       int64_t size_left;
-       uint64_t cur_nb_packets = 0;
-
-       if (!max_size) {
-               return 0;       /* Infinite */
-       }
-
-       size_left = max_size;
-       for (;;) {
-               uint64_t one_more_packet_tot_size;
-
-               one_more_packet_tot_size = get_session_size_one_more_packet_per_stream(
-                               session, cur_nb_packets);
-               if (!one_more_packet_tot_size) {
-                       /* We are already grabbing all packets. */
-                       break;
-               }
-               size_left -= one_more_packet_tot_size;
-               if (size_left < 0) {
-                       break;
-               }
-               cur_nb_packets++;
-       }
-       if (!cur_nb_packets && size_left != max_size) {
-               /* Not enough room to grab one packet of each stream, error. */
-               return -1;
-       }
-       return cur_nb_packets;
-}
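
A self-contained numerical sketch of the loop above, under the simplifying assumption that grabbing one more packet of every stream always costs the same amount; the sizes are invented purely for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified get_session_nb_packets_per_stream(): every round adds one
 * packet per stream and costs 'cost_per_round' bytes of the budget.
 */
static int64_t nb_packets_per_stream(uint64_t max_size, uint64_t cost_per_round)
{
        int64_t size_left;
        uint64_t cur_nb_packets = 0;

        if (!max_size) {
                return 0;       /* Infinite. */
        }
        if (!cost_per_round) {
                return 0;       /* Already grabbing all packets. */
        }

        size_left = (int64_t) max_size;
        for (;;) {
                size_left -= (int64_t) cost_per_round;
                if (size_left < 0) {
                        break;
                }
                cur_nb_packets++;
        }

        if (!cur_nb_packets) {
                /* Not even one packet per stream fits in the budget. */
                return -1;
        }

        return (int64_t) cur_nb_packets;
}

int main(void)
{
        /* 1 MiB budget, 4 streams x 64 kiB sub-buffers = 256 kiB per round. */
        printf("%" PRId64 " packets per stream\n",
                        nb_packets_per_stream(1024 * 1024, 4 * 64 * 1024));
        return 0;
}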
-
-static
-enum lttng_error_code snapshot_record(struct ltt_session *session,
-               const struct snapshot_output *snapshot_output, int wait)
-{
-       int64_t nb_packets_per_stream;
-       char snapshot_chunk_name[LTTNG_NAME_MAX];
-       int ret;
-       enum lttng_error_code ret_code = LTTNG_OK;
-       struct lttng_trace_chunk *snapshot_trace_chunk;
-       struct consumer_output *original_ust_consumer_output = NULL;
-       struct consumer_output *original_kernel_consumer_output = NULL;
-       struct consumer_output *snapshot_ust_consumer_output = NULL;
-       struct consumer_output *snapshot_kernel_consumer_output = NULL;
-
-       ret = snprintf(snapshot_chunk_name, sizeof(snapshot_chunk_name),
-                       "%s-%s-%" PRIu64,
-                       snapshot_output->name,
-                       snapshot_output->datetime,
-                       snapshot_output->nb_snapshot);
-       if (ret < 0 || ret >= sizeof(snapshot_chunk_name)) {
-               ERR("Failed to format snapshot name");
-               ret_code = LTTNG_ERR_INVALID;
-               goto error;
-       }
-       DBG("Recording snapshot \"%s\" for session \"%s\" with chunk name \"%s\"",
-                       snapshot_output->name, session->name,
-                       snapshot_chunk_name);
-       if (!session->kernel_session && !session->ust_session) {
-               ERR("Failed to record snapshot as no channels exist");
-               ret_code = LTTNG_ERR_NO_CHANNEL;
-               goto error;
-       }
-
-       if (session->kernel_session) {
-               original_kernel_consumer_output =
-                               session->kernel_session->consumer;
-               snapshot_kernel_consumer_output =
-                               consumer_copy_output(snapshot_output->consumer);
-               strcpy(snapshot_kernel_consumer_output->chunk_path,
-                       snapshot_chunk_name);
-
-               /* Copy the original domain subdir. */
-               strcpy(snapshot_kernel_consumer_output->domain_subdir,
-                               original_kernel_consumer_output->domain_subdir);
-
-               ret = consumer_copy_sockets(snapshot_kernel_consumer_output,
-                               original_kernel_consumer_output);
-               if (ret < 0) {
-                       ERR("Failed to copy consumer sockets from snapshot output configuration");
-                       ret_code = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-               ret_code = set_relayd_for_snapshot(
-                               snapshot_kernel_consumer_output, session);
-               if (ret_code != LTTNG_OK) {
-                       ERR("Failed to setup relay daemon for kernel tracer snapshot");
-                       goto error;
-               }
-               session->kernel_session->consumer =
-                               snapshot_kernel_consumer_output;
-       }
-       if (session->ust_session) {
-               original_ust_consumer_output = session->ust_session->consumer;
-               snapshot_ust_consumer_output =
-                               consumer_copy_output(snapshot_output->consumer);
-               strcpy(snapshot_ust_consumer_output->chunk_path,
-                       snapshot_chunk_name);
-
-               /* Copy the original domain subdir. */
-               strcpy(snapshot_ust_consumer_output->domain_subdir,
-                               original_ust_consumer_output->domain_subdir);
-
-               ret = consumer_copy_sockets(snapshot_ust_consumer_output,
-                               original_ust_consumer_output);
-               if (ret < 0) {
-                       ERR("Failed to copy consumer sockets from snapshot output configuration");
-                       ret_code = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-               ret_code = set_relayd_for_snapshot(
-                               snapshot_ust_consumer_output, session);
-               if (ret_code != LTTNG_OK) {
-                       ERR("Failed to setup relay daemon for userspace tracer snapshot");
-                       goto error;
-               }
-               session->ust_session->consumer =
-                               snapshot_ust_consumer_output;
-       }
-
-       snapshot_trace_chunk = session_create_new_trace_chunk(session,
-                       snapshot_kernel_consumer_output ?:
-                                       snapshot_ust_consumer_output,
-                       consumer_output_get_base_path(
-                                       snapshot_output->consumer),
-                       snapshot_chunk_name);
-       if (!snapshot_trace_chunk) {
-               ERR("Failed to create temporary trace chunk to record a snapshot of session \"%s\"",
-                               session->name);
-               ret_code = LTTNG_ERR_CREATE_DIR_FAIL;
-               goto error;
-       }
-       LTTNG_ASSERT(!session->current_trace_chunk);
-       ret = session_set_trace_chunk(session, snapshot_trace_chunk, NULL);
-       lttng_trace_chunk_put(snapshot_trace_chunk);
-       snapshot_trace_chunk = NULL;
-       if (ret) {
-               ERR("Failed to set temporary trace chunk to record a snapshot of session \"%s\"",
-                               session->name);
-               ret_code = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
-               goto error;
-       }
-
-       nb_packets_per_stream = get_session_nb_packets_per_stream(session,
-                       snapshot_output->max_size);
-       if (nb_packets_per_stream < 0) {
-               ret_code = LTTNG_ERR_MAX_SIZE_INVALID;
-               goto error_close_trace_chunk;
-       }
-
-       if (session->kernel_session) {
-               ret_code = record_kernel_snapshot(session->kernel_session,
-                               snapshot_kernel_consumer_output, session,
-                               wait, nb_packets_per_stream);
-               if (ret_code != LTTNG_OK) {
-                       goto error_close_trace_chunk;
-               }
-       }
-
-       if (session->ust_session) {
-               ret_code = record_ust_snapshot(session->ust_session,
-                               snapshot_ust_consumer_output, session,
-                               wait, nb_packets_per_stream);
-               if (ret_code != LTTNG_OK) {
-                       goto error_close_trace_chunk;
-               }
-       }
-
-error_close_trace_chunk:
-       if (session_set_trace_chunk(session, NULL, &snapshot_trace_chunk)) {
-               ERR("Failed to release the current trace chunk of session \"%s\"",
-                               session->name);
-               ret_code = LTTNG_ERR_UNK;
-       }
-
-       if (session_close_trace_chunk(session, snapshot_trace_chunk,
-                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION, NULL)) {
-               /*
-                * Don't goto end; make sure the chunk is closed for the session
-                * to allow future snapshots.
-                */
-               ERR("Failed to close snapshot trace chunk of session \"%s\"",
-                               session->name);
-               ret_code = LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
-       }
-error:
-       if (original_ust_consumer_output) {
-               session->ust_session->consumer = original_ust_consumer_output;
-       }
-       if (original_kernel_consumer_output) {
-               session->kernel_session->consumer =
-                               original_kernel_consumer_output;
-       }
-       consumer_output_put(snapshot_ust_consumer_output);
-       consumer_output_put(snapshot_kernel_consumer_output);
-       return ret_code;
-}
-
-/*
- * Command LTTNG_SNAPSHOT_RECORD from lib lttng ctl.
- *
- * The wait parameter is ignored, so this call always waits for the
- * snapshot to complete before returning.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR code.
- */
-int cmd_snapshot_record(struct ltt_session *session,
-               const struct lttng_snapshot_output *output, int wait)
-{
-       enum lttng_error_code cmd_ret = LTTNG_OK;
-       int ret;
-       unsigned int snapshot_success = 0;
-       char datetime[16];
-       struct snapshot_output *tmp_output = NULL;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(output);
-
-       DBG("Cmd snapshot record for session %s", session->name);
-
-       /* Get the datetime for the snapshot output directory. */
-       ret = utils_get_current_time_str("%Y%m%d-%H%M%S", datetime,
-                       sizeof(datetime));
-       if (!ret) {
-               cmd_ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       /*
-        * A snapshot can only be recorded if the session was created
-        * in no-output (snapshot) mode.
-        */
-       if (session->output_traces) {
-               cmd_ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
-               goto error;
-       }
-
-       /* The session needs to be started at least once. */
-       if (!session->has_been_started) {
-               cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
-               goto error;
-       }
-
-       /* Use temporary output for the session. */
-       if (*output->ctrl_url != '\0') {
-               tmp_output = snapshot_output_alloc();
-               if (!tmp_output) {
-                       cmd_ret = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-
-               ret = snapshot_output_init(session, output->max_size,
-                               output->name,
-                               output->ctrl_url, output->data_url,
-                               session->consumer,
-                               tmp_output, NULL);
-               if (ret < 0) {
-                       if (ret == -ENOMEM) {
-                               cmd_ret = LTTNG_ERR_NOMEM;
-                       } else {
-                               cmd_ret = LTTNG_ERR_INVALID;
-                       }
-                       goto error;
-               }
-               /* Use the global session count for the temporary snapshot. */
-               tmp_output->nb_snapshot = session->snapshot.nb_snapshot;
-
-               /* Use the global datetime */
-               memcpy(tmp_output->datetime, datetime, sizeof(datetime));
-               cmd_ret = snapshot_record(session, tmp_output, wait);
-               if (cmd_ret != LTTNG_OK) {
-                       goto error;
-               }
-               snapshot_success = 1;
-       } else {
-               struct snapshot_output *sout;
-               struct lttng_ht_iter iter;
-
-               rcu_read_lock();
-               cds_lfht_for_each_entry(session->snapshot.output_ht->ht,
-                               &iter.iter, sout, node.node) {
-                       struct snapshot_output output_copy;
-
-                       /*
-                        * Make a local copy of the output and override output
-                        * parameters with those provided as part of the
-                        * command.
-                        */
-                       memcpy(&output_copy, sout, sizeof(output_copy));
-
-                       if (output->max_size != (uint64_t) -1ULL) {
-                               output_copy.max_size = output->max_size;
-                       }
-
-                       output_copy.nb_snapshot = session->snapshot.nb_snapshot;
-                       memcpy(output_copy.datetime, datetime,
-                                       sizeof(datetime));
-
-                       /* Use temporary name. */
-                       if (*output->name != '\0') {
-                               if (lttng_strncpy(output_copy.name,
-                                               output->name,
-                                               sizeof(output_copy.name))) {
-                                       cmd_ret = LTTNG_ERR_INVALID;
-                                       rcu_read_unlock();
-                                       goto error;
-                               }
-                       }
-
-                       cmd_ret = snapshot_record(session, &output_copy, wait);
-                       if (cmd_ret != LTTNG_OK) {
-                               rcu_read_unlock();
-                               goto error;
-                       }
-                       snapshot_success = 1;
-               }
-               rcu_read_unlock();
-       }
-
-       if (snapshot_success) {
-               session->snapshot.nb_snapshot++;
-       } else {
-               cmd_ret = LTTNG_ERR_SNAPSHOT_FAIL;
-       }
-
-error:
-       if (tmp_output) {
-               snapshot_output_destroy(tmp_output);
-       }
-       return cmd_ret;
-}
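
A hedged client-side counterpart to cmd_snapshot_record(): recording a snapshot to a temporary remote output. The lttng_snapshot_*() calls are assumed from lttng/snapshot.h and the relay URL is a placeholder.

#include <stdio.h>
#include <lttng/lttng.h>

static int record_remote_snapshot(const char *session_name)
{
        int ret;
        struct lttng_snapshot_output *output = lttng_snapshot_output_create();

        if (!output) {
                return -1;
        }

        /* A non-empty control URL makes the sessiond use a temporary output. */
        ret = lttng_snapshot_output_set_ctrl_url("net://relayd-host", output);
        if (ret < 0) {
                goto end;
        }

        /* Cap the snapshot size; 0 would mean no limit. */
        ret = lttng_snapshot_output_set_size(4 * 1024 * 1024, output);
        if (ret < 0) {
                goto end;
        }

        /* The wait flag is ignored by the sessiond; the call always waits. */
        ret = lttng_snapshot_record(session_name, output, 1);
        if (ret < 0) {
                fprintf(stderr, "snapshot: %s\n", lttng_strerror(ret));
        }
end:
        lttng_snapshot_output_destroy(output);
        return ret;
}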
-
-/*
- * Command LTTNG_SET_SESSION_SHM_PATH processed by the client thread.
- */
-int cmd_set_session_shm_path(struct ltt_session *session,
-               const char *shm_path)
-{
-       /* Safety net */
-       LTTNG_ASSERT(session);
-
-       /*
-        * Can only set shm path before session is started.
-        */
-       if (session->has_been_started) {
-               return LTTNG_ERR_SESSION_STARTED;
-       }
-
-       strncpy(session->shm_path, shm_path,
-               sizeof(session->shm_path));
-       session->shm_path[sizeof(session->shm_path) - 1] = '\0';
-
-       return LTTNG_OK;
-}
-
-/*
- * Command LTTNG_ROTATE_SESSION from the lttng-ctl library.
- *
- * Ask the consumer to rotate the session output directory.
- * The session lock must be held.
- *
- * Returns LTTNG_OK on success or else a negative LTTng error code.
- */
-int cmd_rotate_session(struct ltt_session *session,
-               struct lttng_rotate_session_return *rotate_return,
-               bool quiet_rotation,
-               enum lttng_trace_chunk_command_type command)
-{
-       int ret;
-       uint64_t ongoing_rotation_chunk_id;
-       enum lttng_error_code cmd_ret = LTTNG_OK;
-       struct lttng_trace_chunk *chunk_being_archived = NULL;
-       struct lttng_trace_chunk *new_trace_chunk = NULL;
-       enum lttng_trace_chunk_status chunk_status;
-       bool failed_to_rotate = false;
-       enum lttng_error_code rotation_fail_code = LTTNG_OK;
-
-       LTTNG_ASSERT(session);
-
-       if (!session->has_been_started) {
-               cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
-               goto end;
-       }
-
-       /*
-        * Explicit rotation is not supported for live sessions.
-        * However, live sessions can perform a quiet rotation on
-        * destroy.
-        * Rotation is not supported for snapshot traces (no output).
-        */
-       if ((!quiet_rotation && session->live_timer) ||
-                       !session->output_traces) {
-               cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
-               goto end;
-       }
-
-       /* Unsupported feature in lttng-relayd before 2.11. */
-       if (!quiet_rotation && session->consumer->type == CONSUMER_DST_NET &&
-                       (session->consumer->relay_major_version == 2 &&
-                       session->consumer->relay_minor_version < 11)) {
-               cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE_RELAY;
-               goto end;
-       }
-
-       /* Unsupported feature in lttng-modules before 2.8 (lack of sequence number). */
-       if (session->kernel_session && !kernel_supports_ring_buffer_packet_sequence_number()) {
-               cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE_KERNEL;
-               goto end;
-       }
-
-       if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
-               DBG("Refusing to launch a rotation; a rotation is already in progress for session %s",
-                               session->name);
-               cmd_ret = LTTNG_ERR_ROTATION_PENDING;
-               goto end;
-       }
-
-       /*
-        * After a stop, only one rotation is allowed; further rotations
-        * are pointless until a new start.
-        */
-       if (session->rotated_after_last_stop) {
-               DBG("Session \"%s\" was already rotated after stop, refusing rotation",
-                               session->name);
-               cmd_ret = LTTNG_ERR_ROTATION_MULTIPLE_AFTER_STOP;
-               goto end;
-       }
-
-       /*
-        * After a stop followed by a clear, disallow further rotations
-        * as they would generate empty chunks.
-        */
-       if (session->cleared_after_last_stop) {
-               DBG("Session \"%s\" was already cleared after stop, refusing rotation",
-                               session->name);
-               cmd_ret = LTTNG_ERR_ROTATION_AFTER_STOP_CLEAR;
-               goto end;
-       }
-
-       if (session->active) {
-               new_trace_chunk = session_create_new_trace_chunk(session, NULL,
-                               NULL, NULL);
-               if (!new_trace_chunk) {
-                       cmd_ret = LTTNG_ERR_CREATE_DIR_FAIL;
-                       goto error;
-               }
-       }
-
-       /*
-        * The current trace chunk becomes the chunk being archived.
-        *
-        * After this point, "chunk_being_archived" must absolutely
-        * be closed on the consumer(s), otherwise it will never be
-        * cleaned-up, which will result in a leak.
-        */
-       ret = session_set_trace_chunk(session, new_trace_chunk,
-                       &chunk_being_archived);
-       if (ret) {
-               cmd_ret = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
-               goto error;
-       }
-
-       if (session->kernel_session) {
-               cmd_ret = kernel_rotate_session(session);
-               if (cmd_ret != LTTNG_OK) {
-                       failed_to_rotate = true;
-                       rotation_fail_code = cmd_ret;
-               }
-       }
-       if (session->ust_session) {
-               cmd_ret = ust_app_rotate_session(session);
-               if (cmd_ret != LTTNG_OK) {
-                       failed_to_rotate = true;
-                       rotation_fail_code = cmd_ret;
-               }
-       }
-
-       if (!session->active) {
-               session->rotated_after_last_stop = true;
-       }
-
-       if (!chunk_being_archived) {
-               DBG("Rotating session \"%s\" from a \"NULL\" trace chunk to a new trace chunk, skipping completion check",
-                               session->name);
-               if (failed_to_rotate) {
-                       cmd_ret = rotation_fail_code;
-                       goto error;
-               }
-               cmd_ret = LTTNG_OK;
-               goto end;
-       }
-
-       session->rotation_state = LTTNG_ROTATION_STATE_ONGOING;
-       chunk_status = lttng_trace_chunk_get_id(chunk_being_archived,
-                       &ongoing_rotation_chunk_id);
-       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-
-       ret = session_close_trace_chunk(session, chunk_being_archived,
-               command, session->last_chunk_path);
-       if (ret) {
-               cmd_ret = LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
-               goto error;
-       }
-
-       if (failed_to_rotate) {
-               cmd_ret = rotation_fail_code;
-               goto error;
-       }
-
-       session->quiet_rotation = quiet_rotation;
-       ret = timer_session_rotation_pending_check_start(session,
-                       DEFAULT_ROTATE_PENDING_TIMER);
-       if (ret) {
-               cmd_ret = LTTNG_ERR_UNK;
-               goto error;
-       }
-
-       if (rotate_return) {
-               rotate_return->rotation_id = ongoing_rotation_chunk_id;
-       }
-
-       session->chunk_being_archived = chunk_being_archived;
-       chunk_being_archived = NULL;
-       if (!quiet_rotation) {
-               ret = notification_thread_command_session_rotation_ongoing(
-                               the_notification_thread_handle, session->name,
-                               session->uid, session->gid,
-                               ongoing_rotation_chunk_id);
-               if (ret != LTTNG_OK) {
-                       ERR("Failed to notify notification thread that a session rotation is ongoing for session %s",
-                                       session->name);
-                       cmd_ret = ret;
-               }
-       }
-
-       DBG("Cmd rotate session %s, archive_id %" PRIu64 " sent",
-                       session->name, ongoing_rotation_chunk_id);
-end:
-       lttng_trace_chunk_put(new_trace_chunk);
-       lttng_trace_chunk_put(chunk_being_archived);
-       ret = (cmd_ret == LTTNG_OK) ? cmd_ret : -((int) cmd_ret);
-       return ret;
-error:
-       if (session_reset_rotation_state(session,
-                       LTTNG_ROTATION_STATE_ERROR)) {
-               ERR("Failed to reset rotation state of session \"%s\"",
-                               session->name);
-       }
-       goto end;
-}
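
One of the guards in cmd_rotate_session() above rejects explicit rotations when tracing to a relay daemon older than 2.11. A stand-alone restatement of that version gate, with invented parameter names, purely for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical mirror of the relayd version check in cmd_rotate_session(). */
static bool relayd_supports_rotation(uint32_t relay_major, uint32_t relay_minor)
{
        /* Network rotations require lttng-relayd 2.11 or newer. */
        return !(relay_major == 2 && relay_minor < 11);
}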
-
-/*
- * Command LTTNG_ROTATION_GET_INFO from the lttng-ctl library.
- *
- * Check if the session has finished its rotation.
- *
- * Return LTTNG_OK on success or else an LTTNG_ERR code.
- */
-int cmd_rotate_get_info(struct ltt_session *session,
-               struct lttng_rotation_get_info_return *info_return,
-               uint64_t rotation_id)
-{
-       enum lttng_error_code cmd_ret = LTTNG_OK;
-       enum lttng_rotation_state rotation_state;
-
-       DBG("Cmd rotate_get_info session %s, rotation id %" PRIu64, session->name,
-                       session->most_recent_chunk_id.value);
-
-       if (session->chunk_being_archived) {
-               enum lttng_trace_chunk_status chunk_status;
-               uint64_t chunk_id;
-
-               chunk_status = lttng_trace_chunk_get_id(
-                               session->chunk_being_archived,
-                               &chunk_id);
-               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-
-               rotation_state = rotation_id == chunk_id ?
-                               LTTNG_ROTATION_STATE_ONGOING :
-                               LTTNG_ROTATION_STATE_EXPIRED;
-       } else {
-               if (session->last_archived_chunk_id.is_set &&
-                               rotation_id != session->last_archived_chunk_id.value) {
-                       rotation_state = LTTNG_ROTATION_STATE_EXPIRED;
-               } else {
-                       rotation_state = session->rotation_state;
-               }
-       }
-
-       switch (rotation_state) {
-       case LTTNG_ROTATION_STATE_NO_ROTATION:
-               DBG("Reporting that no rotation has occurred within the lifetime of session \"%s\"",
-                               session->name);
-               goto end;
-       case LTTNG_ROTATION_STATE_EXPIRED:
-               DBG("Reporting that the rotation state of rotation id %" PRIu64 " of session \"%s\" has expired",
-                               rotation_id, session->name);
-               break;
-       case LTTNG_ROTATION_STATE_ONGOING:
-               DBG("Reporting that rotation id %" PRIu64 " of session \"%s\" is still pending",
-                               rotation_id, session->name);
-               break;
-       case LTTNG_ROTATION_STATE_COMPLETED:
-       {
-               int fmt_ret;
-               char *chunk_path;
-               char *current_tracing_path_reply;
-               size_t current_tracing_path_reply_len;
-
-               DBG("Reporting that rotation id %" PRIu64 " of session \"%s\" is completed",
-                               rotation_id, session->name);
-
-               switch (session_get_consumer_destination_type(session)) {
-               case CONSUMER_DST_LOCAL:
-                       current_tracing_path_reply =
-                                       info_return->location.local.absolute_path;
-                       current_tracing_path_reply_len =
-                                       sizeof(info_return->location.local.absolute_path);
-                       info_return->location_type =
-                                       (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_LOCAL;
-                       fmt_ret = asprintf(&chunk_path,
-                                       "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY "/%s",
-                                       session_get_base_path(session),
-                                       session->last_archived_chunk_name);
-                       if (fmt_ret == -1) {
-                               PERROR("Failed to format the path of the last archived trace chunk");
-                               info_return->status = LTTNG_ROTATION_STATUS_ERROR;
-                               cmd_ret = LTTNG_ERR_UNK;
-                               goto end;
-                       }
-                       break;
-               case CONSUMER_DST_NET:
-               {
-                       uint16_t ctrl_port, data_port;
-
-                       current_tracing_path_reply =
-                                       info_return->location.relay.relative_path;
-                       current_tracing_path_reply_len =
-                                       sizeof(info_return->location.relay.relative_path);
-                       /* Currently the only supported relay protocol. */
-                       info_return->location.relay.protocol =
-                                       (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_RELAY_PROTOCOL_TYPE_TCP;
-
-                       fmt_ret = lttng_strncpy(info_return->location.relay.host,
-                                       session_get_net_consumer_hostname(session),
-                                       sizeof(info_return->location.relay.host));
-                       if (fmt_ret) {
-                               ERR("Failed to copy host name to rotate_get_info reply");
-                               info_return->status = LTTNG_ROTATION_STATUS_ERROR;
-                               cmd_ret = LTTNG_ERR_SET_URL;
-                               goto end;
-                       }
-
-                       session_get_net_consumer_ports(session, &ctrl_port, &data_port);
-                       info_return->location.relay.ports.control = ctrl_port;
-                       info_return->location.relay.ports.data = data_port;
-                       info_return->location_type =
-                                       (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_RELAY;
-                       chunk_path = strdup(session->last_chunk_path);
-                       if (!chunk_path) {
-                               ERR("Failed to allocate the path of the last archived trace chunk");
-                               info_return->status = LTTNG_ROTATION_STATUS_ERROR;
-                               cmd_ret = LTTNG_ERR_UNK;
-                               goto end;
-                       }
-                       break;
-               }
-               default:
-                       abort();
-               }
-
-               fmt_ret = lttng_strncpy(current_tracing_path_reply,
-                               chunk_path, current_tracing_path_reply_len);
-               free(chunk_path);
-               if (fmt_ret) {
-                       ERR("Failed to copy path of the last archived trace chunk to rotate_get_info reply");
-                       info_return->status = LTTNG_ROTATION_STATUS_ERROR;
-                       cmd_ret = LTTNG_ERR_UNK;
-                       goto end;
-               }
-
-               break;
-       }
-       case LTTNG_ROTATION_STATE_ERROR:
-               DBG("Reporting that an error occurred during rotation %" PRIu64 " of session \"%s\"",
-                               rotation_id, session->name);
-               break;
-       default:
-               abort();
-       }
-
-       cmd_ret = LTTNG_OK;
-end:
-       info_return->status = (int32_t) rotation_state;
-       return cmd_ret;
-}
-
-/*
- * Command LTTNG_ROTATION_SET_SCHEDULE from the lttng-ctl library.
- *
- * Configure the automatic rotation parameters.
- * 'activate' to true means activate the rotation schedule type with 'new_value'.
- * 'activate' to false means deactivate the rotation schedule and validate that
- * 'new_value' has the same value as the currently active value.
- *
- * Return LTTNG_OK on success or else a positive LTTNG_ERR code.
- */
-int cmd_rotation_set_schedule(struct ltt_session *session,
-               bool activate, enum lttng_rotation_schedule_type schedule_type,
-               uint64_t new_value,
-               struct notification_thread_handle *notification_thread_handle)
-{
-       int ret;
-       uint64_t *parameter_value;
-
-       LTTNG_ASSERT(session);
-
-       DBG("Cmd rotate set schedule session %s", session->name);
-
-       if (session->live_timer || !session->output_traces) {
-               DBG("Failing ROTATION_SET_SCHEDULE command as the rotation feature is not available for this session");
-               ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
-               goto end;
-       }
-
-       switch (schedule_type) {
-       case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
-               parameter_value = &session->rotate_size;
-               break;
-       case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
-               parameter_value = &session->rotate_timer_period;
-               if (new_value >= UINT_MAX) {
-                       DBG("Failing ROTATION_SET_SCHEDULE command as the value requested for a periodic rotation schedule is invalid: %" PRIu64 " > %u (UINT_MAX)",
-                                       new_value, UINT_MAX);
-                       ret = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-               break;
-       default:
-               WARN("Failing ROTATION_SET_SCHEDULE command on unknown schedule type");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       /* Improper use of the API. */
-       if (new_value == -1ULL) {
-               WARN("Failing ROTATION_SET_SCHEDULE command as the value requested is -1");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       /*
-        * As indicated in struct ltt_session's comments, a value of == 0 means
-        * this schedule rotation type is not in use.
-        *
-        * Reject the command if we were asked to activate a schedule that was
-        * already active.
-        */
-       if (activate && *parameter_value != 0) {
-               DBG("Failing ROTATION_SET_SCHEDULE (activate) command as the schedule is already active");
-               ret = LTTNG_ERR_ROTATION_SCHEDULE_SET;
-               goto end;
-       }
-
-       /*
-        * Reject the command if we were asked to deactivate a schedule that was
-        * not active.
-        */
-       if (!activate && *parameter_value == 0) {
-               DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as the schedule is already inactive");
-               ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
-               goto end;
-       }
-
-       /*
-        * Reject the command if we were asked to deactivate a schedule that
-        * doesn't exist.
-        */
-       if (!activate && *parameter_value != new_value) {
-               DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as an inexistant schedule was provided");
-               ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
-               goto end;
-       }
-
-       *parameter_value = activate ? new_value : 0;
-
-       switch (schedule_type) {
-       case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
-               if (activate && session->active) {
-                       /*
-                        * Only start the timer if the session is active,
-                        * otherwise it will be started when the session starts.
-                        */
-                       ret = timer_session_rotation_schedule_timer_start(
-                                       session, new_value);
-                       if (ret) {
-                               ERR("Failed to enable session rotation timer in ROTATION_SET_SCHEDULE command");
-                               ret = LTTNG_ERR_UNK;
-                               goto end;
-                       }
-               } else {
-                       ret = timer_session_rotation_schedule_timer_stop(
-                                       session);
-                       if (ret) {
-                               ERR("Failed to disable session rotation timer in ROTATION_SET_SCHEDULE command");
-                               ret = LTTNG_ERR_UNK;
-                               goto end;
-                       }
-               }
-               break;
-       case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
-               if (activate) {
-                       ret = subscribe_session_consumed_size_rotation(session,
-                                       new_value, notification_thread_handle);
-                       if (ret) {
-                               ERR("Failed to enable consumed-size notification in ROTATION_SET_SCHEDULE command");
-                               ret = LTTNG_ERR_UNK;
-                               goto end;
-                       }
-               } else {
-                       ret = unsubscribe_session_consumed_size_rotation(session,
-                                       notification_thread_handle);
-                       if (ret) {
-                               ERR("Failed to disable consumed-size notification in ROTATION_SET_SCHEDULE command");
-                               ret = LTTNG_ERR_UNK;
-                               goto end;
-                       }
-
-               }
-               break;
-       default:
-               /* Would have been caught before. */
-               abort();
-       }
-
-       ret = LTTNG_OK;
-
-       goto end;
-
-end:
-       return ret;
-}
-
-/* Wait for a given path to be removed before continuing. */
-static enum lttng_error_code wait_on_path(void *path_data)
-{
-       const char *shm_path = path_data;
-
-       DBG("Waiting for the shm path at %s to be removed before completing session destruction",
-                       shm_path);
-       while (true) {
-               int ret;
-               struct stat st;
-
-               ret = stat(shm_path, &st);
-               if (ret) {
-                       if (errno != ENOENT) {
-                               PERROR("stat() returned an error while checking for the existence of the shm path");
-                       } else {
-                               DBG("shm path no longer exists, completing the destruction of session");
-                       }
-                       break;
-               } else {
-                       if (!S_ISDIR(st.st_mode)) {
-                               ERR("The type of shm path %s returned by stat() is not a directory; aborting the wait for shm path removal",
-                                               shm_path);
-                               break;
-                       }
-               }
-               usleep(SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US);
-       }
-       return LTTNG_OK;
-}
-
-/*
- * Returns a pointer to a handler to run on completion of a command.
- * Returns NULL if no handler has to be run for the last command executed.
- */
-const struct cmd_completion_handler *cmd_pop_completion_handler(void)
-{
-       struct cmd_completion_handler *handler = current_completion_handler;
-
-       current_completion_handler = NULL;
-       return handler;
-}
-
-/*
- * Init command subsystem.
- */
-void cmd_init(void)
-{
-       /*
-        * Set network sequence index to 1 for streams to match a relayd
-        * socket on the consumer side.
-        */
-       pthread_mutex_lock(&relayd_net_seq_idx_lock);
-       relayd_net_seq_idx = 1;
-       pthread_mutex_unlock(&relayd_net_seq_idx_lock);
-
-       DBG("Command subsystem initialized");
-}
diff --git a/src/bin/lttng-sessiond/cmd.cpp b/src/bin/lttng-sessiond/cmd.cpp
new file mode 100644 (file)
index 0000000..4ddff45
--- /dev/null
@@ -0,0 +1,5894 @@
+/*
+ * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
+ * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+
+#define _LGPL_SOURCE
+#include <algorithm>
+#include <inttypes.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <urcu/list.h>
+#include <urcu/uatomic.h>
+
+#include <common/buffer-view.h>
+#include <common/common.h>
+#include <common/compat/string.h>
+#include <common/defaults.h>
+#include <common/dynamic-buffer.h>
+#include <common/kernel-ctl/kernel-ctl.h>
+#include <common/payload-view.h>
+#include <common/payload.h>
+#include <common/relayd/relayd.h>
+#include <common/sessiond-comm/sessiond-comm.h>
+#include <common/string-utils/string-utils.h>
+#include <common/trace-chunk.h>
+#include <common/utils.h>
+#include <lttng/action/action-internal.h>
+#include <lttng/action/action.h>
+#include <lttng/channel-internal.h>
+#include <lttng/channel.h>
+#include <lttng/condition/condition-internal.h>
+#include <lttng/condition/condition.h>
+#include <lttng/condition/event-rule-matches-internal.h>
+#include <lttng/condition/event-rule-matches.h>
+#include <lttng/error-query-internal.h>
+#include <lttng/event-rule/event-rule-internal.h>
+#include <lttng/event-rule/event-rule.h>
+#include <lttng/location-internal.h>
+#include <lttng/lttng-error.h>
+#include <lttng/rotate-internal.h>
+#include <lttng/session-descriptor-internal.h>
+#include <lttng/session-internal.h>
+#include <lttng/tracker.h>
+#include <lttng/trigger/trigger-internal.h>
+#include <lttng/userspace-probe-internal.h>
+
+#include "agent-thread.h"
+#include "agent.h"
+#include "buffer-registry.h"
+#include "channel.h"
+#include "cmd.h"
+#include "consumer.h"
+#include "event-notifier-error-accounting.h"
+#include "event.h"
+#include "health-sessiond.h"
+#include "kernel-consumer.h"
+#include "kernel.h"
+#include "lttng-sessiond.h"
+#include "lttng-syscall.h"
+#include "notification-thread-commands.h"
+#include "notification-thread.h"
+#include "rotate.h"
+#include "rotation-thread.h"
+#include "session.h"
+#include "timer.h"
+#include "tracker.h"
+#include "utils.h"
+
+/* Sleep for 100ms between each check for the shm path's deletion. */
+#define SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US 100000
+
+struct cmd_destroy_session_reply_context {
+       int reply_sock_fd;
+       bool implicit_rotation_on_destroy;
+       /*
+        * Indicates whether or not an error occurred while launching the
+        * destruction of a session.
+        */
+       enum lttng_error_code destruction_status;
+};
+
+static enum lttng_error_code wait_on_path(void *path);
+
+/*
+ * Command completion handler that is used by the destroy command
+ * when a session that has a non-default shm_path is being destroyed.
+ *
+ * See comment in cmd_destroy_session() for the rationale.
+ */
+static struct destroy_completion_handler {
+       struct cmd_completion_handler handler;
+       char shm_path[member_sizeof(struct ltt_session, shm_path)];
+} destroy_completion_handler = {
+       .handler = {
+               .run = wait_on_path,
+               .data = destroy_completion_handler.shm_path
+       },
+       .shm_path = { 0 },
+};
+
+static struct cmd_completion_handler *current_completion_handler;
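
For context, a minimal sketch of how the pending completion handler is meant
to be consumed, assuming the cmd_pop_completion_handler() accessor carried
over from the C version of this file; the wrapper name below is illustrative
only and not part of this patch:

    static enum lttng_error_code run_pending_completion_handler(void)
    {
            /* Illustrative helper, not part of this patch. */
            const struct cmd_completion_handler *handler =
                            cmd_pop_completion_handler();

            if (!handler) {
                    /* The last command did not install a handler. */
                    return LTTNG_OK;
            }

            /* E.g. wait_on_path() invoked with the session's shm_path. */
            return handler->run(handler->data);
    }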
+
+/*
+ * Used to keep a unique index for each relayd socket created. This value is
+ * associated with streams on the consumer so it can match the right relayd
+ * to send to. It must be accessed with the relayd_net_seq_idx_lock held.
+ */
+static pthread_mutex_t relayd_net_seq_idx_lock = PTHREAD_MUTEX_INITIALIZER;
+static uint64_t relayd_net_seq_idx;
+
+static int validate_ust_event_name(const char *);
+static int cmd_enable_event_internal(struct ltt_session *session,
+               const struct lttng_domain *domain,
+               char *channel_name, struct lttng_event *event,
+               char *filter_expression,
+               struct lttng_bytecode *filter,
+               struct lttng_event_exclusion *exclusion,
+               int wpipe);
+
+/*
+ * Create a session path used by list_lttng_sessions when the session
+ * consumer is on the network.
+ */
+static int build_network_session_path(char *dst, size_t size,
+               struct ltt_session *session)
+{
+       int ret, kdata_port, udata_port;
+       struct lttng_uri *kuri = NULL, *uuri = NULL, *uri = NULL;
+       char tmp_uurl[PATH_MAX], tmp_urls[PATH_MAX];
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(dst);
+
+       memset(tmp_urls, 0, sizeof(tmp_urls));
+       memset(tmp_uurl, 0, sizeof(tmp_uurl));
+
+       kdata_port = udata_port = DEFAULT_NETWORK_DATA_PORT;
+
+       if (session->kernel_session && session->kernel_session->consumer) {
+               kuri = &session->kernel_session->consumer->dst.net.control;
+               kdata_port = session->kernel_session->consumer->dst.net.data.port;
+       }
+
+       if (session->ust_session && session->ust_session->consumer) {
+               uuri = &session->ust_session->consumer->dst.net.control;
+               udata_port = session->ust_session->consumer->dst.net.data.port;
+       }
+
+       if (uuri == NULL && kuri == NULL) {
+               uri = &session->consumer->dst.net.control;
+               kdata_port = session->consumer->dst.net.data.port;
+       } else if (kuri && uuri) {
+               ret = uri_compare(kuri, uuri);
+               if (ret) {
+                       /* Not Equal */
+                       uri = kuri;
+                       /* Build uuri URL string */
+                       ret = uri_to_str_url(uuri, tmp_uurl, sizeof(tmp_uurl));
+                       if (ret < 0) {
+                               goto error;
+                       }
+               } else {
+                       uri = kuri;
+               }
+       } else if (kuri && uuri == NULL) {
+               uri = kuri;
+       } else if (uuri && kuri == NULL) {
+               uri = uuri;
+       }
+
+       ret = uri_to_str_url(uri, tmp_urls, sizeof(tmp_urls));
+       if (ret < 0) {
+               goto error;
+       }
+
+       /*
+        * Do we have a UST URL set? If so, we have both kernel and UST URLs
+        * to print.
+        */
+       if (*tmp_uurl != '\0') {
+               ret = snprintf(dst, size, "[K]: %s [data: %d] -- [U]: %s [data: %d]",
+                               tmp_urls, kdata_port, tmp_uurl, udata_port);
+       } else {
+               int dport;
+               if (kuri || (!kuri && !uuri)) {
+                       dport = kdata_port;
+               } else {
+                       /* No kernel URI, use the UST port. */
+                       dport = udata_port;
+               }
+               ret = snprintf(dst, size, "%s [data: %d]", tmp_urls, dport);
+       }
+
+error:
+       return ret;
+}
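
A hedged usage sketch of the helper above (the buffer name and error handling
below are illustrative, not taken from this patch):

    char net_path[PATH_MAX] = {};

    if (build_network_session_path(net_path, sizeof(net_path), session) < 0) {
            ERR("Failed to format the network session path");
    }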
+
+/*
+ * Get run-time attributes if the session has been started (discarded events,
+ * lost packets).
+ */
+static int get_kernel_runtime_stats(struct ltt_session *session,
+               struct ltt_kernel_channel *kchan, uint64_t *discarded_events,
+               uint64_t *lost_packets)
+{
+       int ret;
+
+       if (!session->has_been_started) {
+               ret = 0;
+               *discarded_events = 0;
+               *lost_packets = 0;
+               goto end;
+       }
+
+       ret = consumer_get_discarded_events(session->id, kchan->key,
+                       session->kernel_session->consumer,
+                       discarded_events);
+       if (ret < 0) {
+               goto end;
+       }
+
+       ret = consumer_get_lost_packets(session->id, kchan->key,
+                       session->kernel_session->consumer,
+                       lost_packets);
+       if (ret < 0) {
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Get run-time attributes if the session has been started (discarded events,
+ * lost packets).
+ */
+static int get_ust_runtime_stats(struct ltt_session *session,
+               struct ltt_ust_channel *uchan, uint64_t *discarded_events,
+               uint64_t *lost_packets)
+{
+       int ret;
+       struct ltt_ust_session *usess;
+
+       if (!discarded_events || !lost_packets) {
+               ret = -1;
+               goto end;
+       }
+
+       usess = session->ust_session;
+       LTTNG_ASSERT(discarded_events);
+       LTTNG_ASSERT(lost_packets);
+
+       if (!usess || !session->has_been_started) {
+               *discarded_events = 0;
+               *lost_packets = 0;
+               ret = 0;
+               goto end;
+       }
+
+       if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
+               ret = ust_app_uid_get_channel_runtime_stats(usess->id,
+                               &usess->buffer_reg_uid_list,
+                               usess->consumer, uchan->id,
+                               uchan->attr.overwrite,
+                               discarded_events,
+                               lost_packets);
+       } else if (usess->buffer_type == LTTNG_BUFFER_PER_PID) {
+               ret = ust_app_pid_get_channel_runtime_stats(usess,
+                               uchan, usess->consumer,
+                               uchan->attr.overwrite,
+                               discarded_events,
+                               lost_packets);
+               if (ret < 0) {
+                       goto end;
+               }
+               *discarded_events += uchan->per_pid_closed_app_discarded;
+               *lost_packets += uchan->per_pid_closed_app_lost;
+       } else {
+               ERR("Unsupported buffer type");
+               abort();
+               ret = -1;
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Fill lttng_channel array of all channels.
+ */
+static ssize_t list_lttng_channels(enum lttng_domain_type domain,
+               struct ltt_session *session, struct lttng_channel *channels,
+               struct lttng_channel_extended *chan_exts)
+{
+       int i = 0, ret = 0;
+       struct ltt_kernel_channel *kchan;
+
+       DBG("Listing channels for session %s", session->name);
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               /* Kernel channels */
+               if (session->kernel_session != NULL) {
+                       cds_list_for_each_entry(kchan,
+                                       &session->kernel_session->channel_list.head, list) {
+                               uint64_t discarded_events, lost_packets;
+                               struct lttng_channel_extended *extended;
+
+                               extended = (struct lttng_channel_extended *)
+                                               kchan->channel->attr.extended.ptr;
+
+                               ret = get_kernel_runtime_stats(session, kchan,
+                                               &discarded_events, &lost_packets);
+                               if (ret < 0) {
+                                       goto end;
+                               }
+                               /* Copy lttng_channel struct to array */
+                               memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
+                               channels[i].enabled = kchan->enabled;
+                               chan_exts[i].discarded_events =
+                                               discarded_events;
+                               chan_exts[i].lost_packets = lost_packets;
+                               chan_exts[i].monitor_timer_interval =
+                                               extended->monitor_timer_interval;
+                               chan_exts[i].blocking_timeout = 0;
+                               i++;
+                       }
+               }
+               break;
+       case LTTNG_DOMAIN_UST:
+       {
+               struct lttng_ht_iter iter;
+               struct ltt_ust_channel *uchan;
+
+               rcu_read_lock();
+               cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
+                               &iter.iter, uchan, node.node) {
+                       uint64_t discarded_events = 0, lost_packets = 0;
+
+                       if (lttng_strncpy(channels[i].name, uchan->name,
+                                       LTTNG_SYMBOL_NAME_LEN)) {
+                               break;
+                       }
+                       channels[i].attr.overwrite = uchan->attr.overwrite;
+                       channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
+                       channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
+                       channels[i].attr.switch_timer_interval =
+                               uchan->attr.switch_timer_interval;
+                       channels[i].attr.read_timer_interval =
+                               uchan->attr.read_timer_interval;
+                       channels[i].enabled = uchan->enabled;
+                       channels[i].attr.tracefile_size = uchan->tracefile_size;
+                       channels[i].attr.tracefile_count = uchan->tracefile_count;
+
+                       /*
+                        * Map enum lttng_ust_output to enum lttng_event_output.
+                        */
+                       switch (uchan->attr.output) {
+                       case LTTNG_UST_ABI_MMAP:
+                               channels[i].attr.output = LTTNG_EVENT_MMAP;
+                               break;
+                       default:
+                               /*
+                                * LTTNG_UST_MMAP is the only supported UST
+                                * output mode.
+                                */
+                               abort();
+                               break;
+                       }
+
+                       chan_exts[i].monitor_timer_interval =
+                                       uchan->monitor_timer_interval;
+                       chan_exts[i].blocking_timeout =
+                               uchan->attr.u.s.blocking_timeout;
+
+                       ret = get_ust_runtime_stats(session, uchan,
+                                       &discarded_events, &lost_packets);
+                       if (ret < 0) {
+                               break;
+                       }
+                       chan_exts[i].discarded_events = discarded_events;
+                       chan_exts[i].lost_packets = lost_packets;
+                       i++;
+               }
+               rcu_read_unlock();
+               break;
+       }
+       default:
+               break;
+       }
+
+end:
+       if (ret < 0) {
+               return -LTTNG_ERR_FATAL;
+       } else {
+               return LTTNG_OK;
+       }
+}
+
+static int append_extended_info(const char *filter_expression,
+               struct lttng_event_exclusion *exclusion,
+               struct lttng_userspace_probe_location *probe_location,
+               struct lttng_payload *payload)
+{
+       int ret = 0;
+       size_t filter_len = 0;
+       size_t nb_exclusions = 0;
+       size_t userspace_probe_location_len = 0;
+       struct lttcomm_event_extended_header extended_header = {};
+       struct lttcomm_event_extended_header *p_extended_header;
+       const size_t original_payload_size = payload->buffer.size;
+
+       ret = lttng_dynamic_buffer_append(&payload->buffer, &extended_header,
+                       sizeof(extended_header));
+       if (ret) {
+               goto end;
+       }
+
+       if (filter_expression) {
+               filter_len = strlen(filter_expression) + 1;
+               ret = lttng_dynamic_buffer_append(&payload->buffer,
+                               filter_expression, filter_len);
+               if (ret) {
+                       goto end;
+               }
+       }
+
+       if (exclusion) {
+               const size_t len = exclusion->count * LTTNG_SYMBOL_NAME_LEN;
+
+               nb_exclusions = exclusion->count;
+
+               ret = lttng_dynamic_buffer_append(
+                               &payload->buffer, &exclusion->names, len);
+               if (ret) {
+                       goto end;
+               }
+       }
+
+       if (probe_location) {
+               const size_t size_before_probe = payload->buffer.size;
+
+               ret = lttng_userspace_probe_location_serialize(probe_location,
+                               payload);
+               if (ret < 0) {
+                       ret = -1;
+                       goto end;
+               }
+
+               userspace_probe_location_len =
+                               payload->buffer.size - size_before_probe;
+       }
+
+       /* Set header fields */
+       p_extended_header = (struct lttcomm_event_extended_header *)
+                       (payload->buffer.data + original_payload_size);
+
+       p_extended_header->filter_len = filter_len;
+       p_extended_header->nb_exclusions = nb_exclusions;
+       p_extended_header->userspace_probe_location_len =
+                       userspace_probe_location_len;
+
+       ret = 0;
+end:
+       return ret;
+}
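
To make the serialization explicit, here is a sketch of the record layout
produced by append_extended_info(), inferred from the code above; the event
listing functions below first emit an array of fixed-size struct lttng_event
entries, then one such extended record per event:

    struct lttcomm_event_extended_header   filter_len, nb_exclusions,
                                           userspace_probe_location_len
    filter expression                      filter_len bytes (includes '\0')
    exclusion names                        nb_exclusions * LTTNG_SYMBOL_NAME_LEN bytes
    userspace probe location               userspace_probe_location_len bytes (serialized)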
+
+/*
+ * Create a list of agent domain events.
+ *
+ * Return the number of events in the list on success, or a negative value
+ * on error.
+ */
+static int list_lttng_agent_events(struct agent *agt,
+               struct lttng_payload *payload)
+{
+       int nb_events = 0, ret = 0;
+       const struct agent_event *agent_event;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(agt);
+
+       DBG3("Listing agent events");
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry (
+                       agt->events->ht, &iter.iter, agent_event, node.node) {
+               struct lttng_event event {};
+
+               event.loglevel_type = agent_event->loglevel_type;
+               event.loglevel = agent_event->loglevel_value;
+               event.enabled = AGENT_EVENT_IS_ENABLED(agent_event);
+
+               ret = lttng_strncpy(event.name, agent_event->name, sizeof(event.name));
+               if (ret) {
+                       /* Internal error, invalid name. */
+                       ERR("Invalid event name while listing agent events: '%s' exceeds the maximal allowed length of %zu bytes",
+                                       agent_event->name, sizeof(event.name));
+                       ret = -LTTNG_ERR_UNK;
+                       goto end;
+               }
+
+               ret = lttng_dynamic_buffer_append(
+                               &payload->buffer, &event, sizeof(event));
+               if (ret) {
+                       ERR("Failed to append event to payload");
+                       ret = -LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+
+               nb_events++;
+       }
+
+       cds_lfht_for_each_entry (
+               agt->events->ht, &iter.iter, agent_event, node.node) {
+               /* Append extended info. */
+               ret = append_extended_info(agent_event->filter_expression, NULL,
+                               NULL, payload);
+               if (ret) {
+                       ERR("Failed to append extended event info to payload");
+                       ret = -LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+       }
+
+       ret = nb_events;
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Create a list of ust global domain events.
+ */
+static int list_lttng_ust_global_events(char *channel_name,
+               struct ltt_ust_domain_global *ust_global,
+               struct lttng_payload *payload)
+{
+       int ret = 0;
+       unsigned int nb_events = 0;
+       struct lttng_ht_iter iter;
+       const struct lttng_ht_node_str *node;
+       const struct ltt_ust_channel *uchan;
+       const struct ltt_ust_event *uevent;
+
+       DBG("Listing UST global events for channel %s", channel_name);
+
+       rcu_read_lock();
+
+       lttng_ht_lookup(ust_global->channels, (void *) channel_name, &iter);
+       node = lttng_ht_iter_get_node_str(&iter);
+       if (node == NULL) {
+               ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+               goto end;
+       }
+
+       uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);
+
+       DBG3("Listing UST global events");
+
+       cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
+               struct lttng_event event = {};
+
+               if (uevent->internal) {
+                       continue;
+               }
+
+               ret = lttng_strncpy(event.name, uevent->attr.name, sizeof(event.name));
+               if (ret) {
+                       /* Internal error, invalid name. */
+                       ERR("Invalid event name while listing user space tracer events: '%s' exceeds the maximal allowed length of %zu bytes",
+                                       uevent->attr.name, sizeof(event.name));
+                       ret = -LTTNG_ERR_UNK;
+                       goto end;
+               }
+
+               event.enabled = uevent->enabled;
+
+               switch (uevent->attr.instrumentation) {
+               case LTTNG_UST_ABI_TRACEPOINT:
+                       event.type = LTTNG_EVENT_TRACEPOINT;
+                       break;
+               case LTTNG_UST_ABI_PROBE:
+                       event.type = LTTNG_EVENT_PROBE;
+                       break;
+               case LTTNG_UST_ABI_FUNCTION:
+                       event.type = LTTNG_EVENT_FUNCTION;
+                       break;
+               }
+
+               event.loglevel = uevent->attr.loglevel;
+               switch (uevent->attr.loglevel_type) {
+               case LTTNG_UST_ABI_LOGLEVEL_ALL:
+                       event.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
+                       break;
+               case LTTNG_UST_ABI_LOGLEVEL_RANGE:
+                       event.loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
+                       break;
+               case LTTNG_UST_ABI_LOGLEVEL_SINGLE:
+                       event.loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
+                       break;
+               }
+
+               if (uevent->filter) {
+                       event.filter = 1;
+               }
+
+               if (uevent->exclusion) {
+                       event.exclusion = 1;
+               }
+
+               ret = lttng_dynamic_buffer_append(&payload->buffer, &event, sizeof(event));
+               if (ret) {
+                       ERR("Failed to append event to payload");
+                       ret = -LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+
+               nb_events++;
+       }
+
+       cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
+               /* Append extended info. */
+               ret = append_extended_info(uevent->filter_expression,
+                               uevent->exclusion, NULL, payload);
+               if (ret) {
+                       ERR("Failed to append extended event info to payload");
+                       ret = -LTTNG_ERR_FATAL;
+                       goto end;
+               }
+       }
+
+       ret = nb_events;
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Fill lttng_event array of all kernel events in the channel.
+ */
+static int list_lttng_kernel_events(char *channel_name,
+               struct ltt_kernel_session *kernel_session,
+               struct lttng_payload *payload)
+{
+       int ret;
+       unsigned int nb_event;
+       const struct ltt_kernel_event *kevent;
+       const struct ltt_kernel_channel *kchan;
+
+       kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
+       if (kchan == NULL) {
+               ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
+               goto error;
+       }
+
+       nb_event = kchan->event_count;
+
+       DBG("Listing events for channel %s", kchan->channel->name);
+
+       /* Kernel channels */
+       cds_list_for_each_entry(kevent, &kchan->events_list.head, list) {
+               struct lttng_event event = {};
+
+               ret = lttng_strncpy(event.name, kevent->event->name, sizeof(event.name));
+               if (ret) {
+                       /* Internal error, invalid name. */
+                       ERR("Invalid event name while listing kernel events: '%s' exceeds the maximal allowed length of %zu bytes",
+                                       kevent->event->name,
+                                       sizeof(event.name));
+                       ret = -LTTNG_ERR_UNK;
+                       goto end;
+               }
+
+               event.enabled = kevent->enabled;
+               event.filter = (unsigned char) !!kevent->filter_expression;
+
+               switch (kevent->event->instrumentation) {
+               case LTTNG_KERNEL_ABI_TRACEPOINT:
+                       event.type = LTTNG_EVENT_TRACEPOINT;
+                       break;
+               case LTTNG_KERNEL_ABI_KRETPROBE:
+                       event.type = LTTNG_EVENT_FUNCTION;
+                       memcpy(&event.attr.probe, &kevent->event->u.kprobe,
+                                       sizeof(struct lttng_kernel_abi_kprobe));
+                       break;
+               case LTTNG_KERNEL_ABI_KPROBE:
+                       event.type = LTTNG_EVENT_PROBE;
+                       memcpy(&event.attr.probe, &kevent->event->u.kprobe,
+                                       sizeof(struct lttng_kernel_abi_kprobe));
+                       break;
+               case LTTNG_KERNEL_ABI_UPROBE:
+                       event.type = LTTNG_EVENT_USERSPACE_PROBE;
+                       break;
+               case LTTNG_KERNEL_ABI_FUNCTION:
+                       event.type = LTTNG_EVENT_FUNCTION;
+                       memcpy(&event.attr.ftrace, &kevent->event->u.ftrace,
+                                       sizeof(struct lttng_kernel_abi_function));
+                       break;
+               case LTTNG_KERNEL_ABI_NOOP:
+                       event.type = LTTNG_EVENT_NOOP;
+                       break;
+               case LTTNG_KERNEL_ABI_SYSCALL:
+                       event.type = LTTNG_EVENT_SYSCALL;
+                       break;
+               case LTTNG_KERNEL_ABI_ALL:
+                       /* fall-through. */
+               default:
+                       abort();
+                       break;
+               }
+
+               ret = lttng_dynamic_buffer_append(
+                               &payload->buffer, &event, sizeof(event));
+               if (ret) {
+                       ERR("Failed to append event to payload");
+                       ret = -LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+       }
+
+       cds_list_for_each_entry(kevent, &kchan->events_list.head, list) {
+               /* Append extended info. */
+               ret = append_extended_info(kevent->filter_expression, NULL,
+                               kevent->userspace_probe_location, payload);
+               if (ret) {
+                       DBG("Error appending extended info message");
+                       ret = -LTTNG_ERR_FATAL;
+                       goto error;
+               }
+       }
+
+end:
+       return nb_event;
+error:
+       return ret;
+}
+
+/*
+ * Add a URI to the consumer output object. Set the correct path depending on
+ * the domain, adding the default trace directory.
+ */
+static enum lttng_error_code add_uri_to_consumer(
+               const struct ltt_session *session,
+               struct consumer_output *consumer,
+               struct lttng_uri *uri, enum lttng_domain_type domain)
+{
+       int ret;
+       enum lttng_error_code ret_code = LTTNG_OK;
+
+       LTTNG_ASSERT(uri);
+
+       if (consumer == NULL) {
+               DBG("No consumer detected. Don't add URI. Stopping.");
+               ret_code = LTTNG_ERR_NO_CONSUMER;
+               goto error;
+       }
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               ret = lttng_strncpy(consumer->domain_subdir,
+                               DEFAULT_KERNEL_TRACE_DIR,
+                               sizeof(consumer->domain_subdir));
+               break;
+       case LTTNG_DOMAIN_UST:
+               ret = lttng_strncpy(consumer->domain_subdir,
+                               DEFAULT_UST_TRACE_DIR,
+                               sizeof(consumer->domain_subdir));
+               break;
+       default:
+               /*
+                * This case is possible if we try to add the URI to the global
+                * tracing session consumer object, in which case there is no
+                * subdir.
+                */
+               memset(consumer->domain_subdir, 0,
+                               sizeof(consumer->domain_subdir));
+               ret = 0;
+       }
+       if (ret) {
+               ERR("Failed to initialize consumer output domain subdirectory");
+               ret_code = LTTNG_ERR_FATAL;
+               goto error;
+       }
+
+       switch (uri->dtype) {
+       case LTTNG_DST_IPV4:
+       case LTTNG_DST_IPV6:
+               DBG2("Setting network URI to consumer");
+
+               if (consumer->type == CONSUMER_DST_NET) {
+                       if ((uri->stype == LTTNG_STREAM_CONTROL &&
+                               consumer->dst.net.control_isset) ||
+                               (uri->stype == LTTNG_STREAM_DATA &&
+                               consumer->dst.net.data_isset)) {
+                               ret_code = LTTNG_ERR_URL_EXIST;
+                               goto error;
+                       }
+               } else {
+                       memset(&consumer->dst, 0, sizeof(consumer->dst));
+               }
+
+               /* Set URI into consumer output object */
+               ret = consumer_set_network_uri(session, consumer, uri);
+               if (ret < 0) {
+                       ret_code = (lttng_error_code) -ret;
+                       goto error;
+               } else if (ret == 1) {
+                       /*
+                        * The URI was already set in the consumer, so do not append the
+                        * subdir again to avoid duplicating the output directory.
+                        */
+                       ret_code = LTTNG_OK;
+                       goto error;
+               }
+               break;
+       case LTTNG_DST_PATH:
+               if (*uri->dst.path != '/' || strstr(uri->dst.path, "../")) {
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto error;
+               }
+               DBG2("Setting trace directory path from URI to %s",
+                               uri->dst.path);
+               memset(&consumer->dst, 0, sizeof(consumer->dst));
+
+               ret = lttng_strncpy(consumer->dst.session_root_path,
+                               uri->dst.path,
+                               sizeof(consumer->dst.session_root_path));
+               if (ret) {
+                       ret_code = LTTNG_ERR_FATAL;
+                       goto error;
+               }
+               consumer->type = CONSUMER_DST_LOCAL;
+               break;
+       }
+
+       ret_code = LTTNG_OK;
+error:
+       return ret_code;
+}
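
A minimal sketch of driving this helper for a local output, assuming the
declarations above; the wrapper name and path are illustrative and not part
of this patch:

    static enum lttng_error_code set_local_output_example(
                    const struct ltt_session *session,
                    struct consumer_output *consumer)
    {
            struct lttng_uri uri = {};

            uri.dtype = LTTNG_DST_PATH;
            if (lttng_strncpy(uri.dst.path, "/tmp/traces",
                            sizeof(uri.dst.path))) {
                    return LTTNG_ERR_FATAL;
            }

            /* No domain: the domain subdir is left empty, as handled above. */
            return add_uri_to_consumer(session, consumer, &uri,
                            LTTNG_DOMAIN_NONE);
    }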
+
+/*
+ * Init tracing by creating the trace directory and sending fds to the kernel
+ * consumer.
+ */
+static int init_kernel_tracing(struct ltt_kernel_session *session)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct consumer_socket *socket;
+
+       LTTNG_ASSERT(session);
+
+       rcu_read_lock();
+
+       if (session->consumer_fds_sent == 0 && session->consumer != NULL) {
+               cds_lfht_for_each_entry(session->consumer->socks->ht, &iter.iter,
+                               socket, node.node) {
+                       pthread_mutex_lock(socket->lock);
+                       ret = kernel_consumer_send_session(socket, session);
+                       pthread_mutex_unlock(socket->lock);
+                       if (ret < 0) {
+                               ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
+                               goto error;
+                       }
+               }
+       }
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Create a socket to the relayd using the URI.
+ *
+ * On success, the relayd_sock pointer is set to the created socket.
+ * Otherwise, it remains untouched and an LTTng error code is returned.
+ */
+static enum lttng_error_code create_connect_relayd(struct lttng_uri *uri,
+               struct lttcomm_relayd_sock **relayd_sock,
+               struct consumer_output *consumer)
+{
+       int ret;
+       enum lttng_error_code status = LTTNG_OK;
+       struct lttcomm_relayd_sock *rsock;
+
+       rsock = lttcomm_alloc_relayd_sock(uri, RELAYD_VERSION_COMM_MAJOR,
+                       RELAYD_VERSION_COMM_MINOR);
+       if (!rsock) {
+               status = LTTNG_ERR_FATAL;
+               goto error;
+       }
+
+       /*
+        * Connect to the relayd so we can proceed with session creation. This
+        * call can block for an arbitrary amount of time, so set the health
+        * state to "in poll execution" around it.
+        */
+       health_poll_entry();
+       ret = relayd_connect(rsock);
+       health_poll_exit();
+       if (ret < 0) {
+               ERR("Unable to reach lttng-relayd");
+               status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
+               goto free_sock;
+       }
+
+       /* Create socket for control stream. */
+       if (uri->stype == LTTNG_STREAM_CONTROL) {
+               uint64_t result_flags;
+
+               DBG3("Creating relayd stream socket from URI");
+
+               /* Check relayd version */
+               ret = relayd_version_check(rsock);
+               if (ret == LTTNG_ERR_RELAYD_VERSION_FAIL) {
+                       status = LTTNG_ERR_RELAYD_VERSION_FAIL;
+                       goto close_sock;
+               } else if (ret < 0) {
+                       ERR("Unable to reach lttng-relayd");
+                       status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
+                       goto close_sock;
+               }
+               consumer->relay_major_version = rsock->major;
+               consumer->relay_minor_version = rsock->minor;
+               ret = relayd_get_configuration(rsock, 0,
+                               &result_flags);
+               if (ret < 0) {
+                       ERR("Unable to get relayd configuration");
+                       status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
+                       goto close_sock;
+               }
+               if (result_flags & LTTCOMM_RELAYD_CONFIGURATION_FLAG_CLEAR_ALLOWED) {
+                       consumer->relay_allows_clear = true;
+               }
+       } else if (uri->stype == LTTNG_STREAM_DATA) {
+               DBG3("Creating relayd data socket from URI");
+       } else {
+               /* Command is not valid */
+               ERR("Relayd invalid stream type: %d", uri->stype);
+               status = LTTNG_ERR_INVALID;
+               goto close_sock;
+       }
+
+       *relayd_sock = rsock;
+
+       return status;
+
+close_sock:
+       /* The returned value is not useful since we are on an error path. */
+       (void) relayd_close(rsock);
+free_sock:
+       free(rsock);
+error:
+       return status;
+}
+
+/*
+ * Connect to the relayd using URI and send the socket to the right consumer.
+ *
+ * The consumer socket lock must be held by the caller.
+ *
+ * Returns LTTNG_OK on success or an LTTng error code on failure.
+ */
+static enum lttng_error_code send_consumer_relayd_socket(
+               unsigned int session_id,
+               struct lttng_uri *relayd_uri,
+               struct consumer_output *consumer,
+               struct consumer_socket *consumer_sock,
+               const char *session_name, const char *hostname,
+               const char *base_path, int session_live_timer,
+               const uint64_t *current_chunk_id,
+               time_t session_creation_time,
+               bool session_name_contains_creation_time)
+{
+       int ret;
+       struct lttcomm_relayd_sock *rsock = NULL;
+       enum lttng_error_code status;
+
+       /* Connect to relayd and make version check if uri is the control. */
+       status = create_connect_relayd(relayd_uri, &rsock, consumer);
+       if (status != LTTNG_OK) {
+               goto relayd_comm_error;
+       }
+       LTTNG_ASSERT(rsock);
+
+       /* Set the network sequence index if not set. */
+       if (consumer->net_seq_index == (uint64_t) -1ULL) {
+               pthread_mutex_lock(&relayd_net_seq_idx_lock);
+               /*
+                * Increment net_seq_idx because we are about to transfer the
+                * new relayd socket to the consumer.
+                * Assign unique key so the consumer can match streams.
+                */
+               consumer->net_seq_index = ++relayd_net_seq_idx;
+               pthread_mutex_unlock(&relayd_net_seq_idx_lock);
+       }
+
+       /* Send relayd socket to consumer. */
+       ret = consumer_send_relayd_socket(consumer_sock, rsock, consumer,
+                       relayd_uri->stype, session_id,
+                       session_name, hostname, base_path,
+                       session_live_timer, current_chunk_id,
+                       session_creation_time, session_name_contains_creation_time);
+       if (ret < 0) {
+               status = LTTNG_ERR_ENABLE_CONSUMER_FAIL;
+               goto close_sock;
+       }
+
+       /* Flag that the corresponding socket was sent. */
+       if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
+               consumer_sock->control_sock_sent = 1;
+       } else if (relayd_uri->stype == LTTNG_STREAM_DATA) {
+               consumer_sock->data_sock_sent = 1;
+       }
+
+       /*
+        * Close the socket, which was dup'd on the consumer side. The session
+        * daemon does NOT keep track of the relayd socket(s) once they are
+        * transferred to the consumer.
+        */
+
+close_sock:
+       if (status != LTTNG_OK) {
+               /*
+                * The consumer output for this session should not be used anymore
+                * since the relayd connection failed thus making any tracing or/and
+                * streaming not usable.
+                */
+               consumer->enabled = 0;
+       }
+       (void) relayd_close(rsock);
+       free(rsock);
+
+relayd_comm_error:
+       return status;
+}
+
+/*
+ * Send both relayd sockets to a specific consumer and domain. This is a
+ * helper function that facilitates sending the information to the consumer
+ * for a session.
+ *
+ * The consumer socket lock must be held by the caller.
+ *
+ * Returns LTTNG_OK, or an LTTng error code on failure.
+ */
+static enum lttng_error_code send_consumer_relayd_sockets(
+               enum lttng_domain_type domain,
+               unsigned int session_id, struct consumer_output *consumer,
+               struct consumer_socket *sock, const char *session_name,
+               const char *hostname, const char *base_path, int session_live_timer,
+               const uint64_t *current_chunk_id, time_t session_creation_time,
+               bool session_name_contains_creation_time)
+{
+       enum lttng_error_code status = LTTNG_OK;
+
+       LTTNG_ASSERT(consumer);
+       LTTNG_ASSERT(sock);
+
+       /* Sending control relayd socket. */
+       if (!sock->control_sock_sent) {
+               status = send_consumer_relayd_socket(session_id,
+                               &consumer->dst.net.control, consumer, sock,
+                               session_name, hostname, base_path, session_live_timer,
+                               current_chunk_id, session_creation_time,
+                               session_name_contains_creation_time);
+               if (status != LTTNG_OK) {
+                       goto error;
+               }
+       }
+
+       /* Sending data relayd socket. */
+       if (!sock->data_sock_sent) {
+               status = send_consumer_relayd_socket(session_id,
+                               &consumer->dst.net.data, consumer, sock,
+                               session_name, hostname, base_path, session_live_timer,
+                               current_chunk_id, session_creation_time,
+                               session_name_contains_creation_time);
+               if (status != LTTNG_OK) {
+                       goto error;
+               }
+       }
+
+error:
+       return status;
+}
+
+/*
+ * Set up relayd connections for a tracing session. First create the sockets
+ * to the relayd, then send them to the right domain consumer. The consumer
+ * type MUST be network.
+ */
+int cmd_setup_relayd(struct ltt_session *session)
+{
+       int ret = LTTNG_OK;
+       struct ltt_ust_session *usess;
+       struct ltt_kernel_session *ksess;
+       struct consumer_socket *socket;
+       struct lttng_ht_iter iter;
+       LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
+
+       LTTNG_ASSERT(session);
+
+       usess = session->ust_session;
+       ksess = session->kernel_session;
+
+       DBG("Setting relayd for session %s", session->name);
+
+       rcu_read_lock();
+       if (session->current_trace_chunk) {
+               enum lttng_trace_chunk_status status = lttng_trace_chunk_get_id(
+                               session->current_trace_chunk, &current_chunk_id.value);
+
+               if (status == LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       current_chunk_id.is_set = true;
+               } else {
+                       ERR("Failed to get current trace chunk id");
+                       ret = LTTNG_ERR_UNK;
+                       goto error;
+               }
+       }
+
+       if (usess && usess->consumer && usess->consumer->type == CONSUMER_DST_NET
+                       && usess->consumer->enabled) {
+               /* For each consumer socket, send relayd sockets */
+               cds_lfht_for_each_entry(usess->consumer->socks->ht, &iter.iter,
+                               socket, node.node) {
+                       pthread_mutex_lock(socket->lock);
+                       ret = send_consumer_relayd_sockets(LTTNG_DOMAIN_UST, session->id,
+                                       usess->consumer, socket,
+                                       session->name, session->hostname,
+                                       session->base_path,
+                                       session->live_timer,
+                                       current_chunk_id.is_set ? &current_chunk_id.value : NULL,
+                                       session->creation_time,
+                                       session->name_contains_creation_time);
+                       pthread_mutex_unlock(socket->lock);
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
+                       /* Session is now ready for network streaming. */
+                       session->net_handle = 1;
+               }
+               session->consumer->relay_major_version =
+                       usess->consumer->relay_major_version;
+               session->consumer->relay_minor_version =
+                       usess->consumer->relay_minor_version;
+               session->consumer->relay_allows_clear =
+                       usess->consumer->relay_allows_clear;
+       }
+
+       if (ksess && ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET
+                       && ksess->consumer->enabled) {
+               cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
+                               socket, node.node) {
+                       pthread_mutex_lock(socket->lock);
+                       ret = send_consumer_relayd_sockets(LTTNG_DOMAIN_KERNEL, session->id,
+                                       ksess->consumer, socket,
+                                       session->name, session->hostname,
+                                       session->base_path,
+                                       session->live_timer,
+                                       current_chunk_id.is_set ? &current_chunk_id.value : NULL,
+                                       session->creation_time,
+                                       session->name_contains_creation_time);
+                       pthread_mutex_unlock(socket->lock);
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
+                       /* Session is now ready for network streaming. */
+                       session->net_handle = 1;
+               }
+               session->consumer->relay_major_version =
+                       ksess->consumer->relay_major_version;
+               session->consumer->relay_minor_version =
+                       ksess->consumer->relay_minor_version;
+               session->consumer->relay_allows_clear =
+                       ksess->consumer->relay_allows_clear;
+       }
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Start a kernel session by opening all necessary streams.
+ */
+int start_kernel_session(struct ltt_kernel_session *ksess)
+{
+       int ret;
+       struct ltt_kernel_channel *kchan;
+
+       /* Open kernel metadata */
+       if (ksess->metadata == NULL && ksess->output_traces) {
+               ret = kernel_open_metadata(ksess);
+               if (ret < 0) {
+                       ret = LTTNG_ERR_KERN_META_FAIL;
+                       goto error;
+               }
+       }
+
+       /* Open kernel metadata stream */
+       if (ksess->metadata && ksess->metadata_stream_fd < 0) {
+               ret = kernel_open_metadata_stream(ksess);
+               if (ret < 0) {
+                       ERR("Kernel create metadata stream failed");
+                       ret = LTTNG_ERR_KERN_STREAM_FAIL;
+                       goto error;
+               }
+       }
+
+       /* For each channel */
+       cds_list_for_each_entry(kchan, &ksess->channel_list.head, list) {
+               if (kchan->stream_count == 0) {
+                       ret = kernel_open_channel_stream(kchan);
+                       if (ret < 0) {
+                               ret = LTTNG_ERR_KERN_STREAM_FAIL;
+                               goto error;
+                       }
+                       /* Update the stream global counter */
+                       ksess->stream_count_global += ret;
+               }
+       }
+
+       /* Setup kernel consumer socket and send fds to it */
+       ret = init_kernel_tracing(ksess);
+       if (ret != 0) {
+               ret = LTTNG_ERR_KERN_START_FAIL;
+               goto error;
+       }
+
+       /* This starts the kernel tracing. */
+       ret = kernel_start_session(ksess);
+       if (ret < 0) {
+               ret = LTTNG_ERR_KERN_START_FAIL;
+               goto error;
+       }
+
+       /* Quiescent wait after starting trace */
+       kernel_wait_quiescent();
+
+       ksess->active = 1;
+
+       ret = LTTNG_OK;
+
+error:
+       return ret;
+}
+
+int stop_kernel_session(struct ltt_kernel_session *ksess)
+{
+       struct ltt_kernel_channel *kchan;
+       bool error_occurred = false;
+       int ret;
+
+       if (!ksess || !ksess->active) {
+               return LTTNG_OK;
+       }
+       DBG("Stopping kernel tracing");
+
+       ret = kernel_stop_session(ksess);
+       if (ret < 0) {
+               ret = LTTNG_ERR_KERN_STOP_FAIL;
+               goto error;
+       }
+
+       kernel_wait_quiescent();
+
+       /* Flush metadata after stopping (if exists) */
+       if (ksess->metadata_stream_fd >= 0) {
+               ret = kernel_metadata_flush_buffer(ksess->metadata_stream_fd);
+               if (ret < 0) {
+                       ERR("Kernel metadata flush failed");
+                       error_occurred = true;
+               }
+       }
+
+       /* Flush all buffers after stopping */
+       cds_list_for_each_entry(kchan, &ksess->channel_list.head, list) {
+               ret = kernel_flush_buffer(kchan);
+               if (ret < 0) {
+                       ERR("Kernel flush buffer error");
+                       error_occurred = true;
+               }
+       }
+
+       ksess->active = 0;
+       if (error_occurred) {
+               ret = LTTNG_ERR_UNK;
+       } else {
+               ret = LTTNG_OK;
+       }
+error:
+       return ret;
+}
+
+/*
+ * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
+ */
+int cmd_disable_channel(struct ltt_session *session,
+               enum lttng_domain_type domain, char *channel_name)
+{
+       int ret;
+       struct ltt_ust_session *usess;
+
+       usess = session->ust_session;
+
+       rcu_read_lock();
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+       {
+               ret = channel_kernel_disable(session->kernel_session,
+                               channel_name);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               kernel_wait_quiescent();
+               break;
+       }
+       case LTTNG_DOMAIN_UST:
+       {
+               struct ltt_ust_channel *uchan;
+               struct lttng_ht *chan_ht;
+
+               chan_ht = usess->domain_global.channels;
+
+               uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
+               if (uchan == NULL) {
+                       ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+                       goto error;
+               }
+
+               ret = channel_ust_disable(usess, uchan);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+               break;
+       }
+       default:
+               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
+               goto error;
+       }
+
+       ret = LTTNG_OK;
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
+ *
+ * The wpipe argument is used as a notifier for the kernel thread.
+ */
+int cmd_enable_channel(struct ltt_session *session,
+               const struct lttng_domain *domain, const struct lttng_channel *_attr, int wpipe)
+{
+       int ret;
+       struct ltt_ust_session *usess = session->ust_session;
+       struct lttng_ht *chan_ht;
+       size_t len;
+       struct lttng_channel attr;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(_attr);
+       LTTNG_ASSERT(domain);
+
+       attr = *_attr;
+       len = lttng_strnlen(attr.name, sizeof(attr.name));
+
+       /* Validate channel name */
+       if (attr.name[0] == '.' ||
+               memchr(attr.name, '/', len) != NULL) {
+               ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
+               goto end;
+       }
+
+       DBG("Enabling channel %s for session %s", attr.name, session->name);
+
+       rcu_read_lock();
+
+       /*
+        * If the session is a live session, disable the switch timer: the
+        * live timer does the same thing but also sends synchronisation
+        * beacons for inactive streams.
+        */
+       if (session->live_timer > 0) {
+               attr.attr.live_timer_interval = session->live_timer;
+               attr.attr.switch_timer_interval = 0;
+       }
+
+       /* Check for feature support */
+       switch (domain->type) {
+       case LTTNG_DOMAIN_KERNEL:
+       {
+               if (kernel_supports_ring_buffer_snapshot_sample_positions() != 1) {
+                       /* Sampling position of buffer is not supported */
+                       WARN("Kernel tracer does not support buffer monitoring. "
+                                       "Setting the monitor interval timer to 0 "
+                                       "(disabled) for channel '%s' of session '%s'",
+                                       attr.name, session->name);
+                       lttng_channel_set_monitor_timer_interval(&attr, 0);
+               }
+               break;
+       }
+       case LTTNG_DOMAIN_UST:
+               break;
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_PYTHON:
+               if (!agent_tracing_is_enabled()) {
+                       DBG("Attempted to enable a channel in an agent domain but the agent thread is not running");
+                       ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
+                       goto error;
+               }
+               break;
+       default:
+               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
+               goto error;
+       }
+
+       switch (domain->type) {
+       case LTTNG_DOMAIN_KERNEL:
+       {
+               struct ltt_kernel_channel *kchan;
+
+               kchan = trace_kernel_get_channel_by_name(attr.name,
+                               session->kernel_session);
+               if (kchan == NULL) {
+                       /*
+                        * Don't try to create a channel if the session has been started at
+                        * some point in time before. The tracer does not allow it.
+                        */
+                       if (session->has_been_started) {
+                               ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
+                               goto error;
+                       }
+
+                       if (session->snapshot.nb_output > 0 ||
+                                       session->snapshot_mode) {
+                               /* Enforce mmap output for snapshot sessions. */
+                               attr.attr.output = LTTNG_EVENT_MMAP;
+                       }
+                       ret = channel_kernel_create(session->kernel_session, &attr, wpipe);
+                       if (attr.name[0] != '\0') {
+                               session->kernel_session->has_non_default_channel = 1;
+                       }
+               } else {
+                       ret = channel_kernel_enable(session->kernel_session, kchan);
+               }
+
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               kernel_wait_quiescent();
+               break;
+       }
+       case LTTNG_DOMAIN_UST:
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_PYTHON:
+       {
+               struct ltt_ust_channel *uchan;
+
+               /*
+                * FIXME
+                *
+                * Current agent implementation limitations force us to allow
+                * only one channel at once in "agent" subdomains. Each
+                * subdomain has a default channel name which must be strictly
+                * adhered to.
+                */
+               if (domain->type == LTTNG_DOMAIN_JUL) {
+                       if (strncmp(attr.name, DEFAULT_JUL_CHANNEL_NAME,
+                                       LTTNG_SYMBOL_NAME_LEN)) {
+                               ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
+                               goto error;
+                       }
+               } else if (domain->type == LTTNG_DOMAIN_LOG4J) {
+                       if (strncmp(attr.name, DEFAULT_LOG4J_CHANNEL_NAME,
+                                       LTTNG_SYMBOL_NAME_LEN)) {
+                               ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
+                               goto error;
+                       }
+               } else if (domain->type == LTTNG_DOMAIN_PYTHON) {
+                       if (strncmp(attr.name, DEFAULT_PYTHON_CHANNEL_NAME,
+                                       LTTNG_SYMBOL_NAME_LEN)) {
+                               ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
+                               goto error;
+                       }
+               }
+
+               chan_ht = usess->domain_global.channels;
+
+               uchan = trace_ust_find_channel_by_name(chan_ht, attr.name);
+               if (uchan == NULL) {
+                       /*
+                        * Don't try to create a channel if the session has been started at
+                        * some point in time before. The tracer does not allow it.
+                        */
+                       if (session->has_been_started) {
+                               ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
+                               goto error;
+                       }
+
+                       ret = channel_ust_create(usess, &attr, domain->buf_type);
+                       if (attr.name[0] != '\0') {
+                               usess->has_non_default_channel = 1;
+                       }
+               } else {
+                       ret = channel_ust_enable(usess, uchan);
+               }
+               break;
+       }
+       default:
+               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
+               goto error;
+       }
+
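+       /*
+        * Remember that the session now has at least one channel that does
+        * not use mmap output.
+        */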
+       if (ret == LTTNG_OK && attr.attr.output != LTTNG_EVENT_MMAP) {
+               session->has_non_mmap_channel = true;
+       }
+error:
+       rcu_read_unlock();
+end:
+       return ret;
+}
+
+enum lttng_error_code cmd_process_attr_tracker_get_tracking_policy(
+               struct ltt_session *session,
+               enum lttng_domain_type domain,
+               enum lttng_process_attr process_attr,
+               enum lttng_tracking_policy *policy)
+{
+       enum lttng_error_code ret_code = LTTNG_OK;
+       const struct process_attr_tracker *tracker;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               if (!session->kernel_session) {
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+               tracker = kernel_get_process_attr_tracker(
+                               session->kernel_session, process_attr);
+               break;
+       case LTTNG_DOMAIN_UST:
+               if (!session->ust_session) {
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+               tracker = trace_ust_get_process_attr_tracker(
+                               session->ust_session, process_attr);
+               break;
+       default:
+               ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
+               goto end;
+       }
+       if (tracker) {
+               *policy = process_attr_tracker_get_tracking_policy(tracker);
+       } else {
+               ret_code = LTTNG_ERR_INVALID;
+       }
+end:
+       return ret_code;
+}
+
+enum lttng_error_code cmd_process_attr_tracker_set_tracking_policy(
+               struct ltt_session *session,
+               enum lttng_domain_type domain,
+               enum lttng_process_attr process_attr,
+               enum lttng_tracking_policy policy)
+{
+       enum lttng_error_code ret_code = LTTNG_OK;
+
+       switch (policy) {
+       case LTTNG_TRACKING_POLICY_INCLUDE_SET:
+       case LTTNG_TRACKING_POLICY_EXCLUDE_ALL:
+       case LTTNG_TRACKING_POLICY_INCLUDE_ALL:
+               break;
+       default:
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               if (!session->kernel_session) {
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+               ret_code = kernel_process_attr_tracker_set_tracking_policy(
+                               session->kernel_session, process_attr, policy);
+               break;
+       case LTTNG_DOMAIN_UST:
+               if (!session->ust_session) {
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+               ret_code = trace_ust_process_attr_tracker_set_tracking_policy(
+                               session->ust_session, process_attr, policy);
+               break;
+       default:
+               ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
+               break;
+       }
+end:
+       return ret_code;
+}
+
+enum lttng_error_code cmd_process_attr_tracker_inclusion_set_add_value(
+               struct ltt_session *session,
+               enum lttng_domain_type domain,
+               enum lttng_process_attr process_attr,
+               const struct process_attr_value *value)
+{
+       enum lttng_error_code ret_code = LTTNG_OK;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               if (!session->kernel_session) {
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+               ret_code = kernel_process_attr_tracker_inclusion_set_add_value(
+                               session->kernel_session, process_attr, value);
+               break;
+       case LTTNG_DOMAIN_UST:
+               if (!session->ust_session) {
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+               ret_code = trace_ust_process_attr_tracker_inclusion_set_add_value(
+                               session->ust_session, process_attr, value);
+               break;
+       default:
+               ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
+               break;
+       }
+end:
+       return ret_code;
+}
+
+enum lttng_error_code cmd_process_attr_tracker_inclusion_set_remove_value(
+               struct ltt_session *session,
+               enum lttng_domain_type domain,
+               enum lttng_process_attr process_attr,
+               const struct process_attr_value *value)
+{
+       enum lttng_error_code ret_code = LTTNG_OK;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               if (!session->kernel_session) {
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+               ret_code = kernel_process_attr_tracker_inclusion_set_remove_value(
+                               session->kernel_session, process_attr, value);
+               break;
+       case LTTNG_DOMAIN_UST:
+               if (!session->ust_session) {
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+               ret_code = trace_ust_process_attr_tracker_inclusion_set_remove_value(
+                               session->ust_session, process_attr, value);
+               break;
+       default:
+               ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
+               break;
+       }
+end:
+       return ret_code;
+}
+
+enum lttng_error_code cmd_process_attr_tracker_get_inclusion_set(
+               struct ltt_session *session,
+               enum lttng_domain_type domain,
+               enum lttng_process_attr process_attr,
+               struct lttng_process_attr_values **values)
+{
+       enum lttng_error_code ret_code = LTTNG_OK;
+       const struct process_attr_tracker *tracker;
+       enum process_attr_tracker_status status;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               if (!session->kernel_session) {
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+               tracker = kernel_get_process_attr_tracker(
+                               session->kernel_session, process_attr);
+               break;
+       case LTTNG_DOMAIN_UST:
+               if (!session->ust_session) {
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+               tracker = trace_ust_get_process_attr_tracker(
+                               session->ust_session, process_attr);
+               break;
+       default:
+               ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
+               goto end;
+       }
+
+       if (!tracker) {
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       status = process_attr_tracker_get_inclusion_set(tracker, values);
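+       /* Map the tracker status to a client-facing error code. */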
+       switch (status) {
+       case PROCESS_ATTR_TRACKER_STATUS_OK:
+               ret_code = LTTNG_OK;
+               break;
+       case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
+               ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
+               break;
+       case PROCESS_ATTR_TRACKER_STATUS_ERROR:
+               ret_code = LTTNG_ERR_NOMEM;
+               break;
+       default:
+               ret_code = LTTNG_ERR_UNK;
+               break;
+       }
+
+end:
+       return ret_code;
+}
+
+/*
+ * Command LTTNG_DISABLE_EVENT processed by the client thread.
+ */
+int cmd_disable_event(struct ltt_session *session,
+               enum lttng_domain_type domain, const char *channel_name,
+               const struct lttng_event *event)
+{
+       int ret;
+       const char *event_name;
+
+       DBG("Disable event command for event \'%s\'", event->name);
+
+       event_name = event->name;
+
+       /* Error out on unhandled search criteria */
+       if (event->loglevel_type || event->loglevel != -1 || event->enabled
+                       || event->pid || event->filter || event->exclusion) {
+               ret = LTTNG_ERR_UNK;
+               goto error;
+       }
+
+       rcu_read_lock();
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+       {
+               struct ltt_kernel_channel *kchan;
+               struct ltt_kernel_session *ksess;
+
+               ksess = session->kernel_session;
+
+               /*
+                * If a non-default channel has been created in the
+                * session, explicitly require that -c chan_name be
+                * provided.
+                */
+               if (ksess->has_non_default_channel && channel_name[0] == '\0') {
+                       ret = LTTNG_ERR_NEED_CHANNEL_NAME;
+                       goto error_unlock;
+               }
+
+               kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
+               if (kchan == NULL) {
+                       ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
+                       goto error_unlock;
+               }
+
+               switch (event->type) {
+               case LTTNG_EVENT_ALL:
+               case LTTNG_EVENT_TRACEPOINT:
+               case LTTNG_EVENT_SYSCALL:
+               case LTTNG_EVENT_PROBE:
+               case LTTNG_EVENT_FUNCTION:
+               case LTTNG_EVENT_FUNCTION_ENTRY:/* fall-through */
+                       if (event_name[0] == '\0') {
+                               ret = event_kernel_disable_event(kchan,
+                                       NULL, event->type);
+                       } else {
+                               ret = event_kernel_disable_event(kchan,
+                                       event_name, event->type);
+                       }
+                       if (ret != LTTNG_OK) {
+                               goto error_unlock;
+                       }
+                       break;
+               default:
+                       ret = LTTNG_ERR_UNK;
+                       goto error_unlock;
+               }
+
+               kernel_wait_quiescent();
+               break;
+       }
+       case LTTNG_DOMAIN_UST:
+       {
+               struct ltt_ust_channel *uchan;
+               struct ltt_ust_session *usess;
+
+               usess = session->ust_session;
+
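+               /*
+                * Reject event names that fall within the namespaces reserved
+                * for the agent domains (JUL, log4j, Python).
+                */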
+               if (validate_ust_event_name(event_name)) {
+                       ret = LTTNG_ERR_INVALID_EVENT_NAME;
+                       goto error_unlock;
+               }
+
+               /*
+                * If a non-default channel has been created in the
+                * session, explicitly require that -c chan_name be
+                * provided.
+                */
+               if (usess->has_non_default_channel && channel_name[0] == '\0') {
+                       ret = LTTNG_ERR_NEED_CHANNEL_NAME;
+                       goto error_unlock;
+               }
+
+               uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
+                               channel_name);
+               if (uchan == NULL) {
+                       ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+                       goto error_unlock;
+               }
+
+               switch (event->type) {
+               case LTTNG_EVENT_ALL:
+                       /*
+                        * An empty event name means that everything
+                        * should be disabled.
+                        */
+                       if (event->name[0] == '\0') {
+                               ret = event_ust_disable_all_tracepoints(usess, uchan);
+                       } else {
+                               ret = event_ust_disable_tracepoint(usess, uchan,
+                                               event_name);
+                       }
+                       if (ret != LTTNG_OK) {
+                               goto error_unlock;
+                       }
+                       break;
+               default:
+                       ret = LTTNG_ERR_UNK;
+                       goto error_unlock;
+               }
+
+               DBG3("Disable UST event %s in channel %s completed", event_name,
+                               channel_name);
+               break;
+       }
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_PYTHON:
+       {
+               struct agent *agt;
+               struct ltt_ust_session *usess = session->ust_session;
+
+               LTTNG_ASSERT(usess);
+
+               switch (event->type) {
+               case LTTNG_EVENT_ALL:
+                       break;
+               default:
+                       ret = LTTNG_ERR_UNK;
+                       goto error_unlock;
+               }
+
+               agt = trace_ust_find_agent(usess, domain);
+               if (!agt) {
+                       ret = -LTTNG_ERR_UST_EVENT_NOT_FOUND;
+                       goto error_unlock;
+               }
+               /*
+                * An empty event name means that everything
+                * should be disabled.
+                */
+               if (event->name[0] == '\0') {
+                       ret = event_agent_disable_all(usess, agt);
+               } else {
+                       ret = event_agent_disable(usess, agt, event_name);
+               }
+               if (ret != LTTNG_OK) {
+                       goto error_unlock;
+               }
+
+               break;
+       }
+       default:
+               ret = LTTNG_ERR_UND;
+               goto error_unlock;
+       }
+
+       ret = LTTNG_OK;
+
+error_unlock:
+       rcu_read_unlock();
+error:
+       return ret;
+}
+
+/*
+ * Command LTTNG_ADD_CONTEXT processed by the client thread.
+ */
+int cmd_add_context(struct ltt_session *session, enum lttng_domain_type domain,
+               char *channel_name, const struct lttng_event_context *ctx, int kwpipe)
+{
+       int ret, chan_kern_created = 0, chan_ust_created = 0;
+       char *app_ctx_provider_name = NULL, *app_ctx_name = NULL;
+
+       /*
+        * Don't try to add a context if the session has been started at
+        * some point in time before. The tracer does not allow it and would
+        * result in a corrupted trace.
+        */
+       if (session->has_been_started) {
+               ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
+               goto end;
+       }
+
+       if (ctx->ctx == LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
+               app_ctx_provider_name = ctx->u.app_ctx.provider_name;
+               app_ctx_name = ctx->u.app_ctx.ctx_name;
+       }
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               LTTNG_ASSERT(session->kernel_session);
+
+               if (session->kernel_session->channel_count == 0) {
+                       /* Create default channel */
+                       ret = channel_kernel_create(session->kernel_session, NULL, kwpipe);
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
+                       chan_kern_created = 1;
+               }
+               /* Add kernel context to kernel tracer */
+               ret = context_kernel_add(session->kernel_session, ctx, channel_name);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+               break;
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+       {
+               /*
+                * Validate channel name.
+                * If no channel name is given and the domain is JUL or LOG4J,
+                * set it to the appropriate domain-specific channel name. If
+                * a name is provided but does not match the expected channel
+                * name, return an error.
+                */
+               if (domain == LTTNG_DOMAIN_JUL && *channel_name &&
+                               strcmp(channel_name,
+                               DEFAULT_JUL_CHANNEL_NAME)) {
+                       ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+                       goto error;
+               } else if (domain == LTTNG_DOMAIN_LOG4J && *channel_name &&
+                               strcmp(channel_name,
+                               DEFAULT_LOG4J_CHANNEL_NAME)) {
+                       ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+                       goto error;
+               }
+               /* break is _not_ missing here. */
+       }
+       case LTTNG_DOMAIN_UST:
+       {
+               struct ltt_ust_session *usess = session->ust_session;
+               unsigned int chan_count;
+
+               LTTNG_ASSERT(usess);
+
+               chan_count = lttng_ht_get_count(usess->domain_global.channels);
+               if (chan_count == 0) {
+                       struct lttng_channel *attr;
+                       /* Create default channel */
+                       attr = channel_new_default_attr(domain, usess->buffer_type);
+                       if (attr == NULL) {
+                               ret = LTTNG_ERR_FATAL;
+                               goto error;
+                       }
+
+                       ret = channel_ust_create(usess, attr, usess->buffer_type);
+                       if (ret != LTTNG_OK) {
+                               free(attr);
+                               goto error;
+                       }
+                       channel_attr_destroy(attr);
+                       chan_ust_created = 1;
+               }
+
+               ret = context_ust_add(usess, domain, ctx, channel_name);
+               free(app_ctx_provider_name);
+               free(app_ctx_name);
+               app_ctx_name = NULL;
+               app_ctx_provider_name = NULL;
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+               break;
+       }
+       default:
+               ret = LTTNG_ERR_UND;
+               goto error;
+       }
+
+       ret = LTTNG_OK;
+       goto end;
+
+error:
+       if (chan_kern_created) {
+               struct ltt_kernel_channel *kchan =
+                       trace_kernel_get_channel_by_name(DEFAULT_CHANNEL_NAME,
+                                       session->kernel_session);
+               /* Created previously, this should NOT fail. */
+               LTTNG_ASSERT(kchan);
+               kernel_destroy_channel(kchan);
+       }
+
+       if (chan_ust_created) {
+               struct ltt_ust_channel *uchan =
+                       trace_ust_find_channel_by_name(
+                                       session->ust_session->domain_global.channels,
+                                       DEFAULT_CHANNEL_NAME);
+               /* Created previously, this should NOT fail. */
+               LTTNG_ASSERT(uchan);
+               /* Remove from the channel list of the session. */
+               trace_ust_delete_channel(session->ust_session->domain_global.channels,
+                               uchan);
+               trace_ust_destroy_channel(uchan);
+       }
+end:
+       free(app_ctx_provider_name);
+       free(app_ctx_name);
+       return ret;
+}
+
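+/*
+ * Check whether 'name' starts with 'prefix'. The comparison is bounded by the
+ * prefix length, capped at LTTNG_SYMBOL_NAME_LEN, since event names come from
+ * fixed-size buffers.
+ */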
+static inline bool name_starts_with(const char *name, const char *prefix)
+{
+       const size_t max_cmp_len = std::min(strlen(prefix), (size_t) LTTNG_SYMBOL_NAME_LEN);
+
+       return !strncmp(name, prefix, max_cmp_len);
+}
+
+/* Perform userspace-specific event name validation */
+static int validate_ust_event_name(const char *name)
+{
+       int ret = 0;
+
+       if (!name) {
+               ret = -1;
+               goto end;
+       }
+
+       /*
+        * Check name against all internal UST event component namespaces used
+        * by the agents.
+        */
+       if (name_starts_with(name, DEFAULT_JUL_EVENT_COMPONENT) ||
+               name_starts_with(name, DEFAULT_LOG4J_EVENT_COMPONENT) ||
+               name_starts_with(name, DEFAULT_PYTHON_EVENT_COMPONENT)) {
+               ret = -1;
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Internal version of cmd_enable_event() with a supplemental
+ * "internal_event" flag which is used to enable internal events that should
+ * be hidden from clients. Such events are used in the agent implementation to
+ * enable the events through which all "agent" events are funneled.
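+ *
+ * Ownership of filter_expression, filter, and exclusion is taken by this
+ * function; they are either transferred further down or freed on return.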
+ */
+static int _cmd_enable_event(struct ltt_session *session,
+               const struct lttng_domain *domain,
+               char *channel_name, struct lttng_event *event,
+               char *filter_expression,
+               struct lttng_bytecode *filter,
+               struct lttng_event_exclusion *exclusion,
+               int wpipe, bool internal_event)
+{
+       int ret = 0, channel_created = 0;
+       struct lttng_channel *attr = NULL;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(event);
+       LTTNG_ASSERT(channel_name);
+
+       /* If we have a filter, we must have its filter expression */
+       LTTNG_ASSERT(!(!!filter_expression ^ !!filter));
+
+       /* Normalize event name as a globbing pattern */
+       strutils_normalize_star_glob_pattern(event->name);
+
+       /* Normalize exclusion names as globbing patterns */
+       if (exclusion) {
+               size_t i;
+
+               for (i = 0; i < exclusion->count; i++) {
+                       char *name = LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, i);
+
+                       strutils_normalize_star_glob_pattern(name);
+               }
+       }
+
+       DBG("Enable event command for event \'%s\'", event->name);
+
+       rcu_read_lock();
+
+       switch (domain->type) {
+       case LTTNG_DOMAIN_KERNEL:
+       {
+               struct ltt_kernel_channel *kchan;
+
+               /*
+                * If a non-default channel has been created in the
+                * session, explicitly require that -c chan_name be
+                * provided.
+                */
+               if (session->kernel_session->has_non_default_channel
+                               && channel_name[0] == '\0') {
+                       ret = LTTNG_ERR_NEED_CHANNEL_NAME;
+                       goto error;
+               }
+
+               kchan = trace_kernel_get_channel_by_name(channel_name,
+                               session->kernel_session);
+               if (kchan == NULL) {
+                       attr = channel_new_default_attr(LTTNG_DOMAIN_KERNEL,
+                                       LTTNG_BUFFER_GLOBAL);
+                       if (attr == NULL) {
+                               ret = LTTNG_ERR_FATAL;
+                               goto error;
+                       }
+                       if (lttng_strncpy(attr->name, channel_name,
+                                       sizeof(attr->name))) {
+                               ret = LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+
+                       ret = cmd_enable_channel(session, domain, attr, wpipe);
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
+                       channel_created = 1;
+               }
+
+               /* Get the newly created kernel channel pointer */
+               kchan = trace_kernel_get_channel_by_name(channel_name,
+                               session->kernel_session);
+               if (kchan == NULL) {
+                       /* This should not happen... */
+                       ret = LTTNG_ERR_FATAL;
+                       goto error;
+               }
+
+               switch (event->type) {
+               case LTTNG_EVENT_ALL:
+               {
+                       char *filter_expression_a = NULL;
+                       struct lttng_bytecode *filter_a = NULL;
+
+                       /*
+                        * We need to duplicate filter_expression and filter,
+                        * because ownership is passed to the first enable
+                        * event call.
+                        */
+                       if (filter_expression) {
+                               filter_expression_a = strdup(filter_expression);
+                               if (!filter_expression_a) {
+                                       ret = LTTNG_ERR_FATAL;
+                                       goto error;
+                               }
+                       }
+                       if (filter) {
+                               filter_a = (lttng_bytecode *) zmalloc(sizeof(*filter_a) + filter->len);
+                               if (!filter_a) {
+                                       free(filter_expression_a);
+                                       ret = LTTNG_ERR_FATAL;
+                                       goto error;
+                               }
+                               memcpy(filter_a, filter, sizeof(*filter_a) + filter->len);
+                       }
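+                       /*
+                        * LTTNG_EVENT_ALL is implemented by enabling the
+                        * event twice: once as a tracepoint and once as a
+                        * syscall.
+                        */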
+                       event->type = LTTNG_EVENT_TRACEPOINT;   /* Hack */
+                       ret = event_kernel_enable_event(kchan, event,
+                               filter_expression, filter);
+                       /* We have passed ownership */
+                       filter_expression = NULL;
+                       filter = NULL;
+                       if (ret != LTTNG_OK) {
+                               if (channel_created) {
+                                       /* Let's not leak a useless channel. */
+                                       kernel_destroy_channel(kchan);
+                               }
+                               free(filter_expression_a);
+                               free(filter_a);
+                               goto error;
+                       }
+                       event->type = LTTNG_EVENT_SYSCALL;      /* Hack */
+                       ret = event_kernel_enable_event(kchan, event,
+                               filter_expression_a, filter_a);
+                       /* We have passed ownership */
+                       filter_expression_a = NULL;
+                       filter_a = NULL;
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
+                       break;
+               }
+               case LTTNG_EVENT_PROBE:
+               case LTTNG_EVENT_USERSPACE_PROBE:
+               case LTTNG_EVENT_FUNCTION:
+               case LTTNG_EVENT_FUNCTION_ENTRY:
+               case LTTNG_EVENT_TRACEPOINT:
+                       ret = event_kernel_enable_event(kchan, event,
+                               filter_expression, filter);
+                       /* We have passed ownership */
+                       filter_expression = NULL;
+                       filter = NULL;
+                       if (ret != LTTNG_OK) {
+                               if (channel_created) {
+                                       /* Let's not leak a useless channel. */
+                                       kernel_destroy_channel(kchan);
+                               }
+                               goto error;
+                       }
+                       break;
+               case LTTNG_EVENT_SYSCALL:
+                       ret = event_kernel_enable_event(kchan, event,
+                               filter_expression, filter);
+                       /* We have passed ownership */
+                       filter_expression = NULL;
+                       filter = NULL;
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
+                       break;
+               default:
+                       ret = LTTNG_ERR_UNK;
+                       goto error;
+               }
+
+               kernel_wait_quiescent();
+               break;
+       }
+       case LTTNG_DOMAIN_UST:
+       {
+               struct ltt_ust_channel *uchan;
+               struct ltt_ust_session *usess = session->ust_session;
+
+               LTTNG_ASSERT(usess);
+
+               /*
+                * If a non-default channel has been created in the
+                * session, explicitly require that -c chan_name be
+                * provided.
+                */
+               if (usess->has_non_default_channel && channel_name[0] == '\0') {
+                       ret = LTTNG_ERR_NEED_CHANNEL_NAME;
+                       goto error;
+               }
+
+               /* Get channel from global UST domain */
+               uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
+                               channel_name);
+               if (uchan == NULL) {
+                       /* Create default channel */
+                       attr = channel_new_default_attr(LTTNG_DOMAIN_UST,
+                                       usess->buffer_type);
+                       if (attr == NULL) {
+                               ret = LTTNG_ERR_FATAL;
+                               goto error;
+                       }
+                       if (lttng_strncpy(attr->name, channel_name,
+                                       sizeof(attr->name))) {
+                               ret = LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+
+                       ret = cmd_enable_channel(session, domain, attr, wpipe);
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
+
+                       /* Get the newly created channel reference back */
+                       uchan = trace_ust_find_channel_by_name(
+                                       usess->domain_global.channels, channel_name);
+                       LTTNG_ASSERT(uchan);
+               }
+
+               if (uchan->domain != LTTNG_DOMAIN_UST && !internal_event) {
+                       /*
+                        * Don't allow users to add UST events to channels which
+                        * are assigned to a userspace subdomain (JUL, Log4J,
+                        * Python, etc.).
+                        */
+                       ret = LTTNG_ERR_INVALID_CHANNEL_DOMAIN;
+                       goto error;
+               }
+
+               if (!internal_event) {
+                       /*
+                        * Ensure the event name is not reserved for internal
+                        * use.
+                        */
+                       ret = validate_ust_event_name(event->name);
+                       if (ret) {
+                               WARN("Userspace event name %s failed validation.",
+                                               event->name);
+                               ret = LTTNG_ERR_INVALID_EVENT_NAME;
+                               goto error;
+                       }
+               }
+
+               /* At this point, the session and channel exist on the tracer */
+               ret = event_ust_enable_tracepoint(usess, uchan, event,
+                               filter_expression, filter, exclusion,
+                               internal_event);
+               /* We have passed ownership */
+               filter_expression = NULL;
+               filter = NULL;
+               exclusion = NULL;
+               if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
+                       goto already_enabled;
+               } else if (ret != LTTNG_OK) {
+                       goto error;
+               }
+               break;
+       }
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_PYTHON:
+       {
+               const char *default_event_name, *default_chan_name;
+               struct agent *agt;
+               struct lttng_event uevent;
+               struct lttng_domain tmp_dom;
+               struct ltt_ust_session *usess = session->ust_session;
+
+               LTTNG_ASSERT(usess);
+
+               if (!agent_tracing_is_enabled()) {
+                       DBG("Attempted to enable an event in an agent domain but the agent thread is not running");
+                       ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
+                       goto error;
+               }
+
+               agt = trace_ust_find_agent(usess, domain->type);
+               if (!agt) {
+                       agt = agent_create(domain->type);
+                       if (!agt) {
+                               ret = LTTNG_ERR_NOMEM;
+                               goto error;
+                       }
+                       agent_add(agt, usess->agents);
+               }
+
+               /* Create the default tracepoint. */
+               memset(&uevent, 0, sizeof(uevent));
+               uevent.type = LTTNG_EVENT_TRACEPOINT;
+               uevent.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
+               default_event_name = event_get_default_agent_ust_name(
+                               domain->type);
+               if (!default_event_name) {
+                       ret = LTTNG_ERR_FATAL;
+                       goto error;
+               }
+               strncpy(uevent.name, default_event_name, sizeof(uevent.name));
+               uevent.name[sizeof(uevent.name) - 1] = '\0';
+
+               /*
+                * The domain type is changed because the hardcoded default
+                * channel and event of the agent domain are enabled through
+                * the UST domain.
+                */
+               memcpy(&tmp_dom, domain, sizeof(tmp_dom));
+               tmp_dom.type = LTTNG_DOMAIN_UST;
+
+               switch (domain->type) {
+               case LTTNG_DOMAIN_LOG4J:
+                       default_chan_name = DEFAULT_LOG4J_CHANNEL_NAME;
+                       break;
+               case LTTNG_DOMAIN_JUL:
+                       default_chan_name = DEFAULT_JUL_CHANNEL_NAME;
+                       break;
+               case LTTNG_DOMAIN_PYTHON:
+                       default_chan_name = DEFAULT_PYTHON_CHANNEL_NAME;
+                       break;
+               default:
+                       /* The switch/case we are in makes this impossible */
+                       abort();
+               }
+
+               {
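+                       /*
+                        * Duplicate the filter and its expression since
+                        * ownership of the copies is passed to the internal
+                        * enable-event call, while the originals are used for
+                        * the agent event below.
+                        */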
+                       char *filter_expression_copy = NULL;
+                       struct lttng_bytecode *filter_copy = NULL;
+
+                       if (filter) {
+                               const size_t filter_size = sizeof(
+                                               struct lttng_bytecode)
+                                               + filter->len;
+
+                               filter_copy = (lttng_bytecode *) zmalloc(filter_size);
+                               if (!filter_copy) {
+                                       ret = LTTNG_ERR_NOMEM;
+                                       goto error;
+                               }
+                               memcpy(filter_copy, filter, filter_size);
+
+                               filter_expression_copy =
+                                               strdup(filter_expression);
+                               if (!filter_expression_copy) {
+                                       ret = LTTNG_ERR_NOMEM;
+                               }
+
+                               if (!filter_expression_copy || !filter_copy) {
+                                       free(filter_expression_copy);
+                                       free(filter_copy);
+                                       goto error;
+                               }
+                       }
+
+                       ret = cmd_enable_event_internal(session, &tmp_dom,
+                                       (char *) default_chan_name,
+                                       &uevent, filter_expression_copy,
+                                       filter_copy, NULL, wpipe);
+               }
+
+               if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
+                       goto already_enabled;
+               } else if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               /* The wild card * means that everything should be enabled. */
+               if (strncmp(event->name, "*", 1) == 0 && strlen(event->name) == 1) {
+                       ret = event_agent_enable_all(usess, agt, event, filter,
+                                       filter_expression);
+               } else {
+                       ret = event_agent_enable(usess, agt, event, filter,
+                                       filter_expression);
+               }
+               filter = NULL;
+               filter_expression = NULL;
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               break;
+       }
+       default:
+               ret = LTTNG_ERR_UND;
+               goto error;
+       }
+
+       ret = LTTNG_OK;
+
+already_enabled:
+error:
+       free(filter_expression);
+       free(filter);
+       free(exclusion);
+       channel_attr_destroy(attr);
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Command LTTNG_ENABLE_EVENT processed by the client thread.
+ * We own filter, exclusion, and filter_expression.
+ */
+int cmd_enable_event(struct ltt_session *session,
+               const struct lttng_domain *domain,
+               char *channel_name, struct lttng_event *event,
+               char *filter_expression,
+               struct lttng_bytecode *filter,
+               struct lttng_event_exclusion *exclusion,
+               int wpipe)
+{
+       return _cmd_enable_event(session, domain, channel_name, event,
+                       filter_expression, filter, exclusion, wpipe, false);
+}
+
+/*
+ * Enable an event which is internal to LTTng. Internal events should
+ * never be made visible to clients and are immune to checks such as
+ * reserved event names.
+ */
+static int cmd_enable_event_internal(struct ltt_session *session,
+               const struct lttng_domain *domain,
+               char *channel_name, struct lttng_event *event,
+               char *filter_expression,
+               struct lttng_bytecode *filter,
+               struct lttng_event_exclusion *exclusion,
+               int wpipe)
+{
+       return _cmd_enable_event(session, domain, channel_name, event,
+                       filter_expression, filter, exclusion, wpipe, true);
+}
+
+/*
+ * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
+ */
+ssize_t cmd_list_tracepoints(enum lttng_domain_type domain,
+               struct lttng_event **events)
+{
+       int ret;
+       ssize_t nb_events = 0;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               nb_events = kernel_list_events(events);
+               if (nb_events < 0) {
+                       ret = LTTNG_ERR_KERN_LIST_FAIL;
+                       goto error;
+               }
+               break;
+       case LTTNG_DOMAIN_UST:
+               nb_events = ust_app_list_events(events);
+               if (nb_events < 0) {
+                       ret = LTTNG_ERR_UST_LIST_FAIL;
+                       goto error;
+               }
+               break;
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_PYTHON:
+               nb_events = agent_list_events(events, domain);
+               if (nb_events < 0) {
+                       ret = LTTNG_ERR_UST_LIST_FAIL;
+                       goto error;
+               }
+               break;
+       default:
+               ret = LTTNG_ERR_UND;
+               goto error;
+       }
+
+       return nb_events;
+
+error:
+       /* Return negative value to differentiate return code */
+       return -ret;
+}
+
+/*
+ * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
+ */
+ssize_t cmd_list_tracepoint_fields(enum lttng_domain_type domain,
+               struct lttng_event_field **fields)
+{
+       int ret;
+       ssize_t nb_fields = 0;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_UST:
+               nb_fields = ust_app_list_event_fields(fields);
+               if (nb_fields < 0) {
+                       ret = LTTNG_ERR_UST_LIST_FAIL;
+                       goto error;
+               }
+               break;
+       case LTTNG_DOMAIN_KERNEL:
+       default:        /* fall-through */
+               ret = LTTNG_ERR_UND;
+               goto error;
+       }
+
+       return nb_fields;
+
+error:
+       /* Return negative value to differentiate return code */
+       return -ret;
+}
+
+ssize_t cmd_list_syscalls(struct lttng_event **events)
+{
+       return syscall_table_list(events);
+}
+
+/*
+ * Command LTTNG_START_TRACE processed by the client thread.
+ *
+ * Called with session mutex held.
+ */
+int cmd_start_trace(struct ltt_session *session)
+{
+       enum lttng_error_code ret;
+       unsigned long nb_chan = 0;
+       struct ltt_kernel_session *ksession;
+       struct ltt_ust_session *usess;
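+       /* Saved so that the flags can be restored should the start fail. */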
+       const bool session_rotated_after_last_stop =
+                       session->rotated_after_last_stop;
+       const bool session_cleared_after_last_stop =
+                       session->cleared_after_last_stop;
+
+       LTTNG_ASSERT(session);
+
+       /* Ease our life a bit ;) */
+       ksession = session->kernel_session;
+       usess = session->ust_session;
+
+       /* Is the session already started? */
+       if (session->active) {
+               ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
+               /* Perform nothing */
+               goto end;
+       }
+
+       if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING &&
+                       !session->current_trace_chunk) {
+               /*
+                * A rotation was launched while the session was stopped and
+                * it has not been completed yet. It is not possible to start
+                * the session since starting the session here would require a
+                * rotation from "NULL" to a new trace chunk. That rotation
+                * would overlap with the ongoing rotation, which is not
+                * supported.
+                */
+               WARN("Refusing to start session \"%s\" as a rotation launched after the last \"stop\" is still ongoing",
+                               session->name);
+               ret = LTTNG_ERR_ROTATION_PENDING;
+               goto error;
+       }
+
+       /*
+        * Starting a session without any channel is useless since it is then
+        * impossible to enable one, so inform the client.
+        */
+       if (usess && usess->domain_global.channels) {
+               nb_chan += lttng_ht_get_count(usess->domain_global.channels);
+       }
+       if (ksession) {
+               nb_chan += ksession->channel_count;
+       }
+       if (!nb_chan) {
+               ret = LTTNG_ERR_NO_CHANNEL;
+               goto error;
+       }
+
+       session->active = 1;
+       session->rotated_after_last_stop = false;
+       session->cleared_after_last_stop = false;
+       if (session->output_traces && !session->current_trace_chunk) {
+               if (!session->has_been_started) {
+                       struct lttng_trace_chunk *trace_chunk;
+
+                       DBG("Creating initial trace chunk of session \"%s\"",
+                                       session->name);
+                       trace_chunk = session_create_new_trace_chunk(
+                                       session, NULL, NULL, NULL);
+                       if (!trace_chunk) {
+                               ret = LTTNG_ERR_CREATE_DIR_FAIL;
+                               goto error;
+                       }
+                       LTTNG_ASSERT(!session->current_trace_chunk);
+                       ret = (lttng_error_code) session_set_trace_chunk(session, trace_chunk,
+                                       NULL);
+                       lttng_trace_chunk_put(trace_chunk);
+                       if (ret) {
+                               ret = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
+                               goto error;
+                       }
+               } else {
+                       DBG("Rotating session \"%s\" from its current \"NULL\" trace chunk to a new chunk",
+                                       session->name);
+                       /*
+                        * Rotate existing streams into the new chunk.
+                        * This is a "quiet" rotation as no client has
+                        * explicitly requested this operation.
+                        *
+                        * There is also no need to wait for the rotation
+                        * to complete as it will happen immediately. No data
+                        * was produced as the session was stopped, so the
+                        * rotation should happen on reception of the command.
+                        */
+                       ret = (lttng_error_code) cmd_rotate_session(session, NULL, true,
+                                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION);
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
+               }
+       }
+
+       /* Kernel tracing */
+       if (ksession != NULL) {
+               DBG("Start kernel tracing session %s", session->name);
+               ret = (lttng_error_code) start_kernel_session(ksession);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+       }
+
+       /* Flag session that trace should start automatically */
+       if (usess) {
+               int int_ret = ust_app_start_trace_all(usess);
+
+               if (int_ret < 0) {
+                       ret = LTTNG_ERR_UST_START_FAIL;
+                       goto error;
+               }
+       }
+
+       /*
+        * Open a packet in every stream of the session to ensure that viewers
+        * can correctly identify the boundaries of the periods during which
+        * tracing was active for this session.
+        */
+       ret = session_open_packets(session);
+       if (ret != LTTNG_OK) {
+               goto error;
+       }
+
+       /*
+        * Clear the flag that indicates that a rotation was done while the
+        * session was stopped.
+        */
+       session->rotated_after_last_stop = false;
+
+       if (session->rotate_timer_period) {
+               int int_ret = timer_session_rotation_schedule_timer_start(
+                               session, session->rotate_timer_period);
+
+               if (int_ret < 0) {
+                       ERR("Failed to enable rotate timer");
+                       ret = LTTNG_ERR_UNK;
+                       goto error;
+               }
+       }
+
+       ret = LTTNG_OK;
+
+error:
+       if (ret == LTTNG_OK) {
+               /* Flag this after a successful start. */
+               session->has_been_started |= 1;
+       } else {
+               session->active = 0;
+               /* Restore initial state on error. */
+               session->rotated_after_last_stop =
+                               session_rotated_after_last_stop;
+               session->cleared_after_last_stop =
+                               session_cleared_after_last_stop;
+       }
+end:
+       return ret;
+}
+
+/*
+ * Command LTTNG_STOP_TRACE processed by the client thread.
+ */
+int cmd_stop_trace(struct ltt_session *session)
+{
+       int ret;
+       struct ltt_kernel_session *ksession;
+       struct ltt_ust_session *usess;
+
+       LTTNG_ASSERT(session);
+
+       DBG("Begin stop session \"%s\" (id %" PRIu64 ")", session->name, session->id);
+       /* Short cut */
+       ksession = session->kernel_session;
+       usess = session->ust_session;
+
+       /* Session is not active. Skip everything and inform the client. */
+       if (!session->active) {
+               ret = LTTNG_ERR_TRACE_ALREADY_STOPPED;
+               goto error;
+       }
+
+       ret = stop_kernel_session(ksession);
+       if (ret != LTTNG_OK) {
+               goto error;
+       }
+
+       if (usess && usess->active) {
+               ret = ust_app_stop_trace_all(usess);
+               if (ret < 0) {
+                       ret = LTTNG_ERR_UST_STOP_FAIL;
+                       goto error;
+               }
+       }
+
+       DBG("Completed stop session \"%s\" (id %" PRIu64 ")", session->name,
+                       session->id);
+       /* Flag inactive after a successful stop. */
+       session->active = 0;
+       ret = LTTNG_OK;
+
+error:
+       return ret;
+}
+
+/*
+ * Set the base_path of the session only if subdir of a control uris is set.
+ * Return LTTNG_OK on success, otherwise LTTNG_ERR_*.
+ */
+static int set_session_base_path_from_uris(struct ltt_session *session,
+               size_t nb_uri,
+               struct lttng_uri *uris)
+{
+       int ret;
+       size_t i;
+
+       for (i = 0; i < nb_uri; i++) {
+               if (uris[i].stype != LTTNG_STREAM_CONTROL ||
+                               uris[i].subdir[0] == '\0') {
+                       /* Not interested in these URIs */
+                       continue;
+               }
+
+               if (session->base_path != NULL) {
+                       free(session->base_path);
+                       session->base_path = NULL;
+               }
+
+               /* Set session base_path */
+               session->base_path = strdup(uris[i].subdir);
+               if (!session->base_path) {
+                       PERROR("Failed to copy base path \"%s\" to session \"%s\"",
+                                       uris[i].subdir, session->name);
+                       ret = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+               DBG2("Setting base path \"%s\" for session \"%s\"",
+                               session->base_path, session->name);
+       }
+       ret = LTTNG_OK;
+error:
+       return ret;
+}
+
+/*
+ * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
+ */
+int cmd_set_consumer_uri(struct ltt_session *session, size_t nb_uri,
+               struct lttng_uri *uris)
+{
+       int ret, i;
+       struct ltt_kernel_session *ksess = session->kernel_session;
+       struct ltt_ust_session *usess = session->ust_session;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(uris);
+       LTTNG_ASSERT(nb_uri > 0);
+
+       /* Can't set consumer URI if the session is active. */
+       if (session->active) {
+               ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
+               goto error;
+       }
+
+       /*
+        * Set the session base path if any. This is done inside
+        * cmd_set_consumer_uri to preserve backward compatibility between the
+        * previous session creation API and the session descriptor API.
+        */
+       ret = set_session_base_path_from_uris(session, nb_uri, uris);
+       if (ret != LTTNG_OK) {
+               goto error;
+       }
+
+       /* Set the "global" consumer URIs */
+       for (i = 0; i < nb_uri; i++) {
+               ret = add_uri_to_consumer(session, session->consumer, &uris[i],
+                               LTTNG_DOMAIN_NONE);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+       }
+
+       /* Set UST session URIs */
+       if (session->ust_session) {
+               for (i = 0; i < nb_uri; i++) {
+                       ret = add_uri_to_consumer(session,
+                                       session->ust_session->consumer,
+                                       &uris[i], LTTNG_DOMAIN_UST);
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
+               }
+       }
+
+       /* Set kernel session URIs */
+       if (session->kernel_session) {
+               for (i = 0; i < nb_uri; i++) {
+                       ret = add_uri_to_consumer(session,
+                                       session->kernel_session->consumer,
+                                       &uris[i], LTTNG_DOMAIN_KERNEL);
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
+               }
+       }
+
+       /*
+        * Make sure to set the session in output mode after we set URI since a
+        * session can be created without URL (thus flagged in no output mode).
+        */
+       session->output_traces = 1;
+       if (ksess) {
+               ksess->output_traces = 1;
+       }
+
+       if (usess) {
+               usess->output_traces = 1;
+       }
+
+       /* All good! */
+       ret = LTTNG_OK;
+
+error:
+       return ret;
+}
+
+static
+enum lttng_error_code set_session_output_from_descriptor(
+               struct ltt_session *session,
+               const struct lttng_session_descriptor *descriptor)
+{
+       int ret;
+       enum lttng_error_code ret_code = LTTNG_OK;
+       enum lttng_session_descriptor_type session_type =
+                       lttng_session_descriptor_get_type(descriptor);
+       enum lttng_session_descriptor_output_type output_type =
+                       lttng_session_descriptor_get_output_type(descriptor);
+       struct lttng_uri uris[2] = {};
+       size_t uri_count = 0;
+
+       switch (output_type) {
+       case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_NONE:
+               goto end;
+       case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_LOCAL:
+               lttng_session_descriptor_get_local_output_uri(descriptor,
+                               &uris[0]);
+               uri_count = 1;
+               break;
+       case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_NETWORK:
+               lttng_session_descriptor_get_network_output_uris(descriptor,
+                               &uris[0], &uris[1]);
+               uri_count = 2;
+               break;
+       default:
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       switch (session_type) {
+       case LTTNG_SESSION_DESCRIPTOR_TYPE_SNAPSHOT:
+       {
+               struct snapshot_output *new_output = NULL;
+
+               new_output = snapshot_output_alloc();
+               if (!new_output) {
+                       ret_code = LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+
+               ret = snapshot_output_init_with_uri(session,
+                               DEFAULT_SNAPSHOT_MAX_SIZE,
+                               NULL, uris, uri_count, session->consumer,
+                               new_output, &session->snapshot);
+               if (ret < 0) {
+                       ret_code = (ret == -ENOMEM) ?
+                                       LTTNG_ERR_NOMEM : LTTNG_ERR_INVALID;
+                       snapshot_output_destroy(new_output);
+                       goto end;
+               }
+               snapshot_add_output(&session->snapshot, new_output);
+               break;
+       }
+       case LTTNG_SESSION_DESCRIPTOR_TYPE_REGULAR:
+       case LTTNG_SESSION_DESCRIPTOR_TYPE_LIVE:
+       {
+               ret_code = (lttng_error_code) cmd_set_consumer_uri(session, uri_count, uris);
+               break;
+       }
+       default:
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+end:
+       return ret_code;
+}
+
+static
+enum lttng_error_code cmd_create_session_from_descriptor(
+               struct lttng_session_descriptor *descriptor,
+               const lttng_sock_cred *creds,
+               const char *home_path)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       const char *session_name;
+       struct ltt_session *new_session = NULL;
+       enum lttng_session_descriptor_status descriptor_status;
+
+       session_lock_list();
+       if (home_path) {
+               if (*home_path != '/') {
+                       ERR("Home path provided by client is not absolute");
+                       ret_code = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+       }
+
+       descriptor_status = lttng_session_descriptor_get_session_name(
+                       descriptor, &session_name);
+       switch (descriptor_status) {
+       case LTTNG_SESSION_DESCRIPTOR_STATUS_OK:
+               break;
+       case LTTNG_SESSION_DESCRIPTOR_STATUS_UNSET:
+               session_name = NULL;
+               break;
+       default:
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       ret_code = session_create(session_name, creds->uid, creds->gid,
+                       &new_session);
+       if (ret_code != LTTNG_OK) {
+               goto end;
+       }
+
+       if (!session_name) {
+               ret = lttng_session_descriptor_set_session_name(descriptor,
+                               new_session->name);
+               if (ret) {
+                       ret_code = LTTNG_ERR_SESSION_FAIL;
+                       goto end;
+               }
+       }
+
+       if (!lttng_session_descriptor_is_output_destination_initialized(
+                       descriptor)) {
+               /*
+                * Only include the session's creation time in the output
+                * destination if the name of the session itself was
+                * not auto-generated.
+                */
+               ret_code = lttng_session_descriptor_set_default_output(
+                               descriptor,
+                               session_name ? &new_session->creation_time : NULL,
+                               home_path);
+               if (ret_code != LTTNG_OK) {
+                       goto end;
+               }
+       } else {
+               new_session->has_user_specified_directory =
+                               lttng_session_descriptor_has_output_directory(
+                                       descriptor);
+       }
+
+       switch (lttng_session_descriptor_get_type(descriptor)) {
+       case LTTNG_SESSION_DESCRIPTOR_TYPE_SNAPSHOT:
+               new_session->snapshot_mode = 1;
+               break;
+       case LTTNG_SESSION_DESCRIPTOR_TYPE_LIVE:
+               new_session->live_timer =
+                               lttng_session_descriptor_live_get_timer_interval(
+                                       descriptor);
+               break;
+       default:
+               break;
+       }
+
+       ret_code = set_session_output_from_descriptor(new_session, descriptor);
+       if (ret_code != LTTNG_OK) {
+               goto end;
+       }
+       new_session->consumer->enabled = 1;
+       ret_code = LTTNG_OK;
+end:
+       /* Release reference provided by the session_create function. */
+       session_put(new_session);
+       if (ret_code != LTTNG_OK && new_session) {
+               /* Release the global reference on error. */
+               session_destroy(new_session);
+       }
+       session_unlock_list();
+       return ret_code;
+}
+
+enum lttng_error_code cmd_create_session(struct command_ctx *cmd_ctx, int sock,
+               struct lttng_session_descriptor **return_descriptor)
+{
+       int ret;
+       size_t payload_size;
+       struct lttng_dynamic_buffer payload;
+       struct lttng_buffer_view home_dir_view;
+       struct lttng_buffer_view session_descriptor_view;
+       struct lttng_session_descriptor *session_descriptor = NULL;
+       enum lttng_error_code ret_code;
+
+       lttng_dynamic_buffer_init(&payload);
+       if (cmd_ctx->lsm.u.create_session.home_dir_size >=
+                       LTTNG_PATH_MAX) {
+               ret_code = LTTNG_ERR_INVALID;
+               goto error;
+       }
+       if (cmd_ctx->lsm.u.create_session.session_descriptor_size >
+                       LTTNG_SESSION_DESCRIPTOR_MAX_LEN) {
+               ret_code = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       payload_size = cmd_ctx->lsm.u.create_session.home_dir_size +
+                       cmd_ctx->lsm.u.create_session.session_descriptor_size;
+       ret = lttng_dynamic_buffer_set_size(&payload, payload_size);
+       if (ret) {
+               ret_code = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       ret = lttcomm_recv_unix_sock(sock, payload.data, payload.size);
+       if (ret <= 0) {
+               ERR("Reception of session descriptor failed, aborting.");
+               ret_code = LTTNG_ERR_SESSION_FAIL;
+               goto error;
+       }
+
+       home_dir_view = lttng_buffer_view_from_dynamic_buffer(
+                       &payload,
+                       0,
+                       cmd_ctx->lsm.u.create_session.home_dir_size);
+       if (cmd_ctx->lsm.u.create_session.home_dir_size > 0 &&
+                       !lttng_buffer_view_is_valid(&home_dir_view)) {
+               ERR("Invalid payload in \"create session\" command: buffer too short to contain home directory");
+               ret_code = LTTNG_ERR_INVALID_PROTOCOL;
+               goto error;
+       }
+
+       session_descriptor_view = lttng_buffer_view_from_dynamic_buffer(
+                       &payload,
+                       cmd_ctx->lsm.u.create_session.home_dir_size,
+                       cmd_ctx->lsm.u.create_session.session_descriptor_size);
+       if (!lttng_buffer_view_is_valid(&session_descriptor_view)) {
+               ERR("Invalid payload in \"create session\" command: buffer too short to contain session descriptor");
+               ret_code = LTTNG_ERR_INVALID_PROTOCOL;
+               goto error;
+       }
+
+       ret = lttng_session_descriptor_create_from_buffer(
+                       &session_descriptor_view, &session_descriptor);
+       if (ret < 0) {
+               ERR("Failed to create session descriptor from payload of \"create session\" command");
+               ret_code = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       /*
+        * Sets the descriptor's auto-generated properties (name, output) if
+        * needed.
+        */
+       ret_code = cmd_create_session_from_descriptor(session_descriptor,
+                       &cmd_ctx->creds,
+                       home_dir_view.size ? home_dir_view.data : NULL);
+       if (ret_code != LTTNG_OK) {
+               goto error;
+       }
+
+       ret_code = LTTNG_OK;
+       *return_descriptor = session_descriptor;
+       session_descriptor = NULL;
+error:
+       lttng_dynamic_buffer_reset(&payload);
+       lttng_session_descriptor_destroy(session_descriptor);
+       return ret_code;
+}
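
For reference, the variable-length payload parsed above is the home directory bytes immediately followed by the serialized session descriptor, with both sizes announced in the lsm header. A minimal sketch of how a client could assemble that layout, assuming the lttng_dynamic_buffer helpers used above are in scope; build_create_session_payload and its parameters are hypothetical names, and the descriptor bytes are taken as already serialized:

static int build_create_session_payload(struct lttng_dynamic_buffer *payload,
		const char *home_dir, size_t home_dir_size,
		const void *descriptor_data, size_t descriptor_size)
{
	int ret;

	/* Home directory bytes come first, sized by home_dir_size. */
	lttng_dynamic_buffer_init(payload);
	ret = lttng_dynamic_buffer_append(payload, home_dir, home_dir_size);
	if (ret) {
		goto end;
	}

	/* The serialized session descriptor immediately follows. */
	ret = lttng_dynamic_buffer_append(payload, descriptor_data,
			descriptor_size);
end:
	return ret;
}
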
+
+static
+void cmd_destroy_session_reply(const struct ltt_session *session,
+               void *_reply_context)
+{
+       int ret;
+       ssize_t comm_ret;
+       const struct cmd_destroy_session_reply_context *reply_context =
+                       (cmd_destroy_session_reply_context *) _reply_context;
+       struct lttng_dynamic_buffer payload;
+       struct lttcomm_session_destroy_command_header cmd_header;
+       struct lttng_trace_archive_location *location = NULL;
+       struct lttcomm_lttng_msg llm = {
+               .cmd_type = LTTNG_DESTROY_SESSION,
+               .ret_code = reply_context->destruction_status,
+               .pid = UINT32_MAX,
+               .cmd_header_size =
+                       sizeof(struct lttcomm_session_destroy_command_header),
+               .data_size = 0,
+       };
+       size_t payload_size_before_location;
+
+       lttng_dynamic_buffer_init(&payload);
+
+       ret = lttng_dynamic_buffer_append(&payload, &llm, sizeof(llm));
+       if (ret) {
+               ERR("Failed to append session destruction message");
+               goto error;
+       }
+
+       cmd_header.rotation_state =
+                       (int32_t) (reply_context->implicit_rotation_on_destroy ?
+                               session->rotation_state :
+                               LTTNG_ROTATION_STATE_NO_ROTATION);
+       ret = lttng_dynamic_buffer_append(&payload, &cmd_header,
+                       sizeof(cmd_header));
+       if (ret) {
+               ERR("Failed to append session destruction command header");
+               goto error;
+       }
+
+       if (!reply_context->implicit_rotation_on_destroy) {
+               DBG("No implicit rotation performed during the destruction of session \"%s\", sending reply",
+                               session->name);
+               goto send_reply;
+       }
+       if (session->rotation_state != LTTNG_ROTATION_STATE_COMPLETED) {
+               DBG("Rotation state of session \"%s\" is not \"completed\", sending session destruction reply",
+                               session->name);
+               goto send_reply;
+       }
+
+       location = session_get_trace_archive_location(session);
+       if (!location) {
+               ERR("Failed to get the location of the trace archive produced during the destruction of session \"%s\"",
+                               session->name);
+               goto error;
+       }
+
+       payload_size_before_location = payload.size;
+       comm_ret = lttng_trace_archive_location_serialize(location,
+                       &payload);
+       lttng_trace_archive_location_put(location);
+       if (comm_ret < 0) {
+               ERR("Failed to serialize the location of the trace archive produced during the destruction of session \"%s\"",
+                               session->name);
+               goto error;
+       }
+       /* Update the message to indicate the location's length. */
+       ((struct lttcomm_lttng_msg *) payload.data)->data_size =
+                       payload.size - payload_size_before_location;
+send_reply:
+       comm_ret = lttcomm_send_unix_sock(reply_context->reply_sock_fd,
+                       payload.data, payload.size);
+       if (comm_ret != (ssize_t) payload.size) {
+               ERR("Failed to send result of the destruction of session \"%s\" to client",
+                               session->name);
+       }
+error:
+       ret = close(reply_context->reply_sock_fd);
+       if (ret) {
+               PERROR("Failed to close client socket in deferred session destroy reply");
+       }
+       lttng_dynamic_buffer_reset(&payload);
+       free(_reply_context);
+}
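
To make the wire format above easier to follow: the reply is a lttcomm_lttng_msg, then the session-destroy command header, then (only when an implicit rotation completed) the serialized trace archive location, whose length is written back into the message's data_size field. A sketch of how a reader of those bytes could locate the serialized location; locate_serialized_location, buf and len are hypothetical names:

static const char *locate_serialized_location(const char *buf, size_t len,
		size_t *location_len)
{
	const struct lttcomm_lttng_msg *llm;
	size_t headers_size;

	*location_len = 0;
	if (len < sizeof(*llm)) {
		return NULL;
	}

	llm = (const struct lttcomm_lttng_msg *) buf;
	headers_size = sizeof(*llm) + llm->cmd_header_size;
	if (llm->data_size == 0 || len < headers_size + llm->data_size) {
		/* No (complete) serialized location follows the headers. */
		return NULL;
	}

	*location_len = llm->data_size;
	return buf + headers_size;
}
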
+
+/*
+ * Command LTTNG_DESTROY_SESSION processed by the client thread.
+ *
+ * Called with session lock held.
+ */
+int cmd_destroy_session(struct ltt_session *session,
+               struct notification_thread_handle *notification_thread_handle,
+               int *sock_fd)
+{
+       int ret;
+       enum lttng_error_code destruction_last_error = LTTNG_OK;
+       struct cmd_destroy_session_reply_context *reply_context = NULL;
+
+       if (sock_fd) {
+               reply_context = (cmd_destroy_session_reply_context *) zmalloc(sizeof(*reply_context));
+               if (!reply_context) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+               reply_context->reply_sock_fd = *sock_fd;
+       }
+
+       /* Safety net */
+       LTTNG_ASSERT(session);
+
+       DBG("Begin destroy session %s (id %" PRIu64 ")", session->name,
+                       session->id);
+       if (session->active) {
+               DBG("Session \"%s\" is active, attempting to stop it before destroying it",
+                               session->name);
+               ret = cmd_stop_trace(session);
+               if (ret != LTTNG_OK && ret != LTTNG_ERR_TRACE_ALREADY_STOPPED) {
+                       /* Carry on with the destruction of the session. */
+                       ERR("Failed to stop session \"%s\" as part of its destruction: %s",
+                                       session->name, lttng_strerror(-ret));
+                       destruction_last_error = (lttng_error_code) ret;
+               }
+       }
+
+       if (session->rotation_schedule_timer_enabled) {
+               if (timer_session_rotation_schedule_timer_stop(
+                               session)) {
+                       ERR("Failed to stop the \"rotation schedule\" timer of session %s",
+                                       session->name);
+                       destruction_last_error = LTTNG_ERR_TIMER_STOP_ERROR;
+               }
+       }
+
+       if (session->rotate_size) {
+               unsubscribe_session_consumed_size_rotation(session, notification_thread_handle);
+               session->rotate_size = 0;
+       }
+
+       if (session->rotated && session->current_trace_chunk && session->output_traces) {
+               /*
+                * Perform a last rotation on destruction if rotations have
+                * occurred during the session's lifetime.
+                */
+               ret = cmd_rotate_session(session, NULL, false,
+                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
+               if (ret != LTTNG_OK) {
+                       ERR("Failed to perform an implicit rotation as part of the destruction of session \"%s\": %s",
+                                       session->name, lttng_strerror(-ret));
+                       destruction_last_error = (lttng_error_code) -ret;
+               }
+               if (reply_context) {
+                       reply_context->implicit_rotation_on_destroy = true;
+               }
+       } else if (session->has_been_started && session->current_trace_chunk) {
+               /*
+                * The user has not triggered a session rotation. However, to
+                * ensure all data has been consumed, the session is rotated
+                * to a 'null' trace chunk before it is destroyed.
+                *
+                * This is a "quiet" rotation meaning that no notification is
+                * emitted and no renaming of the current trace chunk takes
+                * place.
+                */
+               ret = cmd_rotate_session(session, NULL, true,
+                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION);
+               /*
+                * Rotation operations may not be supported by the kernel
+                * tracer. Hence, do not consider this implicit rotation as
+                * a session destruction error. The library has already stopped
+                * the session and waited for pending data; there is nothing
+                * left to do but complete the destruction of the session.
+                */
+               if (ret != LTTNG_OK &&
+                               ret != -LTTNG_ERR_ROTATION_NOT_AVAILABLE_KERNEL) {
+                       ERR("Failed to perform a quiet rotation as part of the destruction of session \"%s\": %s",
+                           session->name, lttng_strerror(ret));
+                       destruction_last_error = (lttng_error_code) -ret;
+               }
+       }
+
+       if (session->shm_path[0]) {
+               /*
+                * When a session is created with an explicit shm_path,
+                * the consumer daemon will create its shared memory files
+                * at that location and will *not* unlink them. This is normal
+                * as the intention of that feature is to make it possible
+                * to retrieve the content of those files should a crash occur.
+                *
+                * To ensure the content of those files can be used, the
+                * sessiond daemon will replicate the content of the metadata
+                * cache in a metadata file.
+                *
+                * On clean-up, it is expected that the consumer daemon will
+                * unlink the shared memory files and that the session daemon
+                * will unlink the metadata file. Then, the session's directory
+                * in the shm path can be removed.
+                *
+                * Unfortunately, a flaw in the design of the sessiond's and
+                * consumerd's tear down of channels makes it impossible to
+                * determine when the sessiond _and_ the consumerd have both
+                * destroyed their representation of a channel. For one, the
+                * unlinking, close, and rmdir happen in deferred 'call_rcu'
+                * callbacks in both daemons.
+                *
+                * However, it is also impossible for the sessiond to know when
+                * the consumer daemon is done destroying its channel(s) since
+                * it occurs as a reaction to the closing of the channel's file
+                * descriptor. There is no resulting communication initiated
+                * from the consumerd to the sessiond to confirm that the
+                * operation is completed (and was successful).
+                *
+                * Until this is all fixed, the session daemon checks for the
+                * removal of the session's shm path which makes it possible
+                * to safely advertise a session as having been destroyed.
+                *
+                * Prior to this fix, it was not possible to reliably save
+                * a session making use of the --shm-path option, destroy it,
+                * and load it again. This is because the creation of the
+                * session would fail upon seeing the session's shm path
+                * already in existence.
+                *
+                * Note that none of the error paths in the check for the
+                * directory's existence return an error. This is normal
+                * as there isn't much that can be done. The session will
+                * be destroyed properly, except that we can't offer the
+                * guarantee that the same session can be re-created.
+                */
+               current_completion_handler = &destroy_completion_handler.handler;
+               ret = lttng_strncpy(destroy_completion_handler.shm_path,
+                               session->shm_path,
+                               sizeof(destroy_completion_handler.shm_path));
+               LTTNG_ASSERT(!ret);
+       }
+
+       /*
+        * The session is destroyed. However, note that the command context
+        * still holds a reference to the session, thus delaying its destruction
+        * _at least_ up to the point when that reference is released.
+        */
+       session_destroy(session);
+       if (reply_context) {
+               reply_context->destruction_status = destruction_last_error;
+               ret = session_add_destroy_notifier(session,
+                               cmd_destroy_session_reply,
+                               (void *) reply_context);
+               if (ret) {
+                       ret = LTTNG_ERR_FATAL;
+                       goto end;
+               } else {
+                       *sock_fd = -1;
+               }
+       }
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
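
The shm_path comment above boils down to: before advertising the destruction, the session daemon has to observe that the session's directory under the shm path is gone. Purely as an illustration of that idea (this is not the actual completion handler), a hypothetical polling check might look as follows; shm_path_removed, max_attempts and the 100 ms delay are invented for the sketch:

#include <errno.h>
#include <sys/stat.h>
#include <unistd.h>

static bool shm_path_removed(const char *shm_path, unsigned int max_attempts)
{
	unsigned int attempt;

	for (attempt = 0; attempt < max_attempts; attempt++) {
		struct stat st;

		if (stat(shm_path, &st) == -1 && errno == ENOENT) {
			/* The directory is gone; the destruction can be advertised. */
			return true;
		}

		usleep(100000); /* 100 ms between checks, arbitrary. */
	}

	return false;
}
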
+
+/*
+ * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
+ */
+int cmd_register_consumer(struct ltt_session *session,
+               enum lttng_domain_type domain, const char *sock_path,
+               struct consumer_data *cdata)
+{
+       int ret, sock;
+       struct consumer_socket *socket = NULL;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(cdata);
+       LTTNG_ASSERT(sock_path);
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+       {
+               struct ltt_kernel_session *ksess = session->kernel_session;
+
+               LTTNG_ASSERT(ksess);
+
+               /* Can't register a consumer if there is already one */
+               if (ksess->consumer_fds_sent != 0) {
+                       ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
+                       goto error;
+               }
+
+               sock = lttcomm_connect_unix_sock(sock_path);
+               if (sock < 0) {
+                       ret = LTTNG_ERR_CONNECT_FAIL;
+                       goto error;
+               }
+               cdata->cmd_sock = sock;
+
+               socket = consumer_allocate_socket(&cdata->cmd_sock);
+               if (socket == NULL) {
+                       ret = close(sock);
+                       if (ret < 0) {
+                               PERROR("close register consumer");
+                       }
+                       cdata->cmd_sock = -1;
+                       ret = LTTNG_ERR_FATAL;
+                       goto error;
+               }
+
+               socket->lock = (pthread_mutex_t *) zmalloc(sizeof(pthread_mutex_t));
+               if (socket->lock == NULL) {
+                       PERROR("zmalloc pthread mutex");
+                       ret = LTTNG_ERR_FATAL;
+                       goto error;
+               }
+               pthread_mutex_init(socket->lock, NULL);
+               socket->registered = 1;
+
+               rcu_read_lock();
+               consumer_add_socket(socket, ksess->consumer);
+               rcu_read_unlock();
+
+               pthread_mutex_lock(&cdata->pid_mutex);
+               cdata->pid = -1;
+               pthread_mutex_unlock(&cdata->pid_mutex);
+
+               break;
+       }
+       default:
+               /* TODO: Userspace tracing */
+               ret = LTTNG_ERR_UND;
+               goto error;
+       }
+
+       return LTTNG_OK;
+
+error:
+       if (socket) {
+               consumer_destroy_socket(socket);
+       }
+       return ret;
+}
+
+/*
+ * Command LTTNG_LIST_DOMAINS processed by the client thread.
+ */
+ssize_t cmd_list_domains(struct ltt_session *session,
+               struct lttng_domain **domains)
+{
+       int ret, index = 0;
+       ssize_t nb_dom = 0;
+       struct agent *agt;
+       struct lttng_ht_iter iter;
+
+       if (session->kernel_session != NULL) {
+               DBG3("Listing domains found kernel domain");
+               nb_dom++;
+       }
+
+       if (session->ust_session != NULL) {
+               DBG3("Listing domains found UST global domain");
+               nb_dom++;
+
+               rcu_read_lock();
+               cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
+                               agt, node.node) {
+                       if (agt->being_used) {
+                               nb_dom++;
+                       }
+               }
+               rcu_read_unlock();
+       }
+
+       if (!nb_dom) {
+               goto end;
+       }
+
+       *domains = (lttng_domain *) zmalloc(nb_dom * sizeof(struct lttng_domain));
+       if (*domains == NULL) {
+               ret = LTTNG_ERR_FATAL;
+               goto error;
+       }
+
+       if (session->kernel_session != NULL) {
+               (*domains)[index].type = LTTNG_DOMAIN_KERNEL;
+
+               /* Kernel session buffer type is always GLOBAL */
+               (*domains)[index].buf_type = LTTNG_BUFFER_GLOBAL;
+
+               index++;
+       }
+
+       if (session->ust_session != NULL) {
+               (*domains)[index].type = LTTNG_DOMAIN_UST;
+               (*domains)[index].buf_type = session->ust_session->buffer_type;
+               index++;
+
+               rcu_read_lock();
+               cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
+                               agt, node.node) {
+                       if (agt->being_used) {
+                               (*domains)[index].type = agt->domain;
+                               (*domains)[index].buf_type = session->ust_session->buffer_type;
+                               index++;
+                       }
+               }
+               rcu_read_unlock();
+       }
+end:
+       return nb_dom;
+
+error:
+       /* Return negative value to differentiate return code */
+       return -ret;
+}
+
+
+/*
+ * Command LTTNG_LIST_CHANNELS processed by the client thread.
+ */
+ssize_t cmd_list_channels(enum lttng_domain_type domain,
+               struct ltt_session *session, struct lttng_channel **channels)
+{
+       ssize_t nb_chan = 0, payload_size = 0, ret;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               if (session->kernel_session != NULL) {
+                       nb_chan = session->kernel_session->channel_count;
+               }
+               DBG3("Number of kernel channels %zd", nb_chan);
+               if (nb_chan <= 0) {
+                       ret = -LTTNG_ERR_KERN_CHAN_NOT_FOUND;
+                       goto end;
+               }
+               break;
+       case LTTNG_DOMAIN_UST:
+               if (session->ust_session != NULL) {
+                       rcu_read_lock();
+                       nb_chan = lttng_ht_get_count(
+                               session->ust_session->domain_global.channels);
+                       rcu_read_unlock();
+               }
+               DBG3("Number of UST global channels %zd", nb_chan);
+               if (nb_chan < 0) {
+                       ret = -LTTNG_ERR_UST_CHAN_NOT_FOUND;
+                       goto end;
+               }
+               break;
+       default:
+               ret = -LTTNG_ERR_UND;
+               goto end;
+       }
+
+       if (nb_chan > 0) {
+               const size_t channel_size = sizeof(struct lttng_channel) +
+                       sizeof(struct lttng_channel_extended);
+               struct lttng_channel_extended *channel_exts;
+
+               payload_size = nb_chan * channel_size;
+               *channels = (lttng_channel *) zmalloc(payload_size);
+               if (*channels == NULL) {
+                       ret = -LTTNG_ERR_FATAL;
+                       goto end;
+               }
+
+               channel_exts = (lttng_channel_extended *)
+                       (((char *) *channels) + (nb_chan * sizeof(struct lttng_channel)));
+               ret = list_lttng_channels(domain, session, *channels, channel_exts);
+               if (ret != LTTNG_OK) {
+                       free(*channels);
+                       *channels = NULL;
+                       goto end;
+               }
+       } else {
+               *channels = NULL;
+       }
+
+       ret = payload_size;
+end:
+       return ret;
+}
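
The listing built here is one allocation holding nb_chan lttng_channel entries immediately followed by nb_chan lttng_channel_extended entries, index-aligned as the channel_exts computation suggests. A small sketch of how a consumer of that buffer could reach the extended record of channel i; channel_extended_at and its parameter names are illustrative:

static struct lttng_channel_extended *channel_extended_at(
		struct lttng_channel *channels, size_t channel_count, size_t i)
{
	/* The extended records start right after the last lttng_channel. */
	struct lttng_channel_extended *extended_base =
			(struct lttng_channel_extended *) &channels[channel_count];

	return &extended_base[i];
}
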
+
+/*
+ * Command LTTNG_LIST_EVENTS processed by the client thread.
+ */
+ssize_t cmd_list_events(enum lttng_domain_type domain,
+               struct ltt_session *session, char *channel_name,
+               struct lttng_payload *payload)
+{
+       int ret = 0;
+       ssize_t nb_events = 0;
+       struct lttcomm_event_command_header cmd_header = {};
+       const size_t cmd_header_offset = payload->buffer.size;
+
+       ret = lttng_dynamic_buffer_append(
+                       &payload->buffer, &cmd_header, sizeof(cmd_header));
+       if (ret) {
+               ret = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               if (session->kernel_session != NULL) {
+                       nb_events = list_lttng_kernel_events(channel_name,
+                                       session->kernel_session, payload);
+               }
+               break;
+       case LTTNG_DOMAIN_UST:
+       {
+               if (session->ust_session != NULL) {
+                       nb_events = list_lttng_ust_global_events(channel_name,
+                                       &session->ust_session->domain_global,
+                                       payload);
+               }
+               break;
+       }
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_PYTHON:
+               if (session->ust_session) {
+                       struct lttng_ht_iter iter;
+                       struct agent *agt;
+
+                       rcu_read_lock();
+                       cds_lfht_for_each_entry(session->ust_session->agents->ht,
+                                       &iter.iter, agt, node.node) {
+                               if (agt->domain == domain) {
+                                       nb_events = list_lttng_agent_events(
+                                                       agt, payload);
+                                       break;
+                               }
+                       }
+                       rcu_read_unlock();
+               }
+               break;
+       default:
+               ret = LTTNG_ERR_UND;
+               goto error;
+       }
+
+       ((struct lttcomm_event_command_header *) (payload->buffer.data +
+                        cmd_header_offset))->nb_events = (uint32_t) nb_events;
+
+       return nb_events;
+
+error:
+       /* Return negative value to differentiate return code */
+       return -ret;
+}
+
+/*
+ * Using the session list, fill an lttng_session array to send back to the
+ * client for session listing.
+ *
+ * The session list lock MUST be acquired before calling this function. Use
+ * session_lock_list() and session_unlock_list().
+ */
+void cmd_list_lttng_sessions(struct lttng_session *sessions,
+               size_t session_count, uid_t uid, gid_t gid)
+{
+       int ret;
+       unsigned int i = 0;
+       struct ltt_session *session;
+       struct ltt_session_list *list = session_get_list();
+       struct lttng_session_extended *extended =
+                       (typeof(extended)) (&sessions[session_count]);
+
+       DBG("Getting all available sessions for UID %d GID %d",
+                       uid, gid);
+       /*
+        * Iterate over session list and append data after the control struct in
+        * the buffer.
+        */
+       cds_list_for_each_entry(session, &list->head, list) {
+               if (!session_get(session)) {
+                       continue;
+               }
+               /*
+                * Only list the sessions the user can control.
+                */
+               if (!session_access_ok(session, uid) ||
+                               session->destroyed) {
+                       session_put(session);
+                       continue;
+               }
+
+               struct ltt_kernel_session *ksess = session->kernel_session;
+               struct ltt_ust_session *usess = session->ust_session;
+
+               if (session->consumer->type == CONSUMER_DST_NET ||
+                               (ksess && ksess->consumer->type == CONSUMER_DST_NET) ||
+                               (usess && usess->consumer->type == CONSUMER_DST_NET)) {
+                       ret = build_network_session_path(sessions[i].path,
+                                       sizeof(sessions[i].path), session);
+               } else {
+                       ret = snprintf(sessions[i].path, sizeof(sessions[i].path), "%s",
+                                       session->consumer->dst.session_root_path);
+               }
+               if (ret < 0) {
+                       PERROR("snprintf session path");
+                       session_put(session);
+                       continue;
+               }
+
+               strncpy(sessions[i].name, session->name, NAME_MAX);
+               sessions[i].name[NAME_MAX - 1] = '\0';
+               sessions[i].enabled = session->active;
+               sessions[i].snapshot_mode = session->snapshot_mode;
+               sessions[i].live_timer_interval = session->live_timer;
+               extended[i].creation_time.value = (uint64_t) session->creation_time;
+               extended[i].creation_time.is_set = 1;
+               i++;
+               session_put(session);
+       }
+}
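
cmd_list_lttng_sessions() expects its caller to provide a buffer of session_count lttng_session entries immediately followed by as many lttng_session_extended entries, as the 'extended' pointer computation shows. A minimal sketch of how such a buffer could be sized, using zmalloc as elsewhere in this file; allocate_session_listing is a hypothetical helper:

static struct lttng_session *allocate_session_listing(size_t session_count)
{
	/* One lttng_session plus one lttng_session_extended per session. */
	const size_t payload_size = session_count *
			(sizeof(struct lttng_session) +
			sizeof(struct lttng_session_extended));

	return (struct lttng_session *) zmalloc(payload_size);
}
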
+
+/*
+ * Command LTTNG_DATA_PENDING. Returns 0 if the data is NOT pending, meaning it
+ * is ready for trace analysis (or any kind of reader), or 1 if data is still
+ * pending.
+ */
+int cmd_data_pending(struct ltt_session *session)
+{
+       int ret;
+       struct ltt_kernel_session *ksess = session->kernel_session;
+       struct ltt_ust_session *usess = session->ust_session;
+
+       LTTNG_ASSERT(session);
+
+       DBG("Data pending for session %s", session->name);
+
+       /* Session MUST be stopped to ask for data availability. */
+       if (session->active) {
+               ret = LTTNG_ERR_SESSION_STARTED;
+               goto error;
+       } else {
+               /*
+                * If the session is stopped, make sure it has been started at
+                * least once; otherwise, the consumer would always report that
+                * data is pending.
+                *
+                * The consumer assumes that, when it receives the data pending
+                * command, tracing has been started beforehand; otherwise, no
+                * output data is written by the streams, which is a condition
+                * for data pending. It is therefore *VERY* important not to
+                * query the consumer before the trace has been started.
+                */
+               if (!session->has_been_started) {
+                       ret = 0;
+                       goto error;
+               }
+       }
+
+       /* A rotation is still pending, we have to wait. */
+       if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
+               DBG("Rotate still pending for session %s", session->name);
+               ret = 1;
+               goto error;
+       }
+
+       if (ksess && ksess->consumer) {
+               ret = consumer_is_data_pending(ksess->id, ksess->consumer);
+               if (ret == 1) {
+                       /* Data is still being extracted for the kernel. */
+                       goto error;
+               }
+       }
+
+       if (usess && usess->consumer) {
+               ret = consumer_is_data_pending(usess->id, usess->consumer);
+               if (ret == 1) {
+                       /* Data is still being extracted for the user space tracer. */
+                       goto error;
+               }
+       }
+
+       /* Data is ready to be read by a viewer */
+       ret = 0;
+
+error:
+       return ret;
+}
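
For context, this server-side check is what liblttng-ctl's lttng_data_pending() exercises: clients are expected to poll it after stopping a session and before reading the trace. A minimal client-side sketch, assuming the session was started and then stopped; wait_for_data and the 200 ms delay are illustrative:

#include <unistd.h>

#include <lttng/lttng.h>

static int wait_for_data(const char *session_name)
{
	int status;

	/* 1 means data is still in flight; keep polling. */
	while ((status = lttng_data_pending(session_name)) == 1) {
		usleep(200000); /* 200 ms between queries, arbitrary. */
	}

	/* 0: trace ready for readers, negative: -LTTNG_ERR_* code. */
	return status;
}
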
+
+/*
+ * Command LTTNG_SNAPSHOT_ADD_OUTPUT from the lttng ctl library.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR code.
+ */
+int cmd_snapshot_add_output(struct ltt_session *session,
+               const struct lttng_snapshot_output *output, uint32_t *id)
+{
+       int ret;
+       struct snapshot_output *new_output;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(output);
+
+       DBG("Cmd snapshot add output for session %s", session->name);
+
+       /*
+        * Can't create an output if the session is not set in no-output mode.
+        */
+       if (session->output_traces) {
+               ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
+               goto error;
+       }
+
+       if (session->has_non_mmap_channel) {
+               ret = LTTNG_ERR_SNAPSHOT_UNSUPPORTED;
+               goto error;
+       }
+
+       /* Only one output is allowed until we have the "tee" feature. */
+       if (session->snapshot.nb_output == 1) {
+               ret = LTTNG_ERR_SNAPSHOT_OUTPUT_EXIST;
+               goto error;
+       }
+
+       new_output = snapshot_output_alloc();
+       if (!new_output) {
+               ret = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       ret = snapshot_output_init(session, output->max_size, output->name,
+                       output->ctrl_url, output->data_url, session->consumer, new_output,
+                       &session->snapshot);
+       if (ret < 0) {
+               if (ret == -ENOMEM) {
+                       ret = LTTNG_ERR_NOMEM;
+               } else {
+                       ret = LTTNG_ERR_INVALID;
+               }
+               goto free_error;
+       }
+
+       rcu_read_lock();
+       snapshot_add_output(&session->snapshot, new_output);
+       if (id) {
+               *id = new_output->id;
+       }
+       rcu_read_unlock();
+
+       return LTTNG_OK;
+
+free_error:
+       snapshot_output_destroy(new_output);
+error:
+       return ret;
+}
+
+/*
+ * Command LTTNG_SNAPSHOT_DEL_OUTPUT from lib lttng ctl.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR code.
+ */
+int cmd_snapshot_del_output(struct ltt_session *session,
+               const struct lttng_snapshot_output *output)
+{
+       int ret;
+       struct snapshot_output *sout = NULL;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(output);
+
+       rcu_read_lock();
+
+       /*
+        * An output can only be removed if the session is set in
+        * no-output (snapshot) mode.
+        */
+       if (session->output_traces) {
+               ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
+               goto error;
+       }
+
+       if (output->id) {
+               DBG("Cmd snapshot del output id %" PRIu32 " for session %s", output->id,
+                               session->name);
+               sout = snapshot_find_output_by_id(output->id, &session->snapshot);
+       } else if (*output->name != '\0') {
+               DBG("Cmd snapshot del output name %s for session %s", output->name,
+                               session->name);
+               sout = snapshot_find_output_by_name(output->name, &session->snapshot);
+       }
+       if (!sout) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       snapshot_delete_output(&session->snapshot, sout);
+       snapshot_output_destroy(sout);
+       ret = LTTNG_OK;
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Command LTTNG_SNAPSHOT_LIST_OUTPUT from lib lttng ctl.
+ *
+ * If no output is available, outputs is untouched and 0 is returned.
+ *
+ * Return the size of the newly allocated outputs or a negative LTTNG_ERR code.
+ */
+ssize_t cmd_snapshot_list_outputs(struct ltt_session *session,
+               struct lttng_snapshot_output **outputs)
+{
+       int ret, idx = 0;
+       struct lttng_snapshot_output *list = NULL;
+       struct lttng_ht_iter iter;
+       struct snapshot_output *output;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(outputs);
+
+       DBG("Cmd snapshot list outputs for session %s", session->name);
+
+       /*
+        * Outputs can only be listed if the session is set in
+        * no-output (snapshot) mode.
+        */
+       if (session->output_traces) {
+               ret = -LTTNG_ERR_NOT_SNAPSHOT_SESSION;
+               goto end;
+       }
+
+       if (session->snapshot.nb_output == 0) {
+               ret = 0;
+               goto end;
+       }
+
+       list = (lttng_snapshot_output *) zmalloc(session->snapshot.nb_output * sizeof(*list));
+       if (!list) {
+               ret = -LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       /* Copy list from session to the new list object. */
+       rcu_read_lock();
+       cds_lfht_for_each_entry(session->snapshot.output_ht->ht, &iter.iter,
+                       output, node.node) {
+               LTTNG_ASSERT(output->consumer);
+               list[idx].id = output->id;
+               list[idx].max_size = output->max_size;
+               if (lttng_strncpy(list[idx].name, output->name,
+                               sizeof(list[idx].name))) {
+                       ret = -LTTNG_ERR_INVALID;
+                       goto error;
+               }
+               if (output->consumer->type == CONSUMER_DST_LOCAL) {
+                       if (lttng_strncpy(list[idx].ctrl_url,
+                                       output->consumer->dst.session_root_path,
+                                       sizeof(list[idx].ctrl_url))) {
+                               ret = -LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+               } else {
+                       /* Control URI. */
+                       ret = uri_to_str_url(&output->consumer->dst.net.control,
+                                       list[idx].ctrl_url, sizeof(list[idx].ctrl_url));
+                       if (ret < 0) {
+                               ret = -LTTNG_ERR_NOMEM;
+                               goto error;
+                       }
+
+                       /* Data URI. */
+                       ret = uri_to_str_url(&output->consumer->dst.net.data,
+                                       list[idx].data_url, sizeof(list[idx].data_url));
+                       if (ret < 0) {
+                               ret = -LTTNG_ERR_NOMEM;
+                               goto error;
+                       }
+               }
+               idx++;
+       }
+
+       *outputs = list;
+       list = NULL;
+       ret = session->snapshot.nb_output;
+error:
+       rcu_read_unlock();
+       free(list);
+end:
+       return ret;
+}
+
+/*
+ * Check if we can regenerate the metadata for this session.
+ * Only kernel, UST per-uid and non-live sessions are supported.
+ *
+ * Return 0 if the metadata can be generated, a LTTNG_ERR code otherwise.
+ */
+static
+int check_regenerate_metadata_support(struct ltt_session *session)
+{
+       int ret;
+
+       LTTNG_ASSERT(session);
+
+       if (session->live_timer != 0) {
+               ret = LTTNG_ERR_LIVE_SESSION;
+               goto end;
+       }
+       if (!session->active) {
+               ret = LTTNG_ERR_SESSION_NOT_STARTED;
+               goto end;
+       }
+       if (session->ust_session) {
+               switch (session->ust_session->buffer_type) {
+               case LTTNG_BUFFER_PER_UID:
+                       break;
+               case LTTNG_BUFFER_PER_PID:
+                       ret = LTTNG_ERR_PER_PID_SESSION;
+                       goto end;
+               default:
+                       abort();
+                       ret = LTTNG_ERR_UNK;
+                       goto end;
+               }
+       }
+       if (session->consumer->type == CONSUMER_DST_NET &&
+                       session->consumer->relay_minor_version < 8) {
+               ret = LTTNG_ERR_RELAYD_VERSION_FAIL;
+               goto end;
+       }
+       ret = 0;
+
+end:
+       return ret;
+}
+
+static
+int clear_metadata_file(int fd)
+{
+       int ret;
+       off_t lseek_ret;
+
+       lseek_ret = lseek(fd, 0, SEEK_SET);
+       if (lseek_ret < 0) {
+               PERROR("lseek");
+               ret = -1;
+               goto end;
+       }
+
+       ret = ftruncate(fd, 0);
+       if (ret < 0) {
+               PERROR("ftruncate");
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+static
+int ust_regenerate_metadata(struct ltt_ust_session *usess)
+{
+       int ret = 0;
+       struct buffer_reg_uid *uid_reg = NULL;
+       struct buffer_reg_session *session_reg = NULL;
+
+       rcu_read_lock();
+       cds_list_for_each_entry(uid_reg, &usess->buffer_reg_uid_list, lnode) {
+               struct ust_registry_session *registry;
+               struct ust_registry_channel *chan;
+               struct lttng_ht_iter iter_chan;
+
+               session_reg = uid_reg->registry;
+               registry = session_reg->reg.ust;
+
+               pthread_mutex_lock(&registry->lock);
+               registry->metadata_len_sent = 0;
+               memset(registry->metadata, 0, registry->metadata_alloc_len);
+               registry->metadata_len = 0;
+               registry->metadata_version++;
+               if (registry->metadata_fd > 0) {
+                       /* Clear the metadata file's content. */
+                       ret = clear_metadata_file(registry->metadata_fd);
+                       if (ret) {
+                               pthread_mutex_unlock(&registry->lock);
+                               goto end;
+                       }
+               }
+
+               ret = ust_metadata_session_statedump(registry, NULL,
+                               registry->major, registry->minor);
+               if (ret) {
+                       pthread_mutex_unlock(&registry->lock);
+                       ERR("Failed to generate session metadata (err = %d)",
+                                       ret);
+                       goto end;
+               }
+               cds_lfht_for_each_entry(registry->channels->ht, &iter_chan.iter,
+                               chan, node.node) {
+                       struct ust_registry_event *event;
+                       struct lttng_ht_iter iter_event;
+
+                       ret = ust_metadata_channel_statedump(registry, chan);
+                       if (ret) {
+                               pthread_mutex_unlock(&registry->lock);
+                               ERR("Failed to generate channel metadata "
+                                               "(err = %d)", ret);
+                               goto end;
+                       }
+                       cds_lfht_for_each_entry(chan->ht->ht, &iter_event.iter,
+                                       event, node.node) {
+                               ret = ust_metadata_event_statedump(registry,
+                                               chan, event);
+                               if (ret) {
+                                       pthread_mutex_unlock(&registry->lock);
+                                       ERR("Failed to generate event metadata "
+                                                       "(err = %d)", ret);
+                                       goto end;
+                               }
+                       }
+               }
+               pthread_mutex_unlock(&registry->lock);
+       }
+
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Command LTTNG_REGENERATE_METADATA from the lttng-ctl library.
+ *
+ * Ask the consumer to truncate the existing metadata file(s) and
+ * then regenerate the metadata. Live and per-pid sessions are not
+ * supported and return an error.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR code.
+ */
+int cmd_regenerate_metadata(struct ltt_session *session)
+{
+       int ret;
+
+       LTTNG_ASSERT(session);
+
+       ret = check_regenerate_metadata_support(session);
+       if (ret) {
+               goto end;
+       }
+
+       if (session->kernel_session) {
+               ret = kernctl_session_regenerate_metadata(
+                               session->kernel_session->fd);
+               if (ret < 0) {
+                       ERR("Failed to regenerate the kernel metadata");
+                       goto end;
+               }
+       }
+
+       if (session->ust_session) {
+               ret = ust_regenerate_metadata(session->ust_session);
+               if (ret < 0) {
+                       ERR("Failed to regenerate the UST metadata");
+                       goto end;
+               }
+       }
+       DBG("Cmd metadata regenerate for session %s", session->name);
+       ret = LTTNG_OK;
+
+end:
+       return ret;
+}
+
+/*
+ * Command LTTNG_REGENERATE_STATEDUMP from the lttng-ctl library.
+ *
+ * Ask the tracer to regenerate a new statedump.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR code.
+ */
+int cmd_regenerate_statedump(struct ltt_session *session)
+{
+       int ret;
+
+       LTTNG_ASSERT(session);
+
+       if (!session->active) {
+               ret = LTTNG_ERR_SESSION_NOT_STARTED;
+               goto end;
+       }
+
+       if (session->kernel_session) {
+               ret = kernctl_session_regenerate_statedump(
+                               session->kernel_session->fd);
+               /*
+                * Currently, the statedump in kernel can only fail if out
+                * of memory.
+                */
+               if (ret < 0) {
+                       if (ret == -ENOMEM) {
+                               ret = LTTNG_ERR_REGEN_STATEDUMP_NOMEM;
+                       } else {
+                               ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
+                       }
+                       ERR("Failed to regenerate the kernel statedump");
+                       goto end;
+               }
+       }
+
+       if (session->ust_session) {
+               ret = ust_app_regenerate_statedump_all(session->ust_session);
+               /*
+                * Currently, the statedump in UST always returns 0.
+                */
+               if (ret < 0) {
+                       ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
+                       ERR("Failed to regenerate the UST statedump");
+                       goto end;
+               }
+       }
+       DBG("Cmd regenerate statedump for session %s", session->name);
+       ret = LTTNG_OK;
+
+end:
+       return ret;
+}
+
+static
+enum lttng_error_code synchronize_tracer_notifier_register(
+               struct notification_thread_handle *notification_thread,
+               struct lttng_trigger *trigger, const struct lttng_credentials *cmd_creds)
+{
+       enum lttng_error_code ret_code;
+       const struct lttng_condition *condition =
+                       lttng_trigger_get_const_condition(trigger);
+       const char *trigger_name;
+       uid_t trigger_owner;
+       enum lttng_trigger_status trigger_status;
+       const enum lttng_domain_type trigger_domain =
+                       lttng_trigger_get_underlying_domain_type_restriction(
+                                       trigger);
+
+       trigger_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner);
+       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
+
+       LTTNG_ASSERT(condition);
+       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
+                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
+
+       trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
+       trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ?
+                       trigger_name : "(anonymous)";
+
+       session_lock_list();
+       switch (trigger_domain) {
+       case LTTNG_DOMAIN_KERNEL:
+       {
+               ret_code = kernel_register_event_notifier(trigger, cmd_creds);
+               if (ret_code != LTTNG_OK) {
+                       enum lttng_error_code notif_thread_unregister_ret;
+
+                       notif_thread_unregister_ret =
+                                       notification_thread_command_unregister_trigger(
+                                               notification_thread, trigger);
+
+                       if (notif_thread_unregister_ret != LTTNG_OK) {
+                               /* Return the original error code. */
+                               ERR("Failed to unregister trigger from notification thread during error recovery: trigger name = '%s', trigger owner uid = %d, error code = %d",
+                                               trigger_name,
+                                               (int) trigger_owner,
+                                               ret_code);
+                       }
+               }
+               break;
+       }
+       case LTTNG_DOMAIN_UST:
+               ust_app_global_update_all_event_notifier_rules();
+               break;
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_PYTHON:
+       {
+               /* Agent domains. */
+               struct agent *agt = agent_find_by_event_notifier_domain(
+                               trigger_domain);
+
+               if (!agt) {
+                       agt = agent_create(trigger_domain);
+                       if (!agt) {
+                               ret_code = LTTNG_ERR_NOMEM;
+                               goto end_unlock_session_list;
+                       }
+
+                       agent_add(agt, the_trigger_agents_ht_by_domain);
+               }
+
+               ret_code = (lttng_error_code) trigger_agent_enable(trigger, agt);
+               if (ret_code != LTTNG_OK) {
+                       goto end_unlock_session_list;
+               }
+
+               break;
+       }
+       case LTTNG_DOMAIN_NONE:
+       default:
+               abort();
+       }
+
+       ret_code = LTTNG_OK;
+end_unlock_session_list:
+       session_unlock_list();
+       return ret_code;
+}
+
+enum lttng_error_code cmd_register_trigger(const struct lttng_credentials *cmd_creds,
+               struct lttng_trigger *trigger,
+               bool is_trigger_anonymous,
+               struct notification_thread_handle *notification_thread,
+               struct lttng_trigger **return_trigger)
+{
+       enum lttng_error_code ret_code;
+       const char *trigger_name;
+       uid_t trigger_owner;
+       enum lttng_trigger_status trigger_status;
+
+       trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
+       trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ?
+                       trigger_name : "(anonymous)";
+
+       trigger_status = lttng_trigger_get_owner_uid(
+               trigger, &trigger_owner);
+       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
+
+       DBG("Running register trigger command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
+                       trigger_name, (int) trigger_owner,
+                       (int) lttng_credentials_get_uid(cmd_creds));
+
+       /*
+        * Validate the trigger credentials against the command credentials.
+        * Only the root user can register a trigger with non-matching
+        * credentials.
+        */
+       if (!lttng_credentials_is_equal_uid(
+                       lttng_trigger_get_credentials(trigger),
+                       cmd_creds)) {
+               if (lttng_credentials_get_uid(cmd_creds) != 0) {
+                       ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
+                                       trigger_name, (int) trigger_owner,
+                                       (int) lttng_credentials_get_uid(cmd_creds));
+                       ret_code = LTTNG_ERR_INVALID_TRIGGER;
+                       goto end;
+               }
+       }
+
+       /*
+        * The bytecode generation also serves as a validation step for the
+        * bytecode expressions.
+        */
+       ret_code = lttng_trigger_generate_bytecode(trigger, cmd_creds);
+       if (ret_code != LTTNG_OK) {
+               ERR("Failed to generate bytecode of trigger: trigger name = '%s', trigger owner uid = %d, error code = %d",
+                               trigger_name, (int) trigger_owner, ret_code);
+               goto end;
+       }
+
+       /*
+        * A reference to the trigger is acquired by the notification thread.
+        * It is safe to return the same trigger to the caller since that
+        * other user holds a reference.
+        *
+        * The trigger is modified during the execution of the
+        * "register trigger" command. However, by the time the command returns,
+        * it is safe to use without any locking as its properties are
+        * immutable.
+        */
+       ret_code = notification_thread_command_register_trigger(
+                       notification_thread, trigger, is_trigger_anonymous);
+       if (ret_code != LTTNG_OK) {
+               DBG("Failed to register trigger to notification thread: trigger name = '%s', trigger owner uid = %d, error code = %d",
+                               trigger_name, (int) trigger_owner, ret_code);
+               goto end;
+       }
+
+       trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
+       trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ?
+                       trigger_name : "(anonymous)";
+
+       /*
+        * Synchronize tracers if the trigger adds an event notifier.
+        */
+       if (lttng_trigger_needs_tracer_notifier(trigger)) {
+               ret_code = synchronize_tracer_notifier_register(notification_thread,
+                               trigger, cmd_creds);
+               if (ret_code != LTTNG_OK) {
+                       ERR("Error registering tracer notifier: %s",
+                                       lttng_strerror(-ret_code));
+                       goto end;
+               }
+       }
+
+       /*
+        * Return an updated trigger to the client.
+        *
+        * Since a modified version of the same trigger is returned, acquire a
+        * reference to the trigger so the caller doesn't have to care if those
+        * are distinct instances or not.
+        */
+       if (ret_code == LTTNG_OK) {
+               lttng_trigger_get(trigger);
+               *return_trigger = trigger;
+               /* Ownership of trigger was transferred to caller. */
+               trigger = NULL;
+       }
+end:
+       return ret_code;
+}
+
+static
+enum lttng_error_code synchronize_tracer_notifier_unregister(
+               const struct lttng_trigger *trigger)
+{
+       enum lttng_error_code ret_code;
+       const struct lttng_condition *condition =
+                       lttng_trigger_get_const_condition(trigger);
+       const enum lttng_domain_type trigger_domain =
+                       lttng_trigger_get_underlying_domain_type_restriction(
+                                       trigger);
+
+       LTTNG_ASSERT(condition);
+       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
+                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
+
+       session_lock_list();
+       switch (trigger_domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               ret_code = kernel_unregister_event_notifier(trigger);
+               if (ret_code != LTTNG_OK) {
+                       goto end_unlock_session_list;
+               }
+
+               break;
+       case LTTNG_DOMAIN_UST:
+               ust_app_global_update_all_event_notifier_rules();
+               break;
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_PYTHON:
+       {
+               /* Agent domains. */
+               struct agent *agt = agent_find_by_event_notifier_domain(
+                               trigger_domain);
+
+               /*
+                * If no agent is found, the trigger was never registered in
+                * the first place; calling this function under those
+                * circumstances is an internal error.
+                */
+               LTTNG_ASSERT(agt);
+               ret_code = (lttng_error_code) trigger_agent_disable(trigger, agt);
+               if (ret_code != LTTNG_OK) {
+                       goto end_unlock_session_list;
+               }
+
+               break;
+       }
+       case LTTNG_DOMAIN_NONE:
+       default:
+               abort();
+       }
+
+       ret_code = LTTNG_OK;
+
+end_unlock_session_list:
+       session_unlock_list();
+       return ret_code;
+}
+
+enum lttng_error_code cmd_unregister_trigger(const struct lttng_credentials *cmd_creds,
+               const struct lttng_trigger *trigger,
+               struct notification_thread_handle *notification_thread)
+{
+       enum lttng_error_code ret_code;
+       const char *trigger_name;
+       uid_t trigger_owner;
+       enum lttng_trigger_status trigger_status;
+       struct lttng_trigger *sessiond_trigger = NULL;
+
+       trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
+       trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
+       trigger_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner);
+       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
+
+       DBG("Running unregister trigger command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
+                       trigger_name, (int) trigger_owner,
+                       (int) lttng_credentials_get_uid(cmd_creds));
+
+       /*
+        * Validate the trigger credentials against the command credentials.
+        * Only the root user can unregister a trigger with non-matching
+        * credentials.
+        */
+       if (!lttng_credentials_is_equal_uid(
+                       lttng_trigger_get_credentials(trigger),
+                       cmd_creds)) {
+               if (lttng_credentials_get_uid(cmd_creds) != 0) {
+                       ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
+                                       trigger_name, (int) trigger_owner,
+                                       (int) lttng_credentials_get_uid(cmd_creds));
+                       ret_code = LTTNG_ERR_INVALID_TRIGGER;
+                       goto end;
+               }
+       }
+
+       /* Fetch the sessiond side trigger object. */
+       ret_code = notification_thread_command_get_trigger(
+                       notification_thread, trigger, &sessiond_trigger);
+       if (ret_code != LTTNG_OK) {
+               DBG("Failed to get trigger from notification thread during unregister: trigger name = '%s', trigger owner uid = %d, error code = %d",
+                               trigger_name, (int) trigger_owner, ret_code);
+               goto end;
+       }
+
+       LTTNG_ASSERT(sessiond_trigger);
+
+       /*
+        * From this point on, no matter what, consider the trigger
+        * unregistered.
+        *
+        * We set the unregistered state of the sessiond side trigger object in
+        * the client thread since we want to minimize the possibility of the
+        * notification thread being stalled due to a long execution of an
+        * action that required the trigger lock.
+        */
+       lttng_trigger_set_as_unregistered(sessiond_trigger);
+
+       ret_code = notification_thread_command_unregister_trigger(notification_thread,
+                                                                 trigger);
+       if (ret_code != LTTNG_OK) {
+               DBG("Failed to unregister trigger from notification thread: trigger name = '%s', trigger owner uid = %d, error code = %d",
+                               trigger_name, (int) trigger_owner, ret_code);
+               goto end;
+       }
+
+       /*
+        * Synchronize tracers if the trigger removes an event notifier.
+        * Do this even if the trigger unregistration failed, to at least stop
+        * the tracers from producing notifications associated with this
+        * event notifier.
+        */
+       if (lttng_trigger_needs_tracer_notifier(trigger)) {
+               ret_code = synchronize_tracer_notifier_unregister(trigger);
+               if (ret_code != LTTNG_OK) {
+                       ERR("Error unregistering trigger from the tracer.");
+                       goto end;
+               }
+
+       }
+
+end:
+       lttng_trigger_put(sessiond_trigger);
+       return ret_code;
+}
+
+enum lttng_error_code cmd_list_triggers(struct command_ctx *cmd_ctx,
+               struct notification_thread_handle *notification_thread,
+               struct lttng_triggers **return_triggers)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct lttng_triggers *triggers = NULL;
+
+       /* Get the set of triggers from the notification thread. */
+       ret_code = notification_thread_command_list_triggers(
+                       notification_thread, cmd_ctx->creds.uid, &triggers);
+       if (ret_code != LTTNG_OK) {
+               goto end;
+       }
+
+       ret = lttng_triggers_remove_hidden_triggers(triggers);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+
+       *return_triggers = triggers;
+       triggers = NULL;
+       ret_code = LTTNG_OK;
+end:
+       lttng_triggers_destroy(triggers);
+       return ret_code;
+}
+
+enum lttng_error_code cmd_execute_error_query(const struct lttng_credentials *cmd_creds,
+               const struct lttng_error_query *query,
+               struct lttng_error_query_results **_results,
+               struct notification_thread_handle *notification_thread)
+{
+       enum lttng_error_code ret_code;
+       const struct lttng_trigger *query_target_trigger;
+       const struct lttng_action *query_target_action = NULL;
+       struct lttng_trigger *matching_trigger = NULL;
+       const char *trigger_name;
+       uid_t trigger_owner;
+       enum lttng_trigger_status trigger_status;
+       struct lttng_error_query_results *results = NULL;
+
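+       /*
+        * Borrow the trigger targeted by the query: directly, through its
+        * condition, or through the trigger owning the targeted action.
+        */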
+       switch (lttng_error_query_get_target_type(query)) {
+       case LTTNG_ERROR_QUERY_TARGET_TYPE_TRIGGER:
+               query_target_trigger = lttng_error_query_trigger_borrow_target(query);
+               break;
+       case LTTNG_ERROR_QUERY_TARGET_TYPE_CONDITION:
+               query_target_trigger =
+                               lttng_error_query_condition_borrow_target(query);
+               break;
+       case LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION:
+               query_target_trigger = lttng_error_query_action_borrow_trigger_target(
+                               query);
+               break;
+       default:
+               abort();
+       }
+
+       LTTNG_ASSERT(query_target_trigger);
+
+       ret_code = notification_thread_command_get_trigger(notification_thread,
+                       query_target_trigger, &matching_trigger);
+       if (ret_code != LTTNG_OK) {
+               goto end;
+       }
+
+       /* No longer needed. */
+       query_target_trigger = NULL;
+
+       if (lttng_error_query_get_target_type(query) ==
+                       LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION) {
+               /* Get the sessiond-side version of the target action. */
+               query_target_action =
+                               lttng_error_query_action_borrow_action_target(
+                                               query, matching_trigger);
+       }
+
+       trigger_status = lttng_trigger_get_name(matching_trigger, &trigger_name);
+       trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ?
+                       trigger_name : "(anonymous)";
+       trigger_status = lttng_trigger_get_owner_uid(matching_trigger,
+                       &trigger_owner);
+       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
+
+       results = lttng_error_query_results_create();
+       if (!results) {
+               ret_code = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       DBG("Running \"execute error query\" command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
+                       trigger_name, (int) trigger_owner,
+                       (int) lttng_credentials_get_uid(cmd_creds));
+
+       /*
+        * Validate the trigger credentials against the command credentials.
+        * Only the root user can target a trigger with non-matching
+        * credentials.
+        */
+       if (!lttng_credentials_is_equal_uid(
+                       lttng_trigger_get_credentials(matching_trigger),
+                       cmd_creds)) {
+               if (lttng_credentials_get_uid(cmd_creds) != 0) {
+                       ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
+                                       trigger_name, (int) trigger_owner,
+                                       (int) lttng_credentials_get_uid(cmd_creds));
+                       ret_code = LTTNG_ERR_INVALID_TRIGGER;
+                       goto end;
+               }
+       }
+
+       switch (lttng_error_query_get_target_type(query)) {
+       case LTTNG_ERROR_QUERY_TARGET_TYPE_TRIGGER:
+               trigger_status = lttng_trigger_add_error_results(
+                               matching_trigger, results);
+
+               switch (trigger_status) {
+               case LTTNG_TRIGGER_STATUS_OK:
+                       break;
+               default:
+                       ret_code = LTTNG_ERR_UNK;
+                       goto end;
+               }
+
+               break;
+       case LTTNG_ERROR_QUERY_TARGET_TYPE_CONDITION:
+       {
+               trigger_status = lttng_trigger_condition_add_error_results(
+                               matching_trigger, results);
+
+               switch (trigger_status) {
+               case LTTNG_TRIGGER_STATUS_OK:
+                       break;
+               default:
+                       ret_code = LTTNG_ERR_UNK;
+                       goto end;
+               }
+
+               break;
+       }
+       case LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION:
+       {
+               const enum lttng_action_status action_status =
+                               lttng_action_add_error_query_results(
+                                               query_target_action, results);
+
+               switch (action_status) {
+               case LTTNG_ACTION_STATUS_OK:
+                       break;
+               default:
+                       ret_code = LTTNG_ERR_UNK;
+                       goto end;
+               }
+
+               break;
+       }
+       default:
+               abort();
+               break;
+       }
+
+       *_results = results;
+       results = NULL;
+       ret_code = LTTNG_OK;
+end:
+       lttng_trigger_put(matching_trigger);
+       lttng_error_query_results_destroy(results);
+       return ret_code;
+}
+
+/*
+ * Send relayd sockets from the snapshot output to the consumer. Ignore the
+ * request if the snapshot output does *not* use a remote destination.
+ *
+ * Return LTTNG_OK on success or a LTTNG_ERR code.
+ */
+static enum lttng_error_code set_relayd_for_snapshot(
+               struct consumer_output *output,
+               const struct ltt_session *session)
+{
+       enum lttng_error_code status = LTTNG_OK;
+       struct lttng_ht_iter iter;
+       struct consumer_socket *socket;
+       LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
+       const char *base_path;
+
+       LTTNG_ASSERT(output);
+       LTTNG_ASSERT(session);
+
+       DBG2("Set relayd object from snapshot output");
+
+       if (session->current_trace_chunk) {
+               enum lttng_trace_chunk_status chunk_status =
+                               lttng_trace_chunk_get_id(
+                                               session->current_trace_chunk,
+                                               &current_chunk_id.value);
+
+               if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       current_chunk_id.is_set = true;
+               } else {
+                       ERR("Failed to get current trace chunk id");
+                       status = LTTNG_ERR_UNK;
+                       goto error;
+               }
+       }
+
+       /* Ignore if snapshot consumer output is not network. */
+       if (output->type != CONSUMER_DST_NET) {
+               goto error;
+       }
+
+       /*
+        * The snapshot record URI base path overrides the session
+        * base path.
+        */
+       if (output->dst.net.control.subdir[0] != '\0') {
+               base_path = output->dst.net.control.subdir;
+       } else {
+               base_path = session->base_path;
+       }
+
+       /*
+        * For each consumer socket, create and send the relayd object of the
+        * snapshot output.
+        */
+       rcu_read_lock();
+       cds_lfht_for_each_entry(output->socks->ht, &iter.iter,
+                       socket, node.node) {
+               pthread_mutex_lock(socket->lock);
+               status = send_consumer_relayd_sockets(LTTNG_DOMAIN_NONE, session->id,
+                               output, socket,
+                               session->name, session->hostname,
+                               base_path,
+                               session->live_timer,
+                               current_chunk_id.is_set ? &current_chunk_id.value : NULL,
+                               session->creation_time,
+                               session->name_contains_creation_time);
+               pthread_mutex_unlock(socket->lock);
+               if (status != LTTNG_OK) {
+                       rcu_read_unlock();
+                       goto error;
+               }
+       }
+       rcu_read_unlock();
+
+error:
+       return status;
+}
+
+/*
+ * Record a kernel snapshot.
+ *
+ * Return LTTNG_OK on success or a LTTNG_ERR code.
+ */
+static enum lttng_error_code record_kernel_snapshot(
+               struct ltt_kernel_session *ksess,
+               const struct consumer_output *output,
+               const struct ltt_session *session,
+               int wait, uint64_t nb_packets_per_stream)
+{
+       enum lttng_error_code status;
+
+       LTTNG_ASSERT(ksess);
+       LTTNG_ASSERT(output);
+       LTTNG_ASSERT(session);
+
+       status = kernel_snapshot_record(
+                       ksess, output, wait, nb_packets_per_stream);
+       return status;
+}
+
+/*
+ * Record a UST snapshot.
+ *
+ * Returns LTTNG_OK on success or a LTTNG_ERR error code.
+ */
+static enum lttng_error_code record_ust_snapshot(struct ltt_ust_session *usess,
+               const struct consumer_output *output,
+               const struct ltt_session *session,
+               int wait, uint64_t nb_packets_per_stream)
+{
+       enum lttng_error_code status;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(output);
+       LTTNG_ASSERT(session);
+
+       status = ust_app_snapshot_record(
+                       usess, output, wait, nb_packets_per_stream);
+       return status;
+}
+
+static
+uint64_t get_session_size_one_more_packet_per_stream(
+               const struct ltt_session *session, uint64_t cur_nr_packets)
+{
+       uint64_t tot_size = 0;
+
+       if (session->kernel_session) {
+               struct ltt_kernel_channel *chan;
+               const struct ltt_kernel_session *ksess =
+                               session->kernel_session;
+
+               cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
+                       if (cur_nr_packets >= chan->channel->attr.num_subbuf) {
+                               /*
+                                * Don't take the channel into account if we
+                                * are already grabbing all of its packets.
+                                */
+                               continue;
+                       }
+                       tot_size += chan->channel->attr.subbuf_size
+                               * chan->stream_count;
+               }
+       }
+
+       if (session->ust_session) {
+               const struct ltt_ust_session *usess = session->ust_session;
+
+               tot_size += ust_app_get_size_one_more_packet_per_stream(usess,
+                               cur_nr_packets);
+       }
+
+       return tot_size;
+}
+
+/*
+ * Calculate the number of packets we can grab from each stream that
+ * fits within the overall snapshot max size.
+ *
+ * Returns -1 on error, 0 means infinite number of packets, else > 0 is
+ * the number of packets per stream.
+ *
+ * TODO: this approach is not perfect: we consider the worst case
+ * (packet filling the sub-buffers) as an upper bound, but we could do
+ * better if we do this calculation while we actually grab the packet
+ * content: we would know how much padding we don't actually store into
+ * the file.
+ *
+ * This algorithm is currently bounded by the number of packets per
+ * stream.
+ *
+ * Since we call this algorithm before actually grabbing the data, it's
+ * an approximation: for instance, applications could appear/disappear
+ * in between this call and actually grabbing data.
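+ *
+ * Illustrative example (arbitrary numbers): with one kernel channel of
+ * 2 streams, 4 sub-buffers of 64 kB each, and a snapshot max size of
+ * 192 kB, one more packet per stream costs 2 * 64 kB = 128 kB. The first
+ * iteration of the loop fits (192 - 128 = 64 kB left), the second does
+ * not, so this returns 1 packet per stream.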
+ */
+static
+int64_t get_session_nb_packets_per_stream(const struct ltt_session *session,
+               uint64_t max_size)
+{
+       int64_t size_left;
+       uint64_t cur_nb_packets = 0;
+
+       if (!max_size) {
+               return 0;       /* Infinite */
+       }
+
+       size_left = max_size;
+       for (;;) {
+               uint64_t one_more_packet_tot_size;
+
+               one_more_packet_tot_size = get_session_size_one_more_packet_per_stream(
+                               session, cur_nb_packets);
+               if (!one_more_packet_tot_size) {
+                       /* We are already grabbing all packets. */
+                       break;
+               }
+               size_left -= one_more_packet_tot_size;
+               if (size_left < 0) {
+                       break;
+               }
+               cur_nb_packets++;
+       }
+       if (!cur_nb_packets && size_left != max_size) {
+               /* Not enough room to grab one packet of each stream, error. */
+               return -1;
+       }
+       return cur_nb_packets;
+}
+
+static
+enum lttng_error_code snapshot_record(struct ltt_session *session,
+               const struct snapshot_output *snapshot_output, int wait)
+{
+       int64_t nb_packets_per_stream;
+       char snapshot_chunk_name[LTTNG_NAME_MAX];
+       int ret;
+       enum lttng_error_code ret_code = LTTNG_OK;
+       struct lttng_trace_chunk *snapshot_trace_chunk;
+       struct consumer_output *original_ust_consumer_output = NULL;
+       struct consumer_output *original_kernel_consumer_output = NULL;
+       struct consumer_output *snapshot_ust_consumer_output = NULL;
+       struct consumer_output *snapshot_kernel_consumer_output = NULL;
+
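+       /* Snapshot chunks are named "<output name>-<datetime>-<snapshot count>". */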
+       ret = snprintf(snapshot_chunk_name, sizeof(snapshot_chunk_name),
+                       "%s-%s-%" PRIu64,
+                       snapshot_output->name,
+                       snapshot_output->datetime,
+                       snapshot_output->nb_snapshot);
+       if (ret < 0 || ret >= sizeof(snapshot_chunk_name)) {
+               ERR("Failed to format snapshot name");
+               ret_code = LTTNG_ERR_INVALID;
+               goto error;
+       }
+       DBG("Recording snapshot \"%s\" for session \"%s\" with chunk name \"%s\"",
+                       snapshot_output->name, session->name,
+                       snapshot_chunk_name);
+       if (!session->kernel_session && !session->ust_session) {
+               ERR("Failed to record snapshot as no channels exist");
+               ret_code = LTTNG_ERR_NO_CHANNEL;
+               goto error;
+       }
+
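+       /*
+        * Temporarily swap each domain's consumer output for a copy of the
+        * snapshot output; the original outputs are restored before this
+        * function returns.
+        */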
+       if (session->kernel_session) {
+               original_kernel_consumer_output =
+                               session->kernel_session->consumer;
+               snapshot_kernel_consumer_output =
+                               consumer_copy_output(snapshot_output->consumer);
+               strcpy(snapshot_kernel_consumer_output->chunk_path,
+                       snapshot_chunk_name);
+
+               /* Copy the original domain subdir. */
+               strcpy(snapshot_kernel_consumer_output->domain_subdir,
+                               original_kernel_consumer_output->domain_subdir);
+
+               ret = consumer_copy_sockets(snapshot_kernel_consumer_output,
+                               original_kernel_consumer_output);
+               if (ret < 0) {
+                       ERR("Failed to copy consumer sockets from snapshot output configuration");
+                       ret_code = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+               ret_code = set_relayd_for_snapshot(
+                               snapshot_kernel_consumer_output, session);
+               if (ret_code != LTTNG_OK) {
+                       ERR("Failed to setup relay daemon for kernel tracer snapshot");
+                       goto error;
+               }
+               session->kernel_session->consumer =
+                               snapshot_kernel_consumer_output;
+       }
+       if (session->ust_session) {
+               original_ust_consumer_output = session->ust_session->consumer;
+               snapshot_ust_consumer_output =
+                               consumer_copy_output(snapshot_output->consumer);
+               strcpy(snapshot_ust_consumer_output->chunk_path,
+                       snapshot_chunk_name);
+
+               /* Copy the original domain subdir. */
+               strcpy(snapshot_ust_consumer_output->domain_subdir,
+                               original_ust_consumer_output->domain_subdir);
+
+               ret = consumer_copy_sockets(snapshot_ust_consumer_output,
+                               original_ust_consumer_output);
+               if (ret < 0) {
+                       ERR("Failed to copy consumer sockets from snapshot output configuration");
+                       ret_code = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+               ret_code = set_relayd_for_snapshot(
+                               snapshot_ust_consumer_output, session);
+               if (ret_code != LTTNG_OK) {
+                       ERR("Failed to setup relay daemon for userspace tracer snapshot");
+                       goto error;
+               }
+               session->ust_session->consumer =
+                               snapshot_ust_consumer_output;
+       }
+
+       snapshot_trace_chunk = session_create_new_trace_chunk(session,
+                       snapshot_kernel_consumer_output ?:
+                                       snapshot_ust_consumer_output,
+                       consumer_output_get_base_path(
+                                       snapshot_output->consumer),
+                       snapshot_chunk_name);
+       if (!snapshot_trace_chunk) {
+               ERR("Failed to create temporary trace chunk to record a snapshot of session \"%s\"",
+                               session->name);
+               ret_code = LTTNG_ERR_CREATE_DIR_FAIL;
+               goto error;
+       }
+       LTTNG_ASSERT(!session->current_trace_chunk);
+       ret = session_set_trace_chunk(session, snapshot_trace_chunk, NULL);
+       lttng_trace_chunk_put(snapshot_trace_chunk);
+       snapshot_trace_chunk = NULL;
+       if (ret) {
+               ERR("Failed to set temporary trace chunk to record a snapshot of session \"%s\"",
+                               session->name);
+               ret_code = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
+               goto error;
+       }
+
+       nb_packets_per_stream = get_session_nb_packets_per_stream(session,
+                       snapshot_output->max_size);
+       if (nb_packets_per_stream < 0) {
+               ret_code = LTTNG_ERR_MAX_SIZE_INVALID;
+               goto error_close_trace_chunk;
+       }
+
+       if (session->kernel_session) {
+               ret_code = record_kernel_snapshot(session->kernel_session,
+                               snapshot_kernel_consumer_output, session,
+                               wait, nb_packets_per_stream);
+               if (ret_code != LTTNG_OK) {
+                       goto error_close_trace_chunk;
+               }
+       }
+
+       if (session->ust_session) {
+               ret_code = record_ust_snapshot(session->ust_session,
+                               snapshot_ust_consumer_output, session,
+                               wait, nb_packets_per_stream);
+               if (ret_code != LTTNG_OK) {
+                       goto error_close_trace_chunk;
+               }
+       }
+
+error_close_trace_chunk:
+       if (session_set_trace_chunk(session, NULL, &snapshot_trace_chunk)) {
+               ERR("Failed to release the current trace chunk of session \"%s\"",
+                               session->name);
+               ret_code = LTTNG_ERR_UNK;
+       }
+
+       if (session_close_trace_chunk(session, snapshot_trace_chunk,
+                       LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION, NULL)) {
+               /*
+                * Don't goto end; make sure the chunk is closed for the session
+                * to allow future snapshots.
+                */
+               ERR("Failed to close snapshot trace chunk of session \"%s\"",
+                               session->name);
+               ret_code = LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
+       }
+error:
+       if (original_ust_consumer_output) {
+               session->ust_session->consumer = original_ust_consumer_output;
+       }
+       if (original_kernel_consumer_output) {
+               session->kernel_session->consumer =
+                               original_kernel_consumer_output;
+       }
+       consumer_output_put(snapshot_ust_consumer_output);
+       consumer_output_put(snapshot_kernel_consumer_output);
+       return ret_code;
+}
+
+/*
+ * Command LTTNG_SNAPSHOT_RECORD from the lttng-ctl library.
+ *
+ * The wait parameter is ignored, so this call always waits for the snapshot
+ * to complete before returning.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR code.
+ */
+int cmd_snapshot_record(struct ltt_session *session,
+               const struct lttng_snapshot_output *output, int wait)
+{
+       enum lttng_error_code cmd_ret = LTTNG_OK;
+       int ret;
+       unsigned int snapshot_success = 0;
+       char datetime[16];
+       struct snapshot_output *tmp_output = NULL;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(output);
+
+       DBG("Cmd snapshot record for session %s", session->name);
+
+       /* Get the datetime for the snapshot output directory. */
+       ret = utils_get_current_time_str("%Y%m%d-%H%M%S", datetime,
+                       sizeof(datetime));
+       if (!ret) {
+               cmd_ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       /*
+        * Recording a snapshot is only allowed if the session was created
+        * in no-output (snapshot) mode.
+        */
+       if (session->output_traces) {
+               cmd_ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
+               goto error;
+       }
+
+       /* The session needs to be started at least once. */
+       if (!session->has_been_started) {
+               cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
+               goto error;
+       }
+
+       /* Use temporary output for the session. */
+       if (*output->ctrl_url != '\0') {
+               tmp_output = snapshot_output_alloc();
+               if (!tmp_output) {
+                       cmd_ret = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+
+               ret = snapshot_output_init(session, output->max_size,
+                               output->name,
+                               output->ctrl_url, output->data_url,
+                               session->consumer,
+                               tmp_output, NULL);
+               if (ret < 0) {
+                       if (ret == -ENOMEM) {
+                               cmd_ret = LTTNG_ERR_NOMEM;
+                       } else {
+                               cmd_ret = LTTNG_ERR_INVALID;
+                       }
+                       goto error;
+               }
+               /* Use the global session count for the temporary snapshot. */
+               tmp_output->nb_snapshot = session->snapshot.nb_snapshot;
+
+               /* Use the global datetime */
+               memcpy(tmp_output->datetime, datetime, sizeof(datetime));
+               cmd_ret = snapshot_record(session, tmp_output, wait);
+               if (cmd_ret != LTTNG_OK) {
+                       goto error;
+               }
+               snapshot_success = 1;
+       } else {
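+               /*
+                * No explicit control URL was provided: record a snapshot to
+                * every output configured on the session.
+                */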
+               struct snapshot_output *sout;
+               struct lttng_ht_iter iter;
+
+               rcu_read_lock();
+               cds_lfht_for_each_entry(session->snapshot.output_ht->ht,
+                               &iter.iter, sout, node.node) {
+                       struct snapshot_output output_copy;
+
+                       /*
+                        * Make a local copy of the output and override output
+                        * parameters with those provided as part of the
+                        * command.
+                        */
+                       memcpy(&output_copy, sout, sizeof(output_copy));
+
+                       if (output->max_size != (uint64_t) -1ULL) {
+                               output_copy.max_size = output->max_size;
+                       }
+
+                       output_copy.nb_snapshot = session->snapshot.nb_snapshot;
+                       memcpy(output_copy.datetime, datetime,
+                                       sizeof(datetime));
+
+                       /* Override the name with the one provided in the command, if any. */
+                       if (*output->name != '\0') {
+                               if (lttng_strncpy(output_copy.name,
+                                               output->name,
+                                               sizeof(output_copy.name))) {
+                                       cmd_ret = LTTNG_ERR_INVALID;
+                                       rcu_read_unlock();
+                                       goto error;
+                               }
+                       }
+
+                       cmd_ret = snapshot_record(session, &output_copy, wait);
+                       if (cmd_ret != LTTNG_OK) {
+                               rcu_read_unlock();
+                               goto error;
+                       }
+                       snapshot_success = 1;
+               }
+               rcu_read_unlock();
+       }
+
+       if (snapshot_success) {
+               session->snapshot.nb_snapshot++;
+       } else {
+               cmd_ret = LTTNG_ERR_SNAPSHOT_FAIL;
+       }
+
+error:
+       if (tmp_output) {
+               snapshot_output_destroy(tmp_output);
+       }
+       return cmd_ret;
+}
+
+/*
+ * Command LTTNG_SET_SESSION_SHM_PATH processed by the client thread.
+ */
+int cmd_set_session_shm_path(struct ltt_session *session,
+               const char *shm_path)
+{
+       /* Safety net */
+       LTTNG_ASSERT(session);
+
+       /*
+        * Can only set shm path before session is started.
+        */
+       if (session->has_been_started) {
+               return LTTNG_ERR_SESSION_STARTED;
+       }
+
+       strncpy(session->shm_path, shm_path,
+               sizeof(session->shm_path));
+       session->shm_path[sizeof(session->shm_path) - 1] = '\0';
+
+       return LTTNG_OK;
+}
+
+/*
+ * Command LTTNG_ROTATE_SESSION from the lttng-ctl library.
+ *
+ * Ask the consumer to rotate the session output directory.
+ * The session lock must be held.
+ *
+ * Returns LTTNG_OK on success or else a negative LTTng error code.
+ */
+int cmd_rotate_session(struct ltt_session *session,
+               struct lttng_rotate_session_return *rotate_return,
+               bool quiet_rotation,
+               enum lttng_trace_chunk_command_type command)
+{
+       int ret;
+       uint64_t ongoing_rotation_chunk_id;
+       enum lttng_error_code cmd_ret = LTTNG_OK;
+       struct lttng_trace_chunk *chunk_being_archived = NULL;
+       struct lttng_trace_chunk *new_trace_chunk = NULL;
+       enum lttng_trace_chunk_status chunk_status;
+       bool failed_to_rotate = false;
+       enum lttng_error_code rotation_fail_code = LTTNG_OK;
+
+       LTTNG_ASSERT(session);
+
+       if (!session->has_been_started) {
+               cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
+               goto end;
+       }
+
+       /*
+        * Explicit rotation is not supported for live sessions.
+        * However, live sessions can perform a quiet rotation on
+        * destroy.
+        * Rotation is not supported for snapshot traces (no output).
+        */
+       if ((!quiet_rotation && session->live_timer) ||
+                       !session->output_traces) {
+               cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
+               goto end;
+       }
+
+       /* Unsupported feature in lttng-relayd before 2.11. */
+       if (!quiet_rotation && session->consumer->type == CONSUMER_DST_NET &&
+                       (session->consumer->relay_major_version == 2 &&
+                       session->consumer->relay_minor_version < 11)) {
+               cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE_RELAY;
+               goto end;
+       }
+
+       /* Unsupported feature in lttng-modules before 2.8 (lack of sequence number). */
+       if (session->kernel_session && !kernel_supports_ring_buffer_packet_sequence_number()) {
+               cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE_KERNEL;
+               goto end;
+       }
+
+       if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
+               DBG("Refusing to launch a rotation; a rotation is already in progress for session %s",
+                               session->name);
+               cmd_ret = LTTNG_ERR_ROTATION_PENDING;
+               goto end;
+       }
+
+       /*
+        * After a stop, we only allow one rotation to occur; further
+        * rotations are useless until a new start.
+        */
+       if (session->rotated_after_last_stop) {
+               DBG("Session \"%s\" was already rotated after stop, refusing rotation",
+                               session->name);
+               cmd_ret = LTTNG_ERR_ROTATION_MULTIPLE_AFTER_STOP;
+               goto end;
+       }
+
+       /*
+        * After a stop followed by a clear, disallow subsequent rotations as
+        * they would generate empty chunks.
+        */
+       if (session->cleared_after_last_stop) {
+               DBG("Session \"%s\" was already cleared after stop, refusing rotation",
+                               session->name);
+               cmd_ret = LTTNG_ERR_ROTATION_AFTER_STOP_CLEAR;
+               goto end;
+       }
+
+       if (session->active) {
+               new_trace_chunk = session_create_new_trace_chunk(session, NULL,
+                               NULL, NULL);
+               if (!new_trace_chunk) {
+                       cmd_ret = LTTNG_ERR_CREATE_DIR_FAIL;
+                       goto error;
+               }
+       }
+
+       /*
+        * The current trace chunk becomes the chunk being archived.
+        *
+        * After this point, "chunk_being_archived" must absolutely
+        * be closed on the consumer(s), otherwise it will never be
+        * cleaned-up, which will result in a leak.
+        */
+       ret = session_set_trace_chunk(session, new_trace_chunk,
+                       &chunk_being_archived);
+       if (ret) {
+               cmd_ret = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
+               goto error;
+       }
+
+       if (session->kernel_session) {
+               cmd_ret = kernel_rotate_session(session);
+               if (cmd_ret != LTTNG_OK) {
+                       failed_to_rotate = true;
+                       rotation_fail_code = cmd_ret;
+               }
+       }
+       if (session->ust_session) {
+               cmd_ret = ust_app_rotate_session(session);
+               if (cmd_ret != LTTNG_OK) {
+                       failed_to_rotate = true;
+                       rotation_fail_code = cmd_ret;
+               }
+       }
+
+       if (!session->active) {
+               session->rotated_after_last_stop = true;
+       }
+
+       if (!chunk_being_archived) {
+               DBG("Rotating session \"%s\" from a \"NULL\" trace chunk to a new trace chunk, skipping completion check",
+                               session->name);
+               if (failed_to_rotate) {
+                       cmd_ret = rotation_fail_code;
+                       goto error;
+               }
+               cmd_ret = LTTNG_OK;
+               goto end;
+       }
+
+       session->rotation_state = LTTNG_ROTATION_STATE_ONGOING;
+       chunk_status = lttng_trace_chunk_get_id(chunk_being_archived,
+                       &ongoing_rotation_chunk_id);
+       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+
+       ret = session_close_trace_chunk(session, chunk_being_archived,
+               command, session->last_chunk_path);
+       if (ret) {
+               cmd_ret = LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
+               goto error;
+       }
+
+       if (failed_to_rotate) {
+               cmd_ret = rotation_fail_code;
+               goto error;
+       }
+
+       session->quiet_rotation = quiet_rotation;
+       ret = timer_session_rotation_pending_check_start(session,
+                       DEFAULT_ROTATE_PENDING_TIMER);
+       if (ret) {
+               cmd_ret = LTTNG_ERR_UNK;
+               goto error;
+       }
+
+       if (rotate_return) {
+               rotate_return->rotation_id = ongoing_rotation_chunk_id;
+       }
+
+       session->chunk_being_archived = chunk_being_archived;
+       chunk_being_archived = NULL;
+       if (!quiet_rotation) {
+               ret = notification_thread_command_session_rotation_ongoing(
+                               the_notification_thread_handle, session->name,
+                               session->uid, session->gid,
+                               ongoing_rotation_chunk_id);
+               if (ret != LTTNG_OK) {
+                       ERR("Failed to notify notification thread that a session rotation is ongoing for session %s",
+                                       session->name);
+                       cmd_ret = (lttng_error_code) ret;
+               }
+       }
+
+       DBG("Cmd rotate session %s, archive_id %" PRIu64 " sent",
+                       session->name, ongoing_rotation_chunk_id);
+end:
+       lttng_trace_chunk_put(new_trace_chunk);
+       lttng_trace_chunk_put(chunk_being_archived);
+       ret = (cmd_ret == LTTNG_OK) ? cmd_ret : -((int) cmd_ret);
+       return ret;
+error:
+       if (session_reset_rotation_state(session,
+                       LTTNG_ROTATION_STATE_ERROR)) {
+               ERR("Failed to reset rotation state of session \"%s\"",
+                               session->name);
+       }
+       goto end;
+}
+
+/*
+ * Command LTTNG_ROTATION_GET_INFO from the lttng-ctl library.
+ *
+ * Check if the session has finished its rotation.
+ *
+ * Return LTTNG_OK on success or else an LTTNG_ERR code.
+ */
+int cmd_rotate_get_info(struct ltt_session *session,
+               struct lttng_rotation_get_info_return *info_return,
+               uint64_t rotation_id)
+{
+       enum lttng_error_code cmd_ret = LTTNG_OK;
+       enum lttng_rotation_state rotation_state;
+
+       DBG("Cmd rotate_get_info session %s, rotation id %" PRIu64, session->name,
+                       rotation_id);
+
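+       /*
+        * Determine which state to report for the queried rotation id: if a
+        * chunk is currently being archived, the rotation is either ongoing
+        * or expired; otherwise, compare against the last archived chunk id.
+        */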
+       if (session->chunk_being_archived) {
+               enum lttng_trace_chunk_status chunk_status;
+               uint64_t chunk_id;
+
+               chunk_status = lttng_trace_chunk_get_id(
+                               session->chunk_being_archived,
+                               &chunk_id);
+               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+
+               rotation_state = rotation_id == chunk_id ?
+                               LTTNG_ROTATION_STATE_ONGOING :
+                               LTTNG_ROTATION_STATE_EXPIRED;
+       } else {
+               if (session->last_archived_chunk_id.is_set &&
+                               rotation_id != session->last_archived_chunk_id.value) {
+                       rotation_state = LTTNG_ROTATION_STATE_EXPIRED;
+               } else {
+                       rotation_state = session->rotation_state;
+               }
+       }
+
+       switch (rotation_state) {
+       case LTTNG_ROTATION_STATE_NO_ROTATION:
+               DBG("Reporting that no rotation has occurred within the lifetime of session \"%s\"",
+                               session->name);
+               goto end;
+       case LTTNG_ROTATION_STATE_EXPIRED:
+               DBG("Reporting that the rotation state of rotation id %" PRIu64 " of session \"%s\" has expired",
+                               rotation_id, session->name);
+               break;
+       case LTTNG_ROTATION_STATE_ONGOING:
+               DBG("Reporting that rotation id %" PRIu64 " of session \"%s\" is still pending",
+                               rotation_id, session->name);
+               break;
+       case LTTNG_ROTATION_STATE_COMPLETED:
+       {
+               int fmt_ret;
+               char *chunk_path;
+               char *current_tracing_path_reply;
+               size_t current_tracing_path_reply_len;
+
+               DBG("Reporting that rotation id %" PRIu64 " of session \"%s\" is completed",
+                               rotation_id, session->name);
+
+               switch (session_get_consumer_destination_type(session)) {
+               case CONSUMER_DST_LOCAL:
+                       current_tracing_path_reply =
+                                       info_return->location.local.absolute_path;
+                       current_tracing_path_reply_len =
+                                       sizeof(info_return->location.local.absolute_path);
+                       info_return->location_type =
+                                       (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_LOCAL;
+                       fmt_ret = asprintf(&chunk_path,
+                                       "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY "/%s",
+                                       session_get_base_path(session),
+                                       session->last_archived_chunk_name);
+                       if (fmt_ret == -1) {
+                               PERROR("Failed to format the path of the last archived trace chunk");
+                               info_return->status = LTTNG_ROTATION_STATUS_ERROR;
+                               cmd_ret = LTTNG_ERR_UNK;
+                               goto end;
+                       }
+                       break;
+               case CONSUMER_DST_NET:
+               {
+                       uint16_t ctrl_port, data_port;
+
+                       current_tracing_path_reply =
+                                       info_return->location.relay.relative_path;
+                       current_tracing_path_reply_len =
+                                       sizeof(info_return->location.relay.relative_path);
+                       /* Currently the only supported relay protocol. */
+                       info_return->location.relay.protocol =
+                                       (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_RELAY_PROTOCOL_TYPE_TCP;
+
+                       fmt_ret = lttng_strncpy(info_return->location.relay.host,
+                                       session_get_net_consumer_hostname(session),
+                                       sizeof(info_return->location.relay.host));
+                       if (fmt_ret) {
+                               ERR("Failed to copy host name to rotate_get_info reply");
+                               info_return->status = LTTNG_ROTATION_STATUS_ERROR;
+                               cmd_ret = LTTNG_ERR_SET_URL;
+                               goto end;
+                       }
+
+                       session_get_net_consumer_ports(session, &ctrl_port, &data_port);
+                       info_return->location.relay.ports.control = ctrl_port;
+                       info_return->location.relay.ports.data = data_port;
+                       info_return->location_type =
+                                       (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_RELAY;
+                       chunk_path = strdup(session->last_chunk_path);
+                       if (!chunk_path) {
+                               ERR("Failed to allocate the path of the last archived trace chunk");
+                               info_return->status = LTTNG_ROTATION_STATUS_ERROR;
+                               cmd_ret = LTTNG_ERR_UNK;
+                               goto end;
+                       }
+                       break;
+               }
+               default:
+                       abort();
+               }
+
+               fmt_ret = lttng_strncpy(current_tracing_path_reply,
+                               chunk_path, current_tracing_path_reply_len);
+               free(chunk_path);
+               if (fmt_ret) {
+                       ERR("Failed to copy path of the last archived trace chunk to rotate_get_info reply");
+                       info_return->status = LTTNG_ROTATION_STATUS_ERROR;
+                       cmd_ret = LTTNG_ERR_UNK;
+                       goto end;
+               }
+
+               break;
+       }
+       case LTTNG_ROTATION_STATE_ERROR:
+               DBG("Reporting that an error occurred during rotation %" PRIu64 " of session \"%s\"",
+                               rotation_id, session->name);
+               break;
+       default:
+               abort();
+       }
+
+       cmd_ret = LTTNG_OK;
+end:
+       info_return->status = (int32_t) rotation_state;
+       return cmd_ret;
+}
+
+/*
+ * Command LTTNG_ROTATION_SET_SCHEDULE from the lttng-ctl library.
+ *
+ * Configure the automatic rotation parameters.
+ * Setting 'activate' to true activates the rotation schedule type with
+ * 'new_value'; setting it to false deactivates the rotation schedule and
+ * validates that 'new_value' matches the currently active value.
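+ * For example, activating a periodic schedule stores 'new_value' in
+ * session->rotate_timer_period; a later deactivation must pass that same
+ * value and resets the field back to 0.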
+ *
+ * Return LTTNG_OK on success or else a positive LTTNG_ERR code.
+ */
+int cmd_rotation_set_schedule(struct ltt_session *session,
+               bool activate, enum lttng_rotation_schedule_type schedule_type,
+               uint64_t new_value,
+               struct notification_thread_handle *notification_thread_handle)
+{
+       int ret;
+       uint64_t *parameter_value;
+
+       LTTNG_ASSERT(session);
+
+       DBG("Cmd rotate set schedule session %s", session->name);
+
+       if (session->live_timer || !session->output_traces) {
+               DBG("Failing ROTATION_SET_SCHEDULE command as the rotation feature is not available for this session");
+               ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
+               goto end;
+       }
+
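+       /* Select which schedule parameter the requested type maps to. */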
+       switch (schedule_type) {
+       case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
+               parameter_value = &session->rotate_size;
+               break;
+       case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
+               parameter_value = &session->rotate_timer_period;
+               if (new_value >= UINT_MAX) {
+                       DBG("Failing ROTATION_SET_SCHEDULE command as the value requested for a periodic rotation schedule is invalid: %" PRIu64 " >= %u (UINT_MAX)",
+                                       new_value, UINT_MAX);
+                       ret = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+               break;
+       default:
+               WARN("Failing ROTATION_SET_SCHEDULE command on unknown schedule type");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       /* Improper use of the API. */
+       if (new_value == -1ULL) {
+               WARN("Failing ROTATION_SET_SCHEDULE command as the value requested is -1");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       /*
+        * As indicated in struct ltt_session's comments, a value of 0 means
+        * that this rotation schedule type is not in use.
+        *
+        * Reject the command if we were asked to activate a schedule that was
+        * already active.
+        */
+       if (activate && *parameter_value != 0) {
+               DBG("Failing ROTATION_SET_SCHEDULE (activate) command as the schedule is already active");
+               ret = LTTNG_ERR_ROTATION_SCHEDULE_SET;
+               goto end;
+       }
+
+       /*
+        * Reject the command if we were asked to deactivate a schedule that was
+        * not active.
+        */
+       if (!activate && *parameter_value == 0) {
+               DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as the schedule is already inactive");
+               ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
+               goto end;
+       }
+
+       /*
+        * Reject the command if we were asked to deactivate a schedule that
+        * doesn't exist.
+        */
+       if (!activate && *parameter_value != new_value) {
+                       DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as a nonexistent schedule was provided");
+               ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
+               goto end;
+       }
+
+       *parameter_value = activate ? new_value : 0;
+
+       switch (schedule_type) {
+       case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
+               if (activate && session->active) {
+                       /*
+                        * Only start the timer if the session is active,
+                        * otherwise it will be started when the session starts.
+                        */
+                       ret = timer_session_rotation_schedule_timer_start(
+                                       session, new_value);
+                       if (ret) {
+                               ERR("Failed to enable session rotation timer in ROTATION_SET_SCHEDULE command");
+                               ret = LTTNG_ERR_UNK;
+                               goto end;
+                       }
+               } else {
+                       ret = timer_session_rotation_schedule_timer_stop(
+                                       session);
+                       if (ret) {
+                               ERR("Failed to disable session rotation timer in ROTATION_SET_SCHEDULE command");
+                               ret = LTTNG_ERR_UNK;
+                               goto end;
+                       }
+               }
+               break;
+       case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
+               if (activate) {
+                       ret = subscribe_session_consumed_size_rotation(session,
+                                       new_value, notification_thread_handle);
+                       if (ret) {
+                               ERR("Failed to enable consumed-size notification in ROTATION_SET_SCHEDULE command");
+                               ret = LTTNG_ERR_UNK;
+                               goto end;
+                       }
+               } else {
+                       ret = unsubscribe_session_consumed_size_rotation(session,
+                                       notification_thread_handle);
+                       if (ret) {
+                               ERR("Failed to disable consumed-size notification in ROTATION_SET_SCHEDULE command");
+                               ret = LTTNG_ERR_UNK;
+                               goto end;
+                       }
+
+               }
+               break;
+       default:
+               /* Would have been caught before. */
+               abort();
+       }
+
+       ret = LTTNG_OK;
+
+       goto end;
+
+end:
+       return ret;
+}
+
+/* Wait for a given path to be removed before continuing. */
+static enum lttng_error_code wait_on_path(void *path_data)
+{
+       const char *shm_path = (const char *) path_data;
+
+       DBG("Waiting for the shm path at %s to be removed before completing session destruction",
+                       shm_path);
+       while (true) {
+               int ret;
+               struct stat st;
+
+               ret = stat(shm_path, &st);
+               if (ret) {
+                       if (errno != ENOENT) {
+                               PERROR("stat() returned an error while checking for the existence of the shm path");
+                       } else {
+                               DBG("shm path no longer exists, completing the destruction of session");
+                       }
+                       break;
+               } else {
+                       if (!S_ISDIR(st.st_mode)) {
+                               ERR("The type of shm path %s returned by stat() is not a directory; aborting the wait for shm path removal",
+                                               shm_path);
+                               break;
+                       }
+               }
+               usleep(SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US);
+       }
+       return LTTNG_OK;
+}
+
+/*
+ * Returns a pointer to a handler to run on completion of a command.
+ * Returns NULL if no handler has to be run for the last command executed.
+ */
+const struct cmd_completion_handler *cmd_pop_completion_handler(void)
+{
+       struct cmd_completion_handler *handler = current_completion_handler;
+
+       current_completion_handler = NULL;
+       return handler;
+}
+
+/*
+ * Init command subsystem.
+ */
+void cmd_init(void)
+{
+       /*
+        * Set network sequence index to 1 for streams to match a relayd
+        * socket on the consumer side.
+        */
+       pthread_mutex_lock(&relayd_net_seq_idx_lock);
+       relayd_net_seq_idx = 1;
+       pthread_mutex_unlock(&relayd_net_seq_idx_lock);
+
+       DBG("Command subsystem initialized");
+}
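
For context, a minimal client-side sketch of how the ROTATION_SET_SCHEDULE command handled by cmd_rotation_set_schedule() above is reached. It assumes the public lttng-ctl rotation API declared in the exported <lttng/rotation.h> header (lttng_rotation_schedule_periodic_create(), lttng_rotation_schedule_periodic_set_period(), lttng_session_add_rotation_schedule(), lttng_rotation_schedule_destroy()); those names come from the public headers, not from this diff, so treat this as an illustrative sketch rather than the authoritative call path:

    #include <lttng/lttng.h>

    /*
     * Illustrative only: ask the session daemon to rotate the given
     * session every 60 seconds (the period is expressed in microseconds).
     */
    static int add_periodic_rotation_schedule(const char *session_name)
    {
            int ret = -1;
            struct lttng_rotation_schedule *schedule =
                            lttng_rotation_schedule_periodic_create();

            if (!schedule) {
                    return ret;
            }

            if (lttng_rotation_schedule_periodic_set_period(schedule,
                            60ULL * 1000000ULL) != LTTNG_ROTATION_STATUS_OK) {
                    goto end;
            }

            /* Ends up in cmd_rotation_set_schedule() in the session daemon. */
            if (lttng_session_add_rotation_schedule(session_name, schedule) !=
                            LTTNG_ROTATION_STATUS_OK) {
                    goto end;
            }

            ret = 0;
    end:
            lttng_rotation_schedule_destroy(schedule);
            return ret;
    }
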
diff --git a/src/bin/lttng-sessiond/condition-internal.c b/src/bin/lttng-sessiond/condition-internal.c
deleted file mode 100644 (file)
index dd9c244..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
- *
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- */
-
-#include <common/hashtable/utils.h>
-#include <common/hashtable/hashtable.h>
-
-#include <lttng/condition/condition.h>
-#include <lttng/condition/condition-internal.h>
-#include <lttng/condition/buffer-usage-internal.h>
-#include <lttng/condition/session-consumed-size-internal.h>
-#include <lttng/condition/session-rotation-internal.h>
-#include <lttng/condition/event-rule-matches-internal.h>
-#include <lttng/condition/event-rule-matches.h>
-#include <lttng/event-rule/event-rule-internal.h>
-#include <lttng/condition/event-rule-matches-internal.h>
-#include "condition-internal.h"
-
-static
-unsigned long lttng_condition_buffer_usage_hash(
-       const struct lttng_condition *_condition)
-{
-       unsigned long hash;
-       unsigned long condition_type;
-       struct lttng_condition_buffer_usage *condition;
-
-       condition = container_of(_condition,
-                       struct lttng_condition_buffer_usage, parent);
-
-       condition_type = (unsigned long) condition->parent.type;
-       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
-       if (condition->session_name) {
-               hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
-       }
-       if (condition->channel_name) {
-               hash ^= hash_key_str(condition->channel_name, lttng_ht_seed);
-       }
-       if (condition->domain.set) {
-               hash ^= hash_key_ulong(
-                               (void *) condition->domain.type,
-                               lttng_ht_seed);
-       }
-       if (condition->threshold_ratio.set) {
-               hash ^= hash_key_u64(&condition->threshold_ratio.value, lttng_ht_seed);
-       } else if (condition->threshold_bytes.set) {
-               uint64_t val;
-
-               val = condition->threshold_bytes.value;
-               hash ^= hash_key_u64(&val, lttng_ht_seed);
-       }
-       return hash;
-}
-
-static
-unsigned long lttng_condition_session_consumed_size_hash(
-       const struct lttng_condition *_condition)
-{
-       unsigned long hash;
-       unsigned long condition_type =
-                       (unsigned long) LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE;
-       struct lttng_condition_session_consumed_size *condition;
-       uint64_t val;
-
-       condition = container_of(_condition,
-                       struct lttng_condition_session_consumed_size, parent);
-
-       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
-       if (condition->session_name) {
-               hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
-       }
-       val = condition->consumed_threshold_bytes.value;
-       hash ^= hash_key_u64(&val, lttng_ht_seed);
-       return hash;
-}
-
-static
-unsigned long lttng_condition_session_rotation_hash(
-       const struct lttng_condition *_condition)
-{
-       unsigned long hash, condition_type;
-       struct lttng_condition_session_rotation *condition;
-
-       condition = container_of(_condition,
-                       struct lttng_condition_session_rotation, parent);
-       condition_type = (unsigned long) condition->parent.type;
-       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
-       LTTNG_ASSERT(condition->session_name);
-       hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
-       return hash;
-}
-
-static unsigned long lttng_condition_event_rule_matches_hash(
-               const struct lttng_condition *condition)
-{
-       unsigned long hash, condition_type;
-       enum lttng_condition_status condition_status;
-       const struct lttng_event_rule *event_rule;
-
-       condition_type = (unsigned long) condition->type;
-       condition_status = lttng_condition_event_rule_matches_get_rule(
-                       condition, &event_rule);
-       LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
-
-       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
-       return hash ^ lttng_event_rule_hash(event_rule);
-}
-
-/*
- * The lttng_condition hashing code is kept in this file (rather than
- * condition.c) since it makes use of GPLv2 code (hashtable utils), which we
- * don't want to link in liblttng-ctl.
- */
-unsigned long lttng_condition_hash(const struct lttng_condition *condition)
-{
-       switch (condition->type) {
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
-               return lttng_condition_buffer_usage_hash(condition);
-       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
-               return lttng_condition_session_consumed_size_hash(condition);
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
-               return lttng_condition_session_rotation_hash(condition);
-       case LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES:
-               return lttng_condition_event_rule_matches_hash(condition);
-       default:
-               abort();
-       }
-}
-
-struct lttng_condition *lttng_condition_copy(const struct lttng_condition *condition)
-{
-       int ret;
-       struct lttng_payload copy_buffer;
-       struct lttng_condition *copy = NULL;
-
-       lttng_payload_init(&copy_buffer);
-
-       ret = lttng_condition_serialize(condition, &copy_buffer);
-       if (ret < 0) {
-               goto end;
-       }
-
-       {
-               struct lttng_payload_view view =
-                               lttng_payload_view_from_payload(
-                                               &copy_buffer, 0, -1);
-
-               ret = lttng_condition_create_from_payload(
-                               &view, &copy);
-               if (ret < 0) {
-                       copy = NULL;
-                       goto end;
-               }
-       }
-
-end:
-       lttng_payload_reset(&copy_buffer);
-       return copy;
-}
diff --git a/src/bin/lttng-sessiond/condition-internal.cpp b/src/bin/lttng-sessiond/condition-internal.cpp
new file mode 100644 (file)
index 0000000..dd9c244
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ */
+
+#include <common/hashtable/utils.h>
+#include <common/hashtable/hashtable.h>
+
+#include <lttng/condition/condition.h>
+#include <lttng/condition/condition-internal.h>
+#include <lttng/condition/buffer-usage-internal.h>
+#include <lttng/condition/session-consumed-size-internal.h>
+#include <lttng/condition/session-rotation-internal.h>
+#include <lttng/condition/event-rule-matches-internal.h>
+#include <lttng/condition/event-rule-matches.h>
+#include <lttng/event-rule/event-rule-internal.h>
+#include <lttng/condition/event-rule-matches-internal.h>
+#include "condition-internal.h"
+
+static
+unsigned long lttng_condition_buffer_usage_hash(
+       const struct lttng_condition *_condition)
+{
+       unsigned long hash;
+       unsigned long condition_type;
+       struct lttng_condition_buffer_usage *condition;
+
+       condition = container_of(_condition,
+                       struct lttng_condition_buffer_usage, parent);
+
+       condition_type = (unsigned long) condition->parent.type;
+       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
+       if (condition->session_name) {
+               hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
+       }
+       if (condition->channel_name) {
+               hash ^= hash_key_str(condition->channel_name, lttng_ht_seed);
+       }
+       if (condition->domain.set) {
+               hash ^= hash_key_ulong(
+                               (void *) condition->domain.type,
+                               lttng_ht_seed);
+       }
+       if (condition->threshold_ratio.set) {
+               hash ^= hash_key_u64(&condition->threshold_ratio.value, lttng_ht_seed);
+       } else if (condition->threshold_bytes.set) {
+               uint64_t val;
+
+               val = condition->threshold_bytes.value;
+               hash ^= hash_key_u64(&val, lttng_ht_seed);
+       }
+       return hash;
+}
+
+static
+unsigned long lttng_condition_session_consumed_size_hash(
+       const struct lttng_condition *_condition)
+{
+       unsigned long hash;
+       unsigned long condition_type =
+                       (unsigned long) LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE;
+       struct lttng_condition_session_consumed_size *condition;
+       uint64_t val;
+
+       condition = container_of(_condition,
+                       struct lttng_condition_session_consumed_size, parent);
+
+       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
+       if (condition->session_name) {
+               hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
+       }
+       val = condition->consumed_threshold_bytes.value;
+       hash ^= hash_key_u64(&val, lttng_ht_seed);
+       return hash;
+}
+
+static
+unsigned long lttng_condition_session_rotation_hash(
+       const struct lttng_condition *_condition)
+{
+       unsigned long hash, condition_type;
+       struct lttng_condition_session_rotation *condition;
+
+       condition = container_of(_condition,
+                       struct lttng_condition_session_rotation, parent);
+       condition_type = (unsigned long) condition->parent.type;
+       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
+       LTTNG_ASSERT(condition->session_name);
+       hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
+       return hash;
+}
+
+static unsigned long lttng_condition_event_rule_matches_hash(
+               const struct lttng_condition *condition)
+{
+       unsigned long hash, condition_type;
+       enum lttng_condition_status condition_status;
+       const struct lttng_event_rule *event_rule;
+
+       condition_type = (unsigned long) condition->type;
+       condition_status = lttng_condition_event_rule_matches_get_rule(
+                       condition, &event_rule);
+       LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
+
+       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
+       return hash ^ lttng_event_rule_hash(event_rule);
+}
+
+/*
+ * The lttng_condition hashing code is kept in this file (rather than
+ * condition.c) since it makes use of GPLv2 code (hashtable utils), which we
+ * don't want to link in liblttng-ctl.
+ */
+unsigned long lttng_condition_hash(const struct lttng_condition *condition)
+{
+       switch (condition->type) {
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+               return lttng_condition_buffer_usage_hash(condition);
+       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
+               return lttng_condition_session_consumed_size_hash(condition);
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
+               return lttng_condition_session_rotation_hash(condition);
+       case LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES:
+               return lttng_condition_event_rule_matches_hash(condition);
+       default:
+               abort();
+       }
+}
+
+struct lttng_condition *lttng_condition_copy(const struct lttng_condition *condition)
+{
+       int ret;
+       struct lttng_payload copy_buffer;
+       struct lttng_condition *copy = NULL;
+
+       lttng_payload_init(&copy_buffer);
+
+       ret = lttng_condition_serialize(condition, &copy_buffer);
+       if (ret < 0) {
+               goto end;
+       }
+
+       {
+               struct lttng_payload_view view =
+                               lttng_payload_view_from_payload(
+                                               &copy_buffer, 0, -1);
+
+               ret = lttng_condition_create_from_payload(
+                               &view, &copy);
+               if (ret < 0) {
+                       copy = NULL;
+                       goto end;
+               }
+       }
+
+end:
+       lttng_payload_reset(&copy_buffer);
+       return copy;
+}
index 270a8b1afbf3da33919c3e7864bf6908ff12da44..fa8f7be044ef011062a964743d52d1eb5dbc64e5 100644 (file)
 
 #include <lttng/condition/condition.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * The lttng_condition hashing code is kept in this file (rather than
  * condition.c) since it makes use of GPLv2 code (hashtable utils), which we
@@ -19,4 +23,9 @@ unsigned long lttng_condition_hash(const struct lttng_condition *condition);
 
 struct lttng_condition *lttng_condition_copy(
                const struct lttng_condition *condition);
+
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_SESSIOND_CONDITION_INTERNAL_H */
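
The guards added above follow the usual pattern for keeping a C header consumable from both C and C++ translation units. A minimal sketch of the same pattern, using a hypothetical header and function name:

    /* example-api.h -- hypothetical header included from both .c and .cpp files. */
    #ifndef EXAMPLE_API_H
    #define EXAMPLE_API_H

    #ifdef __cplusplus
    extern "C" {
    #endif

    /* Declarations keep C linkage even when compiled as C++. */
    int example_api_do_work(int value);

    #ifdef __cplusplus
    }
    #endif

    #endif /* EXAMPLE_API_H */
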
diff --git a/src/bin/lttng-sessiond/consumer.c b/src/bin/lttng-sessiond/consumer.c
deleted file mode 100644 (file)
index 7c36d61..0000000
+++ /dev/null
@@ -1,2161 +0,0 @@
-/*
- * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
- * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <inttypes.h>
-
-#include <common/common.h>
-#include <common/defaults.h>
-#include <common/uri.h>
-#include <common/relayd/relayd.h>
-#include <common/string-utils/format.h>
-
-#include "consumer.h"
-#include "health-sessiond.h"
-#include "ust-app.h"
-#include "utils.h"
-#include "lttng-sessiond.h"
-
-/*
- * Return allocated full pathname of the session using the consumer trace path
- * and subdir if available.
- *
- * The caller can safely free(3) the returned value. On error, NULL is
- * returned.
- */
-char *setup_channel_trace_path(struct consumer_output *consumer,
-               const char *session_path, size_t *consumer_path_offset)
-{
-       int ret;
-       char *pathname;
-
-       LTTNG_ASSERT(consumer);
-       LTTNG_ASSERT(session_path);
-
-       health_code_update();
-
-       /*
-        * Allocate the string ourself to make sure we never exceed
-        * LTTNG_PATH_MAX.
-        */
-       pathname = zmalloc(LTTNG_PATH_MAX);
-       if (!pathname) {
-               goto error;
-       }
-
-       /* Get correct path name destination */
-       if (consumer->type == CONSUMER_DST_NET &&
-                       consumer->relay_major_version == 2 &&
-                       consumer->relay_minor_version < 11) {
-               ret = snprintf(pathname, LTTNG_PATH_MAX, "%s%s/%s/%s",
-                               consumer->dst.net.base_dir,
-                               consumer->chunk_path, consumer->domain_subdir,
-                               session_path);
-               *consumer_path_offset = 0;
-       } else {
-               ret = snprintf(pathname, LTTNG_PATH_MAX, "%s/%s",
-                               consumer->domain_subdir, session_path);
-               *consumer_path_offset = strlen(consumer->domain_subdir) + 1;
-       }
-       DBG3("Consumer trace path relative to current trace chunk: \"%s\"",
-                       pathname);
-       if (ret < 0) {
-               PERROR("Failed to format channel path");
-               goto error;
-       } else if (ret >= LTTNG_PATH_MAX) {
-               ERR("Truncation occurred while formatting channel path");
-               goto error;
-       }
-
-       return pathname;
-error:
-       free(pathname);
-       return NULL;
-}
-
-/*
- * Send a data payload using a given consumer socket of size len.
- *
- * The consumer socket lock MUST be acquired before calling this since this
- * function can change the fd value.
- *
- * Return 0 on success else a negative value on error.
- */
-int consumer_socket_send(
-               struct consumer_socket *socket, const void *msg, size_t len)
-{
-       int fd;
-       ssize_t size;
-
-       LTTNG_ASSERT(socket);
-       LTTNG_ASSERT(socket->fd_ptr);
-       LTTNG_ASSERT(msg);
-
-       /* Consumer socket is invalid. Stopping. */
-       fd = *socket->fd_ptr;
-       if (fd < 0) {
-               goto error;
-       }
-
-       size = lttcomm_send_unix_sock(fd, msg, len);
-       if (size < 0) {
-               /* The above call will print a PERROR on error. */
-               DBG("Error when sending data to consumer on sock %d", fd);
-               /*
-                * At this point, the socket is not usable anymore thus closing it and
-                * setting the file descriptor to -1 so it is not reused.
-                */
-
-               /* This call will PERROR on error. */
-               (void) lttcomm_close_unix_sock(fd);
-               *socket->fd_ptr = -1;
-               goto error;
-       }
-
-       return 0;
-
-error:
-       return -1;
-}
-
-/*
- * Receive a data payload using a given consumer socket of size len.
- *
- * The consumer socket lock MUST be acquired before calling this since this
- * function can change the fd value.
- *
- * Return 0 on success else a negative value on error.
- */
-int consumer_socket_recv(struct consumer_socket *socket, void *msg, size_t len)
-{
-       int fd;
-       ssize_t size;
-
-       LTTNG_ASSERT(socket);
-       LTTNG_ASSERT(socket->fd_ptr);
-       LTTNG_ASSERT(msg);
-
-       /* Consumer socket is invalid. Stopping. */
-       fd = *socket->fd_ptr;
-       if (fd < 0) {
-               goto error;
-       }
-
-       size = lttcomm_recv_unix_sock(fd, msg, len);
-       if (size <= 0) {
-               /* The above call will print a PERROR on error. */
-               DBG("Error when receiving data from the consumer socket %d", fd);
-               /*
-                * At this point, the socket is not usable anymore thus closing it and
-                * setting the file descriptor to -1 so it is not reused.
-                */
-
-               /* This call will PERROR on error. */
-               (void) lttcomm_close_unix_sock(fd);
-               *socket->fd_ptr = -1;
-               goto error;
-       }
-
-       return 0;
-
-error:
-       return -1;
-}
-
-/*
- * Receive a reply command status message from the consumer. Consumer socket
- * lock MUST be acquired before calling this function.
- *
- * Return 0 on success, -1 on recv error or a negative lttng error code which
- * was possibly returned by the consumer.
- */
-int consumer_recv_status_reply(struct consumer_socket *sock)
-{
-       int ret;
-       struct lttcomm_consumer_status_msg reply;
-
-       LTTNG_ASSERT(sock);
-
-       ret = consumer_socket_recv(sock, &reply, sizeof(reply));
-       if (ret < 0) {
-               goto end;
-       }
-
-       if (reply.ret_code == LTTCOMM_CONSUMERD_SUCCESS) {
-               /* All good. */
-               ret = 0;
-       } else {
-               ret = -reply.ret_code;
-               DBG("Consumer ret code %d", ret);
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Once the ASK_CHANNEL command is sent to the consumer, the channel
- * information are sent back. This call receives that data and populates key
- * and stream_count.
- *
- * On success return 0 and both key and stream_count are set. On error, a
- * negative value is sent back and both parameters are untouched.
- */
-int consumer_recv_status_channel(struct consumer_socket *sock,
-               uint64_t *key, unsigned int *stream_count)
-{
-       int ret;
-       struct lttcomm_consumer_status_channel reply;
-
-       LTTNG_ASSERT(sock);
-       LTTNG_ASSERT(stream_count);
-       LTTNG_ASSERT(key);
-
-       ret = consumer_socket_recv(sock, &reply, sizeof(reply));
-       if (ret < 0) {
-               goto end;
-       }
-
-       /* An error is possible so don't touch the key and stream_count. */
-       if (reply.ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
-               ret = -1;
-               goto end;
-       }
-
-       *key = reply.key;
-       *stream_count = reply.stream_count;
-       ret = 0;
-
-end:
-       return ret;
-}
-
-/*
- * Send destroy relayd command to consumer.
- *
- * On success return positive value. On error, negative value.
- */
-int consumer_send_destroy_relayd(struct consumer_socket *sock,
-               struct consumer_output *consumer)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(consumer);
-       LTTNG_ASSERT(sock);
-
-       DBG2("Sending destroy relayd command to consumer sock %d", *sock->fd_ptr);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_DESTROY_RELAYD;
-       msg.u.destroy_relayd.net_seq_idx = consumer->net_seq_index;
-
-       pthread_mutex_lock(sock->lock);
-       ret = consumer_socket_send(sock, &msg, sizeof(msg));
-       if (ret < 0) {
-               goto error;
-       }
-
-       /* Don't check the return value. The caller will do it. */
-       ret = consumer_recv_status_reply(sock);
-
-       DBG2("Consumer send destroy relayd command done");
-
-error:
-       pthread_mutex_unlock(sock->lock);
-       return ret;
-}
-
-/*
- * For each consumer socket in the consumer output object, send a destroy
- * relayd command.
- */
-void consumer_output_send_destroy_relayd(struct consumer_output *consumer)
-{
-       struct lttng_ht_iter iter;
-       struct consumer_socket *socket;
-
-       LTTNG_ASSERT(consumer);
-
-       /* Destroy any relayd connection */
-       if (consumer->type == CONSUMER_DST_NET) {
-               rcu_read_lock();
-               cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter, socket,
-                               node.node) {
-                       int ret;
-
-                       /* Send destroy relayd command */
-                       ret = consumer_send_destroy_relayd(socket, consumer);
-                       if (ret < 0) {
-                               DBG("Unable to send destroy relayd command to consumer");
-                               /* Continue since we MUST delete everything at this point. */
-                       }
-               }
-               rcu_read_unlock();
-       }
-}
-
-/*
- * From a consumer_data structure, allocate and add a consumer socket to the
- * consumer output.
- *
- * Return 0 on success, else negative value on error
- */
-int consumer_create_socket(struct consumer_data *data,
-               struct consumer_output *output)
-{
-       int ret = 0;
-       struct consumer_socket *socket;
-
-       LTTNG_ASSERT(data);
-
-       if (output == NULL || data->cmd_sock < 0) {
-               /*
-                * Not an error. Possible there is simply not spawned consumer or it's
-                * disabled for the tracing session asking the socket.
-                */
-               goto error;
-       }
-
-       rcu_read_lock();
-       socket = consumer_find_socket(data->cmd_sock, output);
-       rcu_read_unlock();
-       if (socket == NULL) {
-               socket = consumer_allocate_socket(&data->cmd_sock);
-               if (socket == NULL) {
-                       ret = -1;
-                       goto error;
-               }
-
-               socket->registered = 0;
-               socket->lock = &data->lock;
-               rcu_read_lock();
-               consumer_add_socket(socket, output);
-               rcu_read_unlock();
-       }
-
-       socket->type = data->type;
-
-       DBG3("Consumer socket created (fd: %d) and added to output",
-                       data->cmd_sock);
-
-error:
-       return ret;
-}
-
-/*
- * Return the consumer socket from the given consumer output with the right
- * bitness. On error, returns NULL.
- *
- * The caller MUST acquire a rcu read side lock and keep it until the socket
- * object reference is not needed anymore.
- */
-struct consumer_socket *consumer_find_socket_by_bitness(int bits,
-               const struct consumer_output *consumer)
-{
-       int consumer_fd;
-       struct consumer_socket *socket = NULL;
-
-       switch (bits) {
-       case 64:
-               consumer_fd = uatomic_read(&the_ust_consumerd64_fd);
-               break;
-       case 32:
-               consumer_fd = uatomic_read(&the_ust_consumerd32_fd);
-               break;
-       default:
-               abort();
-               goto end;
-       }
-
-       socket = consumer_find_socket(consumer_fd, consumer);
-       if (!socket) {
-               ERR("Consumer socket fd %d not found in consumer obj %p",
-                               consumer_fd, consumer);
-       }
-
-end:
-       return socket;
-}
-
-/*
- * Find a consumer_socket in a consumer_output hashtable. Read side lock must
- * be acquired before calling this function and across use of the
- * returned consumer_socket.
- */
-struct consumer_socket *consumer_find_socket(int key,
-               const struct consumer_output *consumer)
-{
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_ulong *node;
-       struct consumer_socket *socket = NULL;
-
-       /* Negative keys are lookup failures */
-       if (key < 0 || consumer == NULL) {
-               return NULL;
-       }
-
-       lttng_ht_lookup(consumer->socks, (void *)((unsigned long) key),
-                       &iter);
-       node = lttng_ht_iter_get_node_ulong(&iter);
-       if (node != NULL) {
-               socket = caa_container_of(node, struct consumer_socket, node);
-       }
-
-       return socket;
-}
-
-/*
- * Allocate a new consumer_socket and return the pointer.
- */
-struct consumer_socket *consumer_allocate_socket(int *fd)
-{
-       struct consumer_socket *socket = NULL;
-
-       LTTNG_ASSERT(fd);
-
-       socket = zmalloc(sizeof(struct consumer_socket));
-       if (socket == NULL) {
-               PERROR("zmalloc consumer socket");
-               goto error;
-       }
-
-       socket->fd_ptr = fd;
-       lttng_ht_node_init_ulong(&socket->node, *fd);
-
-error:
-       return socket;
-}
-
-/*
- * Add consumer socket to consumer output object. Read side lock must be
- * acquired before calling this function.
- */
-void consumer_add_socket(struct consumer_socket *sock,
-               struct consumer_output *consumer)
-{
-       LTTNG_ASSERT(sock);
-       LTTNG_ASSERT(consumer);
-
-       lttng_ht_add_unique_ulong(consumer->socks, &sock->node);
-}
-
-/*
- * Delete consumer socket to consumer output object. Read side lock must be
- * acquired before calling this function.
- */
-void consumer_del_socket(struct consumer_socket *sock,
-               struct consumer_output *consumer)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(sock);
-       LTTNG_ASSERT(consumer);
-
-       iter.iter.node = &sock->node.node;
-       ret = lttng_ht_del(consumer->socks, &iter);
-       LTTNG_ASSERT(!ret);
-}
-
-/*
- * RCU destroy call function.
- */
-static void destroy_socket_rcu(struct rcu_head *head)
-{
-       struct lttng_ht_node_ulong *node =
-               caa_container_of(head, struct lttng_ht_node_ulong, head);
-       struct consumer_socket *socket =
-               caa_container_of(node, struct consumer_socket, node);
-
-       free(socket);
-}
-
-/*
- * Destroy and free socket pointer in a call RCU. Read side lock must be
- * acquired before calling this function.
- */
-void consumer_destroy_socket(struct consumer_socket *sock)
-{
-       LTTNG_ASSERT(sock);
-
-       /*
-        * We DO NOT close the file descriptor here since it is global to the
-        * session daemon and is closed only if the consumer dies or a custom
-        * consumer was registered,
-        */
-       if (sock->registered) {
-               DBG3("Consumer socket was registered. Closing fd %d", *sock->fd_ptr);
-               lttcomm_close_unix_sock(*sock->fd_ptr);
-       }
-
-       call_rcu(&sock->node.head, destroy_socket_rcu);
-}
-
-/*
- * Allocate and assign data to a consumer_output object.
- *
- * Return pointer to structure.
- */
-struct consumer_output *consumer_create_output(enum consumer_dst_type type)
-{
-       struct consumer_output *output = NULL;
-
-       output = zmalloc(sizeof(struct consumer_output));
-       if (output == NULL) {
-               PERROR("zmalloc consumer_output");
-               goto error;
-       }
-
-       /* By default, consumer output is enabled */
-       output->enabled = 1;
-       output->type = type;
-       output->net_seq_index = (uint64_t) -1ULL;
-       urcu_ref_init(&output->ref);
-
-       output->socks = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-
-error:
-       return output;
-}
-
-/*
- * Iterate over the consumer output socket hash table and destroy them. The
- * socket file descriptor are only closed if the consumer output was
- * registered meaning it's an external consumer.
- */
-void consumer_destroy_output_sockets(struct consumer_output *obj)
-{
-       struct lttng_ht_iter iter;
-       struct consumer_socket *socket;
-
-       if (!obj->socks) {
-               return;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(obj->socks->ht, &iter.iter, socket, node.node) {
-               consumer_del_socket(socket, obj);
-               consumer_destroy_socket(socket);
-       }
-       rcu_read_unlock();
-}
-
-/*
- * Delete the consumer_output object from the list and free the ptr.
- *
- * Should *NOT* be called with RCU read-side lock held.
- */
-static void consumer_release_output(struct urcu_ref *ref)
-{
-       struct consumer_output *obj =
-               caa_container_of(ref, struct consumer_output, ref);
-
-       consumer_destroy_output_sockets(obj);
-
-       if (obj->socks) {
-               /* Finally destroy HT */
-               ht_cleanup_push(obj->socks);
-       }
-
-       free(obj);
-}
-
-/*
- * Get the consumer_output object.
- */
-void consumer_output_get(struct consumer_output *obj)
-{
-       urcu_ref_get(&obj->ref);
-}
-
-/*
- * Put the consumer_output object.
- *
- * Should *NOT* be called with RCU read-side lock held.
- */
-void consumer_output_put(struct consumer_output *obj)
-{
-       if (!obj) {
-               return;
-       }
-       urcu_ref_put(&obj->ref, consumer_release_output);
-}
-
-/*
- * Copy consumer output and returned the newly allocated copy.
- *
- * Should *NOT* be called with RCU read-side lock held.
- */
-struct consumer_output *consumer_copy_output(struct consumer_output *src)
-{
-       int ret;
-       struct consumer_output *output;
-
-       LTTNG_ASSERT(src);
-
-       output = consumer_create_output(src->type);
-       if (output == NULL) {
-               goto end;
-       }
-       output->enabled = src->enabled;
-       output->net_seq_index = src->net_seq_index;
-       memcpy(output->domain_subdir, src->domain_subdir,
-                       sizeof(output->domain_subdir));
-       output->snapshot = src->snapshot;
-       output->relay_major_version = src->relay_major_version;
-       output->relay_minor_version = src->relay_minor_version;
-       output->relay_allows_clear = src->relay_allows_clear;
-       memcpy(&output->dst, &src->dst, sizeof(output->dst));
-       ret = consumer_copy_sockets(output, src);
-       if (ret < 0) {
-               goto error_put;
-       }
-end:
-       return output;
-
-error_put:
-       consumer_output_put(output);
-       return NULL;
-}
-
-/*
- * Copy consumer sockets from src to dst.
- *
- * Return 0 on success or else a negative value.
- */
-int consumer_copy_sockets(struct consumer_output *dst,
-               struct consumer_output *src)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct consumer_socket *socket, *copy_sock;
-
-       LTTNG_ASSERT(dst);
-       LTTNG_ASSERT(src);
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(src->socks->ht, &iter.iter, socket, node.node) {
-               /* Ignore socket that are already there. */
-               copy_sock = consumer_find_socket(*socket->fd_ptr, dst);
-               if (copy_sock) {
-                       continue;
-               }
-
-               /* Create new socket object. */
-               copy_sock = consumer_allocate_socket(socket->fd_ptr);
-               if (copy_sock == NULL) {
-                       rcu_read_unlock();
-                       ret = -ENOMEM;
-                       goto error;
-               }
-
-               copy_sock->registered = socket->registered;
-               /*
-                * This is valid because this lock is shared accross all consumer
-                * object being the global lock of the consumer data structure of the
-                * session daemon.
-                */
-               copy_sock->lock = socket->lock;
-               consumer_add_socket(copy_sock, dst);
-       }
-       rcu_read_unlock();
-
-error:
-       return ret;
-}
-
-/*
- * Set network URI to the consumer output.
- *
- * Return 0 on success. Return 1 if the URI were equal. Else, negative value on
- * error.
- */
-int consumer_set_network_uri(const struct ltt_session *session,
-               struct consumer_output *output,
-               struct lttng_uri *uri)
-{
-       int ret;
-       struct lttng_uri *dst_uri = NULL;
-
-       /* Code flow error safety net. */
-       LTTNG_ASSERT(output);
-       LTTNG_ASSERT(uri);
-
-       switch (uri->stype) {
-       case LTTNG_STREAM_CONTROL:
-               dst_uri = &output->dst.net.control;
-               output->dst.net.control_isset = 1;
-               if (uri->port == 0) {
-                       /* Assign default port. */
-                       uri->port = DEFAULT_NETWORK_CONTROL_PORT;
-               } else {
-                       if (output->dst.net.data_isset && uri->port ==
-                                       output->dst.net.data.port) {
-                               ret = -LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-               }
-               DBG3("Consumer control URI set with port %d", uri->port);
-               break;
-       case LTTNG_STREAM_DATA:
-               dst_uri = &output->dst.net.data;
-               output->dst.net.data_isset = 1;
-               if (uri->port == 0) {
-                       /* Assign default port. */
-                       uri->port = DEFAULT_NETWORK_DATA_PORT;
-               } else {
-                       if (output->dst.net.control_isset && uri->port ==
-                                       output->dst.net.control.port) {
-                               ret = -LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-               }
-               DBG3("Consumer data URI set with port %d", uri->port);
-               break;
-       default:
-               ERR("Set network uri type unknown %d", uri->stype);
-               ret = -LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       ret = uri_compare(dst_uri, uri);
-       if (!ret) {
-               /* Same URI, don't touch it and return success. */
-               DBG3("URI network compare are the same");
-               goto equal;
-       }
-
-       /* URIs were not equal, replacing it. */
-       memcpy(dst_uri, uri, sizeof(struct lttng_uri));
-       output->type = CONSUMER_DST_NET;
-       if (dst_uri->stype != LTTNG_STREAM_CONTROL) {
-               /* Only the control uri needs to contain the path. */
-               goto end;
-       }
-
-       /*
-        * If the user has specified a subdir as part of the control
-        * URL, the session's base output directory is:
-        *   /RELAYD_OUTPUT_PATH/HOSTNAME/USER_SPECIFIED_DIR
-        *
-        * Hence, the "base_dir" from which all stream files and
-        * session rotation chunks are created takes the form
-        *   /HOSTNAME/USER_SPECIFIED_DIR
-        *
-        * If the user has not specified an output directory as part of
-        * the control URL, the base output directory has the form:
-        *   /RELAYD_OUTPUT_PATH/HOSTNAME/SESSION_NAME-CREATION_TIME
-        *
-        * Hence, the "base_dir" from which all stream files and
-        * session rotation chunks are created takes the form
-        *   /HOSTNAME/SESSION_NAME-CREATION_TIME
-        *
-        * Note that automatically generated session names already
-        * contain the session's creation time. In that case, the
-        * creation time is omitted to prevent it from being duplicated
-        * in the final directory hierarchy.
-        */
-       if (*uri->subdir) {
-               if (strstr(uri->subdir, "../")) {
-                       ERR("Network URI subdirs are not allowed to walk up the path hierarchy");
-                       ret = -LTTNG_ERR_INVALID;
-                       goto error;
-               }
-               ret = snprintf(output->dst.net.base_dir,
-                               sizeof(output->dst.net.base_dir),
-                               "/%s/%s/", session->hostname, uri->subdir);
-       } else {
-               if (session->has_auto_generated_name) {
-                       ret = snprintf(output->dst.net.base_dir,
-                                       sizeof(output->dst.net.base_dir),
-                                       "/%s/%s/", session->hostname,
-                                       session->name);
-               } else {
-                       char session_creation_datetime[16];
-                       size_t strftime_ret;
-                       struct tm *timeinfo;
-
-                       timeinfo = localtime(&session->creation_time);
-                       if (!timeinfo) {
-                               ret = -LTTNG_ERR_FATAL;
-                               goto error;
-                       }
-                       strftime_ret = strftime(session_creation_datetime,
-                                       sizeof(session_creation_datetime),
-                                       "%Y%m%d-%H%M%S", timeinfo);
-                       if (strftime_ret == 0) {
-                               ERR("Failed to format session creation timestamp while setting network URI");
-                               ret = -LTTNG_ERR_FATAL;
-                               goto error;
-                       }
-                       ret = snprintf(output->dst.net.base_dir,
-                                       sizeof(output->dst.net.base_dir),
-                                       "/%s/%s-%s/", session->hostname,
-                                       session->name,
-                                       session_creation_datetime);
-               }
-       }
-       if (ret >= sizeof(output->dst.net.base_dir)) {
-               ret = -LTTNG_ERR_INVALID;
-               ERR("Truncation occurred while setting network output base directory");
-               goto error;
-       } else if (ret == -1) {
-               ret = -LTTNG_ERR_INVALID;
-               PERROR("Error occurred while setting network output base directory");
-               goto error;
-       }
-
-       DBG3("Consumer set network uri base_dir path %s",
-                       output->dst.net.base_dir);
-
-end:
-       return 0;
-equal:
-       return 1;
-error:
-       return ret;
-}
-
-/*
- * Send file descriptor to consumer via sock.
- *
- * The consumer socket lock must be held by the caller.
- */
-int consumer_send_fds(struct consumer_socket *sock, const int *fds,
-               size_t nb_fd)
-{
-       int ret;
-
-       LTTNG_ASSERT(fds);
-       LTTNG_ASSERT(sock);
-       LTTNG_ASSERT(nb_fd > 0);
-       LTTNG_ASSERT(pthread_mutex_trylock(sock->lock) == EBUSY);
-
-       ret = lttcomm_send_fds_unix_sock(*sock->fd_ptr, fds, nb_fd);
-       if (ret < 0) {
-               /* The above call will print a PERROR on error. */
-               DBG("Error when sending consumer fds on sock %d", *sock->fd_ptr);
-               goto error;
-       }
-
-       ret = consumer_recv_status_reply(sock);
-error:
-       return ret;
-}
-
-/*
- * Consumer send communication message structure to consumer.
- *
- * The consumer socket lock must be held by the caller.
- */
-int consumer_send_msg(struct consumer_socket *sock,
-               const struct lttcomm_consumer_msg *msg)
-{
-       int ret;
-
-       LTTNG_ASSERT(msg);
-       LTTNG_ASSERT(sock);
-       LTTNG_ASSERT(pthread_mutex_trylock(sock->lock) == EBUSY);
-
-       ret = consumer_socket_send(sock, msg, sizeof(struct lttcomm_consumer_msg));
-       if (ret < 0) {
-               goto error;
-       }
-
-       ret = consumer_recv_status_reply(sock);
-
-error:
-       return ret;
-}
-
-/*
- * Consumer send channel communication message structure to consumer.
- *
- * The consumer socket lock must be held by the caller.
- */
-int consumer_send_channel(struct consumer_socket *sock,
-               struct lttcomm_consumer_msg *msg)
-{
-       int ret;
-
-       LTTNG_ASSERT(msg);
-       LTTNG_ASSERT(sock);
-
-       ret = consumer_send_msg(sock, msg);
-       if (ret < 0) {
-               goto error;
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Populate the given consumer msg structure with the ask_channel command
- * information.
- */
-void consumer_init_ask_channel_comm_msg(struct lttcomm_consumer_msg *msg,
-               uint64_t subbuf_size,
-               uint64_t num_subbuf,
-               int overwrite,
-               unsigned int switch_timer_interval,
-               unsigned int read_timer_interval,
-               unsigned int live_timer_interval,
-               bool is_in_live_session,
-               unsigned int monitor_timer_interval,
-               int output,
-               int type,
-               uint64_t session_id,
-               const char *pathname,
-               const char *name,
-               uint64_t relayd_id,
-               uint64_t key,
-               unsigned char *uuid,
-               uint32_t chan_id,
-               uint64_t tracefile_size,
-               uint64_t tracefile_count,
-               uint64_t session_id_per_pid,
-               unsigned int monitor,
-               uint32_t ust_app_uid,
-               int64_t blocking_timeout,
-               const char *root_shm_path,
-               const char *shm_path,
-               struct lttng_trace_chunk *trace_chunk,
-               const struct lttng_credentials *buffer_credentials)
-{
-       LTTNG_ASSERT(msg);
-
-       /* Zeroed structure */
-       memset(msg, 0, sizeof(struct lttcomm_consumer_msg));
-       msg->u.ask_channel.buffer_credentials.uid = UINT32_MAX;
-       msg->u.ask_channel.buffer_credentials.gid = UINT32_MAX;
-
-       if (trace_chunk) {
-               uint64_t chunk_id;
-               enum lttng_trace_chunk_status chunk_status;
-
-               chunk_status = lttng_trace_chunk_get_id(trace_chunk, &chunk_id);
-               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-               LTTNG_OPTIONAL_SET(&msg->u.ask_channel.chunk_id, chunk_id);
-       }
-       msg->u.ask_channel.buffer_credentials.uid =
-                       lttng_credentials_get_uid(buffer_credentials);
-       msg->u.ask_channel.buffer_credentials.gid =
-                       lttng_credentials_get_gid(buffer_credentials);
-
-       msg->cmd_type = LTTNG_CONSUMER_ASK_CHANNEL_CREATION;
-       msg->u.ask_channel.subbuf_size = subbuf_size;
-       msg->u.ask_channel.num_subbuf = num_subbuf ;
-       msg->u.ask_channel.overwrite = overwrite;
-       msg->u.ask_channel.switch_timer_interval = switch_timer_interval;
-       msg->u.ask_channel.read_timer_interval = read_timer_interval;
-       msg->u.ask_channel.live_timer_interval = live_timer_interval;
-       msg->u.ask_channel.is_live = is_in_live_session;
-       msg->u.ask_channel.monitor_timer_interval = monitor_timer_interval;
-       msg->u.ask_channel.output = output;
-       msg->u.ask_channel.type = type;
-       msg->u.ask_channel.session_id = session_id;
-       msg->u.ask_channel.session_id_per_pid = session_id_per_pid;
-       msg->u.ask_channel.relayd_id = relayd_id;
-       msg->u.ask_channel.key = key;
-       msg->u.ask_channel.chan_id = chan_id;
-       msg->u.ask_channel.tracefile_size = tracefile_size;
-       msg->u.ask_channel.tracefile_count = tracefile_count;
-       msg->u.ask_channel.monitor = monitor;
-       msg->u.ask_channel.ust_app_uid = ust_app_uid;
-       msg->u.ask_channel.blocking_timeout = blocking_timeout;
-
-       memcpy(msg->u.ask_channel.uuid, uuid, sizeof(msg->u.ask_channel.uuid));
-
-       if (pathname) {
-               strncpy(msg->u.ask_channel.pathname, pathname,
-                               sizeof(msg->u.ask_channel.pathname));
-               msg->u.ask_channel.pathname[sizeof(msg->u.ask_channel.pathname)-1] = '\0';
-       }
-
-       strncpy(msg->u.ask_channel.name, name, sizeof(msg->u.ask_channel.name));
-       msg->u.ask_channel.name[sizeof(msg->u.ask_channel.name) - 1] = '\0';
-
-       if (root_shm_path) {
-               strncpy(msg->u.ask_channel.root_shm_path, root_shm_path,
-                       sizeof(msg->u.ask_channel.root_shm_path));
-               msg->u.ask_channel.root_shm_path[sizeof(msg->u.ask_channel.root_shm_path) - 1] = '\0';
-       }
-       if (shm_path) {
-               strncpy(msg->u.ask_channel.shm_path, shm_path,
-                       sizeof(msg->u.ask_channel.shm_path));
-               msg->u.ask_channel.shm_path[sizeof(msg->u.ask_channel.shm_path) - 1] = '\0';
-       }
-}
-
-/*
- * Init channel communication message structure.
- */
-void consumer_init_add_channel_comm_msg(struct lttcomm_consumer_msg *msg,
-               uint64_t channel_key,
-               uint64_t session_id,
-               const char *pathname,
-               uid_t uid,
-               gid_t gid,
-               uint64_t relayd_id,
-               const char *name,
-               unsigned int nb_init_streams,
-               enum lttng_event_output output,
-               int type,
-               uint64_t tracefile_size,
-               uint64_t tracefile_count,
-               unsigned int monitor,
-               unsigned int live_timer_interval,
-               bool is_in_live_session,
-               unsigned int monitor_timer_interval,
-               struct lttng_trace_chunk *trace_chunk)
-{
-       LTTNG_ASSERT(msg);
-
-       /* Zeroed structure */
-       memset(msg, 0, sizeof(struct lttcomm_consumer_msg));
-
-       if (trace_chunk) {
-               uint64_t chunk_id;
-               enum lttng_trace_chunk_status chunk_status;
-
-               chunk_status = lttng_trace_chunk_get_id(trace_chunk, &chunk_id);
-               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-               LTTNG_OPTIONAL_SET(&msg->u.channel.chunk_id, chunk_id);
-       }
-
-       /* Send channel */
-       msg->cmd_type = LTTNG_CONSUMER_ADD_CHANNEL;
-       msg->u.channel.channel_key = channel_key;
-       msg->u.channel.session_id = session_id;
-       msg->u.channel.relayd_id = relayd_id;
-       msg->u.channel.nb_init_streams = nb_init_streams;
-       msg->u.channel.output = output;
-       msg->u.channel.type = type;
-       msg->u.channel.tracefile_size = tracefile_size;
-       msg->u.channel.tracefile_count = tracefile_count;
-       msg->u.channel.monitor = monitor;
-       msg->u.channel.live_timer_interval = live_timer_interval;
-       msg->u.channel.is_live = is_in_live_session;
-       msg->u.channel.monitor_timer_interval = monitor_timer_interval;
-
-       strncpy(msg->u.channel.pathname, pathname,
-                       sizeof(msg->u.channel.pathname));
-       msg->u.channel.pathname[sizeof(msg->u.channel.pathname) - 1] = '\0';
-
-       strncpy(msg->u.channel.name, name, sizeof(msg->u.channel.name));
-       msg->u.channel.name[sizeof(msg->u.channel.name) - 1] = '\0';
-}
-
-/*
- * Init stream communication message structure.
- */
-void consumer_init_add_stream_comm_msg(struct lttcomm_consumer_msg *msg,
-               uint64_t channel_key,
-               uint64_t stream_key,
-               int32_t cpu)
-{
-       LTTNG_ASSERT(msg);
-
-       memset(msg, 0, sizeof(struct lttcomm_consumer_msg));
-
-       msg->cmd_type = LTTNG_CONSUMER_ADD_STREAM;
-       msg->u.stream.channel_key = channel_key;
-       msg->u.stream.stream_key = stream_key;
-       msg->u.stream.cpu = cpu;
-}
-
-void consumer_init_streams_sent_comm_msg(struct lttcomm_consumer_msg *msg,
-               enum lttng_consumer_command cmd,
-               uint64_t channel_key, uint64_t net_seq_idx)
-{
-       LTTNG_ASSERT(msg);
-
-       memset(msg, 0, sizeof(struct lttcomm_consumer_msg));
-
-       msg->cmd_type = cmd;
-       msg->u.sent_streams.channel_key = channel_key;
-       msg->u.sent_streams.net_seq_idx = net_seq_idx;
-}
-
-/*
- * Send stream communication structure to the consumer.
- */
-int consumer_send_stream(struct consumer_socket *sock,
-               struct consumer_output *dst, struct lttcomm_consumer_msg *msg,
-               const int *fds, size_t nb_fd)
-{
-       int ret;
-
-       LTTNG_ASSERT(msg);
-       LTTNG_ASSERT(dst);
-       LTTNG_ASSERT(sock);
-       LTTNG_ASSERT(fds);
-
-       ret = consumer_send_msg(sock, msg);
-       if (ret < 0) {
-               goto error;
-       }
-
-       ret = consumer_send_fds(sock, fds, nb_fd);
-       if (ret < 0) {
-               goto error;
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Send relayd socket to consumer associated with a session name.
- *
- * The consumer socket lock must be held by the caller.
- *
- * On success return positive value. On error, negative value.
- */
-int consumer_send_relayd_socket(struct consumer_socket *consumer_sock,
-               struct lttcomm_relayd_sock *rsock, struct consumer_output *consumer,
-               enum lttng_stream_type type, uint64_t session_id,
-               const char *session_name, const char *hostname,
-               const char *base_path, int session_live_timer,
-               const uint64_t *current_chunk_id, time_t session_creation_time,
-               bool session_name_contains_creation_time)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       /* Code flow error. Safety net. */
-       LTTNG_ASSERT(rsock);
-       LTTNG_ASSERT(consumer);
-       LTTNG_ASSERT(consumer_sock);
-
-       memset(&msg, 0, sizeof(msg));
-       /* Bail out if consumer is disabled */
-       if (!consumer->enabled) {
-               ret = LTTNG_OK;
-               goto error;
-       }
-
-       if (type == LTTNG_STREAM_CONTROL) {
-               char output_path[LTTNG_PATH_MAX] = {};
-               uint64_t relayd_session_id;
-
-               ret = relayd_create_session(rsock, &relayd_session_id,
-                               session_name, hostname, base_path,
-                               session_live_timer, consumer->snapshot,
-                               session_id, the_sessiond_uuid, current_chunk_id,
-                               session_creation_time,
-                               session_name_contains_creation_time,
-                               output_path);
-               if (ret < 0) {
-                       /* Close the control socket. */
-                       (void) relayd_close(rsock);
-                       goto error;
-               }
-               msg.u.relayd_sock.relayd_session_id = relayd_session_id;
-               DBG("Created session on relay, output path reply: %s",
-                       output_path);
-       }
-
-       msg.cmd_type = LTTNG_CONSUMER_ADD_RELAYD_SOCKET;
-       /*
-        * Assign network consumer output index using the temporary consumer since
-        * this call should only be made from within a set_consumer_uri() function
-        * call in the session daemon.
-        */
-       msg.u.relayd_sock.net_index = consumer->net_seq_index;
-       msg.u.relayd_sock.type = type;
-       msg.u.relayd_sock.session_id = session_id;
-       memcpy(&msg.u.relayd_sock.sock, rsock, sizeof(msg.u.relayd_sock.sock));
-
-       DBG3("Sending relayd sock info to consumer on %d", *consumer_sock->fd_ptr);
-       ret = consumer_send_msg(consumer_sock, &msg);
-       if (ret < 0) {
-               goto error;
-       }
-
-       DBG3("Sending relayd socket file descriptor to consumer");
-       ret = consumer_send_fds(consumer_sock, ALIGNED_CONST_PTR(rsock->sock.fd), 1);
-       if (ret < 0) {
-               goto error;
-       }
-
-       DBG2("Consumer relayd socket sent");
-
-error:
-       return ret;
-}
-
-static
-int consumer_send_pipe(struct consumer_socket *consumer_sock,
-               enum lttng_consumer_command cmd, int pipe)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-       const char *pipe_name;
-       const char *command_name;
-
-       switch (cmd) {
-       case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
-               pipe_name = "channel monitor";
-               command_name = "SET_CHANNEL_MONITOR_PIPE";
-               break;
-       default:
-               ERR("Unexpected command received in %s (cmd = %d)", __func__,
-                               (int) cmd);
-               abort();
-       }
-
-       /* Code flow error. Safety net. */
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = cmd;
-
-       pthread_mutex_lock(consumer_sock->lock);
-       DBG3("Sending %s command to consumer", command_name);
-       ret = consumer_send_msg(consumer_sock, &msg);
-       if (ret < 0) {
-               goto error;
-       }
-
-       DBG3("Sending %s pipe %d to consumer on socket %d",
-                       pipe_name,
-                       pipe, *consumer_sock->fd_ptr);
-       ret = consumer_send_fds(consumer_sock, &pipe, 1);
-       if (ret < 0) {
-               goto error;
-       }
-
-       DBG2("%s pipe successfully sent", pipe_name);
-error:
-       pthread_mutex_unlock(consumer_sock->lock);
-       return ret;
-}
-
-int consumer_send_channel_monitor_pipe(struct consumer_socket *consumer_sock,
-               int pipe)
-{
-       return consumer_send_pipe(consumer_sock,
-                       LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE, pipe);
-}
-
-/*
- * Ask the consumer if the data is pending for the specific session id.
- * Returns 1 if data is pending, 0 otherwise, or < 0 on error.
- */
-int consumer_is_data_pending(uint64_t session_id,
-               struct consumer_output *consumer)
-{
-       int ret;
-       int32_t ret_code = 0;  /* Default is that the data is NOT pending */
-       struct consumer_socket *socket;
-       struct lttng_ht_iter iter;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(consumer);
-
-       DBG3("Consumer data pending for id %" PRIu64, session_id);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_DATA_PENDING;
-       msg.u.data_pending.session_id = session_id;
-
-       /* Send command for each consumer */
-       rcu_read_lock();
-       cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter, socket,
-                       node.node) {
-               pthread_mutex_lock(socket->lock);
-               ret = consumer_socket_send(socket, &msg, sizeof(msg));
-               if (ret < 0) {
-                       pthread_mutex_unlock(socket->lock);
-                       goto error_unlock;
-               }
-
-               /*
-                * No need for a recv reply status because the answer to the command is
-                * the reply status message.
-                */
-
-               ret = consumer_socket_recv(socket, &ret_code, sizeof(ret_code));
-               if (ret < 0) {
-                       pthread_mutex_unlock(socket->lock);
-                       goto error_unlock;
-               }
-               pthread_mutex_unlock(socket->lock);
-
-               if (ret_code == 1) {
-                       break;
-               }
-       }
-       rcu_read_unlock();
-
-       DBG("Consumer data is %s pending for session id %" PRIu64,
-                       ret_code == 1 ? "" : "NOT", session_id);
-       return ret_code;
-
-error_unlock:
-       rcu_read_unlock();
-       return -1;
-}
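
Illustrative sketch only: a caller could poll consumer_is_data_pending() until every consumer reports that no data remains buffered. The example_wait_until_data_flushed() helper, its calling context and the 100 ms back-off are hypothetical; only the 1/0/negative return convention comes from the function above.

static int example_wait_until_data_flushed(uint64_t session_id,
                struct consumer_output *output)
{
        int pending;

        do {
                pending = consumer_is_data_pending(session_id, output);
                if (pending < 0) {
                        /* Communication error with a consumer. */
                        return pending;
                }
                if (pending == 1) {
                        /* Data still buffered; back off before retrying. */
                        usleep(100000);
                }
        } while (pending == 1);

        return 0;
}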
-
-/*
- * Send a flush command to consumer using the given channel key.
- *
- * Return 0 on success else a negative value.
- */
-int consumer_flush_channel(struct consumer_socket *socket, uint64_t key)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(socket);
-
-       DBG2("Consumer flush channel key %" PRIu64, key);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_FLUSH_CHANNEL;
-       msg.u.flush_channel.key = key;
-
-       pthread_mutex_lock(socket->lock);
-       health_code_update();
-
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0) {
-               goto end;
-       }
-
-end:
-       health_code_update();
-       pthread_mutex_unlock(socket->lock);
-       return ret;
-}
-
-/*
- * Send a clear quiescent command to consumer using the given channel key.
- *
- * Return 0 on success else a negative value.
- */
-int consumer_clear_quiescent_channel(struct consumer_socket *socket, uint64_t key)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(socket);
-
-       DBG2("Consumer clear quiescent channel key %" PRIu64, key);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL;
-       msg.u.clear_quiescent_channel.key = key;
-
-       pthread_mutex_lock(socket->lock);
-       health_code_update();
-
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0) {
-               goto end;
-       }
-
-end:
-       health_code_update();
-       pthread_mutex_unlock(socket->lock);
-       return ret;
-}
-
-/*
- * Send a close metadata command to consumer using the given channel key.
- * Called with registry lock held.
- *
- * Return 0 on success else a negative value.
- */
-int consumer_close_metadata(struct consumer_socket *socket,
-               uint64_t metadata_key)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(socket);
-
-       DBG2("Consumer close metadata channel key %" PRIu64, metadata_key);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_CLOSE_METADATA;
-       msg.u.close_metadata.key = metadata_key;
-
-       pthread_mutex_lock(socket->lock);
-       health_code_update();
-
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0) {
-               goto end;
-       }
-
-end:
-       health_code_update();
-       pthread_mutex_unlock(socket->lock);
-       return ret;
-}
-
-/*
- * Send a setup metadata command to consumer using the given channel key.
- *
- * Return 0 on success else a negative value.
- */
-int consumer_setup_metadata(struct consumer_socket *socket,
-               uint64_t metadata_key)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(socket);
-
-       DBG2("Consumer setup metadata channel key %" PRIu64, metadata_key);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_SETUP_METADATA;
-       msg.u.setup_metadata.key = metadata_key;
-
-       pthread_mutex_lock(socket->lock);
-       health_code_update();
-
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0) {
-               goto end;
-       }
-
-end:
-       health_code_update();
-       pthread_mutex_unlock(socket->lock);
-       return ret;
-}
-
-/*
- * Send metadata string to consumer.
- * RCU read-side lock must be held to guarantee existence of socket.
- *
- * Return 0 on success else a negative value.
- */
-int consumer_push_metadata(struct consumer_socket *socket,
-               uint64_t metadata_key, char *metadata_str, size_t len,
-               size_t target_offset, uint64_t version)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(socket);
-
-       DBG2("Consumer push metadata to consumer socket %d", *socket->fd_ptr);
-
-       pthread_mutex_lock(socket->lock);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_PUSH_METADATA;
-       msg.u.push_metadata.key = metadata_key;
-       msg.u.push_metadata.target_offset = target_offset;
-       msg.u.push_metadata.len = len;
-       msg.u.push_metadata.version = version;
-
-       health_code_update();
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0 || len == 0) {
-               goto end;
-       }
-
-       DBG3("Consumer pushing metadata on sock %d of len %zu", *socket->fd_ptr,
-                       len);
-
-       ret = consumer_socket_send(socket, metadata_str, len);
-       if (ret < 0) {
-               goto end;
-       }
-
-       health_code_update();
-       ret = consumer_recv_status_reply(socket);
-       if (ret < 0) {
-               goto end;
-       }
-
-end:
-       pthread_mutex_unlock(socket->lock);
-       health_code_update();
-       return ret;
-}
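
Illustrative sketch of the RCU requirement documented above: the socket must be looked up and used under the same read-side critical section. The example_push_metadata() helper, its parameters and the zero target_offset/version values are hypothetical; consumer_find_socket_by_bitness() is the existing lookup helper.

static int example_push_metadata(struct consumer_output *output, int bits,
                uint64_t metadata_key, char *metadata_str, size_t len)
{
        int ret = -1;
        struct consumer_socket *socket;

        /* Hold the RCU read-side lock across the lookup and the push. */
        rcu_read_lock();
        socket = consumer_find_socket_by_bitness(bits, output);
        if (socket) {
                ret = consumer_push_metadata(socket, metadata_key, metadata_str,
                                len, 0, 0);
        }
        rcu_read_unlock();

        return ret;
}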
-
-/*
- * Ask the consumer to snapshot a specific channel using the key.
- *
- * Returns LTTNG_OK on success or else an LTTng error code.
- */
-enum lttng_error_code consumer_snapshot_channel(struct consumer_socket *socket,
-               uint64_t key, const struct consumer_output *output, int metadata,
-               uid_t uid, gid_t gid, const char *channel_path, int wait,
-               uint64_t nb_packets_per_stream)
-{
-       int ret;
-       enum lttng_error_code status = LTTNG_OK;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(socket);
-       LTTNG_ASSERT(output);
-
-       DBG("Consumer snapshot channel key %" PRIu64, key);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_SNAPSHOT_CHANNEL;
-       msg.u.snapshot_channel.key = key;
-       msg.u.snapshot_channel.nb_packets_per_stream = nb_packets_per_stream;
-       msg.u.snapshot_channel.metadata = metadata;
-
-       if (output->type == CONSUMER_DST_NET) {
-               msg.u.snapshot_channel.relayd_id =
-                               output->net_seq_index;
-               msg.u.snapshot_channel.use_relayd = 1;
-       } else {
-               msg.u.snapshot_channel.relayd_id = (uint64_t) -1ULL;
-       }
-       ret = lttng_strncpy(msg.u.snapshot_channel.pathname,
-                       channel_path,
-                       sizeof(msg.u.snapshot_channel.pathname));
-       if (ret < 0) {
-               ERR("Snapshot path exceeds the maximal allowed length of %zu bytes (%zu bytes required) with path \"%s\"",
-                               sizeof(msg.u.snapshot_channel.pathname),
-                               strlen(channel_path),
-                               channel_path);
-               status = LTTNG_ERR_SNAPSHOT_FAIL;
-               goto error;
-       }
-
-       health_code_update();
-       pthread_mutex_lock(socket->lock);
-       ret = consumer_send_msg(socket, &msg);
-       pthread_mutex_unlock(socket->lock);
-       if (ret < 0) {
-               switch (-ret) {
-               case LTTCOMM_CONSUMERD_CHAN_NOT_FOUND:
-                       status = LTTNG_ERR_CHAN_NOT_FOUND;
-                       break;
-               default:
-                       status = LTTNG_ERR_SNAPSHOT_FAIL;
-                       break;
-               }
-               goto error;
-       }
-
-error:
-       health_code_update();
-       return status;
-}
-
-/*
- * Ask the consumer the number of discarded events for a channel.
- */
-int consumer_get_discarded_events(uint64_t session_id, uint64_t channel_key,
-               struct consumer_output *consumer, uint64_t *discarded)
-{
-       int ret;
-       struct consumer_socket *socket;
-       struct lttng_ht_iter iter;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(consumer);
-
-       DBG3("Consumer discarded events id %" PRIu64, session_id);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_DISCARDED_EVENTS;
-       msg.u.discarded_events.session_id = session_id;
-       msg.u.discarded_events.channel_key = channel_key;
-
-       *discarded = 0;
-
-       /* Send command for each consumer */
-       rcu_read_lock();
-       cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter, socket,
-                       node.node) {
-               uint64_t consumer_discarded = 0;
-               pthread_mutex_lock(socket->lock);
-               ret = consumer_socket_send(socket, &msg, sizeof(msg));
-               if (ret < 0) {
-                       pthread_mutex_unlock(socket->lock);
-                       goto end;
-               }
-
-               /*
-                * No need for a recv reply status because the answer to the
-                * command is the reply status message.
-                */
-               ret = consumer_socket_recv(socket, &consumer_discarded,
-                               sizeof(consumer_discarded));
-               if (ret < 0) {
-                       ERR("get discarded events");
-                       pthread_mutex_unlock(socket->lock);
-                       goto end;
-               }
-               pthread_mutex_unlock(socket->lock);
-               *discarded += consumer_discarded;
-       }
-       ret = 0;
-       DBG("Consumer discarded %" PRIu64 " events in session id %" PRIu64,
-                       *discarded, session_id);
-
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Ask the consumer the number of lost packets for a channel.
- */
-int consumer_get_lost_packets(uint64_t session_id, uint64_t channel_key,
-               struct consumer_output *consumer, uint64_t *lost)
-{
-       int ret;
-       struct consumer_socket *socket;
-       struct lttng_ht_iter iter;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(consumer);
-
-       DBG3("Consumer lost packets id %" PRIu64, session_id);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_LOST_PACKETS;
-       msg.u.lost_packets.session_id = session_id;
-       msg.u.lost_packets.channel_key = channel_key;
-
-       *lost = 0;
-
-       /* Send command for each consumer */
-       rcu_read_lock();
-       cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter, socket,
-                       node.node) {
-               uint64_t consumer_lost = 0;
-               pthread_mutex_lock(socket->lock);
-               ret = consumer_socket_send(socket, &msg, sizeof(msg));
-               if (ret < 0) {
-                       pthread_mutex_unlock(socket->lock);
-                       goto end;
-               }
-
-               /*
-                * No need for a recv reply status because the answer to the
-                * command is the reply status message.
-                */
-               ret = consumer_socket_recv(socket, &consumer_lost,
-                               sizeof(consumer_lost));
-               if (ret < 0) {
-                       ERR("get lost packets");
-                       pthread_mutex_unlock(socket->lock);
-                       goto end;
-               }
-               pthread_mutex_unlock(socket->lock);
-               *lost += consumer_lost;
-       }
-       ret = 0;
-       DBG("Consumer lost %" PRIu64 " packets in session id %" PRIu64,
-                       *lost, session_id);
-
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Ask the consumer to rotate a channel.
- *
- * The new_chunk_id is the session->rotate_count that was incremented when the
- * rotation started. On the relay, this makes it possible to keep track of which
- * chunk each stream is currently writing to (for the rotate_pending operation).
- */
-int consumer_rotate_channel(struct consumer_socket *socket, uint64_t key,
-               uid_t uid, gid_t gid, struct consumer_output *output,
-               bool is_metadata_channel)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(socket);
-
-       DBG("Consumer rotate channel key %" PRIu64, key);
-
-       pthread_mutex_lock(socket->lock);
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_ROTATE_CHANNEL;
-       msg.u.rotate_channel.key = key;
-       msg.u.rotate_channel.metadata = !!is_metadata_channel;
-
-       if (output->type == CONSUMER_DST_NET) {
-               msg.u.rotate_channel.relayd_id = output->net_seq_index;
-       } else {
-               msg.u.rotate_channel.relayd_id = (uint64_t) -1ULL;
-       }
-
-       health_code_update();
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0) {
-               switch (-ret) {
-               case LTTCOMM_CONSUMERD_CHAN_NOT_FOUND:
-                       ret = -LTTNG_ERR_CHAN_NOT_FOUND;
-                       break;
-               default:
-                       ret = -LTTNG_ERR_ROTATION_FAIL_CONSUMER;
-                       break;
-               }
-               goto error;
-       }
-error:
-       pthread_mutex_unlock(socket->lock);
-       health_code_update();
-       return ret;
-}
-
-int consumer_open_channel_packets(struct consumer_socket *socket, uint64_t key)
-{
-       int ret;
-       const struct lttcomm_consumer_msg msg = {
-               .cmd_type = LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS,
-               .u.open_channel_packets.key = key,
-       };
-
-       LTTNG_ASSERT(socket);
-
-       DBG("Consumer open channel packets: channel key = %" PRIu64, key);
-
-       health_code_update();
-
-       pthread_mutex_lock(socket->lock);
-       ret = consumer_send_msg(socket, &msg);
-       pthread_mutex_unlock(socket->lock);
-       if (ret < 0) {
-               goto error_socket;
-       }
-
-error_socket:
-       health_code_update();
-       return ret;
-}
-
-int consumer_clear_channel(struct consumer_socket *socket, uint64_t key)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(socket);
-
-       DBG("Consumer clear channel %" PRIu64, key);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_CLEAR_CHANNEL;
-       msg.u.clear_channel.key = key;
-
-       health_code_update();
-
-       pthread_mutex_lock(socket->lock);
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0) {
-               goto error_socket;
-       }
-
-error_socket:
-       pthread_mutex_unlock(socket->lock);
-
-       health_code_update();
-       return ret;
-}
-
-int consumer_init(struct consumer_socket *socket,
-               const lttng_uuid sessiond_uuid)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg = {
-               .cmd_type = LTTNG_CONSUMER_INIT,
-       };
-
-       LTTNG_ASSERT(socket);
-
-       DBG("Sending consumer initialization command");
-       lttng_uuid_copy(msg.u.init.sessiond_uuid, sessiond_uuid);
-
-       health_code_update();
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0) {
-               goto error;
-       }
-
-error:
-       health_code_update();
-       return ret;
-}
-
-/*
- * Ask the consumer to create a new chunk for a given session.
- *
- * Called with the consumer socket lock held.
- */
-int consumer_create_trace_chunk(struct consumer_socket *socket,
-               uint64_t relayd_id, uint64_t session_id,
-               struct lttng_trace_chunk *chunk,
-               const char *domain_subdir)
-{
-       int ret;
-       enum lttng_trace_chunk_status chunk_status;
-       struct lttng_credentials chunk_credentials;
-       const struct lttng_directory_handle *chunk_directory_handle = NULL;
-       struct lttng_directory_handle *domain_handle = NULL;
-       int domain_dirfd;
-       const char *chunk_name;
-       bool chunk_name_overridden;
-       uint64_t chunk_id;
-       time_t creation_timestamp;
-       char creation_timestamp_buffer[ISO8601_STR_LEN];
-       const char *creation_timestamp_str = "(none)";
-       const bool chunk_has_local_output = relayd_id == -1ULL;
-       enum lttng_trace_chunk_status tc_status;
-       struct lttcomm_consumer_msg msg = {
-               .cmd_type = LTTNG_CONSUMER_CREATE_TRACE_CHUNK,
-               .u.create_trace_chunk.session_id = session_id,
-       };
-
-       LTTNG_ASSERT(socket);
-       LTTNG_ASSERT(chunk);
-
-       if (relayd_id != -1ULL) {
-               LTTNG_OPTIONAL_SET(&msg.u.create_trace_chunk.relayd_id,
-                               relayd_id);
-       }
-
-       chunk_status = lttng_trace_chunk_get_name(chunk, &chunk_name,
-                       &chunk_name_overridden);
-       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK &&
-                       chunk_status != LTTNG_TRACE_CHUNK_STATUS_NONE) {
-               ERR("Failed to get name of trace chunk");
-               ret = -LTTNG_ERR_FATAL;
-               goto error;
-       }
-       if (chunk_name_overridden) {
-               ret = lttng_strncpy(msg.u.create_trace_chunk.override_name,
-                               chunk_name,
-                               sizeof(msg.u.create_trace_chunk.override_name));
-               if (ret) {
-                       ERR("Trace chunk name \"%s\" exceeds the maximal length allowed by the consumer protocol",
-                                       chunk_name);
-                       ret = -LTTNG_ERR_FATAL;
-                       goto error;
-               }
-       }
-
-       chunk_status = lttng_trace_chunk_get_creation_timestamp(chunk,
-                       &creation_timestamp);
-       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-               ret = -LTTNG_ERR_FATAL;
-               goto error;
-       }
-       msg.u.create_trace_chunk.creation_timestamp =
-                       (uint64_t) creation_timestamp;
-       /* Only used for logging purposes. */
-       ret = time_to_iso8601_str(creation_timestamp,
-                       creation_timestamp_buffer,
-                       sizeof(creation_timestamp_buffer));
-       creation_timestamp_str = !ret ? creation_timestamp_buffer :
-                       "(formatting error)";
-
-       chunk_status = lttng_trace_chunk_get_id(chunk, &chunk_id);
-       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-               /*
-                * Anonymous trace chunks should never be transmitted
-                * to remote peers (consumerd and relayd). They are used
-                * internally for backward-compatibility purposes.
-                */
-               ret = -LTTNG_ERR_FATAL;
-               goto error;
-       }
-       msg.u.create_trace_chunk.chunk_id = chunk_id;
-
-       if (chunk_has_local_output) {
-               chunk_status = lttng_trace_chunk_borrow_chunk_directory_handle(
-                               chunk, &chunk_directory_handle);
-               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       ret = -LTTNG_ERR_FATAL;
-                       goto error;
-               }
-               chunk_status = lttng_trace_chunk_get_credentials(
-                               chunk, &chunk_credentials);
-               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       /*
-                        * Not associating credentials to a sessiond chunk is a
-                        * fatal internal error.
-                        */
-                       ret = -LTTNG_ERR_FATAL;
-                       goto error;
-               }
-               tc_status = lttng_trace_chunk_create_subdirectory(
-                               chunk, domain_subdir);
-               if (tc_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       PERROR("Failed to create chunk domain output directory \"%s\"",
-                               domain_subdir);
-                       ret = -LTTNG_ERR_FATAL;
-                       goto error;
-               }
-               domain_handle = lttng_directory_handle_create_from_handle(
-                               domain_subdir,
-                               chunk_directory_handle);
-               if (!domain_handle) {
-                       ret = -LTTNG_ERR_FATAL;
-                       goto error;
-               }
-
-               /*
-                * This will only compile on platforms that support
-                * dirfd (POSIX.2008). This is fine as the session daemon
-                * is only built for such platforms.
-                *
-                * The ownership of the chunk directory handle is maintained
-                * by the trace chunk.
-                */
-               domain_dirfd = lttng_directory_handle_get_dirfd(
-                               domain_handle);
-               LTTNG_ASSERT(domain_dirfd >= 0);
-
-               msg.u.create_trace_chunk.credentials.value.uid =
-                               lttng_credentials_get_uid(&chunk_credentials);
-               msg.u.create_trace_chunk.credentials.value.gid =
-                               lttng_credentials_get_gid(&chunk_credentials);
-               msg.u.create_trace_chunk.credentials.is_set = 1;
-       }
-
-       DBG("Sending consumer create trace chunk command: relayd_id = %" PRId64
-                       ", session_id = %" PRIu64 ", chunk_id = %" PRIu64
-                       ", creation_timestamp = %s",
-                       relayd_id, session_id, chunk_id,
-                       creation_timestamp_str);
-       health_code_update();
-       ret = consumer_send_msg(socket, &msg);
-       health_code_update();
-       if (ret < 0) {
-               ERR("Trace chunk creation error on consumer");
-               ret = -LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
-               goto error;
-       }
-
-       if (chunk_has_local_output) {
-               DBG("Sending trace chunk domain directory fd to consumer");
-               health_code_update();
-               ret = consumer_send_fds(socket, &domain_dirfd, 1);
-               health_code_update();
-               if (ret < 0) {
-                       ERR("Trace chunk creation error on consumer");
-                       ret = -LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
-                       goto error;
-               }
-       }
-error:
-       lttng_directory_handle_put(domain_handle);
-       return ret;
-}
-
-/*
- * Ask the consumer to close a trace chunk for a given session.
- *
- * Called with the consumer socket lock held.
- */
-int consumer_close_trace_chunk(struct consumer_socket *socket,
-               uint64_t relayd_id, uint64_t session_id,
-               struct lttng_trace_chunk *chunk,
-               char *closed_trace_chunk_path)
-{
-       int ret;
-       enum lttng_trace_chunk_status chunk_status;
-       struct lttcomm_consumer_msg msg = {
-                       .cmd_type = LTTNG_CONSUMER_CLOSE_TRACE_CHUNK,
-                       .u.close_trace_chunk.session_id = session_id,
-       };
-       struct lttcomm_consumer_close_trace_chunk_reply reply;
-       uint64_t chunk_id;
-       time_t close_timestamp;
-       enum lttng_trace_chunk_command_type close_command;
-       const char *close_command_name = "none";
-       struct lttng_dynamic_buffer path_reception_buffer;
-
-       LTTNG_ASSERT(socket);
-       lttng_dynamic_buffer_init(&path_reception_buffer);
-
-       if (relayd_id != -1ULL) {
-               LTTNG_OPTIONAL_SET(
-                               &msg.u.close_trace_chunk.relayd_id, relayd_id);
-       }
-
-       chunk_status = lttng_trace_chunk_get_close_command(
-                       chunk, &close_command);
-       switch (chunk_status) {
-       case LTTNG_TRACE_CHUNK_STATUS_OK:
-               LTTNG_OPTIONAL_SET(&msg.u.close_trace_chunk.close_command,
-                               (uint32_t) close_command);
-               break;
-       case LTTNG_TRACE_CHUNK_STATUS_NONE:
-               break;
-       default:
-               ERR("Failed to get trace chunk close command");
-               ret = -1;
-               goto error;
-       }
-
-       chunk_status = lttng_trace_chunk_get_id(chunk, &chunk_id);
-       /*
-        * Anonymous trace chunks should never be transmitted to remote peers
-        * (consumerd and relayd). They are used internally for
-        * backward-compatibility purposes.
-        */
-       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-       msg.u.close_trace_chunk.chunk_id = chunk_id;
-
-       chunk_status = lttng_trace_chunk_get_close_timestamp(chunk,
-                       &close_timestamp);
-       /*
-        * A trace chunk should be closed locally before being closed remotely.
-        * Otherwise, the close timestamp would never be transmitted to the
-        * peers.
-        */
-       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-       msg.u.close_trace_chunk.close_timestamp = (uint64_t) close_timestamp;
-
-       if (msg.u.close_trace_chunk.close_command.is_set) {
-               close_command_name = lttng_trace_chunk_command_type_get_name(
-                               close_command);
-       }
-       DBG("Sending consumer close trace chunk command: relayd_id = %" PRId64
-                       ", session_id = %" PRIu64 ", chunk_id = %" PRIu64
-                       ", close command = \"%s\"",
-                       relayd_id, session_id, chunk_id, close_command_name);
-
-       health_code_update();
-       ret = consumer_socket_send(socket, &msg, sizeof(struct lttcomm_consumer_msg));
-       if (ret < 0) {
-               ret = -LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
-               goto error;
-       }
-       ret = consumer_socket_recv(socket, &reply, sizeof(reply));
-       if (ret < 0) {
-               ret = -LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
-               goto error;
-       }
-       if (reply.path_length >= LTTNG_PATH_MAX) {
-               ERR("Invalid path returned by relay daemon: %" PRIu32 "bytes exceeds maximal allowed length of %d bytes",
-                               reply.path_length, LTTNG_PATH_MAX);
-               ret = -LTTNG_ERR_INVALID_PROTOCOL;
-               goto error;
-       }
-       ret = lttng_dynamic_buffer_set_size(&path_reception_buffer,
-                       reply.path_length);
-       if (ret) {
-               ERR("Failed to allocate reception buffer of path returned by the \"close trace chunk\" command");
-               ret = -LTTNG_ERR_NOMEM;
-               goto error;
-       }
-       ret = consumer_socket_recv(socket, path_reception_buffer.data,
-                       path_reception_buffer.size);
-       if (ret < 0) {
-               ERR("Communication error while receiving path of closed trace chunk");
-               ret = -LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
-               goto error;
-       }
-       if (path_reception_buffer.data[path_reception_buffer.size - 1] != '\0') {
-               ERR("Invalid path returned by relay daemon: not null-terminated");
-               ret = -LTTNG_ERR_INVALID_PROTOCOL;
-               goto error;
-       }
-       if (closed_trace_chunk_path) {
-               /*
-                * closed_trace_chunk_path is assumed to have a length >=
-                * LTTNG_PATH_MAX
-                */
-               memcpy(closed_trace_chunk_path, path_reception_buffer.data,
-                               path_reception_buffer.size);
-       }
-error:
-       lttng_dynamic_buffer_reset(&path_reception_buffer);
-       health_code_update();
-       return ret;
-}
-
-/*
- * Ask the consumer if a trace chunk exists.
- *
- * Called with the consumer socket lock held.
- * Returns 0 on success, or a negative value on error.
- */
-int consumer_trace_chunk_exists(struct consumer_socket *socket,
-               uint64_t relayd_id, uint64_t session_id,
-               struct lttng_trace_chunk *chunk,
-               enum consumer_trace_chunk_exists_status *result)
-{
-       int ret;
-       enum lttng_trace_chunk_status chunk_status;
-       struct lttcomm_consumer_msg msg = {
-               .cmd_type = LTTNG_CONSUMER_TRACE_CHUNK_EXISTS,
-               .u.trace_chunk_exists.session_id = session_id,
-       };
-       uint64_t chunk_id;
-       const char *consumer_reply_str;
-
-       LTTNG_ASSERT(socket);
-
-       if (relayd_id != -1ULL) {
-               LTTNG_OPTIONAL_SET(&msg.u.trace_chunk_exists.relayd_id,
-                               relayd_id);
-       }
-
-       chunk_status = lttng_trace_chunk_get_id(chunk, &chunk_id);
-       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-               /*
-                * Anonymous trace chunks should never be transmitted
-                * to remote peers (consumerd and relayd). They are used
-                * internally for backward-compatibility purposes.
-                */
-               ret = -LTTNG_ERR_FATAL;
-               goto error;
-       }
-       msg.u.trace_chunk_exists.chunk_id = chunk_id;
-
-       DBG("Sending consumer trace chunk exists command: relayd_id = %" PRId64
-                       ", session_id = %" PRIu64
-                       ", chunk_id = %" PRIu64, relayd_id, session_id, chunk_id);
-
-       health_code_update();
-       ret = consumer_send_msg(socket, &msg);
-       switch (-ret) {
-       case LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK:
-               consumer_reply_str = "unknown trace chunk";
-               *result = CONSUMER_TRACE_CHUNK_EXISTS_STATUS_UNKNOWN_CHUNK;
-               break;
-       case LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_LOCAL:
-               consumer_reply_str = "trace chunk exists locally";
-               *result = CONSUMER_TRACE_CHUNK_EXISTS_STATUS_EXISTS_LOCAL;
-               break;
-       case LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_REMOTE:
-               consumer_reply_str = "trace chunk exists on remote peer";
-               *result = CONSUMER_TRACE_CHUNK_EXISTS_STATUS_EXISTS_REMOTE;
-               break;
-       default:
-               ERR("Consumer returned an error from TRACE_CHUNK_EXISTS command");
-               ret = -1;
-               goto error;
-       }
-       DBG("Consumer reply to TRACE_CHUNK_EXISTS command: %s",
-                       consumer_reply_str);
-       ret = 0;
-error:
-       health_code_update();
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/consumer.cpp b/src/bin/lttng-sessiond/consumer.cpp
new file mode 100644 (file)
index 0000000..f7a92cb
--- /dev/null
@@ -0,0 +1,2165 @@
+/*
+ * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
+ * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <common/common.h>
+#include <common/defaults.h>
+#include <common/uri.h>
+#include <common/relayd/relayd.h>
+#include <common/string-utils/format.h>
+
+#include "consumer.h"
+#include "health-sessiond.h"
+#include "ust-app.h"
+#include "utils.h"
+#include "lttng-sessiond.h"
+
+/*
+ * Return allocated full pathname of the session using the consumer trace path
+ * and subdir if available.
+ *
+ * The caller can safely free(3) the returned value. On error, NULL is
+ * returned.
+ */
+char *setup_channel_trace_path(struct consumer_output *consumer,
+               const char *session_path, size_t *consumer_path_offset)
+{
+       int ret;
+       char *pathname;
+
+       LTTNG_ASSERT(consumer);
+       LTTNG_ASSERT(session_path);
+
+       health_code_update();
+
+       /*
+        * Allocate the string ourselves to make sure we never exceed
+        * LTTNG_PATH_MAX.
+        */
+       pathname = (char *) zmalloc(LTTNG_PATH_MAX);
+       if (!pathname) {
+               goto error;
+       }
+
+       /* Get correct path name destination */
+       if (consumer->type == CONSUMER_DST_NET &&
+                       consumer->relay_major_version == 2 &&
+                       consumer->relay_minor_version < 11) {
+               ret = snprintf(pathname, LTTNG_PATH_MAX, "%s%s/%s/%s",
+                               consumer->dst.net.base_dir,
+                               consumer->chunk_path, consumer->domain_subdir,
+                               session_path);
+               *consumer_path_offset = 0;
+       } else {
+               ret = snprintf(pathname, LTTNG_PATH_MAX, "%s/%s",
+                               consumer->domain_subdir, session_path);
+               *consumer_path_offset = strlen(consumer->domain_subdir) + 1;
+       }
+       DBG3("Consumer trace path relative to current trace chunk: \"%s\"",
+                       pathname);
+       if (ret < 0) {
+               PERROR("Failed to format channel path");
+               goto error;
+       } else if (ret >= LTTNG_PATH_MAX) {
+               ERR("Truncation occurred while formatting channel path");
+               goto error;
+       }
+
+       return pathname;
+error:
+       free(pathname);
+       return NULL;
+}
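
Illustrative usage sketch; the example_log_channel_trace_path() helper and its DBG message are hypothetical, while the NULL-on-error and caller-frees contract comes from the comment above.

static void example_log_channel_trace_path(struct consumer_output *output,
                const char *session_path)
{
        size_t consumer_path_offset = 0;
        char *trace_path;

        trace_path = setup_channel_trace_path(output, session_path,
                        &consumer_path_offset);
        if (!trace_path) {
                /* Allocation or formatting error; nothing to free. */
                return;
        }

        /*
         * Per the branches above, consumer_path_offset skips the domain
         * subdirectory, except for pre-2.11 network outputs where it is 0.
         */
        DBG("Channel trace path: %s (consumer offset: %zu)", trace_path,
                        consumer_path_offset);
        free(trace_path);
}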
+
+/*
+ * Send a data payload of size len using the given consumer socket.
+ *
+ * The consumer socket lock MUST be acquired before calling this since this
+ * function can change the fd value.
+ *
+ * Return 0 on success else a negative value on error.
+ */
+int consumer_socket_send(
+               struct consumer_socket *socket, const void *msg, size_t len)
+{
+       int fd;
+       ssize_t size;
+
+       LTTNG_ASSERT(socket);
+       LTTNG_ASSERT(socket->fd_ptr);
+       LTTNG_ASSERT(msg);
+
+       /* Consumer socket is invalid. Stopping. */
+       fd = *socket->fd_ptr;
+       if (fd < 0) {
+               goto error;
+       }
+
+       size = lttcomm_send_unix_sock(fd, msg, len);
+       if (size < 0) {
+               /* The above call will print a PERROR on error. */
+               DBG("Error when sending data to consumer on sock %d", fd);
+               /*
+                * At this point, the socket is no longer usable; close it and
+                * set the file descriptor to -1 so it is not reused.
+                */
+
+               /* This call will PERROR on error. */
+               (void) lttcomm_close_unix_sock(fd);
+               *socket->fd_ptr = -1;
+               goto error;
+       }
+
+       return 0;
+
+error:
+       return -1;
+}
+
+/*
+ * Receive a data payload of size len using the given consumer socket.
+ *
+ * The consumer socket lock MUST be acquired before calling this since this
+ * function can change the fd value.
+ *
+ * Return 0 on success else a negative value on error.
+ */
+int consumer_socket_recv(struct consumer_socket *socket, void *msg, size_t len)
+{
+       int fd;
+       ssize_t size;
+
+       LTTNG_ASSERT(socket);
+       LTTNG_ASSERT(socket->fd_ptr);
+       LTTNG_ASSERT(msg);
+
+       /* Consumer socket is invalid. Stopping. */
+       fd = *socket->fd_ptr;
+       if (fd < 0) {
+               goto error;
+       }
+
+       size = lttcomm_recv_unix_sock(fd, msg, len);
+       if (size <= 0) {
+               /* The above call will print a PERROR on error. */
+               DBG("Error when receiving data from the consumer socket %d", fd);
+               /*
+                * At this point, the socket is no longer usable; close it and
+                * set the file descriptor to -1 so it is not reused.
+                */
+
+               /* This call will PERROR on error. */
+               (void) lttcomm_close_unix_sock(fd);
+               *socket->fd_ptr = -1;
+               goto error;
+       }
+
+       return 0;
+
+error:
+       return -1;
+}
+
+/*
+ * Receive a reply command status message from the consumer. Consumer socket
+ * lock MUST be acquired before calling this function.
+ *
+ * Return 0 on success, -1 on recv error or a negative lttng error code which
+ * was possibly returned by the consumer.
+ */
+int consumer_recv_status_reply(struct consumer_socket *sock)
+{
+       int ret;
+       struct lttcomm_consumer_status_msg reply;
+
+       LTTNG_ASSERT(sock);
+
+       ret = consumer_socket_recv(sock, &reply, sizeof(reply));
+       if (ret < 0) {
+               goto end;
+       }
+
+       if (reply.ret_code == LTTCOMM_CONSUMERD_SUCCESS) {
+               /* All good. */
+               ret = 0;
+       } else {
+               ret = -reply.ret_code;
+               DBG("Consumer ret code %d", ret);
+       }
+
+end:
+       return ret;
+}
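
Illustrative sketch of a command/status round trip using consumer_socket_send() and consumer_recv_status_reply(); the example_send_command() wrapper is hypothetical, the locking requirement comes from the comments above.

static int example_send_command(struct consumer_socket *socket,
                const struct lttcomm_consumer_msg *msg)
{
        int ret;

        pthread_mutex_lock(socket->lock);
        ret = consumer_socket_send(socket, msg, sizeof(*msg));
        if (ret < 0) {
                goto end;
        }

        /* 0 on success, -1 or a negative lttng error code otherwise. */
        ret = consumer_recv_status_reply(socket);
end:
        pthread_mutex_unlock(socket->lock);
        return ret;
}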
+
+/*
+ * Once the ASK_CHANNEL command is sent to the consumer, the channel
+ * information is sent back. This call receives that data and populates key
+ * and stream_count.
+ *
+ * On success return 0 and both key and stream_count are set. On error, a
+ * negative value is sent back and both parameters are untouched.
+ */
+int consumer_recv_status_channel(struct consumer_socket *sock,
+               uint64_t *key, unsigned int *stream_count)
+{
+       int ret;
+       struct lttcomm_consumer_status_channel reply;
+
+       LTTNG_ASSERT(sock);
+       LTTNG_ASSERT(stream_count);
+       LTTNG_ASSERT(key);
+
+       ret = consumer_socket_recv(sock, &reply, sizeof(reply));
+       if (ret < 0) {
+               goto end;
+       }
+
+       /* An error is possible so don't touch the key and stream_count. */
+       if (reply.ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
+               ret = -1;
+               goto end;
+       }
+
+       *key = reply.key;
+       *stream_count = reply.stream_count;
+       ret = 0;
+
+end:
+       return ret;
+}
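
Illustrative sketch of handling the ASK_CHANNEL reply described above; the example_handle_ask_channel_reply() helper and its log message are hypothetical, and sending the ASK_CHANNEL command itself happens elsewhere.

static int example_handle_ask_channel_reply(struct consumer_socket *socket)
{
        int ret;
        uint64_t key;
        unsigned int stream_count;

        ret = consumer_recv_status_channel(socket, &key, &stream_count);
        if (ret < 0) {
                /* On error, key and stream_count are left untouched. */
                return ret;
        }

        DBG("Consumer replied with channel key %" PRIu64 " and %u stream(s)",
                        key, stream_count);
        return 0;
}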
+
+/*
+ * Send destroy relayd command to consumer.
+ *
+ * On success return positive value. On error, negative value.
+ */
+int consumer_send_destroy_relayd(struct consumer_socket *sock,
+               struct consumer_output *consumer)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(consumer);
+       LTTNG_ASSERT(sock);
+
+       DBG2("Sending destroy relayd command to consumer sock %d", *sock->fd_ptr);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_DESTROY_RELAYD;
+       msg.u.destroy_relayd.net_seq_idx = consumer->net_seq_index;
+
+       pthread_mutex_lock(sock->lock);
+       ret = consumer_socket_send(sock, &msg, sizeof(msg));
+       if (ret < 0) {
+               goto error;
+       }
+
+       /* Don't check the return value. The caller will do it. */
+       ret = consumer_recv_status_reply(sock);
+
+       DBG2("Consumer send destroy relayd command done");
+
+error:
+       pthread_mutex_unlock(sock->lock);
+       return ret;
+}
+
+/*
+ * For each consumer socket in the consumer output object, send a destroy
+ * relayd command.
+ */
+void consumer_output_send_destroy_relayd(struct consumer_output *consumer)
+{
+       struct lttng_ht_iter iter;
+       struct consumer_socket *socket;
+
+       LTTNG_ASSERT(consumer);
+
+       /* Destroy any relayd connection */
+       if (consumer->type == CONSUMER_DST_NET) {
+               rcu_read_lock();
+               cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter, socket,
+                               node.node) {
+                       int ret;
+
+                       /* Send destroy relayd command */
+                       ret = consumer_send_destroy_relayd(socket, consumer);
+                       if (ret < 0) {
+                               DBG("Unable to send destroy relayd command to consumer");
+                               /* Continue since we MUST delete everything at this point. */
+                       }
+               }
+               rcu_read_unlock();
+       }
+}
+
+/*
+ * From a consumer_data structure, allocate and add a consumer socket to the
+ * consumer output.
+ *
+ * Return 0 on success, else negative value on error
+ */
+int consumer_create_socket(struct consumer_data *data,
+               struct consumer_output *output)
+{
+       int ret = 0;
+       struct consumer_socket *socket;
+
+       LTTNG_ASSERT(data);
+
+       if (output == NULL || data->cmd_sock < 0) {
+               /*
+                * Not an error. It is possible that no consumer was spawned or
+                * that it is disabled for the tracing session requesting the socket.
+                */
+               goto error;
+       }
+
+       rcu_read_lock();
+       socket = consumer_find_socket(data->cmd_sock, output);
+       rcu_read_unlock();
+       if (socket == NULL) {
+               socket = consumer_allocate_socket(&data->cmd_sock);
+               if (socket == NULL) {
+                       ret = -1;
+                       goto error;
+               }
+
+               socket->registered = 0;
+               socket->lock = &data->lock;
+               rcu_read_lock();
+               consumer_add_socket(socket, output);
+               rcu_read_unlock();
+       }
+
+       socket->type = data->type;
+
+       DBG3("Consumer socket created (fd: %d) and added to output",
+                       data->cmd_sock);
+
+error:
+       return ret;
+}
+
+/*
+ * Return the consumer socket from the given consumer output with the right
+ * bitness. On error, returns NULL.
+ *
+ * The caller MUST acquire a rcu read side lock and keep it until the socket
+ * object reference is not needed anymore.
+ */
+struct consumer_socket *consumer_find_socket_by_bitness(int bits,
+               const struct consumer_output *consumer)
+{
+       int consumer_fd;
+       struct consumer_socket *socket = NULL;
+
+       switch (bits) {
+       case 64:
+               consumer_fd = uatomic_read(&the_ust_consumerd64_fd);
+               break;
+       case 32:
+               consumer_fd = uatomic_read(&the_ust_consumerd32_fd);
+               break;
+       default:
+               abort();
+               goto end;
+       }
+
+       socket = consumer_find_socket(consumer_fd, consumer);
+       if (!socket) {
+               ERR("Consumer socket fd %d not found in consumer obj %p",
+                               consumer_fd, consumer);
+       }
+
+end:
+       return socket;
+}
+
+/*
+ * Find a consumer_socket in a consumer_output hashtable. Read side lock must
+ * be acquired before calling this function and across use of the
+ * returned consumer_socket.
+ */
+struct consumer_socket *consumer_find_socket(int key,
+               const struct consumer_output *consumer)
+{
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_ulong *node;
+       struct consumer_socket *socket = NULL;
+
+       /* Negative keys are lookup failures */
+       if (key < 0 || consumer == NULL) {
+               return NULL;
+       }
+
+       lttng_ht_lookup(consumer->socks, (void *)((unsigned long) key),
+                       &iter);
+       node = lttng_ht_iter_get_node_ulong(&iter);
+       if (node != NULL) {
+               socket = caa_container_of(node, struct consumer_socket, node);
+       }
+
+       return socket;
+}
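
Illustrative sketch of the read-side lock requirement stated above; the example_with_socket() helper and its body are hypothetical.

static void example_with_socket(int fd, const struct consumer_output *output)
{
        struct consumer_socket *socket;

        /* Hold the RCU read-side lock across the lookup and the socket's use. */
        rcu_read_lock();
        socket = consumer_find_socket(fd, output);
        if (socket) {
                pthread_mutex_lock(socket->lock);
                /* ... exchange messages with the consumer ... */
                pthread_mutex_unlock(socket->lock);
        }
        rcu_read_unlock();
}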
+
+/*
+ * Allocate a new consumer_socket and return the pointer.
+ */
+struct consumer_socket *consumer_allocate_socket(int *fd)
+{
+       struct consumer_socket *socket = NULL;
+
+       LTTNG_ASSERT(fd);
+
+       socket = (consumer_socket *) zmalloc(sizeof(struct consumer_socket));
+       if (socket == NULL) {
+               PERROR("zmalloc consumer socket");
+               goto error;
+       }
+
+       socket->fd_ptr = fd;
+       lttng_ht_node_init_ulong(&socket->node, *fd);
+
+error:
+       return socket;
+}
+
+/*
+ * Add consumer socket to consumer output object. Read side lock must be
+ * acquired before calling this function.
+ */
+void consumer_add_socket(struct consumer_socket *sock,
+               struct consumer_output *consumer)
+{
+       LTTNG_ASSERT(sock);
+       LTTNG_ASSERT(consumer);
+
+       lttng_ht_add_unique_ulong(consumer->socks, &sock->node);
+}
+
+/*
+ * Delete consumer socket from consumer output object. Read side lock must be
+ * acquired before calling this function.
+ */
+void consumer_del_socket(struct consumer_socket *sock,
+               struct consumer_output *consumer)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(sock);
+       LTTNG_ASSERT(consumer);
+
+       iter.iter.node = &sock->node.node;
+       ret = lttng_ht_del(consumer->socks, &iter);
+       LTTNG_ASSERT(!ret);
+}
+
+/*
+ * RCU destroy call function.
+ */
+static void destroy_socket_rcu(struct rcu_head *head)
+{
+       struct lttng_ht_node_ulong *node =
+               caa_container_of(head, struct lttng_ht_node_ulong, head);
+       struct consumer_socket *socket =
+               caa_container_of(node, struct consumer_socket, node);
+
+       free(socket);
+}
+
+/*
+ * Destroy and free socket pointer in a call RCU. Read side lock must be
+ * acquired before calling this function.
+ */
+void consumer_destroy_socket(struct consumer_socket *sock)
+{
+       LTTNG_ASSERT(sock);
+
+       /*
+        * We DO NOT close the file descriptor here since it is global to the
+        * session daemon and is closed only if the consumer dies or a custom
+        * consumer was registered.
+        */
+       if (sock->registered) {
+               DBG3("Consumer socket was registered. Closing fd %d", *sock->fd_ptr);
+               lttcomm_close_unix_sock(*sock->fd_ptr);
+       }
+
+       call_rcu(&sock->node.head, destroy_socket_rcu);
+}
+
+/*
+ * Allocate and assign data to a consumer_output object.
+ *
+ * Return pointer to structure.
+ */
+struct consumer_output *consumer_create_output(enum consumer_dst_type type)
+{
+       struct consumer_output *output = NULL;
+
+       output = (consumer_output *) zmalloc(sizeof(struct consumer_output));
+       if (output == NULL) {
+               PERROR("zmalloc consumer_output");
+               goto error;
+       }
+
+       /* By default, consumer output is enabled */
+       output->enabled = 1;
+       output->type = type;
+       output->net_seq_index = (uint64_t) -1ULL;
+       urcu_ref_init(&output->ref);
+
+       output->socks = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+
+error:
+       return output;
+}
+
+/*
+ * Iterate over the consumer output socket hash table and destroy the sockets.
+ * The socket file descriptors are only closed if the consumer output was
+ * registered, meaning it's an external consumer.
+ */
+void consumer_destroy_output_sockets(struct consumer_output *obj)
+{
+       struct lttng_ht_iter iter;
+       struct consumer_socket *socket;
+
+       if (!obj->socks) {
+               return;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(obj->socks->ht, &iter.iter, socket, node.node) {
+               consumer_del_socket(socket, obj);
+               consumer_destroy_socket(socket);
+       }
+       rcu_read_unlock();
+}
+
+/*
+ * Delete the consumer_output object from the list and free the ptr.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
+ */
+static void consumer_release_output(struct urcu_ref *ref)
+{
+       struct consumer_output *obj =
+               caa_container_of(ref, struct consumer_output, ref);
+
+       consumer_destroy_output_sockets(obj);
+
+       if (obj->socks) {
+               /* Finally destroy HT */
+               ht_cleanup_push(obj->socks);
+       }
+
+       free(obj);
+}
+
+/*
+ * Get the consumer_output object.
+ */
+void consumer_output_get(struct consumer_output *obj)
+{
+       urcu_ref_get(&obj->ref);
+}
+
+/*
+ * Put the consumer_output object.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
+ */
+void consumer_output_put(struct consumer_output *obj)
+{
+       if (!obj) {
+               return;
+       }
+       urcu_ref_put(&obj->ref, consumer_release_output);
+}
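
Illustrative sketch of the reference-counting scheme; the example_output_lifetime() function is hypothetical, CONSUMER_DST_LOCAL is assumed to be the local destination type, and consumer_create_output() above initializes the count to one via urcu_ref_init().

static void example_output_lifetime(void)
{
        struct consumer_output *output;

        output = consumer_create_output(CONSUMER_DST_LOCAL);
        if (!output) {
                return;
        }

        /* Hand an extra reference to a second owner. */
        consumer_output_get(output);

        /* Each owner drops its reference; the last put releases the output. */
        consumer_output_put(output);
        consumer_output_put(output);
}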
+
+/*
+ * Copy consumer output and return the newly allocated copy.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
+ */
+struct consumer_output *consumer_copy_output(struct consumer_output *src)
+{
+       int ret;
+       struct consumer_output *output;
+
+       LTTNG_ASSERT(src);
+
+       output = consumer_create_output(src->type);
+       if (output == NULL) {
+               goto end;
+       }
+       output->enabled = src->enabled;
+       output->net_seq_index = src->net_seq_index;
+       memcpy(output->domain_subdir, src->domain_subdir,
+                       sizeof(output->domain_subdir));
+       output->snapshot = src->snapshot;
+       output->relay_major_version = src->relay_major_version;
+       output->relay_minor_version = src->relay_minor_version;
+       output->relay_allows_clear = src->relay_allows_clear;
+       memcpy(&output->dst, &src->dst, sizeof(output->dst));
+       ret = consumer_copy_sockets(output, src);
+       if (ret < 0) {
+               goto error_put;
+       }
+end:
+       return output;
+
+error_put:
+       consumer_output_put(output);
+       return NULL;
+}
+
+/*
+ * Copy consumer sockets from src to dst.
+ *
+ * Return 0 on success or else a negative value.
+ */
+int consumer_copy_sockets(struct consumer_output *dst,
+               struct consumer_output *src)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct consumer_socket *socket, *copy_sock;
+
+       LTTNG_ASSERT(dst);
+       LTTNG_ASSERT(src);
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(src->socks->ht, &iter.iter, socket, node.node) {
+               /* Ignore socket that are already there. */
+               copy_sock = consumer_find_socket(*socket->fd_ptr, dst);
+               if (copy_sock) {
+                       continue;
+               }
+
+               /* Create new socket object. */
+               copy_sock = consumer_allocate_socket(socket->fd_ptr);
+               if (copy_sock == NULL) {
+                       rcu_read_unlock();
+                       ret = -ENOMEM;
+                       goto error;
+               }
+
+               copy_sock->registered = socket->registered;
+               /*
+                * This is valid because this lock is shared across all consumer
+                * objects; it is the global lock of the consumer data structure of
+                * the session daemon.
+                */
+               copy_sock->lock = socket->lock;
+               consumer_add_socket(copy_sock, dst);
+       }
+       rcu_read_unlock();
+
+error:
+       return ret;
+}
+
+/*
+ * Set network URI to the consumer output.
+ *
+ * Return 0 on success. Return 1 if the URIs were equal. Else, negative value on
+ * error.
+ */
+int consumer_set_network_uri(const struct ltt_session *session,
+               struct consumer_output *output,
+               struct lttng_uri *uri)
+{
+       int ret;
+       struct lttng_uri *dst_uri = NULL;
+
+       /* Code flow error safety net. */
+       LTTNG_ASSERT(output);
+       LTTNG_ASSERT(uri);
+
+       switch (uri->stype) {
+       case LTTNG_STREAM_CONTROL:
+               dst_uri = &output->dst.net.control;
+               output->dst.net.control_isset = 1;
+               if (uri->port == 0) {
+                       /* Assign default port. */
+                       uri->port = DEFAULT_NETWORK_CONTROL_PORT;
+               } else {
+                       if (output->dst.net.data_isset && uri->port ==
+                                       output->dst.net.data.port) {
+                               ret = -LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+               }
+               DBG3("Consumer control URI set with port %d", uri->port);
+               break;
+       case LTTNG_STREAM_DATA:
+               dst_uri = &output->dst.net.data;
+               output->dst.net.data_isset = 1;
+               if (uri->port == 0) {
+                       /* Assign default port. */
+                       uri->port = DEFAULT_NETWORK_DATA_PORT;
+               } else {
+                       if (output->dst.net.control_isset && uri->port ==
+                                       output->dst.net.control.port) {
+                               ret = -LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+               }
+               DBG3("Consumer data URI set with port %d", uri->port);
+               break;
+       default:
+               ERR("Set network uri type unknown %d", uri->stype);
+               ret = -LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       ret = uri_compare(dst_uri, uri);
+       if (!ret) {
+               /* Same URI, don't touch it and return success. */
+               DBG3("Network URIs are identical");
+               goto equal;
+       }
+
+       /* URIs were not equal, replace the stored one. */
+       memcpy(dst_uri, uri, sizeof(struct lttng_uri));
+       output->type = CONSUMER_DST_NET;
+       if (dst_uri->stype != LTTNG_STREAM_CONTROL) {
+               /* Only the control uri needs to contain the path. */
+               goto end;
+       }
+
+       /*
+        * If the user has specified a subdir as part of the control
+        * URL, the session's base output directory is:
+        *   /RELAYD_OUTPUT_PATH/HOSTNAME/USER_SPECIFIED_DIR
+        *
+        * Hence, the "base_dir" from which all stream files and
+        * session rotation chunks are created takes the form
+        *   /HOSTNAME/USER_SPECIFIED_DIR
+        *
+        * If the user has not specified an output directory as part of
+        * the control URL, the base output directory has the form:
+        *   /RELAYD_OUTPUT_PATH/HOSTNAME/SESSION_NAME-CREATION_TIME
+        *
+        * Hence, the "base_dir" from which all stream files and
+        * session rotation chunks are created takes the form
+        *   /HOSTNAME/SESSION_NAME-CREATION_TIME
+        *
+        * Note that automatically generated session names already
+        * contain the session's creation time. In that case, the
+        * creation time is omitted to prevent it from being duplicated
+        * in the final directory hierarchy.
+        */
+       if (*uri->subdir) {
+               if (strstr(uri->subdir, "../")) {
+                       ERR("Network URI subdirs are not allowed to walk up the path hierarchy");
+                       ret = -LTTNG_ERR_INVALID;
+                       goto error;
+               }
+               ret = snprintf(output->dst.net.base_dir,
+                               sizeof(output->dst.net.base_dir),
+                               "/%s/%s/", session->hostname, uri->subdir);
+       } else {
+               if (session->has_auto_generated_name) {
+                       ret = snprintf(output->dst.net.base_dir,
+                                       sizeof(output->dst.net.base_dir),
+                                       "/%s/%s/", session->hostname,
+                                       session->name);
+               } else {
+                       char session_creation_datetime[16];
+                       size_t strftime_ret;
+                       struct tm *timeinfo;
+
+                       timeinfo = localtime(&session->creation_time);
+                       if (!timeinfo) {
+                               ret = -LTTNG_ERR_FATAL;
+                               goto error;
+                       }
+                       strftime_ret = strftime(session_creation_datetime,
+                                       sizeof(session_creation_datetime),
+                                       "%Y%m%d-%H%M%S", timeinfo);
+                       if (strftime_ret == 0) {
+                               ERR("Failed to format session creation timestamp while setting network URI");
+                               ret = -LTTNG_ERR_FATAL;
+                               goto error;
+                       }
+                       ret = snprintf(output->dst.net.base_dir,
+                                       sizeof(output->dst.net.base_dir),
+                                       "/%s/%s-%s/", session->hostname,
+                                       session->name,
+                                       session_creation_datetime);
+               }
+       }
+       if (ret >= sizeof(output->dst.net.base_dir)) {
+               ret = -LTTNG_ERR_INVALID;
+               ERR("Truncation occurred while setting network output base directory");
+               goto error;
+       } else if (ret == -1) {
+               ret = -LTTNG_ERR_INVALID;
+               PERROR("Error occurred while setting network output base directory");
+               goto error;
+       }
+
+       DBG3("Consumer set network uri base_dir path %s",
+                       output->dst.net.base_dir);
+
+end:
+       return 0;
+equal:
+       return 1;
+error:
+       return ret;
+}
+
+/*
+ * Send file descriptors to the consumer via sock.
+ *
+ * The consumer socket lock must be held by the caller.
+ */
+int consumer_send_fds(struct consumer_socket *sock, const int *fds,
+               size_t nb_fd)
+{
+       int ret;
+
+       LTTNG_ASSERT(fds);
+       LTTNG_ASSERT(sock);
+       LTTNG_ASSERT(nb_fd > 0);
+       LTTNG_ASSERT(pthread_mutex_trylock(sock->lock) == EBUSY);
+
+       ret = lttcomm_send_fds_unix_sock(*sock->fd_ptr, fds, nb_fd);
+       if (ret < 0) {
+               /* The above call will print a PERROR on error. */
+               DBG("Error when sending consumer fds on sock %d", *sock->fd_ptr);
+               goto error;
+       }
+
+       ret = consumer_recv_status_reply(sock);
+error:
+       return ret;
+}
+
+/*
+ * Send a communication message structure to the consumer.
+ *
+ * The consumer socket lock must be held by the caller.
+ */
+int consumer_send_msg(struct consumer_socket *sock,
+               const struct lttcomm_consumer_msg *msg)
+{
+       int ret;
+
+       LTTNG_ASSERT(msg);
+       LTTNG_ASSERT(sock);
+       LTTNG_ASSERT(pthread_mutex_trylock(sock->lock) == EBUSY);
+
+       ret = consumer_socket_send(sock, msg, sizeof(struct lttcomm_consumer_msg));
+       if (ret < 0) {
+               goto error;
+       }
+
+       ret = consumer_recv_status_reply(sock);
+
+error:
+       return ret;
+}
+
+/*
+ * Send a channel communication message structure to the consumer.
+ *
+ * The consumer socket lock must be held by the caller.
+ */
+int consumer_send_channel(struct consumer_socket *sock,
+               struct lttcomm_consumer_msg *msg)
+{
+       int ret;
+
+       LTTNG_ASSERT(msg);
+       LTTNG_ASSERT(sock);
+
+       ret = consumer_send_msg(sock, msg);
+       if (ret < 0) {
+               goto error;
+       }
+
+error:
+       return ret;
+}
+
+/*
+ * Populate the given consumer msg structure with the ask_channel command
+ * information.
+ */
+void consumer_init_ask_channel_comm_msg(struct lttcomm_consumer_msg *msg,
+               uint64_t subbuf_size,
+               uint64_t num_subbuf,
+               int overwrite,
+               unsigned int switch_timer_interval,
+               unsigned int read_timer_interval,
+               unsigned int live_timer_interval,
+               bool is_in_live_session,
+               unsigned int monitor_timer_interval,
+               int output,
+               int type,
+               uint64_t session_id,
+               const char *pathname,
+               const char *name,
+               uint64_t relayd_id,
+               uint64_t key,
+               unsigned char *uuid,
+               uint32_t chan_id,
+               uint64_t tracefile_size,
+               uint64_t tracefile_count,
+               uint64_t session_id_per_pid,
+               unsigned int monitor,
+               uint32_t ust_app_uid,
+               int64_t blocking_timeout,
+               const char *root_shm_path,
+               const char *shm_path,
+               struct lttng_trace_chunk *trace_chunk,
+               const struct lttng_credentials *buffer_credentials)
+{
+       LTTNG_ASSERT(msg);
+
+       /* Zeroed structure */
+       memset(msg, 0, sizeof(struct lttcomm_consumer_msg));
+       msg->u.ask_channel.buffer_credentials.uid = UINT32_MAX;
+       msg->u.ask_channel.buffer_credentials.gid = UINT32_MAX;
+
+       if (trace_chunk) {
+               uint64_t chunk_id;
+               enum lttng_trace_chunk_status chunk_status;
+
+               chunk_status = lttng_trace_chunk_get_id(trace_chunk, &chunk_id);
+               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+               LTTNG_OPTIONAL_SET(&msg->u.ask_channel.chunk_id, chunk_id);
+       }
+       msg->u.ask_channel.buffer_credentials.uid =
+                       lttng_credentials_get_uid(buffer_credentials);
+       msg->u.ask_channel.buffer_credentials.gid =
+                       lttng_credentials_get_gid(buffer_credentials);
+
+       msg->cmd_type = LTTNG_CONSUMER_ASK_CHANNEL_CREATION;
+       msg->u.ask_channel.subbuf_size = subbuf_size;
+       msg->u.ask_channel.num_subbuf = num_subbuf;
+       msg->u.ask_channel.overwrite = overwrite;
+       msg->u.ask_channel.switch_timer_interval = switch_timer_interval;
+       msg->u.ask_channel.read_timer_interval = read_timer_interval;
+       msg->u.ask_channel.live_timer_interval = live_timer_interval;
+       msg->u.ask_channel.is_live = is_in_live_session;
+       msg->u.ask_channel.monitor_timer_interval = monitor_timer_interval;
+       msg->u.ask_channel.output = output;
+       msg->u.ask_channel.type = type;
+       msg->u.ask_channel.session_id = session_id;
+       msg->u.ask_channel.session_id_per_pid = session_id_per_pid;
+       msg->u.ask_channel.relayd_id = relayd_id;
+       msg->u.ask_channel.key = key;
+       msg->u.ask_channel.chan_id = chan_id;
+       msg->u.ask_channel.tracefile_size = tracefile_size;
+       msg->u.ask_channel.tracefile_count = tracefile_count;
+       msg->u.ask_channel.monitor = monitor;
+       msg->u.ask_channel.ust_app_uid = ust_app_uid;
+       msg->u.ask_channel.blocking_timeout = blocking_timeout;
+
+       memcpy(msg->u.ask_channel.uuid, uuid, sizeof(msg->u.ask_channel.uuid));
+
+       if (pathname) {
+               strncpy(msg->u.ask_channel.pathname, pathname,
+                               sizeof(msg->u.ask_channel.pathname));
+               msg->u.ask_channel.pathname[sizeof(msg->u.ask_channel.pathname)-1] = '\0';
+       }
+
+       strncpy(msg->u.ask_channel.name, name, sizeof(msg->u.ask_channel.name));
+       msg->u.ask_channel.name[sizeof(msg->u.ask_channel.name) - 1] = '\0';
+
+       if (root_shm_path) {
+               strncpy(msg->u.ask_channel.root_shm_path, root_shm_path,
+                       sizeof(msg->u.ask_channel.root_shm_path));
+               msg->u.ask_channel.root_shm_path[sizeof(msg->u.ask_channel.root_shm_path) - 1] = '\0';
+       }
+       if (shm_path) {
+               strncpy(msg->u.ask_channel.shm_path, shm_path,
+                       sizeof(msg->u.ask_channel.shm_path));
+               msg->u.ask_channel.shm_path[sizeof(msg->u.ask_channel.shm_path) - 1] = '\0';
+       }
+}
+
+/*
+ * Init channel communication message structure.
+ */
+void consumer_init_add_channel_comm_msg(struct lttcomm_consumer_msg *msg,
+               uint64_t channel_key,
+               uint64_t session_id,
+               const char *pathname,
+               uid_t uid,
+               gid_t gid,
+               uint64_t relayd_id,
+               const char *name,
+               unsigned int nb_init_streams,
+               enum lttng_event_output output,
+               int type,
+               uint64_t tracefile_size,
+               uint64_t tracefile_count,
+               unsigned int monitor,
+               unsigned int live_timer_interval,
+               bool is_in_live_session,
+               unsigned int monitor_timer_interval,
+               struct lttng_trace_chunk *trace_chunk)
+{
+       LTTNG_ASSERT(msg);
+
+       /* Zeroed structure */
+       memset(msg, 0, sizeof(struct lttcomm_consumer_msg));
+
+       if (trace_chunk) {
+               uint64_t chunk_id;
+               enum lttng_trace_chunk_status chunk_status;
+
+               chunk_status = lttng_trace_chunk_get_id(trace_chunk, &chunk_id);
+               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+               LTTNG_OPTIONAL_SET(&msg->u.channel.chunk_id, chunk_id);
+       }
+
+       /* Send channel */
+       msg->cmd_type = LTTNG_CONSUMER_ADD_CHANNEL;
+       msg->u.channel.channel_key = channel_key;
+       msg->u.channel.session_id = session_id;
+       msg->u.channel.relayd_id = relayd_id;
+       msg->u.channel.nb_init_streams = nb_init_streams;
+       msg->u.channel.output = output;
+       msg->u.channel.type = type;
+       msg->u.channel.tracefile_size = tracefile_size;
+       msg->u.channel.tracefile_count = tracefile_count;
+       msg->u.channel.monitor = monitor;
+       msg->u.channel.live_timer_interval = live_timer_interval;
+       msg->u.channel.is_live = is_in_live_session;
+       msg->u.channel.monitor_timer_interval = monitor_timer_interval;
+
+       strncpy(msg->u.channel.pathname, pathname,
+                       sizeof(msg->u.channel.pathname));
+       msg->u.channel.pathname[sizeof(msg->u.channel.pathname) - 1] = '\0';
+
+       strncpy(msg->u.channel.name, name, sizeof(msg->u.channel.name));
+       msg->u.channel.name[sizeof(msg->u.channel.name) - 1] = '\0';
+}
+
+/*
+ * Init stream communication message structure.
+ */
+void consumer_init_add_stream_comm_msg(struct lttcomm_consumer_msg *msg,
+               uint64_t channel_key,
+               uint64_t stream_key,
+               int32_t cpu)
+{
+       LTTNG_ASSERT(msg);
+
+       memset(msg, 0, sizeof(struct lttcomm_consumer_msg));
+
+       msg->cmd_type = LTTNG_CONSUMER_ADD_STREAM;
+       msg->u.stream.channel_key = channel_key;
+       msg->u.stream.stream_key = stream_key;
+       msg->u.stream.cpu = cpu;
+}
+
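+/*
+ * Init the streams sent communication message structure.
+ */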
+void consumer_init_streams_sent_comm_msg(struct lttcomm_consumer_msg *msg,
+               enum lttng_consumer_command cmd,
+               uint64_t channel_key, uint64_t net_seq_idx)
+{
+       LTTNG_ASSERT(msg);
+
+       memset(msg, 0, sizeof(struct lttcomm_consumer_msg));
+
+       msg->cmd_type = cmd;
+       msg->u.sent_streams.channel_key = channel_key;
+       msg->u.sent_streams.net_seq_idx = net_seq_idx;
+}
+
+/*
+ * Send stream communication structure to the consumer.
+ */
+int consumer_send_stream(struct consumer_socket *sock,
+               struct consumer_output *dst, struct lttcomm_consumer_msg *msg,
+               const int *fds, size_t nb_fd)
+{
+       int ret;
+
+       LTTNG_ASSERT(msg);
+       LTTNG_ASSERT(dst);
+       LTTNG_ASSERT(sock);
+       LTTNG_ASSERT(fds);
+
+       ret = consumer_send_msg(sock, msg);
+       if (ret < 0) {
+               goto error;
+       }
+
+       ret = consumer_send_fds(sock, fds, nb_fd);
+       if (ret < 0) {
+               goto error;
+       }
+
+error:
+       return ret;
+}
+
+/*
+ * Send relayd socket to consumer associated with a session name.
+ *
+ * The consumer socket lock must be held by the caller.
+ *
+ * On success, return a positive value. On error, a negative value.
+ */
+int consumer_send_relayd_socket(struct consumer_socket *consumer_sock,
+               struct lttcomm_relayd_sock *rsock, struct consumer_output *consumer,
+               enum lttng_stream_type type, uint64_t session_id,
+               const char *session_name, const char *hostname,
+               const char *base_path, int session_live_timer,
+               const uint64_t *current_chunk_id, time_t session_creation_time,
+               bool session_name_contains_creation_time)
+{
+       int ret;
+       int fd;
+       struct lttcomm_consumer_msg msg;
+
+       /* Code flow error. Safety net. */
+       LTTNG_ASSERT(rsock);
+       LTTNG_ASSERT(consumer);
+       LTTNG_ASSERT(consumer_sock);
+
+       memset(&msg, 0, sizeof(msg));
+       /* Bail out if consumer is disabled */
+       if (!consumer->enabled) {
+               ret = LTTNG_OK;
+               goto error;
+       }
+
+       if (type == LTTNG_STREAM_CONTROL) {
+               char output_path[LTTNG_PATH_MAX] = {};
+               uint64_t relayd_session_id;
+
+               ret = relayd_create_session(rsock, &relayd_session_id,
+                               session_name, hostname, base_path,
+                               session_live_timer, consumer->snapshot,
+                               session_id, the_sessiond_uuid, current_chunk_id,
+                               session_creation_time,
+                               session_name_contains_creation_time,
+                               output_path);
+               if (ret < 0) {
+                       /* Close the control socket. */
+                       (void) relayd_close(rsock);
+                       goto error;
+               }
+               msg.u.relayd_sock.relayd_session_id = relayd_session_id;
+               DBG("Created session on relay, output path reply: %s",
+                       output_path);
+       }
+
+       msg.cmd_type = LTTNG_CONSUMER_ADD_RELAYD_SOCKET;
+       /*
+        * Assign network consumer output index using the temporary consumer since
+        * this call should only be made from within a set_consumer_uri() function
+        * call in the session daemon.
+        */
+       msg.u.relayd_sock.net_index = consumer->net_seq_index;
+       msg.u.relayd_sock.type = type;
+       msg.u.relayd_sock.session_id = session_id;
+       memcpy(&msg.u.relayd_sock.sock, rsock, sizeof(msg.u.relayd_sock.sock));
+
+       DBG3("Sending relayd sock info to consumer on %d", *consumer_sock->fd_ptr);
+       ret = consumer_send_msg(consumer_sock, &msg);
+       if (ret < 0) {
+               goto error;
+       }
+
+       DBG3("Sending relayd socket file descriptor to consumer");
+       fd = rsock->sock.fd;
+       ret = consumer_send_fds(consumer_sock, &fd, 1);
+       if (ret < 0) {
+               goto error;
+       }
+
+       DBG2("Consumer relayd socket sent");
+
+error:
+       return ret;
+}
+
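+/*
+ * Send a command message of the given type to the consumer, followed by the
+ * given pipe file descriptor.
+ *
+ * The consumer socket lock is taken by this function.
+ */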
+static
+int consumer_send_pipe(struct consumer_socket *consumer_sock,
+               enum lttng_consumer_command cmd, int pipe)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+       const char *pipe_name;
+       const char *command_name;
+
+       switch (cmd) {
+       case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
+               pipe_name = "channel monitor";
+               command_name = "SET_CHANNEL_MONITOR_PIPE";
+               break;
+       default:
+               ERR("Unexpected command received in %s (cmd = %d)", __func__,
+                               (int) cmd);
+               abort();
+       }
+
+       /* Code flow error. Safety net. */
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = cmd;
+
+       pthread_mutex_lock(consumer_sock->lock);
+       DBG3("Sending %s command to consumer", command_name);
+       ret = consumer_send_msg(consumer_sock, &msg);
+       if (ret < 0) {
+               goto error;
+       }
+
+       DBG3("Sending %s pipe %d to consumer on socket %d",
+                       pipe_name,
+                       pipe, *consumer_sock->fd_ptr);
+       ret = consumer_send_fds(consumer_sock, &pipe, 1);
+       if (ret < 0) {
+               goto error;
+       }
+
+       DBG2("%s pipe successfully sent", pipe_name);
+error:
+       pthread_mutex_unlock(consumer_sock->lock);
+       return ret;
+}
+
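+/*
+ * Send the channel monitor pipe to the consumer.
+ */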
+int consumer_send_channel_monitor_pipe(struct consumer_socket *consumer_sock,
+               int pipe)
+{
+       return consumer_send_pipe(consumer_sock,
+                       LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE, pipe);
+}
+
+/*
+ * Ask the consumer if the data is pending for the specific session id.
+ * Returns 1 if data is pending, 0 otherwise, or < 0 on error.
+ */
+int consumer_is_data_pending(uint64_t session_id,
+               struct consumer_output *consumer)
+{
+       int ret;
+       int32_t ret_code = 0;  /* Default is that the data is NOT pending */
+       struct consumer_socket *socket;
+       struct lttng_ht_iter iter;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(consumer);
+
+       DBG3("Consumer data pending for id %" PRIu64, session_id);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_DATA_PENDING;
+       msg.u.data_pending.session_id = session_id;
+
+       /* Send command for each consumer */
+       rcu_read_lock();
+       cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter, socket,
+                       node.node) {
+               pthread_mutex_lock(socket->lock);
+               ret = consumer_socket_send(socket, &msg, sizeof(msg));
+               if (ret < 0) {
+                       pthread_mutex_unlock(socket->lock);
+                       goto error_unlock;
+               }
+
+               /*
+                * No need for a recv reply status because the answer to the command is
+                * the reply status message.
+                */
+
+               ret = consumer_socket_recv(socket, &ret_code, sizeof(ret_code));
+               if (ret < 0) {
+                       pthread_mutex_unlock(socket->lock);
+                       goto error_unlock;
+               }
+               pthread_mutex_unlock(socket->lock);
+
+               if (ret_code == 1) {
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       DBG("Consumer data is %s pending for session id %" PRIu64,
+                       ret_code == 1 ? "" : "NOT", session_id);
+       return ret_code;
+
+error_unlock:
+       rcu_read_unlock();
+       return -1;
+}
+
+/*
+ * Send a flush command to consumer using the given channel key.
+ *
+ * Return 0 on success else a negative value.
+ */
+int consumer_flush_channel(struct consumer_socket *socket, uint64_t key)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(socket);
+
+       DBG2("Consumer flush channel key %" PRIu64, key);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_FLUSH_CHANNEL;
+       msg.u.flush_channel.key = key;
+
+       pthread_mutex_lock(socket->lock);
+       health_code_update();
+
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0) {
+               goto end;
+       }
+
+end:
+       health_code_update();
+       pthread_mutex_unlock(socket->lock);
+       return ret;
+}
+
+/*
+ * Send a clear quiescent command to consumer using the given channel key.
+ *
+ * Return 0 on success else a negative value.
+ */
+int consumer_clear_quiescent_channel(struct consumer_socket *socket, uint64_t key)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(socket);
+
+       DBG2("Consumer clear quiescent channel key %" PRIu64, key);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL;
+       msg.u.clear_quiescent_channel.key = key;
+
+       pthread_mutex_lock(socket->lock);
+       health_code_update();
+
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0) {
+               goto end;
+       }
+
+end:
+       health_code_update();
+       pthread_mutex_unlock(socket->lock);
+       return ret;
+}
+
+/*
+ * Send a close metadata command to consumer using the given channel key.
+ * Called with registry lock held.
+ *
+ * Return 0 on success else a negative value.
+ */
+int consumer_close_metadata(struct consumer_socket *socket,
+               uint64_t metadata_key)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(socket);
+
+       DBG2("Consumer close metadata channel key %" PRIu64, metadata_key);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_CLOSE_METADATA;
+       msg.u.close_metadata.key = metadata_key;
+
+       pthread_mutex_lock(socket->lock);
+       health_code_update();
+
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0) {
+               goto end;
+       }
+
+end:
+       health_code_update();
+       pthread_mutex_unlock(socket->lock);
+       return ret;
+}
+
+/*
+ * Send a setup metadata command to the consumer using the given channel key.
+ *
+ * Return 0 on success else a negative value.
+ */
+int consumer_setup_metadata(struct consumer_socket *socket,
+               uint64_t metadata_key)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(socket);
+
+       DBG2("Consumer setup metadata channel key %" PRIu64, metadata_key);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_SETUP_METADATA;
+       msg.u.setup_metadata.key = metadata_key;
+
+       pthread_mutex_lock(socket->lock);
+       health_code_update();
+
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0) {
+               goto end;
+       }
+
+end:
+       health_code_update();
+       pthread_mutex_unlock(socket->lock);
+       return ret;
+}
+
+/*
+ * Send metadata string to consumer.
+ * RCU read-side lock must be held to guarantee existence of socket.
+ *
+ * Return 0 on success else a negative value.
+ */
+int consumer_push_metadata(struct consumer_socket *socket,
+               uint64_t metadata_key, char *metadata_str, size_t len,
+               size_t target_offset, uint64_t version)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(socket);
+
+       DBG2("Consumer push metadata to consumer socket %d", *socket->fd_ptr);
+
+       pthread_mutex_lock(socket->lock);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_PUSH_METADATA;
+       msg.u.push_metadata.key = metadata_key;
+       msg.u.push_metadata.target_offset = target_offset;
+       msg.u.push_metadata.len = len;
+       msg.u.push_metadata.version = version;
+
+       health_code_update();
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0 || len == 0) {
+               goto end;
+       }
+
+       DBG3("Consumer pushing metadata on sock %d of len %zu", *socket->fd_ptr,
+                       len);
+
+       ret = consumer_socket_send(socket, metadata_str, len);
+       if (ret < 0) {
+               goto end;
+       }
+
+       health_code_update();
+       ret = consumer_recv_status_reply(socket);
+       if (ret < 0) {
+               goto end;
+       }
+
+end:
+       pthread_mutex_unlock(socket->lock);
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Ask the consumer to snapshot a specific channel using the key.
+ *
+ * Returns LTTNG_OK on success or else an LTTng error code.
+ */
+enum lttng_error_code consumer_snapshot_channel(struct consumer_socket *socket,
+               uint64_t key, const struct consumer_output *output, int metadata,
+               uid_t uid, gid_t gid, const char *channel_path, int wait,
+               uint64_t nb_packets_per_stream)
+{
+       int ret;
+       enum lttng_error_code status = LTTNG_OK;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(socket);
+       LTTNG_ASSERT(output);
+
+       DBG("Consumer snapshot channel key %" PRIu64, key);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_SNAPSHOT_CHANNEL;
+       msg.u.snapshot_channel.key = key;
+       msg.u.snapshot_channel.nb_packets_per_stream = nb_packets_per_stream;
+       msg.u.snapshot_channel.metadata = metadata;
+
+       if (output->type == CONSUMER_DST_NET) {
+               msg.u.snapshot_channel.relayd_id =
+                               output->net_seq_index;
+               msg.u.snapshot_channel.use_relayd = 1;
+       } else {
+               msg.u.snapshot_channel.relayd_id = (uint64_t) -1ULL;
+       }
+       ret = lttng_strncpy(msg.u.snapshot_channel.pathname,
+                       channel_path,
+                       sizeof(msg.u.snapshot_channel.pathname));
+       if (ret < 0) {
+               ERR("Snapshot path exceeds the maximal allowed length of %zu bytes (%zu bytes required) with path \"%s\"",
+                               sizeof(msg.u.snapshot_channel.pathname),
+                               strlen(channel_path),
+                               channel_path);
+               status = LTTNG_ERR_SNAPSHOT_FAIL;
+               goto error;
+       }
+
+       health_code_update();
+       pthread_mutex_lock(socket->lock);
+       ret = consumer_send_msg(socket, &msg);
+       pthread_mutex_unlock(socket->lock);
+       if (ret < 0) {
+               switch (-ret) {
+               case LTTCOMM_CONSUMERD_CHAN_NOT_FOUND:
+                       status = LTTNG_ERR_CHAN_NOT_FOUND;
+                       break;
+               default:
+                       status = LTTNG_ERR_SNAPSHOT_FAIL;
+                       break;
+               }
+               goto error;
+       }
+
+error:
+       health_code_update();
+       return status;
+}
+
+/*
+ * Ask the consumer for the number of discarded events of a channel.
+ */
+int consumer_get_discarded_events(uint64_t session_id, uint64_t channel_key,
+               struct consumer_output *consumer, uint64_t *discarded)
+{
+       int ret;
+       struct consumer_socket *socket;
+       struct lttng_ht_iter iter;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(consumer);
+
+       DBG3("Consumer discarded events id %" PRIu64, session_id);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_DISCARDED_EVENTS;
+       msg.u.discarded_events.session_id = session_id;
+       msg.u.discarded_events.channel_key = channel_key;
+
+       *discarded = 0;
+
+       /* Send command for each consumer */
+       rcu_read_lock();
+       cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter, socket,
+                       node.node) {
+               uint64_t consumer_discarded = 0;
+               pthread_mutex_lock(socket->lock);
+               ret = consumer_socket_send(socket, &msg, sizeof(msg));
+               if (ret < 0) {
+                       pthread_mutex_unlock(socket->lock);
+                       goto end;
+               }
+
+               /*
+                * No need for a recv reply status because the answer to the
+                * command is the reply status message.
+                */
+               ret = consumer_socket_recv(socket, &consumer_discarded,
+                               sizeof(consumer_discarded));
+               if (ret < 0) {
+                       ERR("get discarded events");
+                       pthread_mutex_unlock(socket->lock);
+                       goto end;
+               }
+               pthread_mutex_unlock(socket->lock);
+               *discarded += consumer_discarded;
+       }
+       ret = 0;
+       DBG("Consumer discarded %" PRIu64 " events in session id %" PRIu64,
+                       *discarded, session_id);
+
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Ask the consumer for the number of lost packets of a channel.
+ */
+int consumer_get_lost_packets(uint64_t session_id, uint64_t channel_key,
+               struct consumer_output *consumer, uint64_t *lost)
+{
+       int ret;
+       struct consumer_socket *socket;
+       struct lttng_ht_iter iter;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(consumer);
+
+       DBG3("Consumer lost packets id %" PRIu64, session_id);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_LOST_PACKETS;
+       msg.u.lost_packets.session_id = session_id;
+       msg.u.lost_packets.channel_key = channel_key;
+
+       *lost = 0;
+
+       /* Send command for each consumer */
+       rcu_read_lock();
+       cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter, socket,
+                       node.node) {
+               uint64_t consumer_lost = 0;
+               pthread_mutex_lock(socket->lock);
+               ret = consumer_socket_send(socket, &msg, sizeof(msg));
+               if (ret < 0) {
+                       pthread_mutex_unlock(socket->lock);
+                       goto end;
+               }
+
+               /*
+                * No need for a recv reply status because the answer to the
+                * command is the reply status message.
+                */
+               ret = consumer_socket_recv(socket, &consumer_lost,
+                               sizeof(consumer_lost));
+               if (ret < 0) {
+                       ERR("get lost packets");
+                       pthread_mutex_unlock(socket->lock);
+                       goto end;
+               }
+               pthread_mutex_unlock(socket->lock);
+               *lost += consumer_lost;
+       }
+       ret = 0;
+       DBG("Consumer lost %" PRIu64 " packets in session id %" PRIu64,
+                       *lost, session_id);
+
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Ask the consumer to rotate a channel.
+ *
+ * The new_chunk_id is the session->rotate_count that has been incremented
+ * when the rotation started. On the relay, this allows keeping track of the
+ * chunk to which each stream is currently writing (for the rotate_pending
+ * operation).
+ */
+int consumer_rotate_channel(struct consumer_socket *socket, uint64_t key,
+               uid_t uid, gid_t gid, struct consumer_output *output,
+               bool is_metadata_channel)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(socket);
+
+       DBG("Consumer rotate channel key %" PRIu64, key);
+
+       pthread_mutex_lock(socket->lock);
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_ROTATE_CHANNEL;
+       msg.u.rotate_channel.key = key;
+       msg.u.rotate_channel.metadata = !!is_metadata_channel;
+
+       if (output->type == CONSUMER_DST_NET) {
+               msg.u.rotate_channel.relayd_id = output->net_seq_index;
+       } else {
+               msg.u.rotate_channel.relayd_id = (uint64_t) -1ULL;
+       }
+
+       health_code_update();
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0) {
+               switch (-ret) {
+               case LTTCOMM_CONSUMERD_CHAN_NOT_FOUND:
+                       ret = -LTTNG_ERR_CHAN_NOT_FOUND;
+                       break;
+               default:
+                       ret = -LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+                       break;
+               }
+               goto error;
+       }
+error:
+       pthread_mutex_unlock(socket->lock);
+       health_code_update();
+       return ret;
+}
+
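+/*
+ * Send the "open channel packets" command to the consumer for the channel
+ * identified by 'key'.
+ */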
+int consumer_open_channel_packets(struct consumer_socket *socket, uint64_t key)
+{
+       int ret;
+       lttcomm_consumer_msg msg = {
+               .cmd_type = LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS,
+       };
+       msg.u.open_channel_packets.key = key;
+
+       LTTNG_ASSERT(socket);
+
+       DBG("Consumer open channel packets: channel key = %" PRIu64, key);
+
+       health_code_update();
+
+       pthread_mutex_lock(socket->lock);
+       ret = consumer_send_msg(socket, &msg);
+       pthread_mutex_unlock(socket->lock);
+       if (ret < 0) {
+               goto error_socket;
+       }
+
+error_socket:
+       health_code_update();
+       return ret;
+}
+
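+/*
+ * Send a clear command to the consumer for the channel identified by 'key'.
+ */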
+int consumer_clear_channel(struct consumer_socket *socket, uint64_t key)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(socket);
+
+       DBG("Consumer clear channel %" PRIu64, key);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_CLEAR_CHANNEL;
+       msg.u.clear_channel.key = key;
+
+       health_code_update();
+
+       pthread_mutex_lock(socket->lock);
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0) {
+               goto error_socket;
+       }
+
+error_socket:
+       pthread_mutex_unlock(socket->lock);
+
+       health_code_update();
+       return ret;
+}
+
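+/*
+ * Send the initialization command to the consumer, transmitting the session
+ * daemon's UUID.
+ */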
+int consumer_init(struct consumer_socket *socket,
+               const lttng_uuid sessiond_uuid)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg = {
+               .cmd_type = LTTNG_CONSUMER_INIT,
+       };
+
+       LTTNG_ASSERT(socket);
+
+       DBG("Sending consumer initialization command");
+       lttng_uuid_copy(msg.u.init.sessiond_uuid, sessiond_uuid);
+
+       health_code_update();
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0) {
+               goto error;
+       }
+
+error:
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Ask the consumer to create a new chunk for a given session.
+ *
+ * Called with the consumer socket lock held.
+ */
+int consumer_create_trace_chunk(struct consumer_socket *socket,
+               uint64_t relayd_id, uint64_t session_id,
+               struct lttng_trace_chunk *chunk,
+               const char *domain_subdir)
+{
+       int ret;
+       enum lttng_trace_chunk_status chunk_status;
+       struct lttng_credentials chunk_credentials;
+       const struct lttng_directory_handle *chunk_directory_handle = NULL;
+       struct lttng_directory_handle *domain_handle = NULL;
+       int domain_dirfd;
+       const char *chunk_name;
+       bool chunk_name_overridden;
+       uint64_t chunk_id;
+       time_t creation_timestamp;
+       char creation_timestamp_buffer[ISO8601_STR_LEN];
+       const char *creation_timestamp_str = "(none)";
+       const bool chunk_has_local_output = relayd_id == -1ULL;
+       enum lttng_trace_chunk_status tc_status;
+       struct lttcomm_consumer_msg msg = {
+               .cmd_type = LTTNG_CONSUMER_CREATE_TRACE_CHUNK,
+       };
+       msg.u.create_trace_chunk.session_id = session_id;
+
+       LTTNG_ASSERT(socket);
+       LTTNG_ASSERT(chunk);
+
+       if (relayd_id != -1ULL) {
+               LTTNG_OPTIONAL_SET(&msg.u.create_trace_chunk.relayd_id,
+                               relayd_id);
+       }
+
+       chunk_status = lttng_trace_chunk_get_name(chunk, &chunk_name,
+                       &chunk_name_overridden);
+       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK &&
+                       chunk_status != LTTNG_TRACE_CHUNK_STATUS_NONE) {
+               ERR("Failed to get name of trace chunk");
+               ret = -LTTNG_ERR_FATAL;
+               goto error;
+       }
+       if (chunk_name_overridden) {
+               ret = lttng_strncpy(msg.u.create_trace_chunk.override_name,
+                               chunk_name,
+                               sizeof(msg.u.create_trace_chunk.override_name));
+               if (ret) {
+                       ERR("Trace chunk name \"%s\" exceeds the maximal length allowed by the consumer protocol",
+                                       chunk_name);
+                       ret = -LTTNG_ERR_FATAL;
+                       goto error;
+               }
+       }
+
+       chunk_status = lttng_trace_chunk_get_creation_timestamp(chunk,
+                       &creation_timestamp);
+       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+               ret = -LTTNG_ERR_FATAL;
+               goto error;
+       }
+       msg.u.create_trace_chunk.creation_timestamp =
+                       (uint64_t) creation_timestamp;
+       /* Only used for logging purposes. */
+       ret = time_to_iso8601_str(creation_timestamp,
+                       creation_timestamp_buffer,
+                       sizeof(creation_timestamp_buffer));
+       creation_timestamp_str = !ret ? creation_timestamp_buffer :
+                       "(formatting error)";
+
+       chunk_status = lttng_trace_chunk_get_id(chunk, &chunk_id);
+       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+               /*
+                * Anonymous trace chunks should never be transmitted
+                * to remote peers (consumerd and relayd). They are used
+                * internally for backward-compatibility purposes.
+                */
+               ret = -LTTNG_ERR_FATAL;
+               goto error;
+       }
+       msg.u.create_trace_chunk.chunk_id = chunk_id;
+
+       if (chunk_has_local_output) {
+               chunk_status = lttng_trace_chunk_borrow_chunk_directory_handle(
+                               chunk, &chunk_directory_handle);
+               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       ret = -LTTNG_ERR_FATAL;
+                       goto error;
+               }
+               chunk_status = lttng_trace_chunk_get_credentials(
+                               chunk, &chunk_credentials);
+               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       /*
+                        * Not associating credentials to a sessiond chunk is a
+                        * fatal internal error.
+                        */
+                       ret = -LTTNG_ERR_FATAL;
+                       goto error;
+               }
+               tc_status = lttng_trace_chunk_create_subdirectory(
+                               chunk, domain_subdir);
+               if (tc_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       PERROR("Failed to create chunk domain output directory \"%s\"",
+                               domain_subdir);
+                       ret = -LTTNG_ERR_FATAL;
+                       goto error;
+               }
+               domain_handle = lttng_directory_handle_create_from_handle(
+                               domain_subdir,
+                               chunk_directory_handle);
+               if (!domain_handle) {
+                       ret = -LTTNG_ERR_FATAL;
+                       goto error;
+               }
+
+               /*
+                * This will only compile on platforms that support
+                * dirfd (POSIX.2008). This is fine as the session daemon
+                * is only built for such platforms.
+                *
+                * The ownership of the chunk directory handle is maintained
+                * by the trace chunk.
+                */
+               domain_dirfd = lttng_directory_handle_get_dirfd(
+                               domain_handle);
+               LTTNG_ASSERT(domain_dirfd >= 0);
+
+               msg.u.create_trace_chunk.credentials.value.uid =
+                               lttng_credentials_get_uid(&chunk_credentials);
+               msg.u.create_trace_chunk.credentials.value.gid =
+                               lttng_credentials_get_gid(&chunk_credentials);
+               msg.u.create_trace_chunk.credentials.is_set = 1;
+       }
+
+       DBG("Sending consumer create trace chunk command: relayd_id = %" PRId64
+                       ", session_id = %" PRIu64 ", chunk_id = %" PRIu64
+                       ", creation_timestamp = %s",
+                       relayd_id, session_id, chunk_id,
+                       creation_timestamp_str);
+       health_code_update();
+       ret = consumer_send_msg(socket, &msg);
+       health_code_update();
+       if (ret < 0) {
+               ERR("Trace chunk creation error on consumer");
+               ret = -LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
+               goto error;
+       }
+
+       if (chunk_has_local_output) {
+               DBG("Sending trace chunk domain directory fd to consumer");
+               health_code_update();
+               ret = consumer_send_fds(socket, &domain_dirfd, 1);
+               health_code_update();
+               if (ret < 0) {
+                       ERR("Trace chunk creation error on consumer");
+                       ret = -LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
+                       goto error;
+               }
+       }
+error:
+       lttng_directory_handle_put(domain_handle);
+       return ret;
+}
+
+/*
+ * Ask the consumer to close a trace chunk for a given session.
+ *
+ * Called with the consumer socket lock held.
+ */
+int consumer_close_trace_chunk(struct consumer_socket *socket,
+               uint64_t relayd_id, uint64_t session_id,
+               struct lttng_trace_chunk *chunk,
+               char *closed_trace_chunk_path)
+{
+       int ret;
+       enum lttng_trace_chunk_status chunk_status;
+       lttcomm_consumer_msg msg = {
+               .cmd_type = LTTNG_CONSUMER_CLOSE_TRACE_CHUNK,
+       };
+       msg.u.close_trace_chunk.session_id = session_id;
+
+       struct lttcomm_consumer_close_trace_chunk_reply reply;
+       uint64_t chunk_id;
+       time_t close_timestamp;
+       enum lttng_trace_chunk_command_type close_command;
+       const char *close_command_name = "none";
+       struct lttng_dynamic_buffer path_reception_buffer;
+
+       LTTNG_ASSERT(socket);
+       lttng_dynamic_buffer_init(&path_reception_buffer);
+
+       if (relayd_id != -1ULL) {
+               LTTNG_OPTIONAL_SET(
+                               &msg.u.close_trace_chunk.relayd_id, relayd_id);
+       }
+
+       chunk_status = lttng_trace_chunk_get_close_command(
+                       chunk, &close_command);
+       switch (chunk_status) {
+       case LTTNG_TRACE_CHUNK_STATUS_OK:
+               LTTNG_OPTIONAL_SET(&msg.u.close_trace_chunk.close_command,
+                               (uint32_t) close_command);
+               break;
+       case LTTNG_TRACE_CHUNK_STATUS_NONE:
+               break;
+       default:
+               ERR("Failed to get trace chunk close command");
+               ret = -1;
+               goto error;
+       }
+
+       chunk_status = lttng_trace_chunk_get_id(chunk, &chunk_id);
+       /*
+        * Anonymous trace chunks should never be transmitted to remote peers
+        * (consumerd and relayd). They are used internally for
+        * backward-compatibility purposes.
+        */
+       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+       msg.u.close_trace_chunk.chunk_id = chunk_id;
+
+       chunk_status = lttng_trace_chunk_get_close_timestamp(chunk,
+                       &close_timestamp);
+       /*
+        * A trace chunk should be closed locally before being closed remotely.
+        * Otherwise, the close timestamp would never be transmitted to the
+        * peers.
+        */
+       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+       msg.u.close_trace_chunk.close_timestamp = (uint64_t) close_timestamp;
+
+       if (msg.u.close_trace_chunk.close_command.is_set) {
+               close_command_name = lttng_trace_chunk_command_type_get_name(
+                               close_command);
+       }
+       DBG("Sending consumer close trace chunk command: relayd_id = %" PRId64
+                       ", session_id = %" PRIu64 ", chunk_id = %" PRIu64
+                       ", close command = \"%s\"",
+                       relayd_id, session_id, chunk_id, close_command_name);
+
+       health_code_update();
+       ret = consumer_socket_send(socket, &msg, sizeof(struct lttcomm_consumer_msg));
+       if (ret < 0) {
+               ret = -LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
+               goto error;
+       }
+       ret = consumer_socket_recv(socket, &reply, sizeof(reply));
+       if (ret < 0) {
+               ret = -LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
+               goto error;
+       }
+       if (reply.path_length >= LTTNG_PATH_MAX) {
+               ERR("Invalid path returned by relay daemon: %" PRIu32 " bytes exceeds maximal allowed length of %d bytes",
+                               reply.path_length, LTTNG_PATH_MAX);
+               ret = -LTTNG_ERR_INVALID_PROTOCOL;
+               goto error;
+       }
+       ret = lttng_dynamic_buffer_set_size(&path_reception_buffer,
+                       reply.path_length);
+       if (ret) {
+               ERR("Failed to allocate reception buffer of path returned by the \"close trace chunk\" command");
+               ret = -LTTNG_ERR_NOMEM;
+               goto error;
+       }
+       ret = consumer_socket_recv(socket, path_reception_buffer.data,
+                       path_reception_buffer.size);
+       if (ret < 0) {
+               ERR("Communication error while receiving path of closed trace chunk");
+               ret = -LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
+               goto error;
+       }
+       if (path_reception_buffer.data[path_reception_buffer.size - 1] != '\0') {
+               ERR("Invalid path returned by relay daemon: not null-terminated");
+               ret = -LTTNG_ERR_INVALID_PROTOCOL;
+               goto error;
+       }
+       if (closed_trace_chunk_path) {
+               /*
+                * closed_trace_chunk_path is assumed to have a length >=
+                * LTTNG_PATH_MAX
+                */
+               memcpy(closed_trace_chunk_path, path_reception_buffer.data,
+                               path_reception_buffer.size);
+       }
+error:
+       lttng_dynamic_buffer_reset(&path_reception_buffer);
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Ask the consumer if a trace chunk exists.
+ *
+ * Called with the consumer socket lock held.
+ * Returns 0 on success, or a negative value on error.
+ */
+int consumer_trace_chunk_exists(struct consumer_socket *socket,
+               uint64_t relayd_id, uint64_t session_id,
+               struct lttng_trace_chunk *chunk,
+               enum consumer_trace_chunk_exists_status *result)
+{
+       int ret;
+       enum lttng_trace_chunk_status chunk_status;
+       lttcomm_consumer_msg msg = {
+               .cmd_type = LTTNG_CONSUMER_TRACE_CHUNK_EXISTS,
+       };
+       msg.u.trace_chunk_exists.session_id = session_id;
+
+       uint64_t chunk_id;
+       const char *consumer_reply_str;
+
+       LTTNG_ASSERT(socket);
+
+       if (relayd_id != -1ULL) {
+               LTTNG_OPTIONAL_SET(&msg.u.trace_chunk_exists.relayd_id,
+                               relayd_id);
+       }
+
+       chunk_status = lttng_trace_chunk_get_id(chunk, &chunk_id);
+       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+               /*
+                * Anonymous trace chunks should never be transmitted
+                * to remote peers (consumerd and relayd). They are used
+                * internally for backward-compatibility purposes.
+                */
+               ret = -LTTNG_ERR_FATAL;
+               goto error;
+       }
+       msg.u.trace_chunk_exists.chunk_id = chunk_id;
+
+       DBG("Sending consumer trace chunk exists command: relayd_id = %" PRId64
+                       ", session_id = %" PRIu64
+                       ", chunk_id = %" PRIu64, relayd_id, session_id, chunk_id);
+
+       health_code_update();
+       ret = consumer_send_msg(socket, &msg);
+       switch (-ret) {
+       case LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK:
+               consumer_reply_str = "unknown trace chunk";
+               *result = CONSUMER_TRACE_CHUNK_EXISTS_STATUS_UNKNOWN_CHUNK;
+               break;
+       case LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_LOCAL:
+               consumer_reply_str = "trace chunk exists locally";
+               *result = CONSUMER_TRACE_CHUNK_EXISTS_STATUS_EXISTS_LOCAL;
+               break;
+       case LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_REMOTE:
+               consumer_reply_str = "trace chunk exists on remote peer";
+               *result = CONSUMER_TRACE_CHUNK_EXISTS_STATUS_EXISTS_REMOTE;
+               break;
+       default:
+               ERR("Consumer returned an error from TRACE_CHUNK_EXISTS command");
+               ret = -1;
+               goto error;
+       }
+       DBG("Consumer reply to TRACE_CHUNK_EXISTS command: %s",
+                       consumer_reply_str);
+       ret = 0;
+error:
+       health_code_update();
+       return ret;
+}
index eaa04d302ae6abf61c8a60b24223ab638bbb2051..ffba198a829117ab8ace50687c0822be31f40eb0 100644 (file)
@@ -12,6 +12,7 @@
 #include <common/hashtable/hashtable.h>
 #include <lttng/lttng.h>
 #include <urcu/ref.h>
+#include <algorithm>
 
 #include "snapshot.h"
 
@@ -19,6 +20,19 @@ struct snapshot;
 struct snapshot_output;
 struct ltt_session;
 
+/*
+ * Needed until we use C++14, where std::max is constexpr.
+ *
+ * Use a static_assert so we remember to remove it when we upgrade to a newer
+ * C++.
+ */
+static_assert(__cplusplus == 201103L, "");
+template <typename T>
+constexpr T max_constexpr(T l, T r)
+{
+       return l > r ? l : r;
+}
+
 enum consumer_dst_type {
        CONSUMER_DST_LOCAL,
        CONSUMER_DST_NET,
@@ -61,39 +75,43 @@ struct consumer_socket {
 };
 
 struct consumer_data {
+       consumer_data(lttng_consumer_type type_)
+               : type(type_)
+       {}
+
        enum lttng_consumer_type type;
 
        /* Mutex to control consumerd pid assignation */
-       pthread_mutex_t pid_mutex;
-       pid_t pid;
+       pthread_mutex_t pid_mutex = PTHREAD_MUTEX_INITIALIZER;
+       pid_t pid = 0;
 
-       int err_sock;
+       int err_sock = -1;
        /* These two sockets uses the cmd_unix_sock_path. */
-       int cmd_sock;
+       int cmd_sock = -1;
        /*
         * Write-end of the channel monitoring pipe to be passed to the
         * consumer.
         */
-       int channel_monitor_pipe;
+       int channel_monitor_pipe = -1;
        /*
         * The metadata socket object is handled differently and only created
         * locally in this object thus it's the only reference available in the
         * session daemon. For that reason, a variable for the fd is required and
         * the metadata socket fd points to it.
         */
-       int metadata_fd;
-       struct consumer_socket metadata_sock;
+       int metadata_fd = 0;
+       struct consumer_socket metadata_sock {};
 
        /* consumer error and command Unix socket path */
-       const char *err_unix_sock_path;
-       const char *cmd_unix_sock_path;
+       const char *err_unix_sock_path = nullptr;
+       const char *cmd_unix_sock_path = nullptr;
 
        /*
         * This lock has two purposes. It protects any change to the consumer
         * socket and make sure only one thread uses this object for read/write
         * operations.
         */
-       pthread_mutex_t lock;
+       pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 };
 
 /*
@@ -149,7 +167,8 @@ struct consumer_output {
         * Subdirectory path name used for both local and network
         * consumer ("kernel", "ust", or empty).
         */
-       char domain_subdir[max(sizeof(DEFAULT_KERNEL_TRACE_DIR),
+       char domain_subdir[
+               max_constexpr(sizeof(DEFAULT_KERNEL_TRACE_DIR),
                        sizeof(DEFAULT_UST_TRACE_DIR))];
 
        /*
diff --git a/src/bin/lttng-sessiond/context.c b/src/bin/lttng-sessiond/context.c
deleted file mode 100644 (file)
index c8c8924..0000000
+++ /dev/null
@@ -1,435 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <urcu/list.h>
-
-#include <common/error.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-
-#include "context.h"
-#include "kernel.h"
-#include "ust-app.h"
-#include "trace-ust.h"
-#include "agent.h"
-
-/*
- * Add kernel context to all channel.
- *
- * Assumes the ownership of kctx.
- */
-static int add_kctx_all_channels(struct ltt_kernel_session *ksession,
-               struct ltt_kernel_context *kctx)
-{
-       int ret;
-       struct ltt_kernel_channel *kchan;
-
-       LTTNG_ASSERT(ksession);
-       LTTNG_ASSERT(kctx);
-
-       DBG("Adding kernel context to all channels");
-
-       /* Go over all channels */
-       cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
-               struct ltt_kernel_context *kctx_copy;
-
-               kctx_copy = trace_kernel_copy_context(kctx);
-               if (!kctx_copy) {
-                       PERROR("zmalloc ltt_kernel_context");
-                       ret = -LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-
-               /* Ownership of kctx_copy is transferred to the callee. */
-               ret = kernel_add_channel_context(kchan, kctx_copy);
-               kctx_copy = NULL;
-               if (ret != 0) {
-                       goto error;
-               }
-       }
-
-       ret = LTTNG_OK;
-
-error:
-       trace_kernel_destroy_context(kctx);
-       return ret;
-}
-
-/*
- * Add kernel context to a specific channel.
- *
- * Assumes the ownership of kctx.
- */
-static int add_kctx_to_channel(struct ltt_kernel_context *kctx,
-               struct ltt_kernel_channel *kchan)
-{
-       int ret;
-
-       LTTNG_ASSERT(kchan);
-       LTTNG_ASSERT(kctx);
-
-       DBG("Add kernel context to channel '%s'", kchan->channel->name);
-
-       /* Ownership of kctx is transferred to the callee. */
-       ret = kernel_add_channel_context(kchan, kctx);
-       kctx = NULL;
-       if (ret != 0) {
-               goto error;
-       }
-
-       ret = LTTNG_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Add UST context to channel.
- */
-static int add_uctx_to_channel(struct ltt_ust_session *usess,
-               enum lttng_domain_type domain,
-               struct ltt_ust_channel *uchan,
-               const struct lttng_event_context *ctx)
-{
-       int ret;
-       struct ltt_ust_context *uctx = NULL;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(uchan);
-       LTTNG_ASSERT(ctx);
-
-       /* Check if context is duplicate */
-       cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
-               if (trace_ust_match_context(uctx, ctx)) {
-                       ret = LTTNG_ERR_UST_CONTEXT_EXIST;
-                       goto duplicate;
-               }
-       }
-       uctx = NULL;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-       {
-               struct agent *agt;
-
-               if (ctx->ctx != LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
-                       /* Other contexts are not needed by the agent. */
-                       break;
-               }
-               agt = trace_ust_find_agent(usess, domain);
-
-               if (!agt) {
-                       agt = agent_create(domain);
-                       if (!agt) {
-                               ret = -LTTNG_ERR_NOMEM;
-                               goto error;
-                       }
-                       agent_add(agt, usess->agents);
-               }
-               ret = agent_add_context(ctx, agt);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               ret = agent_enable_context(ctx, domain);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-               break;
-       default:
-               abort();
-       }
-
-       /* Create ltt UST context */
-       uctx = trace_ust_create_context(ctx);
-       if (uctx == NULL) {
-               ret = LTTNG_ERR_UST_CONTEXT_INVAL;
-               goto error;
-       }
-
-       /* Add ltt UST context node to ltt UST channel */
-       lttng_ht_add_ulong(uchan->ctx, &uctx->node);
-       cds_list_add_tail(&uctx->list, &uchan->ctx_list);
-
-       if (!usess->active) {
-               goto end;
-       }
-
-       ret = ust_app_add_ctx_channel_glb(usess, uchan, uctx);
-       if (ret < 0) {
-               goto error;
-       }
-end:
-       DBG("Context UST %d added to channel %s", uctx->ctx.ctx, uchan->name);
-
-       return 0;
-
-error:
-       free(uctx);
-duplicate:
-       return ret;
-}
-
-/*
- * Add kernel context to tracer.
- */
-int context_kernel_add(struct ltt_kernel_session *ksession,
-               const struct lttng_event_context *ctx, char *channel_name)
-{
-       int ret;
-       struct ltt_kernel_channel *kchan;
-       struct ltt_kernel_context *kctx;
-
-       LTTNG_ASSERT(ksession);
-       LTTNG_ASSERT(ctx);
-       LTTNG_ASSERT(channel_name);
-
-       kctx = trace_kernel_create_context(NULL);
-       if (!kctx) {
-               ret = -LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       /* Setup kernel context structure */
-       switch (ctx->ctx) {
-       case LTTNG_EVENT_CONTEXT_PID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PID;
-               break;
-       case LTTNG_EVENT_CONTEXT_PROCNAME:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PROCNAME;
-               break;
-       case LTTNG_EVENT_CONTEXT_PRIO:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PRIO;
-               break;
-       case LTTNG_EVENT_CONTEXT_NICE:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_NICE;
-               break;
-       case LTTNG_EVENT_CONTEXT_VPID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VPID;
-               break;
-       case LTTNG_EVENT_CONTEXT_TID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_TID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VTID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VTID;
-               break;
-       case LTTNG_EVENT_CONTEXT_PPID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PPID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VPPID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VPPID;
-               break;
-       case LTTNG_EVENT_CONTEXT_HOSTNAME:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_HOSTNAME;
-               break;
-       case LTTNG_EVENT_CONTEXT_PERF_CPU_COUNTER:
-       case LTTNG_EVENT_CONTEXT_PERF_COUNTER:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PERF_CPU_COUNTER;
-               break;
-       case LTTNG_EVENT_CONTEXT_INTERRUPTIBLE:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_INTERRUPTIBLE;
-               break;
-       case LTTNG_EVENT_CONTEXT_PREEMPTIBLE:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PREEMPTIBLE;
-               break;
-       case LTTNG_EVENT_CONTEXT_NEED_RESCHEDULE:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_NEED_RESCHEDULE;
-               break;
-       case LTTNG_EVENT_CONTEXT_MIGRATABLE:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_MIGRATABLE;
-               break;
-       case LTTNG_EVENT_CONTEXT_CALLSTACK_KERNEL:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_KERNEL;
-               break;
-       case LTTNG_EVENT_CONTEXT_CALLSTACK_USER:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_USER;
-               break;
-       case LTTNG_EVENT_CONTEXT_CGROUP_NS:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_CGROUP_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_IPC_NS:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_IPC_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_MNT_NS:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_MNT_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_NET_NS:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_NET_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_PID_NS:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PID_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_TIME_NS:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_TIME_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_USER_NS:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_USER_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_UTS_NS:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_UTS_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_UID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_UID;
-               break;
-       case LTTNG_EVENT_CONTEXT_EUID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_EUID;
-               break;
-       case LTTNG_EVENT_CONTEXT_SUID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_SUID;
-               break;
-       case LTTNG_EVENT_CONTEXT_GID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_GID;
-               break;
-       case LTTNG_EVENT_CONTEXT_EGID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_EGID;
-               break;
-       case LTTNG_EVENT_CONTEXT_SGID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_SGID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VUID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VUID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VEUID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VEUID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VSUID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VSUID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VGID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VGID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VEGID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VEGID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VSGID:
-               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VSGID;
-               break;
-       default:
-               ret = LTTNG_ERR_KERN_CONTEXT_FAIL;
-               goto error;
-       }
-
-       kctx->ctx.u.perf_counter.type = ctx->u.perf_counter.type;
-       kctx->ctx.u.perf_counter.config = ctx->u.perf_counter.config;
-       strncpy(kctx->ctx.u.perf_counter.name, ctx->u.perf_counter.name,
-                       LTTNG_SYMBOL_NAME_LEN);
-       kctx->ctx.u.perf_counter.name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
-
-       if (*channel_name == '\0') {
-               ret = add_kctx_all_channels(ksession, kctx);
-               /* Ownership of kctx is transferred to the callee. */
-               kctx = NULL;
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-       } else {
-               /* Get kernel channel */
-               kchan = trace_kernel_get_channel_by_name(channel_name, ksession);
-               if (kchan == NULL) {
-                       ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
-                       goto error;
-               }
-
-               ret = add_kctx_to_channel(kctx, kchan);
-               /* Ownership of kctx is transferred to the callee. */
-               kctx = NULL;
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-       }
-
-       ret = LTTNG_OK;
-
-error:
-       if (kctx) {
-               trace_kernel_destroy_context(kctx);
-       }
-       return ret;
-}
-
-/*
- * Add UST context to tracer.
- */
-int context_ust_add(struct ltt_ust_session *usess,
-               enum lttng_domain_type domain,
-               const struct lttng_event_context *ctx,
-               char *channel_name)
-{
-       int ret = LTTNG_OK;
-       struct lttng_ht_iter iter;
-       struct lttng_ht *chan_ht;
-       struct ltt_ust_channel *uchan = NULL;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(ctx);
-       LTTNG_ASSERT(channel_name);
-
-       rcu_read_lock();
-
-       chan_ht = usess->domain_global.channels;
-
-       /* Get UST channel if defined */
-       if (channel_name[0] != '\0') {
-               uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
-               if (uchan == NULL) {
-                       ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
-                       goto error;
-               }
-       }
-
-       if (uchan) {
-               /* Add ctx to channel */
-               ret = add_uctx_to_channel(usess, domain, uchan, ctx);
-       } else {
-               rcu_read_lock();
-               /* Add ctx all events, all channels */
-               cds_lfht_for_each_entry(chan_ht->ht, &iter.iter, uchan, node.node) {
-                       ret = add_uctx_to_channel(usess, domain, uchan, ctx);
-                       if (ret) {
-                               ERR("Failed to add context to channel %s",
-                                               uchan->name);
-                               continue;
-                       }
-               }
-               rcu_read_unlock();
-       }
-
-       switch (ret) {
-       case LTTNG_ERR_UST_CONTEXT_EXIST:
-               break;
-       case -ENOMEM:
-       case -LTTNG_ERR_NOMEM:
-               ret = LTTNG_ERR_FATAL;
-               break;
-       case -EINVAL:
-               ret = LTTNG_ERR_UST_CONTEXT_INVAL;
-               break;
-       case -ENOSYS:
-               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
-               break;
-       default:
-               if (ret != 0 && ret != LTTNG_OK) {
-                       ret = ret > 0 ? ret : LTTNG_ERR_UNK;
-               } else {
-                       ret = LTTNG_OK;
-               }
-               break;
-       }
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/context.cpp b/src/bin/lttng-sessiond/context.cpp
new file mode 100644 (file)
index 0000000..c8c8924
--- /dev/null
@@ -0,0 +1,435 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <urcu/list.h>
+
+#include <common/error.h>
+#include <common/sessiond-comm/sessiond-comm.h>
+
+#include "context.h"
+#include "kernel.h"
+#include "ust-app.h"
+#include "trace-ust.h"
+#include "agent.h"
+
+/*
+ * Add kernel context to all channel.
+ *
+ * Assumes the ownership of kctx.
+ */
+static int add_kctx_all_channels(struct ltt_kernel_session *ksession,
+               struct ltt_kernel_context *kctx)
+{
+       int ret;
+       struct ltt_kernel_channel *kchan;
+
+       LTTNG_ASSERT(ksession);
+       LTTNG_ASSERT(kctx);
+
+       DBG("Adding kernel context to all channels");
+
+       /* Go over all channels */
+       cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
+               struct ltt_kernel_context *kctx_copy;
+
+               kctx_copy = trace_kernel_copy_context(kctx);
+               if (!kctx_copy) {
+                       PERROR("zmalloc ltt_kernel_context");
+                       ret = -LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+
+               /* Ownership of kctx_copy is transferred to the callee. */
+               ret = kernel_add_channel_context(kchan, kctx_copy);
+               kctx_copy = NULL;
+               if (ret != 0) {
+                       goto error;
+               }
+       }
+
+       ret = LTTNG_OK;
+
+error:
+       trace_kernel_destroy_context(kctx);
+       return ret;
+}
+
+/*
+ * Add kernel context to a specific channel.
+ *
+ * Assumes the ownership of kctx.
+ */
+static int add_kctx_to_channel(struct ltt_kernel_context *kctx,
+               struct ltt_kernel_channel *kchan)
+{
+       int ret;
+
+       LTTNG_ASSERT(kchan);
+       LTTNG_ASSERT(kctx);
+
+       DBG("Add kernel context to channel '%s'", kchan->channel->name);
+
+       /* Ownership of kctx is transferred to the callee. */
+       ret = kernel_add_channel_context(kchan, kctx);
+       kctx = NULL;
+       if (ret != 0) {
+               goto error;
+       }
+
+       ret = LTTNG_OK;
+
+error:
+       return ret;
+}
+
+/*
+ * Add UST context to channel.
+ */
+static int add_uctx_to_channel(struct ltt_ust_session *usess,
+               enum lttng_domain_type domain,
+               struct ltt_ust_channel *uchan,
+               const struct lttng_event_context *ctx)
+{
+       int ret;
+       struct ltt_ust_context *uctx = NULL;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(uchan);
+       LTTNG_ASSERT(ctx);
+
+       /* Check if context is duplicate */
+       cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
+               if (trace_ust_match_context(uctx, ctx)) {
+                       ret = LTTNG_ERR_UST_CONTEXT_EXIST;
+                       goto duplicate;
+               }
+       }
+       uctx = NULL;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+       {
+               struct agent *agt;
+
+               if (ctx->ctx != LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
+                       /* Other contexts are not needed by the agent. */
+                       break;
+               }
+               agt = trace_ust_find_agent(usess, domain);
+
+               if (!agt) {
+                       agt = agent_create(domain);
+                       if (!agt) {
+                               ret = -LTTNG_ERR_NOMEM;
+                               goto error;
+                       }
+                       agent_add(agt, usess->agents);
+               }
+               ret = agent_add_context(ctx, agt);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               ret = agent_enable_context(ctx, domain);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+               break;
+       }
+       case LTTNG_DOMAIN_UST:
+               break;
+       default:
+               abort();
+       }
+
+       /* Create ltt UST context */
+       uctx = trace_ust_create_context(ctx);
+       if (uctx == NULL) {
+               ret = LTTNG_ERR_UST_CONTEXT_INVAL;
+               goto error;
+       }
+
+       /* Add ltt UST context node to ltt UST channel */
+       lttng_ht_add_ulong(uchan->ctx, &uctx->node);
+       cds_list_add_tail(&uctx->list, &uchan->ctx_list);
+
+       if (!usess->active) {
+               goto end;
+       }
+
+       ret = ust_app_add_ctx_channel_glb(usess, uchan, uctx);
+       if (ret < 0) {
+               goto error;
+       }
+end:
+       DBG("Context UST %d added to channel %s", uctx->ctx.ctx, uchan->name);
+
+       return 0;
+
+error:
+       free(uctx);
+duplicate:
+       return ret;
+}
+
+/*
+ * Add kernel context to tracer.
+ */
+int context_kernel_add(struct ltt_kernel_session *ksession,
+               const struct lttng_event_context *ctx, char *channel_name)
+{
+       int ret;
+       struct ltt_kernel_channel *kchan;
+       struct ltt_kernel_context *kctx;
+
+       LTTNG_ASSERT(ksession);
+       LTTNG_ASSERT(ctx);
+       LTTNG_ASSERT(channel_name);
+
+       kctx = trace_kernel_create_context(NULL);
+       if (!kctx) {
+               ret = -LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       /* Setup kernel context structure */
+       switch (ctx->ctx) {
+       case LTTNG_EVENT_CONTEXT_PID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PID;
+               break;
+       case LTTNG_EVENT_CONTEXT_PROCNAME:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PROCNAME;
+               break;
+       case LTTNG_EVENT_CONTEXT_PRIO:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PRIO;
+               break;
+       case LTTNG_EVENT_CONTEXT_NICE:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_NICE;
+               break;
+       case LTTNG_EVENT_CONTEXT_VPID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VPID;
+               break;
+       case LTTNG_EVENT_CONTEXT_TID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_TID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VTID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VTID;
+               break;
+       case LTTNG_EVENT_CONTEXT_PPID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PPID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VPPID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VPPID;
+               break;
+       case LTTNG_EVENT_CONTEXT_HOSTNAME:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_HOSTNAME;
+               break;
+       case LTTNG_EVENT_CONTEXT_PERF_CPU_COUNTER:
+       case LTTNG_EVENT_CONTEXT_PERF_COUNTER:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PERF_CPU_COUNTER;
+               break;
+       case LTTNG_EVENT_CONTEXT_INTERRUPTIBLE:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_INTERRUPTIBLE;
+               break;
+       case LTTNG_EVENT_CONTEXT_PREEMPTIBLE:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PREEMPTIBLE;
+               break;
+       case LTTNG_EVENT_CONTEXT_NEED_RESCHEDULE:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_NEED_RESCHEDULE;
+               break;
+       case LTTNG_EVENT_CONTEXT_MIGRATABLE:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_MIGRATABLE;
+               break;
+       case LTTNG_EVENT_CONTEXT_CALLSTACK_KERNEL:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_KERNEL;
+               break;
+       case LTTNG_EVENT_CONTEXT_CALLSTACK_USER:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_USER;
+               break;
+       case LTTNG_EVENT_CONTEXT_CGROUP_NS:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_CGROUP_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_IPC_NS:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_IPC_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_MNT_NS:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_MNT_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_NET_NS:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_NET_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_PID_NS:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_PID_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_TIME_NS:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_TIME_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_USER_NS:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_USER_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_UTS_NS:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_UTS_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_UID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_UID;
+               break;
+       case LTTNG_EVENT_CONTEXT_EUID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_EUID;
+               break;
+       case LTTNG_EVENT_CONTEXT_SUID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_SUID;
+               break;
+       case LTTNG_EVENT_CONTEXT_GID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_GID;
+               break;
+       case LTTNG_EVENT_CONTEXT_EGID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_EGID;
+               break;
+       case LTTNG_EVENT_CONTEXT_SGID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_SGID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VUID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VUID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VEUID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VEUID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VSUID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VSUID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VGID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VGID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VEGID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VEGID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VSGID:
+               kctx->ctx.ctx = LTTNG_KERNEL_ABI_CONTEXT_VSGID;
+               break;
+       default:
+               ret = LTTNG_ERR_KERN_CONTEXT_FAIL;
+               goto error;
+       }
+
+       kctx->ctx.u.perf_counter.type = ctx->u.perf_counter.type;
+       kctx->ctx.u.perf_counter.config = ctx->u.perf_counter.config;
+       strncpy(kctx->ctx.u.perf_counter.name, ctx->u.perf_counter.name,
+                       LTTNG_SYMBOL_NAME_LEN);
+       kctx->ctx.u.perf_counter.name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
+
+       if (*channel_name == '\0') {
+               ret = add_kctx_all_channels(ksession, kctx);
+               /* Ownership of kctx is transferred to the callee. */
+               kctx = NULL;
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+       } else {
+               /* Get kernel channel */
+               kchan = trace_kernel_get_channel_by_name(channel_name, ksession);
+               if (kchan == NULL) {
+                       ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
+                       goto error;
+               }
+
+               ret = add_kctx_to_channel(kctx, kchan);
+               /* Ownership of kctx is transferred to the callee. */
+               kctx = NULL;
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+       }
+
+       ret = LTTNG_OK;
+
+error:
+       if (kctx) {
+               trace_kernel_destroy_context(kctx);
+       }
+       return ret;
+}
+
+/*
+ * Add UST context to tracer.
+ */
+int context_ust_add(struct ltt_ust_session *usess,
+               enum lttng_domain_type domain,
+               const struct lttng_event_context *ctx,
+               char *channel_name)
+{
+       int ret = LTTNG_OK;
+       struct lttng_ht_iter iter;
+       struct lttng_ht *chan_ht;
+       struct ltt_ust_channel *uchan = NULL;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(ctx);
+       LTTNG_ASSERT(channel_name);
+
+       rcu_read_lock();
+
+       chan_ht = usess->domain_global.channels;
+
+       /* Get UST channel if defined */
+       if (channel_name[0] != '\0') {
+               uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
+               if (uchan == NULL) {
+                       ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+                       goto error;
+               }
+       }
+
+       if (uchan) {
+               /* Add ctx to channel */
+               ret = add_uctx_to_channel(usess, domain, uchan, ctx);
+       } else {
+               rcu_read_lock();
+               /* Add ctx all events, all channels */
+               cds_lfht_for_each_entry(chan_ht->ht, &iter.iter, uchan, node.node) {
+                       ret = add_uctx_to_channel(usess, domain, uchan, ctx);
+                       if (ret) {
+                               ERR("Failed to add context to channel %s",
+                                               uchan->name);
+                               continue;
+                       }
+               }
+               rcu_read_unlock();
+       }
+
+       switch (ret) {
+       case LTTNG_ERR_UST_CONTEXT_EXIST:
+               break;
+       case -ENOMEM:
+       case -LTTNG_ERR_NOMEM:
+               ret = LTTNG_ERR_FATAL;
+               break;
+       case -EINVAL:
+               ret = LTTNG_ERR_UST_CONTEXT_INVAL;
+               break;
+       case -ENOSYS:
+               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
+               break;
+       default:
+               if (ret != 0 && ret != LTTNG_OK) {
+                       ret = ret > 0 ? ret : LTTNG_ERR_UNK;
+               } else {
+                       ret = LTTNG_OK;
+               }
+               break;
+       }
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
diff --git a/src/bin/lttng-sessiond/dispatch.c b/src/bin/lttng-sessiond/dispatch.c
deleted file mode 100644 (file)
index a767ccb..0000000
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <stddef.h>
-#include <stdlib.h>
-#include <urcu.h>
-#include <common/futex.h>
-#include <common/macros.h>
-
-#include "dispatch.h"
-#include "ust-app.h"
-#include "testpoint.h"
-#include "fd-limit.h"
-#include "health-sessiond.h"
-#include "lttng-sessiond.h"
-#include "thread.h"
-
-struct thread_notifiers {
-       struct ust_cmd_queue *ust_cmd_queue;
-       int apps_cmd_pipe_write_fd;
-       int apps_cmd_notify_pipe_write_fd;
-       int dispatch_thread_exit;
-};
-
-/*
- * For each tracing session, update newly registered apps. The session list
- * lock MUST be acquired before calling this.
- */
-static void update_ust_app(int app_sock)
-{
-       struct ltt_session *sess, *stmp;
-       const struct ltt_session_list *session_list = session_get_list();
-       struct ust_app *app;
-
-       /* Consumer is in an ERROR state. Stop any application update. */
-       if (uatomic_read(&the_ust_consumerd_state) == CONSUMER_ERROR) {
-               /* Stop the update process since the consumer is dead. */
-               return;
-       }
-
-       rcu_read_lock();
-       LTTNG_ASSERT(app_sock >= 0);
-       app = ust_app_find_by_sock(app_sock);
-       if (app == NULL) {
-               /*
-                * Application can be unregistered before so
-                * this is possible hence simply stopping the
-                * update.
-                */
-               DBG3("UST app update failed to find app sock %d",
-                       app_sock);
-               goto unlock_rcu;
-       }
-
-       /* Update all event notifiers for the app. */
-       ust_app_global_update_event_notifier_rules(app);
-
-       /* For all tracing session(s) */
-       cds_list_for_each_entry_safe(sess, stmp, &session_list->head, list) {
-               if (!session_get(sess)) {
-                       continue;
-               }
-               session_lock(sess);
-               if (!sess->active || !sess->ust_session ||
-                               !sess->ust_session->active) {
-                       goto unlock_session;
-               }
-
-               ust_app_global_update(sess->ust_session, app);
-       unlock_session:
-               session_unlock(sess);
-               session_put(sess);
-       }
-
-unlock_rcu:
-       rcu_read_unlock();
-}
-
-/*
- * Sanitize the wait queue of the dispatch registration thread meaning removing
- * invalid nodes from it. This is to avoid memory leaks for the case the UST
- * notify socket is never received.
- */
-static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
-{
-       int ret, nb_fd = 0, i;
-       unsigned int fd_added = 0;
-       struct lttng_poll_event events;
-       struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
-
-       LTTNG_ASSERT(wait_queue);
-
-       lttng_poll_init(&events);
-
-       /* Just skip everything for an empty queue. */
-       if (!wait_queue->count) {
-               goto end;
-       }
-
-       ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               goto error_create;
-       }
-
-       cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
-                       &wait_queue->head, head) {
-               LTTNG_ASSERT(wait_node->app);
-               ret = lttng_poll_add(&events, wait_node->app->sock,
-                               LPOLLHUP | LPOLLERR);
-               if (ret < 0) {
-                       goto error;
-               }
-
-               fd_added = 1;
-       }
-
-       if (!fd_added) {
-               goto end;
-       }
-
-       /*
-        * Poll but don't block so we can quickly identify the faulty events and
-        * clean them afterwards from the wait queue.
-        */
-       ret = lttng_poll_wait(&events, 0);
-       if (ret < 0) {
-               goto error;
-       }
-       nb_fd = ret;
-
-       for (i = 0; i < nb_fd; i++) {
-               /* Get faulty FD. */
-               uint32_t revents = LTTNG_POLL_GETEV(&events, i);
-               int pollfd = LTTNG_POLL_GETFD(&events, i);
-
-               cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
-                               &wait_queue->head, head) {
-                       if (pollfd == wait_node->app->sock &&
-                                       (revents & (LPOLLHUP | LPOLLERR))) {
-                               cds_list_del(&wait_node->head);
-                               wait_queue->count--;
-                               ust_app_destroy(wait_node->app);
-                               free(wait_node);
-                               /*
-                                * Silence warning of use-after-free in
-                                * cds_list_for_each_entry_safe which uses
-                                * __typeof__(*wait_node).
-                                */
-                               wait_node = NULL;
-                               break;
-                       } else {
-                               ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                               goto error;
-                       }
-               }
-       }
-
-       if (nb_fd > 0) {
-               DBG("Wait queue sanitized, %d node were cleaned up", nb_fd);
-       }
-
-end:
-       lttng_poll_clean(&events);
-       return;
-
-error:
-       lttng_poll_clean(&events);
-error_create:
-       ERR("Unable to sanitize wait queue");
-       return;
-}
-
-/*
- * Send a socket to a thread This is called from the dispatch UST registration
- * thread once all sockets are set for the application.
- *
- * The sock value can be invalid, we don't really care, the thread will handle
- * it and make the necessary cleanup if so.
- *
- * On success, return 0 else a negative value being the errno message of the
- * write().
- */
-static int send_socket_to_thread(int fd, int sock)
-{
-       ssize_t ret;
-
-       /*
-        * It's possible that the FD is set as invalid with -1 concurrently just
-        * before calling this function being a shutdown state of the thread.
-        */
-       if (fd < 0) {
-               ret = -EBADF;
-               goto error;
-       }
-
-       ret = lttng_write(fd, &sock, sizeof(sock));
-       if (ret < sizeof(sock)) {
-               PERROR("write apps pipe %d", fd);
-               if (ret < 0) {
-                       ret = -errno;
-               }
-               goto error;
-       }
-
-       /* All good. Don't send back the write positive ret value. */
-       ret = 0;
-error:
-       return (int) ret;
-}
-
-static void cleanup_ust_dispatch_thread(void *data)
-{
-       free(data);
-}
-
-/*
- * Dispatch request from the registration threads to the application
- * communication thread.
- */
-static void *thread_dispatch_ust_registration(void *data)
-{
-       int ret, err = -1;
-       struct cds_wfcq_node *node;
-       struct ust_command *ust_cmd = NULL;
-       struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
-       struct ust_reg_wait_queue wait_queue = {
-               .count = 0,
-       };
-       struct thread_notifiers *notifiers = data;
-
-       rcu_register_thread();
-
-       health_register(the_health_sessiond,
-                       HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);
-
-       if (testpoint(sessiond_thread_app_reg_dispatch)) {
-               goto error_testpoint;
-       }
-
-       health_code_update();
-
-       CDS_INIT_LIST_HEAD(&wait_queue.head);
-
-       DBG("[thread] Dispatch UST command started");
-
-       for (;;) {
-               health_code_update();
-
-               /* Atomically prepare the queue futex */
-               futex_nto1_prepare(&notifiers->ust_cmd_queue->futex);
-
-               if (CMM_LOAD_SHARED(notifiers->dispatch_thread_exit)) {
-                       break;
-               }
-
-               do {
-                       struct ust_app *app = NULL;
-                       ust_cmd = NULL;
-
-                       /*
-                        * Make sure we don't have node(s) that have hung up before receiving
-                        * the notify socket. This is to clean the list in order to avoid
-                        * memory leaks from notify socket that are never seen.
-                        */
-                       sanitize_wait_queue(&wait_queue);
-
-                       health_code_update();
-                       /* Dequeue command for registration */
-                       node = cds_wfcq_dequeue_blocking(
-                                       &notifiers->ust_cmd_queue->head,
-                                       &notifiers->ust_cmd_queue->tail);
-                       if (node == NULL) {
-                               DBG("Woken up but nothing in the UST command queue");
-                               /* Continue thread execution */
-                               break;
-                       }
-
-                       ust_cmd = caa_container_of(node, struct ust_command, node);
-
-                       DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
-                                       " gid:%d sock:%d name:%s (version %d.%d)",
-                                       ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
-                                       ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
-                                       ust_cmd->sock, ust_cmd->reg_msg.name,
-                                       ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
-
-                       if (ust_cmd->reg_msg.type == LTTNG_UST_CTL_SOCKET_CMD) {
-                               wait_node = zmalloc(sizeof(*wait_node));
-                               if (!wait_node) {
-                                       PERROR("zmalloc wait_node dispatch");
-                                       ret = close(ust_cmd->sock);
-                                       if (ret < 0) {
-                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
-                                       }
-                                       lttng_fd_put(LTTNG_FD_APPS, 1);
-                                       free(ust_cmd);
-                                       ust_cmd = NULL;
-                                       goto error;
-                               }
-                               CDS_INIT_LIST_HEAD(&wait_node->head);
-
-                               /* Create application object if socket is CMD. */
-                               wait_node->app = ust_app_create(&ust_cmd->reg_msg,
-                                               ust_cmd->sock);
-                               if (!wait_node->app) {
-                                       ret = close(ust_cmd->sock);
-                                       if (ret < 0) {
-                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
-                                       }
-                                       lttng_fd_put(LTTNG_FD_APPS, 1);
-                                       free(wait_node);
-                                       wait_node = NULL;
-                                       free(ust_cmd);
-                                       ust_cmd = NULL;
-                                       continue;
-                               }
-                               /*
-                                * Add application to the wait queue so we can set the notify
-                                * socket before putting this object in the global ht.
-                                */
-                               cds_list_add(&wait_node->head, &wait_queue.head);
-                               wait_queue.count++;
-
-                               free(ust_cmd);
-                               ust_cmd = NULL;
-                               /*
-                                * We have to continue here since we don't have the notify
-                                * socket and the application MUST be added to the hash table
-                                * only at that moment.
-                                */
-                               continue;
-                       } else {
-                               /*
-                                * Look for the application in the local wait queue and set the
-                                * notify socket if found.
-                                */
-                               cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
-                                               &wait_queue.head, head) {
-                                       health_code_update();
-                                       if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
-                                               wait_node->app->notify_sock = ust_cmd->sock;
-                                               cds_list_del(&wait_node->head);
-                                               wait_queue.count--;
-                                               app = wait_node->app;
-                                               free(wait_node);
-                                               wait_node = NULL;
-                                               DBG3("UST app notify socket %d is set", ust_cmd->sock);
-                                               break;
-                                       }
-                               }
-
-                               /*
-                                * With no application at this stage the received socket is
-                                * basically useless so close it before we free the cmd data
-                                * structure for good.
-                                */
-                               if (!app) {
-                                       ret = close(ust_cmd->sock);
-                                       if (ret < 0) {
-                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
-                                       }
-                                       lttng_fd_put(LTTNG_FD_APPS, 1);
-                               }
-                               free(ust_cmd);
-                               ust_cmd = NULL;
-                       }
-
-                       if (app) {
-                               /*
-                                * @session_lock_list
-                                *
-                                * Lock the global session list so from the register up to the
-                                * registration done message, no thread can see the application
-                                * and change its state.
-                                */
-                               session_lock_list();
-                               rcu_read_lock();
-
-                               /*
-                                * Add application to the global hash table. This needs to be
-                                * done before the update to the UST registry can locate the
-                                * application.
-                                */
-                               ust_app_add(app);
-
-                               /* Set app version. This call will print an error if needed. */
-                               (void) ust_app_version(app);
-
-                               (void) ust_app_setup_event_notifier_group(app);
-
-                               /* Send notify socket through the notify pipe. */
-                               ret = send_socket_to_thread(
-                                               notifiers->apps_cmd_notify_pipe_write_fd,
-                                               app->notify_sock);
-                               if (ret < 0) {
-                                       rcu_read_unlock();
-                                       session_unlock_list();
-                                       /*
-                                        * No notify thread, stop the UST tracing. However, this is
-                                        * not an internal error of the this thread thus setting
-                                        * the health error code to a normal exit.
-                                        */
-                                       err = 0;
-                                       goto error;
-                               }
-
-                               /*
-                                * Update newly registered application with the tracing
-                                * registry info already enabled information.
-                                */
-                               update_ust_app(app->sock);
-
-                               /*
-                                * Don't care about return value. Let the manage apps threads
-                                * handle app unregistration upon socket close.
-                                */
-                               (void) ust_app_register_done(app);
-
-                               /*
-                                * Even if the application socket has been closed, send the app
-                                * to the thread and unregistration will take place at that
-                                * place.
-                                */
-                               ret = send_socket_to_thread(
-                                               notifiers->apps_cmd_pipe_write_fd,
-                                               app->sock);
-                               if (ret < 0) {
-                                       rcu_read_unlock();
-                                       session_unlock_list();
-                                       /*
-                                        * No apps. thread, stop the UST tracing. However, this is
-                                        * not an internal error of the this thread thus setting
-                                        * the health error code to a normal exit.
-                                        */
-                                       err = 0;
-                                       goto error;
-                               }
-
-                               rcu_read_unlock();
-                               session_unlock_list();
-                       }
-               } while (node != NULL);
-
-               health_poll_entry();
-               /* Futex wait on queue. Blocking call on futex() */
-               futex_nto1_wait(&notifiers->ust_cmd_queue->futex);
-               health_poll_exit();
-       }
-       /* Normal exit, no error */
-       err = 0;
-
-error:
-       /* Clean up wait queue. */
-       cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
-                       &wait_queue.head, head) {
-               cds_list_del(&wait_node->head);
-               wait_queue.count--;
-               free(wait_node);
-       }
-
-       /* Empty command queue. */
-       for (;;) {
-               /* Dequeue command for registration */
-               node = cds_wfcq_dequeue_blocking(
-                               &notifiers->ust_cmd_queue->head,
-                               &notifiers->ust_cmd_queue->tail);
-               if (node == NULL) {
-                       break;
-               }
-               ust_cmd = caa_container_of(node, struct ust_command, node);
-               ret = close(ust_cmd->sock);
-               if (ret < 0) {
-                       PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
-               }
-               lttng_fd_put(LTTNG_FD_APPS, 1);
-               free(ust_cmd);
-       }
-
-error_testpoint:
-       DBG("Dispatch thread dying");
-       if (err) {
-               health_error();
-               ERR("Health error occurred in %s", __func__);
-       }
-       health_unregister(the_health_sessiond);
-       rcu_unregister_thread();
-       return NULL;
-}
-
-static bool shutdown_ust_dispatch_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-
-       CMM_STORE_SHARED(notifiers->dispatch_thread_exit, 1);
-       futex_nto1_wake(&notifiers->ust_cmd_queue->futex);
-       return true;
-}
-
-bool launch_ust_dispatch_thread(struct ust_cmd_queue *cmd_queue,
-               int apps_cmd_pipe_write_fd,
-               int apps_cmd_notify_pipe_write_fd)
-{
-       struct lttng_thread *thread;
-       struct thread_notifiers *notifiers;
-
-       notifiers = zmalloc(sizeof(*notifiers));
-       if (!notifiers) {
-               goto error;
-       }
-       notifiers->ust_cmd_queue = cmd_queue;
-       notifiers->apps_cmd_pipe_write_fd = apps_cmd_pipe_write_fd;
-       notifiers->apps_cmd_notify_pipe_write_fd = apps_cmd_notify_pipe_write_fd;
-
-       thread = lttng_thread_create("UST registration dispatch",
-                       thread_dispatch_ust_registration,
-                       shutdown_ust_dispatch_thread,
-                       cleanup_ust_dispatch_thread,
-                       notifiers);
-       if (!thread) {
-               goto error;
-       }
-       lttng_thread_put(thread);
-       return true;
-error:
-       free(notifiers);
-       return false;
-}
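The dispatch.cpp version that follows carries the casts C++ requires where C allowed implicit conversions from void *: the thread's data argument is cast to thread_notifiers * and the zmalloc() result to ust_reg_wait_node *. A minimal sketch of the pattern, with hypothetical names standing in for the real types and calloc() standing in for zmalloc():

/* Illustrative sketch with hypothetical names; not the real dispatch code. */
#include <cstdlib>

struct worker_args {
	int command_fd;
};

void *worker_thread(void *data)
{
	/* C converts implicitly; C++ needs an explicit cast from void *. */
	struct worker_args *args = (struct worker_args *) data;

	/* The same applies to allocators that return void *. */
	struct worker_args *copy =
			(struct worker_args *) calloc(1, sizeof(*copy));
	if (copy) {
		copy->command_fd = args->command_fd;
		free(copy);
	}

	return nullptr;
}

static_cast<> would also work; the diff keeps C-style casts to stay close to the original C sources.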
diff --git a/src/bin/lttng-sessiond/dispatch.cpp b/src/bin/lttng-sessiond/dispatch.cpp
new file mode 100644 (file)
index 0000000..b4092eb
--- /dev/null
@@ -0,0 +1,533 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <urcu.h>
+#include <common/futex.h>
+#include <common/macros.h>
+
+#include "dispatch.h"
+#include "ust-app.h"
+#include "testpoint.h"
+#include "fd-limit.h"
+#include "health-sessiond.h"
+#include "lttng-sessiond.h"
+#include "thread.h"
+
+struct thread_notifiers {
+       struct ust_cmd_queue *ust_cmd_queue;
+       int apps_cmd_pipe_write_fd;
+       int apps_cmd_notify_pipe_write_fd;
+       int dispatch_thread_exit;
+};
+
+/*
+ * For each tracing session, update newly registered apps. The session list
+ * lock MUST be acquired before calling this.
+ */
+static void update_ust_app(int app_sock)
+{
+       struct ltt_session *sess, *stmp;
+       const struct ltt_session_list *session_list = session_get_list();
+       struct ust_app *app;
+
+       /* Consumer is in an ERROR state. Stop any application update. */
+       if (uatomic_read(&the_ust_consumerd_state) == CONSUMER_ERROR) {
+               /* Stop the update process since the consumer is dead. */
+               return;
+       }
+
+       rcu_read_lock();
+       LTTNG_ASSERT(app_sock >= 0);
+       app = ust_app_find_by_sock(app_sock);
+       if (app == NULL) {
+               /*
+                * Application can be unregistered before so
+                * this is possible hence simply stopping the
+                * update.
+                */
+               DBG3("UST app update failed to find app sock %d",
+                       app_sock);
+               goto unlock_rcu;
+       }
+
+       /* Update all event notifiers for the app. */
+       ust_app_global_update_event_notifier_rules(app);
+
+       /* For all tracing session(s) */
+       cds_list_for_each_entry_safe(sess, stmp, &session_list->head, list) {
+               if (!session_get(sess)) {
+                       continue;
+               }
+               session_lock(sess);
+               if (!sess->active || !sess->ust_session ||
+                               !sess->ust_session->active) {
+                       goto unlock_session;
+               }
+
+               ust_app_global_update(sess->ust_session, app);
+       unlock_session:
+               session_unlock(sess);
+               session_put(sess);
+       }
+
+unlock_rcu:
+       rcu_read_unlock();
+}
+
+/*
+ * Sanitize the wait queue of the dispatch registration thread meaning removing
+ * invalid nodes from it. This is to avoid memory leaks for the case the UST
+ * notify socket is never received.
+ */
+static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
+{
+       int ret, nb_fd = 0, i;
+       unsigned int fd_added = 0;
+       struct lttng_poll_event events;
+       struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
+
+       LTTNG_ASSERT(wait_queue);
+
+       lttng_poll_init(&events);
+
+       /* Just skip everything for an empty queue. */
+       if (!wait_queue->count) {
+               goto end;
+       }
+
+       ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error_create;
+       }
+
+       cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                       &wait_queue->head, head) {
+               LTTNG_ASSERT(wait_node->app);
+               ret = lttng_poll_add(&events, wait_node->app->sock,
+                               LPOLLHUP | LPOLLERR);
+               if (ret < 0) {
+                       goto error;
+               }
+
+               fd_added = 1;
+       }
+
+       if (!fd_added) {
+               goto end;
+       }
+
+       /*
+        * Poll but don't block so we can quickly identify the faulty events and
+        * clean them afterwards from the wait queue.
+        */
+       ret = lttng_poll_wait(&events, 0);
+       if (ret < 0) {
+               goto error;
+       }
+       nb_fd = ret;
+
+       for (i = 0; i < nb_fd; i++) {
+               /* Get faulty FD. */
+               uint32_t revents = LTTNG_POLL_GETEV(&events, i);
+               int pollfd = LTTNG_POLL_GETFD(&events, i);
+
+               cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                               &wait_queue->head, head) {
+                       if (pollfd == wait_node->app->sock &&
+                                       (revents & (LPOLLHUP | LPOLLERR))) {
+                               cds_list_del(&wait_node->head);
+                               wait_queue->count--;
+                               ust_app_destroy(wait_node->app);
+                               free(wait_node);
+                               /*
+                                * Silence warning of use-after-free in
+                                * cds_list_for_each_entry_safe which uses
+                                * __typeof__(*wait_node).
+                                */
+                               wait_node = NULL;
+                               break;
+                       } else {
+                               ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                               goto error;
+                       }
+               }
+       }
+
+       if (nb_fd > 0) {
+               DBG("Wait queue sanitized, %d node were cleaned up", nb_fd);
+       }
+
+end:
+       lttng_poll_clean(&events);
+       return;
+
+error:
+       lttng_poll_clean(&events);
+error_create:
+       ERR("Unable to sanitize wait queue");
+       return;
+}
+
+/*
+ * Send a socket to a thread This is called from the dispatch UST registration
+ * thread once all sockets are set for the application.
+ *
+ * The sock value can be invalid, we don't really care, the thread will handle
+ * it and make the necessary cleanup if so.
+ *
+ * On success, return 0 else a negative value being the errno message of the
+ * write().
+ */
+static int send_socket_to_thread(int fd, int sock)
+{
+       ssize_t ret;
+
+       /*
+        * It's possible that the FD is set as invalid with -1 concurrently just
+        * before calling this function being a shutdown state of the thread.
+        */
+       if (fd < 0) {
+               ret = -EBADF;
+               goto error;
+       }
+
+       ret = lttng_write(fd, &sock, sizeof(sock));
+       if (ret < sizeof(sock)) {
+               PERROR("write apps pipe %d", fd);
+               if (ret < 0) {
+                       ret = -errno;
+               }
+               goto error;
+       }
+
+       /* All good. Don't send back the write positive ret value. */
+       ret = 0;
+error:
+       return (int) ret;
+}
+
+static void cleanup_ust_dispatch_thread(void *data)
+{
+       free(data);
+}
+
+/*
+ * Dispatch request from the registration threads to the application
+ * communication thread.
+ */
+static void *thread_dispatch_ust_registration(void *data)
+{
+       int ret, err = -1;
+       struct cds_wfcq_node *node;
+       struct ust_command *ust_cmd = NULL;
+       struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
+       struct ust_reg_wait_queue wait_queue = {
+               .count = 0,
+       };
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+
+       rcu_register_thread();
+
+       health_register(the_health_sessiond,
+                       HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);
+
+       if (testpoint(sessiond_thread_app_reg_dispatch)) {
+               goto error_testpoint;
+       }
+
+       health_code_update();
+
+       CDS_INIT_LIST_HEAD(&wait_queue.head);
+
+       DBG("[thread] Dispatch UST command started");
+
+       for (;;) {
+               health_code_update();
+
+               /* Atomically prepare the queue futex */
+               futex_nto1_prepare(&notifiers->ust_cmd_queue->futex);
+
+               if (CMM_LOAD_SHARED(notifiers->dispatch_thread_exit)) {
+                       break;
+               }
+
+               do {
+                       struct ust_app *app = NULL;
+                       ust_cmd = NULL;
+
+                       /*
+                        * Make sure we don't have node(s) that have hung up before receiving
+                        * the notify socket. This is to clean the list in order to avoid
+                        * memory leaks from notify socket that are never seen.
+                        */
+                       sanitize_wait_queue(&wait_queue);
+
+                       health_code_update();
+                       /* Dequeue command for registration */
+                       node = cds_wfcq_dequeue_blocking(
+                                       &notifiers->ust_cmd_queue->head,
+                                       &notifiers->ust_cmd_queue->tail);
+                       if (node == NULL) {
+                               DBG("Woken up but nothing in the UST command queue");
+                               /* Continue thread execution */
+                               break;
+                       }
+
+                       ust_cmd = caa_container_of(node, struct ust_command, node);
+
+                       DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
+                                       " gid:%d sock:%d name:%s (version %d.%d)",
+                                       ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
+                                       ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
+                                       ust_cmd->sock, ust_cmd->reg_msg.name,
+                                       ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
+
+                       if (ust_cmd->reg_msg.type == LTTNG_UST_CTL_SOCKET_CMD) {
+                               wait_node = (ust_reg_wait_node *) zmalloc(sizeof(*wait_node));
+                               if (!wait_node) {
+                                       PERROR("zmalloc wait_node dispatch");
+                                       ret = close(ust_cmd->sock);
+                                       if (ret < 0) {
+                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
+                                       }
+                                       lttng_fd_put(LTTNG_FD_APPS, 1);
+                                       free(ust_cmd);
+                                       ust_cmd = NULL;
+                                       goto error;
+                               }
+                               CDS_INIT_LIST_HEAD(&wait_node->head);
+
+                               /* Create application object if socket is CMD. */
+                               wait_node->app = ust_app_create(&ust_cmd->reg_msg,
+                                               ust_cmd->sock);
+                               if (!wait_node->app) {
+                                       ret = close(ust_cmd->sock);
+                                       if (ret < 0) {
+                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
+                                       }
+                                       lttng_fd_put(LTTNG_FD_APPS, 1);
+                                       free(wait_node);
+                                       wait_node = NULL;
+                                       free(ust_cmd);
+                                       ust_cmd = NULL;
+                                       continue;
+                               }
+                               /*
+                                * Add application to the wait queue so we can set the notify
+                                * socket before putting this object in the global ht.
+                                */
+                               cds_list_add(&wait_node->head, &wait_queue.head);
+                               wait_queue.count++;
+
+                               free(ust_cmd);
+                               ust_cmd = NULL;
+                               /*
+                        * We have to continue here since we don't have the notify
+                        * socket yet; the application MUST only be added to the hash
+                        * table once that socket is set.
+                                */
+                               continue;
+                       } else {
+                               /*
+                                * Look for the application in the local wait queue and set the
+                                * notify socket if found.
+                                */
+                               cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                                               &wait_queue.head, head) {
+                                       health_code_update();
+                                       if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
+                                               wait_node->app->notify_sock = ust_cmd->sock;
+                                               cds_list_del(&wait_node->head);
+                                               wait_queue.count--;
+                                               app = wait_node->app;
+                                               free(wait_node);
+                                               wait_node = NULL;
+                                               DBG3("UST app notify socket %d is set", ust_cmd->sock);
+                                               break;
+                                       }
+                               }
+
+                               /*
+                                * With no application found at this stage, the received socket
+                                * is useless, so close it before freeing the command data
+                                * structure for good.
+                                */
+                               if (!app) {
+                                       ret = close(ust_cmd->sock);
+                                       if (ret < 0) {
+                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
+                                       }
+                                       lttng_fd_put(LTTNG_FD_APPS, 1);
+                               }
+                               free(ust_cmd);
+                               ust_cmd = NULL;
+                       }
+
+                       if (app) {
+                               /*
+                                * @session_lock_list
+                                *
+                                * Lock the global session list so that, from registration up to
+                                * the registration-done message, no thread can see the
+                                * application and change its state.
+                                */
+                               session_lock_list();
+                               rcu_read_lock();
+
+                               /*
+                                * Add the application to the global hash table. This needs to
+                                * be done so that the subsequent UST registry update can locate
+                                * the application.
+                                */
+                               ust_app_add(app);
+
+                               /* Set app version. This call will print an error if needed. */
+                               (void) ust_app_version(app);
+
+                               (void) ust_app_setup_event_notifier_group(app);
+
+                               /* Send notify socket through the notify pipe. */
+                               ret = send_socket_to_thread(
+                                               notifiers->apps_cmd_notify_pipe_write_fd,
+                                               app->notify_sock);
+                               if (ret < 0) {
+                                       rcu_read_unlock();
+                                       session_unlock_list();
+                                       /*
+                                        * No notify thread, stop the UST tracing. However, this is
+                                        * not an internal error of this thread, so set the health
+                                        * error code to a normal exit.
+                                        */
+                                       err = 0;
+                                       goto error;
+                               }
+
+                               /*
+                                * Update the newly registered application with the
+                                * already-enabled tracing registry information.
+                                */
+                               update_ust_app(app->sock);
+
+                               /*
+                                * Don't care about return value. Let the manage apps threads
+                                * handle app unregistration upon socket close.
+                                */
+                               (void) ust_app_register_done(app);
+
+                               /*
+                                * Even if the application socket has been closed, send the app
+                                * to the thread; unregistration will take place there.
+                                */
+                               ret = send_socket_to_thread(
+                                               notifiers->apps_cmd_pipe_write_fd,
+                                               app->sock);
+                               if (ret < 0) {
+                                       rcu_read_unlock();
+                                       session_unlock_list();
+                                       /*
+                                        * No apps thread, stop the UST tracing. However, this is
+                                        * not an internal error of this thread, so set the health
+                                        * error code to a normal exit.
+                                        */
+                                       err = 0;
+                                       goto error;
+                               }
+
+                               rcu_read_unlock();
+                               session_unlock_list();
+                       }
+               } while (node != NULL);
+
+               health_poll_entry();
+               /* Futex wait on queue. Blocking call on futex() */
+               futex_nto1_wait(&notifiers->ust_cmd_queue->futex);
+               health_poll_exit();
+       }
+       /* Normal exit, no error */
+       err = 0;
+
+error:
+       /* Clean up wait queue. */
+       cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                       &wait_queue.head, head) {
+               cds_list_del(&wait_node->head);
+               wait_queue.count--;
+               free(wait_node);
+       }
+
+       /* Empty command queue. */
+       for (;;) {
+               /* Dequeue command for registration */
+               node = cds_wfcq_dequeue_blocking(
+                               &notifiers->ust_cmd_queue->head,
+                               &notifiers->ust_cmd_queue->tail);
+               if (node == NULL) {
+                       break;
+               }
+               ust_cmd = caa_container_of(node, struct ust_command, node);
+               ret = close(ust_cmd->sock);
+               if (ret < 0) {
+                       PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
+               }
+               lttng_fd_put(LTTNG_FD_APPS, 1);
+               free(ust_cmd);
+       }
+
+error_testpoint:
+       DBG("Dispatch thread dying");
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister(the_health_sessiond);
+       rcu_unregister_thread();
+       return NULL;
+}
+
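+/*
+ * Ask the dispatch thread to stop: flag the thread for exit and wake it up
+ * from its wait on the command queue futex.
+ */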
+static bool shutdown_ust_dispatch_thread(void *data)
+{
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+
+       CMM_STORE_SHARED(notifiers->dispatch_thread_exit, 1);
+       futex_nto1_wake(&notifiers->ust_cmd_queue->futex);
+       return true;
+}
+
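+/*
+ * Launch the UST registration dispatch thread.
+ *
+ * Returns true on success; on failure, returns false and frees the allocated
+ * thread notifiers.
+ */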
+bool launch_ust_dispatch_thread(struct ust_cmd_queue *cmd_queue,
+               int apps_cmd_pipe_write_fd,
+               int apps_cmd_notify_pipe_write_fd)
+{
+       struct lttng_thread *thread;
+       struct thread_notifiers *notifiers;
+
+       notifiers = (thread_notifiers *) zmalloc(sizeof(*notifiers));
+       if (!notifiers) {
+               goto error;
+       }
+       notifiers->ust_cmd_queue = cmd_queue;
+       notifiers->apps_cmd_pipe_write_fd = apps_cmd_pipe_write_fd;
+       notifiers->apps_cmd_notify_pipe_write_fd = apps_cmd_notify_pipe_write_fd;
+
+       thread = lttng_thread_create("UST registration dispatch",
+                       thread_dispatch_ust_registration,
+                       shutdown_ust_dispatch_thread,
+                       cleanup_ust_dispatch_thread,
+                       notifiers);
+       if (!thread) {
+               goto error;
+       }
+       lttng_thread_put(thread);
+       return true;
+error:
+       free(notifiers);
+       return false;
+}
diff --git a/src/bin/lttng-sessiond/event-notifier-error-accounting.c b/src/bin/lttng-sessiond/event-notifier-error-accounting.c
deleted file mode 100644 (file)
index 1488d80..0000000
+++ /dev/null
@@ -1,1368 +0,0 @@
-/*
- * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <urcu/compiler.h>
-#include <pthread.h>
-
-#include <common/error.h>
-#include <common/hashtable/hashtable.h>
-#include <common/index-allocator.h>
-#include <common/kernel-ctl/kernel-ctl.h>
-#include <common/shm.h>
-#include <lttng/trigger/trigger-internal.h>
-
-#include "event-notifier-error-accounting.h"
-#include "lttng-ust-error.h"
-#include "ust-app.h"
-
-#define ERROR_COUNTER_INDEX_HT_INITIAL_SIZE 16
-
-struct index_ht_entry {
-       struct lttng_ht_node_u64 node;
-       uint64_t error_counter_index;
-       struct rcu_head rcu_head;
-};
-
-struct ust_error_accounting_entry {
-       uid_t uid;
-       struct urcu_ref ref;
-       struct lttng_ht_node_u64 node;
-       struct rcu_head rcu_head;
-       struct lttng_ust_ctl_daemon_counter *daemon_counter;
-       /*
-        * Those `lttng_ust_abi_object_data` are anonymous handles to the
-        * counter objects.
-        * They are only used to be duplicated for each new application of the
-        * user. To destroy them, call with the `sock` parameter set to -1.
-        * e.g. `lttng_ust_ctl_release_object(-1, data)`;
-        */
-       struct lttng_ust_abi_object_data *counter;
-       struct lttng_ust_abi_object_data **cpu_counters;
-       int nr_counter_cpu_fds;
-};
-
-struct kernel_error_accounting_entry {
-       int error_counter_fd;
-};
-
-static struct kernel_error_accounting_entry kernel_error_accounting_entry;
-
-/* Hashtable mapping uid to ust_error_accounting_entry. */
-static struct lttng_ht *error_counter_uid_ht;
-
-struct error_accounting_state {
-       struct lttng_index_allocator *index_allocator;
-       /* Hashtable mapping event notifier token to index_ht_entry. */
-       struct lttng_ht *indices_ht;
-       uint64_t number_indices;
-};
-
-static struct error_accounting_state ust_state;
-static struct error_accounting_state kernel_state;
-
-static inline void get_trigger_info_for_log(const struct lttng_trigger *trigger,
-               const char **trigger_name,
-               uid_t *trigger_owner_uid)
-{
-       enum lttng_trigger_status trigger_status;
-
-       trigger_status = lttng_trigger_get_name(trigger, trigger_name);
-       switch (trigger_status) {
-       case LTTNG_TRIGGER_STATUS_OK:
-               break;
-       case LTTNG_TRIGGER_STATUS_UNSET:
-               *trigger_name = "(anonymous)";
-               break;
-       default:
-               abort();
-       }
-
-       trigger_status = lttng_trigger_get_owner_uid(trigger,
-                       trigger_owner_uid);
-       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
-}
-
-static inline
-const char *error_accounting_status_str(
-               enum event_notifier_error_accounting_status status)
-{
-       switch (status) {
-       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
-               return "OK";
-       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR:
-               return "ERROR";
-       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOT_FOUND:
-               return "NOT_FOUND";
-       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM:
-               return "NOMEM";
-       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NO_INDEX_AVAILABLE:
-               return "NO_INDEX_AVAILABLE";
-       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD:
-               return "APP_DEAD";
-       default:
-               abort();
-       }
-}
-
-#ifdef HAVE_LIBLTTNG_UST_CTL
-struct event_notifier_counter {
-       pthread_mutex_t lock;
-       long count;
-};
-
-static struct event_notifier_counter the_event_notifier_counter;
-
-static void free_ust_error_accounting_entry(struct rcu_head *head)
-{
-       int i;
-       struct ust_error_accounting_entry *entry =
-                       caa_container_of(head, typeof(*entry), rcu_head);
-
-       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
-               lttng_ust_ctl_release_object(-1, entry->cpu_counters[i]);
-               free(entry->cpu_counters[i]);
-       }
-
-       free(entry->cpu_counters);
-
-       lttng_ust_ctl_release_object(-1, entry->counter);
-       free(entry->counter);
-
-       lttng_ust_ctl_destroy_counter(entry->daemon_counter);
-
-       free(entry);
-}
-
-static
-bool ust_error_accounting_entry_get(struct ust_error_accounting_entry *entry)
-{
-       return urcu_ref_get_unless_zero(&entry->ref);
-}
-
-static
-void ust_error_accounting_entry_release(struct urcu_ref *entry_ref)
-{
-       struct ust_error_accounting_entry *entry =
-                       container_of(entry_ref, typeof(*entry), ref);
-
-       rcu_read_lock();
-       cds_lfht_del(error_counter_uid_ht->ht, &entry->node.node);
-       call_rcu(&entry->rcu_head, free_ust_error_accounting_entry);
-       rcu_read_unlock();
-}
-
-
-static
-void ust_error_accounting_entry_put(struct ust_error_accounting_entry *entry)
-{
-       if (!entry) {
-               return;
-       }
-
-       urcu_ref_put(&entry->ref, ust_error_accounting_entry_release);
-}
-
-/*
- * Put one reference to every UID entry.
- */
-static
-void put_ref_all_ust_error_accounting_entry(void)
-{
-       struct lttng_ht_iter iter;
-       struct ust_error_accounting_entry *uid_entry;
-
-       ASSERT_LOCKED(the_event_notifier_counter.lock);
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(error_counter_uid_ht->ht, &iter.iter,
-                       uid_entry, node.node) {
-               ust_error_accounting_entry_put(uid_entry);
-       }
-
-       rcu_read_unlock();
-}
-
-/*
- * Get one reference to every UID entry.
- */
-static
-void get_ref_all_ust_error_accounting_entry(void)
-{
-       struct lttng_ht_iter iter;
-       struct ust_error_accounting_entry *uid_entry;
-
-       ASSERT_LOCKED(the_event_notifier_counter.lock);
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(error_counter_uid_ht->ht, &iter.iter,
-                       uid_entry, node.node) {
-               ust_error_accounting_entry_get(uid_entry);
-       }
-
-       rcu_read_unlock();
-}
-
-#endif /* HAVE_LIBLTTNG_UST_CTL */
-
-static
-enum event_notifier_error_accounting_status
-init_error_accounting_state(struct error_accounting_state *state,
-               uint64_t index_count)
-{
-       enum event_notifier_error_accounting_status status;
-
-       LTTNG_ASSERT(state);
-
-       state->number_indices = index_count;
-
-       state->index_allocator = lttng_index_allocator_create(index_count);
-       if (!state->index_allocator) {
-               ERR("Failed to allocate event notifier error counter index allocator");
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
-               goto end;
-       }
-
-       state->indices_ht = lttng_ht_new(ERROR_COUNTER_INDEX_HT_INITIAL_SIZE,
-                       LTTNG_HT_TYPE_U64);
-       if (!state->indices_ht) {
-               ERR("Failed to allocate error counter indices hash table");
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
-               goto error_indices_ht;
-       }
-
-       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-       goto end;
-
-error_indices_ht:
-       lttng_index_allocator_destroy(state->index_allocator);
-       state->index_allocator = NULL;
-end:
-       return status;
-}
-
-static
-void fini_error_accounting_state(struct error_accounting_state *state)
-{
-       LTTNG_ASSERT(state);
-
-       /*
-        * Will assert if some error counter indices were not released (an
-        * internal error).
-        */
-       lttng_ht_destroy(state->indices_ht);
-       lttng_index_allocator_destroy(state->index_allocator);
-}
-
-enum event_notifier_error_accounting_status
-event_notifier_error_accounting_init(uint64_t buffer_size_kernel,
-               uint64_t buffer_size_ust)
-{
-       enum event_notifier_error_accounting_status status;
-
-       status = init_error_accounting_state(&kernel_state, buffer_size_kernel);
-       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-               ERR("Failed to initialize kernel event notifier accounting state: status = %s",
-                               error_accounting_status_str(status));
-               goto end;
-       }
-
-       status = init_error_accounting_state(&ust_state, buffer_size_ust);
-       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-               ERR("Failed to initialize UST event notifier accounting state: status = %s",
-                               error_accounting_status_str(status));
-               goto error_ust_state;
-       }
-
-       error_counter_uid_ht = lttng_ht_new(
-                       ERROR_COUNTER_INDEX_HT_INITIAL_SIZE, LTTNG_HT_TYPE_U64);
-       if (!error_counter_uid_ht) {
-               ERR("Failed to allocate UID to error counter accountant hash table");
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
-               goto error_uid_ht;
-       }
-
-       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-       goto end;
-
-error_uid_ht:
-       fini_error_accounting_state(&ust_state);
-error_ust_state:
-       fini_error_accounting_state(&kernel_state);
-end:
-       return status;
-}
-
-/*
- * Return the error counter index associated with this event notifier tracer
- * token. Returns _STATUS_OK if found and _STATUS_NOT_FOUND otherwise.
- */
-static
-enum event_notifier_error_accounting_status get_error_counter_index_for_token(
-               struct error_accounting_state *state, uint64_t tracer_token,
-               uint64_t *error_counter_index)
-{
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-       const struct index_ht_entry *index_entry;
-       enum event_notifier_error_accounting_status status;
-
-       rcu_read_lock();
-       lttng_ht_lookup(state->indices_ht, &tracer_token, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (node) {
-               index_entry = caa_container_of(
-                               node, const struct index_ht_entry, node);
-               *error_counter_index = index_entry->error_counter_index;
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-       } else {
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOT_FOUND;
-       }
-
-       rcu_read_unlock();
-       return status;
-}
-
-#ifdef HAVE_LIBLTTNG_UST_CTL
-/*
- * Find the entry for this app's UID; the caller acquires a reference if the
- * entry is found.
- */
-static
-struct ust_error_accounting_entry *ust_error_accounting_entry_find(
-               struct lttng_ht *uid_ht, const struct ust_app *app)
-{
-       struct ust_error_accounting_entry *entry;
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-       uint64_t key = app->uid;
-
-       lttng_ht_lookup(uid_ht, &key, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if(node == NULL) {
-               entry = NULL;
-       } else {
-               bool got_ref;
-
-               entry = caa_container_of(node,
-                               struct ust_error_accounting_entry, node);
-
-               got_ref = ust_error_accounting_entry_get(entry);
-               if (!got_ref) {
-                       entry = NULL;
-               }
-       }
-
-       return entry;
-}
-
-/*
- * Create the entry for this app's UID; the caller acquires a reference to the
- * entry.
- */
-static
-struct ust_error_accounting_entry *ust_error_accounting_entry_create(
-               struct lttng_ht *uid_ht, const struct ust_app *app)
-{
-       int i, ret, *cpu_counter_fds = NULL;
-       struct lttng_ust_ctl_daemon_counter *daemon_counter;
-       struct lttng_ust_abi_object_data *counter, **cpu_counters;
-       struct ust_error_accounting_entry *entry = NULL;
-       const struct lttng_ust_ctl_counter_dimension dimension = {
-               .size = ust_state.number_indices,
-               .has_underflow = false,
-               .has_overflow = false,
-       };
-
-       if (!ust_app_supports_counters(app)) {
-               DBG("Refusing to create accounting entry for application (unsupported feature): app name = '%s', app ppid = %d",
-                               app->name, (int) app->ppid);
-               goto error;
-       }
-
-       entry = zmalloc(sizeof(struct ust_error_accounting_entry));
-       if (!entry) {
-               PERROR("Failed to allocate event notifier error accounting entry")
-               goto error;
-       }
-
-       urcu_ref_init(&entry->ref);
-       entry->uid = app->uid;
-       entry->nr_counter_cpu_fds = lttng_ust_ctl_get_nr_cpu_per_counter();
-
-       cpu_counter_fds = zmalloc(entry->nr_counter_cpu_fds * sizeof(*cpu_counter_fds));
-       if (!cpu_counter_fds) {
-               PERROR("Failed to allocate event notifier error counter file descriptors array: application uid = %d, application name = '%s', pid = %d, allocation size = %zu",
-                               (int) app->uid, app->name, (int) app->pid,
-                               entry->nr_counter_cpu_fds * sizeof(*cpu_counter_fds));
-               goto error_counter_cpu_fds_alloc;
-       }
-
-       /* Initialize to an invalid fd value to close fds in case of error. */
-       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
-               cpu_counter_fds[i] = -1;
-       }
-
-       cpu_counters = zmalloc(entry->nr_counter_cpu_fds * sizeof(struct lttng_ust_abi_object_data *));
-       if (!cpu_counters) {
-               PERROR("Failed to allocate event notifier error counter lttng_ust_abi_object_data array: application uid = %d, application name = '%s', pid = %d, allocation size = %zu",
-                               (int) app->uid, app->name, (int) app->pid,
-                               entry->nr_counter_cpu_fds * sizeof(struct lttng_ust_abi_object_data *));
-               goto error_counter_cpus_alloc;
-       }
-
-       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
-               cpu_counter_fds[i] = shm_create_anonymous("event-notifier-error-accounting");
-               if (cpu_counter_fds[i] == -1) {
-                       ERR("Failed to create event notifier error accounting shared memory for application user: application uid = %d, pid = %d, application name = '%s'",
-                                       (int) app->uid, (int) app->pid, app->name);
-                       goto error_shm_alloc;
-               }
-       }
-
-       /*
-        * Ownership of the file descriptors is transferred to the ustctl object.
-        */
-       daemon_counter = lttng_ust_ctl_create_counter(1, &dimension, 0, -1,
-                       entry->nr_counter_cpu_fds, cpu_counter_fds,
-                       LTTNG_UST_CTL_COUNTER_BITNESS_32,
-                       LTTNG_UST_CTL_COUNTER_ARITHMETIC_MODULAR,
-                       LTTNG_UST_CTL_COUNTER_ALLOC_PER_CPU,
-                       false);
-       if (!daemon_counter) {
-               goto error_create_daemon_counter;
-       }
-
-       ret = lttng_ust_ctl_create_counter_data(daemon_counter, &counter);
-       if (ret) {
-               ERR("Failed to create userspace tracer counter data for application user: uid = %d, pid = %d, application name = '%s'",
-                               (int) app->uid, (int) app->pid, app->name);
-               goto error_create_counter_data;
-       }
-
-       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
-               ret = lttng_ust_ctl_create_counter_cpu_data(daemon_counter, i,
-                               &cpu_counters[i]);
-               if (ret) {
-                       ERR("Failed to create userspace tracer counter cpu data for application user: uid = %d, pid = %d, application name = '%s'",
-                                       (int) app->uid, (int) app->pid,
-                                       app->name);
-                       goto error_create_counter_cpu_data;
-               }
-       }
-
-       entry->daemon_counter = daemon_counter;
-       entry->counter = counter;
-       entry->cpu_counters = cpu_counters;
-
-       lttng_ht_node_init_u64(&entry->node, entry->uid);
-       lttng_ht_add_unique_u64(error_counter_uid_ht, &entry->node);
-
-       goto end;
-
-error_create_counter_cpu_data:
-       /* Teardown any allocated cpu counters. */
-       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
-               if (!cpu_counters[i]) {
-                       /*
-                        * Early-exit when error occurred before all cpu
-                        * counters could be initialized.
-                        */
-                       break;
-               }
-
-               lttng_ust_ctl_release_object(-1, cpu_counters[i]);
-               free(cpu_counters[i]);
-       }
-
-       lttng_ust_ctl_release_object(-1, entry->counter);
-       free(entry->counter);
-error_create_counter_data:
-       lttng_ust_ctl_destroy_counter(daemon_counter);
-error_create_daemon_counter:
-error_shm_alloc:
-       /* Error occurred before per-cpu SHMs were handed-off to ustctl. */
-       if (cpu_counter_fds) {
-               for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
-                       if (cpu_counter_fds[i] < 0) {
-                               /*
-                                * Early-exit when error occurred before all cpu
-                                * counter shm fds could be initialized.
-                                */
-                               break;
-                       }
-
-                       ret = close(cpu_counter_fds[i]);
-                       if (ret) {
-                               PERROR("Failed to close error counter per-CPU shm file descriptor: fd = %d",
-                                               cpu_counter_fds[i]);
-                       }
-               }
-       }
-
-       free(cpu_counters);
-error_counter_cpus_alloc:
-error_counter_cpu_fds_alloc:
-       free(entry);
-error:
-       entry = NULL;
-end:
-       free(cpu_counter_fds);
-       return entry;
-}
-
-static
-enum event_notifier_error_accounting_status send_counter_data_to_ust(
-               struct ust_app *app,
-               struct lttng_ust_abi_object_data *new_counter)
-{
-       int ret;
-       enum event_notifier_error_accounting_status status;
-
-       /* Attach counter to trigger group. */
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_send_counter_data_to_ust(app->sock,
-                       app->event_notifier_group.object->handle, new_counter);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
-                       ERR("Failed to send counter data to application: application name = '%s', pid = %d, ret = %d",
-                                       app->name, app->pid, ret);
-                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-               } else {
-                       DBG3("Failed to send counter data to application (application is dead): application name = '%s', pid = %d, ret = %d",
-                                       app->name, app->pid, ret);
-                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD;
-               }
-
-               goto end;
-       }
-
-       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-end:
-       return status;
-}
-
-static
-enum event_notifier_error_accounting_status send_counter_cpu_data_to_ust(
-               struct ust_app *app,
-               struct lttng_ust_abi_object_data *counter,
-               struct lttng_ust_abi_object_data *counter_cpu)
-{
-       int ret;
-       enum event_notifier_error_accounting_status status;
-
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_send_counter_cpu_data_to_ust(app->sock,
-                       counter, counter_cpu);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
-                       ERR("Failed to send counter CPU data to application: application name = '%s', pid = %d, ret = %d",
-                                       app->name, app->pid, ret);
-                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-               } else {
-                       DBG3("Failed to send counter CPU data to application: application name = '%s', pid = %d, ret = %d",
-                                       app->name, app->pid, ret);
-                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD;
-               }
-
-               goto end;
-       }
-
-       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-end:
-       return status;
-}
-
-enum event_notifier_error_accounting_status
-event_notifier_error_accounting_register_app(struct ust_app *app)
-{
-       int ret;
-       uint64_t i;
-       struct lttng_ust_abi_object_data *new_counter;
-       struct ust_error_accounting_entry *entry;
-       enum event_notifier_error_accounting_status status;
-       struct lttng_ust_abi_object_data **cpu_counters;
-
-       if (!ust_app_supports_counters(app)) {
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED;
-               goto end;
-       }
-
-       /*
-        * Check if we already have an error counter for the user id of this
-        * app. If not, create one.
-        */
-       rcu_read_lock();
-       entry = ust_error_accounting_entry_find(error_counter_uid_ht, app);
-       if (entry == NULL) {
-               /*
-                * Take the event notifier counter lock before creating the new
-                * entry to ensure that no event notifier is registered between
-                * the entry creation and the event notifier count check.
-                */
-               pthread_mutex_lock(&the_event_notifier_counter.lock);
-
-               entry = ust_error_accounting_entry_create(error_counter_uid_ht,
-                               app);
-               if (!entry) {
-                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-                       pthread_mutex_unlock(&the_event_notifier_counter.lock);
-                       goto error_creating_entry;
-               }
-
-               /*
-                * We just created a new UID entry. If there are event
-                * notifiers already registered, take one reference on their
-                * behalf.
-                */
-               if (the_event_notifier_counter.count > 0) {
-                       ust_error_accounting_entry_get(entry);
-               }
-
-               pthread_mutex_unlock(&the_event_notifier_counter.lock);
-       }
-
-       /* Duplicate counter object data. */
-       ret = lttng_ust_ctl_duplicate_ust_object_data(&new_counter,
-                       entry->counter);
-       if (ret) {
-               ERR("Failed to duplicate event notifier error accounting counter for application user: application uid = %d, pid = %d, application name = '%s'",
-                               (int) app->uid, (int) app->pid, app->name);
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-               goto error_duplicate_counter;
-       }
-
-       status = send_counter_data_to_ust(app, new_counter);
-       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-               if (status == EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD) {
-                       goto error_send_counter_data;
-               }
-
-               ERR("Failed to send counter data to application tracer: status = %s, application uid = %d, pid = %d, application name = '%s'",
-                               error_accounting_status_str(status),
-                               (int) app->uid, (int) app->pid, app->name);
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-               goto error_send_counter_data;
-       }
-
-       cpu_counters = zmalloc(entry->nr_counter_cpu_fds * sizeof(struct lttng_ust_abi_object_data *));
-       if (!cpu_counters) {
-               PERROR("Failed to allocate event notifier error counter lttng_ust_abi_object_data array: application uid = %d, application name = '%s', pid = %d, allocation size = %zu",
-                               (int) app->uid, app->name, (int) app->pid,
-                               entry->nr_counter_cpu_fds * sizeof(**cpu_counters));
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
-               goto error_allocate_cpu_counters;
-       }
-
-       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
-               struct lttng_ust_abi_object_data *new_counter_cpu = NULL;
-
-               ret = lttng_ust_ctl_duplicate_ust_object_data(&new_counter_cpu,
-                               entry->cpu_counters[i]);
-               if (ret) {
-                       ERR("Failed to duplicate userspace tracer counter cpu data for application user: uid = %d, pid = %d, application name = '%s'",
-                                       (int) app->uid, (int) app->pid,
-                                       app->name);
-                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
-                       goto error_duplicate_cpu_counter;
-               }
-
-               cpu_counters[i] = new_counter_cpu;
-
-               status = send_counter_cpu_data_to_ust(app, new_counter,
-                               new_counter_cpu);
-               if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-                       if (status == EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD) {
-                               goto error_send_cpu_counter_data;
-                       }
-
-                       ERR("Failed to send counter cpu data to application tracer: status = %s, application uid = %d, pid = %d, application name = '%s'",
-                                       error_accounting_status_str(status),
-                                       (int) app->uid, (int) app->pid,
-                                       app->name);
-                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-                       goto error_send_cpu_counter_data;
-               }
-       }
-
-       app->event_notifier_group.counter = new_counter;
-       new_counter = NULL;
-       app->event_notifier_group.nr_counter_cpu = entry->nr_counter_cpu_fds;
-       app->event_notifier_group.counter_cpu = cpu_counters;
-       cpu_counters = NULL;
-       goto end_unlock;
-
-error_send_cpu_counter_data:
-error_duplicate_cpu_counter:
-       /* Teardown any duplicated cpu counters. */
-       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
-               if (!cpu_counters[i]) {
-                       /*
-                        * Early-exit when error occurred before all cpu
-                        * counters could be initialized.
-                        */
-                       break;
-               }
-
-               lttng_ust_ctl_release_object(-1, cpu_counters[i]);
-               free(cpu_counters[i]);
-       }
-
-       free(cpu_counters);
-
-error_allocate_cpu_counters:
-error_send_counter_data:
-       lttng_ust_ctl_release_object(-1, new_counter);
-       free(new_counter);
-error_duplicate_counter:
-       ust_error_accounting_entry_put(entry);
-error_creating_entry:
-       app->event_notifier_group.counter = NULL;
-end_unlock:
-       rcu_read_unlock();
-end:
-       return status;
-}
-
-enum event_notifier_error_accounting_status
-event_notifier_error_accounting_unregister_app(struct ust_app *app)
-{
-       enum event_notifier_error_accounting_status status;
-       struct ust_error_accounting_entry *entry;
-       int i;
-
-       rcu_read_lock();
-
-       /* If an error occurred during app registration no entry was created. */
-       if (!app->event_notifier_group.counter) {
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-               goto end;
-       }
-
-       entry = ust_error_accounting_entry_find(error_counter_uid_ht, app);
-       if (entry == NULL) {
-               ERR("Failed to find event notifier error accounting entry on application teardown: pid = %d, application name = '%s'",
-                               app->pid, app->name);
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-               goto end;
-       } else {
-               /*
-                * Put the entry twice as we acquired a reference from the
-                * `ust_error_accounting_entry_find()` above.
-                */
-               ust_error_accounting_entry_put(entry);
-               ust_error_accounting_entry_put(entry);
-       }
-
-       for (i = 0; i < app->event_notifier_group.nr_counter_cpu; i++) {
-               lttng_ust_ctl_release_object(app->sock,
-                               app->event_notifier_group.counter_cpu[i]);
-               free(app->event_notifier_group.counter_cpu[i]);
-       }
-
-       free(app->event_notifier_group.counter_cpu);
-
-       lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.counter);
-       free(app->event_notifier_group.counter);
-
-       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-end:
-       rcu_read_unlock();
-       return status;
-}
-
-static
-enum event_notifier_error_accounting_status
-event_notifier_error_accounting_ust_get_count(
-               const struct lttng_trigger *trigger, uint64_t *count)
-{
-       struct lttng_ht_iter iter;
-       struct ust_error_accounting_entry *uid_entry;
-       uint64_t error_counter_index, global_sum = 0;
-       enum event_notifier_error_accounting_status status;
-       size_t dimension_indexes[1];
-       const uint64_t tracer_token = lttng_trigger_get_tracer_token(trigger);
-       uid_t trigger_owner_uid;
-       const char *trigger_name;
-
-
-       rcu_read_lock();
-
-       get_trigger_info_for_log(trigger, &trigger_name, &trigger_owner_uid);
-
-       status = get_error_counter_index_for_token(&ust_state, tracer_token,
-                       &error_counter_index);
-       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-
-               ERR("Failed to retrieve index for tracer token: token = %" PRIu64 ", trigger name = '%s', trigger owner uid = %d, status = %s",
-                               tracer_token, trigger_name,
-                               (int) trigger_owner_uid,
-                               error_accounting_status_str(status));
-               goto end;
-       }
-
-       dimension_indexes[0] = error_counter_index;
-
-       /*
-        * Iterate over all the UID entries.
-        * We aggregate the value of all uid entries regardless of whether the uid
-        * matches the trigger's uid because a user that is allowed to register
-        * a trigger to a given sessiond is also allowed to create an event
-        * notifier on all apps that this sessiond is aware of.
-        */
-       cds_lfht_for_each_entry(error_counter_uid_ht->ht, &iter.iter,
-                       uid_entry, node.node) {
-               int ret;
-               int64_t local_value = 0;
-               bool overflow = false, underflow = false;
-
-               ret = lttng_ust_ctl_counter_aggregate(uid_entry->daemon_counter,
-                               dimension_indexes, &local_value, &overflow,
-                               &underflow);
-               if (ret || local_value < 0) {
-                       if (ret) {
-                               ERR("Failed to aggregate event notifier error counter values of trigger: trigger name = '%s', trigger owner uid = %d",
-                                               trigger_name,
-                                               (int) trigger_owner_uid);
-                       } else if (local_value < 0) {
-                               ERR("Negative event notifier error counter value encountered during aggregation: trigger name = '%s', trigger owner uid = %d, value = %" PRId64,
-                                               trigger_name,
-                                               (int) trigger_owner_uid,
-                                               local_value);
-                       } else {
-                               abort();
-                       }
-
-                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-                       goto end;
-               }
-
-               /* Cast is safe as negative values are checked-for above. */
-               global_sum += (uint64_t) local_value;
-       }
-
-       *count = global_sum;
-       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-
-end:
-       rcu_read_unlock();
-       return status;
-}
-
-static
-enum event_notifier_error_accounting_status event_notifier_error_accounting_ust_clear(
-               const struct lttng_trigger *trigger)
-{
-       struct lttng_ht_iter iter;
-       struct ust_error_accounting_entry *uid_entry;
-       uint64_t error_counter_index;
-       enum event_notifier_error_accounting_status status;
-       size_t dimension_index;
-       const uint64_t tracer_token = lttng_trigger_get_tracer_token(trigger);
-
-       rcu_read_lock();
-       status = get_error_counter_index_for_token(&ust_state, tracer_token,
-                       &error_counter_index);
-       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-               uid_t trigger_owner_uid;
-               const char *trigger_name;
-
-               get_trigger_info_for_log(trigger, &trigger_name,
-                                        &trigger_owner_uid);
-
-               ERR("Failed to retrieve index for tracer token: token = %" PRIu64 ", trigger name = '%s', trigger owner uid = %d, status = %s",
-                               tracer_token, trigger_name,
-                               (int) trigger_owner_uid,
-                               error_accounting_status_str(status));
-               goto end;
-       }
-
-       dimension_index = error_counter_index;
-
-       /*
-        * Go over all error counters (ignoring uid) as a trigger (and trigger
-        * errors) can be generated from any applications that this session
-        * daemon is managing.
-        */
-       cds_lfht_for_each_entry(error_counter_uid_ht->ht, &iter.iter,
-                       uid_entry, node.node) {
-               const int ret = lttng_ust_ctl_counter_clear(uid_entry->daemon_counter,
-                               &dimension_index);
-
-               if (ret) {
-                       uid_t trigger_owner_uid;
-                       const char *trigger_name;
-
-                       get_trigger_info_for_log(trigger, &trigger_name,
-                                                &trigger_owner_uid);
-                       ERR("Failed to clear event notifier counter value for trigger: counter uid = %d, trigger name = '%s', trigger owner uid = %d",
-                                       (int) uid_entry->node.key, trigger_name,
-                                       (int) trigger_owner_uid);
-                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-                       goto end;
-               }
-       }
-
-       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-end:
-       rcu_read_unlock();
-       return status;
-}
-#endif /* HAVE_LIBLTTNG_UST_CTL */
-
-static
-enum event_notifier_error_accounting_status
-event_notifier_error_accounting_kernel_clear(
-               const struct lttng_trigger *trigger)
-{
-       int ret;
-       uint64_t error_counter_index;
-       enum event_notifier_error_accounting_status status;
-       struct lttng_kernel_abi_counter_clear counter_clear = {};
-
-       status = get_error_counter_index_for_token(&kernel_state,
-                       lttng_trigger_get_tracer_token(trigger),
-                       &error_counter_index);
-       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-               uid_t trigger_owner_uid;
-               const char *trigger_name;
-
-               get_trigger_info_for_log(
-                               trigger, &trigger_name, &trigger_owner_uid);
-
-               ERR("Failed to get event notifier error counter index: trigger owner uid = %d, trigger name = '%s', status = '%s'",
-                               trigger_owner_uid, trigger_name,
-                               error_accounting_status_str(status));
-               goto end;
-       }
-
-       counter_clear.index.number_dimensions = 1;
-       counter_clear.index.dimension_indexes[0] = error_counter_index;
-
-       ret = kernctl_counter_clear(
-                       kernel_error_accounting_entry.error_counter_fd,
-                       &counter_clear);
-       if (ret) {
-               uid_t trigger_owner_uid;
-               const char *trigger_name;
-
-               get_trigger_info_for_log(
-                               trigger, &trigger_name, &trigger_owner_uid);
-
-               ERR("Failed to clear kernel event notifier error counter: trigger owner uid = %d, trigger name = '%s'",
-                               trigger_owner_uid, trigger_name);
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-               goto end;
-       }
-
-       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-end:
-       return status;
-}
-
-enum event_notifier_error_accounting_status
-event_notifier_error_accounting_register_kernel(
-               int kernel_event_notifier_group_fd)
-{
-       int error_counter_fd = -1, ret;
-       enum event_notifier_error_accounting_status status;
-       const struct lttng_kernel_abi_counter_conf error_counter_conf = {
-               .arithmetic = LTTNG_KERNEL_ABI_COUNTER_ARITHMETIC_MODULAR,
-               .bitness = sizeof(void *) == sizeof(uint32_t) ?
-                               LTTNG_KERNEL_ABI_COUNTER_BITNESS_32 :
-                               LTTNG_KERNEL_ABI_COUNTER_BITNESS_64,
-               .global_sum_step = 0,
-               .number_dimensions = 1,
-               .dimensions[0].size = kernel_state.number_indices,
-               .dimensions[0].has_underflow = false,
-               .dimensions[0].has_overflow = false,
-       };
-
-       ret = kernctl_create_event_notifier_group_error_counter(
-                       kernel_event_notifier_group_fd, &error_counter_conf);
-       if (ret < 0) {
-               PERROR("Failed to create event notifier group error counter through kernel ioctl: kernel_event_notifier_group_fd = %d",
-                               kernel_event_notifier_group_fd);
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-               goto error;
-       }
-
-       error_counter_fd = ret;
-
-       /* Prevent fd duplication after execlp(). */
-       ret = fcntl(error_counter_fd, F_SETFD, FD_CLOEXEC);
-       if (ret < 0) {
-               PERROR("Failed to set FD_CLOEXEC flag on event notifier error counter file descriptor: error_counter_fd = %d",
-                               error_counter_fd);
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-               goto error;
-       }
-
-       DBG("Created kernel event notifier group error counter: fd = %d",
-                       error_counter_fd);
-
-       kernel_error_accounting_entry.error_counter_fd =
-                       error_counter_fd;
-       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-
-error:
-       return status;
-}
-
-static
-enum event_notifier_error_accounting_status create_error_counter_index_for_token(
-               struct error_accounting_state *state, uint64_t tracer_token,
-               uint64_t *error_counter_index)
-{
-       struct index_ht_entry *index_entry;
-       enum lttng_index_allocator_status index_alloc_status;
-       uint64_t local_error_counter_index;
-       enum event_notifier_error_accounting_status status;
-
-       LTTNG_ASSERT(state);
-
-       /* Allocate a new index for that counter. */
-       index_alloc_status = lttng_index_allocator_alloc(state->index_allocator,
-                       &local_error_counter_index);
-       switch (index_alloc_status) {
-       case LTTNG_INDEX_ALLOCATOR_STATUS_EMPTY:
-               DBG("No indices left in the configured event notifier error counter: "
-                               "number-of-indices = %"PRIu64,
-                               lttng_index_allocator_get_index_count(
-                                       state->index_allocator));
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NO_INDEX_AVAILABLE;
-               goto end;
-       case LTTNG_INDEX_ALLOCATOR_STATUS_OK:
-               break;
-       default:
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-               goto end;
-       }
-
-       index_entry = zmalloc(sizeof(*index_entry));
-       if (index_entry == NULL) {
-               PERROR("Failed to allocate event notifier error counter hash table entry");
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
-               goto end;
-       }
-
-       index_entry->error_counter_index = local_error_counter_index;
-       lttng_ht_node_init_u64(&index_entry->node, tracer_token);
-       lttng_ht_add_unique_u64(state->indices_ht, &index_entry->node);
-
-       DBG("Allocated error counter index for tracer token: tracer token = %" PRIu64 ", index = %" PRIu64,
-                       tracer_token, local_error_counter_index);
-       *error_counter_index = local_error_counter_index;
-       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-end:
-       return status;
-}
-
-enum event_notifier_error_accounting_status
-event_notifier_error_accounting_register_event_notifier(
-               const struct lttng_trigger *trigger,
-               uint64_t *error_counter_index)
-{
-       enum event_notifier_error_accounting_status status;
-       uint64_t local_error_counter_index;
-       struct error_accounting_state *state;
-
-       switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
-       case LTTNG_DOMAIN_KERNEL:
-               state = &kernel_state;
-               break;
-       case LTTNG_DOMAIN_UST:
-       case LTTNG_DOMAIN_PYTHON:
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-               state = &ust_state;
-               break;
-       default:
-               abort();
-       }
-
-       /*
-        * Check if this event notifier already has an error counter index
-        * assigned.
-        */
-       status = get_error_counter_index_for_token(state,
-                       lttng_trigger_get_tracer_token(trigger),
-                       &local_error_counter_index);
-       switch (status) {
-       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOT_FOUND:
-       {
-               uid_t trigger_owner_uid;
-               const char *trigger_name;
-
-               get_trigger_info_for_log(
-                               trigger, &trigger_name, &trigger_owner_uid);
-
-               DBG("Event notifier error counter index not found for tracer token (allocating a new one): trigger name = '%s', trigger owner uid = %d, tracer token = %" PRIu64,
-                               trigger_name, trigger_owner_uid,
-                               lttng_trigger_get_tracer_token(trigger));
-
-               status = create_error_counter_index_for_token(state,
-                               lttng_trigger_get_tracer_token(trigger),
-                               &local_error_counter_index);
-               if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-                       ERR("Error creating index for token: status = %s, trigger name = '%s', trigger owner uid = %d",
-                                       error_accounting_status_str(status),
-                                       trigger_name, trigger_owner_uid);
-                       goto end;
-               }
-               /* fall-through. */
-       }
-       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
-               *error_counter_index = local_error_counter_index;
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-               break;
-       default:
-               break;
-       }
-
-#ifdef HAVE_LIBLTTNG_UST_CTL
-       switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
-       case LTTNG_DOMAIN_UST:
-       case LTTNG_DOMAIN_PYTHON:
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-               pthread_mutex_lock(&the_event_notifier_counter.lock);
-               the_event_notifier_counter.count++;
-               if (the_event_notifier_counter.count == 1) {
-                       /*
-                        * On the first event notifier, we get a reference to
-                        * every existing UID entries. This ensures that the
-                        * entries are kept around if there are still
-                        * registered event notifiers but no apps.
-                        */
-                       get_ref_all_ust_error_accounting_entry();
-               }
-               pthread_mutex_unlock(&the_event_notifier_counter.lock);
-               break;
-       default:
-               break;
-       }
-#endif /* HAVE_LIBLTTNG_UST_CTL */
-
-
-end:
-       return status;
-}
-
-static
-enum event_notifier_error_accounting_status
-event_notifier_error_accounting_kernel_get_count(
-               const struct lttng_trigger *trigger, uint64_t *count)
-{
-       struct lttng_kernel_abi_counter_aggregate counter_aggregate = {};
-       enum event_notifier_error_accounting_status status;
-       uint64_t error_counter_index;
-       int ret;
-
-       status = get_error_counter_index_for_token(&kernel_state,
-                       lttng_trigger_get_tracer_token(trigger),
-                       &error_counter_index);
-       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-               ERR("Error getting index for token: status=%s",
-                               error_accounting_status_str(status));
-               goto end;
-       }
-
-       counter_aggregate.index.number_dimensions = 1;
-       counter_aggregate.index.dimension_indexes[0] = error_counter_index;
-
-       LTTNG_ASSERT(kernel_error_accounting_entry.error_counter_fd);
-
-       ret = kernctl_counter_get_aggregate_value(
-                       kernel_error_accounting_entry.error_counter_fd,
-                       &counter_aggregate);
-       if (ret || counter_aggregate.value.value < 0) {
-               uid_t trigger_owner_uid;
-               const char *trigger_name;
-
-               get_trigger_info_for_log(trigger, &trigger_name,
-                               &trigger_owner_uid);
-
-               if (counter_aggregate.value.value < 0) {
-                       ERR("Invalid negative event notifier error counter value: trigger owner = %d, trigger name = '%s', value = %" PRId64,
-                                       trigger_owner_uid, trigger_name,
-                                       counter_aggregate.value.value);
-               } else {
-                       ERR("Failed to getting event notifier error count: trigger owner = %d, trigger name = '%s', ret = %d",
-                                       trigger_owner_uid, trigger_name, ret);
-               }
-
-               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
-               goto end;
-       }
-
-       /* Error count can't be negative. */
-       LTTNG_ASSERT(counter_aggregate.value.value >= 0);
-       *count = (uint64_t) counter_aggregate.value.value;
-
-       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-
-end:
-       return status;
-}
-
-enum event_notifier_error_accounting_status
-event_notifier_error_accounting_get_count(
-               const struct lttng_trigger *trigger, uint64_t *count)
-{
-       switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
-       case LTTNG_DOMAIN_KERNEL:
-               return event_notifier_error_accounting_kernel_get_count(
-                               trigger, count);
-       case LTTNG_DOMAIN_UST:
-       case LTTNG_DOMAIN_PYTHON:
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-#ifdef HAVE_LIBLTTNG_UST_CTL
-               return event_notifier_error_accounting_ust_get_count(trigger,
-                               count);
-#else
-               *count = 0;
-               return EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-#endif /* HAVE_LIBLTTNG_UST_CTL */
-       default:
-               abort();
-       }
-}
-
-static
-enum event_notifier_error_accounting_status
-event_notifier_error_accounting_clear(const struct lttng_trigger *trigger)
-{
-       switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
-       case LTTNG_DOMAIN_KERNEL:
-               return event_notifier_error_accounting_kernel_clear(trigger);
-       case LTTNG_DOMAIN_UST:
-       case LTTNG_DOMAIN_PYTHON:
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-#ifdef HAVE_LIBLTTNG_UST_CTL
-               return event_notifier_error_accounting_ust_clear(trigger);
-#else
-               return EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
-#endif /* HAVE_LIBLTTNG_UST_CTL */
-       default:
-               abort();
-       }
-}
-
-static void free_index_ht_entry(struct rcu_head *head)
-{
-       struct index_ht_entry *entry = caa_container_of(head,
-                       struct index_ht_entry, rcu_head);
-
-       free(entry);
-}
-
-void event_notifier_error_accounting_unregister_event_notifier(
-               const struct lttng_trigger *trigger)
-{
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_u64 *node;
-       const uint64_t tracer_token = lttng_trigger_get_tracer_token(trigger);
-       enum event_notifier_error_accounting_status status;
-       struct error_accounting_state *state;
-
-       status = event_notifier_error_accounting_clear(trigger);
-       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-               /* Trigger details already logged by callee on error. */
-               ERR("Failed to clear event notifier error counter during unregistration of event notifier: status = '%s'",
-                               error_accounting_status_str(status));
-               goto end;
-       }
-
-       rcu_read_lock();
-
-       switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
-       case LTTNG_DOMAIN_KERNEL:
-               state = &kernel_state;
-               break;
-#ifdef HAVE_LIBLTTNG_UST_CTL
-       case LTTNG_DOMAIN_UST:
-       case LTTNG_DOMAIN_PYTHON:
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-               state = &ust_state;
-
-               pthread_mutex_lock(&the_event_notifier_counter.lock);
-               the_event_notifier_counter.count--;
-               if (the_event_notifier_counter.count == 0) {
-
-                       /*
-                        * When unregistering the last event notifier, put one
-                        * reference to every uid entries on the behalf of all
-                        * event notifiers.
-                        */
-                       put_ref_all_ust_error_accounting_entry();
-               }
-
-               pthread_mutex_unlock(&the_event_notifier_counter.lock);
-
-               break;
-#endif /* HAVE_LIBLTTNG_UST_CTL */
-       default:
-               abort();
-       }
-
-       lttng_ht_lookup(state->indices_ht, &tracer_token, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (node) {
-               int del_ret;
-               struct index_ht_entry *index_entry = caa_container_of(
-                               node, typeof(*index_entry), node);
-               enum lttng_index_allocator_status index_alloc_status;
-
-               index_alloc_status = lttng_index_allocator_release(
-                               state->index_allocator,
-                               index_entry->error_counter_index);
-               if (index_alloc_status != LTTNG_INDEX_ALLOCATOR_STATUS_OK) {
-                       uid_t trigger_owner_uid;
-                       const char *trigger_name;
-
-                       get_trigger_info_for_log(trigger, &trigger_name,
-                                       &trigger_owner_uid);
-
-                       ERR("Failed to release event notifier error counter index: index = %" PRIu64 ", trigger name = '%s', trigger owner uid = %d",
-                                       index_entry->error_counter_index,
-                                       trigger_name, (int) trigger_owner_uid);
-                       /* Don't exit, perform the rest of the clean-up. */
-               }
-
-               del_ret = lttng_ht_del(state->indices_ht, &iter);
-               LTTNG_ASSERT(!del_ret);
-               call_rcu(&index_entry->rcu_head, free_index_ht_entry);
-       }
-
-end:
-       rcu_read_unlock();
-}
-
-void event_notifier_error_accounting_fini(void)
-{
-       if (kernel_error_accounting_entry.error_counter_fd) {
-               const int ret = close(kernel_error_accounting_entry.error_counter_fd);
-
-               if (ret) {
-                       PERROR("Failed to close kernel event notifier error counter");
-               }
-       }
-
-       lttng_ht_destroy(error_counter_uid_ht);
-
-       fini_error_accounting_state(&kernel_state);
-       fini_error_accounting_state(&ust_state);
-}
diff --git a/src/bin/lttng-sessiond/event-notifier-error-accounting.cpp b/src/bin/lttng-sessiond/event-notifier-error-accounting.cpp
new file mode 100644 (file)
index 0000000..c99c393
--- /dev/null
@@ -0,0 +1,1368 @@
+/*
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <urcu/compiler.h>
+#include <pthread.h>
+
+#include <common/error.h>
+#include <common/hashtable/hashtable.h>
+#include <common/index-allocator.h>
+#include <common/kernel-ctl/kernel-ctl.h>
+#include <common/shm.h>
+#include <lttng/trigger/trigger-internal.h>
+
+#include "event-notifier-error-accounting.h"
+#include "lttng-ust-error.h"
+#include "ust-app.h"
+
+#define ERROR_COUNTER_INDEX_HT_INITIAL_SIZE 16
+
+struct index_ht_entry {
+       struct lttng_ht_node_u64 node;
+       uint64_t error_counter_index;
+       struct rcu_head rcu_head;
+};
+
+struct ust_error_accounting_entry {
+       uid_t uid;
+       struct urcu_ref ref;
+       struct lttng_ht_node_u64 node;
+       struct rcu_head rcu_head;
+       struct lttng_ust_ctl_daemon_counter *daemon_counter;
+       /*
+        * These `lttng_ust_abi_object_data` are anonymous handles to the
+        * counter objects. They are only used to be duplicated for each new
+        * application of the user. To destroy them, call
+        * `lttng_ust_ctl_release_object()` with the `sock` parameter set to
+        * -1, e.g. `lttng_ust_ctl_release_object(-1, data)`.
+        */
+       struct lttng_ust_abi_object_data *counter;
+       struct lttng_ust_abi_object_data **cpu_counters;
+       int nr_counter_cpu_fds;
+};
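(For illustration only, not part of the patch: the destruction idiom the comment above describes, mirroring what free_ust_error_accounting_entry() does further down; `entry` is a hypothetical pointer to a fully set-up ust_error_accounting_entry.)

    /* Anonymous handle: no application socket is involved, so pass -1. */
    lttng_ust_ctl_release_object(-1, entry->counter);
    free(entry->counter);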
+
+struct kernel_error_accounting_entry {
+       int error_counter_fd;
+};
+
+static struct kernel_error_accounting_entry kernel_error_accounting_entry;
+
+/* Hashtable mapping uid to ust_error_accounting_entry. */
+static struct lttng_ht *error_counter_uid_ht;
+
+struct error_accounting_state {
+       struct lttng_index_allocator *index_allocator;
+       /* Hashtable mapping event notifier token to index_ht_entry. */
+       struct lttng_ht *indices_ht;
+       uint64_t number_indices;
+};
+
+static struct error_accounting_state ust_state;
+static struct error_accounting_state kernel_state;
+
+static inline void get_trigger_info_for_log(const struct lttng_trigger *trigger,
+               const char **trigger_name,
+               uid_t *trigger_owner_uid)
+{
+       enum lttng_trigger_status trigger_status;
+
+       trigger_status = lttng_trigger_get_name(trigger, trigger_name);
+       switch (trigger_status) {
+       case LTTNG_TRIGGER_STATUS_OK:
+               break;
+       case LTTNG_TRIGGER_STATUS_UNSET:
+               *trigger_name = "(anonymous)";
+               break;
+       default:
+               abort();
+       }
+
+       trigger_status = lttng_trigger_get_owner_uid(trigger,
+                       trigger_owner_uid);
+       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
+}
+
+static inline
+const char *error_accounting_status_str(
+               enum event_notifier_error_accounting_status status)
+{
+       switch (status) {
+       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
+               return "OK";
+       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR:
+               return "ERROR";
+       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOT_FOUND:
+               return "NOT_FOUND";
+       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM:
+               return "NOMEM";
+       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NO_INDEX_AVAILABLE:
+               return "NO_INDEX_AVAILABLE";
+       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD:
+               return "APP_DEAD";
+       default:
+               abort();
+       }
+}
+
+#ifdef HAVE_LIBLTTNG_UST_CTL
+struct event_notifier_counter {
+       pthread_mutex_t lock;
+       long count;
+};
+
+static struct event_notifier_counter the_event_notifier_counter;
+
+static void free_ust_error_accounting_entry(struct rcu_head *head)
+{
+       int i;
+       struct ust_error_accounting_entry *entry =
+                       caa_container_of(head, typeof(*entry), rcu_head);
+
+       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
+               lttng_ust_ctl_release_object(-1, entry->cpu_counters[i]);
+               free(entry->cpu_counters[i]);
+       }
+
+       free(entry->cpu_counters);
+
+       lttng_ust_ctl_release_object(-1, entry->counter);
+       free(entry->counter);
+
+       lttng_ust_ctl_destroy_counter(entry->daemon_counter);
+
+       free(entry);
+}
+
+static
+bool ust_error_accounting_entry_get(struct ust_error_accounting_entry *entry)
+{
+       return urcu_ref_get_unless_zero(&entry->ref);
+}
+
+static
+void ust_error_accounting_entry_release(struct urcu_ref *entry_ref)
+{
+       struct ust_error_accounting_entry *entry =
+                       container_of(entry_ref, typeof(*entry), ref);
+
+       rcu_read_lock();
+       cds_lfht_del(error_counter_uid_ht->ht, &entry->node.node);
+       call_rcu(&entry->rcu_head, free_ust_error_accounting_entry);
+       rcu_read_unlock();
+}
+
+
+static
+void ust_error_accounting_entry_put(struct ust_error_accounting_entry *entry)
+{
+       if (!entry) {
+               return;
+       }
+
+       urcu_ref_put(&entry->ref, ust_error_accounting_entry_release);
+}
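(For illustration only, not part of the patch: how the reference-counting helpers above fit together. Creation initializes the refcount, users take and drop references, and the last put triggers ust_error_accounting_entry_release(), which unlinks the node and defers freeing to free_ust_error_accounting_entry() via RCU.)

    urcu_ref_init(&entry->ref);                   /* creation reference */
    (void) ust_error_accounting_entry_get(entry); /* extra user reference */
    ust_error_accounting_entry_put(entry);        /* drop user reference */
    ust_error_accounting_entry_put(entry);        /* last put: release and RCU free */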
+
+/*
+ * Put one reference to every UID entry.
+ */
+static
+void put_ref_all_ust_error_accounting_entry(void)
+{
+       struct lttng_ht_iter iter;
+       struct ust_error_accounting_entry *uid_entry;
+
+       ASSERT_LOCKED(the_event_notifier_counter.lock);
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(error_counter_uid_ht->ht, &iter.iter,
+                       uid_entry, node.node) {
+               ust_error_accounting_entry_put(uid_entry);
+       }
+
+       rcu_read_unlock();
+}
+
+/*
+ * Get one reference to every UID entry.
+ */
+static
+void get_ref_all_ust_error_accounting_entry(void)
+{
+       struct lttng_ht_iter iter;
+       struct ust_error_accounting_entry *uid_entry;
+
+       ASSERT_LOCKED(the_event_notifier_counter.lock);
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(error_counter_uid_ht->ht, &iter.iter,
+                       uid_entry, node.node) {
+               ust_error_accounting_entry_get(uid_entry);
+       }
+
+       rcu_read_unlock();
+}
+
+#endif /* HAVE_LIBLTTNG_UST_CTL */
+
+static
+enum event_notifier_error_accounting_status
+init_error_accounting_state(struct error_accounting_state *state,
+               uint64_t index_count)
+{
+       enum event_notifier_error_accounting_status status;
+
+       LTTNG_ASSERT(state);
+
+       state->number_indices = index_count;
+
+       state->index_allocator = lttng_index_allocator_create(index_count);
+       if (!state->index_allocator) {
+               ERR("Failed to allocate event notifier error counter index allocator");
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
+               goto end;
+       }
+
+       state->indices_ht = lttng_ht_new(ERROR_COUNTER_INDEX_HT_INITIAL_SIZE,
+                       LTTNG_HT_TYPE_U64);
+       if (!state->indices_ht) {
+               ERR("Failed to allocate error counter indices hash table");
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
+               goto error_indices_ht;
+       }
+
+       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+       goto end;
+
+error_indices_ht:
+       lttng_index_allocator_destroy(state->index_allocator);
+       state->index_allocator = NULL;
+end:
+       return status;
+}
+
+static
+void fini_error_accounting_state(struct error_accounting_state *state)
+{
+       LTTNG_ASSERT(state);
+
+       /*
+        * Will assert if some error counter indices were not released (an
+        * internal error).
+        */
+       lttng_ht_destroy(state->indices_ht);
+       lttng_index_allocator_destroy(state->index_allocator);
+}
+
+enum event_notifier_error_accounting_status
+event_notifier_error_accounting_init(uint64_t buffer_size_kernel,
+               uint64_t buffer_size_ust)
+{
+       enum event_notifier_error_accounting_status status;
+
+       status = init_error_accounting_state(&kernel_state, buffer_size_kernel);
+       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+               ERR("Failed to initialize kernel event notifier accounting state: status = %s",
+                               error_accounting_status_str(status));
+               goto end;
+       }
+
+       status = init_error_accounting_state(&ust_state, buffer_size_ust);
+       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+               ERR("Failed to initialize UST event notifier accounting state: status = %s",
+                               error_accounting_status_str(status));
+               goto error_ust_state;
+       }
+
+       error_counter_uid_ht = lttng_ht_new(
+                       ERROR_COUNTER_INDEX_HT_INITIAL_SIZE, LTTNG_HT_TYPE_U64);
+       if (!error_counter_uid_ht) {
+               ERR("Failed to allocate UID to error counter accountant hash table");
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
+               goto error_uid_ht;
+       }
+
+       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+       goto end;
+
+error_uid_ht:
+       fini_error_accounting_state(&ust_state);
+error_ust_state:
+       fini_error_accounting_state(&kernel_state);
+end:
+       return status;
+}
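(For illustration only, not part of the patch: a minimal sketch of the expected pairing of this initialization with event_notifier_error_accounting_fini(). The buffer sizes are hypothetical placeholders; the session daemon derives the real values from its configuration.)

    enum event_notifier_error_accounting_status status;

    status = event_notifier_error_accounting_init(4096, 4096);
    if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
            /* Errors were already logged by the callee. */
            return -1;
    }

    /* ... normal operation ... */

    event_notifier_error_accounting_fini();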
+
+/*
+ * Return the error counter index associated with this event notifier tracer
+ * token. Returns _STATUS_OK if found and _STATUS_NOT_FOUND otherwise.
+ */
+static
+enum event_notifier_error_accounting_status get_error_counter_index_for_token(
+               struct error_accounting_state *state, uint64_t tracer_token,
+               uint64_t *error_counter_index)
+{
+       struct lttng_ht_node_u64 *node;
+       struct lttng_ht_iter iter;
+       const struct index_ht_entry *index_entry;
+       enum event_notifier_error_accounting_status status;
+
+       rcu_read_lock();
+       lttng_ht_lookup(state->indices_ht, &tracer_token, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (node) {
+               index_entry = caa_container_of(
+                               node, const struct index_ht_entry, node);
+               *error_counter_index = index_entry->error_counter_index;
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+       } else {
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOT_FOUND;
+       }
+
+       rcu_read_unlock();
+       return status;
+}
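(For illustration only, not part of the patch: the lookup-then-allocate pattern this helper enables. event_notifier_error_accounting_register_event_notifier(), later in this file, follows the same flow; `token` is a hypothetical tracer token.)

    uint64_t index;
    enum event_notifier_error_accounting_status status;

    status = get_error_counter_index_for_token(&ust_state, token, &index);
    if (status == EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOT_FOUND) {
            /* First registration for this token: allocate a new index. */
            status = create_error_counter_index_for_token(
                            &ust_state, token, &index);
    }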
+
+#ifdef HAVE_LIBLTTNG_UST_CTL
+/*
+ * Find the entry for this app's UID; the caller acquires a reference if the
+ * entry is found.
+ */
+static
+struct ust_error_accounting_entry *ust_error_accounting_entry_find(
+               struct lttng_ht *uid_ht, const struct ust_app *app)
+{
+       struct ust_error_accounting_entry *entry;
+       struct lttng_ht_node_u64 *node;
+       struct lttng_ht_iter iter;
+       uint64_t key = app->uid;
+
+       lttng_ht_lookup(uid_ht, &key, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (node == NULL) {
+               entry = NULL;
+       } else {
+               bool got_ref;
+
+               entry = caa_container_of(node,
+                               struct ust_error_accounting_entry, node);
+
+               got_ref = ust_error_accounting_entry_get(entry);
+               if (!got_ref) {
+                       entry = NULL;
+               }
+       }
+
+       return entry;
+}
+
+/*
+ * Create the entry for this app's UID; the caller acquires a reference to the
+ * entry.
+ */
+static
+struct ust_error_accounting_entry *ust_error_accounting_entry_create(
+               struct lttng_ht *uid_ht, const struct ust_app *app)
+{
+       int i, ret, *cpu_counter_fds = NULL;
+       struct lttng_ust_ctl_daemon_counter *daemon_counter;
+       struct lttng_ust_abi_object_data *counter, **cpu_counters;
+       struct ust_error_accounting_entry *entry = NULL;
+       lttng_ust_ctl_counter_dimension dimension;
+
+       dimension.size = ust_state.number_indices;
+       dimension.has_underflow = false;
+       dimension.has_overflow = false;
+
+       if (!ust_app_supports_counters(app)) {
+               DBG("Refusing to create accounting entry for application (unsupported feature): app name = '%s', app ppid = %d",
+                               app->name, (int) app->ppid);
+               goto error;
+       }
+
+       entry = (ust_error_accounting_entry *) zmalloc(sizeof(struct ust_error_accounting_entry));
+       if (!entry) {
+               PERROR("Failed to allocate event notifier error accounting entry")
+               goto error;
+       }
+
+       urcu_ref_init(&entry->ref);
+       entry->uid = app->uid;
+       entry->nr_counter_cpu_fds = lttng_ust_ctl_get_nr_cpu_per_counter();
+
+       cpu_counter_fds = (int *) zmalloc(entry->nr_counter_cpu_fds * sizeof(*cpu_counter_fds));
+       if (!cpu_counter_fds) {
+               PERROR("Failed to allocate event notifier error counter file descriptors array: application uid = %d, application name = '%s', pid = %d, allocation size = %zu",
+                               (int) app->uid, app->name, (int) app->pid,
+                               entry->nr_counter_cpu_fds * sizeof(*cpu_counter_fds));
+               goto error_counter_cpu_fds_alloc;
+       }
+
+       /* Initialize to an invalid fd value so the error path only closes valid fds. */
+       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
+               cpu_counter_fds[i] = -1;
+       }
+
+       cpu_counters = (lttng_ust_abi_object_data **) zmalloc(entry->nr_counter_cpu_fds * sizeof(struct lttng_ust_abi_object_data *));
+       if (!cpu_counters) {
+               PERROR("Failed to allocate event notifier error counter lttng_ust_abi_object_data array: application uid = %d, application name = '%s', pid = %d, allocation size = %zu",
+                               (int) app->uid, app->name, (int) app->pid,
+                               entry->nr_counter_cpu_fds * sizeof(struct lttng_ust_abi_object_data *));
+               goto error_counter_cpus_alloc;
+       }
+
+       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
+               cpu_counter_fds[i] = shm_create_anonymous("event-notifier-error-accounting");
+               if (cpu_counter_fds[i] == -1) {
+                       ERR("Failed to create event notifier error accounting shared memory for application user: application uid = %d, pid = %d, application name = '%s'",
+                                       (int) app->uid, (int) app->pid, app->name);
+                       goto error_shm_alloc;
+               }
+       }
+
+       /*
+        * Ownership of the file descriptors is transferred to the ustctl object.
+        */
+       daemon_counter = lttng_ust_ctl_create_counter(1, &dimension, 0, -1,
+                       entry->nr_counter_cpu_fds, cpu_counter_fds,
+                       LTTNG_UST_CTL_COUNTER_BITNESS_32,
+                       LTTNG_UST_CTL_COUNTER_ARITHMETIC_MODULAR,
+                       LTTNG_UST_CTL_COUNTER_ALLOC_PER_CPU,
+                       false);
+       if (!daemon_counter) {
+               goto error_create_daemon_counter;
+       }
+
+       ret = lttng_ust_ctl_create_counter_data(daemon_counter, &counter);
+       if (ret) {
+               ERR("Failed to create userspace tracer counter data for application user: uid = %d, pid = %d, application name = '%s'",
+                               (int) app->uid, (int) app->pid, app->name);
+               goto error_create_counter_data;
+       }
+
+       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
+               ret = lttng_ust_ctl_create_counter_cpu_data(daemon_counter, i,
+                               &cpu_counters[i]);
+               if (ret) {
+                       ERR("Failed to create userspace tracer counter cpu data for application user: uid = %d, pid = %d, application name = '%s'",
+                                       (int) app->uid, (int) app->pid,
+                                       app->name);
+                       goto error_create_counter_cpu_data;
+               }
+       }
+
+       entry->daemon_counter = daemon_counter;
+       entry->counter = counter;
+       entry->cpu_counters = cpu_counters;
+
+       lttng_ht_node_init_u64(&entry->node, entry->uid);
+       lttng_ht_add_unique_u64(error_counter_uid_ht, &entry->node);
+
+       goto end;
+
+error_create_counter_cpu_data:
+       /* Tear down any allocated cpu counters. */
+       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
+               if (!cpu_counters[i]) {
+                       /*
+                        * Early-exit when an error occurred before all cpu
+                        * counters could be initialized.
+                        */
+                       break;
+               }
+
+               lttng_ust_ctl_release_object(-1, cpu_counters[i]);
+               free(cpu_counters[i]);
+       }
+
+       lttng_ust_ctl_release_object(-1, entry->counter);
+       free(entry->counter);
+error_create_counter_data:
+       lttng_ust_ctl_destroy_counter(daemon_counter);
+error_create_daemon_counter:
+error_shm_alloc:
+       /* Error occurred before per-cpu SHMs were handed off to ustctl. */
+       if (cpu_counter_fds) {
+               for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
+                       if (cpu_counter_fds[i] < 0) {
+                               /*
+                        * Early-exit when an error occurred before all cpu
+                                * counter shm fds could be initialized.
+                                */
+                               break;
+                       }
+
+                       ret = close(cpu_counter_fds[i]);
+                       if (ret) {
+                               PERROR("Failed to close error counter per-CPU shm file descriptor: fd = %d",
+                                               cpu_counter_fds[i]);
+                       }
+               }
+       }
+
+       free(cpu_counters);
+error_counter_cpus_alloc:
+error_counter_cpu_fds_alloc:
+       free(entry);
+error:
+       entry = NULL;
+end:
+       free(cpu_counter_fds);
+       return entry;
+}
+
+static
+enum event_notifier_error_accounting_status send_counter_data_to_ust(
+               struct ust_app *app,
+               struct lttng_ust_abi_object_data *new_counter)
+{
+       int ret;
+       enum event_notifier_error_accounting_status status;
+
+       /* Attach counter to trigger group. */
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_send_counter_data_to_ust(app->sock,
+                       app->event_notifier_group.object->handle, new_counter);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+                       ERR("Failed to send counter data to application: application name = '%s', pid = %d, ret = %d",
+                                       app->name, app->pid, ret);
+                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+               } else {
+                       DBG3("Failed to send counter data to application (application is dead): application name = '%s', pid = %d, ret = %d",
+                                       app->name, app->pid, ret);
+                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD;
+               }
+
+               goto end;
+       }
+
+       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+end:
+       return status;
+}
+
+static
+enum event_notifier_error_accounting_status send_counter_cpu_data_to_ust(
+               struct ust_app *app,
+               struct lttng_ust_abi_object_data *counter,
+               struct lttng_ust_abi_object_data *counter_cpu)
+{
+       int ret;
+       enum event_notifier_error_accounting_status status;
+
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_send_counter_cpu_data_to_ust(app->sock,
+                       counter, counter_cpu);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+                       ERR("Failed to send counter CPU data to application: application name = '%s', pid = %d, ret = %d",
+                                       app->name, app->pid, ret);
+                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+               } else {
+                       DBG3("Failed to send counter CPU data to application: application name = '%s', pid = %d, ret = %d",
+                                       app->name, app->pid, ret);
+                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD;
+               }
+
+               goto end;
+       }
+
+       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+end:
+       return status;
+}
+
+enum event_notifier_error_accounting_status
+event_notifier_error_accounting_register_app(struct ust_app *app)
+{
+       int ret;
+       uint64_t i;
+       struct lttng_ust_abi_object_data *new_counter;
+       struct ust_error_accounting_entry *entry;
+       enum event_notifier_error_accounting_status status;
+       struct lttng_ust_abi_object_data **cpu_counters;
+
+       if (!ust_app_supports_counters(app)) {
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED;
+               goto end;
+       }
+
+       /*
+        * Check if we already have an error counter for the user ID of this
+        * app. If not, create one.
+        */
+       rcu_read_lock();
+       entry = ust_error_accounting_entry_find(error_counter_uid_ht, app);
+       if (entry == NULL) {
+               /*
+                * Take the event notifier counter lock before creating the new
+                * entry to ensure that no event notifier is registered between
+                * the entry creation and the event notifier count check.
+                */
+               pthread_mutex_lock(&the_event_notifier_counter.lock);
+
+               entry = ust_error_accounting_entry_create(error_counter_uid_ht,
+                               app);
+               if (!entry) {
+                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+                       pthread_mutex_unlock(&the_event_notifier_counter.lock);
+                       goto error_creating_entry;
+               }
+
+               /*
+                * We just created a new UID entry. If there are event
+                * notifiers already registered, take one reference on their
+                * behalf.
+                */
+               if (the_event_notifier_counter.count > 0) {
+                       ust_error_accounting_entry_get(entry);
+               }
+
+               pthread_mutex_unlock(&the_event_notifier_counter.lock);
+       }
+
+       /* Duplicate counter object data. */
+       ret = lttng_ust_ctl_duplicate_ust_object_data(&new_counter,
+                       entry->counter);
+       if (ret) {
+               ERR("Failed to duplicate event notifier error accounting counter for application user: application uid = %d, pid = %d, application name = '%s'",
+                               (int) app->uid, (int) app->pid, app->name);
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+               goto error_duplicate_counter;
+       }
+
+       status = send_counter_data_to_ust(app, new_counter);
+       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+               if (status == EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD) {
+                       goto error_send_counter_data;
+               }
+
+               ERR("Failed to send counter data to application tracer: status = %s, application uid = %d, pid = %d, application name = '%s'",
+                               error_accounting_status_str(status),
+                               (int) app->uid, (int) app->pid, app->name);
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+               goto error_send_counter_data;
+       }
+
+       cpu_counters = (lttng_ust_abi_object_data **) zmalloc(entry->nr_counter_cpu_fds * sizeof(struct lttng_ust_abi_object_data *));
+       if (!cpu_counters) {
+               PERROR("Failed to allocate event notifier error counter lttng_ust_abi_object_data array: application uid = %d, application name = '%s', pid = %d, allocation size = %zu",
+                               (int) app->uid, app->name, (int) app->pid,
+                               entry->nr_counter_cpu_fds * sizeof(**cpu_counters));
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
+               goto error_allocate_cpu_counters;
+       }
+
+       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
+               struct lttng_ust_abi_object_data *new_counter_cpu = NULL;
+
+               ret = lttng_ust_ctl_duplicate_ust_object_data(&new_counter_cpu,
+                               entry->cpu_counters[i]);
+               if (ret) {
+                       ERR("Failed to duplicate userspace tracer counter cpu data for application user: uid = %d, pid = %d, application name = '%s'",
+                                       (int) app->uid, (int) app->pid,
+                                       app->name);
+                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
+                       goto error_duplicate_cpu_counter;
+               }
+
+               cpu_counters[i] = new_counter_cpu;
+
+               status = send_counter_cpu_data_to_ust(app, new_counter,
+                               new_counter_cpu);
+               if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+                       if (status == EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD) {
+                               goto error_send_cpu_counter_data;
+                       }
+
+                       ERR("Failed to send counter cpu data to application tracer: status = %s, application uid = %d, pid = %d, application name = '%s'",
+                                       error_accounting_status_str(status),
+                                       (int) app->uid, (int) app->pid,
+                                       app->name);
+                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+                       goto error_send_cpu_counter_data;
+               }
+       }
+
+       app->event_notifier_group.counter = new_counter;
+       new_counter = NULL;
+       app->event_notifier_group.nr_counter_cpu = entry->nr_counter_cpu_fds;
+       app->event_notifier_group.counter_cpu = cpu_counters;
+       cpu_counters = NULL;
+       goto end_unlock;
+
+error_send_cpu_counter_data:
+error_duplicate_cpu_counter:
+       /* Tear down any duplicated cpu counters. */
+       for (i = 0; i < entry->nr_counter_cpu_fds; i++) {
+               if (!cpu_counters[i]) {
+                       /*
+                        * Early-exit when an error occurred before all cpu
+                        * counters could be initialized.
+                        */
+                       break;
+               }
+
+               lttng_ust_ctl_release_object(-1, cpu_counters[i]);
+               free(cpu_counters[i]);
+       }
+
+       free(cpu_counters);
+
+error_allocate_cpu_counters:
+error_send_counter_data:
+       lttng_ust_ctl_release_object(-1, new_counter);
+       free(new_counter);
+error_duplicate_counter:
+       ust_error_accounting_entry_put(entry);
+error_creating_entry:
+       app->event_notifier_group.counter = NULL;
+end_unlock:
+       rcu_read_unlock();
+end:
+       return status;
+}
+
+enum event_notifier_error_accounting_status
+event_notifier_error_accounting_unregister_app(struct ust_app *app)
+{
+       enum event_notifier_error_accounting_status status;
+       struct ust_error_accounting_entry *entry;
+       int i;
+
+       rcu_read_lock();
+
+       /* If an error occurred during app registration, no entry was created. */
+       if (!app->event_notifier_group.counter) {
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+               goto end;
+       }
+
+       entry = ust_error_accounting_entry_find(error_counter_uid_ht, app);
+       if (entry == NULL) {
+               ERR("Failed to find event notifier error accounting entry on application teardown: pid = %d, application name = '%s'",
+                               app->pid, app->name);
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+               goto end;
+       } else {
+               /*
+                * Put the entry twice as we acquired a reference from the
+                * `ust_error_accounting_entry_find()` above.
+                */
+               ust_error_accounting_entry_put(entry);
+               ust_error_accounting_entry_put(entry);
+       }
+
+       for (i = 0; i < app->event_notifier_group.nr_counter_cpu; i++) {
+               lttng_ust_ctl_release_object(app->sock,
+                               app->event_notifier_group.counter_cpu[i]);
+               free(app->event_notifier_group.counter_cpu[i]);
+       }
+
+       free(app->event_notifier_group.counter_cpu);
+
+       lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.counter);
+       free(app->event_notifier_group.counter);
+
+       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+end:
+       rcu_read_unlock();
+       return status;
+}
+
+static
+enum event_notifier_error_accounting_status
+event_notifier_error_accounting_ust_get_count(
+               const struct lttng_trigger *trigger, uint64_t *count)
+{
+       struct lttng_ht_iter iter;
+       struct ust_error_accounting_entry *uid_entry;
+       uint64_t error_counter_index, global_sum = 0;
+       enum event_notifier_error_accounting_status status;
+       size_t dimension_indexes[1];
+       const uint64_t tracer_token = lttng_trigger_get_tracer_token(trigger);
+       uid_t trigger_owner_uid;
+       const char *trigger_name;
+
+       rcu_read_lock();
+
+       get_trigger_info_for_log(trigger, &trigger_name, &trigger_owner_uid);
+
+       status = get_error_counter_index_for_token(&ust_state, tracer_token,
+                       &error_counter_index);
+       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+               ERR("Failed to retrieve index for tracer token: token = %" PRIu64 ", trigger name = '%s', trigger owner uid = %d, status = %s",
+                               tracer_token, trigger_name,
+                               (int) trigger_owner_uid,
+                               error_accounting_status_str(status));
+               goto end;
+       }
+
+       dimension_indexes[0] = error_counter_index;
+
+       /*
+        * Iterate over all the UID entries.
+        * We aggregate the values of all uid entries regardless of whether the
+        * uid matches the trigger's uid because a user that is allowed to
+        * register a trigger with a given sessiond is also allowed to create an
+        * event notifier on all apps that this sessiond is aware of.
+        */
+       cds_lfht_for_each_entry(error_counter_uid_ht->ht, &iter.iter,
+                       uid_entry, node.node) {
+               int ret;
+               int64_t local_value = 0;
+               bool overflow = false, underflow = false;
+
+               ret = lttng_ust_ctl_counter_aggregate(uid_entry->daemon_counter,
+                               dimension_indexes, &local_value, &overflow,
+                               &underflow);
+               if (ret || local_value < 0) {
+                       if (ret) {
+                               ERR("Failed to aggregate event notifier error counter values of trigger: trigger name = '%s', trigger owner uid = %d",
+                                               trigger_name,
+                                               (int) trigger_owner_uid);
+                       } else if (local_value < 0) {
+                               ERR("Negative event notifier error counter value encountered during aggregation: trigger name = '%s', trigger owner uid = %d, value = %" PRId64,
+                                               trigger_name,
+                                               (int) trigger_owner_uid,
+                                               local_value);
+                       } else {
+                               abort();
+                       }
+
+                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+                       goto end;
+               }
+
+               /* Cast is safe as negative values are checked for above. */
+               global_sum += (uint64_t) local_value;
+       }
+
+       *count = global_sum;
+       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+
+end:
+       rcu_read_unlock();
+       return status;
+}
+
+static
+enum event_notifier_error_accounting_status event_notifier_error_accounting_ust_clear(
+               const struct lttng_trigger *trigger)
+{
+       struct lttng_ht_iter iter;
+       struct ust_error_accounting_entry *uid_entry;
+       uint64_t error_counter_index;
+       enum event_notifier_error_accounting_status status;
+       size_t dimension_index;
+       const uint64_t tracer_token = lttng_trigger_get_tracer_token(trigger);
+
+       rcu_read_lock();
+       status = get_error_counter_index_for_token(&ust_state, tracer_token,
+                       &error_counter_index);
+       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+               uid_t trigger_owner_uid;
+               const char *trigger_name;
+
+               get_trigger_info_for_log(trigger, &trigger_name,
+                                        &trigger_owner_uid);
+
+               ERR("Failed to retrieve index for tracer token: token = %" PRIu64 ", trigger name = '%s', trigger owner uid = %d, status = %s",
+                               tracer_token, trigger_name,
+                               (int) trigger_owner_uid,
+                               error_accounting_status_str(status));
+               goto end;
+       }
+
+       dimension_index = error_counter_index;
+
+       /*
+        * Go over all error counters (ignoring uid) as a trigger (and trigger
+        * errors) can be generated from any application that this session
+        * daemon is managing.
+        */
+       cds_lfht_for_each_entry(error_counter_uid_ht->ht, &iter.iter,
+                       uid_entry, node.node) {
+               const int ret = lttng_ust_ctl_counter_clear(uid_entry->daemon_counter,
+                               &dimension_index);
+
+               if (ret) {
+                       uid_t trigger_owner_uid;
+                       const char *trigger_name;
+
+                       get_trigger_info_for_log(trigger, &trigger_name,
+                                                &trigger_owner_uid);
+                       ERR("Failed to clear event notifier counter value for trigger: counter uid = %d, trigger name = '%s', trigger owner uid = %d",
+                                       (int) uid_entry->node.key, trigger_name,
+                                       (int) trigger_owner_uid);
+                       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+                       goto end;
+               }
+       }
+
+       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+end:
+       rcu_read_unlock();
+       return status;
+}
+#endif /* HAVE_LIBLTTNG_UST_CTL */
+
+static
+enum event_notifier_error_accounting_status
+event_notifier_error_accounting_kernel_clear(
+               const struct lttng_trigger *trigger)
+{
+       int ret;
+       uint64_t error_counter_index;
+       enum event_notifier_error_accounting_status status;
+       struct lttng_kernel_abi_counter_clear counter_clear = {};
+
+       status = get_error_counter_index_for_token(&kernel_state,
+                       lttng_trigger_get_tracer_token(trigger),
+                       &error_counter_index);
+       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+               uid_t trigger_owner_uid;
+               const char *trigger_name;
+
+               get_trigger_info_for_log(
+                               trigger, &trigger_name, &trigger_owner_uid);
+
+               ERR("Failed to get event notifier error counter index: trigger owner uid = %d, trigger name = '%s', status = '%s'",
+                               trigger_owner_uid, trigger_name,
+                               error_accounting_status_str(status));
+               goto end;
+       }
+
+       counter_clear.index.number_dimensions = 1;
+       counter_clear.index.dimension_indexes[0] = error_counter_index;
+
+       ret = kernctl_counter_clear(
+                       kernel_error_accounting_entry.error_counter_fd,
+                       &counter_clear);
+       if (ret) {
+               uid_t trigger_owner_uid;
+               const char *trigger_name;
+
+               get_trigger_info_for_log(
+                               trigger, &trigger_name, &trigger_owner_uid);
+
+               ERR("Failed to clear kernel event notifier error counter: trigger owner uid = %d, trigger name = '%s'",
+                               trigger_owner_uid, trigger_name);
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+               goto end;
+       }
+
+       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+end:
+       return status;
+}
+
+enum event_notifier_error_accounting_status
+event_notifier_error_accounting_register_kernel(
+               int kernel_event_notifier_group_fd)
+{
+       int error_counter_fd = -1, ret;
+       enum event_notifier_error_accounting_status status;
+       lttng_kernel_abi_counter_conf error_counter_conf = {
+               .arithmetic = LTTNG_KERNEL_ABI_COUNTER_ARITHMETIC_MODULAR,
+               .bitness = sizeof(void *) == sizeof(uint32_t) ?
+                               LTTNG_KERNEL_ABI_COUNTER_BITNESS_32 :
+                               LTTNG_KERNEL_ABI_COUNTER_BITNESS_64,
+               .number_dimensions = 1,
+               .global_sum_step = 0,
+       };
+       error_counter_conf.dimensions[0].size = kernel_state.number_indices;
+       error_counter_conf.dimensions[0].has_underflow = false;
+       error_counter_conf.dimensions[0].has_overflow = false;
+
+       ret = kernctl_create_event_notifier_group_error_counter(
+                       kernel_event_notifier_group_fd, &error_counter_conf);
+       if (ret < 0) {
+               PERROR("Failed to create event notifier group error counter through kernel ioctl: kernel_event_notifier_group_fd = %d",
+                               kernel_event_notifier_group_fd);
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+               goto error;
+       }
+
+       error_counter_fd = ret;
+
+       /* Prevent fd duplication after execlp(). */
+       ret = fcntl(error_counter_fd, F_SETFD, FD_CLOEXEC);
+       if (ret < 0) {
+               PERROR("Failed to set FD_CLOEXEC flag on event notifier error counter file descriptor: error_counter_fd = %d",
+                               error_counter_fd);
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+               goto error;
+       }
+
+       DBG("Created kernel event notifier group error counter: fd = %d",
+                       error_counter_fd);
+
+       kernel_error_accounting_entry.error_counter_fd =
+                       error_counter_fd;
+       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+
+error:
+       return status;
+}
+
+static
+enum event_notifier_error_accounting_status create_error_counter_index_for_token(
+               struct error_accounting_state *state, uint64_t tracer_token,
+               uint64_t *error_counter_index)
+{
+       struct index_ht_entry *index_entry;
+       enum lttng_index_allocator_status index_alloc_status;
+       uint64_t local_error_counter_index;
+       enum event_notifier_error_accounting_status status;
+
+       LTTNG_ASSERT(state);
+
+       /* Allocate a new index for that counter. */
+       index_alloc_status = lttng_index_allocator_alloc(state->index_allocator,
+                       &local_error_counter_index);
+       switch (index_alloc_status) {
+       case LTTNG_INDEX_ALLOCATOR_STATUS_EMPTY:
+               DBG("No indices left in the configured event notifier error counter: "
+                               "number-of-indices = %" PRIu64,
+                               lttng_index_allocator_get_index_count(
+                                       state->index_allocator));
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NO_INDEX_AVAILABLE;
+               goto end;
+       case LTTNG_INDEX_ALLOCATOR_STATUS_OK:
+               break;
+       default:
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+               goto end;
+       }
+
+       index_entry = (index_ht_entry *) zmalloc(sizeof(*index_entry));
+       if (index_entry == NULL) {
+               PERROR("Failed to allocate event notifier error counter hash table entry");
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOMEM;
+               goto end;
+       }
+
+       index_entry->error_counter_index = local_error_counter_index;
+       lttng_ht_node_init_u64(&index_entry->node, tracer_token);
+       lttng_ht_add_unique_u64(state->indices_ht, &index_entry->node);
+
+       DBG("Allocated error counter index for tracer token: tracer token = %" PRIu64 ", index = %" PRIu64,
+                       tracer_token, local_error_counter_index);
+       *error_counter_index = local_error_counter_index;
+       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+end:
+       return status;
+}
+
+enum event_notifier_error_accounting_status
+event_notifier_error_accounting_register_event_notifier(
+               const struct lttng_trigger *trigger,
+               uint64_t *error_counter_index)
+{
+       enum event_notifier_error_accounting_status status;
+       uint64_t local_error_counter_index;
+       struct error_accounting_state *state;
+
+       switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
+       case LTTNG_DOMAIN_KERNEL:
+               state = &kernel_state;
+               break;
+       case LTTNG_DOMAIN_UST:
+       case LTTNG_DOMAIN_PYTHON:
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+               state = &ust_state;
+               break;
+       default:
+               abort();
+       }
+
+       /*
+        * Check if this event notifier already has an error counter index
+        * assigned.
+        */
+       status = get_error_counter_index_for_token(state,
+                       lttng_trigger_get_tracer_token(trigger),
+                       &local_error_counter_index);
+       switch (status) {
+       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOT_FOUND:
+       {
+               uid_t trigger_owner_uid;
+               const char *trigger_name;
+
+               get_trigger_info_for_log(
+                               trigger, &trigger_name, &trigger_owner_uid);
+
+               DBG("Event notifier error counter index not found for tracer token (allocating a new one): trigger name = '%s', trigger owner uid = %d, tracer token = %" PRIu64,
+                               trigger_name, trigger_owner_uid,
+                               lttng_trigger_get_tracer_token(trigger));
+
+               status = create_error_counter_index_for_token(state,
+                               lttng_trigger_get_tracer_token(trigger),
+                               &local_error_counter_index);
+               if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+                       ERR("Error creating index for token: status = %s, trigger name = '%s', trigger owner uid = %d",
+                                       error_accounting_status_str(status),
+                                       trigger_name, trigger_owner_uid);
+                       goto end;
+               }
+               /* fall-through. */
+       }
+       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
+               *error_counter_index = local_error_counter_index;
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+               break;
+       default:
+               break;
+       }
+
+#ifdef HAVE_LIBLTTNG_UST_CTL
+       switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
+       case LTTNG_DOMAIN_UST:
+       case LTTNG_DOMAIN_PYTHON:
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+               pthread_mutex_lock(&the_event_notifier_counter.lock);
+               the_event_notifier_counter.count++;
+               if (the_event_notifier_counter.count == 1) {
+                       /*
+                        * On the first event notifier, we get a reference to
+                        * every existing UID entry. This ensures that the
+                        * entries are kept around if there are still
+                        * registered event notifiers but no apps.
+                        */
+                       get_ref_all_ust_error_accounting_entry();
+               }
+               pthread_mutex_unlock(&the_event_notifier_counter.lock);
+               break;
+       default:
+               break;
+       }
+#endif /* HAVE_LIBLTTNG_UST_CTL */
+
+end:
+       return status;
+}
+
+static
+enum event_notifier_error_accounting_status
+event_notifier_error_accounting_kernel_get_count(
+               const struct lttng_trigger *trigger, uint64_t *count)
+{
+       struct lttng_kernel_abi_counter_aggregate counter_aggregate = {};
+       enum event_notifier_error_accounting_status status;
+       uint64_t error_counter_index;
+       int ret;
+
+       status = get_error_counter_index_for_token(&kernel_state,
+                       lttng_trigger_get_tracer_token(trigger),
+                       &error_counter_index);
+       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+               ERR("Error getting index for token: status = %s",
+                               error_accounting_status_str(status));
+               goto end;
+       }
+
+       counter_aggregate.index.number_dimensions = 1;
+       counter_aggregate.index.dimension_indexes[0] = error_counter_index;
+
+       LTTNG_ASSERT(kernel_error_accounting_entry.error_counter_fd);
+
+       ret = kernctl_counter_get_aggregate_value(
+                       kernel_error_accounting_entry.error_counter_fd,
+                       &counter_aggregate);
+       if (ret || counter_aggregate.value.value < 0) {
+               uid_t trigger_owner_uid;
+               const char *trigger_name;
+
+               get_trigger_info_for_log(trigger, &trigger_name,
+                               &trigger_owner_uid);
+
+               if (counter_aggregate.value.value < 0) {
+                       ERR("Invalid negative event notifier error counter value: trigger owner = %d, trigger name = '%s', value = %" PRId64,
+                                       trigger_owner_uid, trigger_name,
+                                       counter_aggregate.value.value);
+               } else {
+                       ERR("Failed to get event notifier error count: trigger owner = %d, trigger name = '%s', ret = %d",
+                                       trigger_owner_uid, trigger_name, ret);
+               }
+
+               status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_ERR;
+               goto end;
+       }
+
+       /* Error count can't be negative. */
+       LTTNG_ASSERT(counter_aggregate.value.value >= 0);
+       *count = (uint64_t) counter_aggregate.value.value;
+
+       status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+
+end:
+       return status;
+}
+
+enum event_notifier_error_accounting_status
+event_notifier_error_accounting_get_count(
+               const struct lttng_trigger *trigger, uint64_t *count)
+{
+       switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
+       case LTTNG_DOMAIN_KERNEL:
+               return event_notifier_error_accounting_kernel_get_count(
+                               trigger, count);
+       case LTTNG_DOMAIN_UST:
+       case LTTNG_DOMAIN_PYTHON:
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+#ifdef HAVE_LIBLTTNG_UST_CTL
+               return event_notifier_error_accounting_ust_get_count(trigger,
+                               count);
+#else
+               *count = 0;
+               return EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+#endif /* HAVE_LIBLTTNG_UST_CTL */
+       default:
+               abort();
+       }
+}
+
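+/*
+ * Clear the error counter of a trigger's event notifier, dispatching to the
+ * kernel or user space implementation based on the trigger's domain.
+ */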
+static
+enum event_notifier_error_accounting_status
+event_notifier_error_accounting_clear(const struct lttng_trigger *trigger)
+{
+       switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
+       case LTTNG_DOMAIN_KERNEL:
+               return event_notifier_error_accounting_kernel_clear(trigger);
+       case LTTNG_DOMAIN_UST:
+       case LTTNG_DOMAIN_PYTHON:
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+#ifdef HAVE_LIBLTTNG_UST_CTL
+               return event_notifier_error_accounting_ust_clear(trigger);
+#else
+               return EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
+#endif /* HAVE_LIBLTTNG_UST_CTL */
+       default:
+               abort();
+       }
+}
+
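+/* call_rcu callback releasing an index hash table entry. */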
+static void free_index_ht_entry(struct rcu_head *head)
+{
+       struct index_ht_entry *entry = caa_container_of(head,
+                       struct index_ht_entry, rcu_head);
+
+       free(entry);
+}
+
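+/*
+ * Unregister an event notifier from error accounting: clear its error
+ * counter, release its counter index and remove it from the index hash
+ * table.
+ */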
+void event_notifier_error_accounting_unregister_event_notifier(
+               const struct lttng_trigger *trigger)
+{
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_u64 *node;
+       const uint64_t tracer_token = lttng_trigger_get_tracer_token(trigger);
+       enum event_notifier_error_accounting_status status;
+       struct error_accounting_state *state;
+
+       status = event_notifier_error_accounting_clear(trigger);
+       if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+               /* Trigger details already logged by callee on error. */
+               ERR("Failed to clear event notifier error counter during unregistration of event notifier: status = '%s'",
+                               error_accounting_status_str(status));
+               goto end;
+       }
+
+       rcu_read_lock();
+
+       switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
+       case LTTNG_DOMAIN_KERNEL:
+               state = &kernel_state;
+               break;
+#ifdef HAVE_LIBLTTNG_UST_CTL
+       case LTTNG_DOMAIN_UST:
+       case LTTNG_DOMAIN_PYTHON:
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+               state = &ust_state;
+
+               pthread_mutex_lock(&the_event_notifier_counter.lock);
+               the_event_notifier_counter.count--;
+               if (the_event_notifier_counter.count == 0) {
+                       /*
+                        * When unregistering the last event notifier, put the
+                        * reference held on every UID entry on behalf of all
+                        * event notifiers.
+                        */
+                       put_ref_all_ust_error_accounting_entry();
+               }
+
+               pthread_mutex_unlock(&the_event_notifier_counter.lock);
+
+               break;
+#endif /* HAVE_LIBLTTNG_UST_CTL */
+       default:
+               abort();
+       }
+
+       lttng_ht_lookup(state->indices_ht, &tracer_token, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (node) {
+               int del_ret;
+               struct index_ht_entry *index_entry = caa_container_of(
+                               node, typeof(*index_entry), node);
+               enum lttng_index_allocator_status index_alloc_status;
+
+               index_alloc_status = lttng_index_allocator_release(
+                               state->index_allocator,
+                               index_entry->error_counter_index);
+               if (index_alloc_status != LTTNG_INDEX_ALLOCATOR_STATUS_OK) {
+                       uid_t trigger_owner_uid;
+                       const char *trigger_name;
+
+                       get_trigger_info_for_log(trigger, &trigger_name,
+                                       &trigger_owner_uid);
+
+                       ERR("Failed to release event notifier error counter index: index = %" PRIu64 ", trigger name = '%s', trigger owner uid = %d",
+                                       index_entry->error_counter_index,
+                                       trigger_name, (int) trigger_owner_uid);
+                       /* Don't exit, perform the rest of the clean-up. */
+               }
+
+               del_ret = lttng_ht_del(state->indices_ht, &iter);
+               LTTNG_ASSERT(!del_ret);
+               call_rcu(&index_entry->rcu_head, free_index_ht_entry);
+       }
+
+end:
+       rcu_read_unlock();
+}
+
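+/*
+ * Tear down error accounting: close the kernel error counter, destroy the
+ * per-UID hash table and finalize the kernel and user space states.
+ */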
+void event_notifier_error_accounting_fini(void)
+{
+       if (kernel_error_accounting_entry.error_counter_fd) {
+               const int ret = close(kernel_error_accounting_entry.error_counter_fd);
+
+               if (ret) {
+                       PERROR("Failed to close kernel event notifier error counter");
+               }
+       }
+
+       lttng_ht_destroy(error_counter_uid_ht);
+
+       fini_error_accounting_state(&kernel_state);
+       fini_error_accounting_state(&ust_state);
+}
diff --git a/src/bin/lttng-sessiond/event.c b/src/bin/lttng-sessiond/event.c
deleted file mode 100644 (file)
index 97bb69b..0000000
+++ /dev/null
@@ -1,928 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <urcu/list.h>
-#include <string.h>
-
-#include <common/compat/errno.h>
-#include <lttng/lttng.h>
-#include <lttng/condition/condition.h>
-#include <lttng/condition/event-rule-matches.h>
-#include <lttng/event-rule/event-rule.h>
-#include <lttng/event-rule/event-rule-internal.h>
-#include <common/bytecode/bytecode.h>
-#include <common/error.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-#include <common/filter.h>
-#include <common/context.h>
-
-#include "channel.h"
-#include "event.h"
-#include "kernel.h"
-#include "lttng-sessiond.h"
-#include "lttng-ust-ctl.h"
-#include "lttng-ust-error.h"
-#include "ust-app.h"
-#include "trace-kernel.h"
-#include "trace-ust.h"
-#include "agent.h"
-#include "utils.h"
-
-/*
- * Add unique UST event based on the event name, filter bytecode and loglevel.
- */
-static void add_unique_ust_event(struct lttng_ht *ht,
-               struct ltt_ust_event *event)
-{
-       struct cds_lfht_node *node_ptr;
-       struct ltt_ust_ht_key key;
-
-       LTTNG_ASSERT(ht);
-       LTTNG_ASSERT(ht->ht);
-       LTTNG_ASSERT(event);
-
-       key.name = event->attr.name;
-       key.filter = (struct lttng_bytecode *) event->filter;
-       key.loglevel_type = event->attr.loglevel_type;
-       key.loglevel_value = event->attr.loglevel;
-       key.exclusion = event->exclusion;
-
-       node_ptr = cds_lfht_add_unique(ht->ht,
-                       ht->hash_fct(event->node.key, lttng_ht_seed),
-                       trace_ust_ht_match_event, &key, &event->node.node);
-       LTTNG_ASSERT(node_ptr == &event->node.node);
-}
-
-/*
- * Disable kernel tracepoint events for a channel from the kernel session of
- * a specified event_name and event type.
- * On type LTTNG_EVENT_ALL all events with event_name are disabled.
- * If event_name is NULL all events of the specified type are disabled.
- */
-int event_kernel_disable_event(struct ltt_kernel_channel *kchan,
-               const char *event_name, enum lttng_event_type type)
-{
-       int ret, error = 0, found = 0;
-       struct ltt_kernel_event *kevent;
-
-       LTTNG_ASSERT(kchan);
-
-       /* For each event in the kernel session */
-       cds_list_for_each_entry(kevent, &kchan->events_list.head, list) {
-               if (type != LTTNG_EVENT_ALL && kevent->type != type)
-                       continue;
-               if (event_name != NULL && strcmp(event_name, kevent->event->name)) {
-                       continue;
-               }
-               found++;
-               ret = kernel_disable_event(kevent);
-               if (ret < 0) {
-                       error = 1;
-                       continue;
-               }
-       }
-       DBG("Disable kernel event: found %d events with name: %s and type: %d",
-                       found, event_name ? event_name : "NULL", type);
-
-       if (event_name != NULL && !found) {
-               ret = LTTNG_ERR_NO_EVENT;
-       } else {
-               ret = error ? LTTNG_ERR_KERN_DISABLE_FAIL : LTTNG_OK;
-       }
-
-       return ret;
-}
-
-/*
- * Enable kernel tracepoint event for a channel from the kernel session.
- * We own filter_expression and filter.
- */
-int event_kernel_enable_event(struct ltt_kernel_channel *kchan,
-               struct lttng_event *event, char *filter_expression,
-               struct lttng_bytecode *filter)
-{
-       int ret;
-       struct ltt_kernel_event *kevent;
-
-       LTTNG_ASSERT(kchan);
-       LTTNG_ASSERT(event);
-
-       kevent = trace_kernel_find_event(event->name, kchan,
-                       event->type, filter);
-       if (kevent == NULL) {
-               ret = kernel_create_event(event, kchan, filter_expression, filter);
-               /* We have passed ownership */
-               filter_expression = NULL;
-               filter = NULL;
-               if (ret) {
-                       goto end;
-               }
-       } else if (kevent->enabled == 0) {
-               ret = kernel_enable_event(kevent);
-               if (ret < 0) {
-                       ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-                       goto end;
-               }
-       } else {
-               /* At this point, the event is considered enabled */
-               ret = LTTNG_ERR_KERN_EVENT_EXIST;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       free(filter_expression);
-       free(filter);
-       return ret;
-}
-
-/*
- * ============================
- * UST : The Ultimate Frontier!
- * ============================
- */
-
-/*
- * Enable UST tracepoint event for a channel from a UST session.
- * We own filter_expression, filter, and exclusion.
- */
-int event_ust_enable_tracepoint(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan, struct lttng_event *event,
-               char *filter_expression,
-               struct lttng_bytecode *filter,
-               struct lttng_event_exclusion *exclusion,
-               bool internal_event)
-{
-       int ret = LTTNG_OK, to_create = 0;
-       struct ltt_ust_event *uevent;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(uchan);
-       LTTNG_ASSERT(event);
-
-       rcu_read_lock();
-
-       uevent = trace_ust_find_event(uchan->events, event->name, filter,
-                       (enum lttng_ust_abi_loglevel_type) event->loglevel_type,
-                       event->loglevel, exclusion);
-       if (!uevent) {
-               ret = trace_ust_create_event(event, filter_expression,
-                               filter, exclusion, internal_event, &uevent);
-               /* We have passed ownership */
-               filter_expression = NULL;
-               filter = NULL;
-               exclusion = NULL;
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-
-               /* Valid to set it after the goto error since uevent is still NULL */
-               to_create = 1;
-       }
-
-       if (uevent->enabled) {
-               /* It's already enabled so everything is OK */
-               LTTNG_ASSERT(!to_create);
-               ret = LTTNG_ERR_UST_EVENT_ENABLED;
-               goto end;
-       }
-
-       uevent->enabled = 1;
-       if (to_create) {
-               /* Add ltt ust event to channel */
-               add_unique_ust_event(uchan->events, uevent);
-       }
-
-       if (!usess->active) {
-               goto end;
-       }
-
-       if (to_create) {
-               /* Create event on all UST registered apps for session */
-               ret = ust_app_create_event_glb(usess, uchan, uevent);
-       } else {
-               /* Enable event on all UST registered apps for session */
-               ret = ust_app_enable_event_glb(usess, uchan, uevent);
-       }
-
-       if (ret < 0) {
-               if (ret == -LTTNG_UST_ERR_EXIST) {
-                       ret = LTTNG_ERR_UST_EVENT_EXIST;
-               } else {
-                       ret = LTTNG_ERR_UST_ENABLE_FAIL;
-               }
-               goto end;
-       }
-
-       DBG("Event UST %s %s in channel %s", uevent->attr.name,
-                       to_create ? "created" : "enabled", uchan->name);
-
-       ret = LTTNG_OK;
-
-end:
-       rcu_read_unlock();
-       free(filter_expression);
-       free(filter);
-       free(exclusion);
-       return ret;
-}
-
-/*
- * Disable UST tracepoint of a channel from a UST session.
- */
-int event_ust_disable_tracepoint(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan, const char *event_name)
-{
-       int ret;
-       struct ltt_ust_event *uevent;
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-       struct lttng_ht *ht;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(uchan);
-       LTTNG_ASSERT(event_name);
-
-       ht = uchan->events;
-
-       rcu_read_lock();
-
-       /*
-        * We use a custom lookup since we need the iterator for the next_duplicate
-        * call in the do while loop below.
-        */
-       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) event_name, lttng_ht_seed),
-                       trace_ust_ht_match_event_by_name, event_name, &iter.iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-       if (node == NULL) {
-               DBG2("Trace UST event NOT found by name %s", event_name);
-               ret = LTTNG_ERR_UST_EVENT_NOT_FOUND;
-               goto error;
-       }
-
-       do {
-               uevent = caa_container_of(node, struct ltt_ust_event, node);
-               LTTNG_ASSERT(uevent);
-
-               if (uevent->enabled == 0) {
-                       /* It's already disabled so everything is OK */
-                       goto next;
-               }
-               uevent->enabled = 0;
-               DBG2("Event UST %s disabled in channel %s", uevent->attr.name,
-                               uchan->name);
-
-               if (!usess->active) {
-                       goto next;
-               }
-               ret = ust_app_disable_event_glb(usess, uchan, uevent);
-               if (ret < 0 && ret != -LTTNG_UST_ERR_EXIST) {
-                       ret = LTTNG_ERR_UST_DISABLE_FAIL;
-                       goto error;
-               }
-next:
-               /* Get next duplicate event by name. */
-               cds_lfht_next_duplicate(ht->ht, trace_ust_ht_match_event_by_name,
-                               event_name, &iter.iter);
-               node = lttng_ht_iter_get_node_str(&iter);
-       } while (node);
-
-       ret = LTTNG_OK;
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Disable all UST tracepoints for a channel from a UST session.
- */
-int event_ust_disable_all_tracepoints(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan)
-{
-       int ret, i, size, error = 0;
-       struct lttng_ht_iter iter;
-       struct ltt_ust_event *uevent = NULL;
-       struct lttng_event *events = NULL;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(uchan);
-
-       rcu_read_lock();
-
-       /* Disabling existing events */
-       cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent,
-                       node.node) {
-               if (uevent->enabled == 1) {
-                       ret = event_ust_disable_tracepoint(usess, uchan,
-                                       uevent->attr.name);
-                       if (ret < 0) {
-                               error = LTTNG_ERR_UST_DISABLE_FAIL;
-                               continue;
-                       }
-               }
-       }
-
-       /* Get all UST available events */
-       size = ust_app_list_events(&events);
-       if (size < 0) {
-               ret = LTTNG_ERR_UST_LIST_FAIL;
-               goto error;
-       }
-
-       for (i = 0; i < size; i++) {
-               ret = event_ust_disable_tracepoint(usess, uchan,
-                               events[i].name);
-               if (ret < 0) {
-                       /* Continue to disable the rest... */
-                       error = LTTNG_ERR_UST_DISABLE_FAIL;
-                       continue;
-               }
-       }
-
-       ret = error ? error : LTTNG_OK;
-error:
-       rcu_read_unlock();
-       free(events);
-       return ret;
-}
-
-static void agent_enable_all(struct agent *agt)
-{
-       struct agent_event *aevent;
-       struct lttng_ht_iter iter;
-
-       /* Flag every event as enabled. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (
-                       agt->events->ht, &iter.iter, aevent, node.node) {
-               aevent->enabled_count++;
-       }
-       rcu_read_unlock();
-}
-
-/*
- * Enable all agent event for a given UST session.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-int event_agent_enable_all(struct ltt_ust_session *usess,
-               struct agent *agt, struct lttng_event *event,
-               struct lttng_bytecode *filter ,char *filter_expression)
-{
-       int ret;
-
-       LTTNG_ASSERT(usess);
-
-       DBG("Event agent enabling ALL events for session %" PRIu64, usess->id);
-
-       /* Enable event on agent application through TCP socket. */
-       ret = event_agent_enable(usess, agt, event, filter, filter_expression);
-       if (ret != LTTNG_OK) {
-               goto error;
-       }
-
-       agent_enable_all(agt);
-
-       ret = LTTNG_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Check if this event's filter requires the activation of application contexts
- * and enable them in the agent.
- * TODO: bytecode iterator does not support non-legacy application
- * contexts yet. Not an issue for now, since they are not generated by
- * the lttng-ctl library.
- */
-static int add_filter_app_ctx(struct lttng_bytecode *bytecode,
-               const char *filter_expression, struct agent *agt)
-{
-       int ret = LTTNG_OK;
-       char *provider_name = NULL, *ctx_name = NULL;
-       struct bytecode_symbol_iterator *it =
-                       bytecode_symbol_iterator_create(bytecode);
-
-       if (!it) {
-               ret = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-       do {
-               struct lttng_event_context ctx;
-               const char *symbol_name =
-                               bytecode_symbol_iterator_get_name(it);
-
-               if (parse_application_context(symbol_name, &provider_name,
-                               &ctx_name)) {
-                       /* Not an application context. */
-                       continue;
-               }
-
-               ctx.ctx = LTTNG_EVENT_CONTEXT_APP_CONTEXT;
-               ctx.u.app_ctx.provider_name = provider_name;
-               ctx.u.app_ctx.ctx_name = ctx_name;
-
-               /* Recognized an application context. */
-               DBG("Enabling event with filter expression \"%s\" requires enabling the %s:%s application context.",
-                               filter_expression, provider_name, ctx_name);
-
-               ret = agent_add_context(&ctx, agt);
-               if (ret != LTTNG_OK) {
-                       ERR("Failed to add application context %s:%s.",
-                                       provider_name, ctx_name);
-                       goto end;
-               }
-
-               ret = agent_enable_context(&ctx, agt->domain);
-               if (ret != LTTNG_OK) {
-                       ERR("Failed to enable application context %s:%s.",
-                                       provider_name, ctx_name);
-                       goto end;
-               }
-
-               free(provider_name);
-               free(ctx_name);
-               provider_name = ctx_name = NULL;
-       } while (bytecode_symbol_iterator_next(it) == 0);
-end:
-       free(provider_name);
-       free(ctx_name);
-       bytecode_symbol_iterator_destroy(it);
-       return ret;
-}
-
-static int agent_enable(struct agent *agt,
-               struct lttng_event *event,
-               struct lttng_bytecode *filter,
-               char *filter_expression)
-{
-       int ret, created = 0;
-       struct agent_event *aevent;
-
-       LTTNG_ASSERT(event);
-       LTTNG_ASSERT(agt);
-
-       aevent = agent_find_event(event->name, event->loglevel_type,
-                       event->loglevel, filter_expression, agt);
-       if (!aevent) {
-               aevent = agent_create_event(event->name, event->loglevel_type,
-                               event->loglevel, filter,
-                               filter_expression);
-               if (!aevent) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
-               filter = NULL;
-               filter_expression = NULL;
-               created = 1;
-               LTTNG_ASSERT(!AGENT_EVENT_IS_ENABLED(aevent));
-       }
-
-       if (created && aevent->filter) {
-               ret = add_filter_app_ctx(
-                               aevent->filter, aevent->filter_expression, agt);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-       }
-
-       /* Already enabled? */
-       if (AGENT_EVENT_IS_ENABLED(aevent)) {
-               ret = LTTNG_OK;
-               goto end;
-       }
-
-       ret = agent_enable_event(aevent, agt->domain);
-       if (ret != LTTNG_OK) {
-               goto error;
-       }
-
-       /* If the event was created prior to the enable, add it to the domain. */
-       if (created) {
-               agent_add_event(aevent, agt);
-       }
-
-       ret = LTTNG_OK;
-       goto end;
-
-error:
-       if (created) {
-               agent_destroy_event(aevent);
-       }
-end:
-       free(filter);
-       free(filter_expression);
-       return ret;
-}
-
-/*
- * Enable a single agent event for a given UST session.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-int event_agent_enable(struct ltt_ust_session *usess,
-               struct agent *agt,
-               struct lttng_event *event,
-               struct lttng_bytecode *filter,
-               char *filter_expression)
-{
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(event);
-       LTTNG_ASSERT(agt);
-
-       DBG("Enabling agent event: event pattern = '%s', session id = %" PRIu64 ", loglevel type = %d, loglevel = %d, filter expression = '%s'",
-                       event->name, usess->id, event->loglevel_type,
-                       event->loglevel,
-                       filter_expression ? filter_expression : "(none)");
-
-       return agent_enable(agt, event, filter, filter_expression);
-}
-
-/*
- * Enable a single agent event for a trigger.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-int trigger_agent_enable(const struct lttng_trigger *trigger, struct agent *agt)
-{
-       int ret;
-       enum lttng_condition_status c_status;
-       enum lttng_trigger_status t_status;
-       enum lttng_domain_type d_type;
-       const struct lttng_condition *condition;
-       const struct lttng_event_rule *rule;
-       const char *filter_expression;
-       char *filter_expression_copy = NULL;
-       const struct lttng_bytecode *filter_bytecode;
-       struct lttng_bytecode *filter_bytecode_copy = NULL;
-       struct lttng_event *event = NULL;
-       uid_t trigger_owner_uid = 0;
-       const char *trigger_name;
-
-       LTTNG_ASSERT(trigger);
-       LTTNG_ASSERT(agt);
-
-       t_status = lttng_trigger_get_name(trigger, &trigger_name);
-       if (t_status != LTTNG_TRIGGER_STATUS_OK) {
-               trigger_name = "(anonymous)";
-       }
-
-       t_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner_uid);
-       LTTNG_ASSERT(t_status == LTTNG_TRIGGER_STATUS_OK);
-
-       condition = lttng_trigger_get_const_condition(trigger);
-
-       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
-                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
-
-       c_status = lttng_condition_event_rule_matches_get_rule(
-                       condition, &rule);
-       LTTNG_ASSERT(c_status == LTTNG_CONDITION_STATUS_OK);
-
-       switch (lttng_event_rule_get_type(rule)) {
-       case LTTNG_EVENT_RULE_TYPE_JUL_LOGGING:
-       case LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING:
-       case LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING:
-               break;
-       default:
-               abort();
-               break;
-       }
-
-       d_type = lttng_event_rule_get_domain_type(rule);
-       LTTNG_ASSERT(d_type == agt->domain);
-
-       event = lttng_event_rule_generate_lttng_event(rule);
-       if (!event) {
-               ret = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-       /* Get the internal filter expression and bytecode. */
-       filter_expression = lttng_event_rule_get_filter(rule);
-       if (filter_expression) {
-               filter_expression_copy = strdup(filter_expression);
-               if (!filter_expression_copy) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto end;
-               }
-
-               /* Get the filter bytecode */
-               filter_bytecode = lttng_event_rule_get_filter_bytecode(rule);
-               if (filter_bytecode) {
-                       filter_bytecode_copy =
-                                       lttng_bytecode_copy(filter_bytecode);
-                       if (!filter_bytecode_copy) {
-                               ret = LTTNG_ERR_NOMEM;
-                               goto end;
-                       }
-               }
-       }
-
-       DBG("Enabling agent event from trigger: trigger name = '%s', trigger owner uid = %d, token = %" PRIu64,
-                       trigger_name, trigger_owner_uid,
-                       lttng_trigger_get_tracer_token(trigger));
-
-       ret = agent_enable(agt, event, filter_bytecode_copy,
-                       filter_expression_copy);
-       /* Ownership was passed even in case of error. */
-       filter_expression_copy = NULL;
-       filter_bytecode_copy = NULL;
-
-end:
-       free(filter_expression_copy);
-       free(filter_bytecode_copy);
-       free(event);
-       return ret;
-}
-
-/*
- * Return the default event name associated with the provided UST domain. Return
- * NULL on error.
- */
-const char *event_get_default_agent_ust_name(enum lttng_domain_type domain)
-{
-       const char *default_event_name = NULL;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_LOG4J:
-               default_event_name = DEFAULT_LOG4J_EVENT_NAME;
-               break;
-       case LTTNG_DOMAIN_JUL:
-               default_event_name = DEFAULT_JUL_EVENT_NAME;
-               break;
-       case LTTNG_DOMAIN_PYTHON:
-               default_event_name = DEFAULT_PYTHON_EVENT_NAME;
-               break;
-       default:
-               abort();
-       }
-
-       return default_event_name;
-}
-
-static int trigger_agent_disable_one(const struct lttng_trigger *trigger,
-               struct agent *agt,
-               struct agent_event *aevent)
-
-{
-       int ret;
-
-       LTTNG_ASSERT(agt);
-       LTTNG_ASSERT(trigger);
-       LTTNG_ASSERT(aevent);
-
-       /*
-        * Actual ust event un-registration happens on the trigger
-        * un-registration at that point.
-        */
-
-       DBG("Event agent disabling %s (loglevel type %d, loglevel value %d) for trigger %" PRIu64,
-                       aevent->name, aevent->loglevel_type,
-                       aevent->loglevel_value, lttng_trigger_get_tracer_token(trigger));
-
-       /* Already disabled? */
-       if (!AGENT_EVENT_IS_ENABLED(aevent)) {
-               goto end;
-       }
-
-       ret = agent_disable_event(aevent, agt->domain);
-       if (ret != LTTNG_OK) {
-               goto error;
-       }
-
-end:
-       return LTTNG_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Disable a given agent event for a given UST session.
- *
- * Must be called with the RCU read lock held.
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-static int event_agent_disable_one(struct ltt_ust_session *usess,
-               struct agent *agt, struct agent_event *aevent)
-{
-       int ret;
-       struct ltt_ust_event *uevent = NULL;
-       struct ltt_ust_channel *uchan = NULL;
-       const char *ust_event_name, *ust_channel_name;
-
-       LTTNG_ASSERT(agt);
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(aevent);
-
-       DBG("Event agent disabling %s (loglevel type %d, loglevel value %d) for session %" PRIu64,
-               aevent->name, aevent->loglevel_type, aevent->loglevel_value,
-               usess->id);
-
-       /* Already disabled? */
-       if (!AGENT_EVENT_IS_ENABLED(aevent)) {
-               goto end;
-       }
-
-       if (agt->domain == LTTNG_DOMAIN_JUL) {
-               ust_channel_name = DEFAULT_JUL_CHANNEL_NAME;
-       } else if (agt->domain == LTTNG_DOMAIN_LOG4J) {
-               ust_channel_name = DEFAULT_LOG4J_CHANNEL_NAME;
-       } else if (agt->domain == LTTNG_DOMAIN_PYTHON) {
-               ust_channel_name = DEFAULT_PYTHON_CHANNEL_NAME;
-       } else {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       /*
-        * Disable it on the UST side. First get the channel reference then find
-        * the event and finally disable it.
-        */
-       uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
-                       (char *) ust_channel_name);
-       if (!uchan) {
-               ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
-               goto error;
-       }
-
-       ust_event_name = event_get_default_agent_ust_name(agt->domain);
-       if (!ust_event_name) {
-               ret = LTTNG_ERR_FATAL;
-               goto error;
-       }
-
-       /*
-        * Agent UST event has its loglevel type forced to
-        * LTTNG_UST_LOGLEVEL_ALL. The actual loglevel type/value filtering
-        * happens thanks to an UST filter. The following -1 is actually
-        * ignored since the type is LTTNG_UST_LOGLEVEL_ALL.
-        */
-       uevent = trace_ust_find_event(uchan->events, (char *) ust_event_name,
-                       aevent->filter, LTTNG_UST_ABI_LOGLEVEL_ALL, -1, NULL);
-       /* If the agent event exists, it must be available on the UST side. */
-       LTTNG_ASSERT(uevent);
-
-       if (usess->active) {
-               ret = ust_app_disable_event_glb(usess, uchan, uevent);
-               if (ret < 0 && ret != -LTTNG_UST_ERR_EXIST) {
-                       ret = LTTNG_ERR_UST_DISABLE_FAIL;
-                       goto error;
-               }
-       }
-
-       /*
-        * Flag event that it's disabled so the shadow copy on the ust app side
-        * will disable it if an application shows up.
-        */
-       uevent->enabled = 0;
-
-       ret = agent_disable_event(aevent, agt->domain);
-       if (ret != LTTNG_OK) {
-               goto error;
-       }
-
-end:
-       return LTTNG_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Disable agent event matching a given trigger.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-int trigger_agent_disable(
-               const struct lttng_trigger *trigger, struct agent *agt)
-{
-       int ret = LTTNG_OK;
-       struct agent_event *aevent;
-
-       LTTNG_ASSERT(trigger);
-       LTTNG_ASSERT(agt);
-
-       DBG("Event agent disabling for trigger %" PRIu64,
-                       lttng_trigger_get_tracer_token(trigger));
-
-       rcu_read_lock();
-       aevent = agent_find_event_by_trigger(trigger, agt);
-
-       if (aevent == NULL) {
-               DBG2("Event agent NOT found by trigger %" PRIu64,
-                               lttng_trigger_get_tracer_token(trigger));
-               ret = LTTNG_ERR_UST_EVENT_NOT_FOUND;
-               goto end;
-       }
-
-       ret = trigger_agent_disable_one(trigger, agt, aevent);
-
-       if (ret != LTTNG_OK) {
-               goto end;
-       }
-
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Disable all agent events matching a given name for a given UST session.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-int event_agent_disable(struct ltt_ust_session *usess, struct agent *agt,
-               const char *event_name)
-{
-       int ret = LTTNG_OK;
-       struct agent_event *aevent;
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_str *node;
-
-       LTTNG_ASSERT(agt);
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(event_name);
-
-       DBG("Event agent disabling %s (all loglevels) for session %" PRIu64, event_name, usess->id);
-
-       rcu_read_lock();
-       agent_find_events_by_name(event_name, agt, &iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-
-       if (node == NULL) {
-               DBG2("Event agent NOT found by name %s", event_name);
-               ret = LTTNG_ERR_UST_EVENT_NOT_FOUND;
-               goto end;
-       }
-
-       do {
-               aevent = caa_container_of(node, struct agent_event, node);
-               ret = event_agent_disable_one(usess, agt, aevent);
-
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-
-               /* Get next duplicate agent event by name. */
-               agent_event_next_duplicate(event_name, agt, &iter);
-               node = lttng_ht_iter_get_node_str(&iter);
-       } while (node);
-end:
-       rcu_read_unlock();
-       return ret;
-}
-/*
- * Disable all agent event for a given UST session.
- *
- * Return LTTNG_OK on success or else a LTTNG_ERR* code.
- */
-int event_agent_disable_all(struct ltt_ust_session *usess,
-               struct agent *agt)
-{
-       int ret;
-       struct agent_event *aevent;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(agt);
-       LTTNG_ASSERT(usess);
-
-       /*
-        * Disable event on agent application. Continue to disable all other events
-        * if the * event is not found.
-        */
-       ret = event_agent_disable(usess, agt, "*");
-       if (ret != LTTNG_OK && ret != LTTNG_ERR_UST_EVENT_NOT_FOUND) {
-               goto error;
-       }
-
-       /* Disable every event. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry(agt->events->ht, &iter.iter, aevent,
-                       node.node) {
-               if (!AGENT_EVENT_IS_ENABLED(aevent)) {
-                       continue;
-               }
-
-               ret = event_agent_disable(usess, agt, aevent->name);
-               if (ret != LTTNG_OK) {
-                       goto error_unlock;
-               }
-       }
-       ret = LTTNG_OK;
-
-error_unlock:
-       rcu_read_unlock();
-error:
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/event.cpp b/src/bin/lttng-sessiond/event.cpp
new file mode 100644 (file)
index 0000000..284a8c4
--- /dev/null
@@ -0,0 +1,928 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <urcu/list.h>
+#include <string.h>
+
+#include <common/compat/errno.h>
+#include <lttng/lttng.h>
+#include <lttng/condition/condition.h>
+#include <lttng/condition/event-rule-matches.h>
+#include <lttng/event-rule/event-rule.h>
+#include <lttng/event-rule/event-rule-internal.h>
+#include <common/bytecode/bytecode.h>
+#include <common/error.h>
+#include <common/sessiond-comm/sessiond-comm.h>
+#include <common/filter.h>
+#include <common/context.h>
+
+#include "channel.h"
+#include "event.h"
+#include "kernel.h"
+#include "lttng-sessiond.h"
+#include "lttng-ust-ctl.h"
+#include "lttng-ust-error.h"
+#include "ust-app.h"
+#include "trace-kernel.h"
+#include "trace-ust.h"
+#include "agent.h"
+#include "utils.h"
+
+/*
+ * Add unique UST event based on the event name, filter bytecode and loglevel.
+ */
+static void add_unique_ust_event(struct lttng_ht *ht,
+               struct ltt_ust_event *event)
+{
+       struct cds_lfht_node *node_ptr;
+       struct ltt_ust_ht_key key;
+
+       LTTNG_ASSERT(ht);
+       LTTNG_ASSERT(ht->ht);
+       LTTNG_ASSERT(event);
+
+       key.name = event->attr.name;
+       key.filter = (struct lttng_bytecode *) event->filter;
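+       /* C++ rejects the implicit conversion C accepted here, hence the cast. */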
+       key.loglevel_type = (lttng_ust_abi_loglevel_type) event->attr.loglevel_type;
+       key.loglevel_value = event->attr.loglevel;
+       key.exclusion = event->exclusion;
+
+       node_ptr = cds_lfht_add_unique(ht->ht,
+                       ht->hash_fct(event->node.key, lttng_ht_seed),
+                       trace_ust_ht_match_event, &key, &event->node.node);
+       LTTNG_ASSERT(node_ptr == &event->node.node);
+}
+
+/*
+ * Disable kernel tracepoint events for a channel from the kernel session of
+ * a specified event_name and event type.
+ * On type LTTNG_EVENT_ALL all events with event_name are disabled.
+ * If event_name is NULL all events of the specified type are disabled.
+ */
+int event_kernel_disable_event(struct ltt_kernel_channel *kchan,
+               const char *event_name, enum lttng_event_type type)
+{
+       int ret, error = 0, found = 0;
+       struct ltt_kernel_event *kevent;
+
+       LTTNG_ASSERT(kchan);
+
+       /* For each event in the kernel session */
+       cds_list_for_each_entry(kevent, &kchan->events_list.head, list) {
+               if (type != LTTNG_EVENT_ALL && kevent->type != type)
+                       continue;
+               if (event_name != NULL && strcmp(event_name, kevent->event->name)) {
+                       continue;
+               }
+               found++;
+               ret = kernel_disable_event(kevent);
+               if (ret < 0) {
+                       error = 1;
+                       continue;
+               }
+       }
+       DBG("Disable kernel event: found %d events with name: %s and type: %d",
+                       found, event_name ? event_name : "NULL", type);
+
+       if (event_name != NULL && !found) {
+               ret = LTTNG_ERR_NO_EVENT;
+       } else {
+               ret = error ? LTTNG_ERR_KERN_DISABLE_FAIL : LTTNG_OK;
+       }
+
+       return ret;
+}
+
+/*
+ * Enable kernel tracepoint event for a channel from the kernel session.
+ * We own filter_expression and filter.
+ */
+int event_kernel_enable_event(struct ltt_kernel_channel *kchan,
+               struct lttng_event *event, char *filter_expression,
+               struct lttng_bytecode *filter)
+{
+       int ret;
+       struct ltt_kernel_event *kevent;
+
+       LTTNG_ASSERT(kchan);
+       LTTNG_ASSERT(event);
+
+       kevent = trace_kernel_find_event(event->name, kchan,
+                       event->type, filter);
+       if (kevent == NULL) {
+               ret = kernel_create_event(event, kchan, filter_expression, filter);
+               /* We have passed ownership */
+               filter_expression = NULL;
+               filter = NULL;
+               if (ret) {
+                       goto end;
+               }
+       } else if (kevent->enabled == 0) {
+               ret = kernel_enable_event(kevent);
+               if (ret < 0) {
+                       ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+                       goto end;
+               }
+       } else {
+               /* At this point, the event is considered enabled */
+               ret = LTTNG_ERR_KERN_EVENT_EXIST;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       free(filter_expression);
+       free(filter);
+       return ret;
+}
+
+/*
+ * ============================
+ * UST : The Ultimate Frontier!
+ * ============================
+ */
+
+/*
+ * Enable UST tracepoint event for a channel from a UST session.
+ * We own filter_expression, filter, and exclusion.
+ */
+int event_ust_enable_tracepoint(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan, struct lttng_event *event,
+               char *filter_expression,
+               struct lttng_bytecode *filter,
+               struct lttng_event_exclusion *exclusion,
+               bool internal_event)
+{
+       int ret = LTTNG_OK, to_create = 0;
+       struct ltt_ust_event *uevent;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(uchan);
+       LTTNG_ASSERT(event);
+
+       rcu_read_lock();
+
+       uevent = trace_ust_find_event(uchan->events, event->name, filter,
+                       (enum lttng_ust_abi_loglevel_type) event->loglevel_type,
+                       event->loglevel, exclusion);
+       if (!uevent) {
+               ret = trace_ust_create_event(event, filter_expression,
+                               filter, exclusion, internal_event, &uevent);
+               /* We have passed ownership */
+               filter_expression = NULL;
+               filter = NULL;
+               exclusion = NULL;
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+
+               /* Valid to set it after the goto error since uevent is still NULL */
+               to_create = 1;
+       }
+
+       if (uevent->enabled) {
+               /* It's already enabled so everything is OK */
+               LTTNG_ASSERT(!to_create);
+               ret = LTTNG_ERR_UST_EVENT_ENABLED;
+               goto end;
+       }
+
+       uevent->enabled = 1;
+       if (to_create) {
+               /* Add ltt ust event to channel */
+               add_unique_ust_event(uchan->events, uevent);
+       }
+
+       if (!usess->active) {
+               goto end;
+       }
+
+       if (to_create) {
+               /* Create event on all UST registered apps for session */
+               ret = ust_app_create_event_glb(usess, uchan, uevent);
+       } else {
+               /* Enable event on all UST registered apps for session */
+               ret = ust_app_enable_event_glb(usess, uchan, uevent);
+       }
+
+       if (ret < 0) {
+               if (ret == -LTTNG_UST_ERR_EXIST) {
+                       ret = LTTNG_ERR_UST_EVENT_EXIST;
+               } else {
+                       ret = LTTNG_ERR_UST_ENABLE_FAIL;
+               }
+               goto end;
+       }
+
+       DBG("Event UST %s %s in channel %s", uevent->attr.name,
+                       to_create ? "created" : "enabled", uchan->name);
+
+       ret = LTTNG_OK;
+
+end:
+       rcu_read_unlock();
+       free(filter_expression);
+       free(filter);
+       free(exclusion);
+       return ret;
+}
+
+/*
+ * Disable UST tracepoint of a channel from a UST session.
+ */
+int event_ust_disable_tracepoint(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan, const char *event_name)
+{
+       int ret;
+       struct ltt_ust_event *uevent;
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+       struct lttng_ht *ht;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(uchan);
+       LTTNG_ASSERT(event_name);
+
+       ht = uchan->events;
+
+       rcu_read_lock();
+
+       /*
+        * We use a custom lookup since we need the iterator for the next_duplicate
+        * call in the do while loop below.
+        */
+       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) event_name, lttng_ht_seed),
+                       trace_ust_ht_match_event_by_name, event_name, &iter.iter);
+       node = lttng_ht_iter_get_node_str(&iter);
+       if (node == NULL) {
+               DBG2("Trace UST event NOT found by name %s", event_name);
+               ret = LTTNG_ERR_UST_EVENT_NOT_FOUND;
+               goto error;
+       }
+
+       do {
+               uevent = caa_container_of(node, struct ltt_ust_event, node);
+               LTTNG_ASSERT(uevent);
+
+               if (uevent->enabled == 0) {
+                       /* It's already disabled so everything is OK */
+                       goto next;
+               }
+               uevent->enabled = 0;
+               DBG2("Event UST %s disabled in channel %s", uevent->attr.name,
+                               uchan->name);
+
+               if (!usess->active) {
+                       goto next;
+               }
+               ret = ust_app_disable_event_glb(usess, uchan, uevent);
+               if (ret < 0 && ret != -LTTNG_UST_ERR_EXIST) {
+                       ret = LTTNG_ERR_UST_DISABLE_FAIL;
+                       goto error;
+               }
+next:
+               /* Get next duplicate event by name. */
+               cds_lfht_next_duplicate(ht->ht, trace_ust_ht_match_event_by_name,
+                               event_name, &iter.iter);
+               node = lttng_ht_iter_get_node_str(&iter);
+       } while (node);
+
+       ret = LTTNG_OK;
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Disable all UST tracepoints for a channel from a UST session.
+ */
+int event_ust_disable_all_tracepoints(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan)
+{
+       int ret, i, size, error = 0;
+       struct lttng_ht_iter iter;
+       struct ltt_ust_event *uevent = NULL;
+       struct lttng_event *events = NULL;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(uchan);
+
+       rcu_read_lock();
+
+       /* Disabling existing events */
+       cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent,
+                       node.node) {
+               if (uevent->enabled == 1) {
+                       ret = event_ust_disable_tracepoint(usess, uchan,
+                                       uevent->attr.name);
+                       if (ret < 0) {
+                               error = LTTNG_ERR_UST_DISABLE_FAIL;
+                               continue;
+                       }
+               }
+       }
+
+       /* Get all UST available events */
+       size = ust_app_list_events(&events);
+       if (size < 0) {
+               ret = LTTNG_ERR_UST_LIST_FAIL;
+               goto error;
+       }
+
+       for (i = 0; i < size; i++) {
+               ret = event_ust_disable_tracepoint(usess, uchan,
+                               events[i].name);
+               if (ret < 0) {
+                       /* Continue to disable the rest... */
+                       error = LTTNG_ERR_UST_DISABLE_FAIL;
+                       continue;
+               }
+       }
+
+       ret = error ? error : LTTNG_OK;
+error:
+       rcu_read_unlock();
+       free(events);
+       return ret;
+}
+
+static void agent_enable_all(struct agent *agt)
+{
+       struct agent_event *aevent;
+       struct lttng_ht_iter iter;
+
+       /* Flag every event as enabled. */
+       rcu_read_lock();
+       cds_lfht_for_each_entry (
+                       agt->events->ht, &iter.iter, aevent, node.node) {
+               aevent->enabled_count++;
+       }
+       rcu_read_unlock();
+}
+
+/*
+ * Enable all agent events for a given UST session.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+int event_agent_enable_all(struct ltt_ust_session *usess,
+               struct agent *agt, struct lttng_event *event,
+               struct lttng_bytecode *filter, char *filter_expression)
+{
+       int ret;
+
+       LTTNG_ASSERT(usess);
+
+       DBG("Event agent enabling ALL events for session %" PRIu64, usess->id);
+
+       /* Enable event on agent application through TCP socket. */
+       ret = event_agent_enable(usess, agt, event, filter, filter_expression);
+       if (ret != LTTNG_OK) {
+               goto error;
+       }
+
+       agent_enable_all(agt);
+
+       ret = LTTNG_OK;
+
+error:
+       return ret;
+}
+
+/*
+ * Check if this event's filter requires the activation of application contexts
+ * and enable them in the agent.
+ * TODO: bytecode iterator does not support non-legacy application
+ * contexts yet. Not an issue for now, since they are not generated by
+ * the lttng-ctl library.
+ */
+static int add_filter_app_ctx(struct lttng_bytecode *bytecode,
+               const char *filter_expression, struct agent *agt)
+{
+       int ret = LTTNG_OK;
+       char *provider_name = NULL, *ctx_name = NULL;
+       struct bytecode_symbol_iterator *it =
+                       bytecode_symbol_iterator_create(bytecode);
+
+       if (!it) {
+               ret = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       do {
+               struct lttng_event_context ctx;
+               const char *symbol_name =
+                               bytecode_symbol_iterator_get_name(it);
+
+               if (parse_application_context(symbol_name, &provider_name,
+                               &ctx_name)) {
+                       /* Not an application context. */
+                       continue;
+               }
+
+               ctx.ctx = LTTNG_EVENT_CONTEXT_APP_CONTEXT;
+               ctx.u.app_ctx.provider_name = provider_name;
+               ctx.u.app_ctx.ctx_name = ctx_name;
+
+               /* Recognized an application context. */
+               DBG("Enabling event with filter expression \"%s\" requires enabling the %s:%s application context.",
+                               filter_expression, provider_name, ctx_name);
+
+               ret = agent_add_context(&ctx, agt);
+               if (ret != LTTNG_OK) {
+                       ERR("Failed to add application context %s:%s.",
+                                       provider_name, ctx_name);
+                       goto end;
+               }
+
+               ret = agent_enable_context(&ctx, agt->domain);
+               if (ret != LTTNG_OK) {
+                       ERR("Failed to enable application context %s:%s.",
+                                       provider_name, ctx_name);
+                       goto end;
+               }
+
+               free(provider_name);
+               free(ctx_name);
+               provider_name = ctx_name = NULL;
+       } while (bytecode_symbol_iterator_next(it) == 0);
+end:
+       free(provider_name);
+       free(ctx_name);
+       bytecode_symbol_iterator_destroy(it);
+       return ret;
+}
+
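+/*
+ * Create (if needed) and enable an agent event. Takes ownership of 'filter'
+ * and 'filter_expression'.
+ */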
+static int agent_enable(struct agent *agt,
+               struct lttng_event *event,
+               struct lttng_bytecode *filter,
+               char *filter_expression)
+{
+       int ret, created = 0;
+       struct agent_event *aevent;
+
+       LTTNG_ASSERT(event);
+       LTTNG_ASSERT(agt);
+
+       aevent = agent_find_event(event->name, event->loglevel_type,
+                       event->loglevel, filter_expression, agt);
+       if (!aevent) {
+               aevent = agent_create_event(event->name, event->loglevel_type,
+                               event->loglevel, filter,
+                               filter_expression);
+               if (!aevent) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto error;
+               }
+               filter = NULL;
+               filter_expression = NULL;
+               created = 1;
+               LTTNG_ASSERT(!AGENT_EVENT_IS_ENABLED(aevent));
+       }
+
+       if (created && aevent->filter) {
+               ret = add_filter_app_ctx(
+                               aevent->filter, aevent->filter_expression, agt);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+       }
+
+       /* Already enabled? */
+       if (AGENT_EVENT_IS_ENABLED(aevent)) {
+               ret = LTTNG_OK;
+               goto end;
+       }
+
+       ret = agent_enable_event(aevent, agt->domain);
+       if (ret != LTTNG_OK) {
+               goto error;
+       }
+
+       /* If the event was created prior to the enable, add it to the domain. */
+       if (created) {
+               agent_add_event(aevent, agt);
+       }
+
+       ret = LTTNG_OK;
+       goto end;
+
+error:
+       if (created) {
+               agent_destroy_event(aevent);
+       }
+end:
+       free(filter);
+       free(filter_expression);
+       return ret;
+}
+
+/*
+ * Enable a single agent event for a given UST session.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+int event_agent_enable(struct ltt_ust_session *usess,
+               struct agent *agt,
+               struct lttng_event *event,
+               struct lttng_bytecode *filter,
+               char *filter_expression)
+{
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(event);
+       LTTNG_ASSERT(agt);
+
+       DBG("Enabling agent event: event pattern = '%s', session id = %" PRIu64 ", loglevel type = %d, loglevel = %d, filter expression = '%s'",
+                       event->name, usess->id, event->loglevel_type,
+                       event->loglevel,
+                       filter_expression ? filter_expression : "(none)");
+
+       return agent_enable(agt, event, filter, filter_expression);
+}
+
+/*
+ * Enable a single agent event for a trigger.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+int trigger_agent_enable(const struct lttng_trigger *trigger, struct agent *agt)
+{
+       int ret;
+       enum lttng_condition_status c_status;
+       enum lttng_trigger_status t_status;
+       enum lttng_domain_type d_type;
+       const struct lttng_condition *condition;
+       const struct lttng_event_rule *rule;
+       const char *filter_expression;
+       char *filter_expression_copy = NULL;
+       const struct lttng_bytecode *filter_bytecode;
+       struct lttng_bytecode *filter_bytecode_copy = NULL;
+       struct lttng_event *event = NULL;
+       uid_t trigger_owner_uid = 0;
+       const char *trigger_name;
+
+       LTTNG_ASSERT(trigger);
+       LTTNG_ASSERT(agt);
+
+       t_status = lttng_trigger_get_name(trigger, &trigger_name);
+       if (t_status != LTTNG_TRIGGER_STATUS_OK) {
+               trigger_name = "(anonymous)";
+       }
+
+       t_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner_uid);
+       LTTNG_ASSERT(t_status == LTTNG_TRIGGER_STATUS_OK);
+
+       condition = lttng_trigger_get_const_condition(trigger);
+
+       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
+                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
+
+       c_status = lttng_condition_event_rule_matches_get_rule(
+                       condition, &rule);
+       LTTNG_ASSERT(c_status == LTTNG_CONDITION_STATUS_OK);
+
+       switch (lttng_event_rule_get_type(rule)) {
+       case LTTNG_EVENT_RULE_TYPE_JUL_LOGGING:
+       case LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING:
+       case LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING:
+               break;
+       default:
+               abort();
+               break;
+       }
+
+       d_type = lttng_event_rule_get_domain_type(rule);
+       LTTNG_ASSERT(d_type == agt->domain);
+
+       event = lttng_event_rule_generate_lttng_event(rule);
+       if (!event) {
+               ret = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       /* Get the internal filter expression and bytecode. */
+       filter_expression = lttng_event_rule_get_filter(rule);
+       if (filter_expression) {
+               filter_expression_copy = strdup(filter_expression);
+               if (!filter_expression_copy) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+
+               /* Get the filter bytecode */
+               filter_bytecode = lttng_event_rule_get_filter_bytecode(rule);
+               if (filter_bytecode) {
+                       filter_bytecode_copy =
+                                       lttng_bytecode_copy(filter_bytecode);
+                       if (!filter_bytecode_copy) {
+                               ret = LTTNG_ERR_NOMEM;
+                               goto end;
+                       }
+               }
+       }
+
+       DBG("Enabling agent event from trigger: trigger name = '%s', trigger owner uid = %d, token = %" PRIu64,
+                       trigger_name, trigger_owner_uid,
+                       lttng_trigger_get_tracer_token(trigger));
+
+       ret = agent_enable(agt, event, filter_bytecode_copy,
+                       filter_expression_copy);
+       /* Ownership was passed even in case of error. */
+       filter_expression_copy = NULL;
+       filter_bytecode_copy = NULL;
+
+end:
+       free(filter_expression_copy);
+       free(filter_bytecode_copy);
+       free(event);
+       return ret;
+}
+
+/*
+ * Return the default event name associated with the provided UST domain.
+ * Aborts if the domain is not an agent domain.
+ */
+const char *event_get_default_agent_ust_name(enum lttng_domain_type domain)
+{
+       const char *default_event_name = NULL;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_LOG4J:
+               default_event_name = DEFAULT_LOG4J_EVENT_NAME;
+               break;
+       case LTTNG_DOMAIN_JUL:
+               default_event_name = DEFAULT_JUL_EVENT_NAME;
+               break;
+       case LTTNG_DOMAIN_PYTHON:
+               default_event_name = DEFAULT_PYTHON_EVENT_NAME;
+               break;
+       default:
+               abort();
+       }
+
+       return default_event_name;
+}
+
+static int trigger_agent_disable_one(const struct lttng_trigger *trigger,
+               struct agent *agt,
+               struct agent_event *aevent)
+
+{
+       int ret;
+
+       LTTNG_ASSERT(agt);
+       LTTNG_ASSERT(trigger);
+       LTTNG_ASSERT(aevent);
+
+       /*
+        * The actual UST event un-registration happens when the trigger
+        * itself is un-registered.
+        */
+
+       DBG("Event agent disabling %s (loglevel type %d, loglevel value %d) for trigger %" PRIu64,
+                       aevent->name, aevent->loglevel_type,
+                       aevent->loglevel_value, lttng_trigger_get_tracer_token(trigger));
+
+       /* Already disabled? */
+       if (!AGENT_EVENT_IS_ENABLED(aevent)) {
+               goto end;
+       }
+
+       ret = agent_disable_event(aevent, agt->domain);
+       if (ret != LTTNG_OK) {
+               goto error;
+       }
+
+end:
+       return LTTNG_OK;
+
+error:
+       return ret;
+}
+
+/*
+ * Disable a given agent event for a given UST session.
+ *
+ * Must be called with the RCU read lock held.
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+static int event_agent_disable_one(struct ltt_ust_session *usess,
+               struct agent *agt, struct agent_event *aevent)
+{
+       int ret;
+       struct ltt_ust_event *uevent = NULL;
+       struct ltt_ust_channel *uchan = NULL;
+       const char *ust_event_name, *ust_channel_name;
+
+       LTTNG_ASSERT(agt);
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(aevent);
+
+       DBG("Event agent disabling %s (loglevel type %d, loglevel value %d) for session %" PRIu64,
+               aevent->name, aevent->loglevel_type, aevent->loglevel_value,
+               usess->id);
+
+       /* Already disabled? */
+       if (!AGENT_EVENT_IS_ENABLED(aevent)) {
+               goto end;
+       }
+
+       if (agt->domain == LTTNG_DOMAIN_JUL) {
+               ust_channel_name = DEFAULT_JUL_CHANNEL_NAME;
+       } else if (agt->domain == LTTNG_DOMAIN_LOG4J) {
+               ust_channel_name = DEFAULT_LOG4J_CHANNEL_NAME;
+       } else if (agt->domain == LTTNG_DOMAIN_PYTHON) {
+               ust_channel_name = DEFAULT_PYTHON_CHANNEL_NAME;
+       } else {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       /*
+        * Disable it on the UST side. First get the channel reference, then
+        * find the event and finally disable it.
+        */
+       uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
+                       (char *) ust_channel_name);
+       if (!uchan) {
+               ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+               goto error;
+       }
+
+       ust_event_name = event_get_default_agent_ust_name(agt->domain);
+       if (!ust_event_name) {
+               ret = LTTNG_ERR_FATAL;
+               goto error;
+       }
+
+       /*
+        * An agent UST event has its loglevel type forced to
+        * LTTNG_UST_LOGLEVEL_ALL. The actual loglevel type/value filtering is
+        * performed by a UST filter. The following -1 is ignored since the
+        * type is LTTNG_UST_LOGLEVEL_ALL.
+        */
+       uevent = trace_ust_find_event(uchan->events, (char *) ust_event_name,
+                       aevent->filter, LTTNG_UST_ABI_LOGLEVEL_ALL, -1, NULL);
+       /* If the agent event exists, it must be available on the UST side. */
+       LTTNG_ASSERT(uevent);
+
+       if (usess->active) {
+               ret = ust_app_disable_event_glb(usess, uchan, uevent);
+               if (ret < 0 && ret != -LTTNG_UST_ERR_EXIST) {
+                       ret = LTTNG_ERR_UST_DISABLE_FAIL;
+                       goto error;
+               }
+       }
+
+       /*
+        * Flag event that it's disabled so the shadow copy on the ust app side
+        * will disable it if an application shows up.
+        */
+       uevent->enabled = 0;
+
+       ret = agent_disable_event(aevent, agt->domain);
+       if (ret != LTTNG_OK) {
+               goto error;
+       }
+
+end:
+       return LTTNG_OK;
+
+error:
+       return ret;
+}
+
+/*
+ * Disable agent event matching a given trigger.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+int trigger_agent_disable(
+               const struct lttng_trigger *trigger, struct agent *agt)
+{
+       int ret = LTTNG_OK;
+       struct agent_event *aevent;
+
+       LTTNG_ASSERT(trigger);
+       LTTNG_ASSERT(agt);
+
+       DBG("Event agent disabling for trigger %" PRIu64,
+                       lttng_trigger_get_tracer_token(trigger));
+
+       rcu_read_lock();
+       aevent = agent_find_event_by_trigger(trigger, agt);
+
+       if (aevent == NULL) {
+               DBG2("Event agent NOT found by trigger %" PRIu64,
+                               lttng_trigger_get_tracer_token(trigger));
+               ret = LTTNG_ERR_UST_EVENT_NOT_FOUND;
+               goto end;
+       }
+
+       ret = trigger_agent_disable_one(trigger, agt, aevent);
+
+       if (ret != LTTNG_OK) {
+               goto end;
+       }
+
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Disable all agent events matching a given name for a given UST session.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+int event_agent_disable(struct ltt_ust_session *usess, struct agent *agt,
+               const char *event_name)
+{
+       int ret = LTTNG_OK;
+       struct agent_event *aevent;
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_str *node;
+
+       LTTNG_ASSERT(agt);
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(event_name);
+
+       DBG("Event agent disabling %s (all loglevels) for session %" PRIu64, event_name, usess->id);
+
+       rcu_read_lock();
+       agent_find_events_by_name(event_name, agt, &iter);
+       node = lttng_ht_iter_get_node_str(&iter);
+
+       if (node == NULL) {
+               DBG2("Event agent NOT found by name %s", event_name);
+               ret = LTTNG_ERR_UST_EVENT_NOT_FOUND;
+               goto end;
+       }
+
+       do {
+               aevent = caa_container_of(node, struct agent_event, node);
+               ret = event_agent_disable_one(usess, agt, aevent);
+
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+
+               /* Get next duplicate agent event by name. */
+               agent_event_next_duplicate(event_name, agt, &iter);
+               node = lttng_ht_iter_get_node_str(&iter);
+       } while (node);
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Disable all agent events for a given UST session.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR* code.
+ */
+int event_agent_disable_all(struct ltt_ust_session *usess,
+               struct agent *agt)
+{
+       int ret;
+       struct agent_event *aevent;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(agt);
+       LTTNG_ASSERT(usess);
+
+       /*
+        * Disable the event on the agent application. Continue disabling all
+        * other events even if the "*" event is not found.
+        */
+       ret = event_agent_disable(usess, agt, "*");
+       if (ret != LTTNG_OK && ret != LTTNG_ERR_UST_EVENT_NOT_FOUND) {
+               goto error;
+       }
+
+       /* Disable every event. */
+       rcu_read_lock();
+       cds_lfht_for_each_entry(agt->events->ht, &iter.iter, aevent,
+                       node.node) {
+               if (!AGENT_EVENT_IS_ENABLED(aevent)) {
+                       continue;
+               }
+
+               ret = event_agent_disable(usess, agt, aevent->name);
+               if (ret != LTTNG_OK) {
+                       goto error_unlock;
+               }
+       }
+       ret = LTTNG_OK;
+
+error_unlock:
+       rcu_read_unlock();
+error:
+       return ret;
+}
diff --git a/src/bin/lttng-sessiond/fd-limit.c b/src/bin/lttng-sessiond/fd-limit.c
deleted file mode 100644 (file)
index 973f7d7..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <urcu/uatomic.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <stdio.h>
-#include "fd-limit.h"
-#include <common/error.h>
-#include <common/compat/errno.h>
-
-/* total count of fd. */
-static long fd_count;
-
-/*
- * threshold in % of number of fd allowed.
- */
-static long fd_threshold[LTTNG_FD_NR_TYPES] = {
-       [LTTNG_FD_APPS] = 75,
-};
-
-static rlim_t max_nr_fd;
-
-int lttng_fd_get(enum lttng_fd_type type, unsigned int nr)
-{
-       long newval;
-
-       if (type >= LTTNG_FD_NR_TYPES) {
-               return -EINVAL;
-       }
-
-       newval = uatomic_add_return(&fd_count, (long) nr);
-       if ((long) (newval * 100)
-                       - (long) (max_nr_fd * fd_threshold[type]) > 0) {
-               uatomic_sub(&fd_count, (long) nr);
-               return -EPERM;
-       }
-       return 0;
-}
-
-void lttng_fd_put(enum lttng_fd_type type, unsigned int nr)
-{
-       uatomic_sub(&fd_count, (long) nr);
-}
-
-void lttng_fd_init(void)
-{
-       struct rlimit rlim;
-       int ret;
-
-       ret = getrlimit(RLIMIT_NOFILE, &rlim);
-       if (ret < 0) {
-               PERROR("getrlimit");
-       }
-       max_nr_fd = rlim.rlim_cur;
-}
diff --git a/src/bin/lttng-sessiond/fd-limit.cpp b/src/bin/lttng-sessiond/fd-limit.cpp
new file mode 100644 (file)
index 0000000..833b647
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <urcu/uatomic.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <stdio.h>
+#include "fd-limit.h"
+#include <common/error.h>
+#include <common/compat/errno.h>
+
+/* Total count of file descriptors tracked through lttng_fd_get()/lttng_fd_put(). */
+static long fd_count;
+
+/*
+ * Per-type thresholds, in % of the number of file descriptors allowed.
+ */
+static long fd_threshold[LTTNG_FD_NR_TYPES] = {
+       75, /* LTTNG_FD_APPS */
+};
+
+static rlim_t max_nr_fd;
+
+int lttng_fd_get(enum lttng_fd_type type, unsigned int nr)
+{
+       long newval;
+
+       if (type >= LTTNG_FD_NR_TYPES) {
+               return -EINVAL;
+       }
+
+       newval = uatomic_add_return(&fd_count, (long) nr);
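+       /* Fail if the new total would exceed fd_threshold[type]% of max_nr_fd. */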
+       if ((long) (newval * 100)
+                       - (long) (max_nr_fd * fd_threshold[type]) > 0) {
+               uatomic_sub(&fd_count, (long) nr);
+               return -EPERM;
+       }
+       return 0;
+}
+
+void lttng_fd_put(enum lttng_fd_type type, unsigned int nr)
+{
+       uatomic_sub(&fd_count, (long) nr);
+}
+
+void lttng_fd_init(void)
+{
+       struct rlimit rlim;
+       int ret;
+
+       ret = getrlimit(RLIMIT_NOFILE, &rlim);
+       if (ret < 0) {
+               PERROR("getrlimit");
+       }
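+       /* Use the soft RLIMIT_NOFILE limit as the base for the per-type thresholds. */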
+       max_nr_fd = rlim.rlim_cur;
+}
diff --git a/src/bin/lttng-sessiond/globals.c b/src/bin/lttng-sessiond/globals.c
deleted file mode 100644 (file)
index ee08ef1..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include "lttng-sessiond.h"
-#include <common/uuid.h>
-
-lttng_uuid the_sessiond_uuid;
-
-int the_ust_consumerd64_fd = -1;
-int the_ust_consumerd32_fd = -1;
-
-long the_page_size;
-
-struct health_app *the_health_sessiond;
-
-struct notification_thread_handle *the_notification_thread_handle;
-
-struct lttng_ht *the_agent_apps_ht_by_sock = NULL;
-struct lttng_ht *the_trigger_agents_ht_by_domain = NULL;
-
-struct lttng_kernel_abi_tracer_version the_kernel_tracer_version;
-struct lttng_kernel_abi_tracer_abi_version the_kernel_tracer_abi_version;
-
-int the_kernel_poll_pipe[2] = {-1, -1};
-
-pid_t the_ppid;
-pid_t the_child_ppid;
-
-struct sessiond_config the_config;
-
-struct consumer_data the_kconsumer_data = {
-       .type = LTTNG_CONSUMER_KERNEL,
-       .err_sock = -1,
-       .cmd_sock = -1,
-       .channel_monitor_pipe = -1,
-       .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
-       .lock = PTHREAD_MUTEX_INITIALIZER,
-};
-
-struct consumer_data the_ustconsumer64_data = {
-       .type = LTTNG_CONSUMER64_UST,
-       .err_sock = -1,
-       .cmd_sock = -1,
-       .channel_monitor_pipe = -1,
-       .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
-       .lock = PTHREAD_MUTEX_INITIALIZER,
-};
-
-struct consumer_data the_ustconsumer32_data = {
-       .type = LTTNG_CONSUMER32_UST,
-       .err_sock = -1,
-       .cmd_sock = -1,
-       .channel_monitor_pipe = -1,
-       .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
-       .lock = PTHREAD_MUTEX_INITIALIZER,
-};
-
-enum consumerd_state the_ust_consumerd_state;
-enum consumerd_state the_kernel_consumerd_state;
-
-static void __attribute__((constructor)) init_sessiond_uuid(void)
-{
-       if (lttng_uuid_generate(the_sessiond_uuid)) {
-               ERR("Failed to generate a session daemon UUID");
-               abort();
-       }
-}
diff --git a/src/bin/lttng-sessiond/globals.cpp b/src/bin/lttng-sessiond/globals.cpp
new file mode 100644 (file)
index 0000000..b4cedf3
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include "lttng-sessiond.h"
+#include <common/uuid.h>
+
+lttng_uuid the_sessiond_uuid;
+
+int the_ust_consumerd64_fd = -1;
+int the_ust_consumerd32_fd = -1;
+
+long the_page_size;
+
+struct health_app *the_health_sessiond;
+
+struct notification_thread_handle *the_notification_thread_handle;
+
+struct lttng_ht *the_agent_apps_ht_by_sock = NULL;
+struct lttng_ht *the_trigger_agents_ht_by_domain = NULL;
+
+struct lttng_kernel_abi_tracer_version the_kernel_tracer_version;
+struct lttng_kernel_abi_tracer_abi_version the_kernel_tracer_abi_version;
+
+int the_kernel_poll_pipe[2] = {-1, -1};
+
+pid_t the_ppid;
+pid_t the_child_ppid;
+
+struct sessiond_config the_config;
+
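+/*
+ * The designated initializers used in the C version of this file are not
+ * usable in C++; the consumer_data instances are instead constructed from
+ * their consumer type.
+ */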
+consumer_data the_kconsumer_data(LTTNG_CONSUMER_KERNEL);
+consumer_data the_ustconsumer64_data(LTTNG_CONSUMER64_UST);
+consumer_data the_ustconsumer32_data(LTTNG_CONSUMER32_UST);
+
+enum consumerd_state the_ust_consumerd_state;
+enum consumerd_state the_kernel_consumerd_state;
+
+static void __attribute__((constructor)) init_sessiond_uuid(void)
+{
+       if (lttng_uuid_generate(the_sessiond_uuid)) {
+               ERR("Failed to generate a session daemon UUID");
+               abort();
+       }
+}
diff --git a/src/bin/lttng-sessiond/health.c b/src/bin/lttng-sessiond/health.c
deleted file mode 100644 (file)
index 7fc557d..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
- * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include "lttng-sessiond.h"
-#include "health-sessiond.h"
-#include <common/macros.h>
-#include <common/error.h>
-#include <common/utils.h>
-#include <common/pipe.h>
-#include <inttypes.h>
-#include <sys/stat.h>
-#include "utils.h"
-#include "thread.h"
-
-struct thread_notifiers {
-       struct lttng_pipe *quit_pipe;
-       sem_t ready;
-};
-
-static
-void mark_thread_as_ready(struct thread_notifiers *notifiers)
-{
-       DBG("Marking health management thread as ready");
-       sem_post(&notifiers->ready);
-}
-
-static
-void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
-{
-       DBG("Waiting for health management thread to be ready");
-       sem_wait(&notifiers->ready);
-       DBG("Health management thread is ready");
-}
-
-static void cleanup_health_management_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-
-       lttng_pipe_destroy(notifiers->quit_pipe);
-       sem_destroy(&notifiers->ready);
-       free(notifiers);
-}
-
-/*
- * Thread managing health check socket.
- */
-static void *thread_manage_health(void *data)
-{
-       const bool is_root = (getuid() == 0);
-       int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
-       uint32_t revents, nb_fd;
-       struct lttng_poll_event events;
-       struct health_comm_msg msg;
-       struct health_comm_reply reply;
-       /* Thread-specific quit pipe. */
-       struct thread_notifiers *notifiers = data;
-       const int quit_pipe_read_fd = lttng_pipe_get_readfd(
-                       notifiers->quit_pipe);
-
-       DBG("[thread] Manage health check started");
-
-       rcu_register_thread();
-
-       /*
-        * Created with a size of two for:
-        *   - client socket
-        *   - thread quit pipe
-        */
-       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               goto error;
-       }
-
-       /* Create unix socket */
-       sock = lttcomm_create_unix_sock(the_config.health_unix_sock_path.value);
-       if (sock < 0) {
-               ERR("Unable to create health check Unix socket");
-               goto error;
-       }
-
-       if (is_root) {
-               /* lttng health client socket path permissions */
-               gid_t gid;
-
-               ret = utils_get_group_id(the_config.tracing_group_name.value, true, &gid);
-               if (ret) {
-                       /* Default to root group. */
-                       gid = 0;
-               }
-
-               ret = chown(the_config.health_unix_sock_path.value, 0, gid);
-               if (ret < 0) {
-                       ERR("Unable to set group on %s", the_config.health_unix_sock_path.value);
-                       PERROR("chown");
-                       goto error;
-               }
-
-               ret = chmod(the_config.health_unix_sock_path.value,
-                               S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
-               if (ret < 0) {
-                       ERR("Unable to set permissions on %s", the_config.health_unix_sock_path.value);
-                       PERROR("chmod");
-                       goto error;
-               }
-       }
-
-       /*
-        * Set the CLOEXEC flag. Return code is useless because either way, the
-        * show must go on.
-        */
-       (void) utils_set_fd_cloexec(sock);
-
-       ret = lttcomm_listen_unix_sock(sock);
-       if (ret < 0) {
-               goto error;
-       }
-
-       ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               goto error;
-       }
-
-       /* Add the application registration socket */
-       ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
-       if (ret < 0) {
-               goto error;
-       }
-
-       mark_thread_as_ready(notifiers);
-       while (1) {
-               DBG("Health check ready");
-
-               /* Infinite blocking call, waiting for transmission */
-restart:
-               ret = lttng_poll_wait(&events, -1);
-               if (ret < 0) {
-                       /*
-                        * Restart interrupted system call.
-                        */
-                       if (errno == EINTR) {
-                               goto restart;
-                       }
-                       goto error;
-               }
-
-               nb_fd = ret;
-
-               for (i = 0; i < nb_fd; i++) {
-                       /* Fetch once the poll data */
-                       revents = LTTNG_POLL_GETEV(&events, i);
-                       pollfd = LTTNG_POLL_GETFD(&events, i);
-
-                       /* Event on the registration socket */
-                       if (pollfd == sock) {
-                               if (revents & LPOLLIN) {
-                                       continue;
-                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                                       ERR("Health socket poll error");
-                                       goto error;
-                               } else {
-                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                                       goto error;
-                               }
-                       } else {
-                               /* Event on the thread's quit pipe. */
-                               err = 0;
-                               goto exit;
-                       }
-               }
-
-               new_sock = lttcomm_accept_unix_sock(sock);
-               if (new_sock < 0) {
-                       goto error;
-               }
-
-               /*
-                * Set the CLOEXEC flag. Return code is useless because either way, the
-                * show must go on.
-                */
-               (void) utils_set_fd_cloexec(new_sock);
-
-               DBG("Receiving data from client for health...");
-               ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
-               if (ret <= 0) {
-                       DBG("Nothing recv() from client... continuing");
-                       ret = close(new_sock);
-                       if (ret) {
-                               PERROR("close");
-                       }
-                       continue;
-               }
-
-               rcu_thread_online();
-
-               memset(&reply, 0, sizeof(reply));
-               for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
-                       /*
-                        * health_check_state returns 0 if health is
-                        * bad.
-                        */
-                       if (!health_check_state(the_health_sessiond, i)) {
-                               reply.ret_code |= 1ULL << i;
-                       }
-               }
-
-               DBG2("Health check return value %" PRIx64, reply.ret_code);
-
-               ret = lttcomm_send_unix_sock(new_sock, (void *) &reply,
-                               sizeof(reply));
-               if (ret < 0) {
-                       ERR("Failed to send health data back to client");
-               }
-
-               /* End of transmission */
-               ret = close(new_sock);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-
-exit:
-error:
-       if (err) {
-               ERR("Health error occurred in %s", __func__);
-       }
-       DBG("Health check thread dying");
-       unlink(the_config.health_unix_sock_path.value);
-       if (sock >= 0) {
-               ret = close(sock);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-
-       lttng_poll_clean(&events);
-       rcu_unregister_thread();
-       return NULL;
-}
-
-static bool shutdown_health_management_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
-
-       return notify_thread_pipe(write_fd) == 1;
-}
-
-bool launch_health_management_thread(void)
-{
-       struct thread_notifiers *notifiers;
-       struct lttng_thread *thread;
-
-       notifiers = zmalloc(sizeof(*notifiers));
-       if (!notifiers) {
-               goto error_alloc;
-       }
-
-       sem_init(&notifiers->ready, 0, 0);
-       notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
-       if (!notifiers->quit_pipe) {
-               goto error;
-       }
-       thread = lttng_thread_create("Health management",
-                       thread_manage_health,
-                       shutdown_health_management_thread,
-                       cleanup_health_management_thread,
-                       notifiers);
-       if (!thread) {
-               goto error;
-       }
-
-       wait_until_thread_is_ready(notifiers);
-       lttng_thread_put(thread);
-       return true;
-error:
-       cleanup_health_management_thread(notifiers);
-error_alloc:
-       return false;
-}
diff --git a/src/bin/lttng-sessiond/health.cpp b/src/bin/lttng-sessiond/health.cpp
new file mode 100644 (file)
index 0000000..53b9ee0
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
+ * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include "lttng-sessiond.h"
+#include "health-sessiond.h"
+#include <common/macros.h>
+#include <common/error.h>
+#include <common/utils.h>
+#include <common/pipe.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include "utils.h"
+#include "thread.h"
+
+struct thread_notifiers {
+       struct lttng_pipe *quit_pipe;
+       sem_t ready;
+};
+
+static
+void mark_thread_as_ready(struct thread_notifiers *notifiers)
+{
+       DBG("Marking health management thread as ready");
+       sem_post(&notifiers->ready);
+}
+
+static
+void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
+{
+       DBG("Waiting for health management thread to be ready");
+       sem_wait(&notifiers->ready);
+       DBG("Health management thread is ready");
+}
+
+static void cleanup_health_management_thread(void *data)
+{
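+       /* C++ does not implicitly convert void * to other pointer types, hence the explicit cast. */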
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+
+       lttng_pipe_destroy(notifiers->quit_pipe);
+       sem_destroy(&notifiers->ready);
+       free(notifiers);
+}
+
+/*
+ * Thread managing health check socket.
+ */
+static void *thread_manage_health(void *data)
+{
+       const bool is_root = (getuid() == 0);
+       int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
+       uint32_t revents, nb_fd;
+       struct lttng_poll_event events;
+       struct health_comm_msg msg;
+       struct health_comm_reply reply;
+       /* Thread-specific quit pipe. */
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       const int quit_pipe_read_fd = lttng_pipe_get_readfd(
+                       notifiers->quit_pipe);
+
+       DBG("[thread] Manage health check started");
+
+       rcu_register_thread();
+
+       /*
+        * Created with a size of two for:
+        *   - client socket
+        *   - thread quit pipe
+        */
+       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /* Create unix socket */
+       sock = lttcomm_create_unix_sock(the_config.health_unix_sock_path.value);
+       if (sock < 0) {
+               ERR("Unable to create health check Unix socket");
+               goto error;
+       }
+
+       if (is_root) {
+               /* lttng health client socket path permissions */
+               gid_t gid;
+
+               ret = utils_get_group_id(the_config.tracing_group_name.value, true, &gid);
+               if (ret) {
+                       /* Default to root group. */
+                       gid = 0;
+               }
+
+               ret = chown(the_config.health_unix_sock_path.value, 0, gid);
+               if (ret < 0) {
+                       ERR("Unable to set group on %s", the_config.health_unix_sock_path.value);
+                       PERROR("chown");
+                       goto error;
+               }
+
+               ret = chmod(the_config.health_unix_sock_path.value,
+                               S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+               if (ret < 0) {
+                       ERR("Unable to set permissions on %s", the_config.health_unix_sock_path.value);
+                       PERROR("chmod");
+                       goto error;
+               }
+       }
+
+       /*
+        * Set the CLOEXEC flag. Return code is useless because either way, the
+        * show must go on.
+        */
+       (void) utils_set_fd_cloexec(sock);
+
+       ret = lttcomm_listen_unix_sock(sock);
+       if (ret < 0) {
+               goto error;
+       }
+
+       ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /* Add the application registration socket */
+       ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
+       if (ret < 0) {
+               goto error;
+       }
+
+       mark_thread_as_ready(notifiers);
+       while (1) {
+               DBG("Health check ready");
+
+               /* Infinite blocking call, waiting for transmission */
+restart:
+               ret = lttng_poll_wait(&events, -1);
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               goto restart;
+                       }
+                       goto error;
+               }
+
+               nb_fd = ret;
+
+               for (i = 0; i < nb_fd; i++) {
+                       /* Fetch once the poll data */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
+
+                       /* Event on the registration socket */
+                       if (pollfd == sock) {
+                               if (revents & LPOLLIN) {
+                                       continue;
+                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       ERR("Health socket poll error");
+                                       goto error;
+                               } else {
+                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                                       goto error;
+                               }
+                       } else {
+                               /* Event on the thread's quit pipe. */
+                               err = 0;
+                               goto exit;
+                       }
+               }
+
+               new_sock = lttcomm_accept_unix_sock(sock);
+               if (new_sock < 0) {
+                       goto error;
+               }
+
+               /*
+                * Set the CLOEXEC flag. Return code is useless because either way, the
+                * show must go on.
+                */
+               (void) utils_set_fd_cloexec(new_sock);
+
+               DBG("Receiving data from client for health...");
+               ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
+               if (ret <= 0) {
+                       DBG("Nothing recv() from client... continuing");
+                       ret = close(new_sock);
+                       if (ret) {
+                               PERROR("close");
+                       }
+                       continue;
+               }
+
+               rcu_thread_online();
+
+               memset(&reply, 0, sizeof(reply));
+               for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
+                       /*
+                        * health_check_state returns 0 if health is
+                        * bad.
+                        */
+                       if (!health_check_state(the_health_sessiond, i)) {
+                               reply.ret_code |= 1ULL << i;
+                       }
+               }
+
+               DBG2("Health check return value %" PRIx64, reply.ret_code);
+
+               ret = lttcomm_send_unix_sock(new_sock, (void *) &reply,
+                               sizeof(reply));
+               if (ret < 0) {
+                       ERR("Failed to send health data back to client");
+               }
+
+               /* End of transmission */
+               ret = close(new_sock);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+
+exit:
+error:
+       if (err) {
+               ERR("Health error occurred in %s", __func__);
+       }
+       DBG("Health check thread dying");
+       unlink(the_config.health_unix_sock_path.value);
+       if (sock >= 0) {
+               ret = close(sock);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+
+       lttng_poll_clean(&events);
+       rcu_unregister_thread();
+       return NULL;
+}
+
+static bool shutdown_health_management_thread(void *data)
+{
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
+
+       return notify_thread_pipe(write_fd) == 1;
+}
+
+bool launch_health_management_thread(void)
+{
+       struct thread_notifiers *notifiers;
+       struct lttng_thread *thread;
+
+       notifiers = (thread_notifiers *) zmalloc(sizeof(*notifiers));
+       if (!notifiers) {
+               goto error_alloc;
+       }
+
+       sem_init(&notifiers->ready, 0, 0);
+       notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!notifiers->quit_pipe) {
+               goto error;
+       }
+       thread = lttng_thread_create("Health management",
+                       thread_manage_health,
+                       shutdown_health_management_thread,
+                       cleanup_health_management_thread,
+                       notifiers);
+       if (!thread) {
+               goto error;
+       }
+
+       wait_until_thread_is_ready(notifiers);
+       lttng_thread_put(thread);
+       return true;
+error:
+       cleanup_health_management_thread(notifiers);
+error_alloc:
+       return false;
+}
diff --git a/src/bin/lttng-sessiond/ht-cleanup.c b/src/bin/lttng-sessiond/ht-cleanup.c
deleted file mode 100644 (file)
index 050f18e..0000000
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-
-#include <common/hashtable/hashtable.h>
-#include <common/common.h>
-#include <common/utils.h>
-#include <pthread.h>
-
-#include "lttng-sessiond.h"
-#include "health-sessiond.h"
-#include "testpoint.h"
-#include "utils.h"
-#include "ht-cleanup.h"
-
-static int ht_cleanup_quit_pipe[2] = { -1, -1 };
-
-/*
- * Check if the ht_cleanup thread quit pipe was triggered.
- *
- * Return true if it was triggered else false;
- */
-static bool check_quit_pipe(int fd, uint32_t events)
-{
-       return (fd == ht_cleanup_quit_pipe[0] && (events & LPOLLIN));
-}
-
-static int init_pipe(int *pipe_fds)
-{
-       int ret, i;
-
-       ret = pipe(pipe_fds);
-       if (ret < 0) {
-               PERROR("ht_cleanup thread quit pipe");
-               goto error;
-       }
-
-       for (i = 0; i < 2; i++) {
-               ret = fcntl(pipe_fds[i], F_SETFD, FD_CLOEXEC);
-               if (ret < 0) {
-                       PERROR("fcntl ht_cleanup_quit_pipe");
-                       goto error;
-               }
-       }
-error:
-       return ret;
-}
-
-/*
- * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
- */
-static int set_pollset(struct lttng_poll_event *events, size_t size)
-{
-       int ret;
-
-       ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               goto error;
-       }
-
-       ret = lttng_poll_add(events, ht_cleanup_quit_pipe[0],
-                       LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               goto error;
-       }
-
-       ret = lttng_poll_add(events, the_ht_cleanup_pipe[0], LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               DBG("lttng_poll_add error %d.", ret);
-               goto error;
-       }
-
-       return 0;
-
-error:
-       return ret;
-}
-
-static void cleanup_ht_cleanup_thread(void *data)
-{
-       utils_close_pipe(ht_cleanup_quit_pipe);
-       utils_close_pipe(the_ht_cleanup_pipe);
-}
-
-static void *thread_ht_cleanup(void *data)
-{
-       int ret, i, pollfd, err = -1;
-       ssize_t size_ret;
-       uint32_t revents, nb_fd;
-       struct lttng_poll_event events;
-
-       DBG("startup.");
-
-       rcu_register_thread();
-       rcu_thread_online();
-
-       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_HT_CLEANUP);
-
-       if (testpoint(sessiond_thread_ht_cleanup)) {
-               DBG("testpoint.");
-               goto error_testpoint;
-       }
-
-       health_code_update();
-
-       ret = set_pollset(&events, 2);
-       if (ret < 0) {
-               DBG("sessiond_set_ht_cleanup_thread_pollset error %d.", ret);
-               goto error_poll_create;
-       }
-
-       health_code_update();
-
-       while (1) {
-       restart:
-               DBG3("Polling.");
-               health_poll_entry();
-               ret = lttng_poll_wait(&events, -1);
-               DBG3("Returning from poll on %d fds.",
-                       LTTNG_POLL_GETNB(&events));
-               health_poll_exit();
-               if (ret < 0) {
-                       /*
-                        * Restart interrupted system call.
-                        */
-                       if (errno == EINTR) {
-                               continue;
-                       }
-                       goto error;
-               }
-
-               nb_fd = ret;
-               for (i = 0; i < nb_fd; i++) {
-                       struct lttng_ht *ht;
-
-                       health_code_update();
-
-                       /* Fetch once the poll data */
-                       revents = LTTNG_POLL_GETEV(&events, i);
-                       pollfd = LTTNG_POLL_GETFD(&events, i);
-
-                       if (pollfd != the_ht_cleanup_pipe[0]) {
-                               continue;
-                       }
-
-                       if (revents & LPOLLIN) {
-                               /* Get socket from dispatch thread. */
-                               size_ret = lttng_read(the_ht_cleanup_pipe[0],
-                                               &ht, sizeof(ht));
-                               if (size_ret < sizeof(ht)) {
-                                       PERROR("ht cleanup notify pipe");
-                                       goto error;
-                               }
-                               health_code_update();
-                               /*
-                                * The whole point of this thread is to call
-                                * lttng_ht_destroy from a context that is NOT:
-                                * 1) a read-side RCU lock,
-                                * 2) a call_rcu thread.
-                                */
-                               lttng_ht_destroy(ht);
-
-                               health_code_update();
-
-                               /*
-                                * Ensure that we never process the quit pipe
-                                * event while there is still data available
-                                * on the ht clean pipe.
-                                */
-                               goto restart;
-                       } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                               ERR("ht cleanup pipe error");
-                               goto error;
-                       } else {
-                               ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                               goto error;
-                       }
-               }
-
-               for (i = 0; i < nb_fd; i++) {
-                       health_code_update();
-
-                       /* Fetch once the poll data */
-                       revents = LTTNG_POLL_GETEV(&events, i);
-                       pollfd = LTTNG_POLL_GETFD(&events, i);
-
-                       if (!revents) {
-                               /* No activity for this FD (poll implementation). */
-                               continue;
-                       }
-
-                       if (pollfd == the_ht_cleanup_pipe[0]) {
-                               continue;
-                       }
-
-                       /* Thread quit pipe has been closed. Killing thread. */
-                       ret = check_quit_pipe(pollfd, revents);
-                       if (ret) {
-                               err = 0;
-                               DBG("[ht-cleanup] quit.");
-                               goto exit;
-                       }
-               }
-       }
-
-exit:
-error:
-       lttng_poll_clean(&events);
-error_poll_create:
-error_testpoint:
-       DBG("[ht-cleanup] Thread terminates.");
-       if (err) {
-               health_error();
-               ERR("Health error occurred in %s", __func__);
-       }
-       health_unregister(the_health_sessiond);
-       rcu_thread_offline();
-       rcu_unregister_thread();
-       return NULL;
-}
-
-static bool shutdown_ht_cleanup_thread(void *data)
-{
-       int ret;
-
-       ret = notify_thread_pipe(ht_cleanup_quit_pipe[1]);
-       if (ret < 0) {
-               ERR("write error on ht_cleanup quit pipe");
-               goto end;
-       }
-end:
-       return ret;
-}
-
-struct lttng_thread *launch_ht_cleanup_thread(void)
-{
-       int ret;
-       struct lttng_thread *thread;
-
-       ret = init_pipe(the_ht_cleanup_pipe);
-       if (ret) {
-               goto error;
-       }
-
-       ret = init_pipe(ht_cleanup_quit_pipe);
-       if (ret) {
-               goto error;
-       }
-
-       thread = lttng_thread_create("HT cleanup",
-                       thread_ht_cleanup,
-                       shutdown_ht_cleanup_thread,
-                       cleanup_ht_cleanup_thread,
-                       NULL);
-       if (!thread) {
-               goto error;
-       }
-       return thread;
-error:
-       cleanup_ht_cleanup_thread(NULL);
-       return NULL;
-}
diff --git a/src/bin/lttng-sessiond/ht-cleanup.cpp b/src/bin/lttng-sessiond/ht-cleanup.cpp
new file mode 100644 (file)
index 0000000..050f18e
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+
+#include <common/hashtable/hashtable.h>
+#include <common/common.h>
+#include <common/utils.h>
+#include <pthread.h>
+
+#include "lttng-sessiond.h"
+#include "health-sessiond.h"
+#include "testpoint.h"
+#include "utils.h"
+#include "ht-cleanup.h"
+
+static int ht_cleanup_quit_pipe[2] = { -1, -1 };
+
+/*
+ * Check if the ht_cleanup thread quit pipe was triggered.
+ *
+ * Return true if it was triggered, false otherwise.
+ */
+static bool check_quit_pipe(int fd, uint32_t events)
+{
+       return (fd == ht_cleanup_quit_pipe[0] && (events & LPOLLIN));
+}
+
+static int init_pipe(int *pipe_fds)
+{
+       int ret, i;
+
+       ret = pipe(pipe_fds);
+       if (ret < 0) {
+               PERROR("ht_cleanup thread quit pipe");
+               goto error;
+       }
+
+       for (i = 0; i < 2; i++) {
+               ret = fcntl(pipe_fds[i], F_SETFD, FD_CLOEXEC);
+               if (ret < 0) {
+                       PERROR("fcntl ht_cleanup_quit_pipe");
+                       goto error;
+               }
+       }
+error:
+       return ret;
+}
+
+/*
+ * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
+ */
+static int set_pollset(struct lttng_poll_event *events, size_t size)
+{
+       int ret;
+
+       ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error;
+       }
+
+       ret = lttng_poll_add(events, ht_cleanup_quit_pipe[0],
+                       LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               goto error;
+       }
+
+       ret = lttng_poll_add(events, the_ht_cleanup_pipe[0], LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               DBG("lttng_poll_add error %d.", ret);
+               goto error;
+       }
+
+       return 0;
+
+error:
+       return ret;
+}
+
+static void cleanup_ht_cleanup_thread(void *data)
+{
+       utils_close_pipe(ht_cleanup_quit_pipe);
+       utils_close_pipe(the_ht_cleanup_pipe);
+}
+
+static void *thread_ht_cleanup(void *data)
+{
+       int ret, i, pollfd, err = -1;
+       ssize_t size_ret;
+       uint32_t revents, nb_fd;
+       struct lttng_poll_event events;
+
+       DBG("startup.");
+
+       rcu_register_thread();
+       rcu_thread_online();
+
+       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_HT_CLEANUP);
+
+       if (testpoint(sessiond_thread_ht_cleanup)) {
+               DBG("testpoint.");
+               goto error_testpoint;
+       }
+
+       health_code_update();
+
+       ret = set_pollset(&events, 2);
+       if (ret < 0) {
+               DBG("sessiond_set_ht_cleanup_thread_pollset error %d.", ret);
+               goto error_poll_create;
+       }
+
+       health_code_update();
+
+       while (1) {
+       restart:
+               DBG3("Polling.");
+               health_poll_entry();
+               ret = lttng_poll_wait(&events, -1);
+               DBG3("Returning from poll on %d fds.",
+                       LTTNG_POLL_GETNB(&events));
+               health_poll_exit();
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               continue;
+                       }
+                       goto error;
+               }
+
+               nb_fd = ret;
+               for (i = 0; i < nb_fd; i++) {
+                       struct lttng_ht *ht;
+
+                       health_code_update();
+
+                       /* Fetch once the poll data */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
+
+                       if (pollfd != the_ht_cleanup_pipe[0]) {
+                               continue;
+                       }
+
+                       if (revents & LPOLLIN) {
+                               /* Get socket from dispatch thread. */
+                               size_ret = lttng_read(the_ht_cleanup_pipe[0],
+                                               &ht, sizeof(ht));
+                               if (size_ret < sizeof(ht)) {
+                                       PERROR("ht cleanup notify pipe");
+                                       goto error;
+                               }
+                               health_code_update();
+                               /*
+                                * The whole point of this thread is to call
+                                * lttng_ht_destroy from a context that is NOT:
+                                * 1) a read-side RCU lock,
+                                * 2) a call_rcu thread.
+                                */
+                               lttng_ht_destroy(ht);
+
+                               health_code_update();
+
+                               /*
+                                * Ensure that we never process the quit pipe
+                                * event while there is still data available
+                                * on the ht clean pipe.
+                                */
+                               goto restart;
+                       } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                               ERR("ht cleanup pipe error");
+                               goto error;
+                       } else {
+                               ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                               goto error;
+                       }
+               }
+
+               for (i = 0; i < nb_fd; i++) {
+                       health_code_update();
+
+                       /* Fetch once the poll data */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
+
+                       if (!revents) {
+                               /* No activity for this FD (poll implementation). */
+                               continue;
+                       }
+
+                       if (pollfd == the_ht_cleanup_pipe[0]) {
+                               continue;
+                       }
+
+                       /* Thread quit pipe has been closed. Killing thread. */
+                       ret = check_quit_pipe(pollfd, revents);
+                       if (ret) {
+                               err = 0;
+                               DBG("[ht-cleanup] quit.");
+                               goto exit;
+                       }
+               }
+       }
+
+exit:
+error:
+       lttng_poll_clean(&events);
+error_poll_create:
+error_testpoint:
+       DBG("[ht-cleanup] Thread terminates.");
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister(the_health_sessiond);
+       rcu_thread_offline();
+       rcu_unregister_thread();
+       return NULL;
+}
+
+static bool shutdown_ht_cleanup_thread(void *data)
+{
+       int ret;
+
+       ret = notify_thread_pipe(ht_cleanup_quit_pipe[1]);
+       if (ret < 0) {
+               ERR("write error on ht_cleanup quit pipe");
+               goto end;
+       }
+end:
+       return ret;
+}
+
+struct lttng_thread *launch_ht_cleanup_thread(void)
+{
+       int ret;
+       struct lttng_thread *thread;
+
+       ret = init_pipe(the_ht_cleanup_pipe);
+       if (ret) {
+               goto error;
+       }
+
+       ret = init_pipe(ht_cleanup_quit_pipe);
+       if (ret) {
+               goto error;
+       }
+
+       thread = lttng_thread_create("HT cleanup",
+                       thread_ht_cleanup,
+                       shutdown_ht_cleanup_thread,
+                       cleanup_ht_cleanup_thread,
+                       NULL);
+       if (!thread) {
+               goto error;
+       }
+       return thread;
+error:
+       cleanup_ht_cleanup_thread(NULL);
+       return NULL;
+}
diff --git a/src/bin/lttng-sessiond/kernel-consumer.c b/src/bin/lttng-sessiond/kernel-consumer.c
deleted file mode 100644 (file)
index 364705f..0000000
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <inttypes.h>
-
-#include <common/common.h>
-#include <common/defaults.h>
-#include <common/compat/string.h>
-
-#include "consumer.h"
-#include "health-sessiond.h"
-#include "kernel-consumer.h"
-#include "notification-thread-commands.h"
-#include "session.h"
-#include "lttng-sessiond.h"
-
-static char *create_channel_path(struct consumer_output *consumer,
-               size_t *consumer_path_offset)
-{
-       int ret;
-       char tmp_path[PATH_MAX];
-       char *pathname = NULL;
-
-       LTTNG_ASSERT(consumer);
-
-       /* Get the right path name destination */
-       if (consumer->type == CONSUMER_DST_LOCAL ||
-                       (consumer->type == CONSUMER_DST_NET &&
-                                       consumer->relay_major_version == 2 &&
-                                       consumer->relay_minor_version >= 11)) {
-               pathname = strdup(consumer->domain_subdir);
-               if (!pathname) {
-                       PERROR("Failed to copy domain subdirectory string %s",
-                                       consumer->domain_subdir);
-                       goto error;
-               }
-               *consumer_path_offset = strlen(consumer->domain_subdir);
-               DBG3("Kernel local consumer trace path relative to current trace chunk: \"%s\"",
-                               pathname);
-       } else {
-               /* Network output, relayd < 2.11. */
-               ret = snprintf(tmp_path, sizeof(tmp_path), "%s%s",
-                               consumer->dst.net.base_dir,
-                               consumer->domain_subdir);
-               if (ret < 0) {
-                       PERROR("snprintf kernel metadata path");
-                       goto error;
-               } else if (ret >= sizeof(tmp_path)) {
-                       ERR("Kernel channel path exceeds the maximal allowed length of of %zu bytes (%i bytes required) with path \"%s%s\"",
-                                       sizeof(tmp_path), ret,
-                                       consumer->dst.net.base_dir,
-                                       consumer->domain_subdir);
-                       goto error;
-               }
-               pathname = lttng_strndup(tmp_path, sizeof(tmp_path));
-               if (!pathname) {
-                       PERROR("lttng_strndup");
-                       goto error;
-               }
-               *consumer_path_offset = 0;
-               DBG3("Kernel network consumer subdir path: %s", pathname);
-       }
-
-       return pathname;
-
-error:
-       free(pathname);
-       return NULL;
-}
-
-/*
- * Sending a single channel to the consumer with command ADD_CHANNEL.
- */
-static
-int kernel_consumer_add_channel(struct consumer_socket *sock,
-               struct ltt_kernel_channel *channel,
-               struct ltt_kernel_session *ksession,
-               unsigned int monitor)
-{
-       int ret;
-       char *pathname = NULL;
-       struct lttcomm_consumer_msg lkm;
-       struct consumer_output *consumer;
-       enum lttng_error_code status;
-       struct ltt_session *session = NULL;
-       struct lttng_channel_extended *channel_attr_extended;
-       bool is_local_trace;
-       size_t consumer_path_offset = 0;
-
-       /* Safety net */
-       LTTNG_ASSERT(channel);
-       LTTNG_ASSERT(ksession);
-       LTTNG_ASSERT(ksession->consumer);
-
-       consumer = ksession->consumer;
-       channel_attr_extended = (struct lttng_channel_extended *)
-                       channel->channel->attr.extended.ptr;
-
-       DBG("Kernel consumer adding channel %s to kernel consumer",
-                       channel->channel->name);
-       is_local_trace = consumer->net_seq_index == -1ULL;
-
-       pathname = create_channel_path(consumer, &consumer_path_offset);
-       if (!pathname) {
-               ret = -1;
-               goto error;
-       }
-
-       if (is_local_trace && ksession->current_trace_chunk) {
-               enum lttng_trace_chunk_status chunk_status;
-               char *pathname_index;
-
-               ret = asprintf(&pathname_index, "%s/" DEFAULT_INDEX_DIR,
-                               pathname);
-               if (ret < 0) {
-                       ERR("Failed to format channel index directory");
-                       ret = -1;
-                       goto error;
-               }
-
-               /*
-                * Create the index subdirectory which will take care
-                * of implicitly creating the channel's path.
-                */
-               chunk_status = lttng_trace_chunk_create_subdirectory(
-                               ksession->current_trace_chunk, pathname_index);
-               free(pathname_index);
-               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       ret = -1;
-                       goto error;
-               }
-       }
-
-       /* Prep channel message structure */
-       consumer_init_add_channel_comm_msg(&lkm,
-                       channel->key,
-                       ksession->id,
-                       &pathname[consumer_path_offset],
-                       ksession->uid,
-                       ksession->gid,
-                       consumer->net_seq_index,
-                       channel->channel->name,
-                       channel->stream_count,
-                       channel->channel->attr.output,
-                       CONSUMER_CHANNEL_TYPE_DATA,
-                       channel->channel->attr.tracefile_size,
-                       channel->channel->attr.tracefile_count,
-                       monitor,
-                       channel->channel->attr.live_timer_interval,
-                       ksession->is_live_session,
-                       channel_attr_extended->monitor_timer_interval,
-                       ksession->current_trace_chunk);
-
-       health_code_update();
-
-       ret = consumer_send_channel(sock, &lkm);
-       if (ret < 0) {
-               goto error;
-       }
-
-       health_code_update();
-       rcu_read_lock();
-       session = session_find_by_id(ksession->id);
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
-       LTTNG_ASSERT(session_trylock_list());
-
-       status = notification_thread_command_add_channel(
-                       the_notification_thread_handle, session->name,
-                       ksession->uid, ksession->gid, channel->channel->name,
-                       channel->key, LTTNG_DOMAIN_KERNEL,
-                       channel->channel->attr.subbuf_size *
-                                       channel->channel->attr.num_subbuf);
-       rcu_read_unlock();
-       if (status != LTTNG_OK) {
-               ret = -1;
-               goto error;
-       }
-
-       channel->published_to_notification_thread = true;
-
-error:
-       if (session) {
-               session_put(session);
-       }
-       free(pathname);
-       return ret;
-}
-
-/*
- * Sending metadata to the consumer with command ADD_CHANNEL and ADD_STREAM.
- *
- * The consumer socket lock must be held by the caller.
- */
-int kernel_consumer_add_metadata(struct consumer_socket *sock,
-               struct ltt_kernel_session *ksession, unsigned int monitor)
-{
-       int ret;
-       struct lttcomm_consumer_msg lkm;
-       struct consumer_output *consumer;
-
-       rcu_read_lock();
-
-       /* Safety net */
-       LTTNG_ASSERT(ksession);
-       LTTNG_ASSERT(ksession->consumer);
-       LTTNG_ASSERT(sock);
-
-       DBG("Sending metadata %d to kernel consumer",
-                       ksession->metadata_stream_fd);
-
-       /* Get consumer output pointer */
-       consumer = ksession->consumer;
-
-       /* Prep channel message structure */
-       consumer_init_add_channel_comm_msg(&lkm,
-                       ksession->metadata->key,
-                       ksession->id,
-                       "",
-                       ksession->uid,
-                       ksession->gid,
-                       consumer->net_seq_index,
-                       ksession->metadata->conf->name,
-                       1,
-                       ksession->metadata->conf->attr.output,
-                       CONSUMER_CHANNEL_TYPE_METADATA,
-                       ksession->metadata->conf->attr.tracefile_size,
-                       ksession->metadata->conf->attr.tracefile_count,
-                       monitor,
-                       ksession->metadata->conf->attr.live_timer_interval,
-                       ksession->is_live_session,
-                       0,
-                       ksession->current_trace_chunk);
-
-       health_code_update();
-
-       ret = consumer_send_channel(sock, &lkm);
-       if (ret < 0) {
-               goto error;
-       }
-
-       health_code_update();
-
-       /* Prep stream message structure */
-       consumer_init_add_stream_comm_msg(&lkm,
-                       ksession->metadata->key,
-                       ksession->metadata_stream_fd,
-                       0 /* CPU: 0 for metadata. */);
-
-       health_code_update();
-
-       /* Send stream and file descriptor */
-       ret = consumer_send_stream(sock, consumer, &lkm,
-                       &ksession->metadata_stream_fd, 1);
-       if (ret < 0) {
-               goto error;
-       }
-
-       health_code_update();
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Sending a single stream to the consumer with command ADD_STREAM.
- */
-static
-int kernel_consumer_add_stream(struct consumer_socket *sock,
-               struct ltt_kernel_channel *channel,
-               struct ltt_kernel_stream *stream,
-               struct ltt_kernel_session *session, unsigned int monitor)
-{
-       int ret;
-       struct lttcomm_consumer_msg lkm;
-       struct consumer_output *consumer;
-
-       LTTNG_ASSERT(channel);
-       LTTNG_ASSERT(stream);
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(session->consumer);
-       LTTNG_ASSERT(sock);
-
-       DBG("Sending stream %d of channel %s to kernel consumer",
-                       stream->fd, channel->channel->name);
-
-       /* Get consumer output pointer */
-       consumer = session->consumer;
-
-       /* Prep stream consumer message */
-       consumer_init_add_stream_comm_msg(&lkm,
-                       channel->key,
-                       stream->fd,
-                       stream->cpu);
-
-       health_code_update();
-
-       /* Send stream and file descriptor */
-       ret = consumer_send_stream(sock, consumer, &lkm, &stream->fd, 1);
-       if (ret < 0) {
-               goto error;
-       }
-
-       health_code_update();
-
-error:
-       return ret;
-}
-
-/*
- * Sending the notification that all streams were sent with STREAMS_SENT.
- */
-int kernel_consumer_streams_sent(struct consumer_socket *sock,
-               struct ltt_kernel_session *session, uint64_t channel_key)
-{
-       int ret;
-       struct lttcomm_consumer_msg lkm;
-       struct consumer_output *consumer;
-
-       LTTNG_ASSERT(sock);
-       LTTNG_ASSERT(session);
-
-       DBG("Sending streams_sent");
-       /* Get consumer output pointer */
-       consumer = session->consumer;
-
-       /* Prep stream consumer message */
-       consumer_init_streams_sent_comm_msg(&lkm,
-                       LTTNG_CONSUMER_STREAMS_SENT,
-                       channel_key, consumer->net_seq_index);
-
-       health_code_update();
-
-       /* Send stream and file descriptor */
-       ret = consumer_send_msg(sock, &lkm);
-       if (ret < 0) {
-               goto error;
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Send all stream fds of kernel channel to the consumer.
- *
- * The consumer socket lock must be held by the caller.
- */
-int kernel_consumer_send_channel_streams(struct consumer_socket *sock,
-               struct ltt_kernel_channel *channel, struct ltt_kernel_session *ksession,
-               unsigned int monitor)
-{
-       int ret = LTTNG_OK;
-       struct ltt_kernel_stream *stream;
-
-       /* Safety net */
-       LTTNG_ASSERT(channel);
-       LTTNG_ASSERT(ksession);
-       LTTNG_ASSERT(ksession->consumer);
-       LTTNG_ASSERT(sock);
-
-       rcu_read_lock();
-
-       /* Bail out if consumer is disabled */
-       if (!ksession->consumer->enabled) {
-               ret = LTTNG_OK;
-               goto error;
-       }
-
-       DBG("Sending streams of channel %s to kernel consumer",
-                       channel->channel->name);
-
-       if (!channel->sent_to_consumer) {
-               ret = kernel_consumer_add_channel(sock, channel, ksession, monitor);
-               if (ret < 0) {
-                       goto error;
-               }
-               channel->sent_to_consumer = true;
-       }
-
-       /* Send streams */
-       cds_list_for_each_entry(stream, &channel->stream_list.head, list) {
-               if (!stream->fd || stream->sent_to_consumer) {
-                       continue;
-               }
-
-               /* Add stream on the kernel consumer side. */
-               ret = kernel_consumer_add_stream(sock, channel, stream,
-                               ksession, monitor);
-               if (ret < 0) {
-                       goto error;
-               }
-               stream->sent_to_consumer = true;
-       }
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Send all stream fds of the kernel session to the consumer.
- *
- * The consumer socket lock must be held by the caller.
- */
-int kernel_consumer_send_session(struct consumer_socket *sock,
-               struct ltt_kernel_session *session)
-{
-       int ret, monitor = 0;
-       struct ltt_kernel_channel *chan;
-
-       /* Safety net */
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(session->consumer);
-       LTTNG_ASSERT(sock);
-
-       /* Bail out if consumer is disabled */
-       if (!session->consumer->enabled) {
-               ret = LTTNG_OK;
-               goto error;
-       }
-
-       /* Don't monitor the streams on the consumer if in flight recorder. */
-       if (session->output_traces) {
-               monitor = 1;
-       }
-
-       DBG("Sending session stream to kernel consumer");
-
-       if (session->metadata_stream_fd >= 0 && session->metadata) {
-               ret = kernel_consumer_add_metadata(sock, session, monitor);
-               if (ret < 0) {
-                       goto error;
-               }
-       }
-
-       /* Send channel and streams of it */
-       cds_list_for_each_entry(chan, &session->channel_list.head, list) {
-               ret = kernel_consumer_send_channel_streams(sock, chan, session,
-                               monitor);
-               if (ret < 0) {
-                       goto error;
-               }
-               if (monitor) {
-                       /*
-                        * Inform the relay that all the streams for the
-                        * channel were sent.
-                        */
-                       ret = kernel_consumer_streams_sent(sock, session, chan->key);
-                       if (ret < 0) {
-                               goto error;
-                       }
-               }
-       }
-
-       DBG("Kernel consumer FDs of metadata and channel streams sent");
-
-       session->consumer_fds_sent = 1;
-       return 0;
-
-error:
-       return ret;
-}
-
-int kernel_consumer_destroy_channel(struct consumer_socket *socket,
-               struct ltt_kernel_channel *channel)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(channel);
-       LTTNG_ASSERT(socket);
-
-       DBG("Sending kernel consumer destroy channel key %" PRIu64, channel->key);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_DESTROY_CHANNEL;
-       msg.u.destroy_channel.key = channel->key;
-
-       pthread_mutex_lock(socket->lock);
-       health_code_update();
-
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0) {
-               goto error;
-       }
-
-error:
-       health_code_update();
-       pthread_mutex_unlock(socket->lock);
-       return ret;
-}
-
-int kernel_consumer_destroy_metadata(struct consumer_socket *socket,
-               struct ltt_kernel_metadata *metadata)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(metadata);
-       LTTNG_ASSERT(socket);
-
-       DBG("Sending kernel consumer destroy channel key %" PRIu64, metadata->key);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_DESTROY_CHANNEL;
-       msg.u.destroy_channel.key = metadata->key;
-
-       pthread_mutex_lock(socket->lock);
-       health_code_update();
-
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0) {
-               goto error;
-       }
-
-error:
-       health_code_update();
-       pthread_mutex_unlock(socket->lock);
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/kernel-consumer.cpp b/src/bin/lttng-sessiond/kernel-consumer.cpp
new file mode 100644 (file)
index 0000000..364705f
--- /dev/null
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <common/common.h>
+#include <common/defaults.h>
+#include <common/compat/string.h>
+
+#include "consumer.h"
+#include "health-sessiond.h"
+#include "kernel-consumer.h"
+#include "notification-thread-commands.h"
+#include "session.h"
+#include "lttng-sessiond.h"
+
+static char *create_channel_path(struct consumer_output *consumer,
+               size_t *consumer_path_offset)
+{
+       int ret;
+       char tmp_path[PATH_MAX];
+       char *pathname = NULL;
+
+       LTTNG_ASSERT(consumer);
+
+       /* Get the right path name destination */
+       if (consumer->type == CONSUMER_DST_LOCAL ||
+                       (consumer->type == CONSUMER_DST_NET &&
+                                       consumer->relay_major_version == 2 &&
+                                       consumer->relay_minor_version >= 11)) {
+               pathname = strdup(consumer->domain_subdir);
+               if (!pathname) {
+                       PERROR("Failed to copy domain subdirectory string %s",
+                                       consumer->domain_subdir);
+                       goto error;
+               }
+               *consumer_path_offset = strlen(consumer->domain_subdir);
+               DBG3("Kernel local consumer trace path relative to current trace chunk: \"%s\"",
+                               pathname);
+       } else {
+               /* Network output, relayd < 2.11. */
+               ret = snprintf(tmp_path, sizeof(tmp_path), "%s%s",
+                               consumer->dst.net.base_dir,
+                               consumer->domain_subdir);
+               if (ret < 0) {
+                       PERROR("snprintf kernel metadata path");
+                       goto error;
+               } else if (ret >= sizeof(tmp_path)) {
+                       ERR("Kernel channel path exceeds the maximal allowed length of %zu bytes (%i bytes required) with path \"%s%s\"",
+                                       sizeof(tmp_path), ret,
+                                       consumer->dst.net.base_dir,
+                                       consumer->domain_subdir);
+                       goto error;
+               }
+               pathname = lttng_strndup(tmp_path, sizeof(tmp_path));
+               if (!pathname) {
+                       PERROR("lttng_strndup");
+                       goto error;
+               }
+               *consumer_path_offset = 0;
+               DBG3("Kernel network consumer subdir path: %s", pathname);
+       }
+
+       return pathname;
+
+error:
+       free(pathname);
+       return NULL;
+}
+
+/*
+ * Sending a single channel to the consumer with command ADD_CHANNEL.
+ */
+static
+int kernel_consumer_add_channel(struct consumer_socket *sock,
+               struct ltt_kernel_channel *channel,
+               struct ltt_kernel_session *ksession,
+               unsigned int monitor)
+{
+       int ret;
+       char *pathname = NULL;
+       struct lttcomm_consumer_msg lkm;
+       struct consumer_output *consumer;
+       enum lttng_error_code status;
+       struct ltt_session *session = NULL;
+       struct lttng_channel_extended *channel_attr_extended;
+       bool is_local_trace;
+       size_t consumer_path_offset = 0;
+
+       /* Safety net */
+       LTTNG_ASSERT(channel);
+       LTTNG_ASSERT(ksession);
+       LTTNG_ASSERT(ksession->consumer);
+
+       consumer = ksession->consumer;
+       channel_attr_extended = (struct lttng_channel_extended *)
+                       channel->channel->attr.extended.ptr;
+
+       DBG("Kernel consumer adding channel %s to kernel consumer",
+                       channel->channel->name);
+       is_local_trace = consumer->net_seq_index == -1ULL;
+
+       pathname = create_channel_path(consumer, &consumer_path_offset);
+       if (!pathname) {
+               ret = -1;
+               goto error;
+       }
+
+       if (is_local_trace && ksession->current_trace_chunk) {
+               enum lttng_trace_chunk_status chunk_status;
+               char *pathname_index;
+
+               ret = asprintf(&pathname_index, "%s/" DEFAULT_INDEX_DIR,
+                               pathname);
+               if (ret < 0) {
+                       ERR("Failed to format channel index directory");
+                       ret = -1;
+                       goto error;
+               }
+
+               /*
+                * Create the index subdirectory which will take care
+                * of implicitly creating the channel's path.
+                */
+               chunk_status = lttng_trace_chunk_create_subdirectory(
+                               ksession->current_trace_chunk, pathname_index);
+               free(pathname_index);
+               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       ret = -1;
+                       goto error;
+               }
+       }
+
+       /* Prep channel message structure */
+       consumer_init_add_channel_comm_msg(&lkm,
+                       channel->key,
+                       ksession->id,
+                       &pathname[consumer_path_offset],
+                       ksession->uid,
+                       ksession->gid,
+                       consumer->net_seq_index,
+                       channel->channel->name,
+                       channel->stream_count,
+                       channel->channel->attr.output,
+                       CONSUMER_CHANNEL_TYPE_DATA,
+                       channel->channel->attr.tracefile_size,
+                       channel->channel->attr.tracefile_count,
+                       monitor,
+                       channel->channel->attr.live_timer_interval,
+                       ksession->is_live_session,
+                       channel_attr_extended->monitor_timer_interval,
+                       ksession->current_trace_chunk);
+
+       health_code_update();
+
+       ret = consumer_send_channel(sock, &lkm);
+       if (ret < 0) {
+               goto error;
+       }
+
+       health_code_update();
+       rcu_read_lock();
+       session = session_find_by_id(ksession->id);
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
+       LTTNG_ASSERT(session_trylock_list());
+
+       status = notification_thread_command_add_channel(
+                       the_notification_thread_handle, session->name,
+                       ksession->uid, ksession->gid, channel->channel->name,
+                       channel->key, LTTNG_DOMAIN_KERNEL,
+                       channel->channel->attr.subbuf_size *
+                                       channel->channel->attr.num_subbuf);
+       rcu_read_unlock();
+       if (status != LTTNG_OK) {
+               ret = -1;
+               goto error;
+       }
+
+       channel->published_to_notification_thread = true;
+
+error:
+       if (session) {
+               session_put(session);
+       }
+       free(pathname);
+       return ret;
+}
+
+/*
+ * Sending metadata to the consumer with command ADD_CHANNEL and ADD_STREAM.
+ *
+ * The consumer socket lock must be held by the caller.
+ */
+int kernel_consumer_add_metadata(struct consumer_socket *sock,
+               struct ltt_kernel_session *ksession, unsigned int monitor)
+{
+       int ret;
+       struct lttcomm_consumer_msg lkm;
+       struct consumer_output *consumer;
+
+       rcu_read_lock();
+
+       /* Safety net */
+       LTTNG_ASSERT(ksession);
+       LTTNG_ASSERT(ksession->consumer);
+       LTTNG_ASSERT(sock);
+
+       DBG("Sending metadata %d to kernel consumer",
+                       ksession->metadata_stream_fd);
+
+       /* Get consumer output pointer */
+       consumer = ksession->consumer;
+
+       /* Prep channel message structure */
+       consumer_init_add_channel_comm_msg(&lkm,
+                       ksession->metadata->key,
+                       ksession->id,
+                       "",
+                       ksession->uid,
+                       ksession->gid,
+                       consumer->net_seq_index,
+                       ksession->metadata->conf->name,
+                       1,
+                       ksession->metadata->conf->attr.output,
+                       CONSUMER_CHANNEL_TYPE_METADATA,
+                       ksession->metadata->conf->attr.tracefile_size,
+                       ksession->metadata->conf->attr.tracefile_count,
+                       monitor,
+                       ksession->metadata->conf->attr.live_timer_interval,
+                       ksession->is_live_session,
+                       0,
+                       ksession->current_trace_chunk);
+
+       health_code_update();
+
+       ret = consumer_send_channel(sock, &lkm);
+       if (ret < 0) {
+               goto error;
+       }
+
+       health_code_update();
+
+       /* Prep stream message structure */
+       consumer_init_add_stream_comm_msg(&lkm,
+                       ksession->metadata->key,
+                       ksession->metadata_stream_fd,
+                       0 /* CPU: 0 for metadata. */);
+
+       health_code_update();
+
+       /* Send stream and file descriptor */
+       ret = consumer_send_stream(sock, consumer, &lkm,
+                       &ksession->metadata_stream_fd, 1);
+       if (ret < 0) {
+               goto error;
+       }
+
+       health_code_update();
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Sending a single stream to the consumer with command ADD_STREAM.
+ */
+static
+int kernel_consumer_add_stream(struct consumer_socket *sock,
+               struct ltt_kernel_channel *channel,
+               struct ltt_kernel_stream *stream,
+               struct ltt_kernel_session *session, unsigned int monitor)
+{
+       int ret;
+       struct lttcomm_consumer_msg lkm;
+       struct consumer_output *consumer;
+
+       LTTNG_ASSERT(channel);
+       LTTNG_ASSERT(stream);
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(session->consumer);
+       LTTNG_ASSERT(sock);
+
+       DBG("Sending stream %d of channel %s to kernel consumer",
+                       stream->fd, channel->channel->name);
+
+       /* Get consumer output pointer */
+       consumer = session->consumer;
+
+       /* Prep stream consumer message */
+       consumer_init_add_stream_comm_msg(&lkm,
+                       channel->key,
+                       stream->fd,
+                       stream->cpu);
+
+       health_code_update();
+
+       /* Send stream and file descriptor */
+       ret = consumer_send_stream(sock, consumer, &lkm, &stream->fd, 1);
+       if (ret < 0) {
+               goto error;
+       }
+
+       health_code_update();
+
+error:
+       return ret;
+}
+
+/*
+ * Sending the notification that all streams were sent with STREAMS_SENT.
+ */
+int kernel_consumer_streams_sent(struct consumer_socket *sock,
+               struct ltt_kernel_session *session, uint64_t channel_key)
+{
+       int ret;
+       struct lttcomm_consumer_msg lkm;
+       struct consumer_output *consumer;
+
+       LTTNG_ASSERT(sock);
+       LTTNG_ASSERT(session);
+
+       DBG("Sending streams_sent");
+       /* Get consumer output pointer */
+       consumer = session->consumer;
+
+       /* Prep stream consumer message */
+       consumer_init_streams_sent_comm_msg(&lkm,
+                       LTTNG_CONSUMER_STREAMS_SENT,
+                       channel_key, consumer->net_seq_index);
+
+       health_code_update();
+
+       /* Send stream and file descriptor */
+       ret = consumer_send_msg(sock, &lkm);
+       if (ret < 0) {
+               goto error;
+       }
+
+error:
+       return ret;
+}
+
+/*
+ * Send all stream fds of kernel channel to the consumer.
+ *
+ * The consumer socket lock must be held by the caller.
+ */
+int kernel_consumer_send_channel_streams(struct consumer_socket *sock,
+               struct ltt_kernel_channel *channel, struct ltt_kernel_session *ksession,
+               unsigned int monitor)
+{
+       int ret = LTTNG_OK;
+       struct ltt_kernel_stream *stream;
+
+       /* Safety net */
+       LTTNG_ASSERT(channel);
+       LTTNG_ASSERT(ksession);
+       LTTNG_ASSERT(ksession->consumer);
+       LTTNG_ASSERT(sock);
+
+       rcu_read_lock();
+
+       /* Bail out if consumer is disabled */
+       if (!ksession->consumer->enabled) {
+               ret = LTTNG_OK;
+               goto error;
+       }
+
+       DBG("Sending streams of channel %s to kernel consumer",
+                       channel->channel->name);
+
+       if (!channel->sent_to_consumer) {
+               ret = kernel_consumer_add_channel(sock, channel, ksession, monitor);
+               if (ret < 0) {
+                       goto error;
+               }
+               channel->sent_to_consumer = true;
+       }
+
+       /* Send streams */
+       cds_list_for_each_entry(stream, &channel->stream_list.head, list) {
+               if (!stream->fd || stream->sent_to_consumer) {
+                       continue;
+               }
+
+               /* Add stream on the kernel consumer side. */
+               ret = kernel_consumer_add_stream(sock, channel, stream,
+                               ksession, monitor);
+               if (ret < 0) {
+                       goto error;
+               }
+               stream->sent_to_consumer = true;
+       }
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Send all stream fds of the kernel session to the consumer.
+ *
+ * The consumer socket lock must be held by the caller.
+ */
+int kernel_consumer_send_session(struct consumer_socket *sock,
+               struct ltt_kernel_session *session)
+{
+       int ret, monitor = 0;
+       struct ltt_kernel_channel *chan;
+
+       /* Safety net */
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(session->consumer);
+       LTTNG_ASSERT(sock);
+
+       /* Bail out if consumer is disabled */
+       if (!session->consumer->enabled) {
+               ret = LTTNG_OK;
+               goto error;
+       }
+
+       /* Don't monitor the streams on the consumer if in flight recorder. */
+       if (session->output_traces) {
+               monitor = 1;
+       }
+
+       DBG("Sending session stream to kernel consumer");
+
+       if (session->metadata_stream_fd >= 0 && session->metadata) {
+               ret = kernel_consumer_add_metadata(sock, session, monitor);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+       /* Send channel and streams of it */
+       cds_list_for_each_entry(chan, &session->channel_list.head, list) {
+               ret = kernel_consumer_send_channel_streams(sock, chan, session,
+                               monitor);
+               if (ret < 0) {
+                       goto error;
+               }
+               if (monitor) {
+                       /*
+                        * Inform the relay that all the streams for the
+                        * channel were sent.
+                        */
+                       ret = kernel_consumer_streams_sent(sock, session, chan->key);
+                       if (ret < 0) {
+                               goto error;
+                       }
+               }
+       }
+
+       DBG("Kernel consumer FDs of metadata and channel streams sent");
+
+       session->consumer_fds_sent = 1;
+       return 0;
+
+error:
+       return ret;
+}
+
+int kernel_consumer_destroy_channel(struct consumer_socket *socket,
+               struct ltt_kernel_channel *channel)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(channel);
+       LTTNG_ASSERT(socket);
+
+       DBG("Sending kernel consumer destroy channel key %" PRIu64, channel->key);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_DESTROY_CHANNEL;
+       msg.u.destroy_channel.key = channel->key;
+
+       pthread_mutex_lock(socket->lock);
+       health_code_update();
+
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0) {
+               goto error;
+       }
+
+error:
+       health_code_update();
+       pthread_mutex_unlock(socket->lock);
+       return ret;
+}
+
+int kernel_consumer_destroy_metadata(struct consumer_socket *socket,
+               struct ltt_kernel_metadata *metadata)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(metadata);
+       LTTNG_ASSERT(socket);
+
+       DBG("Sending kernel consumer destroy channel key %" PRIu64, metadata->key);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_DESTROY_CHANNEL;
+       msg.u.destroy_channel.key = metadata->key;
+
+       pthread_mutex_lock(socket->lock);
+       health_code_update();
+
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0) {
+               goto error;
+       }
+
+error:
+       health_code_update();
+       pthread_mutex_unlock(socket->lock);
+       return ret;
+}
diff --git a/src/bin/lttng-sessiond/kernel.c b/src/bin/lttng-sessiond/kernel.c
deleted file mode 100644 (file)
index ffbde69..0000000
+++ /dev/null
@@ -1,2544 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <fcntl.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-#include <inttypes.h>
-#include <sys/types.h>
-
-#include <common/common.h>
-#include <common/hashtable/utils.h>
-#include <common/trace-chunk.h>
-#include <common/kernel-ctl/kernel-ctl.h>
-#include <common/kernel-ctl/kernel-ioctl.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-#include <common/tracker.h>
-#include <common/utils.h>
-#include <lttng/event.h>
-#include <lttng/lttng-error.h>
-#include <lttng/tracker.h>
-
-#include <lttng/userspace-probe.h>
-#include <lttng/userspace-probe-internal.h>
-#include <lttng/condition/event-rule-matches.h>
-#include <lttng/condition/event-rule-matches-internal.h>
-#include <lttng/event-rule/event-rule.h>
-#include <lttng/event-rule/event-rule-internal.h>
-#include <lttng/event-rule/kernel-uprobe-internal.h>
-
-#include "event-notifier-error-accounting.h"
-#include "lttng-sessiond.h"
-#include "lttng-syscall.h"
-#include "condition-internal.h"
-#include "consumer.h"
-#include "kernel.h"
-#include "kernel-consumer.h"
-#include "kern-modules.h"
-#include "sessiond-config.h"
-#include "utils.h"
-#include "rotate.h"
-#include "modprobe.h"
-#include "tracker.h"
-#include "notification-thread-commands.h"
-
-/*
- * Key used to reference a channel between the sessiond and the consumer. This
- * is only read and updated with the session_list lock held.
- */
-static uint64_t next_kernel_channel_key;
-
-static const char *module_proc_lttng = "/proc/lttng";
-
-static int kernel_tracer_fd = -1;
-static int kernel_tracer_event_notifier_group_fd = -1;
-static int kernel_tracer_event_notifier_group_notification_fd = -1;
-static struct cds_lfht *kernel_token_to_event_notifier_rule_ht;
-
-/*
- * Add context on a kernel channel.
- *
- * Assumes the ownership of ctx.
- */
-int kernel_add_channel_context(struct ltt_kernel_channel *chan,
-               struct ltt_kernel_context *ctx)
-{
-       int ret;
-
-       LTTNG_ASSERT(chan);
-       LTTNG_ASSERT(ctx);
-
-       DBG("Adding context to channel %s", chan->channel->name);
-       ret = kernctl_add_context(chan->fd, &ctx->ctx);
-       if (ret < 0) {
-               switch (-ret) {
-               case ENOSYS:
-                       /* Exists but not available for this kernel */
-                       ret = LTTNG_ERR_KERN_CONTEXT_UNAVAILABLE;
-                       goto error;
-               case EEXIST:
-                       /* If EEXIST, we just ignore the error */
-                       ret = 0;
-                       goto end;
-               default:
-                       PERROR("add context ioctl");
-                       ret = LTTNG_ERR_KERN_CONTEXT_FAIL;
-                       goto error;
-               }
-       }
-       ret = 0;
-
-end:
-       cds_list_add_tail(&ctx->list, &chan->ctx_list);
-       ctx->in_list = true;
-       ctx = NULL;
-error:
-       if (ctx) {
-               trace_kernel_destroy_context(ctx);
-       }
-       return ret;
-}
-
-/*
- * Create a new kernel session, register it to the kernel tracer and add it to
- * the session daemon session.
- */
-int kernel_create_session(struct ltt_session *session)
-{
-       int ret;
-       struct ltt_kernel_session *lks;
-
-       LTTNG_ASSERT(session);
-
-       /* Allocate data structure */
-       lks = trace_kernel_create_session();
-       if (lks == NULL) {
-               ret = -1;
-               goto error;
-       }
-
-       /* Kernel tracer session creation */
-       ret = kernctl_create_session(kernel_tracer_fd);
-       if (ret < 0) {
-               PERROR("ioctl kernel create session");
-               goto error;
-       }
-
-       lks->fd = ret;
-       /* Prevent fd duplication after execlp() */
-       ret = fcntl(lks->fd, F_SETFD, FD_CLOEXEC);
-       if (ret < 0) {
-               PERROR("fcntl session fd");
-       }
-
-       lks->id = session->id;
-       lks->consumer_fds_sent = 0;
-       session->kernel_session = lks;
-
-       DBG("Kernel session created (fd: %d)", lks->fd);
-
-       /*
-        * This is necessary since the creation time is present in the session
-        * name when it is generated.
-        */
-       if (session->has_auto_generated_name) {
-               ret = kernctl_session_set_name(lks->fd, DEFAULT_SESSION_NAME);
-       } else {
-               ret = kernctl_session_set_name(lks->fd, session->name);
-       }
-       if (ret) {
-               WARN("Could not set kernel session name for session %" PRIu64 " name: %s",
-                       session->id, session->name);
-       }
-
-       ret = kernctl_session_set_creation_time(lks->fd, session->creation_time);
-       if (ret) {
-               WARN("Could not set kernel session creation time for session %" PRIu64 " name: %s",
-                       session->id, session->name);
-       }
-
-       return 0;
-
-error:
-       if (lks) {
-               trace_kernel_destroy_session(lks);
-               trace_kernel_free_session(lks);
-       }
-       return ret;
-}
-
-/*
- * Create a kernel channel, register it to the kernel tracer and add it to the
- * kernel session.
- */
-int kernel_create_channel(struct ltt_kernel_session *session,
-               struct lttng_channel *chan)
-{
-       int ret;
-       struct ltt_kernel_channel *lkc;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(chan);
-
-       /* Allocate kernel channel */
-       lkc = trace_kernel_create_channel(chan);
-       if (lkc == NULL) {
-               goto error;
-       }
-
-       DBG3("Kernel create channel %s with attr: %d, %" PRIu64 ", %" PRIu64 ", %u, %u, %d, %d",
-                       chan->name, lkc->channel->attr.overwrite,
-                       lkc->channel->attr.subbuf_size, lkc->channel->attr.num_subbuf,
-                       lkc->channel->attr.switch_timer_interval, lkc->channel->attr.read_timer_interval,
-                       lkc->channel->attr.live_timer_interval, lkc->channel->attr.output);
-
-       /* Kernel tracer channel creation */
-       ret = kernctl_create_channel(session->fd, &lkc->channel->attr);
-       if (ret < 0) {
-               PERROR("ioctl kernel create channel");
-               goto error;
-       }
-
-       /* Setup the channel fd */
-       lkc->fd = ret;
-       /* Prevent fd duplication after execlp() */
-       ret = fcntl(lkc->fd, F_SETFD, FD_CLOEXEC);
-       if (ret < 0) {
-               PERROR("fcntl session fd");
-       }
-
-       /* Add channel to session */
-       cds_list_add(&lkc->list, &session->channel_list.head);
-       session->channel_count++;
-       lkc->session = session;
-       lkc->key = ++next_kernel_channel_key;
-
-       DBG("Kernel channel %s created (fd: %d, key: %" PRIu64 ")",
-                       lkc->channel->name, lkc->fd, lkc->key);
-
-       return 0;
-
-error:
-       if (lkc) {
-               free(lkc->channel);
-               free(lkc);
-       }
-       return -1;
-}
-
-/*
- * Create a kernel event notifier group, register it to the kernel tracer and
- * add it to the kernel session.
- */
-static int kernel_create_event_notifier_group(int *event_notifier_group_fd)
-{
-       int ret;
-       int local_fd = -1;
-
-       LTTNG_ASSERT(event_notifier_group_fd);
-
-       /* Kernel event notifier group creation. */
-       ret = kernctl_create_event_notifier_group(kernel_tracer_fd);
-       if (ret < 0) {
-               PERROR("Failed to create kernel event notifier group");
-               ret = -1;
-               goto error;
-       }
-
-       local_fd = ret;
-
-       /* Prevent fd duplication after execlp(). */
-       ret = fcntl(local_fd, F_SETFD, FD_CLOEXEC);
-       if (ret < 0) {
-               PERROR("Failed to set FD_CLOEXEC on kernel event notifier group file descriptor: fd = %d",
-                               local_fd);
-               goto error;
-       }
-
-       DBG("Created kernel event notifier group: fd = %d", local_fd);
-       *event_notifier_group_fd = local_fd;
-       local_fd = -1;
-       ret = 0;
-error:
-       if (local_fd >= 0) {
-               ret = close(local_fd);
-               if (ret) {
-                       PERROR("Failed to close kernel event notifier group file descriptor: fd = %d",
-                                       local_fd);
-               }
-       }
-
-       return ret;
-}
-
-/*
- * Compute the offset of the instrumentation byte in the binary based on the
- * function probe location using the ELF lookup method.
- *
- * Returns 0 on success and set the offset out parameter to the offset of the
- * elf symbol
- * Returns -1 on error
- */
-static
-int extract_userspace_probe_offset_function_elf(
-               const struct lttng_userspace_probe_location *probe_location,
-               uid_t uid, gid_t gid, uint64_t *offset)
-{
-       int fd;
-       int ret = 0;
-       const char *symbol = NULL;
-       const struct lttng_userspace_probe_location_lookup_method *lookup = NULL;
-       enum lttng_userspace_probe_location_lookup_method_type lookup_method_type;
-
-       LTTNG_ASSERT(lttng_userspace_probe_location_get_type(probe_location) ==
-                       LTTNG_USERSPACE_PROBE_LOCATION_TYPE_FUNCTION);
-
-       lookup = lttng_userspace_probe_location_get_lookup_method(
-                       probe_location);
-       if (!lookup) {
-               ret = -1;
-               goto end;
-       }
-
-       lookup_method_type =
-                       lttng_userspace_probe_location_lookup_method_get_type(lookup);
-
-       LTTNG_ASSERT(lookup_method_type ==
-                       LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF);
-
-       symbol = lttng_userspace_probe_location_function_get_function_name(
-                       probe_location);
-       if (!symbol) {
-               ret = -1;
-               goto end;
-       }
-
-       fd = lttng_userspace_probe_location_function_get_binary_fd(probe_location);
-       if (fd < 0) {
-               ret = -1;
-               goto end;
-       }
-
-       ret = run_as_extract_elf_symbol_offset(fd, symbol, uid, gid, offset);
-       if (ret < 0) {
-               DBG("userspace probe offset calculation failed for "
-                               "function %s", symbol);
-               goto end;
-       }
-
-       DBG("userspace probe elf offset for %s is 0x%jd", symbol, (intmax_t)(*offset));
-end:
-       return ret;
-}
-
-/*
- * Compute the offsets of the instrumentation bytes in the binary based on the
- * tracepoint probe location using the SDT lookup method. This function
- * allocates the offsets buffer, the caller must free it.
- *
- * Returns 0 on success and set the offset out parameter to the offsets of the
- * SDT tracepoint.
- * Returns -1 on error.
- */
-static
-int extract_userspace_probe_offset_tracepoint_sdt(
-               const struct lttng_userspace_probe_location *probe_location,
-               uid_t uid, gid_t gid, uint64_t **offsets,
-               uint32_t *offsets_count)
-{
-       enum lttng_userspace_probe_location_lookup_method_type lookup_method_type;
-       const struct lttng_userspace_probe_location_lookup_method *lookup = NULL;
-       const char *probe_name = NULL, *provider_name = NULL;
-       int ret = 0;
-       int fd, i;
-
-       LTTNG_ASSERT(lttng_userspace_probe_location_get_type(probe_location) ==
-                       LTTNG_USERSPACE_PROBE_LOCATION_TYPE_TRACEPOINT);
-
-       lookup = lttng_userspace_probe_location_get_lookup_method(probe_location);
-       if (!lookup) {
-               ret = -1;
-               goto end;
-       }
-
-       lookup_method_type =
-                       lttng_userspace_probe_location_lookup_method_get_type(lookup);
-
-       LTTNG_ASSERT(lookup_method_type ==
-                       LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT);
-
-
-       probe_name = lttng_userspace_probe_location_tracepoint_get_probe_name(
-                       probe_location);
-       if (!probe_name) {
-               ret = -1;
-               goto end;
-       }
-
-       provider_name = lttng_userspace_probe_location_tracepoint_get_provider_name(
-                       probe_location);
-       if (!provider_name) {
-               ret = -1;
-               goto end;
-       }
-
-       fd = lttng_userspace_probe_location_tracepoint_get_binary_fd(probe_location);
-       if (fd < 0) {
-               ret = -1;
-               goto end;
-       }
-
-       ret = run_as_extract_sdt_probe_offsets(fd, provider_name, probe_name,
-                       uid, gid, offsets, offsets_count);
-       if (ret < 0) {
-               DBG("userspace probe offset calculation failed for sdt "
-                               "probe %s:%s", provider_name, probe_name);
-               goto end;
-       }
-
-       if (*offsets_count == 0) {
-               DBG("no userspace probe offset found");
-               goto end;
-       }
-
-       DBG("%u userspace probe SDT offsets found for %s:%s at:",
-                       *offsets_count, provider_name, probe_name);
-       for (i = 0; i < *offsets_count; i++) {
-               DBG("\t0x%jd", (intmax_t)((*offsets)[i]));
-       }
-end:
-       return ret;
-}
-
-static
-int userspace_probe_add_callsite(
-               const struct lttng_userspace_probe_location *location,
-               uid_t uid, gid_t gid, int fd)
-{
-       const struct lttng_userspace_probe_location_lookup_method *lookup_method = NULL;
-       enum lttng_userspace_probe_location_lookup_method_type type;
-       int ret;
-
-       lookup_method = lttng_userspace_probe_location_get_lookup_method(location);
-       if (!lookup_method) {
-               ret = -1;
-               goto end;
-       }
-
-       type = lttng_userspace_probe_location_lookup_method_get_type(lookup_method);
-       switch (type) {
-       case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF:
-       {
-               struct lttng_kernel_abi_event_callsite callsite;
-               uint64_t offset;
-
-               ret = extract_userspace_probe_offset_function_elf(location,
-                               uid, gid, &offset);
-               if (ret) {
-                       ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
-                       goto end;
-               }
-
-               callsite.u.uprobe.offset = offset;
-               ret = kernctl_add_callsite(fd, &callsite);
-               if (ret) {
-                       WARN("Failed to add callsite to ELF userspace probe.");
-                       ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-                       goto end;
-               }
-               break;
-       }
-       case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT:
-       {
-               int i;
-               uint64_t *offsets = NULL;
-               uint32_t offsets_count;
-               struct lttng_kernel_abi_event_callsite callsite;
-
-               /*
-                * This call allocates the offsets buffer. This buffer must be freed
-                * by the caller
-                */
-               ret = extract_userspace_probe_offset_tracepoint_sdt(location,
-                               uid, gid, &offsets, &offsets_count);
-               if (ret) {
-                       ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
-                       goto end;
-               }
-               for (i = 0; i < offsets_count; i++) {
-                       callsite.u.uprobe.offset = offsets[i];
-                       ret = kernctl_add_callsite(fd, &callsite);
-                       if (ret) {
-                               WARN("Failed to add callsite to SDT userspace probe");
-                               ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-                               free(offsets);
-                               goto end;
-                       }
-               }
-               free(offsets);
-               break;
-       }
-       default:
-               ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
-               goto end;
-       }
-end:
-       return ret;
-}
-
-/*
- * Extract the offsets of the instrumentation point for the different lookup
- * methods.
- */
-static
-int userspace_probe_event_add_callsites(struct lttng_event *ev,
-                       struct ltt_kernel_session *session, int fd)
-{
-       int ret;
-       const struct lttng_userspace_probe_location *location = NULL;
-
-       LTTNG_ASSERT(ev);
-       LTTNG_ASSERT(ev->type == LTTNG_EVENT_USERSPACE_PROBE);
-
-       location = lttng_event_get_userspace_probe_location(ev);
-       if (!location) {
-               ret = -1;
-               goto end;
-       }
-
-       ret = userspace_probe_add_callsite(location, session->uid, session->gid,
-               fd);
-       if (ret) {
-               WARN("Failed to add callsite to userspace probe event '%s'",
-                               ev->name);
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Extract the offsets of the instrumentation point for the different look-up
- * methods.
- */
-static int userspace_probe_event_rule_add_callsites(
-               const struct lttng_event_rule *rule,
-               const struct lttng_credentials *creds,
-               int fd)
-{
-       int ret;
-       enum lttng_event_rule_status status;
-       enum lttng_event_rule_type event_rule_type;
-       const struct lttng_userspace_probe_location *location = NULL;
-
-       LTTNG_ASSERT(rule);
-       LTTNG_ASSERT(creds);
-
-       event_rule_type = lttng_event_rule_get_type(rule);
-       LTTNG_ASSERT(event_rule_type == LTTNG_EVENT_RULE_TYPE_KERNEL_UPROBE);
-
-       status = lttng_event_rule_kernel_uprobe_get_location(rule, &location);
-       if (status != LTTNG_EVENT_RULE_STATUS_OK || !location) {
-               ret = -1;
-               goto end;
-       }
-
-       ret = userspace_probe_add_callsite(location,
-                       lttng_credentials_get_uid(creds),
-                       lttng_credentials_get_gid(creds), fd);
-       if (ret) {
-               WARN("Failed to add callsite to user space probe object: fd = %d",
-                               fd);
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Create a kernel event, enable it to the kernel tracer and add it to the
- * channel event list of the kernel session.
- * We own filter_expression and filter.
- */
-int kernel_create_event(struct lttng_event *ev,
-               struct ltt_kernel_channel *channel,
-               char *filter_expression,
-               struct lttng_bytecode *filter)
-{
-       int err, fd;
-       enum lttng_error_code ret;
-       struct ltt_kernel_event *event;
-
-       LTTNG_ASSERT(ev);
-       LTTNG_ASSERT(channel);
-
-       /* We pass ownership of filter_expression and filter */
-       ret = trace_kernel_create_event(ev, filter_expression,
-                       filter, &event);
-       if (ret != LTTNG_OK) {
-               goto error;
-       }
-
-       fd = kernctl_create_event(channel->fd, event->event);
-       if (fd < 0) {
-               switch (-fd) {
-               case EEXIST:
-                       ret = LTTNG_ERR_KERN_EVENT_EXIST;
-                       break;
-               case ENOSYS:
-                       WARN("Event type not implemented");
-                       ret = LTTNG_ERR_KERN_EVENT_ENOSYS;
-                       break;
-               case ENOENT:
-                       WARN("Event %s not found!", ev->name);
-                       ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-                       break;
-               default:
-                       ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-                       PERROR("create event ioctl");
-               }
-               goto free_event;
-       }
-
-       event->type = ev->type;
-       event->fd = fd;
-       /* Prevent fd duplication after execlp() */
-       err = fcntl(event->fd, F_SETFD, FD_CLOEXEC);
-       if (err < 0) {
-               PERROR("fcntl session fd");
-       }
-
-       if (filter) {
-               err = kernctl_filter(event->fd, filter);
-               if (err < 0) {
-                       switch (-err) {
-                       case ENOMEM:
-                               ret = LTTNG_ERR_FILTER_NOMEM;
-                               break;
-                       default:
-                               ret = LTTNG_ERR_FILTER_INVAL;
-                               break;
-                       }
-                       goto filter_error;
-               }
-       }
-
-       if (ev->type == LTTNG_EVENT_USERSPACE_PROBE) {
-               ret = userspace_probe_event_add_callsites(ev, channel->session,
-                               event->fd);
-               if (ret) {
-                       goto add_callsite_error;
-               }
-       }
-
-       err = kernctl_enable(event->fd);
-       if (err < 0) {
-               switch (-err) {
-               case EEXIST:
-                       ret = LTTNG_ERR_KERN_EVENT_EXIST;
-                       break;
-               default:
-                       PERROR("enable kernel event");
-                       ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-                       break;
-               }
-               goto enable_error;
-       }
-
-       /* Add event to event list */
-       cds_list_add(&event->list, &channel->events_list.head);
-       channel->event_count++;
-
-       DBG("Event %s created (fd: %d)", ev->name, event->fd);
-
-       return 0;
-
-add_callsite_error:
-enable_error:
-filter_error:
-       {
-               int closeret;
-
-               closeret = close(event->fd);
-               if (closeret) {
-                       PERROR("close event fd");
-               }
-       }
-free_event:
-       free(event);
-error:
-       return ret;
-}
-
-/*
- * Disable a kernel channel.
- */
-int kernel_disable_channel(struct ltt_kernel_channel *chan)
-{
-       int ret;
-
-       LTTNG_ASSERT(chan);
-
-       ret = kernctl_disable(chan->fd);
-       if (ret < 0) {
-               PERROR("disable chan ioctl");
-               goto error;
-       }
-
-       chan->enabled = 0;
-       DBG("Kernel channel %s disabled (fd: %d, key: %" PRIu64 ")",
-                       chan->channel->name, chan->fd, chan->key);
-
-       return 0;
-
-error:
-       return ret;
-}
-
-/*
- * Enable a kernel channel.
- */
-int kernel_enable_channel(struct ltt_kernel_channel *chan)
-{
-       int ret;
-
-       LTTNG_ASSERT(chan);
-
-       ret = kernctl_enable(chan->fd);
-       if (ret < 0 && ret != -EEXIST) {
-               PERROR("Enable kernel chan");
-               goto error;
-       }
-
-       chan->enabled = 1;
-       DBG("Kernel channel %s enabled (fd: %d, key: %" PRIu64 ")",
-                       chan->channel->name, chan->fd, chan->key);
-
-       return 0;
-
-error:
-       return ret;
-}
-
-/*
- * Enable a kernel event.
- */
-int kernel_enable_event(struct ltt_kernel_event *event)
-{
-       int ret;
-
-       LTTNG_ASSERT(event);
-
-       ret = kernctl_enable(event->fd);
-       if (ret < 0) {
-               switch (-ret) {
-               case EEXIST:
-                       ret = LTTNG_ERR_KERN_EVENT_EXIST;
-                       break;
-               default:
-                       PERROR("enable kernel event");
-                       break;
-               }
-               goto error;
-       }
-
-       event->enabled = 1;
-       DBG("Kernel event %s enabled (fd: %d)", event->event->name, event->fd);
-
-       return 0;
-
-error:
-       return ret;
-}
-
-/*
- * Disable a kernel event.
- */
-int kernel_disable_event(struct ltt_kernel_event *event)
-{
-       int ret;
-
-       LTTNG_ASSERT(event);
-
-       ret = kernctl_disable(event->fd);
-       if (ret < 0) {
-               PERROR("Failed to disable kernel event: name = '%s', fd = %d",
-                               event->event->name, event->fd);
-               goto error;
-       }
-
-       event->enabled = 0;
-       DBG("Kernel event %s disabled (fd: %d)", event->event->name, event->fd);
-
-       return 0;
-
-error:
-       return ret;
-}
-
-/*
- * Disable a kernel event notifier.
- */
-static
-int kernel_disable_event_notifier_rule(struct ltt_kernel_event_notifier_rule *event)
-{
-       int ret;
-
-       LTTNG_ASSERT(event);
-
-       rcu_read_lock();
-       cds_lfht_del(kernel_token_to_event_notifier_rule_ht, &event->ht_node);
-       rcu_read_unlock();
-
-       ret = kernctl_disable(event->fd);
-       if (ret < 0) {
-               PERROR("Failed to disable kernel event notifier: fd = %d, token = %" PRIu64,
-                               event->fd, event->token);
-               goto error;
-       }
-
-       event->enabled = 0;
-       DBG("Disabled kernel event notifier: fd = %d, token = %" PRIu64,
-                       event->fd, event->token);
-
-error:
-       return ret;
-}
-
-static
-struct process_attr_tracker *_kernel_get_process_attr_tracker(
-               struct ltt_kernel_session *session,
-               enum lttng_process_attr process_attr)
-{
-       switch (process_attr) {
-       case LTTNG_PROCESS_ATTR_PROCESS_ID:
-               return session->tracker_pid;
-       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
-               return session->tracker_vpid;
-       case LTTNG_PROCESS_ATTR_USER_ID:
-               return session->tracker_uid;
-       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
-               return session->tracker_vuid;
-       case LTTNG_PROCESS_ATTR_GROUP_ID:
-               return session->tracker_gid;
-       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
-               return session->tracker_vgid;
-       default:
-               return NULL;
-       }
-}
-
-const struct process_attr_tracker *kernel_get_process_attr_tracker(
-               struct ltt_kernel_session *session,
-               enum lttng_process_attr process_attr)
-{
-       return (const struct process_attr_tracker *)
-                       _kernel_get_process_attr_tracker(session, process_attr);
-}
-
-enum lttng_error_code kernel_process_attr_tracker_set_tracking_policy(
-               struct ltt_kernel_session *session,
-               enum lttng_process_attr process_attr,
-               enum lttng_tracking_policy policy)
-{
-       int ret;
-       enum lttng_error_code ret_code = LTTNG_OK;
-       struct process_attr_tracker *tracker =
-                       _kernel_get_process_attr_tracker(session, process_attr);
-       enum lttng_tracking_policy previous_policy;
-
-       if (!tracker) {
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       previous_policy = process_attr_tracker_get_tracking_policy(tracker);
-       ret = process_attr_tracker_set_tracking_policy(tracker, policy);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-
-       if (previous_policy == policy) {
-               goto end;
-       }
-
-       switch (policy) {
-       case LTTNG_TRACKING_POLICY_INCLUDE_ALL:
-               if (process_attr == LTTNG_PROCESS_ATTR_PROCESS_ID) {
-                       /*
-                        * Maintain a special case for the process ID process
-                        * attribute tracker as it was the only supported
-                        * attribute prior to 2.12.
-                        */
-                       ret = kernctl_track_pid(session->fd, -1);
-               } else {
-                       ret = kernctl_track_id(session->fd, process_attr, -1);
-               }
-               break;
-       case LTTNG_TRACKING_POLICY_EXCLUDE_ALL:
-       case LTTNG_TRACKING_POLICY_INCLUDE_SET:
-               /* fall-through. */
-               if (process_attr == LTTNG_PROCESS_ATTR_PROCESS_ID) {
-                       /*
-                        * Maintain a special case for the process ID process
-                        * attribute tracker as it was the only supported
-                        * attribute prior to 2.12.
-                        */
-                       ret = kernctl_untrack_pid(session->fd, -1);
-               } else {
-                       ret = kernctl_untrack_id(session->fd, process_attr, -1);
-               }
-               break;
-       default:
-               abort();
-       }
-       /* kern-ctl error handling */
-       switch (-ret) {
-       case 0:
-               ret_code = LTTNG_OK;
-               break;
-       case EINVAL:
-               ret_code = LTTNG_ERR_INVALID;
-               break;
-       case ENOMEM:
-               ret_code = LTTNG_ERR_NOMEM;
-               break;
-       case EEXIST:
-               ret_code = LTTNG_ERR_PROCESS_ATTR_EXISTS;
-               break;
-       default:
-               ret_code = LTTNG_ERR_UNK;
-               break;
-       }
-end:
-       return ret_code;
-}
-
-enum lttng_error_code kernel_process_attr_tracker_inclusion_set_add_value(
-               struct ltt_kernel_session *session,
-               enum lttng_process_attr process_attr,
-               const struct process_attr_value *value)
-{
-       int ret, integral_value;
-       enum lttng_error_code ret_code;
-       struct process_attr_tracker *tracker;
-       enum process_attr_tracker_status status;
-
-       /*
-        * Convert process attribute tracker value to the integral
-        * representation required by the kern-ctl API.
-        */
-       switch (process_attr) {
-       case LTTNG_PROCESS_ATTR_PROCESS_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
-               integral_value = (int) value->value.pid;
-               break;
-       case LTTNG_PROCESS_ATTR_USER_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
-               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_USER_NAME) {
-                       uid_t uid;
-
-                       ret_code = utils_user_id_from_name(
-                                       value->value.user_name, &uid);
-                       if (ret_code != LTTNG_OK) {
-                               goto end;
-                       }
-                       integral_value = (int) uid;
-               } else {
-                       integral_value = (int) value->value.uid;
-               }
-               break;
-       case LTTNG_PROCESS_ATTR_GROUP_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
-               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_GROUP_NAME) {
-                       gid_t gid;
-
-                       ret_code = utils_group_id_from_name(
-                                       value->value.group_name, &gid);
-                       if (ret_code != LTTNG_OK) {
-                               goto end;
-                       }
-                       integral_value = (int) gid;
-               } else {
-                       integral_value = (int) value->value.gid;
-               }
-               break;
-       default:
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       tracker = _kernel_get_process_attr_tracker(session, process_attr);
-       if (!tracker) {
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       status = process_attr_tracker_inclusion_set_add_value(tracker, value);
-       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
-               switch (status) {
-               case PROCESS_ATTR_TRACKER_STATUS_EXISTS:
-                       ret_code = LTTNG_ERR_PROCESS_ATTR_EXISTS;
-                       break;
-               case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
-                       ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
-                       break;
-               case PROCESS_ATTR_TRACKER_STATUS_ERROR:
-               default:
-                       ret_code = LTTNG_ERR_UNK;
-                       break;
-               }
-               goto end;
-       }
-
-       DBG("Kernel track %s %d for session id %" PRIu64,
-                       lttng_process_attr_to_string(process_attr),
-                       integral_value, session->id);
-       if (process_attr == LTTNG_PROCESS_ATTR_PROCESS_ID) {
-               /*
-                * Maintain a special case for the process ID process attribute
-                * tracker as it was the only supported attribute prior to 2.12.
-                */
-               ret = kernctl_track_pid(session->fd, integral_value);
-       } else {
-               ret = kernctl_track_id(
-                               session->fd, process_attr, integral_value);
-       }
-       if (ret == 0) {
-               ret_code = LTTNG_OK;
-               goto end;
-       }
-
-       kernel_wait_quiescent();
-
-       /* kern-ctl error handling */
-       switch (-ret) {
-       case 0:
-               ret_code = LTTNG_OK;
-               break;
-       case EINVAL:
-               ret_code = LTTNG_ERR_INVALID;
-               break;
-       case ENOMEM:
-               ret_code = LTTNG_ERR_NOMEM;
-               break;
-       case EEXIST:
-               ret_code = LTTNG_ERR_PROCESS_ATTR_EXISTS;
-               break;
-       default:
-               ret_code = LTTNG_ERR_UNK;
-               break;
-       }
-
-       /* Attempt to remove the value from the tracker. */
-       status = process_attr_tracker_inclusion_set_remove_value(
-                       tracker, value);
-       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
-               ERR("Failed to roll-back the tracking of kernel %s process attribute %d while handling a kern-ctl error",
-                               lttng_process_attr_to_string(process_attr),
-                               integral_value);
-       }
-end:
-       return ret_code;
-}
-
-enum lttng_error_code kernel_process_attr_tracker_inclusion_set_remove_value(
-               struct ltt_kernel_session *session,
-               enum lttng_process_attr process_attr,
-               const struct process_attr_value *value)
-{
-       int ret, integral_value;
-       enum lttng_error_code ret_code;
-       struct process_attr_tracker *tracker;
-       enum process_attr_tracker_status status;
-
-       /*
-        * Convert process attribute tracker value to the integral
-        * representation required by the kern-ctl API.
-        */
-       switch (process_attr) {
-       case LTTNG_PROCESS_ATTR_PROCESS_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
-               integral_value = (int) value->value.pid;
-               break;
-       case LTTNG_PROCESS_ATTR_USER_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
-               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_USER_NAME) {
-                       uid_t uid;
-
-                       ret_code = utils_user_id_from_name(
-                                       value->value.user_name, &uid);
-                       if (ret_code != LTTNG_OK) {
-                               goto end;
-                       }
-                       integral_value = (int) uid;
-               } else {
-                       integral_value = (int) value->value.uid;
-               }
-               break;
-       case LTTNG_PROCESS_ATTR_GROUP_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
-               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_GROUP_NAME) {
-                       gid_t gid;
-
-                       ret_code = utils_group_id_from_name(
-                                       value->value.group_name, &gid);
-                       if (ret_code != LTTNG_OK) {
-                               goto end;
-                       }
-                       integral_value = (int) gid;
-               } else {
-                       integral_value = (int) value->value.gid;
-               }
-               break;
-       default:
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       tracker = _kernel_get_process_attr_tracker(session, process_attr);
-       if (!tracker) {
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       status = process_attr_tracker_inclusion_set_remove_value(
-                       tracker, value);
-       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
-               switch (status) {
-               case PROCESS_ATTR_TRACKER_STATUS_MISSING:
-                       ret_code = LTTNG_ERR_PROCESS_ATTR_MISSING;
-                       break;
-               case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
-                       ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
-                       break;
-               case PROCESS_ATTR_TRACKER_STATUS_ERROR:
-               default:
-                       ret_code = LTTNG_ERR_UNK;
-                       break;
-               }
-               goto end;
-       }
-
-       DBG("Kernel track %s %d for session id %" PRIu64,
-                       lttng_process_attr_to_string(process_attr),
-                       integral_value, session->id);
-       if (process_attr == LTTNG_PROCESS_ATTR_PROCESS_ID) {
-               /*
-                * Maintain a special case for the process ID process attribute
-                * tracker as it was the only supported attribute prior to 2.12.
-                */
-               ret = kernctl_untrack_pid(session->fd, integral_value);
-       } else {
-               ret = kernctl_untrack_id(
-                               session->fd, process_attr, integral_value);
-       }
-       if (ret == 0) {
-               ret_code = LTTNG_OK;
-               goto end;
-       }
-       kernel_wait_quiescent();
-
-       /* kern-ctl error handling */
-       switch (-ret) {
-       case 0:
-               ret_code = LTTNG_OK;
-               break;
-       case EINVAL:
-               ret_code = LTTNG_ERR_INVALID;
-               break;
-       case ENOMEM:
-               ret_code = LTTNG_ERR_NOMEM;
-               break;
-       case ENOENT:
-               ret_code = LTTNG_ERR_PROCESS_ATTR_MISSING;
-               break;
-       default:
-               ret_code = LTTNG_ERR_UNK;
-               break;
-       }
-
-       /* Attempt to add the value to the tracker. */
-       status = process_attr_tracker_inclusion_set_add_value(
-                       tracker, value);
-       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
-               ERR("Failed to roll-back the tracking of kernel %s process attribute %d while handling a kern-ctl error",
-                               lttng_process_attr_to_string(process_attr),
-                               integral_value);
-       }
-end:
-       return ret_code;
-}
-
-/*
- * Create kernel metadata, open from the kernel tracer and add it to the
- * kernel session.
- */
-int kernel_open_metadata(struct ltt_kernel_session *session)
-{
-       int ret;
-       struct ltt_kernel_metadata *lkm = NULL;
-
-       LTTNG_ASSERT(session);
-
-       /* Allocate kernel metadata */
-       lkm = trace_kernel_create_metadata();
-       if (lkm == NULL) {
-               goto error;
-       }
-
-       /* Kernel tracer metadata creation */
-       ret = kernctl_open_metadata(session->fd, &lkm->conf->attr);
-       if (ret < 0) {
-               goto error_open;
-       }
-
-       lkm->fd = ret;
-       lkm->key = ++next_kernel_channel_key;
-       /* Prevent fd duplication after execlp() */
-       ret = fcntl(lkm->fd, F_SETFD, FD_CLOEXEC);
-       if (ret < 0) {
-               PERROR("fcntl session fd");
-       }
-
-       session->metadata = lkm;
-
-       DBG("Kernel metadata opened (fd: %d)", lkm->fd);
-
-       return 0;
-
-error_open:
-       trace_kernel_destroy_metadata(lkm);
-error:
-       return -1;
-}
-
-/*
- * Start tracing session.
- */
-int kernel_start_session(struct ltt_kernel_session *session)
-{
-       int ret;
-
-       LTTNG_ASSERT(session);
-
-       ret = kernctl_start_session(session->fd);
-       if (ret < 0) {
-               PERROR("ioctl start session");
-               goto error;
-       }
-
-       DBG("Kernel session started");
-
-       return 0;
-
-error:
-       return ret;
-}
-
-/*
- * Make a kernel wait to make sure in-flight probes have completed.
- */
-void kernel_wait_quiescent(void)
-{
-       int ret;
-       int fd = kernel_tracer_fd;
-
-       DBG("Kernel quiescent wait on %d", fd);
-
-       ret = kernctl_wait_quiescent(fd);
-       if (ret < 0) {
-               PERROR("wait quiescent ioctl");
-               ERR("Kernel quiescent wait failed");
-       }
-}
-
-/*
- *  Force flush buffer of metadata.
- */
-int kernel_metadata_flush_buffer(int fd)
-{
-       int ret;
-
-       DBG("Kernel flushing metadata buffer on fd %d", fd);
-
-       ret = kernctl_buffer_flush(fd);
-       if (ret < 0) {
-               ERR("Failed to flush metadata buffers %d (ret: %d)", fd, ret);
-       }
-
-       return 0;
-}
-
-/*
- * Force flush buffer for channel.
- */
-int kernel_flush_buffer(struct ltt_kernel_channel *channel)
-{
-       int ret;
-       struct ltt_kernel_stream *stream;
-
-       LTTNG_ASSERT(channel);
-
-       DBG("Flush buffer for channel %s", channel->channel->name);
-
-       cds_list_for_each_entry(stream, &channel->stream_list.head, list) {
-               DBG("Flushing channel stream %d", stream->fd);
-               ret = kernctl_buffer_flush(stream->fd);
-               if (ret < 0) {
-                       PERROR("ioctl");
-                       ERR("Failed to flush buffer for stream %d (ret: %d)",
-                                       stream->fd, ret);
-               }
-       }
-
-       return 0;
-}
-
-/*
- * Stop tracing session.
- */
-int kernel_stop_session(struct ltt_kernel_session *session)
-{
-       int ret;
-
-       LTTNG_ASSERT(session);
-
-       ret = kernctl_stop_session(session->fd);
-       if (ret < 0) {
-               goto error;
-       }
-
-       DBG("Kernel session stopped");
-
-       return 0;
-
-error:
-       return ret;
-}
-
-/*
- * Open stream of channel, register it to the kernel tracer and add it
- * to the stream list of the channel.
- *
- * Note: given that the streams may appear in random order wrt CPU
- * number (e.g. cpu hotplug), the index value of the stream number in
- * the stream name is not necessarily linked to the CPU number.
- *
- * Return the number of created streams. Else, a negative value.
- */
-int kernel_open_channel_stream(struct ltt_kernel_channel *channel)
-{
-       int ret;
-       struct ltt_kernel_stream *lks;
-
-       LTTNG_ASSERT(channel);
-
-       while ((ret = kernctl_create_stream(channel->fd)) >= 0) {
-               lks = trace_kernel_create_stream(channel->channel->name,
-                               channel->stream_count);
-               if (lks == NULL) {
-                       ret = close(ret);
-                       if (ret) {
-                               PERROR("close");
-                       }
-                       goto error;
-               }
-
-               lks->fd = ret;
-               /* Prevent fd duplication after execlp() */
-               ret = fcntl(lks->fd, F_SETFD, FD_CLOEXEC);
-               if (ret < 0) {
-                       PERROR("fcntl session fd");
-               }
-
-               lks->tracefile_size = channel->channel->attr.tracefile_size;
-               lks->tracefile_count = channel->channel->attr.tracefile_count;
-
-               /* Add stream to channel stream list */
-               cds_list_add(&lks->list, &channel->stream_list.head);
-               channel->stream_count++;
-
-               DBG("Kernel stream %s created (fd: %d, state: %d)", lks->name, lks->fd,
-                               lks->state);
-       }
-
-       return channel->stream_count;
-
-error:
-       return -1;
-}
-
-/*
- * Open the metadata stream and set it to the kernel session.
- */
-int kernel_open_metadata_stream(struct ltt_kernel_session *session)
-{
-       int ret;
-
-       LTTNG_ASSERT(session);
-
-       ret = kernctl_create_stream(session->metadata->fd);
-       if (ret < 0) {
-               PERROR("kernel create metadata stream");
-               goto error;
-       }
-
-       DBG("Kernel metadata stream created (fd: %d)", ret);
-       session->metadata_stream_fd = ret;
-       /* Prevent fd duplication after execlp() */
-       ret = fcntl(session->metadata_stream_fd, F_SETFD, FD_CLOEXEC);
-       if (ret < 0) {
-               PERROR("fcntl session fd");
-       }
-
-       return 0;
-
-error:
-       return -1;
-}
-
-/*
- * Get the event list from the kernel tracer and return the number of elements.
- */
-ssize_t kernel_list_events(struct lttng_event **events)
-{
-       int fd, ret;
-       char *event;
-       size_t nbmem, count = 0;
-       FILE *fp;
-       struct lttng_event *elist;
-
-       LTTNG_ASSERT(events);
-
-       fd = kernctl_tracepoint_list(kernel_tracer_fd);
-       if (fd < 0) {
-               PERROR("kernel tracepoint list");
-               goto error;
-       }
-
-       fp = fdopen(fd, "r");
-       if (fp == NULL) {
-               PERROR("kernel tracepoint list fdopen");
-               goto error_fp;
-       }
-
-       /*
-        * Init memory size counter
-        * See kernel-ctl.h for explanation of this value
-        */
-       nbmem = KERNEL_EVENT_INIT_LIST_SIZE;
-       elist = zmalloc(sizeof(struct lttng_event) * nbmem);
-       if (elist == NULL) {
-               PERROR("alloc list events");
-               count = -ENOMEM;
-               goto end;
-       }
-
-       while (fscanf(fp, "event { name = %m[^;]; };\n", &event) == 1) {
-               if (count >= nbmem) {
-                       struct lttng_event *new_elist;
-                       size_t new_nbmem;
-
-                       new_nbmem = nbmem << 1;
-                       DBG("Reallocating event list from %zu to %zu entries",
-                                       nbmem, new_nbmem);
-                       new_elist = realloc(elist, new_nbmem * sizeof(struct lttng_event));
-                       if (new_elist == NULL) {
-                               PERROR("realloc list events");
-                               free(event);
-                               free(elist);
-                               count = -ENOMEM;
-                               goto end;
-                       }
-                       /* Zero the new memory */
-                       memset(new_elist + nbmem, 0,
-                               (new_nbmem - nbmem) * sizeof(struct lttng_event));
-                       nbmem = new_nbmem;
-                       elist = new_elist;
-               }
-               strncpy(elist[count].name, event, LTTNG_SYMBOL_NAME_LEN);
-               elist[count].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
-               elist[count].enabled = -1;
-               count++;
-               free(event);
-       }
-
-       *events = elist;
-       DBG("Kernel list events done (%zu events)", count);
-end:
-       ret = fclose(fp);       /* closes both fp and fd */
-       if (ret) {
-               PERROR("fclose");
-       }
-       return count;
-
-error_fp:
-       ret = close(fd);
-       if (ret) {
-               PERROR("close");
-       }
-error:
-       return -1;
-}
-
-/*
- * Get kernel version and validate it.
- */
-int kernel_validate_version(struct lttng_kernel_abi_tracer_version *version,
-               struct lttng_kernel_abi_tracer_abi_version *abi_version)
-{
-       int ret;
-
-       ret = kernctl_tracer_version(kernel_tracer_fd, version);
-       if (ret < 0) {
-               ERR("Failed to retrieve the lttng-modules version");
-               goto error;
-       }
-
-       /* Validate version */
-       if (version->major != VERSION_MAJOR) {
-               ERR("Kernel tracer major version (%d) is not compatible with lttng-tools major version (%d)",
-                       version->major, VERSION_MAJOR);
-               goto error_version;
-       }
-       ret = kernctl_tracer_abi_version(kernel_tracer_fd, abi_version);
-       if (ret < 0) {
-               ERR("Failed to retrieve lttng-modules ABI version");
-               goto error;
-       }
-       if (abi_version->major != LTTNG_KERNEL_ABI_MAJOR_VERSION) {
-               ERR("Kernel tracer ABI version (%d.%d) does not match the expected ABI major version (%d.*)",
-                       abi_version->major, abi_version->minor,
-                       LTTNG_KERNEL_ABI_MAJOR_VERSION);
-               goto error;
-       }
-       DBG2("Kernel tracer version validated (%d.%d, ABI %d.%d)",
-                       version->major, version->minor,
-                       abi_version->major, abi_version->minor);
-       return 0;
-
-error_version:
-       ret = -1;
-
-error:
-       ERR("Kernel tracer version check failed; kernel tracing will not be available");
-       return ret;
-}
-
-/*
- * Kernel work-arounds called at the start of sessiond main().
- */
-int init_kernel_workarounds(void)
-{
-       int ret;
-       FILE *fp;
-
-       /*
-        * boot_id needs to be read once before being used concurrently
-        * to deal with a Linux kernel race. A fix is proposed for
-        * upstream, but the work-around is needed for older kernels.
-        */
-       fp = fopen("/proc/sys/kernel/random/boot_id", "r");
-       if (!fp) {
-               goto end_boot_id;
-       }
-       while (!feof(fp)) {
-               char buf[37] = "";
-
-               ret = fread(buf, 1, sizeof(buf), fp);
-               if (ret < 0) {
-                       /* Ignore error, we don't really care */
-               }
-       }
-       ret = fclose(fp);
-       if (ret) {
-               PERROR("fclose");
-       }
-end_boot_id:
-       return 0;
-}
-
-/*
- * Teardown of a kernel session, keeping data required by destroy notifiers.
- */
-void kernel_destroy_session(struct ltt_kernel_session *ksess)
-{
-       struct lttng_trace_chunk *trace_chunk;
-
-       if (ksess == NULL) {
-               DBG3("No kernel session when tearing down session");
-               return;
-       }
-
-       DBG("Tearing down kernel session");
-       trace_chunk = ksess->current_trace_chunk;
-
-       /*
-        * Destroy channels on the consumer side if at least one FD has been sent
-        * and we are in no-output mode, because the streams are in *no* monitor
-        * mode; we have to send a command to clean them up or else they are leaked.
-        */
-       if (!ksess->output_traces && ksess->consumer_fds_sent) {
-               int ret;
-               struct consumer_socket *socket;
-               struct lttng_ht_iter iter;
-
-               /* For each consumer socket. */
-               rcu_read_lock();
-               cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
-                               socket, node.node) {
-                       struct ltt_kernel_channel *chan;
-
-                       /* For each channel, ask the consumer to destroy it. */
-                       cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
-                               ret = kernel_consumer_destroy_channel(socket, chan);
-                               if (ret < 0) {
-                                       /* Consumer is probably dead. Use next socket. */
-                                       continue;
-                               }
-                       }
-               }
-               rcu_read_unlock();
-       }
-
-       /* Close any relayd session */
-       consumer_output_send_destroy_relayd(ksess->consumer);
-
-       trace_kernel_destroy_session(ksess);
-       lttng_trace_chunk_put(trace_chunk);
-}
-
-/* Teardown of data required by destroy notifiers. */
-void kernel_free_session(struct ltt_kernel_session *ksess)
-{
-       if (ksess == NULL) {
-               return;
-       }
-       trace_kernel_free_session(ksess);
-}
-
-/*
- * Destroy a kernel channel object. It does not do anything on the tracer side.
- */
-void kernel_destroy_channel(struct ltt_kernel_channel *kchan)
-{
-       struct ltt_kernel_session *ksess = NULL;
-
-       LTTNG_ASSERT(kchan);
-       LTTNG_ASSERT(kchan->channel);
-
-       DBG3("Kernel destroy channel %s", kchan->channel->name);
-
-       /* Update channel count of associated session. */
-       if (kchan->session) {
-               /* Keep pointer reference so we can update it after the destroy. */
-               ksess = kchan->session;
-       }
-
-       trace_kernel_destroy_channel(kchan);
-
-       /*
-        * At this point the kernel channel is not visible anymore. This is safe
-        * since in order to work on a visible kernel session, the tracing session
-        * lock (ltt_session.lock) MUST be acquired.
-        */
-       if (ksess) {
-               ksess->channel_count--;
-       }
-}
-
-/*
- * Take a snapshot for a given kernel session.
- *
- * Return LTTNG_OK on success or else return a LTTNG_ERR code.
- */
-enum lttng_error_code kernel_snapshot_record(
-               struct ltt_kernel_session *ksess,
-               const struct consumer_output *output, int wait,
-               uint64_t nb_packets_per_stream)
-{
-       int err, ret, saved_metadata_fd;
-       enum lttng_error_code status = LTTNG_OK;
-       struct consumer_socket *socket;
-       struct lttng_ht_iter iter;
-       struct ltt_kernel_metadata *saved_metadata;
-       char *trace_path = NULL;
-       size_t consumer_path_offset = 0;
-
-       LTTNG_ASSERT(ksess);
-       LTTNG_ASSERT(ksess->consumer);
-       LTTNG_ASSERT(output);
-
-       DBG("Kernel snapshot record started");
-
-       /* Save current metadata since the following calls will change it. */
-       saved_metadata = ksess->metadata;
-       saved_metadata_fd = ksess->metadata_stream_fd;
-
-       rcu_read_lock();
-
-       ret = kernel_open_metadata(ksess);
-       if (ret < 0) {
-               status = LTTNG_ERR_KERN_META_FAIL;
-               goto error;
-       }
-
-       ret = kernel_open_metadata_stream(ksess);
-       if (ret < 0) {
-               status = LTTNG_ERR_KERN_META_FAIL;
-               goto error_open_stream;
-       }
-
-       trace_path = setup_channel_trace_path(ksess->consumer,
-                       "", &consumer_path_offset);
-       if (!trace_path) {
-               status = LTTNG_ERR_INVALID;
-               goto error;
-       }
-       /* Send metadata to consumer and snapshot everything. */
-       cds_lfht_for_each_entry(output->socks->ht, &iter.iter,
-                       socket, node.node) {
-               struct ltt_kernel_channel *chan;
-
-               pthread_mutex_lock(socket->lock);
-               /* This stream must not be monitored by the consumer. */
-               ret = kernel_consumer_add_metadata(socket, ksess, 0);
-               pthread_mutex_unlock(socket->lock);
-               if (ret < 0) {
-                       status = LTTNG_ERR_KERN_META_FAIL;
-                       goto error_consumer;
-               }
-
-               /* For each channel, ask the consumer to snapshot it. */
-               cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
-                       status = consumer_snapshot_channel(socket, chan->key, output, 0,
-                                       ksess->uid, ksess->gid,
-                                       &trace_path[consumer_path_offset], wait,
-                                       nb_packets_per_stream);
-                       if (status != LTTNG_OK) {
-                               (void) kernel_consumer_destroy_metadata(socket,
-                                               ksess->metadata);
-                               goto error_consumer;
-                       }
-               }
-
-               /* Snapshot the metadata. */
-               status = consumer_snapshot_channel(socket, ksess->metadata->key, output,
-                               1, ksess->uid, ksess->gid, &trace_path[consumer_path_offset],
-                               wait, 0);
-               if (status != LTTNG_OK) {
-                       goto error_consumer;
-               }
-
-               /*
-                * The metadata snapshot is done, ask the consumer to destroy it since
-                * it's not monitored on the consumer side.
-                */
-               (void) kernel_consumer_destroy_metadata(socket, ksess->metadata);
-       }
-
-error_consumer:
-       /* Close newly opened metadata stream. It's now on the consumer side. */
-       err = close(ksess->metadata_stream_fd);
-       if (err < 0) {
-               PERROR("close snapshot kernel");
-       }
-
-error_open_stream:
-       trace_kernel_destroy_metadata(ksess->metadata);
-error:
-       /* Restore metadata state.*/
-       ksess->metadata = saved_metadata;
-       ksess->metadata_stream_fd = saved_metadata_fd;
-       rcu_read_unlock();
-       free(trace_path);
-       return status;
-}
-
-/*
- * Get the syscall mask array from the kernel tracer.
- *
- * Return 0 on success, else a negative value. In both cases, syscall_mask should
- * be freed.
- */
-int kernel_syscall_mask(int chan_fd, char **syscall_mask, uint32_t *nr_bits)
-{
-       LTTNG_ASSERT(syscall_mask);
-       LTTNG_ASSERT(nr_bits);
-
-       return kernctl_syscall_mask(chan_fd, syscall_mask, nr_bits);
-}
-
-static
-int kernel_tracer_abi_greater_or_equal(unsigned int major, unsigned int minor)
-{
-       int ret;
-       struct lttng_kernel_abi_tracer_abi_version abi;
-
-       ret = kernctl_tracer_abi_version(kernel_tracer_fd, &abi);
-       if (ret < 0) {
-               ERR("Failed to retrieve lttng-modules ABI version");
-               goto error;
-       }
-
-       ret = abi.major > major || (abi.major == major && abi.minor >= minor);
-error:
-       return ret;
-}
-
-/*
- * Check for the support of the RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS via abi
- * version number.
- *
- * Return 1 on success, 0 when feature is not supported, negative value in case
- * of errors.
- */
-int kernel_supports_ring_buffer_snapshot_sample_positions(void)
-{
-       /*
-        * RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS was introduced in 2.3
-        */
-       return kernel_tracer_abi_greater_or_equal(2, 3);
-}
-
-/*
- * Check for the support of the packet sequence number via abi version number.
- *
- * Return 1 on success, 0 when feature is not supported, negative value in case
- * of errors.
- */
-int kernel_supports_ring_buffer_packet_sequence_number(void)
-{
-       /*
-        * Packet sequence number was introduced in LTTng 2.8,
-        * lttng-modules ABI 2.1.
-        */
-       return kernel_tracer_abi_greater_or_equal(2, 1);
-}
-
-/*
- * Check for the support of event notifiers via abi version number.
- *
- * Return 1 on success, 0 when feature is not supported, negative value in case
- * of errors.
- */
-int kernel_supports_event_notifiers(void)
-{
-       /*
-        * Event notifiers were introduced in LTTng 2.13, lttng-modules ABI 2.6.
-        */
-       return kernel_tracer_abi_greater_or_equal(2, 6);
-}
-
-/*
- * Rotate a kernel session.
- *
- * Return LTTNG_OK on success or else an LTTng error code.
- */
-enum lttng_error_code kernel_rotate_session(struct ltt_session *session)
-{
-       int ret;
-       enum lttng_error_code status = LTTNG_OK;
-       struct consumer_socket *socket;
-       struct lttng_ht_iter iter;
-       struct ltt_kernel_session *ksess = session->kernel_session;
-
-       LTTNG_ASSERT(ksess);
-       LTTNG_ASSERT(ksess->consumer);
-
-       DBG("Rotate kernel session %s started (session %" PRIu64 ")",
-                       session->name, session->id);
-
-       rcu_read_lock();
-
-       /*
-        * Note that this loop will end after one iteration given that there is
-        * only one kernel consumer.
-        */
-       cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
-                       socket, node.node) {
-               struct ltt_kernel_channel *chan;
-
-               /* For each channel, ask the consumer to rotate it. */
-               cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
-                       DBG("Rotate kernel channel %" PRIu64 ", session %s",
-                                       chan->key, session->name);
-                       ret = consumer_rotate_channel(socket, chan->key,
-                                       ksess->uid, ksess->gid, ksess->consumer,
-                                       /* is_metadata_channel */ false);
-                       if (ret < 0) {
-                               status = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
-                               goto error;
-                       }
-               }
-
-               /*
-                * Rotate the metadata channel.
-                */
-               ret = consumer_rotate_channel(socket, ksess->metadata->key,
-                               ksess->uid, ksess->gid, ksess->consumer,
-                               /* is_metadata_channel */ true);
-               if (ret < 0) {
-                       status = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
-                       goto error;
-               }
-       }
-
-error:
-       rcu_read_unlock();
-       return status;
-}
-
-enum lttng_error_code kernel_create_channel_subdirectories(
-               const struct ltt_kernel_session *ksess)
-{
-       enum lttng_error_code ret = LTTNG_OK;
-       enum lttng_trace_chunk_status chunk_status;
-
-       rcu_read_lock();
-       LTTNG_ASSERT(ksess->current_trace_chunk);
-
-       /*
-        * Create the index subdirectory which will take care
-        * of implicitly creating the channel's path.
-        */
-       chunk_status = lttng_trace_chunk_create_subdirectory(
-                       ksess->current_trace_chunk,
-                       DEFAULT_KERNEL_TRACE_DIR "/" DEFAULT_INDEX_DIR);
-       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-               ret = LTTNG_ERR_CREATE_DIR_FAIL;
-               goto error;
-       }
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Setup necessary data for kernel tracer action.
- */
-int init_kernel_tracer(void)
-{
-       int ret;
-       bool is_root = !getuid();
-
-       /* Modprobe lttng kernel modules */
-       ret = modprobe_lttng_control();
-       if (ret < 0) {
-               goto error;
-       }
-
-       /* Open debugfs lttng */
-       kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
-       if (kernel_tracer_fd < 0) {
-               DBG("Failed to open %s", module_proc_lttng);
-               goto error_open;
-       }
-
-       /* Validate kernel version */
-       ret = kernel_validate_version(&the_kernel_tracer_version,
-                       &the_kernel_tracer_abi_version);
-       if (ret < 0) {
-               goto error_version;
-       }
-
-       ret = modprobe_lttng_data();
-       if (ret < 0) {
-               goto error_modules;
-       }
-
-       ret = kernel_supports_ring_buffer_snapshot_sample_positions();
-       if (ret < 0) {
-               goto error_modules;
-       }
-       if (ret < 1) {
-               WARN("Kernel tracer does not support buffer monitoring. "
-                       "The monitoring timer of channels in the kernel domain "
-                       "will be set to 0 (disabled).");
-       }
-
-       ret = kernel_supports_event_notifiers();
-       if (ret < 0) {
-               ERR("Failed to check for kernel tracer event notifier support");
-               goto error_modules;
-       }
-       ret = kernel_create_event_notifier_group(&kernel_tracer_event_notifier_group_fd);
-       if (ret < 0) {
-               /* This is not fatal. */
-               WARN("Failed to create kernel event notifier group");
-               kernel_tracer_event_notifier_group_fd = -1;
-       } else {
-               enum event_notifier_error_accounting_status error_accounting_status;
-               enum lttng_error_code error_code_ret =
-                               kernel_create_event_notifier_group_notification_fd(
-                                               &kernel_tracer_event_notifier_group_notification_fd);
-
-               if (error_code_ret != LTTNG_OK) {
-                       goto error_modules;
-               }
-
-               error_accounting_status = event_notifier_error_accounting_register_kernel(
-                               kernel_tracer_event_notifier_group_fd);
-               if (error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-                       ERR("Failed to initialize event notifier error accounting for kernel tracer");
-                       error_code_ret = LTTNG_ERR_EVENT_NOTIFIER_ERROR_ACCOUNTING;
-                       goto error_modules;
-               }
-
-               kernel_token_to_event_notifier_rule_ht = cds_lfht_new(
-                               DEFAULT_HT_SIZE, 1, 0,
-                               CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING,
-                               NULL);
-               if (!kernel_token_to_event_notifier_rule_ht) {
-                       goto error_token_ht;
-               }
-       }
-
-       DBG("Kernel tracer initialized: kernel tracer fd = %d, event notifier group fd = %d, event notifier group notification fd = %d",
-                       kernel_tracer_fd, kernel_tracer_event_notifier_group_fd,
-                       kernel_tracer_event_notifier_group_notification_fd);
-
-       ret = syscall_init_table(kernel_tracer_fd);
-       if (ret < 0) {
-               ERR("Unable to populate syscall table. Syscall tracing won't "
-                       "work for this session daemon.");
-       }
-
-       return 0;
-
-error_version:
-       modprobe_remove_lttng_control();
-       ret = close(kernel_tracer_fd);
-       if (ret) {
-               PERROR("Failed to close kernel tracer file descriptor: fd = %d",
-                               kernel_tracer_fd);
-       }
-
-       kernel_tracer_fd = -1;
-       return LTTNG_ERR_KERN_VERSION;
-
-
-error_token_ht:
-       ret = close(kernel_tracer_event_notifier_group_notification_fd);
-       if (ret) {
-               PERROR("Failed to close kernel tracer event notifier group notification file descriptor: fd = %d",
-                               kernel_tracer_event_notifier_group_notification_fd);
-       }
-
-       kernel_tracer_event_notifier_group_notification_fd = -1;
-
-error_modules:
-       ret = close(kernel_tracer_event_notifier_group_fd);
-       if (ret) {
-               PERROR("Failed to close kernel tracer event notifier group file descriptor: fd = %d",
-                               kernel_tracer_event_notifier_group_fd);
-       }
-
-       kernel_tracer_event_notifier_group_fd = -1;
-
-       ret = close(kernel_tracer_fd);
-       if (ret) {
-               PERROR("Failed to close kernel tracer file descriptor: fd = %d",
-                               kernel_tracer_fd);
-       }
-
-       kernel_tracer_fd = -1;
-
-error_open:
-       modprobe_remove_lttng_control();
-
-error:
-       WARN("No kernel tracer available");
-       kernel_tracer_fd = -1;
-       if (!is_root) {
-               return LTTNG_ERR_NEED_ROOT_SESSIOND;
-       } else {
-               return LTTNG_ERR_KERN_NA;
-       }
-}
-
-void cleanup_kernel_tracer(void)
-{
-       DBG2("Closing kernel event notifier group notification file descriptor");
-       if (kernel_tracer_event_notifier_group_notification_fd >= 0) {
-               int ret = notification_thread_command_remove_tracer_event_source(
-                               the_notification_thread_handle,
-                               kernel_tracer_event_notifier_group_notification_fd);
-               if (ret != LTTNG_OK) {
-                       ERR("Failed to remove kernel event notifier notification from notification thread");
-               }
-
-               ret = close(kernel_tracer_event_notifier_group_notification_fd);
-               if (ret) {
-                       PERROR("Failed to close kernel event notifier group notification file descriptor: fd = %d",
-                                       kernel_tracer_event_notifier_group_notification_fd);
-               }
-
-               kernel_tracer_event_notifier_group_notification_fd = -1;
-       }
-
-       if (kernel_token_to_event_notifier_rule_ht) {
-               const int ret = cds_lfht_destroy(
-                               kernel_token_to_event_notifier_rule_ht, NULL);
-               LTTNG_ASSERT(ret == 0);
-       }
-
-       DBG2("Closing kernel event notifier group file descriptor");
-       if (kernel_tracer_event_notifier_group_fd >= 0) {
-               const int ret = close(kernel_tracer_event_notifier_group_fd);
-
-               if (ret) {
-                       PERROR("Failed to close kernel event notifier group file descriptor: fd = %d",
-                                       kernel_tracer_event_notifier_group_fd);
-               }
-
-               kernel_tracer_event_notifier_group_fd = -1;
-       }
-
-       DBG2("Closing kernel fd");
-       if (kernel_tracer_fd >= 0) {
-               const int ret = close(kernel_tracer_fd);
-
-               if (ret) {
-                       PERROR("Failed to close kernel tracer file descriptor: fd = %d",
-                                       kernel_tracer_fd);
-               }
-
-               kernel_tracer_fd = -1;
-       }
-
-       free(syscall_table);
-}
-
-bool kernel_tracer_is_initialized(void)
-{
-       return kernel_tracer_fd >= 0;
-}
-
-/*
- *  Clear a kernel session.
- *
- * Return LTTNG_OK on success or else an LTTng error code.
- */
-enum lttng_error_code kernel_clear_session(struct ltt_session *session)
-{
-       int ret;
-       enum lttng_error_code status = LTTNG_OK;
-       struct consumer_socket *socket;
-       struct lttng_ht_iter iter;
-       struct ltt_kernel_session *ksess = session->kernel_session;
-
-       LTTNG_ASSERT(ksess);
-       LTTNG_ASSERT(ksess->consumer);
-
-       DBG("Clear kernel session %s (session %" PRIu64 ")",
-                       session->name, session->id);
-
-       rcu_read_lock();
-
-       if (ksess->active) {
-               ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
-               status = LTTNG_ERR_FATAL;
-               goto end;
-       }
-
-       /*
-        * Note that this loop will end after one iteration given that there is
-        * only one kernel consumer.
-        */
-       cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
-                       socket, node.node) {
-               struct ltt_kernel_channel *chan;
-
-               /* For each channel, ask the consumer to clear it. */
-               cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
-                       DBG("Clear kernel channel %" PRIu64 ", session %s",
-                                       chan->key, session->name);
-                       ret = consumer_clear_channel(socket, chan->key);
-                       if (ret < 0) {
-                               goto error;
-                       }
-               }
-
-               if (!ksess->metadata) {
-                       /*
-                        * Nothing to do for the metadata.
-                        * This is a snapshot session.
-                        * The metadata is generated on the fly.
-                        */
-                       continue;
-               }
-
-               /*
-                * Clear the metadata channel.
-                * Metadata channel is not cleared per se but we still need to
-                * perform a rotation operation on it behind the scenes.
-                */
-               ret = consumer_clear_channel(socket, ksess->metadata->key);
-               if (ret < 0) {
-                       goto error;
-               }
-       }
-
-       goto end;
-error:
-       switch (-ret) {
-       case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
-             status = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
-             break;
-       default:
-             status = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
-             break;
-       }
-end:
-       rcu_read_unlock();
-       return status;
-}
-
-enum lttng_error_code kernel_create_event_notifier_group_notification_fd(
-               int *event_notifier_group_notification_fd)
-{
-       int local_fd = -1, ret;
-       enum lttng_error_code error_code_ret;
-
-       LTTNG_ASSERT(event_notifier_group_notification_fd);
-
-       ret = kernctl_create_event_notifier_group_notification_fd(
-                       kernel_tracer_event_notifier_group_fd);
-       if (ret < 0) {
-               PERROR("Failed to create kernel event notifier group notification file descriptor");
-               error_code_ret = LTTNG_ERR_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD;
-               goto error;
-       }
-
-       local_fd = ret;
-
-       /* Prevent fd duplication after execlp(). */
-       ret = fcntl(local_fd, F_SETFD, FD_CLOEXEC);
-       if (ret < 0) {
-               PERROR("Failed to set FD_CLOEXEC on kernel event notifier group notification file descriptor: fd = %d",
-                               local_fd);
-               error_code_ret = LTTNG_ERR_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD;
-               goto error;
-       }
-
-       DBG("Created kernel notifier group notification file descriptor: fd = %d",
-                       local_fd);
-       error_code_ret = LTTNG_OK;
-       *event_notifier_group_notification_fd = local_fd;
-       local_fd = -1;
-
-error:
-       if (local_fd >= 0) {
-               ret = close(local_fd);
-               if (ret) {
-                       PERROR("Failed to close kernel event notifier group notification file descriptor: fd = %d",
-                                       local_fd);
-               }
-       }
-
-       return error_code_ret;
-}
-
-enum lttng_error_code kernel_destroy_event_notifier_group_notification_fd(
-               int event_notifier_group_notification_fd)
-{
-       enum lttng_error_code ret_code = LTTNG_OK;
-
-       DBG("Closing event notifier group notification file descriptor: fd = %d",
-                       event_notifier_group_notification_fd);
-       if (event_notifier_group_notification_fd >= 0) {
-               const int ret = close(event_notifier_group_notification_fd);
-               if (ret) {
-                       PERROR("Failed to close event notifier group notification file descriptor: fd = %d",
-                                       event_notifier_group_notification_fd);
-               }
-       }
-
-       return ret_code;
-}
-
-static
-unsigned long hash_trigger(const struct lttng_trigger *trigger)
-{
-       const struct lttng_condition *condition =
-                       lttng_trigger_get_const_condition(trigger);
-
-       return lttng_condition_hash(condition);
-}
-
-static
-int match_trigger(struct cds_lfht_node *node, const void *key)
-{
-       const struct ltt_kernel_event_notifier_rule *event_notifier_rule;
-       const struct lttng_trigger *trigger = key;
-
-       event_notifier_rule = caa_container_of(node,
-                       const struct ltt_kernel_event_notifier_rule, ht_node);
-
-       return lttng_trigger_is_equal(trigger, event_notifier_rule->trigger);
-}
-
-static enum lttng_error_code kernel_create_event_notifier_rule(
-               struct lttng_trigger *trigger,
-               const struct lttng_credentials *creds, uint64_t token)
-{
-       int err, fd, ret = 0;
-       enum lttng_error_code error_code_ret;
-       enum lttng_condition_status condition_status;
-       enum lttng_condition_type condition_type;
-       enum lttng_event_rule_type event_rule_type;
-       struct ltt_kernel_event_notifier_rule *event_notifier_rule;
-       struct lttng_kernel_abi_event_notifier kernel_event_notifier = {};
-       unsigned int capture_bytecode_count = 0, i;
-       const struct lttng_condition *condition = NULL;
-       const struct lttng_event_rule *event_rule = NULL;
-       enum lttng_condition_status cond_status;
-
-       LTTNG_ASSERT(trigger);
-
-       condition = lttng_trigger_get_const_condition(trigger);
-       LTTNG_ASSERT(condition);
-
-       condition_type = lttng_condition_get_type(condition);
-       LTTNG_ASSERT(condition_type == LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
-
-       /* Does not acquire a reference. */
-       condition_status = lttng_condition_event_rule_matches_get_rule(
-                       condition, &event_rule);
-       LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
-       LTTNG_ASSERT(event_rule);
-
-       event_rule_type = lttng_event_rule_get_type(event_rule);
-       LTTNG_ASSERT(event_rule_type != LTTNG_EVENT_RULE_TYPE_UNKNOWN);
-
-       error_code_ret = trace_kernel_create_event_notifier_rule(trigger, token,
-                       lttng_condition_event_rule_matches_get_error_counter_index(
-                                       condition),
-                       &event_notifier_rule);
-       if (error_code_ret != LTTNG_OK) {
-               goto error;
-       }
-
-       error_code_ret = trace_kernel_init_event_notifier_from_event_rule(
-                       event_rule, &kernel_event_notifier);
-       if (error_code_ret != LTTNG_OK) {
-               goto free_event;
-       }
-
-       kernel_event_notifier.event.token = event_notifier_rule->token;
-       kernel_event_notifier.error_counter_idx =
-                       lttng_condition_event_rule_matches_get_error_counter_index(
-                                       condition);
-
-       fd = kernctl_create_event_notifier(
-                       kernel_tracer_event_notifier_group_fd,
-                       &kernel_event_notifier);
-       if (fd < 0) {
-               switch (-fd) {
-               case EEXIST:
-                       error_code_ret = LTTNG_ERR_KERN_EVENT_EXIST;
-                       break;
-               case ENOSYS:
-                       WARN("Failed to create kernel event notifier: notifier type not implemented");
-                       error_code_ret = LTTNG_ERR_KERN_EVENT_ENOSYS;
-                       break;
-               case ENOENT:
-                       WARN("Failed to create kernel event notifier: not found: name = '%s'",
-                                       kernel_event_notifier.event.name);
-                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-                       break;
-               default:
-                       PERROR("Failed to create kernel event notifier: error code = %d, name = '%s'",
-                                       fd, kernel_event_notifier.event.name);
-                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-               }
-               goto free_event;
-       }
-
-       event_notifier_rule->fd = fd;
-       /* Prevent fd duplication after execlp(). */
-       err = fcntl(event_notifier_rule->fd, F_SETFD, FD_CLOEXEC);
-       if (err < 0) {
-               PERROR("Failed to set FD_CLOEXEC on kernel event notifier file descriptor: fd = %d",
-                               fd);
-               error_code_ret = LTTNG_ERR_FATAL;
-               goto set_cloexec_error;
-       }
-
-       if (event_notifier_rule->filter) {
-               err = kernctl_filter(event_notifier_rule->fd, event_notifier_rule->filter);
-               if (err < 0) {
-                       switch (-err) {
-                       case ENOMEM:
-                               error_code_ret = LTTNG_ERR_FILTER_NOMEM;
-                               break;
-                       default:
-                               error_code_ret = LTTNG_ERR_FILTER_INVAL;
-                               break;
-                       }
-                       goto filter_error;
-               }
-       }
-
-       if (lttng_event_rule_get_type(event_rule) ==
-                       LTTNG_EVENT_RULE_TYPE_KERNEL_UPROBE) {
-               ret = userspace_probe_event_rule_add_callsites(
-                               event_rule, creds, event_notifier_rule->fd);
-               if (ret) {
-                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-                       goto add_callsite_error;
-               }
-       }
-
-       /* Set the capture bytecode if any. */
-       cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
-                       condition, &capture_bytecode_count);
-       LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
-
-       for (i = 0; i < capture_bytecode_count; i++) {
-               const struct lttng_bytecode *capture_bytecode =
-                               lttng_condition_event_rule_matches_get_capture_bytecode_at_index(
-                                               condition, i);
-
-               if (capture_bytecode == NULL) {
-                       ERR("Unexpected NULL capture bytecode on condition");
-                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-                       goto capture_error;
-               }
-
-               ret = kernctl_capture(event_notifier_rule->fd, capture_bytecode);
-               if (ret < 0) {
-                       ERR("Failed to set capture bytecode on event notifier rule fd: fd = %d",
-                                       event_notifier_rule->fd);
-                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-                       goto capture_error;
-               }
-       }
-
-       err = kernctl_enable(event_notifier_rule->fd);
-       if (err < 0) {
-               switch (-err) {
-               case EEXIST:
-                       error_code_ret = LTTNG_ERR_KERN_EVENT_EXIST;
-                       break;
-               default:
-                       PERROR("enable kernel event notifier");
-                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
-                       break;
-               }
-               goto enable_error;
-       }
-
-       /* Add trigger to kernel token mapping in the hash table. */
-       rcu_read_lock();
-       cds_lfht_add(kernel_token_to_event_notifier_rule_ht, hash_trigger(trigger),
-                       &event_notifier_rule->ht_node);
-       rcu_read_unlock();
-
-       DBG("Created kernel event notifier: name = '%s', fd = %d",
-                       kernel_event_notifier.event.name,
-                       event_notifier_rule->fd);
-
-       return LTTNG_OK;
-
-capture_error:
-add_callsite_error:
-enable_error:
-set_cloexec_error:
-filter_error:
-       {
-               const int close_ret = close(event_notifier_rule->fd);
-
-               if (close_ret) {
-                       PERROR("Failed to close kernel event notifier file descriptor: fd = %d",
-                                       event_notifier_rule->fd);
-               }
-       }
-free_event:
-       free(event_notifier_rule);
-error:
-       return error_code_ret;
-}
-
-enum lttng_error_code kernel_register_event_notifier(
-               struct lttng_trigger *trigger,
-               const struct lttng_credentials *cmd_creds)
-{
-       enum lttng_error_code ret;
-       enum lttng_condition_status status;
-       enum lttng_domain_type domain_type;
-       const struct lttng_event_rule *event_rule;
-       const struct lttng_condition *const condition =
-                       lttng_trigger_get_const_condition(trigger);
-       const uint64_t token = lttng_trigger_get_tracer_token(trigger);
-
-       LTTNG_ASSERT(condition);
-
-       /* Does not acquire a reference to the event rule. */
-       status = lttng_condition_event_rule_matches_get_rule(
-                       condition, &event_rule);
-       LTTNG_ASSERT(status == LTTNG_CONDITION_STATUS_OK);
-
-       domain_type = lttng_event_rule_get_domain_type(event_rule);
-       LTTNG_ASSERT(domain_type == LTTNG_DOMAIN_KERNEL);
-
-       ret = kernel_create_event_notifier_rule(trigger, cmd_creds, token);
-       if (ret != LTTNG_OK) {
-               ERR("Failed to create kernel event notifier rule");
-       }
-
-       return ret;
-}
-
-enum lttng_error_code kernel_unregister_event_notifier(
-               const struct lttng_trigger *trigger)
-{
-       struct ltt_kernel_event_notifier_rule *token_event_rule_element;
-       struct cds_lfht_node *node;
-       struct cds_lfht_iter iter;
-       enum lttng_error_code error_code_ret;
-       int ret;
-
-       rcu_read_lock();
-
-       cds_lfht_lookup(kernel_token_to_event_notifier_rule_ht,
-                       hash_trigger(trigger), match_trigger, trigger, &iter);
-
-       node = cds_lfht_iter_get_node(&iter);
-       if (!node) {
-               error_code_ret = LTTNG_ERR_TRIGGER_NOT_FOUND;
-               goto error;
-       }
-
-       token_event_rule_element = caa_container_of(node,
-                       struct ltt_kernel_event_notifier_rule, ht_node);
-
-       ret = kernel_disable_event_notifier_rule(token_event_rule_element);
-       if (ret) {
-               error_code_ret = LTTNG_ERR_FATAL;
-               goto error;
-       }
-
-       trace_kernel_destroy_event_notifier_rule(token_event_rule_element);
-       error_code_ret = LTTNG_OK;
-
-error:
-       rcu_read_unlock();
-
-       return error_code_ret;
-}
-
-int kernel_get_notification_fd(void)
-{
-       return kernel_tracer_event_notifier_group_notification_fd;
-}
diff --git a/src/bin/lttng-sessiond/kernel.cpp b/src/bin/lttng-sessiond/kernel.cpp
new file mode 100644 (file)
index 0000000..22ce819
--- /dev/null
@@ -0,0 +1,2544 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/types.h>
+
+#include <common/common.h>
+#include <common/hashtable/utils.h>
+#include <common/trace-chunk.h>
+#include <common/kernel-ctl/kernel-ctl.h>
+#include <common/kernel-ctl/kernel-ioctl.h>
+#include <common/sessiond-comm/sessiond-comm.h>
+#include <common/tracker.h>
+#include <common/utils.h>
+#include <lttng/event.h>
+#include <lttng/lttng-error.h>
+#include <lttng/tracker.h>
+
+#include <lttng/userspace-probe.h>
+#include <lttng/userspace-probe-internal.h>
+#include <lttng/condition/event-rule-matches.h>
+#include <lttng/condition/event-rule-matches-internal.h>
+#include <lttng/event-rule/event-rule.h>
+#include <lttng/event-rule/event-rule-internal.h>
+#include <lttng/event-rule/kernel-uprobe-internal.h>
+
+#include "event-notifier-error-accounting.h"
+#include "lttng-sessiond.h"
+#include "lttng-syscall.h"
+#include "condition-internal.h"
+#include "consumer.h"
+#include "kernel.h"
+#include "kernel-consumer.h"
+#include "kern-modules.h"
+#include "sessiond-config.h"
+#include "utils.h"
+#include "rotate.h"
+#include "modprobe.h"
+#include "tracker.h"
+#include "notification-thread-commands.h"
+
+/*
+ * Key used to reference a channel between the sessiond and the consumer. This
+ * is only read and updated with the session_list lock held.
+ */
+static uint64_t next_kernel_channel_key;
+
+static const char *module_proc_lttng = "/proc/lttng";
+
+static int kernel_tracer_fd = -1;
+static int kernel_tracer_event_notifier_group_fd = -1;
+static int kernel_tracer_event_notifier_group_notification_fd = -1;
+static struct cds_lfht *kernel_token_to_event_notifier_rule_ht;
+
+/*
+ * Add a context to a kernel channel.
+ *
+ * Assumes ownership of ctx.
+ */
+int kernel_add_channel_context(struct ltt_kernel_channel *chan,
+               struct ltt_kernel_context *ctx)
+{
+       int ret;
+
+       LTTNG_ASSERT(chan);
+       LTTNG_ASSERT(ctx);
+
+       DBG("Adding context to channel %s", chan->channel->name);
+       ret = kernctl_add_context(chan->fd, &ctx->ctx);
+       if (ret < 0) {
+               switch (-ret) {
+               case ENOSYS:
+                       /* Exists but not available for this kernel */
+                       ret = LTTNG_ERR_KERN_CONTEXT_UNAVAILABLE;
+                       goto error;
+               case EEXIST:
+                       /* If EEXIST, we just ignore the error */
+                       ret = 0;
+                       goto end;
+               default:
+                       PERROR("add context ioctl");
+                       ret = LTTNG_ERR_KERN_CONTEXT_FAIL;
+                       goto error;
+               }
+       }
+       ret = 0;
+
+end:
+       cds_list_add_tail(&ctx->list, &chan->ctx_list);
+       ctx->in_list = true;
+       ctx = NULL;
+error:
+       if (ctx) {
+               trace_kernel_destroy_context(ctx);
+       }
+       return ret;
+}
+
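The ownership contract above is easy to misuse, so a minimal caller sketch may help:
ctx is consumed by kernel_add_channel_context() on every path, so the caller must not
free it afterwards. The helper name is hypothetical, and trace_kernel_create_context()
together with the lttng_kernel_abi_context type are assumed from trace-kernel.h rather
than shown in this hunk.

    /* Hypothetical caller; ctx must not be freed after the call below. */
    static int add_one_channel_context(struct ltt_kernel_channel *chan,
                    struct lttng_kernel_abi_context *kctx)
    {
            struct ltt_kernel_context *ctx = trace_kernel_create_context(kctx);

            if (!ctx) {
                    return LTTNG_ERR_NOMEM;
            }

            /* Ownership of ctx is transferred, on success and on error alike. */
            return kernel_add_channel_context(chan, ctx);
    }
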
+/*
+ * Create a new kernel session, register it with the kernel tracer, and attach
+ * it to the session daemon's session.
+ */
+int kernel_create_session(struct ltt_session *session)
+{
+       int ret;
+       struct ltt_kernel_session *lks;
+
+       LTTNG_ASSERT(session);
+
+       /* Allocate data structure */
+       lks = trace_kernel_create_session();
+       if (lks == NULL) {
+               ret = -1;
+               goto error;
+       }
+
+       /* Kernel tracer session creation */
+       ret = kernctl_create_session(kernel_tracer_fd);
+       if (ret < 0) {
+               PERROR("ioctl kernel create session");
+               goto error;
+       }
+
+       lks->fd = ret;
+       /* Prevent fd duplication after execlp() */
+       ret = fcntl(lks->fd, F_SETFD, FD_CLOEXEC);
+       if (ret < 0) {
+               PERROR("fcntl session fd");
+       }
+
+       lks->id = session->id;
+       lks->consumer_fds_sent = 0;
+       session->kernel_session = lks;
+
+       DBG("Kernel session created (fd: %d)", lks->fd);
+
+       /*
+        * This is necessary since the creation time is present in the session
+        * name when it is generated.
+        */
+       if (session->has_auto_generated_name) {
+               ret = kernctl_session_set_name(lks->fd, DEFAULT_SESSION_NAME);
+       } else {
+               ret = kernctl_session_set_name(lks->fd, session->name);
+       }
+       if (ret) {
+               WARN("Could not set kernel session name for session %" PRIu64 " name: %s",
+                       session->id, session->name);
+       }
+
+       ret = kernctl_session_set_creation_time(lks->fd, session->creation_time);
+       if (ret) {
+               WARN("Could not set kernel session creation time for session %" PRIu64 " name: %s",
+                       session->id, session->name);
+       }
+
+       return 0;
+
+error:
+       if (lks) {
+               trace_kernel_destroy_session(lks);
+               trace_kernel_free_session(lks);
+       }
+       return ret;
+}
+
+/*
+ * Create a kernel channel, register it with the kernel tracer and add it to
+ * the kernel session.
+ */
+ */
+int kernel_create_channel(struct ltt_kernel_session *session,
+               struct lttng_channel *chan)
+{
+       int ret;
+       struct ltt_kernel_channel *lkc;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(chan);
+
+       /* Allocate kernel channel */
+       lkc = trace_kernel_create_channel(chan);
+       if (lkc == NULL) {
+               goto error;
+       }
+
+       DBG3("Kernel create channel %s with attr: %d, %" PRIu64 ", %" PRIu64 ", %u, %u, %d, %d",
+                       chan->name, lkc->channel->attr.overwrite,
+                       lkc->channel->attr.subbuf_size, lkc->channel->attr.num_subbuf,
+                       lkc->channel->attr.switch_timer_interval, lkc->channel->attr.read_timer_interval,
+                       lkc->channel->attr.live_timer_interval, lkc->channel->attr.output);
+
+       /* Kernel tracer channel creation */
+       ret = kernctl_create_channel(session->fd, &lkc->channel->attr);
+       if (ret < 0) {
+               PERROR("ioctl kernel create channel");
+               goto error;
+       }
+
+       /* Setup the channel fd */
+       lkc->fd = ret;
+       /* Prevent fd duplication after execlp() */
+       ret = fcntl(lkc->fd, F_SETFD, FD_CLOEXEC);
+       if (ret < 0) {
+               PERROR("fcntl session fd");
+       }
+
+       /* Add channel to session */
+       cds_list_add(&lkc->list, &session->channel_list.head);
+       session->channel_count++;
+       lkc->session = session;
+       lkc->key = ++next_kernel_channel_key;
+
+       DBG("Kernel channel %s created (fd: %d, key: %" PRIu64 ")",
+                       lkc->channel->name, lkc->fd, lkc->key);
+
+       return 0;
+
+error:
+       if (lkc) {
+               free(lkc->channel);
+               free(lkc);
+       }
+       return -1;
+}
+
+/*
+ * Create a kernel event notifier group and register it with the kernel
+ * tracer. On success, the group's file descriptor is returned through the
+ * event_notifier_group_fd out parameter.
+ */
+static int kernel_create_event_notifier_group(int *event_notifier_group_fd)
+{
+       int ret;
+       int local_fd = -1;
+
+       LTTNG_ASSERT(event_notifier_group_fd);
+
+       /* Kernel event notifier group creation. */
+       ret = kernctl_create_event_notifier_group(kernel_tracer_fd);
+       if (ret < 0) {
+               PERROR("Failed to create kernel event notifier group");
+               ret = -1;
+               goto error;
+       }
+
+       local_fd = ret;
+
+       /* Prevent fd duplication after execlp(). */
+       ret = fcntl(local_fd, F_SETFD, FD_CLOEXEC);
+       if (ret < 0) {
+               PERROR("Failed to set FD_CLOEXEC on kernel event notifier group file descriptor: fd = %d",
+                               local_fd);
+               goto error;
+       }
+
+       DBG("Created kernel event notifier group: fd = %d", local_fd);
+       *event_notifier_group_fd = local_fd;
+       local_fd = -1;
+       ret = 0;
+error:
+       if (local_fd >= 0) {
+               ret = close(local_fd);
+               if (ret) {
+                       PERROR("Failed to close kernel event notifier group file descriptor: fd = %d",
+                                       local_fd);
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Compute the offset of the instrumentation byte in the binary based on the
+ * function probe location, using the ELF lookup method.
+ *
+ * Returns 0 on success and sets the offset out parameter to the offset of the
+ * ELF symbol.
+ * Returns -1 on error.
+ */
+static
+int extract_userspace_probe_offset_function_elf(
+               const struct lttng_userspace_probe_location *probe_location,
+               uid_t uid, gid_t gid, uint64_t *offset)
+{
+       int fd;
+       int ret = 0;
+       const char *symbol = NULL;
+       const struct lttng_userspace_probe_location_lookup_method *lookup = NULL;
+       enum lttng_userspace_probe_location_lookup_method_type lookup_method_type;
+
+       LTTNG_ASSERT(lttng_userspace_probe_location_get_type(probe_location) ==
+                       LTTNG_USERSPACE_PROBE_LOCATION_TYPE_FUNCTION);
+
+       lookup = lttng_userspace_probe_location_get_lookup_method(
+                       probe_location);
+       if (!lookup) {
+               ret = -1;
+               goto end;
+       }
+
+       lookup_method_type =
+                       lttng_userspace_probe_location_lookup_method_get_type(lookup);
+
+       LTTNG_ASSERT(lookup_method_type ==
+                       LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF);
+
+       symbol = lttng_userspace_probe_location_function_get_function_name(
+                       probe_location);
+       if (!symbol) {
+               ret = -1;
+               goto end;
+       }
+
+       fd = lttng_userspace_probe_location_function_get_binary_fd(probe_location);
+       if (fd < 0) {
+               ret = -1;
+               goto end;
+       }
+
+       ret = run_as_extract_elf_symbol_offset(fd, symbol, uid, gid, offset);
+       if (ret < 0) {
+               DBG("userspace probe offset calculation failed for "
+                               "function %s", symbol);
+               goto end;
+       }
+
+       DBG("userspace probe elf offset for %s is 0x%jd", symbol, (intmax_t)(*offset));
+end:
+       return ret;
+}
+
+/*
+ * Compute the offsets of the instrumentation bytes in the binary based on the
+ * tracepoint probe location, using the SDT lookup method. This function
+ * allocates the offsets buffer; the caller must free it.
+ *
+ * Returns 0 on success and sets the offsets out parameter to the offsets of
+ * the SDT tracepoint.
+ * Returns -1 on error.
+ */
+static
+int extract_userspace_probe_offset_tracepoint_sdt(
+               const struct lttng_userspace_probe_location *probe_location,
+               uid_t uid, gid_t gid, uint64_t **offsets,
+               uint32_t *offsets_count)
+{
+       enum lttng_userspace_probe_location_lookup_method_type lookup_method_type;
+       const struct lttng_userspace_probe_location_lookup_method *lookup = NULL;
+       const char *probe_name = NULL, *provider_name = NULL;
+       int ret = 0;
+       int fd, i;
+
+       LTTNG_ASSERT(lttng_userspace_probe_location_get_type(probe_location) ==
+                       LTTNG_USERSPACE_PROBE_LOCATION_TYPE_TRACEPOINT);
+
+       lookup = lttng_userspace_probe_location_get_lookup_method(probe_location);
+       if (!lookup) {
+               ret = -1;
+               goto end;
+       }
+
+       lookup_method_type =
+                       lttng_userspace_probe_location_lookup_method_get_type(lookup);
+
+       LTTNG_ASSERT(lookup_method_type ==
+                       LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT);
+
+       probe_name = lttng_userspace_probe_location_tracepoint_get_probe_name(
+                       probe_location);
+       if (!probe_name) {
+               ret = -1;
+               goto end;
+       }
+
+       provider_name = lttng_userspace_probe_location_tracepoint_get_provider_name(
+                       probe_location);
+       if (!provider_name) {
+               ret = -1;
+               goto end;
+       }
+
+       fd = lttng_userspace_probe_location_tracepoint_get_binary_fd(probe_location);
+       if (fd < 0) {
+               ret = -1;
+               goto end;
+       }
+
+       ret = run_as_extract_sdt_probe_offsets(fd, provider_name, probe_name,
+                       uid, gid, offsets, offsets_count);
+       if (ret < 0) {
+               DBG("userspace probe offset calculation failed for sdt "
+                               "probe %s:%s", provider_name, probe_name);
+               goto end;
+       }
+
+       if (*offsets_count == 0) {
+               DBG("no userspace probe offset found");
+               goto end;
+       }
+
+       DBG("%u userspace probe SDT offsets found for %s:%s at:",
+                       *offsets_count, provider_name, probe_name);
+       for (i = 0; i < *offsets_count; i++) {
+               DBG("\t0x%jd", (intmax_t)((*offsets)[i]));
+       }
+end:
+       return ret;
+}
+
+static
+int userspace_probe_add_callsite(
+               const struct lttng_userspace_probe_location *location,
+               uid_t uid, gid_t gid, int fd)
+{
+       const struct lttng_userspace_probe_location_lookup_method *lookup_method = NULL;
+       enum lttng_userspace_probe_location_lookup_method_type type;
+       int ret;
+
+       lookup_method = lttng_userspace_probe_location_get_lookup_method(location);
+       if (!lookup_method) {
+               ret = -1;
+               goto end;
+       }
+
+       type = lttng_userspace_probe_location_lookup_method_get_type(lookup_method);
+       switch (type) {
+       case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF:
+       {
+               struct lttng_kernel_abi_event_callsite callsite;
+               uint64_t offset;
+
+               ret = extract_userspace_probe_offset_function_elf(location,
+                               uid, gid, &offset);
+               if (ret) {
+                       ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
+                       goto end;
+               }
+
+               callsite.u.uprobe.offset = offset;
+               ret = kernctl_add_callsite(fd, &callsite);
+               if (ret) {
+                       WARN("Failed to add callsite to ELF userspace probe.");
+                       ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+                       goto end;
+               }
+               break;
+       }
+       case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT:
+       {
+               int i;
+               uint64_t *offsets = NULL;
+               uint32_t offsets_count;
+               struct lttng_kernel_abi_event_callsite callsite;
+
+               /*
+                * This call allocates the offsets buffer; it must be freed
+                * by the caller.
+                */
+               ret = extract_userspace_probe_offset_tracepoint_sdt(location,
+                               uid, gid, &offsets, &offsets_count);
+               if (ret) {
+                       ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
+                       goto end;
+               }
+               for (i = 0; i < offsets_count; i++) {
+                       callsite.u.uprobe.offset = offsets[i];
+                       ret = kernctl_add_callsite(fd, &callsite);
+                       if (ret) {
+                               WARN("Failed to add callsite to SDT userspace probe");
+                               ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+                               free(offsets);
+                               goto end;
+                       }
+               }
+               free(offsets);
+               break;
+       }
+       default:
+               ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
+               goto end;
+       }
+end:
+       return ret;
+}
+
+/*
+ * Extract the offsets of the instrumentation point for the different lookup
+ * methods.
+ */
+static
+int userspace_probe_event_add_callsites(struct lttng_event *ev,
+                       struct ltt_kernel_session *session, int fd)
+{
+       int ret;
+       const struct lttng_userspace_probe_location *location = NULL;
+
+       LTTNG_ASSERT(ev);
+       LTTNG_ASSERT(ev->type == LTTNG_EVENT_USERSPACE_PROBE);
+
+       location = lttng_event_get_userspace_probe_location(ev);
+       if (!location) {
+               ret = -1;
+               goto end;
+       }
+
+       ret = userspace_probe_add_callsite(location, session->uid, session->gid,
+               fd);
+       if (ret) {
+               WARN("Failed to add callsite to userspace probe event '%s'",
+                               ev->name);
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Extract the offsets of the instrumentation point for the different lookup
+ * methods.
+ */
+static int userspace_probe_event_rule_add_callsites(
+               const struct lttng_event_rule *rule,
+               const struct lttng_credentials *creds,
+               int fd)
+{
+       int ret;
+       enum lttng_event_rule_status status;
+       enum lttng_event_rule_type event_rule_type;
+       const struct lttng_userspace_probe_location *location = NULL;
+
+       LTTNG_ASSERT(rule);
+       LTTNG_ASSERT(creds);
+
+       event_rule_type = lttng_event_rule_get_type(rule);
+       LTTNG_ASSERT(event_rule_type == LTTNG_EVENT_RULE_TYPE_KERNEL_UPROBE);
+
+       status = lttng_event_rule_kernel_uprobe_get_location(rule, &location);
+       if (status != LTTNG_EVENT_RULE_STATUS_OK || !location) {
+               ret = -1;
+               goto end;
+       }
+
+       ret = userspace_probe_add_callsite(location,
+                       lttng_credentials_get_uid(creds),
+                       lttng_credentials_get_gid(creds), fd);
+       if (ret) {
+               WARN("Failed to add callsite to user space probe object: fd = %d",
+                               fd);
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Create a kernel event, enable it in the kernel tracer and add it to the
+ * event list of the channel.
+ * Takes ownership of filter_expression and filter.
+ */
+int kernel_create_event(struct lttng_event *ev,
+               struct ltt_kernel_channel *channel,
+               char *filter_expression,
+               struct lttng_bytecode *filter)
+{
+       int err, fd;
+       enum lttng_error_code ret;
+       struct ltt_kernel_event *event;
+
+       LTTNG_ASSERT(ev);
+       LTTNG_ASSERT(channel);
+
+       /* We pass ownership of filter_expression and filter */
+       ret = trace_kernel_create_event(ev, filter_expression,
+                       filter, &event);
+       if (ret != LTTNG_OK) {
+               goto error;
+       }
+
+       fd = kernctl_create_event(channel->fd, event->event);
+       if (fd < 0) {
+               switch (-fd) {
+               case EEXIST:
+                       ret = LTTNG_ERR_KERN_EVENT_EXIST;
+                       break;
+               case ENOSYS:
+                       WARN("Event type not implemented");
+                       ret = LTTNG_ERR_KERN_EVENT_ENOSYS;
+                       break;
+               case ENOENT:
+                       WARN("Event %s not found!", ev->name);
+                       ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+                       break;
+               default:
+                       ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+                       PERROR("create event ioctl");
+               }
+               goto free_event;
+       }
+
+       event->type = ev->type;
+       event->fd = fd;
+       /* Prevent fd duplication after execlp() */
+       err = fcntl(event->fd, F_SETFD, FD_CLOEXEC);
+       if (err < 0) {
+               PERROR("fcntl session fd");
+       }
+
+       if (filter) {
+               err = kernctl_filter(event->fd, filter);
+               if (err < 0) {
+                       switch (-err) {
+                       case ENOMEM:
+                               ret = LTTNG_ERR_FILTER_NOMEM;
+                               break;
+                       default:
+                               ret = LTTNG_ERR_FILTER_INVAL;
+                               break;
+                       }
+                       goto filter_error;
+               }
+       }
+
+       if (ev->type == LTTNG_EVENT_USERSPACE_PROBE) {
+               ret = (lttng_error_code) userspace_probe_event_add_callsites(ev, channel->session,
+                               event->fd);
+               if (ret) {
+                       goto add_callsite_error;
+               }
+       }
+
+       err = kernctl_enable(event->fd);
+       if (err < 0) {
+               switch (-err) {
+               case EEXIST:
+                       ret = LTTNG_ERR_KERN_EVENT_EXIST;
+                       break;
+               default:
+                       PERROR("enable kernel event");
+                       ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+                       break;
+               }
+               goto enable_error;
+       }
+
+       /* Add event to event list */
+       cds_list_add(&event->list, &channel->events_list.head);
+       channel->event_count++;
+
+       DBG("Event %s created (fd: %d)", ev->name, event->fd);
+
+       return 0;
+
+add_callsite_error:
+enable_error:
+filter_error:
+       {
+               int closeret;
+
+               closeret = close(event->fd);
+               if (closeret) {
+                       PERROR("close event fd");
+               }
+       }
+free_event:
+       free(event);
+error:
+       return ret;
+}
+
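From the caller's side, the ownership note in the header comment means the filter
arguments must not be freed once they have been passed in. A purely illustrative
wrapper (the name does not exist in the patch):

    /*
     * Hypothetical caller: filter_expression and filter are handed over to
     * kernel_create_event() and must not be freed here after the call.
     */
    static int enable_event_on_channel(struct lttng_event *ev,
                    struct ltt_kernel_channel *kchan,
                    char *filter_expression, struct lttng_bytecode *filter)
    {
            return kernel_create_event(ev, kchan, filter_expression, filter);
    }
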
+/*
+ * Disable a kernel channel.
+ */
+int kernel_disable_channel(struct ltt_kernel_channel *chan)
+{
+       int ret;
+
+       LTTNG_ASSERT(chan);
+
+       ret = kernctl_disable(chan->fd);
+       if (ret < 0) {
+               PERROR("disable chan ioctl");
+               goto error;
+       }
+
+       chan->enabled = 0;
+       DBG("Kernel channel %s disabled (fd: %d, key: %" PRIu64 ")",
+                       chan->channel->name, chan->fd, chan->key);
+
+       return 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Enable a kernel channel.
+ */
+int kernel_enable_channel(struct ltt_kernel_channel *chan)
+{
+       int ret;
+
+       LTTNG_ASSERT(chan);
+
+       ret = kernctl_enable(chan->fd);
+       if (ret < 0 && ret != -EEXIST) {
+               PERROR("Enable kernel chan");
+               goto error;
+       }
+
+       chan->enabled = 1;
+       DBG("Kernel channel %s enabled (fd: %d, key: %" PRIu64 ")",
+                       chan->channel->name, chan->fd, chan->key);
+
+       return 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Enable a kernel event.
+ */
+int kernel_enable_event(struct ltt_kernel_event *event)
+{
+       int ret;
+
+       LTTNG_ASSERT(event);
+
+       ret = kernctl_enable(event->fd);
+       if (ret < 0) {
+               switch (-ret) {
+               case EEXIST:
+                       ret = LTTNG_ERR_KERN_EVENT_EXIST;
+                       break;
+               default:
+                       PERROR("enable kernel event");
+                       break;
+               }
+               goto error;
+       }
+
+       event->enabled = 1;
+       DBG("Kernel event %s enabled (fd: %d)", event->event->name, event->fd);
+
+       return 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Disable a kernel event.
+ */
+int kernel_disable_event(struct ltt_kernel_event *event)
+{
+       int ret;
+
+       LTTNG_ASSERT(event);
+
+       ret = kernctl_disable(event->fd);
+       if (ret < 0) {
+               PERROR("Failed to disable kernel event: name = '%s', fd = %d",
+                               event->event->name, event->fd);
+               goto error;
+       }
+
+       event->enabled = 0;
+       DBG("Kernel event %s disabled (fd: %d)", event->event->name, event->fd);
+
+       return 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Disable a kernel event notifier.
+ */
+static
+int kernel_disable_event_notifier_rule(struct ltt_kernel_event_notifier_rule *event)
+{
+       int ret;
+
+       LTTNG_ASSERT(event);
+
+       rcu_read_lock();
+       cds_lfht_del(kernel_token_to_event_notifier_rule_ht, &event->ht_node);
+       rcu_read_unlock();
+
+       ret = kernctl_disable(event->fd);
+       if (ret < 0) {
+               PERROR("Failed to disable kernel event notifier: fd = %d, token = %" PRIu64,
+                               event->fd, event->token);
+               goto error;
+       }
+
+       event->enabled = 0;
+       DBG("Disabled kernel event notifier: fd = %d, token = %" PRIu64,
+                       event->fd, event->token);
+
+error:
+       return ret;
+}
+
+static
+struct process_attr_tracker *_kernel_get_process_attr_tracker(
+               struct ltt_kernel_session *session,
+               enum lttng_process_attr process_attr)
+{
+       switch (process_attr) {
+       case LTTNG_PROCESS_ATTR_PROCESS_ID:
+               return session->tracker_pid;
+       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
+               return session->tracker_vpid;
+       case LTTNG_PROCESS_ATTR_USER_ID:
+               return session->tracker_uid;
+       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
+               return session->tracker_vuid;
+       case LTTNG_PROCESS_ATTR_GROUP_ID:
+               return session->tracker_gid;
+       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
+               return session->tracker_vgid;
+       default:
+               return NULL;
+       }
+}
+
+const struct process_attr_tracker *kernel_get_process_attr_tracker(
+               struct ltt_kernel_session *session,
+               enum lttng_process_attr process_attr)
+{
+       return (const struct process_attr_tracker *)
+                       _kernel_get_process_attr_tracker(session, process_attr);
+}
+
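For reference, a short sketch of how these accessors combine with the tracking policy
query used further down; the wrapper name is hypothetical:

    /* Hypothetical read-only query built on the accessor above. */
    static enum lttng_tracking_policy get_vpid_tracking_policy(
                    struct ltt_kernel_session *session)
    {
            struct process_attr_tracker *tracker =
                            _kernel_get_process_attr_tracker(session,
                                            LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID);

            LTTNG_ASSERT(tracker);
            return process_attr_tracker_get_tracking_policy(tracker);
    }
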
+enum lttng_error_code kernel_process_attr_tracker_set_tracking_policy(
+               struct ltt_kernel_session *session,
+               enum lttng_process_attr process_attr,
+               enum lttng_tracking_policy policy)
+{
+       int ret;
+       enum lttng_error_code ret_code = LTTNG_OK;
+       struct process_attr_tracker *tracker =
+                       _kernel_get_process_attr_tracker(session, process_attr);
+       enum lttng_tracking_policy previous_policy;
+
+       if (!tracker) {
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       previous_policy = process_attr_tracker_get_tracking_policy(tracker);
+       ret = process_attr_tracker_set_tracking_policy(tracker, policy);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+
+       if (previous_policy == policy) {
+               goto end;
+       }
+
+       switch (policy) {
+       case LTTNG_TRACKING_POLICY_INCLUDE_ALL:
+               if (process_attr == LTTNG_PROCESS_ATTR_PROCESS_ID) {
+                       /*
+                        * Maintain a special case for the process ID process
+                        * attribute tracker as it was the only supported
+                        * attribute prior to 2.12.
+                        */
+                       ret = kernctl_track_pid(session->fd, -1);
+               } else {
+                       ret = kernctl_track_id(session->fd, process_attr, -1);
+               }
+               break;
+       case LTTNG_TRACKING_POLICY_EXCLUDE_ALL:
+       case LTTNG_TRACKING_POLICY_INCLUDE_SET:
+               /* fall-through. */
+               if (process_attr == LTTNG_PROCESS_ATTR_PROCESS_ID) {
+                       /*
+                        * Maintain a special case for the process ID process
+                        * attribute tracker as it was the only supported
+                        * attribute prior to 2.12.
+                        */
+                       ret = kernctl_untrack_pid(session->fd, -1);
+               } else {
+                       ret = kernctl_untrack_id(session->fd, process_attr, -1);
+               }
+               break;
+       default:
+               abort();
+       }
+       /* kern-ctl error handling */
+       switch (-ret) {
+       case 0:
+               ret_code = LTTNG_OK;
+               break;
+       case EINVAL:
+               ret_code = LTTNG_ERR_INVALID;
+               break;
+       case ENOMEM:
+               ret_code = LTTNG_ERR_NOMEM;
+               break;
+       case EEXIST:
+               ret_code = LTTNG_ERR_PROCESS_ATTR_EXISTS;
+               break;
+       default:
+               ret_code = LTTNG_ERR_UNK;
+               break;
+       }
+end:
+       return ret_code;
+}
+
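A similar kern-ctl error handling switch appears in the function above and, with small
variations (EEXIST vs. ENOENT), in the two inclusion-set functions below. Purely as an
illustration of the pattern (the patch keeps the switch inline; this helper does not
exist there), it boils down to the following mapping, with errno constants from
<errno.h>:

    static enum lttng_error_code tracker_kernctl_ret_to_error_code(int kernctl_ret)
    {
            switch (-kernctl_ret) {
            case 0:
                    return LTTNG_OK;
            case EINVAL:
                    return LTTNG_ERR_INVALID;
            case ENOMEM:
                    return LTTNG_ERR_NOMEM;
            case EEXIST:
                    return LTTNG_ERR_PROCESS_ATTR_EXISTS;
            default:
                    return LTTNG_ERR_UNK;
            }
    }
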
+enum lttng_error_code kernel_process_attr_tracker_inclusion_set_add_value(
+               struct ltt_kernel_session *session,
+               enum lttng_process_attr process_attr,
+               const struct process_attr_value *value)
+{
+       int ret, integral_value;
+       enum lttng_error_code ret_code;
+       struct process_attr_tracker *tracker;
+       enum process_attr_tracker_status status;
+
+       /*
+        * Convert process attribute tracker value to the integral
+        * representation required by the kern-ctl API.
+        */
+       switch (process_attr) {
+       case LTTNG_PROCESS_ATTR_PROCESS_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
+               integral_value = (int) value->value.pid;
+               break;
+       case LTTNG_PROCESS_ATTR_USER_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
+               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_USER_NAME) {
+                       uid_t uid;
+
+                       ret_code = utils_user_id_from_name(
+                                       value->value.user_name, &uid);
+                       if (ret_code != LTTNG_OK) {
+                               goto end;
+                       }
+                       integral_value = (int) uid;
+               } else {
+                       integral_value = (int) value->value.uid;
+               }
+               break;
+       case LTTNG_PROCESS_ATTR_GROUP_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
+               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_GROUP_NAME) {
+                       gid_t gid;
+
+                       ret_code = utils_group_id_from_name(
+                                       value->value.group_name, &gid);
+                       if (ret_code != LTTNG_OK) {
+                               goto end;
+                       }
+                       integral_value = (int) gid;
+               } else {
+                       integral_value = (int) value->value.gid;
+               }
+               break;
+       default:
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       tracker = _kernel_get_process_attr_tracker(session, process_attr);
+       if (!tracker) {
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       status = process_attr_tracker_inclusion_set_add_value(tracker, value);
+       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
+               switch (status) {
+               case PROCESS_ATTR_TRACKER_STATUS_EXISTS:
+                       ret_code = LTTNG_ERR_PROCESS_ATTR_EXISTS;
+                       break;
+               case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
+                       ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
+                       break;
+               case PROCESS_ATTR_TRACKER_STATUS_ERROR:
+               default:
+                       ret_code = LTTNG_ERR_UNK;
+                       break;
+               }
+               goto end;
+       }
+
+       DBG("Kernel track %s %d for session id %" PRIu64,
+                       lttng_process_attr_to_string(process_attr),
+                       integral_value, session->id);
+       if (process_attr == LTTNG_PROCESS_ATTR_PROCESS_ID) {
+               /*
+                * Maintain a special case for the process ID process attribute
+                * tracker as it was the only supported attribute prior to 2.12.
+                */
+               ret = kernctl_track_pid(session->fd, integral_value);
+       } else {
+               ret = kernctl_track_id(
+                               session->fd, process_attr, integral_value);
+       }
+       if (ret == 0) {
+               ret_code = LTTNG_OK;
+               goto end;
+       }
+
+       kernel_wait_quiescent();
+
+       /* kern-ctl error handling */
+       switch (-ret) {
+       case 0:
+               ret_code = LTTNG_OK;
+               break;
+       case EINVAL:
+               ret_code = LTTNG_ERR_INVALID;
+               break;
+       case ENOMEM:
+               ret_code = LTTNG_ERR_NOMEM;
+               break;
+       case EEXIST:
+               ret_code = LTTNG_ERR_PROCESS_ATTR_EXISTS;
+               break;
+       default:
+               ret_code = LTTNG_ERR_UNK;
+               break;
+       }
+
+       /* Attempt to remove the value from the tracker. */
+       status = process_attr_tracker_inclusion_set_remove_value(
+                       tracker, value);
+       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
+               ERR("Failed to roll-back the tracking of kernel %s process attribute %d while handling a kern-ctl error",
+                               lttng_process_attr_to_string(process_attr),
+                               integral_value);
+       }
+end:
+       return ret_code;
+}
+
+enum lttng_error_code kernel_process_attr_tracker_inclusion_set_remove_value(
+               struct ltt_kernel_session *session,
+               enum lttng_process_attr process_attr,
+               const struct process_attr_value *value)
+{
+       int ret, integral_value;
+       enum lttng_error_code ret_code;
+       struct process_attr_tracker *tracker;
+       enum process_attr_tracker_status status;
+
+       /*
+        * Convert process attribute tracker value to the integral
+        * representation required by the kern-ctl API.
+        */
+       switch (process_attr) {
+       case LTTNG_PROCESS_ATTR_PROCESS_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
+               integral_value = (int) value->value.pid;
+               break;
+       case LTTNG_PROCESS_ATTR_USER_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
+               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_USER_NAME) {
+                       uid_t uid;
+
+                       ret_code = utils_user_id_from_name(
+                                       value->value.user_name, &uid);
+                       if (ret_code != LTTNG_OK) {
+                               goto end;
+                       }
+                       integral_value = (int) uid;
+               } else {
+                       integral_value = (int) value->value.uid;
+               }
+               break;
+       case LTTNG_PROCESS_ATTR_GROUP_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
+               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_GROUP_NAME) {
+                       gid_t gid;
+
+                       ret_code = utils_group_id_from_name(
+                                       value->value.group_name, &gid);
+                       if (ret_code != LTTNG_OK) {
+                               goto end;
+                       }
+                       integral_value = (int) gid;
+               } else {
+                       integral_value = (int) value->value.gid;
+               }
+               break;
+       default:
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       tracker = _kernel_get_process_attr_tracker(session, process_attr);
+       if (!tracker) {
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       status = process_attr_tracker_inclusion_set_remove_value(
+                       tracker, value);
+       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
+               switch (status) {
+               case PROCESS_ATTR_TRACKER_STATUS_MISSING:
+                       ret_code = LTTNG_ERR_PROCESS_ATTR_MISSING;
+                       break;
+               case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
+                       ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
+                       break;
+               case PROCESS_ATTR_TRACKER_STATUS_ERROR:
+               default:
+                       ret_code = LTTNG_ERR_UNK;
+                       break;
+               }
+               goto end;
+       }
+
+       DBG("Kernel untrack %s %d for session id %" PRIu64,
+                       lttng_process_attr_to_string(process_attr),
+                       integral_value, session->id);
+       if (process_attr == LTTNG_PROCESS_ATTR_PROCESS_ID) {
+               /*
+                * Maintain a special case for the process ID process attribute
+                * tracker as it was the only supported attribute prior to 2.12.
+                */
+               ret = kernctl_untrack_pid(session->fd, integral_value);
+       } else {
+               ret = kernctl_untrack_id(
+                               session->fd, process_attr, integral_value);
+       }
+       if (ret == 0) {
+               ret_code = LTTNG_OK;
+               goto end;
+       }
+       kernel_wait_quiescent();
+
+       /* kern-ctl error handling */
+       switch (-ret) {
+       case 0:
+               ret_code = LTTNG_OK;
+               break;
+       case EINVAL:
+               ret_code = LTTNG_ERR_INVALID;
+               break;
+       case ENOMEM:
+               ret_code = LTTNG_ERR_NOMEM;
+               break;
+       case ENOENT:
+               ret_code = LTTNG_ERR_PROCESS_ATTR_MISSING;
+               break;
+       default:
+               ret_code = LTTNG_ERR_UNK;
+               break;
+       }
+
+       /* Attempt to add the value to the tracker. */
+       status = process_attr_tracker_inclusion_set_add_value(
+                       tracker, value);
+       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
+               ERR("Failed to roll-back the tracking of kernel %s process attribute %d while handling a kern-ctl error",
+                               lttng_process_attr_to_string(process_attr),
+                               integral_value);
+       }
+end:
+       return ret_code;
+}
+
+/*
+ * Create kernel metadata, open it in the kernel tracer and add it to the
+ * kernel session.
+ */
+int kernel_open_metadata(struct ltt_kernel_session *session)
+{
+       int ret;
+       struct ltt_kernel_metadata *lkm = NULL;
+
+       LTTNG_ASSERT(session);
+
+       /* Allocate kernel metadata */
+       lkm = trace_kernel_create_metadata();
+       if (lkm == NULL) {
+               goto error;
+       }
+
+       /* Kernel tracer metadata creation */
+       ret = kernctl_open_metadata(session->fd, &lkm->conf->attr);
+       if (ret < 0) {
+               goto error_open;
+       }
+
+       lkm->fd = ret;
+       lkm->key = ++next_kernel_channel_key;
+       /* Prevent fd duplication after execlp() */
+       ret = fcntl(lkm->fd, F_SETFD, FD_CLOEXEC);
+       if (ret < 0) {
+               PERROR("fcntl session fd");
+       }
+
+       session->metadata = lkm;
+
+       DBG("Kernel metadata opened (fd: %d)", lkm->fd);
+
+       return 0;
+
+error_open:
+       trace_kernel_destroy_metadata(lkm);
+error:
+       return -1;
+}
+
+/*
+ * Start tracing session.
+ */
+int kernel_start_session(struct ltt_kernel_session *session)
+{
+       int ret;
+
+       LTTNG_ASSERT(session);
+
+       ret = kernctl_start_session(session->fd);
+       if (ret < 0) {
+               PERROR("ioctl start session");
+               goto error;
+       }
+
+       DBG("Kernel session started");
+
+       return 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Ask the kernel tracer to wait until in-flight probes have completed.
+ */
+void kernel_wait_quiescent(void)
+{
+       int ret;
+       int fd = kernel_tracer_fd;
+
+       DBG("Kernel quiescent wait on %d", fd);
+
+       ret = kernctl_wait_quiescent(fd);
+       if (ret < 0) {
+               PERROR("wait quiescent ioctl");
+               ERR("Kernel quiescent wait failed");
+       }
+}
+
+/*
+ * Force a flush of the metadata buffer.
+ */
+int kernel_metadata_flush_buffer(int fd)
+{
+       int ret;
+
+       DBG("Kernel flushing metadata buffer on fd %d", fd);
+
+       ret = kernctl_buffer_flush(fd);
+       if (ret < 0) {
+               ERR("Failed to flush metadata buffers %d (ret: %d)", fd, ret);
+       }
+
+       return 0;
+}
+
+/*
+ * Force flush buffer for channel.
+ */
+int kernel_flush_buffer(struct ltt_kernel_channel *channel)
+{
+       int ret;
+       struct ltt_kernel_stream *stream;
+
+       LTTNG_ASSERT(channel);
+
+       DBG("Flush buffer for channel %s", channel->channel->name);
+
+       cds_list_for_each_entry(stream, &channel->stream_list.head, list) {
+               DBG("Flushing channel stream %d", stream->fd);
+               ret = kernctl_buffer_flush(stream->fd);
+               if (ret < 0) {
+                       PERROR("ioctl");
+                       ERR("Failed to flush buffer for stream %d (ret: %d)",
+                                       stream->fd, ret);
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Stop tracing session.
+ */
+int kernel_stop_session(struct ltt_kernel_session *session)
+{
+       int ret;
+
+       LTTNG_ASSERT(session);
+
+       ret = kernctl_stop_session(session->fd);
+       if (ret < 0) {
+               goto error;
+       }
+
+       DBG("Kernel session stopped");
+
+       return 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Open the streams of a channel, register them with the kernel tracer and
+ * add them to the channel's stream list.
+ *
+ * Note: given that streams may appear in a random order with respect to the
+ * CPU number (e.g. CPU hotplug), the index used in a stream's name is not
+ * necessarily linked to the CPU number.
+ *
+ * Return the number of created streams, or a negative value on error.
+ */
+int kernel_open_channel_stream(struct ltt_kernel_channel *channel)
+{
+       int ret;
+       struct ltt_kernel_stream *lks;
+
+       LTTNG_ASSERT(channel);
+
+       while ((ret = kernctl_create_stream(channel->fd)) >= 0) {
+               lks = trace_kernel_create_stream(channel->channel->name,
+                               channel->stream_count);
+               if (lks == NULL) {
+                       ret = close(ret);
+                       if (ret) {
+                               PERROR("close");
+                       }
+                       goto error;
+               }
+
+               lks->fd = ret;
+               /* Prevent fd duplication after execlp() */
+               ret = fcntl(lks->fd, F_SETFD, FD_CLOEXEC);
+               if (ret < 0) {
+                       PERROR("fcntl session fd");
+               }
+
+               lks->tracefile_size = channel->channel->attr.tracefile_size;
+               lks->tracefile_count = channel->channel->attr.tracefile_count;
+
+               /* Add stream to channel stream list */
+               cds_list_add(&lks->list, &channel->stream_list.head);
+               channel->stream_count++;
+
+               DBG("Kernel stream %s created (fd: %d, state: %d)", lks->name, lks->fd,
+                               lks->state);
+       }
+
+       return channel->stream_count;
+
+error:
+       return -1;
+}
+
+/*
+ * Open the metadata stream and attach it to the kernel session.
+ */
+int kernel_open_metadata_stream(struct ltt_kernel_session *session)
+{
+       int ret;
+
+       LTTNG_ASSERT(session);
+
+       ret = kernctl_create_stream(session->metadata->fd);
+       if (ret < 0) {
+               PERROR("kernel create metadata stream");
+               goto error;
+       }
+
+       DBG("Kernel metadata stream created (fd: %d)", ret);
+       session->metadata_stream_fd = ret;
+       /* Prevent fd duplication after execlp() */
+       ret = fcntl(session->metadata_stream_fd, F_SETFD, FD_CLOEXEC);
+       if (ret < 0) {
+               PERROR("fcntl session fd");
+       }
+
+       return 0;
+
+error:
+       return -1;
+}
+
+/*
+ * Get the event list from the kernel tracer and return the number of elements.
+ */
+ssize_t kernel_list_events(struct lttng_event **events)
+{
+       int fd, ret;
+       char *event;
+       size_t nbmem, count = 0;
+       FILE *fp;
+       struct lttng_event *elist;
+
+       LTTNG_ASSERT(events);
+
+       fd = kernctl_tracepoint_list(kernel_tracer_fd);
+       if (fd < 0) {
+               PERROR("kernel tracepoint list");
+               goto error;
+       }
+
+       fp = fdopen(fd, "r");
+       if (fp == NULL) {
+               PERROR("kernel tracepoint list fdopen");
+               goto error_fp;
+       }
+
+       /*
+        * Init memory size counter
+        * See kernel-ctl.h for explanation of this value
+        */
+       nbmem = KERNEL_EVENT_INIT_LIST_SIZE;
+       elist = (lttng_event *) zmalloc(sizeof(struct lttng_event) * nbmem);
+       if (elist == NULL) {
+               PERROR("alloc list events");
+               count = -ENOMEM;
+               goto end;
+       }
+
+       while (fscanf(fp, "event { name = %m[^;]; };\n", &event) == 1) {
+               if (count >= nbmem) {
+                       struct lttng_event *new_elist;
+                       size_t new_nbmem;
+
+                       new_nbmem = nbmem << 1;
+                       DBG("Reallocating event list from %zu to %zu entries",
+                                       nbmem, new_nbmem);
+                       new_elist = (lttng_event *) realloc(elist, new_nbmem * sizeof(struct lttng_event));
+                       if (new_elist == NULL) {
+                               PERROR("realloc list events");
+                               free(event);
+                               free(elist);
+                               count = -ENOMEM;
+                               goto end;
+                       }
+                       /* Zero the new memory */
+                       memset(new_elist + nbmem, 0,
+                               (new_nbmem - nbmem) * sizeof(struct lttng_event));
+                       nbmem = new_nbmem;
+                       elist = new_elist;
+               }
+               strncpy(elist[count].name, event, LTTNG_SYMBOL_NAME_LEN);
+               elist[count].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
+               elist[count].enabled = -1;
+               count++;
+               free(event);
+       }
+
+       *events = elist;
+       DBG("Kernel list events done (%zu events)", count);
+end:
+       ret = fclose(fp);       /* closes both fp and fd */
+       if (ret) {
+               PERROR("fclose");
+       }
+       return count;
+
+error_fp:
+       ret = close(fd);
+       if (ret) {
+               PERROR("close");
+       }
+error:
+       return -1;
+}
+
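To make the contract of kernel_list_events() concrete, here is a hypothetical consumer
(the function name is illustrative only): a negative return value signals an error, and
on success the heap-allocated array must be freed by the caller.

    static void log_kernel_tracepoints(void)
    {
            struct lttng_event *events = NULL;
            const ssize_t count = kernel_list_events(&events);
            ssize_t i;

            if (count < 0) {
                    ERR("Failed to list kernel tracepoints");
                    return;
            }

            for (i = 0; i < count; i++) {
                    DBG("Kernel tracepoint: %s", events[i].name);
            }

            free(events);
    }
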
+/*
+ * Get kernel version and validate it.
+ */
+int kernel_validate_version(struct lttng_kernel_abi_tracer_version *version,
+               struct lttng_kernel_abi_tracer_abi_version *abi_version)
+{
+       int ret;
+
+       ret = kernctl_tracer_version(kernel_tracer_fd, version);
+       if (ret < 0) {
+               ERR("Failed to retrieve the lttng-modules version");
+               goto error;
+       }
+
+       /* Validate version */
+       if (version->major != VERSION_MAJOR) {
+               ERR("Kernel tracer major version (%d) is not compatible with lttng-tools major version (%d)",
+                       version->major, VERSION_MAJOR);
+               goto error_version;
+       }
+       ret = kernctl_tracer_abi_version(kernel_tracer_fd, abi_version);
+       if (ret < 0) {
+               ERR("Failed to retrieve lttng-modules ABI version");
+               goto error;
+       }
+       if (abi_version->major != LTTNG_KERNEL_ABI_MAJOR_VERSION) {
+               ERR("Kernel tracer ABI version (%d.%d) does not match the expected ABI major version (%d.*)",
+                       abi_version->major, abi_version->minor,
+                       LTTNG_KERNEL_ABI_MAJOR_VERSION);
+               goto error;
+       }
+       DBG2("Kernel tracer version validated (%d.%d, ABI %d.%d)",
+                       version->major, version->minor,
+                       abi_version->major, abi_version->minor);
+       return 0;
+
+error_version:
+       ret = -1;
+
+error:
+       ERR("Kernel tracer version check failed; kernel tracing will not be available");
+       return ret;
+}
+
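A hypothetical start-up check using kernel_validate_version(), with the same argument
types as above; the wrapper name does not appear in the patch:

    static bool kernel_tracer_version_is_compatible(void)
    {
            struct lttng_kernel_abi_tracer_version version;
            struct lttng_kernel_abi_tracer_abi_version abi_version;

            return kernel_validate_version(&version, &abi_version) == 0;
    }
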
+/*
+ * Kernel work-arounds called at the start of sessiond main().
+ */
+int init_kernel_workarounds(void)
+{
+       int ret;
+       FILE *fp;
+
+       /*
+        * boot_id needs to be read once before being used concurrently
+        * to deal with a Linux kernel race. A fix is proposed for
+        * upstream, but the work-around is needed for older kernels.
+        */
+       fp = fopen("/proc/sys/kernel/random/boot_id", "r");
+       if (!fp) {
+               goto end_boot_id;
+       }
+       while (!feof(fp)) {
+               char buf[37] = "";
+
+               ret = fread(buf, 1, sizeof(buf), fp);
+               if (ret < 0) {
+                       /* Ignore error, we don't really care */
+               }
+       }
+       ret = fclose(fp);
+       if (ret) {
+               PERROR("fclose");
+       }
+end_boot_id:
+       return 0;
+}
+
+/*
+ * Teardown of a kernel session, keeping data required by destroy notifiers.
+ */
+void kernel_destroy_session(struct ltt_kernel_session *ksess)
+{
+       struct lttng_trace_chunk *trace_chunk;
+
+       if (ksess == NULL) {
+               DBG3("No kernel session when tearing down session");
+               return;
+       }
+
+       DBG("Tearing down kernel session");
+       trace_chunk = ksess->current_trace_chunk;
+
+       /*
+        * If at least one FD has been sent to the consumer and the session has
+        * no output, destroy the channels on the consumer side: their streams
+        * are in *no* monitor mode, so an explicit command is needed to clean
+        * them up, or else they would leak.
+        */
+       if (!ksess->output_traces && ksess->consumer_fds_sent) {
+               int ret;
+               struct consumer_socket *socket;
+               struct lttng_ht_iter iter;
+
+               /* For each consumer socket. */
+               rcu_read_lock();
+               cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
+                               socket, node.node) {
+                       struct ltt_kernel_channel *chan;
+
+                       /* For each channel, ask the consumer to destroy it. */
+                       cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
+                               ret = kernel_consumer_destroy_channel(socket, chan);
+                               if (ret < 0) {
+                                       /* Consumer is probably dead. Use next socket. */
+                                       continue;
+                               }
+                       }
+               }
+               rcu_read_unlock();
+       }
+
+       /* Close any relayd session */
+       consumer_output_send_destroy_relayd(ksess->consumer);
+
+       trace_kernel_destroy_session(ksess);
+       lttng_trace_chunk_put(trace_chunk);
+}
+
+/* Teardown of data required by destroy notifiers. */
+void kernel_free_session(struct ltt_kernel_session *ksess)
+{
+       if (ksess == NULL) {
+               return;
+       }
+       trace_kernel_free_session(ksess);
+}
+
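The split between kernel_destroy_session() and kernel_free_session() supports a
two-phase teardown. The following sketch is illustrative only: the enclosing function
does not exist in the patch, and the exact point where destroy notifiers run is an
assumption.

    static void teardown_kernel_session(struct ltt_session *session)
    {
            /* Phase 1: tear down, keeping the data needed by destroy notifiers. */
            kernel_destroy_session(session->kernel_session);

            /* ... destroy notifiers are assumed to run here ... */

            /* Phase 2: release what is left. */
            kernel_free_session(session->kernel_session);
            session->kernel_session = NULL;
    }
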
+/*
+ * Destroy a kernel channel object. It does not do anything on the tracer side.
+ */
+void kernel_destroy_channel(struct ltt_kernel_channel *kchan)
+{
+       struct ltt_kernel_session *ksess = NULL;
+
+       LTTNG_ASSERT(kchan);
+       LTTNG_ASSERT(kchan->channel);
+
+       DBG3("Kernel destroy channel %s", kchan->channel->name);
+
+       /* Update channel count of associated session. */
+       if (kchan->session) {
+               /* Keep pointer reference so we can update it after the destroy. */
+               ksess = kchan->session;
+       }
+
+       trace_kernel_destroy_channel(kchan);
+
+       /*
+        * At this point the kernel channel is not visible anymore. This is safe
+        * since in order to work on a visible kernel session, the tracing session
+        * lock (ltt_session.lock) MUST be acquired.
+        */
+       if (ksess) {
+               ksess->channel_count--;
+       }
+}
+
+/*
+ * Take a snapshot for a given kernel session.
+ *
+ * Return LTTNG_OK on success, or an LTTNG_ERR code otherwise.
+ */
+ */
+enum lttng_error_code kernel_snapshot_record(
+               struct ltt_kernel_session *ksess,
+               const struct consumer_output *output, int wait,
+               uint64_t nb_packets_per_stream)
+{
+       int err, ret, saved_metadata_fd;
+       enum lttng_error_code status = LTTNG_OK;
+       struct consumer_socket *socket;
+       struct lttng_ht_iter iter;
+       struct ltt_kernel_metadata *saved_metadata;
+       char *trace_path = NULL;
+       size_t consumer_path_offset = 0;
+
+       LTTNG_ASSERT(ksess);
+       LTTNG_ASSERT(ksess->consumer);
+       LTTNG_ASSERT(output);
+
+       DBG("Kernel snapshot record started");
+
+       /* Save current metadata since the following calls will change it. */
+       saved_metadata = ksess->metadata;
+       saved_metadata_fd = ksess->metadata_stream_fd;
+
+       rcu_read_lock();
+
+       ret = kernel_open_metadata(ksess);
+       if (ret < 0) {
+               status = LTTNG_ERR_KERN_META_FAIL;
+               goto error;
+       }
+
+       ret = kernel_open_metadata_stream(ksess);
+       if (ret < 0) {
+               status = LTTNG_ERR_KERN_META_FAIL;
+               goto error_open_stream;
+       }
+
+       trace_path = setup_channel_trace_path(ksess->consumer,
+                       "", &consumer_path_offset);
+       if (!trace_path) {
+               status = LTTNG_ERR_INVALID;
+               goto error;
+       }
+       /* Send metadata to consumer and snapshot everything. */
+       cds_lfht_for_each_entry(output->socks->ht, &iter.iter,
+                       socket, node.node) {
+               struct ltt_kernel_channel *chan;
+
+               pthread_mutex_lock(socket->lock);
+               /* This stream must not be monitored by the consumer. */
+               ret = kernel_consumer_add_metadata(socket, ksess, 0);
+               pthread_mutex_unlock(socket->lock);
+               if (ret < 0) {
+                       status = LTTNG_ERR_KERN_META_FAIL;
+                       goto error_consumer;
+               }
+
+               /* For each channel, ask the consumer to snapshot it. */
+               cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
+                       status = consumer_snapshot_channel(socket, chan->key, output, 0,
+                                       ksess->uid, ksess->gid,
+                                       &trace_path[consumer_path_offset], wait,
+                                       nb_packets_per_stream);
+                       if (status != LTTNG_OK) {
+                               (void) kernel_consumer_destroy_metadata(socket,
+                                               ksess->metadata);
+                               goto error_consumer;
+                       }
+               }
+
+               /* Snapshot the metadata channel. */
+               status = consumer_snapshot_channel(socket, ksess->metadata->key, output,
+                               1, ksess->uid, ksess->gid, &trace_path[consumer_path_offset],
+                               wait, 0);
+               if (status != LTTNG_OK) {
+                       goto error_consumer;
+               }
+
+               /*
+                * The metadata snapshot is done, ask the consumer to destroy it since
+                * it's not monitored on the consumer side.
+                */
+               (void) kernel_consumer_destroy_metadata(socket, ksess->metadata);
+       }
+
+error_consumer:
+       /* Close newly opened metadata stream. It's now on the consumer side. */
+       err = close(ksess->metadata_stream_fd);
+       if (err < 0) {
+               PERROR("close snapshot kernel");
+       }
+
+error_open_stream:
+       trace_kernel_destroy_metadata(ksess->metadata);
+error:
+       /* Restore metadata state. */
+       ksess->metadata = saved_metadata;
+       ksess->metadata_stream_fd = saved_metadata_fd;
+       rcu_read_unlock();
+       free(trace_path);
+       return status;
+}
+
+/*
+ * Get the syscall mask array from the kernel tracer.
+ *
+ * Return 0 on success or a negative value on error. In both cases,
+ * syscall_mask should be freed.
+ */
+int kernel_syscall_mask(int chan_fd, char **syscall_mask, uint32_t *nr_bits)
+{
+       LTTNG_ASSERT(syscall_mask);
+       LTTNG_ASSERT(nr_bits);
+
+       return kernctl_syscall_mask(chan_fd, syscall_mask, nr_bits);
+}
+
+static
+int kernel_tracer_abi_greater_or_equal(unsigned int major, unsigned int minor)
+{
+       int ret;
+       struct lttng_kernel_abi_tracer_abi_version abi;
+
+       ret = kernctl_tracer_abi_version(kernel_tracer_fd, &abi);
+       if (ret < 0) {
+               ERR("Failed to retrieve lttng-modules ABI version");
+               goto error;
+       }
+
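+       /*
+        * The ABI is sufficient when its major is greater, or when the majors
+        * are equal and its minor is at least the requested one.
+        */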
+       ret = abi.major > major || (abi.major == major && abi.minor >= minor);
+error:
+       return ret;
+}
+
+/*
+ * Check for support of RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS via the tracer
+ * ABI version number.
+ *
+ * Return 1 if the feature is supported, 0 if it is not, or a negative value
+ * on error.
+ */
+int kernel_supports_ring_buffer_snapshot_sample_positions(void)
+{
+       /*
+        * RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS was introduced in
+        * lttng-modules ABI 2.3.
+        */
+       return kernel_tracer_abi_greater_or_equal(2, 3);
+}
+
+/*
+ * Check for support of the packet sequence number via the tracer ABI version
+ * number.
+ *
+ * Return 1 if the feature is supported, 0 if it is not, or a negative value
+ * on error.
+ */
+int kernel_supports_ring_buffer_packet_sequence_number(void)
+{
+       /*
+        * Packet sequence number was introduced in LTTng 2.8,
+        * lttng-modules ABI 2.1.
+        */
+       return kernel_tracer_abi_greater_or_equal(2, 1);
+}
+
+/*
+ * Check for support of event notifiers via the tracer ABI version number.
+ *
+ * Return 1 if the feature is supported, 0 if it is not, or a negative value
+ * on error.
+ */
+int kernel_supports_event_notifiers(void)
+{
+       /*
+        * Event notifiers were introduced in LTTng 2.13, lttng-modules ABI 2.6.
+        */
+       return kernel_tracer_abi_greater_or_equal(2, 6);
+}
+
+/*
+ * Rotate a kernel session.
+ *
+ * Return LTTNG_OK on success or else an LTTng error code.
+ */
+enum lttng_error_code kernel_rotate_session(struct ltt_session *session)
+{
+       int ret;
+       enum lttng_error_code status = LTTNG_OK;
+       struct consumer_socket *socket;
+       struct lttng_ht_iter iter;
+       struct ltt_kernel_session *ksess = session->kernel_session;
+
+       LTTNG_ASSERT(ksess);
+       LTTNG_ASSERT(ksess->consumer);
+
+       DBG("Rotate kernel session %s started (session %" PRIu64 ")",
+                       session->name, session->id);
+
+       rcu_read_lock();
+
+       /*
+        * Note that this loop will end after one iteration given that there is
+        * only one kernel consumer.
+        */
+       cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
+                       socket, node.node) {
+               struct ltt_kernel_channel *chan;
+
+               /* For each channel, ask the consumer to rotate it. */
+               cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
+                       DBG("Rotate kernel channel %" PRIu64 ", session %s",
+                                       chan->key, session->name);
+                       ret = consumer_rotate_channel(socket, chan->key,
+                                       ksess->uid, ksess->gid, ksess->consumer,
+                                       /* is_metadata_channel */ false);
+                       if (ret < 0) {
+                               status = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+                               goto error;
+                       }
+               }
+
+               /*
+                * Rotate the metadata channel.
+                */
+               ret = consumer_rotate_channel(socket, ksess->metadata->key,
+                               ksess->uid, ksess->gid, ksess->consumer,
+                               /* is_metadata_channel */ true);
+               if (ret < 0) {
+                       status = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+                       goto error;
+               }
+       }
+
+error:
+       rcu_read_unlock();
+       return status;
+}
+
+enum lttng_error_code kernel_create_channel_subdirectories(
+               const struct ltt_kernel_session *ksess)
+{
+       enum lttng_error_code ret = LTTNG_OK;
+       enum lttng_trace_chunk_status chunk_status;
+
+       rcu_read_lock();
+       LTTNG_ASSERT(ksess->current_trace_chunk);
+
+       /*
+        * Create the index subdirectory which will take care
+        * of implicitly creating the channel's path.
+        */
+       chunk_status = lttng_trace_chunk_create_subdirectory(
+                       ksess->current_trace_chunk,
+                       DEFAULT_KERNEL_TRACE_DIR "/" DEFAULT_INDEX_DIR);
+       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+               ret = LTTNG_ERR_CREATE_DIR_FAIL;
+               goto error;
+       }
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Set up the data necessary for kernel tracer actions.
+ */
+int init_kernel_tracer(void)
+{
+       int ret;
+       bool is_root = !getuid();
+
+       /* Modprobe lttng kernel modules */
+       ret = modprobe_lttng_control();
+       if (ret < 0) {
+               goto error;
+       }
+
+       /* Open debugfs lttng */
+       kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
+       if (kernel_tracer_fd < 0) {
+               DBG("Failed to open %s", module_proc_lttng);
+               goto error_open;
+       }
+
+       /* Validate kernel version */
+       ret = kernel_validate_version(&the_kernel_tracer_version,
+                       &the_kernel_tracer_abi_version);
+       if (ret < 0) {
+               goto error_version;
+       }
+
+       ret = modprobe_lttng_data();
+       if (ret < 0) {
+               goto error_modules;
+       }
+
+       ret = kernel_supports_ring_buffer_snapshot_sample_positions();
+       if (ret < 0) {
+               goto error_modules;
+       }
+       if (ret < 1) {
+               WARN("Kernel tracer does not support buffer monitoring. "
+                       "The monitoring timer of channels in the kernel domain "
+                       "will be set to 0 (disabled).");
+       }
+
+       ret = kernel_supports_event_notifiers();
+       if (ret < 0) {
+               ERR("Failed to check for kernel tracer event notifier support");
+               goto error_modules;
+       }
+       ret = kernel_create_event_notifier_group(&kernel_tracer_event_notifier_group_fd);
+       if (ret < 0) {
+               /* This is not fatal. */
+               WARN("Failed to create kernel event notifier group");
+               kernel_tracer_event_notifier_group_fd = -1;
+       } else {
+               enum event_notifier_error_accounting_status error_accounting_status;
+               enum lttng_error_code error_code_ret =
+                               kernel_create_event_notifier_group_notification_fd(
+                                               &kernel_tracer_event_notifier_group_notification_fd);
+
+               if (error_code_ret != LTTNG_OK) {
+                       goto error_modules;
+               }
+
+               error_accounting_status = event_notifier_error_accounting_register_kernel(
+                               kernel_tracer_event_notifier_group_fd);
+               if (error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+                       ERR("Failed to initialize event notifier error accounting for kernel tracer");
+                       error_code_ret = LTTNG_ERR_EVENT_NOTIFIER_ERROR_ACCOUNTING;
+                       goto error_modules;
+               }
+
+               kernel_token_to_event_notifier_rule_ht = cds_lfht_new(
+                               DEFAULT_HT_SIZE, 1, 0,
+                               CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING,
+                               NULL);
+               if (!kernel_token_to_event_notifier_rule_ht) {
+                       goto error_token_ht;
+               }
+       }
+
+       DBG("Kernel tracer initialized: kernel tracer fd = %d, event notifier group fd = %d, event notifier group notification fd = %d",
+                       kernel_tracer_fd, kernel_tracer_event_notifier_group_fd,
+                       kernel_tracer_event_notifier_group_notification_fd);
+
+       ret = syscall_init_table(kernel_tracer_fd);
+       if (ret < 0) {
+               ERR("Unable to populate syscall table. Syscall tracing won't "
+                       "work for this session daemon.");
+       }
+
+       return 0;
+
+error_version:
+       modprobe_remove_lttng_control();
+       ret = close(kernel_tracer_fd);
+       if (ret) {
+               PERROR("Failed to close kernel tracer file descriptor: fd = %d",
+                               kernel_tracer_fd);
+       }
+
+       kernel_tracer_fd = -1;
+       return LTTNG_ERR_KERN_VERSION;
+
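+/*
+ * The error labels below fall through to one another, releasing resources in
+ * the reverse order of their acquisition.
+ */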
+error_token_ht:
+       ret = close(kernel_tracer_event_notifier_group_notification_fd);
+       if (ret) {
+               PERROR("Failed to close kernel tracer event notifier group notification file descriptor: fd = %d",
+                               kernel_tracer_event_notifier_group_notification_fd);
+       }
+
+       kernel_tracer_event_notifier_group_notification_fd = -1;
+
+error_modules:
+       ret = close(kernel_tracer_event_notifier_group_fd);
+       if (ret) {
+               PERROR("Failed to close kernel tracer event notifier group file descriptor: fd = %d",
+                               kernel_tracer_event_notifier_group_fd);
+       }
+
+       kernel_tracer_event_notifier_group_fd = -1;
+
+       ret = close(kernel_tracer_fd);
+       if (ret) {
+               PERROR("Failed to close kernel tracer file descriptor: fd = %d",
+                               kernel_tracer_fd);
+       }
+
+       kernel_tracer_fd = -1;
+
+error_open:
+       modprobe_remove_lttng_control();
+
+error:
+       WARN("No kernel tracer available");
+       kernel_tracer_fd = -1;
+       if (!is_root) {
+               return LTTNG_ERR_NEED_ROOT_SESSIOND;
+       } else {
+               return LTTNG_ERR_KERN_NA;
+       }
+}
+
+void cleanup_kernel_tracer(void)
+{
+       DBG2("Closing kernel event notifier group notification file descriptor");
+       if (kernel_tracer_event_notifier_group_notification_fd >= 0) {
+               int ret = notification_thread_command_remove_tracer_event_source(
+                               the_notification_thread_handle,
+                               kernel_tracer_event_notifier_group_notification_fd);
+               if (ret != LTTNG_OK) {
+                       ERR("Failed to remove kernel event notifier notification from notification thread");
+               }
+
+               ret = close(kernel_tracer_event_notifier_group_notification_fd);
+               if (ret) {
+                       PERROR("Failed to close kernel event notifier group notification file descriptor: fd = %d",
+                                       kernel_tracer_event_notifier_group_notification_fd);
+               }
+
+               kernel_tracer_event_notifier_group_notification_fd = -1;
+       }
+
+       if (kernel_token_to_event_notifier_rule_ht) {
+               const int ret = cds_lfht_destroy(
+                               kernel_token_to_event_notifier_rule_ht, NULL);
+               LTTNG_ASSERT(ret == 0);
+       }
+
+       DBG2("Closing kernel event notifier group file descriptor");
+       if (kernel_tracer_event_notifier_group_fd >= 0) {
+               const int ret = close(kernel_tracer_event_notifier_group_fd);
+
+               if (ret) {
+                       PERROR("Failed to close kernel event notifier group file descriptor: fd = %d",
+                                       kernel_tracer_event_notifier_group_fd);
+               }
+
+               kernel_tracer_event_notifier_group_fd = -1;
+       }
+
+       DBG2("Closing kernel fd");
+       if (kernel_tracer_fd >= 0) {
+               const int ret = close(kernel_tracer_fd);
+
+               if (ret) {
+                       PERROR("Failed to close kernel tracer file descriptor: fd = %d",
+                                       kernel_tracer_fd);
+               }
+
+               kernel_tracer_fd = -1;
+       }
+
+       free(syscall_table);
+}
+
+bool kernel_tracer_is_initialized(void)
+{
+       return kernel_tracer_fd >= 0;
+}
+
+/*
+ * Clear a kernel session.
+ *
+ * Return LTTNG_OK on success or else an LTTng error code.
+ */
+enum lttng_error_code kernel_clear_session(struct ltt_session *session)
+{
+       int ret;
+       enum lttng_error_code status = LTTNG_OK;
+       struct consumer_socket *socket;
+       struct lttng_ht_iter iter;
+       struct ltt_kernel_session *ksess = session->kernel_session;
+
+       LTTNG_ASSERT(ksess);
+       LTTNG_ASSERT(ksess->consumer);
+
+       DBG("Clear kernel session %s (session %" PRIu64 ")",
+                       session->name, session->id);
+
+       rcu_read_lock();
+
+       if (ksess->active) {
+               ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
+               status = LTTNG_ERR_FATAL;
+               goto end;
+       }
+
+       /*
+        * Note that this loop will end after one iteration given that there is
+        * only one kernel consumer.
+        */
+       cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
+                       socket, node.node) {
+               struct ltt_kernel_channel *chan;
+
+               /* For each channel, ask the consumer to clear it. */
+               cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
+                       DBG("Clear kernel channel %" PRIu64 ", session %s",
+                                       chan->key, session->name);
+                       ret = consumer_clear_channel(socket, chan->key);
+                       if (ret < 0) {
+                               goto error;
+                       }
+               }
+
+               if (!ksess->metadata) {
+                       /*
+                        * Nothing to do for the metadata.
+                        * This is a snapshot session.
+                        * The metadata is generated on the fly.
+                        */
+                       continue;
+               }
+
+               /*
+                * Clear the metadata channel.
+                * The metadata channel is not cleared per se, but we still
+                * need to perform a rotation operation on it behind the scenes.
+                */
+               ret = consumer_clear_channel(socket, ksess->metadata->key);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+       goto end;
+error:
+       switch (-ret) {
+       case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
+               status = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
+               break;
+       default:
+               status = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
+               break;
+       }
+end:
+       rcu_read_unlock();
+       return status;
+}
+
+enum lttng_error_code kernel_create_event_notifier_group_notification_fd(
+               int *event_notifier_group_notification_fd)
+{
+       int local_fd = -1, ret;
+       enum lttng_error_code error_code_ret;
+
+       LTTNG_ASSERT(event_notifier_group_notification_fd);
+
+       ret = kernctl_create_event_notifier_group_notification_fd(
+                       kernel_tracer_event_notifier_group_fd);
+       if (ret < 0) {
+               PERROR("Failed to create kernel event notifier group notification file descriptor");
+               error_code_ret = LTTNG_ERR_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD;
+               goto error;
+       }
+
+       local_fd = ret;
+
+       /* Prevent fd duplication after execlp(). */
+       ret = fcntl(local_fd, F_SETFD, FD_CLOEXEC);
+       if (ret < 0) {
+               PERROR("Failed to set FD_CLOEXEC on kernel event notifier group notification file descriptor: fd = %d",
+                               local_fd);
+               error_code_ret = LTTNG_ERR_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD;
+               goto error;
+       }
+
+       DBG("Created kernel event notifier group notification file descriptor: fd = %d",
+                       local_fd);
+       error_code_ret = LTTNG_OK;
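+       /*
+        * Transfer ownership of the file descriptor to the caller; clearing
+        * local_fd keeps the error path below from closing it.
+        */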
+       *event_notifier_group_notification_fd = local_fd;
+       local_fd = -1;
+
+error:
+       if (local_fd >= 0) {
+               ret = close(local_fd);
+               if (ret) {
+                       PERROR("Failed to close kernel event notifier group notification file descriptor: fd = %d",
+                                       local_fd);
+               }
+       }
+
+       return error_code_ret;
+}
+
+enum lttng_error_code kernel_destroy_event_notifier_group_notification_fd(
+               int event_notifier_group_notification_fd)
+{
+       enum lttng_error_code ret_code = LTTNG_OK;
+
+       DBG("Closing event notifier group notification file descriptor: fd = %d",
+                       event_notifier_group_notification_fd);
+       if (event_notifier_group_notification_fd >= 0) {
+               const int ret = close(event_notifier_group_notification_fd);
+               if (ret) {
+                       PERROR("Failed to close event notifier group notification file descriptor: fd = %d",
+                                       event_notifier_group_notification_fd);
+               }
+       }
+
+       return ret_code;
+}
+
+static
+unsigned long hash_trigger(const struct lttng_trigger *trigger)
+{
+       const struct lttng_condition *condition =
+                       lttng_trigger_get_const_condition(trigger);
+
+       return lttng_condition_hash(condition);
+}
+
+static
+int match_trigger(struct cds_lfht_node *node, const void *key)
+{
+       const struct ltt_kernel_event_notifier_rule *event_notifier_rule;
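+       /* In C++, the void * key must be cast explicitly. */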
+       const struct lttng_trigger *trigger = (lttng_trigger *) key;
+
+       event_notifier_rule = caa_container_of(node,
+                       const struct ltt_kernel_event_notifier_rule, ht_node);
+
+       return lttng_trigger_is_equal(trigger, event_notifier_rule->trigger);
+}
+
+static enum lttng_error_code kernel_create_event_notifier_rule(
+               struct lttng_trigger *trigger,
+               const struct lttng_credentials *creds, uint64_t token)
+{
+       int err, fd, ret = 0;
+       enum lttng_error_code error_code_ret;
+       enum lttng_condition_status condition_status;
+       enum lttng_condition_type condition_type;
+       enum lttng_event_rule_type event_rule_type;
+       struct ltt_kernel_event_notifier_rule *event_notifier_rule;
+       struct lttng_kernel_abi_event_notifier kernel_event_notifier = {};
+       unsigned int capture_bytecode_count = 0, i;
+       const struct lttng_condition *condition = NULL;
+       const struct lttng_event_rule *event_rule = NULL;
+       enum lttng_condition_status cond_status;
+
+       LTTNG_ASSERT(trigger);
+
+       condition = lttng_trigger_get_const_condition(trigger);
+       LTTNG_ASSERT(condition);
+
+       condition_type = lttng_condition_get_type(condition);
+       LTTNG_ASSERT(condition_type == LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
+
+       /* Does not acquire a reference. */
+       condition_status = lttng_condition_event_rule_matches_get_rule(
+                       condition, &event_rule);
+       LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
+       LTTNG_ASSERT(event_rule);
+
+       event_rule_type = lttng_event_rule_get_type(event_rule);
+       LTTNG_ASSERT(event_rule_type != LTTNG_EVENT_RULE_TYPE_UNKNOWN);
+
+       error_code_ret = trace_kernel_create_event_notifier_rule(trigger, token,
+                       lttng_condition_event_rule_matches_get_error_counter_index(
+                                       condition),
+                       &event_notifier_rule);
+       if (error_code_ret != LTTNG_OK) {
+               goto error;
+       }
+
+       error_code_ret = trace_kernel_init_event_notifier_from_event_rule(
+                       event_rule, &kernel_event_notifier);
+       if (error_code_ret != LTTNG_OK) {
+               goto free_event;
+       }
+
+       kernel_event_notifier.event.token = event_notifier_rule->token;
+       kernel_event_notifier.error_counter_idx =
+                       lttng_condition_event_rule_matches_get_error_counter_index(
+                                       condition);
+
+       fd = kernctl_create_event_notifier(
+                       kernel_tracer_event_notifier_group_fd,
+                       &kernel_event_notifier);
+       if (fd < 0) {
+               switch (-fd) {
+               case EEXIST:
+                       error_code_ret = LTTNG_ERR_KERN_EVENT_EXIST;
+                       break;
+               case ENOSYS:
+                       WARN("Failed to create kernel event notifier: notifier type not implemented");
+                       error_code_ret = LTTNG_ERR_KERN_EVENT_ENOSYS;
+                       break;
+               case ENOENT:
+                       WARN("Failed to create kernel event notifier: not found: name = '%s'",
+                                       kernel_event_notifier.event.name);
+                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+                       break;
+               default:
+                       PERROR("Failed to create kernel event notifier: error code = %d, name = '%s'",
+                                       fd, kernel_event_notifier.event.name);
+                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+               }
+               goto free_event;
+       }
+
+       event_notifier_rule->fd = fd;
+       /* Prevent fd duplication after execlp(). */
+       err = fcntl(event_notifier_rule->fd, F_SETFD, FD_CLOEXEC);
+       if (err < 0) {
+               PERROR("Failed to set FD_CLOEXEC on kernel event notifier file descriptor: fd = %d",
+                               fd);
+               error_code_ret = LTTNG_ERR_FATAL;
+               goto set_cloexec_error;
+       }
+
+       if (event_notifier_rule->filter) {
+               err = kernctl_filter(event_notifier_rule->fd, event_notifier_rule->filter);
+               if (err < 0) {
+                       switch (-err) {
+                       case ENOMEM:
+                               error_code_ret = LTTNG_ERR_FILTER_NOMEM;
+                               break;
+                       default:
+                               error_code_ret = LTTNG_ERR_FILTER_INVAL;
+                               break;
+                       }
+                       goto filter_error;
+               }
+       }
+
+       if (lttng_event_rule_get_type(event_rule) ==
+                       LTTNG_EVENT_RULE_TYPE_KERNEL_UPROBE) {
+               ret = userspace_probe_event_rule_add_callsites(
+                               event_rule, creds, event_notifier_rule->fd);
+               if (ret) {
+                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+                       goto add_callsite_error;
+               }
+       }
+
+       /* Set the capture bytecode if any. */
+       cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
+                       condition, &capture_bytecode_count);
+       LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
+
+       for (i = 0; i < capture_bytecode_count; i++) {
+               const struct lttng_bytecode *capture_bytecode =
+                               lttng_condition_event_rule_matches_get_capture_bytecode_at_index(
+                                               condition, i);
+
+               if (capture_bytecode == NULL) {
+                       ERR("Unexpected NULL capture bytecode on condition");
+                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+                       goto capture_error;
+               }
+
+               ret = kernctl_capture(event_notifier_rule->fd, capture_bytecode);
+               if (ret < 0) {
+                       ERR("Failed to set capture bytecode on event notifier rule fd: fd = %d",
+                                       event_notifier_rule->fd);
+                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+                       goto capture_error;
+               }
+       }
+
+       err = kernctl_enable(event_notifier_rule->fd);
+       if (err < 0) {
+               switch (-err) {
+               case EEXIST:
+                       error_code_ret = LTTNG_ERR_KERN_EVENT_EXIST;
+                       break;
+               default:
+                       PERROR("enable kernel event notifier");
+                       error_code_ret = LTTNG_ERR_KERN_ENABLE_FAIL;
+                       break;
+               }
+               goto enable_error;
+       }
+
+       /* Add trigger to kernel token mapping in the hash table. */
+       rcu_read_lock();
+       cds_lfht_add(kernel_token_to_event_notifier_rule_ht, hash_trigger(trigger),
+                       &event_notifier_rule->ht_node);
+       rcu_read_unlock();
+
+       DBG("Created kernel event notifier: name = '%s', fd = %d",
+                       kernel_event_notifier.event.name,
+                       event_notifier_rule->fd);
+
+       return LTTNG_OK;
+
+capture_error:
+add_callsite_error:
+enable_error:
+set_cloexec_error:
+filter_error:
+       {
+               const int close_ret = close(event_notifier_rule->fd);
+
+               if (close_ret) {
+                       PERROR("Failed to close kernel event notifier file descriptor: fd = %d",
+                                       event_notifier_rule->fd);
+               }
+       }
+free_event:
+       free(event_notifier_rule);
+error:
+       return error_code_ret;
+}
+
+enum lttng_error_code kernel_register_event_notifier(
+               struct lttng_trigger *trigger,
+               const struct lttng_credentials *cmd_creds)
+{
+       enum lttng_error_code ret;
+       enum lttng_condition_status status;
+       enum lttng_domain_type domain_type;
+       const struct lttng_event_rule *event_rule;
+       const struct lttng_condition *const condition =
+                       lttng_trigger_get_const_condition(trigger);
+       const uint64_t token = lttng_trigger_get_tracer_token(trigger);
+
+       LTTNG_ASSERT(condition);
+
+       /* Does not acquire a reference to the event rule. */
+       status = lttng_condition_event_rule_matches_get_rule(
+                       condition, &event_rule);
+       LTTNG_ASSERT(status == LTTNG_CONDITION_STATUS_OK);
+
+       domain_type = lttng_event_rule_get_domain_type(event_rule);
+       LTTNG_ASSERT(domain_type == LTTNG_DOMAIN_KERNEL);
+
+       ret = kernel_create_event_notifier_rule(trigger, cmd_creds, token);
+       if (ret != LTTNG_OK) {
+               ERR("Failed to create kernel event notifier rule");
+       }
+
+       return ret;
+}
+
+enum lttng_error_code kernel_unregister_event_notifier(
+               const struct lttng_trigger *trigger)
+{
+       struct ltt_kernel_event_notifier_rule *token_event_rule_element;
+       struct cds_lfht_node *node;
+       struct cds_lfht_iter iter;
+       enum lttng_error_code error_code_ret;
+       int ret;
+
+       rcu_read_lock();
+
+       cds_lfht_lookup(kernel_token_to_event_notifier_rule_ht,
+                       hash_trigger(trigger), match_trigger, trigger, &iter);
+
+       node = cds_lfht_iter_get_node(&iter);
+       if (!node) {
+               error_code_ret = LTTNG_ERR_TRIGGER_NOT_FOUND;
+               goto error;
+       }
+
+       token_event_rule_element = caa_container_of(node,
+                       struct ltt_kernel_event_notifier_rule, ht_node);
+
+       ret = kernel_disable_event_notifier_rule(token_event_rule_element);
+       if (ret) {
+               error_code_ret = LTTNG_ERR_FATAL;
+               goto error;
+       }
+
+       trace_kernel_destroy_event_notifier_rule(token_event_rule_element);
+       error_code_ret = LTTNG_OK;
+
+error:
+       rcu_read_unlock();
+
+       return error_code_ret;
+}
+
+int kernel_get_notification_fd(void)
+{
+       return kernel_tracer_event_notifier_group_notification_fd;
+}
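
Now that this code is compiled as C++, the manual rcu_read_lock()/rcu_read_unlock()
pairs used above (for example in kernel_rotate_session() and kernel_clear_session())
could eventually be replaced by a scope guard. A minimal sketch of such a guard,
relying only on the urcu API already used by this file (hypothetical, not part of
this patch):

    /* RAII guard for the RCU read-side lock (illustrative sketch only). */
    class rcu_read_guard {
    public:
            rcu_read_guard() { rcu_read_lock(); }
            ~rcu_read_guard() { rcu_read_unlock(); }
            rcu_read_guard(const rcu_read_guard &) = delete;
            rcu_read_guard &operator=(const rcu_read_guard &) = delete;
    };

    /* Usage: the read-side lock is released when the guard goes out of scope. */
    {
            rcu_read_guard guard;
            /* ... iterate over RCU-protected hash tables ... */
    }
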
diff --git a/src/bin/lttng-sessiond/lttng-syscall.c b/src/bin/lttng-sessiond/lttng-syscall.c
deleted file mode 100644 (file)
index 06021bc..0000000
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Copyright (C) 2014 David Goulet <dgoulet@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <stdbool.h>
-
-#include <common/common.h>
-#include <common/kernel-ctl/kernel-ctl.h>
-
-#include "lttng-sessiond.h"
-#include "kernel.h"
-#include "lttng-syscall.h"
-#include "utils.h"
-
-/* Global syscall table. */
-struct syscall *syscall_table;
-
-/* Number of entry in the syscall table. */
-static size_t syscall_table_nb_entry;
-
-/*
- * Populate the system call table using the kernel tracer.
- *
- * Return 0 on success and the syscall table is allocated. On error, a negative
- * value is returned.
- */
-int syscall_init_table(int tracer_fd)
-{
-       int ret, fd, err;
-       size_t nbmem;
-       FILE *fp;
-       /* Syscall data from the kernel. */
-       size_t index = 0;
-       bool at_least_one_syscall = false;
-       uint32_t bitness;
-       char name[SYSCALL_NAME_LEN];
-
-#if (SYSCALL_NAME_LEN == 255)
-#define SYSCALL_NAME_LEN_SCANF_IS_A_BROKEN_API "254"
-#endif
-
-       DBG3("Syscall init system call table");
-
-       fd = kernctl_syscall_list(tracer_fd);
-       if (fd < 0) {
-               ret = fd;
-               PERROR("kernelctl syscall list");
-               goto error_ioctl;
-       }
-
-       fp = fdopen(fd, "r");
-       if (!fp) {
-               ret = -errno;
-               PERROR("syscall list fdopen");
-               goto error_fp;
-       }
-
-       nbmem = SYSCALL_TABLE_INIT_SIZE;
-       syscall_table = zmalloc(sizeof(struct syscall) * nbmem);
-       if (!syscall_table) {
-               ret = -errno;
-               PERROR("syscall list zmalloc");
-               goto error;
-       }
-
-       while (fscanf(fp,
-                               "syscall { index = %zu; \
-                               name = %" SYSCALL_NAME_LEN_SCANF_IS_A_BROKEN_API "[^;]; \
-                               bitness = %u; };\n",
-                               &index, name, &bitness) == 3) {
-               at_least_one_syscall = true;
-               if (index >= nbmem) {
-                       struct syscall *new_list;
-                       size_t new_nbmem;
-
-                       /* Double memory size. */
-                       new_nbmem = max(index + 1, nbmem << 1);
-                       if (new_nbmem > (SIZE_MAX / sizeof(*new_list))) {
-                               /* Overflow, stop everything, something went really wrong. */
-                               ERR("Syscall listing memory size overflow. Stopping");
-                               free(syscall_table);
-                               syscall_table = NULL;
-                               ret = -EINVAL;
-                               goto error;
-                       }
-
-                       DBG("Reallocating syscall table from %zu to %zu entries", nbmem,
-                                       new_nbmem);
-                       new_list = realloc(syscall_table, new_nbmem * sizeof(*new_list));
-                       if (!new_list) {
-                               ret = -errno;
-                               PERROR("syscall list realloc");
-                               goto error;
-                       }
-
-                       /* Zero out the new memory. */
-                       memset(new_list + nbmem, 0,
-                                       (new_nbmem - nbmem) * sizeof(*new_list));
-                       nbmem = new_nbmem;
-                       syscall_table = new_list;
-               }
-               syscall_table[index].index = index;
-               syscall_table[index].bitness = bitness;
-               if (lttng_strncpy(syscall_table[index].name, name,
-                               sizeof(syscall_table[index].name))) {
-                       ret = -EINVAL;
-                       free(syscall_table);
-                       syscall_table = NULL;
-                       goto error;
-               }
-               /*
-               DBG("Syscall name '%s' at index %" PRIu32 " of bitness %u",
-                               syscall_table[index].name,
-                               syscall_table[index].index,
-                               syscall_table[index].bitness);
-               */
-       }
-
-       /* Index starts at 0. */
-       if (at_least_one_syscall) {
-               syscall_table_nb_entry = index + 1;
-       }
-
-       ret = 0;
-
-error:
-       err = fclose(fp);
-       if (err) {
-               PERROR("syscall list fclose");
-       }
-       return ret;
-
-error_fp:
-       err = close(fd);
-       if (err) {
-               PERROR("syscall list close");
-       }
-
-error_ioctl:
-       return ret;
-}
-
-/*
- * Helper function for the list syscalls command that empty the temporary
- * syscall hashtable used to track duplicate between 32 and 64 bit arch.
- *
- * This empty the hash table and destroys it after. After this, the pointer is
- * unsuable. RCU read side lock MUST be acquired before calling this.
- */
-static void destroy_syscall_ht(struct lttng_ht *ht)
-{
-       struct lttng_ht_iter iter;
-       struct syscall *ksyscall;
-
-       DBG3("Destroying syscall hash table.");
-
-       if (!ht) {
-               return;
-       }
-
-       cds_lfht_for_each_entry(ht->ht, &iter.iter, ksyscall, node.node) {
-               int ret;
-
-               ret = lttng_ht_del(ht, &iter);
-               LTTNG_ASSERT(!ret);
-               free(ksyscall);
-       }
-       ht_cleanup_push(ht);
-}
-
-/*
- * Allocate the given hashtable pointer.
- *
- * Return 0 on success else a negative LTTNG error value.
- */
-static int init_syscall_ht(struct lttng_ht **ht)
-{
-       int ret;
-
-       *ht = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
-       if (!*ht) {
-               ret = -LTTNG_ERR_NOMEM;
-       } else {
-               ret = 0;
-       }
-
-       return ret;
-}
-
-/*
- * Lookup a syscall in the given hash table by name.
- *
- * Return syscall object if found or else NULL.
- */
-static struct syscall *lookup_syscall(struct lttng_ht *ht, const char *name)
-{
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-       struct syscall *ksyscall = NULL;
-
-       LTTNG_ASSERT(ht);
-       LTTNG_ASSERT(name);
-
-       lttng_ht_lookup(ht, (void *) name, &iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-       if (node) {
-               ksyscall = caa_container_of(node, struct syscall, node);
-       }
-
-       return ksyscall;
-}
-
-/*
- * Using the given syscall object in the events array with the bitness of the
- * syscall at index in the syscall table.
- */
-static void update_event_syscall_bitness(struct lttng_event *events,
-               unsigned int index, unsigned int syscall_index)
-{
-       LTTNG_ASSERT(events);
-
-       if (syscall_table[index].bitness == 32) {
-               events[syscall_index].flags |= LTTNG_EVENT_FLAG_SYSCALL_32;
-       } else {
-               events[syscall_index].flags |= LTTNG_EVENT_FLAG_SYSCALL_64;
-       }
-}
-
-/*
- * Allocate and initialize syscall object and add it to the given hashtable.
- *
- * Return 0 on success else -LTTNG_ERR_NOMEM.
- */
-static int add_syscall_to_ht(struct lttng_ht *ht, unsigned int index,
-               unsigned int syscall_index)
-{
-       int ret;
-       struct syscall *ksyscall;
-
-       LTTNG_ASSERT(ht);
-
-       ksyscall = zmalloc(sizeof(*ksyscall));
-       if (!ksyscall) {
-               ret = -LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       strncpy(ksyscall->name, syscall_table[index].name,
-                       sizeof(ksyscall->name));
-       ksyscall->bitness = syscall_table[index].bitness;
-       ksyscall->index = syscall_index;
-       lttng_ht_node_init_str(&ksyscall->node, ksyscall->name);
-       lttng_ht_add_unique_str(ht, &ksyscall->node);
-       ret = 0;
-
-error:
-       return ret;
-}
-
-/*
- * List syscalls present in the kernel syscall global array, allocate and
- * populate the events structure with them. Skip the empty syscall name.
- *
- * Return the number of entries in the array else a negative value.
- */
-ssize_t syscall_table_list(struct lttng_event **_events)
-{
-       int i, index = 0;
-       ssize_t ret;
-       struct lttng_event *events;
-       /* Hash table used to filter duplicate out. */
-       struct lttng_ht *syscalls_ht = NULL;
-
-       LTTNG_ASSERT(_events);
-
-       DBG("Syscall table listing.");
-
-       rcu_read_lock();
-
-       /*
-        * Allocate at least the number of total syscall we have even if some of
-        * them might not be valid. The count below will make sure to return the
-        * right size of the events array.
-        */
-       events = zmalloc(syscall_table_nb_entry * sizeof(*events));
-       if (!events) {
-               PERROR("syscall table list zmalloc");
-               ret = -LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       ret = init_syscall_ht(&syscalls_ht);
-       if (ret < 0) {
-               goto error;
-       }
-
-       for (i = 0; i < syscall_table_nb_entry; i++) {
-               struct syscall *ksyscall;
-
-               /* Skip empty syscalls. */
-               if (*syscall_table[i].name == '\0') {
-                       continue;
-               }
-
-               ksyscall = lookup_syscall(syscalls_ht, syscall_table[i].name);
-               if (ksyscall) {
-                       update_event_syscall_bitness(events, i, ksyscall->index);
-                       continue;
-               }
-
-               ret = add_syscall_to_ht(syscalls_ht, i, index);
-               if (ret < 0) {
-                       goto error;
-               }
-
-               /* Copy the event information in the event's array. */
-               strncpy(events[index].name, syscall_table[i].name,
-                               sizeof(events[index].name));
-               update_event_syscall_bitness(events, i, index);
-               events[index].type = LTTNG_EVENT_SYSCALL;
-               /* This makes the command line not print the enabled/disabled field. */
-               events[index].enabled = -1;
-               index++;
-       }
-
-       destroy_syscall_ht(syscalls_ht);
-       *_events = events;
-       rcu_read_unlock();
-       return index;
-
-error:
-       destroy_syscall_ht(syscalls_ht);
-       free(events);
-       rcu_read_unlock();
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/lttng-syscall.cpp b/src/bin/lttng-sessiond/lttng-syscall.cpp
new file mode 100644 (file)
index 0000000..f145634
--- /dev/null
@@ -0,0 +1,340 @@
+/*
+ * Copyright (C) 2014 David Goulet <dgoulet@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <stdbool.h>
+
+#include <common/common.h>
+#include <common/kernel-ctl/kernel-ctl.h>
+
+#include "lttng-sessiond.h"
+#include "kernel.h"
+#include "lttng-syscall.h"
+#include "utils.h"
+
+/* Global syscall table. */
+struct syscall *syscall_table;
+
+/* Number of entries in the syscall table. */
+static size_t syscall_table_nb_entry;
+
+/*
+ * Populate the system call table using the kernel tracer.
+ *
+ * Return 0 on success, in which case the syscall table has been allocated. On
+ * error, a negative value is returned.
+ */
+int syscall_init_table(int tracer_fd)
+{
+       int ret, fd, err;
+       size_t nbmem;
+       FILE *fp;
+       /* Syscall data from the kernel. */
+       size_t index = 0;
+       bool at_least_one_syscall = false;
+       uint32_t bitness;
+       char name[SYSCALL_NAME_LEN];
+
+#if (SYSCALL_NAME_LEN == 255)
+#define SYSCALL_NAME_LEN_SCANF_IS_A_BROKEN_API "254"
+#endif
+
+       DBG3("Syscall init system call table");
+
+       fd = kernctl_syscall_list(tracer_fd);
+       if (fd < 0) {
+               ret = fd;
+               PERROR("kernelctl syscall list");
+               goto error_ioctl;
+       }
+
+       fp = fdopen(fd, "r");
+       if (!fp) {
+               ret = -errno;
+               PERROR("syscall list fdopen");
+               goto error_fp;
+       }
+
+       nbmem = SYSCALL_TABLE_INIT_SIZE;
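+       /* zmalloc() returns void *; C++ requires the explicit cast. */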
+       syscall_table = (struct syscall *) zmalloc(sizeof(struct syscall) * nbmem);
+       if (!syscall_table) {
+               ret = -errno;
+               PERROR("syscall list zmalloc");
+               goto error;
+       }
+
+       while (fscanf(fp,
+                               "syscall { index = %zu; \
+                               name = %" SYSCALL_NAME_LEN_SCANF_IS_A_BROKEN_API "[^;]; \
+                               bitness = %u; };\n",
+                               &index, name, &bitness) == 3) {
+               at_least_one_syscall = true;
+               if (index >= nbmem) {
+                       struct syscall *new_list;
+                       size_t new_nbmem;
+
+                       /* Double memory size. */
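+                       /* std::max replaces the max() helper of the C version. */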
+                       new_nbmem = std::max(index + 1, nbmem << 1);
+                       if (new_nbmem > (SIZE_MAX / sizeof(*new_list))) {
+                               /* Overflow, stop everything, something went really wrong. */
+                               ERR("Syscall listing memory size overflow. Stopping");
+                               free(syscall_table);
+                               syscall_table = NULL;
+                               ret = -EINVAL;
+                               goto error;
+                       }
+
+                       DBG("Reallocating syscall table from %zu to %zu entries", nbmem,
+                                       new_nbmem);
+                       new_list = (struct syscall *) realloc(syscall_table, new_nbmem * sizeof(*new_list));
+                       if (!new_list) {
+                               ret = -errno;
+                               PERROR("syscall list realloc");
+                               goto error;
+                       }
+
+                       /* Zero out the new memory. */
+                       memset(new_list + nbmem, 0,
+                                       (new_nbmem - nbmem) * sizeof(*new_list));
+                       nbmem = new_nbmem;
+                       syscall_table = new_list;
+               }
+               syscall_table[index].index = index;
+               syscall_table[index].bitness = bitness;
+               if (lttng_strncpy(syscall_table[index].name, name,
+                               sizeof(syscall_table[index].name))) {
+                       ret = -EINVAL;
+                       free(syscall_table);
+                       syscall_table = NULL;
+                       goto error;
+               }
+               /*
+               DBG("Syscall name '%s' at index %" PRIu32 " of bitness %u",
+                               syscall_table[index].name,
+                               syscall_table[index].index,
+                               syscall_table[index].bitness);
+               */
+       }
+
+       /* Index starts at 0. */
+       if (at_least_one_syscall) {
+               syscall_table_nb_entry = index + 1;
+       }
+
+       ret = 0;
+
+error:
+       err = fclose(fp);
+       if (err) {
+               PERROR("syscall list fclose");
+       }
+       return ret;
+
+error_fp:
+       err = close(fd);
+       if (err) {
+               PERROR("syscall list close");
+       }
+
+error_ioctl:
+       return ret;
+}
+
+/*
+ * Helper function for the list syscalls command that empties the temporary
+ * syscall hash table used to track duplicates between the 32-bit and 64-bit
+ * architectures.
+ *
+ * This empties the hash table and then destroys it; afterwards, the pointer
+ * is unusable. The RCU read-side lock MUST be acquired before calling this.
+ */
+static void destroy_syscall_ht(struct lttng_ht *ht)
+{
+       struct lttng_ht_iter iter;
+       struct syscall *ksyscall;
+
+       DBG3("Destroying syscall hash table.");
+
+       if (!ht) {
+               return;
+       }
+
+       cds_lfht_for_each_entry(ht->ht, &iter.iter, ksyscall, node.node) {
+               int ret;
+
+               ret = lttng_ht_del(ht, &iter);
+               LTTNG_ASSERT(!ret);
+               free(ksyscall);
+       }
+       ht_cleanup_push(ht);
+}
+
+/*
+ * Allocate the given hash table pointer.
+ *
+ * Return 0 on success or a negative LTTNG error value.
+ */
+static int init_syscall_ht(struct lttng_ht **ht)
+{
+       int ret;
+
+       *ht = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+       if (!*ht) {
+               ret = -LTTNG_ERR_NOMEM;
+       } else {
+               ret = 0;
+       }
+
+       return ret;
+}
+
+/*
+ * Look up a syscall in the given hash table by name.
+ *
+ * Return the syscall object if found, or NULL otherwise.
+ */
+static struct syscall *lookup_syscall(struct lttng_ht *ht, const char *name)
+{
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+       struct syscall *ksyscall = NULL;
+
+       LTTNG_ASSERT(ht);
+       LTTNG_ASSERT(name);
+
+       lttng_ht_lookup(ht, (void *) name, &iter);
+       node = lttng_ht_iter_get_node_str(&iter);
+       if (node) {
+               ksyscall = caa_container_of(node, struct syscall, node);
+       }
+
+       return ksyscall;
+}
+
+/*
+ * Update the event at syscall_index in the events array with the bitness of
+ * the syscall at index in the syscall table.
+ */
+static void update_event_syscall_bitness(struct lttng_event *events,
+               unsigned int index, unsigned int syscall_index)
+{
+       LTTNG_ASSERT(events);
+
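+       /*
+        * In C++, OR-ing lttng_event_flag values yields an int, so the result
+        * must be cast back to the enum type explicitly.
+        */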
+       if (syscall_table[index].bitness == 32) {
+               events[syscall_index].flags = (lttng_event_flag) (events[syscall_index].flags | LTTNG_EVENT_FLAG_SYSCALL_32);
+       } else {
+               events[syscall_index].flags = (lttng_event_flag) (events[syscall_index].flags | LTTNG_EVENT_FLAG_SYSCALL_64);
+       }
+}
+
+/*
+ * Allocate and initialize a syscall object and add it to the given hash
+ * table.
+ *
+ * Return 0 on success or -LTTNG_ERR_NOMEM.
+ */
+static int add_syscall_to_ht(struct lttng_ht *ht, unsigned int index,
+               unsigned int syscall_index)
+{
+       int ret;
+       struct syscall *ksyscall;
+
+       LTTNG_ASSERT(ht);
+
+       ksyscall = (struct syscall *) zmalloc(sizeof(*ksyscall));
+       if (!ksyscall) {
+               ret = -LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       strncpy(ksyscall->name, syscall_table[index].name,
+                       sizeof(ksyscall->name));
+       ksyscall->bitness = syscall_table[index].bitness;
+       ksyscall->index = syscall_index;
+       lttng_ht_node_init_str(&ksyscall->node, ksyscall->name);
+       lttng_ht_add_unique_str(ht, &ksyscall->node);
+       ret = 0;
+
+error:
+       return ret;
+}
+
+/*
+ * List the syscalls present in the kernel syscall global array: allocate the
+ * events array and populate it, skipping entries with an empty syscall name.
+ *
+ * Return the number of entries in the array, or a negative value on error.
+ */
+ssize_t syscall_table_list(struct lttng_event **_events)
+{
+       int i, index = 0;
+       ssize_t ret;
+       struct lttng_event *events;
+       /* Hash table used to filter out duplicates. */
+       struct lttng_ht *syscalls_ht = NULL;
+
+       LTTNG_ASSERT(_events);
+
+       DBG("Syscall table listing.");
+
+       rcu_read_lock();
+
+       /*
+        * Allocate at least the total number of syscalls we have, even if
+        * some of them might not be valid. The count below makes sure to
+        * return the right size of the events array.
+        */
+       events = (lttng_event *) zmalloc(syscall_table_nb_entry * sizeof(*events));
+       if (!events) {
+               PERROR("syscall table list zmalloc");
+               ret = -LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       ret = init_syscall_ht(&syscalls_ht);
+       if (ret < 0) {
+               goto error;
+       }
+
+       for (i = 0; i < syscall_table_nb_entry; i++) {
+               struct syscall *ksyscall;
+
+               /* Skip empty syscalls. */
+               if (*syscall_table[i].name == '\0') {
+                       continue;
+               }
+
+               ksyscall = lookup_syscall(syscalls_ht, syscall_table[i].name);
+               if (ksyscall) {
+                       update_event_syscall_bitness(events, i, ksyscall->index);
+                       continue;
+               }
+
+               ret = add_syscall_to_ht(syscalls_ht, i, index);
+               if (ret < 0) {
+                       goto error;
+               }
+
+               /* Copy the event information in the event's array. */
+               strncpy(events[index].name, syscall_table[i].name,
+                               sizeof(events[index].name));
+               update_event_syscall_bitness(events, i, index);
+               events[index].type = LTTNG_EVENT_SYSCALL;
+               /* This makes the command line not print the enabled/disabled field. */
+               events[index].enabled = -1;
+               index++;
+       }
+
+       destroy_syscall_ht(syscalls_ht);
+       *_events = events;
+       rcu_read_unlock();
+       return index;
+
+error:
+       destroy_syscall_ht(syscalls_ht);
+       free(events);
+       rcu_read_unlock();
+       return ret;
+}
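
The explicit enum casts in update_event_syscall_bitness() above could be factored
into a small helper if more call sites appear. A possible sketch, relying only on
standard C++ (hypothetical, not part of this patch):

    /* OR a flag into an unscoped enum field and cast the result back (sketch). */
    template <typename EnumT>
    static void add_event_flag(EnumT &flags, EnumT flag)
    {
            flags = static_cast<EnumT>(flags | flag);
    }

    /* For example: add_event_flag(events[syscall_index].flags, LTTNG_EVENT_FLAG_SYSCALL_32); */
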
diff --git a/src/bin/lttng-sessiond/main.c b/src/bin/lttng-sessiond/main.c
deleted file mode 100644 (file)
index 4effb3c..0000000
+++ /dev/null
@@ -1,2053 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <getopt.h>
-#include <grp.h>
-#include <limits.h>
-#include <paths.h>
-#include <pthread.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <inttypes.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/resource.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <urcu/uatomic.h>
-#include <unistd.h>
-#include <ctype.h>
-
-#include <common/common.h>
-#include <common/compat/socket.h>
-#include <common/compat/getenv.h>
-#include <common/defaults.h>
-#include <common/kernel-consumer/kernel-consumer.h>
-#include <common/futex.h>
-#include <common/relayd/relayd.h>
-#include <common/utils.h>
-#include <common/daemonize.h>
-#include <common/config/session-config.h>
-#include <common/dynamic-buffer.h>
-#include <lttng/event-internal.h>
-
-#include "lttng-sessiond.h"
-#include "buffer-registry.h"
-#include "channel.h"
-#include "cmd.h"
-#include "consumer.h"
-#include "context.h"
-#include "event.h"
-#include "event-notifier-error-accounting.h"
-#include "kernel.h"
-#include "kernel-consumer.h"
-#include "lttng-ust-ctl.h"
-#include "ust-consumer.h"
-#include "utils.h"
-#include "fd-limit.h"
-#include "health-sessiond.h"
-#include "testpoint.h"
-#include "notify-apps.h"
-#include "agent-thread.h"
-#include "save.h"
-#include "notification-thread.h"
-#include "notification-thread-commands.h"
-#include "rotation-thread.h"
-#include "agent.h"
-#include "ht-cleanup.h"
-#include "sessiond-config.h"
-#include "timer.h"
-#include "thread.h"
-#include "client.h"
-#include "dispatch.h"
-#include "register.h"
-#include "manage-apps.h"
-#include "manage-kernel.h"
-#include "modprobe.h"
-#include "ust-sigbus.h"
-
-static const char *help_msg =
-#ifdef LTTNG_EMBED_HELP
-#include <lttng-sessiond.8.h>
-#else
-NULL
-#endif
-;
-
-#define EVENT_NOTIFIER_ERROR_COUNTER_NUMBER_OF_BUCKET_MAX 65535
-#define EVENT_NOTIFIER_ERROR_BUFFER_SIZE_BASE_OPTION_STR \
-               "event-notifier-error-buffer-size"
-#define EVENT_NOTIFIER_ERROR_BUFFER_SIZE_KERNEL_OPTION_STR \
-               EVENT_NOTIFIER_ERROR_BUFFER_SIZE_BASE_OPTION_STR "-kernel"
-#define EVENT_NOTIFIER_ERROR_BUFFER_SIZE_USERSPACE_OPTION_STR \
-               EVENT_NOTIFIER_ERROR_BUFFER_SIZE_BASE_OPTION_STR "-userspace"
-
-
-const char *progname;
-static int lockfile_fd = -1;
-static int opt_print_version;
-
-/* Set to 1 when a SIGUSR1 signal is received. */
-static int recv_child_signal;
-
-/* Command line options */
-static const struct option long_options[] = {
-       { "client-sock", required_argument, 0, 'c' },
-       { "apps-sock", required_argument, 0, 'a' },
-       { "kconsumerd-cmd-sock", required_argument, 0, '\0' },
-       { "kconsumerd-err-sock", required_argument, 0, '\0' },
-       { "ustconsumerd32-cmd-sock", required_argument, 0, '\0' },
-       { "ustconsumerd32-err-sock", required_argument, 0, '\0' },
-       { "ustconsumerd64-cmd-sock", required_argument, 0, '\0' },
-       { "ustconsumerd64-err-sock", required_argument, 0, '\0' },
-       { "consumerd32-path", required_argument, 0, '\0' },
-       { "consumerd32-libdir", required_argument, 0, '\0' },
-       { "consumerd64-path", required_argument, 0, '\0' },
-       { "consumerd64-libdir", required_argument, 0, '\0' },
-       { "daemonize", no_argument, 0, 'd' },
-       { "background", no_argument, 0, 'b' },
-       { "sig-parent", no_argument, 0, 'S' },
-       { "help", no_argument, 0, 'h' },
-       { "group", required_argument, 0, 'g' },
-       { "version", no_argument, 0, 'V' },
-       { "quiet", no_argument, 0, 'q' },
-       { "verbose", no_argument, 0, 'v' },
-       { "verbose-consumer", no_argument, 0, '\0' },
-       { "no-kernel", no_argument, 0, '\0' },
-       { "pidfile", required_argument, 0, 'p' },
-       { "agent-tcp-port", required_argument, 0, '\0' },
-       { "config", required_argument, 0, 'f' },
-       { "load", required_argument, 0, 'l' },
-       { "kmod-probes", required_argument, 0, '\0' },
-       { "extra-kmod-probes", required_argument, 0, '\0' },
-       { EVENT_NOTIFIER_ERROR_BUFFER_SIZE_KERNEL_OPTION_STR, required_argument, 0, '\0' },
-       { EVENT_NOTIFIER_ERROR_BUFFER_SIZE_USERSPACE_OPTION_STR, required_argument, 0, '\0' },
-       { NULL, 0, 0, 0 }
-};
-
-/* Command line options to ignore from configuration file */
-static const char *config_ignore_options[] = { "help", "version", "config" };
-
-/*
- * This pipe is used to inform the thread managing application communication
- * that a command is queued and ready to be processed.
- */
-static int apps_cmd_pipe[2] = { -1, -1 };
-static int apps_cmd_notify_pipe[2] = { -1, -1 };
-
-/*
- * UST registration command queue. This queue is tied with a futex and uses a N
- * wakers / 1 waiter implemented and detailed in futex.c/.h
- *
- * The thread_registration_apps and thread_dispatch_ust_registration uses this
- * queue along with the wait/wake scheme. The thread_manage_apps receives down
- * the line new application socket and monitors it for any I/O error or clean
- * close that triggers an unregistration of the application.
- */
-static struct ust_cmd_queue ust_cmd_queue;
-
-/*
- * Section name to look for in the daemon configuration file.
- */
-static const char * const config_section_name = "sessiond";
-
-/* Am I root or not. Set to 1 if the daemon is running as root */
-static int is_root;
-
-/*
- * Stop all threads by closing the thread quit pipe.
- */
-static void stop_threads(void)
-{
-       int ret;
-
-       /* Stopping all threads */
-       DBG("Terminating all threads");
-       ret = sessiond_notify_quit_pipe();
-       if (ret < 0) {
-               ERR("write error on thread quit pipe");
-       }
-}
-
-/*
- * Close every consumer sockets.
- */
-static void close_consumer_sockets(void)
-{
-       int ret;
-
-       if (the_kconsumer_data.err_sock >= 0) {
-               ret = close(the_kconsumer_data.err_sock);
-               if (ret < 0) {
-                       PERROR("kernel consumer err_sock close");
-               }
-       }
-       if (the_ustconsumer32_data.err_sock >= 0) {
-               ret = close(the_ustconsumer32_data.err_sock);
-               if (ret < 0) {
-                       PERROR("UST consumerd32 err_sock close");
-               }
-       }
-       if (the_ustconsumer64_data.err_sock >= 0) {
-               ret = close(the_ustconsumer64_data.err_sock);
-               if (ret < 0) {
-                       PERROR("UST consumerd64 err_sock close");
-               }
-       }
-       if (the_kconsumer_data.cmd_sock >= 0) {
-               ret = close(the_kconsumer_data.cmd_sock);
-               if (ret < 0) {
-                       PERROR("kernel consumer cmd_sock close");
-               }
-       }
-       if (the_ustconsumer32_data.cmd_sock >= 0) {
-               ret = close(the_ustconsumer32_data.cmd_sock);
-               if (ret < 0) {
-                       PERROR("UST consumerd32 cmd_sock close");
-               }
-       }
-       if (the_ustconsumer64_data.cmd_sock >= 0) {
-               ret = close(the_ustconsumer64_data.cmd_sock);
-               if (ret < 0) {
-                       PERROR("UST consumerd64 cmd_sock close");
-               }
-       }
-       if (the_kconsumer_data.channel_monitor_pipe >= 0) {
-               ret = close(the_kconsumer_data.channel_monitor_pipe);
-               if (ret < 0) {
-                       PERROR("kernel consumer channel monitor pipe close");
-               }
-       }
-       if (the_ustconsumer32_data.channel_monitor_pipe >= 0) {
-               ret = close(the_ustconsumer32_data.channel_monitor_pipe);
-               if (ret < 0) {
-                       PERROR("UST consumerd32 channel monitor pipe close");
-               }
-       }
-       if (the_ustconsumer64_data.channel_monitor_pipe >= 0) {
-               ret = close(the_ustconsumer64_data.channel_monitor_pipe);
-               if (ret < 0) {
-                       PERROR("UST consumerd64 channel monitor pipe close");
-               }
-       }
-}
-
-/*
- * Wait on consumer process termination.
- *
- * Must be called with the consumer data lock held or from a context
- * ensuring no concurrent access to the data (e.g.: cleanup).
- */
-static void wait_consumer(struct consumer_data *consumer_data)
-{
-       pid_t ret;
-       int status;
-
-       if (consumer_data->pid <= 0) {
-               return;
-       }
-
-       DBG("Waiting for complete teardown of consumerd (PID: %d)",
-                       consumer_data->pid);
-       ret = waitpid(consumer_data->pid, &status, 0);
-       if (ret == -1) {
-               PERROR("consumerd waitpid pid: %d", consumer_data->pid);
-       } else if (!WIFEXITED(status)) {
-               ERR("consumerd termination with error: %d",
-                               WEXITSTATUS(status));
-       }
-       consumer_data->pid = 0;
-}
-
-/*
- * Cleanup the session daemon's data structures.
- */
-static void sessiond_cleanup(void)
-{
-       int ret;
-       struct ltt_session_list *session_list = session_get_list();
-
-       DBG("Cleanup sessiond");
-
-       /*
-        * Close the thread quit pipe. It has already done its job,
-        * since we are now called.
-        */
-       sessiond_close_quit_pipe();
-       utils_close_pipe(apps_cmd_pipe);
-       utils_close_pipe(apps_cmd_notify_pipe);
-       utils_close_pipe(the_kernel_poll_pipe);
-
-       ret = remove(the_config.pid_file_path.value);
-       if (ret < 0) {
-               PERROR("remove pidfile %s", the_config.pid_file_path.value);
-       }
-
-       DBG("Removing sessiond and consumerd content of directory %s",
-                       the_config.rundir.value);
-
-       /* sessiond */
-       DBG("Removing %s", the_config.pid_file_path.value);
-       (void) unlink(the_config.pid_file_path.value);
-
-       DBG("Removing %s", the_config.agent_port_file_path.value);
-       (void) unlink(the_config.agent_port_file_path.value);
-
-       /* kconsumerd */
-       DBG("Removing %s", the_kconsumer_data.err_unix_sock_path);
-       (void) unlink(the_kconsumer_data.err_unix_sock_path);
-
-       DBG("Removing directory %s", the_config.kconsumerd_path.value);
-       (void) rmdir(the_config.kconsumerd_path.value);
-
-       /* ust consumerd 32 */
-       DBG("Removing %s", the_config.consumerd32_err_unix_sock_path.value);
-       (void) unlink(the_config.consumerd32_err_unix_sock_path.value);
-
-       DBG("Removing directory %s", the_config.consumerd32_path.value);
-       (void) rmdir(the_config.consumerd32_path.value);
-
-       /* ust consumerd 64 */
-       DBG("Removing %s", the_config.consumerd64_err_unix_sock_path.value);
-       (void) unlink(the_config.consumerd64_err_unix_sock_path.value);
-
-       DBG("Removing directory %s", the_config.consumerd64_path.value);
-       (void) rmdir(the_config.consumerd64_path.value);
-
-       pthread_mutex_destroy(&session_list->lock);
-
-       DBG("Cleaning up all per-event notifier domain agents");
-       agent_by_event_notifier_domain_ht_destroy();
-
-       DBG("Cleaning up all agent apps");
-       agent_app_ht_clean();
-       DBG("Closing all UST sockets");
-       ust_app_clean_list();
-       buffer_reg_destroy_registries();
-
-       close_consumer_sockets();
-
-       wait_consumer(&the_kconsumer_data);
-       wait_consumer(&the_ustconsumer64_data);
-       wait_consumer(&the_ustconsumer32_data);
-
-       if (is_root && !the_config.no_kernel) {
-               cleanup_kernel_tracer();
-       }
-
-       /*
-        * We do NOT rmdir rundir because there are other processes
-        * using it, for instance lttng-relayd, which can start in
-        * parallel with this teardown.
-        */
-}
-
-/*
- * Cleanup the daemon's option data structures.
- */
-static void sessiond_cleanup_options(void)
-{
-       DBG("Cleaning up options");
-
-       sessiond_config_fini(&the_config);
-
-       run_as_destroy_worker();
-}
-
-static int string_match(const char *str1, const char *str2)
-{
-       return (str1 && str2) && !strcmp(str1, str2);
-}
-
-/*
- * Take an option from the getopt output and set it in the right variable to be
- * used later.
- *
- * Return 0 on success else a negative value.
- */
-static int set_option(int opt, const char *arg, const char *optname)
-{
-       int ret = 0;
-
-       if (string_match(optname, "client-sock") || opt == 'c') {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "-c, --client-sock");
-               } else {
-                       config_string_set(&the_config.client_unix_sock_path,
-                                       strdup(arg));
-                       if (!the_config.client_unix_sock_path.value) {
-                               ret = -ENOMEM;
-                               PERROR("strdup");
-                       }
-               }
-       } else if (string_match(optname, "apps-sock") || opt == 'a') {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "-a, --apps-sock");
-               } else {
-                       config_string_set(&the_config.apps_unix_sock_path,
-                                       strdup(arg));
-                       if (!the_config.apps_unix_sock_path.value) {
-                               ret = -ENOMEM;
-                               PERROR("strdup");
-                       }
-               }
-       } else if (string_match(optname, "daemonize") || opt == 'd') {
-               the_config.daemonize = true;
-       } else if (string_match(optname, "background") || opt == 'b') {
-               the_config.background = true;
-       } else if (string_match(optname, "group") || opt == 'g') {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "-g, --group");
-               } else {
-                       config_string_set(&the_config.tracing_group_name,
-                                       strdup(arg));
-                       if (!the_config.tracing_group_name.value) {
-                               ret = -ENOMEM;
-                               PERROR("strdup");
-                       }
-               }
-       } else if (string_match(optname, "help") || opt == 'h') {
-               ret = utils_show_help(8, "lttng-sessiond", help_msg);
-               if (ret) {
-                       ERR("Cannot show --help for `lttng-sessiond`");
-                       perror("exec");
-               }
-               exit(ret ? EXIT_FAILURE : EXIT_SUCCESS);
-       } else if (string_match(optname, "version") || opt == 'V') {
-               opt_print_version = 1;
-       } else if (string_match(optname, "sig-parent") || opt == 'S') {
-               the_config.sig_parent = true;
-       } else if (string_match(optname, "kconsumerd-err-sock")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--kconsumerd-err-sock");
-               } else {
-                       config_string_set(
-                                       &the_config.kconsumerd_err_unix_sock_path,
-                                       strdup(arg));
-                       if (!the_config.kconsumerd_err_unix_sock_path.value) {
-                               ret = -ENOMEM;
-                               PERROR("strdup");
-                       }
-               }
-       } else if (string_match(optname, "kconsumerd-cmd-sock")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--kconsumerd-cmd-sock");
-               } else {
-                       config_string_set(
-                                       &the_config.kconsumerd_cmd_unix_sock_path,
-                                       strdup(arg));
-                       if (!the_config.kconsumerd_cmd_unix_sock_path.value) {
-                               ret = -ENOMEM;
-                               PERROR("strdup");
-                       }
-               }
-       } else if (string_match(optname, "ustconsumerd64-err-sock")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--ustconsumerd64-err-sock");
-               } else {
-                       config_string_set(
-                                       &the_config.consumerd64_err_unix_sock_path,
-                                       strdup(arg));
-                       if (!the_config.consumerd64_err_unix_sock_path.value) {
-                               ret = -ENOMEM;
-                               PERROR("strdup");
-                       }
-               }
-       } else if (string_match(optname, "ustconsumerd64-cmd-sock")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--ustconsumerd64-cmd-sock");
-               } else {
-                       config_string_set(
-                                       &the_config.consumerd64_cmd_unix_sock_path,
-                                       strdup(arg));
-                       if (!the_config.consumerd64_cmd_unix_sock_path.value) {
-                               ret = -ENOMEM;
-                               PERROR("strdup");
-                       }
-               }
-       } else if (string_match(optname, "ustconsumerd32-err-sock")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--ustconsumerd32-err-sock");
-               } else {
-                       config_string_set(
-                                       &the_config.consumerd32_err_unix_sock_path,
-                                       strdup(arg));
-                       if (!the_config.consumerd32_err_unix_sock_path.value) {
-                               ret = -ENOMEM;
-                               PERROR("strdup");
-                       }
-               }
-       } else if (string_match(optname, "ustconsumerd32-cmd-sock")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--ustconsumerd32-cmd-sock");
-               } else {
-                       config_string_set(
-                                       &the_config.consumerd32_cmd_unix_sock_path,
-                                       strdup(arg));
-                       if (!the_config.consumerd32_cmd_unix_sock_path.value) {
-                               ret = -ENOMEM;
-                               PERROR("strdup");
-                       }
-               }
-       } else if (string_match(optname, "no-kernel")) {
-               the_config.no_kernel = true;
-       } else if (string_match(optname, "quiet") || opt == 'q') {
-               the_config.quiet = true;
-       } else if (string_match(optname, "verbose") || opt == 'v') {
-               /* Verbose level can increase using multiple -v */
-               if (arg) {
-                       /* Value obtained from config file */
-                       the_config.verbose = config_parse_value(arg);
-               } else {
-                       /* -v used on command line */
-                       the_config.verbose++;
-               }
-               /* Clamp value to [0, 3] */
-               the_config.verbose = the_config.verbose < 0 ?
-                                     0 :
-                                     (the_config.verbose <= 3 ? the_config.verbose :
-                                                                3);
-       } else if (string_match(optname, "verbose-consumer")) {
-               if (arg) {
-                       the_config.verbose_consumer = config_parse_value(arg);
-               } else {
-                       the_config.verbose_consumer++;
-               }
-       } else if (string_match(optname, "consumerd32-path")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--consumerd32-path");
-               } else {
-                       config_string_set(&the_config.consumerd32_bin_path,
-                                       strdup(arg));
-                       if (!the_config.consumerd32_bin_path.value) {
-                               PERROR("strdup");
-                               ret = -ENOMEM;
-                       }
-               }
-       } else if (string_match(optname, "consumerd32-libdir")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--consumerd32-libdir");
-               } else {
-                       config_string_set(&the_config.consumerd32_lib_dir,
-                                       strdup(arg));
-                       if (!the_config.consumerd32_lib_dir.value) {
-                               PERROR("strdup");
-                               ret = -ENOMEM;
-                       }
-               }
-       } else if (string_match(optname, "consumerd64-path")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--consumerd64-path");
-               } else {
-                       config_string_set(&the_config.consumerd64_bin_path,
-                                       strdup(arg));
-                       if (!the_config.consumerd64_bin_path.value) {
-                               PERROR("strdup");
-                               ret = -ENOMEM;
-                       }
-               }
-       } else if (string_match(optname, "consumerd64-libdir")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--consumerd64-libdir");
-               } else {
-                       config_string_set(&the_config.consumerd64_lib_dir,
-                                       strdup(arg));
-                       if (!the_config.consumerd64_lib_dir.value) {
-                               PERROR("strdup");
-                               ret = -ENOMEM;
-                       }
-               }
-       } else if (string_match(optname, "pidfile") || opt == 'p') {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "-p, --pidfile");
-               } else {
-                       config_string_set(
-                                       &the_config.pid_file_path, strdup(arg));
-                       if (!the_config.pid_file_path.value) {
-                               PERROR("strdup");
-                               ret = -ENOMEM;
-                       }
-               }
-       } else if (string_match(optname, "agent-tcp-port")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--agent-tcp-port");
-               } else {
-                       unsigned long v;
-
-                       errno = 0;
-                       v = strtoul(arg, NULL, 0);
-                       if (errno != 0 || !isdigit(arg[0])) {
-                               ERR("Wrong value in --agent-tcp-port parameter: %s", arg);
-                               return -1;
-                       }
-                       if (v == 0 || v >= 65535) {
-                               ERR("Port overflow in --agent-tcp-port parameter: %s", arg);
-                               return -1;
-                       }
-                       the_config.agent_tcp_port.begin =
-                                       the_config.agent_tcp_port.end = (int) v;
-                       DBG3("Agent TCP port set to non default: %i", (int) v);
-               }
-       } else if (string_match(optname, "load") || opt == 'l') {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "-l, --load");
-               } else {
-                       config_string_set(&the_config.load_session_path,
-                                       strdup(arg));
-                       if (!the_config.load_session_path.value) {
-                               PERROR("strdup");
-                               ret = -ENOMEM;
-                       }
-               }
-       } else if (string_match(optname, "kmod-probes")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--kmod-probes");
-               } else {
-                       config_string_set(&the_config.kmod_probes_list,
-                                       strdup(arg));
-                       if (!the_config.kmod_probes_list.value) {
-                               PERROR("strdup");
-                               ret = -ENOMEM;
-                       }
-               }
-       } else if (string_match(optname, "extra-kmod-probes")) {
-               if (!arg || *arg == '\0') {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "--extra-kmod-probes");
-               } else {
-                       config_string_set(&the_config.kmod_extra_probes_list,
-                                       strdup(arg));
-                       if (!the_config.kmod_extra_probes_list.value) {
-                               PERROR("strdup");
-                               ret = -ENOMEM;
-                       }
-               }
-       } else if (string_match(optname, EVENT_NOTIFIER_ERROR_BUFFER_SIZE_KERNEL_OPTION_STR)) {
-               unsigned long v;
-
-               errno = 0;
-               v = strtoul(arg, NULL, 0);
-               if (errno != 0 || !isdigit(arg[0])) {
-                       ERR("Wrong value in --%s parameter: %s",
-                                       EVENT_NOTIFIER_ERROR_BUFFER_SIZE_KERNEL_OPTION_STR, arg);
-                       return -1;
-               }
-               if (v == 0 || v >= EVENT_NOTIFIER_ERROR_COUNTER_NUMBER_OF_BUCKET_MAX) {
-                       ERR("Value out of range for --%s parameter: %s",
-                                       EVENT_NOTIFIER_ERROR_BUFFER_SIZE_KERNEL_OPTION_STR, arg);
-                       return -1;
-               }
-               the_config.event_notifier_buffer_size_kernel = (int) v;
-               DBG3("Kernel event notifier error buffer size set to a non-default value: %i",
-                               the_config.event_notifier_buffer_size_kernel);
-               goto end;
-       } else if (string_match(optname, EVENT_NOTIFIER_ERROR_BUFFER_SIZE_USERSPACE_OPTION_STR)) {
-               unsigned long v;
-
-               errno = 0;
-               v = strtoul(arg, NULL, 0);
-               if (errno != 0 || !isdigit(arg[0])) {
-                       ERR("Wrong value in --%s parameter: %s",
-                                       EVENT_NOTIFIER_ERROR_BUFFER_SIZE_USERSPACE_OPTION_STR, arg);
-                       return -1;
-               }
-               if (v == 0 || v >= EVENT_NOTIFIER_ERROR_COUNTER_NUMBER_OF_BUCKET_MAX) {
-                       ERR("Value out of range for --%s parameter: %s",
-                                       EVENT_NOTIFIER_ERROR_BUFFER_SIZE_USERSPACE_OPTION_STR, arg);
-                       return -1;
-               }
-               the_config.event_notifier_buffer_size_userspace = (int) v;
-               DBG3("Userspace event notifier error buffer size set to a non-default value: %i",
-                               the_config.event_notifier_buffer_size_userspace);
-               goto end;
-       } else if (string_match(optname, "config") || opt == 'f') {
-               /* This is handled in set_options() thus silent skip. */
-               goto end;
-       } else {
-               /*
-                * Unknown option or other error. The error is printed by
-                * getopt, so simply return.
-                */
-               ret = -1;
-       }
-
-end:
-       if (ret == -EINVAL) {
-               const char *opt_name = "unknown";
-               int i;
-
-               for (i = 0; i < sizeof(long_options) / sizeof(struct option);
-                       i++) {
-                       if (opt == long_options[i].val) {
-                               opt_name = long_options[i].name;
-                               break;
-                       }
-               }
-
-               WARN("Invalid argument provided for option \"%s\", using default value.",
-                       opt_name);
-       }
-
-       return ret;
-}
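
Several handlers above (--agent-tcp-port and the two event-notifier error-buffer-size options) parse unsigned integers with strtoul() plus manual errno and isdigit() checks. A hypothetical helper capturing that pattern, shown here only to make the validation steps explicit, could be:

    #include <ctype.h>
    #include <errno.h>
    #include <stdlib.h>

    /*
     * Parse an unsigned integer option argument: reject strings that do
     * not start with a digit, trailing garbage, overflow and values
     * outside [min, max].  Returns 0 on success, -1 on error.
     */
    static int parse_bounded_ulong(const char *arg, unsigned long min,
                    unsigned long max, unsigned long *out)
    {
            char *end = NULL;
            unsigned long v;

            if (!arg || !isdigit((unsigned char) arg[0])) {
                    return -1;
            }

            errno = 0;
            v = strtoul(arg, &end, 0);
            if (errno != 0 || *end != '\0' || v < min || v > max) {
                    return -1;
            }

            *out = v;
            return 0;
    }

For the agent TCP port, set_option() accepts values strictly between 0 and 65535; the error-buffer-size options are bounded by EVENT_NOTIFIER_ERROR_COUNTER_NUMBER_OF_BUCKET_MAX.
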
-
-/*
- * config_entry_handler_cb used to handle options read from a config file.
- * See config_entry_handler_cb comment in common/config/session-config.h for the
- * return value conventions.
- */
-static int config_entry_handler(const struct config_entry *entry, void *unused)
-{
-       int ret = 0, i;
-
-       if (!entry || !entry->name || !entry->value) {
-               ret = -EINVAL;
-               goto end;
-       }
-
-       /* Check if the option is to be ignored */
-       for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
-               if (!strcmp(entry->name, config_ignore_options[i])) {
-                       goto end;
-               }
-       }
-
-       for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1;
-               i++) {
-
-               /* Ignore if not fully matched. */
-               if (strcmp(entry->name, long_options[i].name)) {
-                       continue;
-               }
-
-               /*
-                * If the option takes no argument on the command line, we have to
-                * check if the value is "true". We support non-zero numeric values,
-                * true, on and yes.
-                */
-               if (!long_options[i].has_arg) {
-                       ret = config_parse_value(entry->value);
-                       if (ret <= 0) {
-                               if (ret) {
-                                       WARN("Invalid configuration value \"%s\" for option %s",
-                                                       entry->value, entry->name);
-                               }
-                               /* False, skip boolean config option. */
-                               goto end;
-                       }
-               }
-
-               ret = set_option(long_options[i].val, entry->value, entry->name);
-               goto end;
-       }
-
-       WARN("Unrecognized option \"%s\" in daemon configuration file.", entry->name);
-
-end:
-       return ret;
-}
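
For options that take no argument on the command line, config_parse_value() is documented above as treating non-zero numeric values, "true", "on" and "yes" as true. A simplified, hypothetical sketch of that behaviour (not the actual implementation in the common config code) could be:

    #include <stdlib.h>
    #include <strings.h>

    /*
     * Truthy words return 1; anything else is interpreted numerically, so
     * "0" is false and "3" keeps its numeric value (e.g. a verbosity
     * level read from the configuration file).
     */
    static int parse_config_value_sketch(const char *value)
    {
            if (!value) {
                    return -1;
            }
            if (!strcasecmp(value, "true") || !strcasecmp(value, "on") ||
                            !strcasecmp(value, "yes")) {
                    return 1;
            }
            return atoi(value);
    }
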
-
-static void print_version(void)
-{
-       fprintf(stdout, "%s\n", VERSION);
-}
-
-/*
- * daemon configuration loading and argument parsing
- */
-static int set_options(int argc, char **argv)
-{
-       int ret = 0, c = 0, option_index = 0;
-       int orig_optopt = optopt, orig_optind = optind;
-       char *optstring;
-       char *config_path = NULL;
-
-       optstring = utils_generate_optstring(long_options,
-                       sizeof(long_options) / sizeof(struct option));
-       if (!optstring) {
-               ret = -ENOMEM;
-               goto end;
-       }
-
-       /* Check for the --config option */
-       while ((c = getopt_long(argc, argv, optstring, long_options,
-                                       &option_index)) != -1) {
-               if (c == '?') {
-                       ret = -EINVAL;
-                       goto end;
-               } else if (c != 'f') {
-                       /* if not equal to --config option. */
-                       continue;
-               }
-
-               if (lttng_is_setuid_setgid()) {
-                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
-                               "-f, --config");
-               } else {
-                       free(config_path);
-                       config_path = utils_expand_path(optarg);
-                       if (!config_path) {
-                               ERR("Failed to resolve path: %s", optarg);
-                       }
-               }
-       }
-
-       ret = config_get_section_entries(config_path, config_section_name,
-                       config_entry_handler, NULL);
-       if (ret) {
-               if (ret > 0) {
-                       ERR("Invalid configuration option at line %i", ret);
-                       ret = -1;
-               }
-               goto end;
-       }
-
-       /* Reset getopt's global state */
-       optopt = orig_optopt;
-       optind = orig_optind;
-       while (1) {
-               option_index = -1;
-               /*
-                * getopt_long() will not set option_index if it encounters a
-                * short option.
-                */
-               c = getopt_long(argc, argv, optstring, long_options,
-                               &option_index);
-               if (c == -1) {
-                       break;
-               }
-
-               /*
-                * Pass NULL as the long option name if getopt left the index
-                * unset.
-                */
-               ret = set_option(c, optarg,
-                               option_index < 0 ? NULL :
-                               long_options[option_index].name);
-               if (ret < 0) {
-                       break;
-               }
-       }
-
-end:
-       free(config_path);
-       free(optstring);
-       return ret;
-}
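
set_options() walks the command line twice: a first getopt_long() pass that only looks for -f/--config, so the configuration file can be loaded before anything else, then a second pass over all options after restoring getopt's global state. A stripped-down sketch of that two-pass pattern, assuming GNU getopt_long() and using hypothetical option names, is:

    #include <getopt.h>
    #include <stdio.h>

    static const struct option opts[] = {
            { "config", required_argument, 0, 'f' },
            { "verbose", no_argument, 0, 'v' },
            { 0, 0, 0, 0 }
    };

    static void parse_twice(int argc, char **argv)
    {
            int c, saved_optind = optind, saved_optopt = optopt;

            /* Pass 1: locate --config only. */
            while ((c = getopt_long(argc, argv, "f:v", opts, NULL)) != -1) {
                    if (c == 'f') {
                            printf("config file: %s\n", optarg);
                    }
            }

            /* Restore getopt's global state before the second pass. */
            optind = saved_optind;
            optopt = saved_optopt;

            /* Pass 2: dispatch every option to a set_option()-like handler. */
            while ((c = getopt_long(argc, argv, "f:v", opts, NULL)) != -1) {
                    /* ... */
            }
    }
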
-
-/*
- * Create lockfile using the rundir and return its fd.
- */
-static int create_lockfile(void)
-{
-       return utils_create_lock_file(the_config.lock_file_path.value);
-}
-
-/*
- * Check if the global socket is available and if a daemon is answering on the
- * other side. If so, an error is returned.
- *
- * Also attempts to create and hold the lock file.
- */
-static int check_existing_daemon(void)
-{
-       int ret = 0;
-
-       /* Is there anybody out there ? */
-       if (lttng_session_daemon_alive()) {
-               ret = -EEXIST;
-               goto end;
-       }
-
-       lockfile_fd = create_lockfile();
-       if (lockfile_fd < 0) {
-               ret = -EEXIST;
-               goto end;
-       }
-end:
-       return ret;
-}
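
create_lockfile() delegates to utils_create_lock_file(), whose role here is to take an exclusive lock on the lock file so that a second session daemon fails with -EEXIST. As a rough sketch of what such a helper typically boils down to, assuming flock() (the actual lttng-tools helper may use a different locking primitive):

    #include <fcntl.h>
    #include <sys/file.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /*
     * Create (if needed) and lock the given file.  Returns an fd holding
     * an exclusive, non-blocking lock, or -1 if another process already
     * holds it.
     */
    static int create_lock_file_sketch(const char *path)
    {
            int fd = open(path, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);

            if (fd < 0) {
                    return -1;
            }
            if (flock(fd, LOCK_EX | LOCK_NB) < 0) {
                    close(fd);
                    return -1;
            }
            /* Keep the fd open: closing it releases the lock. */
            return fd;
    }
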
-
-static void sessiond_cleanup_lock_file(void)
-{
-       int ret;
-
-       /*
-        * Clean up the lock file by deleting it and finally closing it, which
-        * will release the file system lock.
-        */
-       if (lockfile_fd >= 0) {
-               ret = remove(the_config.lock_file_path.value);
-               if (ret < 0) {
-                       PERROR("remove lock file");
-               }
-               ret = close(lockfile_fd);
-               if (ret < 0) {
-                       PERROR("close lock file");
-               }
-       }
-}
-
-/*
- * Set the tracing group gid onto the client socket.
- *
- * Race window between mkdir and chown is OK because we are going from more
- * permissive (root.root) to less permissive (root.tracing).
- */
-static int set_permissions(char *rundir)
-{
-       int ret;
-       gid_t gid;
-
-       ret = utils_get_group_id(
-                       the_config.tracing_group_name.value, true, &gid);
-       if (ret) {
-               /* Default to root group. */
-               gid = 0;
-       }
-
-       /* Set lttng run dir */
-       ret = chown(rundir, 0, gid);
-       if (ret < 0) {
-               ERR("Unable to set group on %s", rundir);
-               PERROR("chown");
-       }
-
-       /*
-        * Ensure all applications and tracing group can search the run
-        * dir. Allow everyone to read the directory, since it does not
-        * buy us anything to hide its content.
-        */
-       ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
-       if (ret < 0) {
-               ERR("Unable to set permissions on %s", rundir);
-               PERROR("chmod");
-       }
-
-       /* lttng client socket path */
-       ret = chown(the_config.client_unix_sock_path.value, 0, gid);
-       if (ret < 0) {
-               ERR("Unable to set group on %s",
-                               the_config.client_unix_sock_path.value);
-               PERROR("chown");
-       }
-
-       /* kconsumer error socket path */
-       ret = chown(the_kconsumer_data.err_unix_sock_path, 0, 0);
-       if (ret < 0) {
-               ERR("Unable to set group on %s",
-                               the_kconsumer_data.err_unix_sock_path);
-               PERROR("chown");
-       }
-
-       /* 64-bit ustconsumer error socket path */
-       ret = chown(the_ustconsumer64_data.err_unix_sock_path, 0, 0);
-       if (ret < 0) {
-               ERR("Unable to set group on %s",
-                               the_ustconsumer64_data.err_unix_sock_path);
-               PERROR("chown");
-       }
-
-       /* 32-bit ustconsumer compat32 error socket path */
-       ret = chown(the_ustconsumer32_data.err_unix_sock_path, 0, 0);
-       if (ret < 0) {
-               ERR("Unable to set group on %s",
-                               the_ustconsumer32_data.err_unix_sock_path);
-               PERROR("chown");
-       }
-
-       DBG("All permissions are set");
-
-       return ret;
-}
-
-/*
- * Create the lttng run directory needed for all global sockets and pipes.
- */
-static int create_lttng_rundir(void)
-{
-       int ret;
-
-       DBG3("Creating LTTng run directory: %s", the_config.rundir.value);
-
-       ret = mkdir(the_config.rundir.value, S_IRWXU);
-       if (ret < 0) {
-               if (errno != EEXIST) {
-                       ERR("Unable to create %s", the_config.rundir.value);
-                       goto error;
-               } else {
-                       ret = 0;
-               }
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Set up the sockets and directory needed for the consumerds' communication
- * with the session daemon.
- */
-static int set_consumer_sockets(struct consumer_data *consumer_data)
-{
-       int ret;
-       char *path = NULL;
-
-       switch (consumer_data->type) {
-       case LTTNG_CONSUMER_KERNEL:
-               path = the_config.kconsumerd_path.value;
-               break;
-       case LTTNG_CONSUMER64_UST:
-               path = the_config.consumerd64_path.value;
-               break;
-       case LTTNG_CONSUMER32_UST:
-               path = the_config.consumerd32_path.value;
-               break;
-       default:
-               ERR("Consumer type unknown");
-               ret = -EINVAL;
-               goto error;
-       }
-       LTTNG_ASSERT(path);
-
-       DBG2("Creating consumer directory: %s", path);
-
-       ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
-       if (ret < 0 && errno != EEXIST) {
-               PERROR("mkdir");
-               ERR("Failed to create %s", path);
-               goto error;
-       }
-       if (is_root) {
-               gid_t gid;
-
-               ret = utils_get_group_id(the_config.tracing_group_name.value,
-                               true, &gid);
-               if (ret) {
-                       /* Default to root group. */
-                       gid = 0;
-               }
-
-               ret = chown(path, 0, gid);
-               if (ret < 0) {
-                       ERR("Unable to set group on %s", path);
-                       PERROR("chown");
-                       goto error;
-               }
-       }
-
-       /* Create the consumerd error unix socket */
-       consumer_data->err_sock =
-               lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
-       if (consumer_data->err_sock < 0) {
-               ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
-               ret = -1;
-               goto error;
-       }
-
-       /*
-        * Set the CLOEXEC flag. Return code is useless because either way, the
-        * show must go on.
-        */
-       ret = utils_set_fd_cloexec(consumer_data->err_sock);
-       if (ret < 0) {
-               PERROR("utils_set_fd_cloexec");
-               /* continue anyway */
-       }
-
-       /* File permission MUST be 660 */
-       ret = chmod(consumer_data->err_unix_sock_path,
-                       S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
-       if (ret < 0) {
-               ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
-               PERROR("chmod");
-               goto error;
-       }
-
-error:
-       return ret;
-}
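
utils_set_fd_cloexec(), used above on the consumerd error socket, presumably amounts to setting the FD_CLOEXEC flag so the descriptor is not inherited across exec(); a minimal sketch of that operation is:

    #include <fcntl.h>

    /* Set FD_CLOEXEC on fd without clobbering its other flags. */
    static int set_fd_cloexec_sketch(int fd)
    {
            int flags = fcntl(fd, F_GETFD);

            if (flags < 0) {
                    return -1;
            }
            return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }
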
-
-/*
- * Signal handler for the daemon
- *
- * Simply stop all worker threads, letting main() return gracefully after
- * joining all threads and calling cleanup().
- */
-static void sighandler(int sig, siginfo_t *siginfo, void *arg)
-{
-       switch (sig) {
-       case SIGINT:
-               DBG("SIGINT caught");
-               stop_threads();
-               break;
-       case SIGTERM:
-               DBG("SIGTERM caught");
-               stop_threads();
-               break;
-       case SIGUSR1:
-               CMM_STORE_SHARED(recv_child_signal, 1);
-               break;
-       case SIGBUS:
-       {
-               int write_ret;
-               const char msg[] = "Received SIGBUS, aborting program.\n";
-
-               lttng_ust_handle_sigbus(siginfo->si_addr);
-               /*
-                * If ustctl did not catch this signal (triggering a
-                * siglongjmp), abort the program. Otherwise, the execution
-                * will resume from the ust-ctl call which caused this error.
-                *
-                * The return value is ignored since the program aborts anyhow.
-                */
-               write_ret = write(STDERR_FILENO, msg, sizeof(msg));
-               (void) write_ret;
-               abort();
-       }
-       default:
-               break;
-       }
-}
-
-/*
- * Set up the signal handlers for:
- *             SIGINT, SIGTERM, SIGUSR1, SIGBUS and SIGPIPE
- */
-static int set_signal_handler(void)
-{
-       int ret = 0;
-       struct sigaction sa;
-       sigset_t sigset;
-
-       if ((ret = sigemptyset(&sigset)) < 0) {
-               PERROR("sigemptyset");
-               return ret;
-       }
-
-       sa.sa_mask = sigset;
-       sa.sa_flags = SA_SIGINFO;
-
-       sa.sa_sigaction = sighandler;
-       if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
-               PERROR("sigaction");
-               return ret;
-       }
-
-       if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
-               PERROR("sigaction");
-               return ret;
-       }
-
-       if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
-               PERROR("sigaction");
-               return ret;
-       }
-
-       if ((ret = sigaction(SIGBUS, &sa, NULL)) < 0) {
-               PERROR("sigaction");
-               return ret;
-       }
-
-       sa.sa_flags = 0;
-       sa.sa_handler = SIG_IGN;
-       if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
-               PERROR("sigaction");
-               return ret;
-       }
-
-       DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE, SIGINT, and SIGBUS");
-
-       return ret;
-}
-
-/*
- * Raise the open files limit to a large value. This daemon can open a large
- * number of file descriptors in order to consume multiple kernel traces.
- */
-static void set_ulimit(void)
-{
-       int ret;
-       struct rlimit lim;
-
-       /* The kernel does not allow an infinite limit for open files */
-       lim.rlim_cur = 65535;
-       lim.rlim_max = 65535;
-
-       ret = setrlimit(RLIMIT_NOFILE, &lim);
-       if (ret < 0) {
-               PERROR("failed to set open files limit");
-       }
-}
-
-static int write_pidfile(void)
-{
-       return utils_create_pid_file(getpid(), the_config.pid_file_path.value);
-}
-
-static int set_clock_plugin_env(void)
-{
-       int ret = 0;
-       char *env_value = NULL;
-
-       if (!the_config.lttng_ust_clock_plugin.value) {
-               goto end;
-       }
-
-       ret = asprintf(&env_value, "LTTNG_UST_CLOCK_PLUGIN=%s",
-                       the_config.lttng_ust_clock_plugin.value);
-       if (ret < 0) {
-               PERROR("asprintf");
-               goto end;
-       }
-
-       ret = putenv(env_value);
-       if (ret) {
-               free(env_value);
-               PERROR("putenv of LTTNG_UST_CLOCK_PLUGIN");
-               goto end;
-       }
-
-       DBG("Updated LTTNG_UST_CLOCK_PLUGIN environment variable to \"%s\"",
-                       the_config.lttng_ust_clock_plugin.value);
-end:
-       return ret;
-}
-
-static void destroy_all_sessions_and_wait(void)
-{
-       struct ltt_session *session, *tmp;
-       struct ltt_session_list *session_list;
-
-       session_list = session_get_list();
-       DBG("Initiating destruction of all sessions");
-
-       if (!session_list) {
-               return;
-       }
-
-       session_lock_list();
-       /* Initiate the destruction of all sessions. */
-       cds_list_for_each_entry_safe(session, tmp,
-                       &session_list->head, list) {
-               if (!session_get(session)) {
-                       continue;
-               }
-
-               session_lock(session);
-               if (session->destroyed) {
-                       goto unlock_session;
-               }
-               (void) cmd_stop_trace(session);
-               (void) cmd_destroy_session(
-                               session, the_notification_thread_handle, NULL);
-       unlock_session:
-               session_unlock(session);
-               session_put(session);
-       }
-       session_unlock_list();
-
-       /* Wait for the destruction of all sessions to complete. */
-       DBG("Waiting for the destruction of all sessions to complete");
-       session_list_wait_empty();
-       DBG("Destruction of all sessions completed");
-}
-
-static void unregister_all_triggers(void)
-{
-       enum lttng_error_code ret_code;
-       enum lttng_trigger_status trigger_status;
-       struct lttng_triggers *triggers = NULL;
-       unsigned int trigger_count, i;
-       const struct lttng_credentials creds = {
-               .uid = LTTNG_OPTIONAL_INIT_VALUE(0),
-       };
-
-       DBG("Unregistering all triggers");
-
-       /*
-        * List all triggers as "root" since we wish to unregister all triggers.
-        */
-       ret_code = notification_thread_command_list_triggers(
-                       the_notification_thread_handle, creds.uid.value,
-                       &triggers);
-       if (ret_code != LTTNG_OK) {
-               ERR("Failed to list triggers while unregistering all triggers");
-               goto end;
-       }
-
-       trigger_status = lttng_triggers_get_count(triggers, &trigger_count);
-       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
-
-       for (i = 0; i < trigger_count; i++) {
-               uid_t trigger_owner;
-               const char *trigger_name;
-               const struct lttng_trigger *trigger =
-                               lttng_triggers_get_at_index(triggers, i);
-
-               LTTNG_ASSERT(trigger);
-
-               trigger_status = lttng_trigger_get_owner_uid(
-                               trigger, &trigger_owner);
-               LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
-
-               trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
-               trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ?
-                               trigger_name : "(anonymous)";
-
-               DBG("Unregistering trigger: trigger owner uid = %d, trigger name = '%s'",
-                               (int) trigger_owner, trigger_name);
-
-               ret_code = cmd_unregister_trigger(&creds, trigger,
-                               the_notification_thread_handle);
-               if (ret_code != LTTNG_OK) {
-                       ERR("Failed to unregister trigger: trigger owner uid = %d, trigger name = '%s', error: '%s'",
-                                       (int) trigger_owner, trigger_name,
-                                       lttng_strerror(-ret_code));
-                       /* Continue to unregister the remaining triggers. */
-               }
-       }
-end:
-       lttng_triggers_destroy(triggers);
-}
-
-static int run_as_worker_post_fork_cleanup(void *data)
-{
-       struct sessiond_config *sessiond_config = data;
-
-       sessiond_config_fini(sessiond_config);
-       return 0;
-}
-
-static int launch_run_as_worker(const char *procname)
-{
-       /*
-        * Clean-up before forking the run-as worker. Any dynamically
-        * allocated memory of which the worker is not aware will
-        * be leaked as the process forks a run-as worker (and performs
-        * no exec*()). The same would apply to any opened fd.
-        */
-       return run_as_create_worker(
-                       procname, run_as_worker_post_fork_cleanup, &the_config);
-}
-
-static void sessiond_uuid_log(void)
-{
-       char uuid_str[LTTNG_UUID_STR_LEN];
-
-       lttng_uuid_to_str(the_sessiond_uuid, uuid_str);
-       DBG("Starting lttng-sessiond {%s}", uuid_str);
-}
-
-/*
- * main
- */
-int main(int argc, char **argv)
-{
-       int ret = 0, retval = 0;
-       const char *env_app_timeout;
-       struct lttng_pipe *ust32_channel_monitor_pipe = NULL,
-                       *ust64_channel_monitor_pipe = NULL,
-                       *kernel_channel_monitor_pipe = NULL;
-       struct lttng_thread *ht_cleanup_thread = NULL;
-       struct timer_thread_parameters timer_thread_parameters;
-       /* Rotation thread handle. */
-       struct rotation_thread_handle *rotation_thread_handle = NULL;
-       /* Queue of rotation jobs populated by the sessiond-timer. */
-       struct rotation_thread_timer_queue *rotation_timer_queue = NULL;
-       struct lttng_thread *client_thread = NULL;
-       struct lttng_thread *notification_thread = NULL;
-       struct lttng_thread *register_apps_thread = NULL;
-       enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
-
-       logger_set_thread_name("Main", false);
-       init_kernel_workarounds();
-
-       rcu_register_thread();
-
-       if (set_signal_handler()) {
-               retval = -1;
-               goto exit_set_signal_handler;
-       }
-
-       if (timer_signal_init()) {
-               retval = -1;
-               goto exit_set_signal_handler;
-       }
-
-       the_page_size = sysconf(_SC_PAGE_SIZE);
-       if (the_page_size < 0) {
-               PERROR("sysconf _SC_PAGE_SIZE");
-               the_page_size = LONG_MAX;
-               WARN("Fallback page size to %ld", the_page_size);
-       }
-
-       ret = sessiond_config_init(&the_config);
-       if (ret) {
-               retval = -1;
-               goto exit_set_signal_handler;
-       }
-
-       /*
-        * Initialize the configuration from environment variables.
-        * Command line options override the environment configuration, as
-        * documented, so apply the environment configuration first.
-        */
-       sessiond_config_apply_env_config(&the_config);
-
-       /*
-        * Parse arguments and load the daemon configuration file.
-        *
-        * We have an exit_options exit path to free memory reserved by
-        * set_options. This is needed because the rest of sessiond_cleanup()
-        * depends on ht_cleanup_thread, which depends on lttng_daemonize, which
-        * depends on set_options.
-        */
-       progname = argv[0];
-       if (set_options(argc, argv)) {
-               retval = -1;
-               goto exit_options;
-       }
-
-       /*
-        * Resolve all paths received as arguments, configuration options, or
-        * through environment variables to absolute paths. This is necessary
-        * since daemonizing changes the sessiond's current working directory
-        * to '/'.
-        */
-       ret = sessiond_config_resolve_paths(&the_config);
-       if (ret) {
-               goto exit_options;
-       }
-
-       /* Apply config. */
-       lttng_opt_verbose = the_config.verbose;
-       lttng_opt_quiet = the_config.quiet;
-       the_kconsumer_data.err_unix_sock_path =
-                       the_config.kconsumerd_err_unix_sock_path.value;
-       the_kconsumer_data.cmd_unix_sock_path =
-                       the_config.kconsumerd_cmd_unix_sock_path.value;
-       the_ustconsumer32_data.err_unix_sock_path =
-                       the_config.consumerd32_err_unix_sock_path.value;
-       the_ustconsumer32_data.cmd_unix_sock_path =
-                       the_config.consumerd32_cmd_unix_sock_path.value;
-       the_ustconsumer64_data.err_unix_sock_path =
-                       the_config.consumerd64_err_unix_sock_path.value;
-       the_ustconsumer64_data.cmd_unix_sock_path =
-                       the_config.consumerd64_cmd_unix_sock_path.value;
-       set_clock_plugin_env();
-
-       sessiond_config_log(&the_config);
-       sessiond_uuid_log();
-
-       if (opt_print_version) {
-               print_version();
-               retval = 0;
-               goto exit_options;
-       }
-
-       if (create_lttng_rundir()) {
-               retval = -1;
-               goto exit_options;
-       }
-
-       /* Abort launch if a session daemon is already running. */
-       if (check_existing_daemon()) {
-               ERR("A session daemon is already running.");
-               retval = -1;
-               goto exit_options;
-       }
-
-       /* Daemonize */
-       if (the_config.daemonize || the_config.background) {
-               int i;
-
-               ret = lttng_daemonize(&the_child_ppid, &recv_child_signal,
-                               !the_config.background);
-               if (ret < 0) {
-                       retval = -1;
-                       goto exit_options;
-               }
-
-               /*
-                * We are in the child. Make sure all other file descriptors are
-                * closed, in case we are called with more open file
-                * descriptors than the standard ones and the lock file.
-                */
-               for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
-                       if (i == lockfile_fd) {
-                               continue;
-                       }
-                       (void) close(i);
-               }
-       }
-
-       if (launch_run_as_worker(argv[0]) < 0) {
-               goto exit_create_run_as_worker_cleanup;
-       }
-
-       /*
-        * Starting from here, we can create threads. This needs to be after
-        * lttng_daemonize due to RCU.
-        */
-
-       /*
-        * Initialize the health check subsystem. This call should set the
-        * appropriate time values.
-        */
-       the_health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
-       if (!the_health_sessiond) {
-               PERROR("health_app_create error");
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Create thread to clean up RCU hash tables */
-       ht_cleanup_thread = launch_ht_cleanup_thread();
-       if (!ht_cleanup_thread) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Create thread quit pipe */
-       if (sessiond_init_thread_quit_pipe()) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Check if daemon is UID = 0 */
-       is_root = !getuid();
-       if (is_root) {
-               /* Create global run dir with root access */
-
-               kernel_channel_monitor_pipe = lttng_pipe_open(0);
-               if (!kernel_channel_monitor_pipe) {
-                       ERR("Failed to create kernel consumer channel monitor pipe");
-                       retval = -1;
-                       goto stop_threads;
-               }
-               the_kconsumer_data.channel_monitor_pipe =
-                               lttng_pipe_release_writefd(
-                                               kernel_channel_monitor_pipe);
-               if (the_kconsumer_data.channel_monitor_pipe < 0) {
-                       retval = -1;
-                       goto stop_threads;
-               }
-       }
-
-       /* Set consumer initial state */
-       the_kernel_consumerd_state = CONSUMER_STOPPED;
-       the_ust_consumerd_state = CONSUMER_STOPPED;
-
-       ust32_channel_monitor_pipe = lttng_pipe_open(0);
-       if (!ust32_channel_monitor_pipe) {
-               ERR("Failed to create 32-bit user space consumer channel monitor pipe");
-               retval = -1;
-               goto stop_threads;
-       }
-       the_ustconsumer32_data.channel_monitor_pipe =
-                       lttng_pipe_release_writefd(ust32_channel_monitor_pipe);
-       if (the_ustconsumer32_data.channel_monitor_pipe < 0) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /*
-        * The rotation_thread_timer_queue structure is shared between the
-        * sessiond timer thread and the rotation thread. The main thread keeps
-        * its ownership and destroys it when both threads have been joined.
-        */
-       rotation_timer_queue = rotation_thread_timer_queue_create();
-       if (!rotation_timer_queue) {
-               retval = -1;
-               goto stop_threads;
-       }
-       timer_thread_parameters.rotation_thread_job_queue =
-                       rotation_timer_queue;
-
-       ust64_channel_monitor_pipe = lttng_pipe_open(0);
-       if (!ust64_channel_monitor_pipe) {
-               ERR("Failed to create 64-bit user space consumer channel monitor pipe");
-               retval = -1;
-               goto stop_threads;
-       }
-       the_ustconsumer64_data.channel_monitor_pipe =
-                       lttng_pipe_release_writefd(ust64_channel_monitor_pipe);
-       if (the_ustconsumer64_data.channel_monitor_pipe < 0) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /*
-        * Init UST app hash table. The hash table is allocated here since
-        * cleanup() can get called after this point.
-        */
-       if (ust_app_ht_alloc()) {
-               ERR("Failed to allocate UST app hash table");
-               retval = -1;
-               goto stop_threads;
-       }
-
-       event_notifier_error_accounting_status = event_notifier_error_accounting_init(
-                       the_config.event_notifier_buffer_size_kernel,
-                       the_config.event_notifier_buffer_size_userspace);
-       if (event_notifier_error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-               ERR("Failed to initialize event notifier error accounting system");
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /*
-        * Initialize agent app hash table. We allocate the hash table here
-        * since cleanup() can get called after this point.
-        */
-       if (agent_app_ht_alloc()) {
-               ERR("Failed to allocate Agent app hash table");
-               retval = -1;
-               goto stop_threads;
-       }
-
-       if (agent_by_event_notifier_domain_ht_create()) {
-               ERR("Failed to allocate per-event notifier domain agent hash table");
-               retval = -1;
-               goto stop_threads;
-       }
-       /*
-        * These actions must be executed as root. We do that *after* setting up
-        * the sockets path because we MUST make the check for another daemon using
-        * those paths *before* trying to set the kernel consumer sockets and init
-        * kernel tracer.
-        */
-       if (is_root) {
-               if (set_consumer_sockets(&the_kconsumer_data)) {
-                       retval = -1;
-                       goto stop_threads;
-               }
-
-               /* Setup kernel tracer */
-               if (!the_config.no_kernel) {
-                       init_kernel_tracer();
-               }
-
-               /* Set ulimit for open files */
-               set_ulimit();
-       }
-       /* init lttng_fd tracking must be done after set_ulimit. */
-       lttng_fd_init();
-
-       if (set_consumer_sockets(&the_ustconsumer64_data)) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       if (set_consumer_sockets(&the_ustconsumer32_data)) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Get parent pid if -S, --sig-parent is specified. */
-       if (the_config.sig_parent) {
-               the_ppid = getppid();
-       }
-
-       /* Setup the kernel pipe for waking up the kernel thread */
-       if (is_root && !the_config.no_kernel) {
-               if (utils_create_pipe_cloexec(the_kernel_poll_pipe)) {
-                       retval = -1;
-                       goto stop_threads;
-               }
-       }
-
-       /* Setup the thread apps communication pipe. */
-       if (utils_create_pipe_cloexec(apps_cmd_pipe)) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Setup the thread apps notify communication pipe. */
-       if (utils_create_pipe_cloexec(apps_cmd_notify_pipe)) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Initialize global buffer per UID and PID registry. */
-       buffer_reg_init_uid_registry();
-       buffer_reg_init_pid_registry();
-
-       /* Init UST command queue. */
-       cds_wfcq_init(&ust_cmd_queue.head, &ust_cmd_queue.tail);
-
-       cmd_init();
-
-       /* Check for the application socket timeout env variable. */
-       env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
-       if (env_app_timeout) {
-               the_config.app_socket_timeout = atoi(env_app_timeout);
-       } else {
-               the_config.app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
-       }
-
-       ret = write_pidfile();
-       if (ret) {
-               ERR("Error in write_pidfile");
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Initialize communication library */
-       lttcomm_init();
-       /* Initialize TCP timeout values */
-       lttcomm_inet_init();
-
-       /* Create health-check thread. */
-       if (!launch_health_management_thread()) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* notification_thread_data acquires the pipes' read side. */
-       the_notification_thread_handle = notification_thread_handle_create(
-                       ust32_channel_monitor_pipe, ust64_channel_monitor_pipe,
-                       kernel_channel_monitor_pipe);
-       if (!the_notification_thread_handle) {
-               retval = -1;
-               ERR("Failed to create notification thread shared data");
-               goto stop_threads;
-       }
-
-       /* Create notification thread. */
-       notification_thread = launch_notification_thread(
-                       the_notification_thread_handle);
-       if (!notification_thread) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Create timer thread. */
-       if (!launch_timer_thread(&timer_thread_parameters)) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* rotation_thread_data acquires the pipes' read side. */
-       rotation_thread_handle = rotation_thread_handle_create(
-                       rotation_timer_queue, the_notification_thread_handle);
-       if (!rotation_thread_handle) {
-               retval = -1;
-               ERR("Failed to create rotation thread shared data");
-               stop_threads();
-               goto stop_threads;
-       }
-
-       /* Create rotation thread. */
-       if (!launch_rotation_thread(rotation_thread_handle)) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Create thread to manage the client socket */
-       client_thread = launch_client_thread();
-       if (!client_thread) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Set credentials of the client socket and rundir */
-       if (is_root && set_permissions(the_config.rundir.value)) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       if (!launch_ust_dispatch_thread(&ust_cmd_queue, apps_cmd_pipe[1],
-                       apps_cmd_notify_pipe[1])) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Create thread to manage application registration. */
-       register_apps_thread = launch_application_registration_thread(
-                       &ust_cmd_queue);
-       if (!register_apps_thread) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Create thread to manage application socket */
-       if (!launch_application_management_thread(apps_cmd_pipe[0])) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Create thread to manage application notify socket */
-       if (!launch_application_notification_thread(apps_cmd_notify_pipe[0])) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Create agent management thread. */
-       if (!launch_agent_management_thread()) {
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Don't start this thread if kernel tracing is not requested nor root */
-       if (is_root && !the_config.no_kernel) {
-               /* Create kernel thread to manage kernel event */
-               if (!launch_kernel_management_thread(the_kernel_poll_pipe[0])) {
-                       retval = -1;
-                       goto stop_threads;
-               }
-
-               if (kernel_get_notification_fd() >= 0) {
-                       ret = notification_thread_command_add_tracer_event_source(
-                                       the_notification_thread_handle,
-                                       kernel_get_notification_fd(),
-                                       LTTNG_DOMAIN_KERNEL);
-                       if (ret != LTTNG_OK) {
-                               ERR("Failed to add kernel trigger event source to notification thread");
-                               retval = -1;
-                               goto stop_threads;
-                       }
-               }
-       }
-
-       /* Load sessions. */
-       ret = config_load_session(
-                       the_config.load_session_path.value, NULL, 1, 1, NULL);
-       if (ret) {
-               ERR("Session load failed: %s", error_get_str(ret));
-               retval = -1;
-               goto stop_threads;
-       }
-
-       /* Initialization completed. */
-       sessiond_signal_parents();
-
-       /*
-        * This is where we start awaiting program completion (e.g. through
-        * signal that asks threads to teardown).
-        */
-
-       /* Initiate teardown once activity occurs on the quit pipe. */
-       sessiond_wait_for_quit_pipe(-1);
-
-stop_threads:
-
-       /*
-        * Ensure that the client thread is no longer accepting new commands,
-        * which could cause new sessions to be created.
-        */
-       if (client_thread) {
-               lttng_thread_shutdown(client_thread);
-               lttng_thread_put(client_thread);
-       }
-
-       destroy_all_sessions_and_wait();
-
-       /*
-        * At this point no new trigger can be registered (no sessions are
-        * running/rotating) and clients can't connect to the session daemon
-        * anymore. Unregister all triggers.
-        */
-       unregister_all_triggers();
-
-       if (register_apps_thread) {
-               lttng_thread_shutdown(register_apps_thread);
-               lttng_thread_put(register_apps_thread);
-       }
-       lttng_thread_list_shutdown_orphans();
-
-       /*
-        * Wait for all pending call_rcu work to complete before tearing
-        * down data structures. call_rcu worker may be trying to
-        * perform lookups in those structures.
-        */
-       rcu_barrier();
-       /*
-        * sessiond_cleanup() is called when no other thread is running, except
-        * the ht_cleanup thread, which is needed to destroy the hash tables.
-        */
-       rcu_thread_online();
-       sessiond_cleanup();
-
-       /*
-        * Wait for all pending call_rcu work to complete before shutting down
-        * the notification thread. This call_rcu work includes shutting down
-        * UST apps and event notifier pipes.
-        */
-       rcu_barrier();
-
-       if (notification_thread) {
-               lttng_thread_shutdown(notification_thread);
-               lttng_thread_put(notification_thread);
-       }
-
-       /*
-        * Error accounting teardown has to be done after the teardown of all
-        * event notifier pipes to ensure that no tracer may try to use the
-        * error accounting facilities.
-        */
-       event_notifier_error_accounting_fini();
-
-       /*
-        * Unloading the kernel modules needs to be done after all kernel
-        * resources have been released. In our case, this includes the
-        * notification fd, the event notifier group fd, error accounting fd,
-        * all event and event notifier fds, etc.
-        *
-        * In short, at this point, we need to have called close() on all fds
-        * received from the kernel tracer.
-        */
-       if (is_root && !the_config.no_kernel) {
-               DBG("Unloading kernel modules");
-               modprobe_remove_lttng_all();
-       }
-
-       /*
-        * Ensure all prior call_rcu are done. call_rcu callbacks may push
-        * hash tables to the ht_cleanup thread. Therefore, we ensure that
-        * the queue is empty before shutting down the clean-up thread.
-        */
-       rcu_barrier();
-
-       if (ht_cleanup_thread) {
-               lttng_thread_shutdown(ht_cleanup_thread);
-               lttng_thread_put(ht_cleanup_thread);
-       }
-
-       rcu_thread_offline();
-       rcu_unregister_thread();
-
-       if (rotation_thread_handle) {
-               rotation_thread_handle_destroy(rotation_thread_handle);
-       }
-
-       /*
-        * After the rotation and timer thread have quit, we can safely destroy
-        * the rotation_timer_queue.
-        */
-       rotation_thread_timer_queue_destroy(rotation_timer_queue);
-       /*
-        * The teardown of the notification system is performed after the
-        * session daemon's teardown in order to allow it to be notified
-        * of the active session and channels at the moment of the teardown.
-        */
-       if (the_notification_thread_handle) {
-               notification_thread_handle_destroy(
-                               the_notification_thread_handle);
-       }
-       lttng_pipe_destroy(ust32_channel_monitor_pipe);
-       lttng_pipe_destroy(ust64_channel_monitor_pipe);
-       lttng_pipe_destroy(kernel_channel_monitor_pipe);
-
-       if (the_health_sessiond) {
-               health_app_destroy(the_health_sessiond);
-       }
-exit_create_run_as_worker_cleanup:
-exit_options:
-       sessiond_cleanup_lock_file();
-       sessiond_cleanup_options();
-
-exit_set_signal_handler:
-       if (!retval) {
-               exit(EXIT_SUCCESS);
-       } else {
-               exit(EXIT_FAILURE);
-       }
-}
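For context on the teardown ordering above: structures reclaimed through liburcu's call_rcu() must not be destroyed until every pending callback has run, which is what the repeated rcu_barrier() calls guarantee. A minimal sketch of that pattern (illustrative only; it uses the same liburcu calls the daemon relies on, but the entry type is hypothetical):

    #include <urcu.h>
    #include <urcu/compiler.h>
    #include <stdlib.h>

    struct session_entry {
            int id;
            struct rcu_head rcu_node;
    };

    static void free_entry_rcu(struct rcu_head *head)
    {
            struct session_entry *entry =
                    caa_container_of(head, struct session_entry, rcu_node);

            free(entry);
    }

    static void release_entry(struct session_entry *entry)
    {
            /* Deferred reclamation: runs after an RCU grace period. */
            call_rcu(&entry->rcu_node, free_entry_rcu);
    }

    static void teardown(void)
    {
            /*
             * Wait for all queued call_rcu callbacks to complete before
             * destroying the structures they may still reference.
             */
            rcu_barrier();
    }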
diff --git a/src/bin/lttng-sessiond/main.cpp b/src/bin/lttng-sessiond/main.cpp
new file mode 100644 (file)
index 0000000..e5e7188
--- /dev/null
@@ -0,0 +1,2053 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <getopt.h>
+#include <grp.h>
+#include <limits.h>
+#include <paths.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <urcu/uatomic.h>
+#include <unistd.h>
+#include <ctype.h>
+
+#include <common/common.h>
+#include <common/compat/socket.h>
+#include <common/compat/getenv.h>
+#include <common/defaults.h>
+#include <common/kernel-consumer/kernel-consumer.h>
+#include <common/futex.h>
+#include <common/relayd/relayd.h>
+#include <common/utils.h>
+#include <common/daemonize.h>
+#include <common/config/session-config.h>
+#include <common/dynamic-buffer.h>
+#include <lttng/event-internal.h>
+
+#include "lttng-sessiond.h"
+#include "buffer-registry.h"
+#include "channel.h"
+#include "cmd.h"
+#include "consumer.h"
+#include "context.h"
+#include "event.h"
+#include "event-notifier-error-accounting.h"
+#include "kernel.h"
+#include "kernel-consumer.h"
+#include "lttng-ust-ctl.h"
+#include "ust-consumer.h"
+#include "utils.h"
+#include "fd-limit.h"
+#include "health-sessiond.h"
+#include "testpoint.h"
+#include "notify-apps.h"
+#include "agent-thread.h"
+#include "save.h"
+#include "notification-thread.h"
+#include "notification-thread-commands.h"
+#include "rotation-thread.h"
+#include "agent.h"
+#include "ht-cleanup.h"
+#include "sessiond-config.h"
+#include "timer.h"
+#include "thread.h"
+#include "client.h"
+#include "dispatch.h"
+#include "register.h"
+#include "manage-apps.h"
+#include "manage-kernel.h"
+#include "modprobe.h"
+#include "ust-sigbus.h"
+
+static const char *help_msg =
+#ifdef LTTNG_EMBED_HELP
+#include <lttng-sessiond.8.h>
+#else
+NULL
+#endif
+;
+
+#define EVENT_NOTIFIER_ERROR_COUNTER_NUMBER_OF_BUCKET_MAX 65535
+#define EVENT_NOTIFIER_ERROR_BUFFER_SIZE_BASE_OPTION_STR \
+               "event-notifier-error-buffer-size"
+#define EVENT_NOTIFIER_ERROR_BUFFER_SIZE_KERNEL_OPTION_STR \
+               EVENT_NOTIFIER_ERROR_BUFFER_SIZE_BASE_OPTION_STR "-kernel"
+#define EVENT_NOTIFIER_ERROR_BUFFER_SIZE_USERSPACE_OPTION_STR \
+               EVENT_NOTIFIER_ERROR_BUFFER_SIZE_BASE_OPTION_STR "-userspace"
+
+
+const char *progname;
+static int lockfile_fd = -1;
+static int opt_print_version;
+
+/* Set to 1 when a SIGUSR1 signal is received. */
+static int recv_child_signal;
+
+/* Command line options */
+static const struct option long_options[] = {
+       { "client-sock", required_argument, 0, 'c' },
+       { "apps-sock", required_argument, 0, 'a' },
+       { "kconsumerd-cmd-sock", required_argument, 0, '\0' },
+       { "kconsumerd-err-sock", required_argument, 0, '\0' },
+       { "ustconsumerd32-cmd-sock", required_argument, 0, '\0' },
+       { "ustconsumerd32-err-sock", required_argument, 0, '\0' },
+       { "ustconsumerd64-cmd-sock", required_argument, 0, '\0' },
+       { "ustconsumerd64-err-sock", required_argument, 0, '\0' },
+       { "consumerd32-path", required_argument, 0, '\0' },
+       { "consumerd32-libdir", required_argument, 0, '\0' },
+       { "consumerd64-path", required_argument, 0, '\0' },
+       { "consumerd64-libdir", required_argument, 0, '\0' },
+       { "daemonize", no_argument, 0, 'd' },
+       { "background", no_argument, 0, 'b' },
+       { "sig-parent", no_argument, 0, 'S' },
+       { "help", no_argument, 0, 'h' },
+       { "group", required_argument, 0, 'g' },
+       { "version", no_argument, 0, 'V' },
+       { "quiet", no_argument, 0, 'q' },
+       { "verbose", no_argument, 0, 'v' },
+       { "verbose-consumer", no_argument, 0, '\0' },
+       { "no-kernel", no_argument, 0, '\0' },
+       { "pidfile", required_argument, 0, 'p' },
+       { "agent-tcp-port", required_argument, 0, '\0' },
+       { "config", required_argument, 0, 'f' },
+       { "load", required_argument, 0, 'l' },
+       { "kmod-probes", required_argument, 0, '\0' },
+       { "extra-kmod-probes", required_argument, 0, '\0' },
+       { EVENT_NOTIFIER_ERROR_BUFFER_SIZE_KERNEL_OPTION_STR, required_argument, 0, '\0' },
+       { EVENT_NOTIFIER_ERROR_BUFFER_SIZE_USERSPACE_OPTION_STR, required_argument, 0, '\0' },
+       { NULL, 0, 0, 0 }
+};
+
+/* Command line options to ignore from configuration file */
+static const char *config_ignore_options[] = { "help", "version", "config" };
+
+/*
+ * This pipe is used to inform the thread managing application communication
+ * that a command is queued and ready to be processed.
+ */
+static int apps_cmd_pipe[2] = { -1, -1 };
+static int apps_cmd_notify_pipe[2] = { -1, -1 };
+
+/*
+ * UST registration command queue. This queue is tied with a futex and uses an
+ * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
+ *
+ * The thread_registration_apps and thread_dispatch_ust_registration threads
+ * use this queue along with the wait/wake scheme. Further down the line,
+ * thread_manage_apps receives the new application socket and monitors it for
+ * any I/O error or clean close that triggers an unregistration of the
+ * application.
+static struct ust_cmd_queue ust_cmd_queue;
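The wait/wake scheme mentioned above lives in futex.c/.h: any number of producer threads enqueue registration commands and wake a single dispatcher that sleeps while the queue is empty. A simplified, self-contained illustration of the N-wakers / 1-waiter idea using a mutex and condition variable (not the actual futex-based implementation):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;
    static bool work_pending;

    /* Called by any of the N waker threads after enqueuing a command. */
    static void wake_dispatcher(void)
    {
            pthread_mutex_lock(&queue_lock);
            work_pending = true;
            pthread_cond_signal(&queue_cond);
            pthread_mutex_unlock(&queue_lock);
    }

    /* Called by the single waiter (the dispatch thread). */
    static void wait_for_work(void)
    {
            pthread_mutex_lock(&queue_lock);
            while (!work_pending) {
                    pthread_cond_wait(&queue_cond, &queue_lock);
            }
            work_pending = false;
            pthread_mutex_unlock(&queue_lock);
    }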
+
+/*
+ * Section name to look for in the daemon configuration file.
+ */
+static const char * const config_section_name = "sessiond";
+
+/* Am I root or not. Set to 1 if the daemon is running as root */
+static int is_root;
+
+/*
+ * Stop all threads by closing the thread quit pipe.
+ */
+static void stop_threads(void)
+{
+       int ret;
+
+       /* Stopping all threads */
+       DBG("Terminating all threads");
+       ret = sessiond_notify_quit_pipe();
+       if (ret < 0) {
+               ERR("write error on thread quit pipe");
+       }
+}
+
+/*
+ * Close all consumer sockets.
+ */
+static void close_consumer_sockets(void)
+{
+       int ret;
+
+       if (the_kconsumer_data.err_sock >= 0) {
+               ret = close(the_kconsumer_data.err_sock);
+               if (ret < 0) {
+                       PERROR("kernel consumer err_sock close");
+               }
+       }
+       if (the_ustconsumer32_data.err_sock >= 0) {
+               ret = close(the_ustconsumer32_data.err_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd32 err_sock close");
+               }
+       }
+       if (the_ustconsumer64_data.err_sock >= 0) {
+               ret = close(the_ustconsumer64_data.err_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd64 err_sock close");
+               }
+       }
+       if (the_kconsumer_data.cmd_sock >= 0) {
+               ret = close(the_kconsumer_data.cmd_sock);
+               if (ret < 0) {
+                       PERROR("kernel consumer cmd_sock close");
+               }
+       }
+       if (the_ustconsumer32_data.cmd_sock >= 0) {
+               ret = close(the_ustconsumer32_data.cmd_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd32 cmd_sock close");
+               }
+       }
+       if (the_ustconsumer64_data.cmd_sock >= 0) {
+               ret = close(the_ustconsumer64_data.cmd_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd64 cmd_sock close");
+               }
+       }
+       if (the_kconsumer_data.channel_monitor_pipe >= 0) {
+               ret = close(the_kconsumer_data.channel_monitor_pipe);
+               if (ret < 0) {
+                       PERROR("kernel consumer channel monitor pipe close");
+               }
+       }
+       if (the_ustconsumer32_data.channel_monitor_pipe >= 0) {
+               ret = close(the_ustconsumer32_data.channel_monitor_pipe);
+               if (ret < 0) {
+                       PERROR("UST consumerd32 channel monitor pipe close");
+               }
+       }
+       if (the_ustconsumer64_data.channel_monitor_pipe >= 0) {
+               ret = close(the_ustconsumer64_data.channel_monitor_pipe);
+               if (ret < 0) {
+                       PERROR("UST consumerd64 channel monitor pipe close");
+               }
+       }
+}
+
+/*
+ * Wait on consumer process termination.
+ *
+ * Must be called with the consumer data lock held or from a context
+ * ensuring no concurrent access to the data (e.g. cleanup).
+ */
+static void wait_consumer(struct consumer_data *consumer_data)
+{
+       pid_t ret;
+       int status;
+
+       if (consumer_data->pid <= 0) {
+               return;
+       }
+
+       DBG("Waiting for complete teardown of consumerd (PID: %d)",
+                       consumer_data->pid);
+       ret = waitpid(consumer_data->pid, &status, 0);
+       if (ret == -1) {
+               PERROR("consumerd waitpid pid: %d", consumer_data->pid);
+       } else if (!WIFEXITED(status)) {
+               ERR("consumerd termination with error: %d",
+                               WEXITSTATUS(status));
+       }
+       consumer_data->pid = 0;
+}
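As a reference for the status handling above, waitpid()'s status word is decoded with the standard macros; a short standalone sketch of the usual pattern:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    static void report_child_status(pid_t pid, int status)
    {
            if (WIFEXITED(status)) {
                    /* Normal termination: the exit code is meaningful. */
                    printf("child %d exited with status %d\n",
                                    (int) pid, WEXITSTATUS(status));
            } else if (WIFSIGNALED(status)) {
                    /* Terminated by a signal. */
                    printf("child %d killed by signal %d\n",
                                    (int) pid, WTERMSIG(status));
            }
    }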
+
+/*
+ * Cleanup the session daemon's data structures.
+ */
+static void sessiond_cleanup(void)
+{
+       int ret;
+       struct ltt_session_list *session_list = session_get_list();
+
+       DBG("Cleanup sessiond");
+
+       /*
+        * Close the thread quit pipe. It has already done its job,
+        * since we are now called.
+        */
+       sessiond_close_quit_pipe();
+       utils_close_pipe(apps_cmd_pipe);
+       utils_close_pipe(apps_cmd_notify_pipe);
+       utils_close_pipe(the_kernel_poll_pipe);
+
+       ret = remove(the_config.pid_file_path.value);
+       if (ret < 0) {
+               PERROR("remove pidfile %s", the_config.pid_file_path.value);
+       }
+
+       DBG("Removing sessiond and consumerd content of directory %s",
+                       the_config.rundir.value);
+
+       /* sessiond */
+       DBG("Removing %s", the_config.pid_file_path.value);
+       (void) unlink(the_config.pid_file_path.value);
+
+       DBG("Removing %s", the_config.agent_port_file_path.value);
+       (void) unlink(the_config.agent_port_file_path.value);
+
+       /* kconsumerd */
+       DBG("Removing %s", the_kconsumer_data.err_unix_sock_path);
+       (void) unlink(the_kconsumer_data.err_unix_sock_path);
+
+       DBG("Removing directory %s", the_config.kconsumerd_path.value);
+       (void) rmdir(the_config.kconsumerd_path.value);
+
+       /* ust consumerd 32 */
+       DBG("Removing %s", the_config.consumerd32_err_unix_sock_path.value);
+       (void) unlink(the_config.consumerd32_err_unix_sock_path.value);
+
+       DBG("Removing directory %s", the_config.consumerd32_path.value);
+       (void) rmdir(the_config.consumerd32_path.value);
+
+       /* ust consumerd 64 */
+       DBG("Removing %s", the_config.consumerd64_err_unix_sock_path.value);
+       (void) unlink(the_config.consumerd64_err_unix_sock_path.value);
+
+       DBG("Removing directory %s", the_config.consumerd64_path.value);
+       (void) rmdir(the_config.consumerd64_path.value);
+
+       pthread_mutex_destroy(&session_list->lock);
+
+       DBG("Cleaning up all per-event notifier domain agents");
+       agent_by_event_notifier_domain_ht_destroy();
+
+       DBG("Cleaning up all agent apps");
+       agent_app_ht_clean();
+       DBG("Closing all UST sockets");
+       ust_app_clean_list();
+       buffer_reg_destroy_registries();
+
+       close_consumer_sockets();
+
+       wait_consumer(&the_kconsumer_data);
+       wait_consumer(&the_ustconsumer64_data);
+       wait_consumer(&the_ustconsumer32_data);
+
+       if (is_root && !the_config.no_kernel) {
+               cleanup_kernel_tracer();
+       }
+
+       /*
+        * We do NOT rmdir rundir because there are other processes
+        * using it, for instance lttng-relayd, which can start in
+        * parallel with this teardown.
+        */
+}
+
+/*
+ * Cleanup the daemon's option data structures.
+ */
+static void sessiond_cleanup_options(void)
+{
+       DBG("Cleaning up options");
+
+       sessiond_config_fini(&the_config);
+
+       run_as_destroy_worker();
+}
+
+static int string_match(const char *str1, const char *str2)
+{
+       return (str1 && str2) && !strcmp(str1, str2);
+}
+
+/*
+ * Take an option from the getopt output and set it in the right variable to be
+ * used later.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int set_option(int opt, const char *arg, const char *optname)
+{
+       int ret = 0;
+
+       if (string_match(optname, "client-sock") || opt == 'c') {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "-c, --client-sock");
+               } else {
+                       config_string_set(&the_config.client_unix_sock_path,
+                                       strdup(arg));
+                       if (!the_config.client_unix_sock_path.value) {
+                               ret = -ENOMEM;
+                               PERROR("strdup");
+                       }
+               }
+       } else if (string_match(optname, "apps-sock") || opt == 'a') {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "-a, --apps-sock");
+               } else {
+                       config_string_set(&the_config.apps_unix_sock_path,
+                                       strdup(arg));
+                       if (!the_config.apps_unix_sock_path.value) {
+                               ret = -ENOMEM;
+                               PERROR("strdup");
+                       }
+               }
+       } else if (string_match(optname, "daemonize") || opt == 'd') {
+               the_config.daemonize = true;
+       } else if (string_match(optname, "background") || opt == 'b') {
+               the_config.background = true;
+       } else if (string_match(optname, "group") || opt == 'g') {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "-g, --group");
+               } else {
+                       config_string_set(&the_config.tracing_group_name,
+                                       strdup(arg));
+                       if (!the_config.tracing_group_name.value) {
+                               ret = -ENOMEM;
+                               PERROR("strdup");
+                       }
+               }
+       } else if (string_match(optname, "help") || opt == 'h') {
+               ret = utils_show_help(8, "lttng-sessiond", help_msg);
+               if (ret) {
+                       ERR("Cannot show --help for `lttng-sessiond`");
+                       perror("exec");
+               }
+               exit(ret ? EXIT_FAILURE : EXIT_SUCCESS);
+       } else if (string_match(optname, "version") || opt == 'V') {
+               opt_print_version = 1;
+       } else if (string_match(optname, "sig-parent") || opt == 'S') {
+               the_config.sig_parent = true;
+       } else if (string_match(optname, "kconsumerd-err-sock")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--kconsumerd-err-sock");
+               } else {
+                       config_string_set(
+                                       &the_config.kconsumerd_err_unix_sock_path,
+                                       strdup(arg));
+                       if (!the_config.kconsumerd_err_unix_sock_path.value) {
+                               ret = -ENOMEM;
+                               PERROR("strdup");
+                       }
+               }
+       } else if (string_match(optname, "kconsumerd-cmd-sock")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--kconsumerd-cmd-sock");
+               } else {
+                       config_string_set(
+                                       &the_config.kconsumerd_cmd_unix_sock_path,
+                                       strdup(arg));
+                       if (!the_config.kconsumerd_cmd_unix_sock_path.value) {
+                               ret = -ENOMEM;
+                               PERROR("strdup");
+                       }
+               }
+       } else if (string_match(optname, "ustconsumerd64-err-sock")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--ustconsumerd64-err-sock");
+               } else {
+                       config_string_set(
+                                       &the_config.consumerd64_err_unix_sock_path,
+                                       strdup(arg));
+                       if (!the_config.consumerd64_err_unix_sock_path.value) {
+                               ret = -ENOMEM;
+                               PERROR("strdup");
+                       }
+               }
+       } else if (string_match(optname, "ustconsumerd64-cmd-sock")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--ustconsumerd64-cmd-sock");
+               } else {
+                       config_string_set(
+                                       &the_config.consumerd64_cmd_unix_sock_path,
+                                       strdup(arg));
+                       if (!the_config.consumerd64_cmd_unix_sock_path.value) {
+                               ret = -ENOMEM;
+                               PERROR("strdup");
+                       }
+               }
+       } else if (string_match(optname, "ustconsumerd32-err-sock")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--ustconsumerd32-err-sock");
+               } else {
+                       config_string_set(
+                                       &the_config.consumerd32_err_unix_sock_path,
+                                       strdup(arg));
+                       if (!the_config.consumerd32_err_unix_sock_path.value) {
+                               ret = -ENOMEM;
+                               PERROR("strdup");
+                       }
+               }
+       } else if (string_match(optname, "ustconsumerd32-cmd-sock")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--ustconsumerd32-cmd-sock");
+               } else {
+                       config_string_set(
+                                       &the_config.consumerd32_cmd_unix_sock_path,
+                                       strdup(arg));
+                       if (!the_config.consumerd32_cmd_unix_sock_path.value) {
+                               ret = -ENOMEM;
+                               PERROR("strdup");
+                       }
+               }
+       } else if (string_match(optname, "no-kernel")) {
+               the_config.no_kernel = true;
+       } else if (string_match(optname, "quiet") || opt == 'q') {
+               the_config.quiet = true;
+       } else if (string_match(optname, "verbose") || opt == 'v') {
+               /* Verbose level can increase using multiple -v */
+               if (arg) {
+                       /* Value obtained from config file */
+                       the_config.verbose = config_parse_value(arg);
+               } else {
+                       /* -v used on command line */
+                       the_config.verbose++;
+               }
+               /* Clamp value to [0, 3] */
+               the_config.verbose = the_config.verbose < 0 ?
+                                     0 :
+                                     (the_config.verbose <= 3 ? the_config.verbose :
+                                                                3);
+       } else if (string_match(optname, "verbose-consumer")) {
+               if (arg) {
+                       the_config.verbose_consumer = config_parse_value(arg);
+               } else {
+                       the_config.verbose_consumer++;
+               }
+       } else if (string_match(optname, "consumerd32-path")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--consumerd32-path");
+               } else {
+                       config_string_set(&the_config.consumerd32_bin_path,
+                                       strdup(arg));
+                       if (!the_config.consumerd32_bin_path.value) {
+                               PERROR("strdup");
+                               ret = -ENOMEM;
+                       }
+               }
+       } else if (string_match(optname, "consumerd32-libdir")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--consumerd32-libdir");
+               } else {
+                       config_string_set(&the_config.consumerd32_lib_dir,
+                                       strdup(arg));
+                       if (!the_config.consumerd32_lib_dir.value) {
+                               PERROR("strdup");
+                               ret = -ENOMEM;
+                       }
+               }
+       } else if (string_match(optname, "consumerd64-path")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--consumerd64-path");
+               } else {
+                       config_string_set(&the_config.consumerd64_bin_path,
+                                       strdup(arg));
+                       if (!the_config.consumerd64_bin_path.value) {
+                               PERROR("strdup");
+                               ret = -ENOMEM;
+                       }
+               }
+       } else if (string_match(optname, "consumerd64-libdir")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--consumerd64-libdir");
+               } else {
+                       config_string_set(&the_config.consumerd64_lib_dir,
+                                       strdup(arg));
+                       if (!the_config.consumerd64_lib_dir.value) {
+                               PERROR("strdup");
+                               ret = -ENOMEM;
+                       }
+               }
+       } else if (string_match(optname, "pidfile") || opt == 'p') {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "-p, --pidfile");
+               } else {
+                       config_string_set(
+                                       &the_config.pid_file_path, strdup(arg));
+                       if (!the_config.pid_file_path.value) {
+                               PERROR("strdup");
+                               ret = -ENOMEM;
+                       }
+               }
+       } else if (string_match(optname, "agent-tcp-port")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--agent-tcp-port");
+               } else {
+                       unsigned long v;
+
+                       errno = 0;
+                       v = strtoul(arg, NULL, 0);
+                       if (errno != 0 || !isdigit(arg[0])) {
+                               ERR("Wrong value in --agent-tcp-port parameter: %s", arg);
+                               return -1;
+                       }
+                       if (v == 0 || v >= 65535) {
+                               ERR("Port overflow in --agent-tcp-port parameter: %s", arg);
+                               return -1;
+                       }
+                       the_config.agent_tcp_port.begin =
+                                       the_config.agent_tcp_port.end = (int) v;
+                       DBG3("Agent TCP port set to non default: %i", (int) v);
+               }
+       } else if (string_match(optname, "load") || opt == 'l') {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "-l, --load");
+               } else {
+                       config_string_set(&the_config.load_session_path,
+                                       strdup(arg));
+                       if (!the_config.load_session_path.value) {
+                               PERROR("strdup");
+                               ret = -ENOMEM;
+                       }
+               }
+       } else if (string_match(optname, "kmod-probes")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--kmod-probes");
+               } else {
+                       config_string_set(&the_config.kmod_probes_list,
+                                       strdup(arg));
+                       if (!the_config.kmod_probes_list.value) {
+                               PERROR("strdup");
+                               ret = -ENOMEM;
+                       }
+               }
+       } else if (string_match(optname, "extra-kmod-probes")) {
+               if (!arg || *arg == '\0') {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "--extra-kmod-probes");
+               } else {
+                       config_string_set(&the_config.kmod_extra_probes_list,
+                                       strdup(arg));
+                       if (!the_config.kmod_extra_probes_list.value) {
+                               PERROR("strdup");
+                               ret = -ENOMEM;
+                       }
+               }
+       } else if (string_match(optname, EVENT_NOTIFIER_ERROR_BUFFER_SIZE_KERNEL_OPTION_STR)) {
+               unsigned long v;
+
+               errno = 0;
+               v = strtoul(arg, NULL, 0);
+               if (errno != 0 || !isdigit(arg[0])) {
+                       ERR("Wrong value in --%s parameter: %s",
+                                       EVENT_NOTIFIER_ERROR_BUFFER_SIZE_KERNEL_OPTION_STR, arg);
+                       return -1;
+               }
+               if (v == 0 || v >= EVENT_NOTIFIER_ERROR_COUNTER_NUMBER_OF_BUCKET_MAX) {
+                       ERR("Value out of range for --%s parameter: %s",
+                                       EVENT_NOTIFIER_ERROR_BUFFER_SIZE_KERNEL_OPTION_STR, arg);
+                       return -1;
+               }
+               the_config.event_notifier_buffer_size_kernel = (int) v;
+               DBG3("Kernel event notifier error buffer size set to non-default value: %i",
+                               the_config.event_notifier_buffer_size_kernel);
+               goto end;
+       } else if (string_match(optname, EVENT_NOTIFIER_ERROR_BUFFER_SIZE_USERSPACE_OPTION_STR)) {
+               unsigned long v;
+
+               errno = 0;
+               v = strtoul(arg, NULL, 0);
+               if (errno != 0 || !isdigit(arg[0])) {
+                       ERR("Wrong value in --%s parameter: %s",
+                                       EVENT_NOTIFIER_ERROR_BUFFER_SIZE_USERSPACE_OPTION_STR, arg);
+                       return -1;
+               }
+               if (v == 0 || v >= EVENT_NOTIFIER_ERROR_COUNTER_NUMBER_OF_BUCKET_MAX) {
+                       ERR("Value out of range for --%s parameter: %s",
+                                       EVENT_NOTIFIER_ERROR_BUFFER_SIZE_USERSPACE_OPTION_STR, arg);
+                       return -1;
+               }
+               the_config.event_notifier_buffer_size_userspace = (int) v;
+               DBG3("Userspace event notifier error buffer size set to non-default value: %i",
+                               the_config.event_notifier_buffer_size_userspace);
+               goto end;
+       } else if (string_match(optname, "config") || opt == 'f') {
+               /* This is handled in set_options() thus silent skip. */
+               goto end;
+       } else {
+               /* Unknown option or other error.
+                * Error is printed by getopt, just return */
+               ret = -1;
+       }
+
+end:
+       if (ret == -EINVAL) {
+               const char *opt_name = "unknown";
+               int i;
+
+               for (i = 0; i < sizeof(long_options) / sizeof(struct option);
+                       i++) {
+                       if (opt == long_options[i].val) {
+                               opt_name = long_options[i].name;
+                               break;
+                       }
+               }
+
+               WARN("Invalid argument provided for option \"%s\", using default value.",
+                       opt_name);
+       }
+
+       return ret;
+}
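The numeric options above (--agent-tcp-port and the event-notifier-error-buffer-size options) are validated with strtoul() plus a leading-digit check. A slightly stricter general-purpose variant of that validation, shown only as a sketch (the helper name is hypothetical), also checks the end pointer:

    #include <errno.h>
    #include <stdlib.h>

    /* Returns 0 and stores the value in *out on success, -1 on error. */
    static int parse_bounded_ulong(const char *arg, unsigned long min,
                    unsigned long max, unsigned long *out)
    {
            char *end = NULL;
            unsigned long v;

            errno = 0;
            v = strtoul(arg, &end, 0);
            if (errno != 0 || end == arg || *end != '\0') {
                    /* Overflow, not a number, or trailing garbage. */
                    return -1;
            }
            if (v < min || v > max) {
                    return -1;
            }
            *out = v;
            return 0;
    }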
+
+/*
+ * config_entry_handler_cb used to handle options read from a config file.
+ * See config_entry_handler_cb comment in common/config/session-config.h for the
+ * return value conventions.
+ */
+static int config_entry_handler(const struct config_entry *entry, void *unused)
+{
+       int ret = 0, i;
+
+       if (!entry || !entry->name || !entry->value) {
+               ret = -EINVAL;
+               goto end;
+       }
+
+       /* Check if the option is to be ignored */
+       for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
+               if (!strcmp(entry->name, config_ignore_options[i])) {
+                       goto end;
+               }
+       }
+
+       for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1;
+               i++) {
+
+               /* Ignore if not fully matched. */
+               if (strcmp(entry->name, long_options[i].name)) {
+                       continue;
+               }
+
+               /*
+                * If the option takes no argument on the command line, we have to
+                * check if the value is "true". We support non-zero numeric values,
+                * true, on and yes.
+                */
+               if (!long_options[i].has_arg) {
+                       ret = config_parse_value(entry->value);
+                       if (ret <= 0) {
+                               if (ret) {
+                                       WARN("Invalid configuration value \"%s\" for option %s",
+                                                       entry->value, entry->name);
+                               }
+                               /* False, skip boolean config option. */
+                               goto end;
+                       }
+               }
+
+               ret = set_option(long_options[i].val, entry->value, entry->name);
+               goto end;
+       }
+
+       WARN("Unrecognized option \"%s\" in daemon configuration file.", entry->name);
+
+end:
+       return ret;
+}
+
+static void print_version(void)
+{
+       fprintf(stdout, "%s\n", VERSION);
+}
+
+/*
+ * daemon configuration loading and argument parsing
+ */
+static int set_options(int argc, char **argv)
+{
+       int ret = 0, c = 0, option_index = 0;
+       int orig_optopt = optopt, orig_optind = optind;
+       char *optstring;
+       char *config_path = NULL;
+
+       optstring = utils_generate_optstring(long_options,
+                       sizeof(long_options) / sizeof(struct option));
+       if (!optstring) {
+               ret = -ENOMEM;
+               goto end;
+       }
+
+       /* Check for the --config option */
+       while ((c = getopt_long(argc, argv, optstring, long_options,
+                                       &option_index)) != -1) {
+               if (c == '?') {
+                       ret = -EINVAL;
+                       goto end;
+               } else if (c != 'f') {
+                       /* if not equal to --config option. */
+                       continue;
+               }
+
+               if (lttng_is_setuid_setgid()) {
+                       WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
+                               "-f, --config");
+               } else {
+                       free(config_path);
+                       config_path = utils_expand_path(optarg);
+                       if (!config_path) {
+                               ERR("Failed to resolve path: %s", optarg);
+                       }
+               }
+       }
+
+       ret = config_get_section_entries(config_path, config_section_name,
+                       config_entry_handler, NULL);
+       if (ret) {
+               if (ret > 0) {
+                       ERR("Invalid configuration option at line %i", ret);
+                       ret = -1;
+               }
+               goto end;
+       }
+
+       /* Reset getopt's global state */
+       optopt = orig_optopt;
+       optind = orig_optind;
+       while (1) {
+               option_index = -1;
+               /*
+                * getopt_long() will not set option_index if it encounters a
+                * short option.
+                */
+               c = getopt_long(argc, argv, optstring, long_options,
+                               &option_index);
+               if (c == -1) {
+                       break;
+               }
+
+               /*
+                * Pass NULL as the long option name if getopt_long left the
+                * index unset.
+                */
+               ret = set_option(c, optarg,
+                               option_index < 0 ? NULL :
+                               long_options[option_index].name);
+               if (ret < 0) {
+                       break;
+               }
+       }
+
+end:
+       free(config_path);
+       free(optstring);
+       return ret;
+}
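set_options() scans the command line twice: a first pass that only looks for -f/--config, then a second full pass after restoring getopt's global state. A stripped-down sketch of that reset-and-rescan pattern (hypothetical option table, not the daemon's):

    #include <getopt.h>
    #include <stdio.h>

    static const struct option opts[] = {
            { "config", required_argument, 0, 'f' },
            { "verbose", no_argument, 0, 'v' },
            { 0, 0, 0, 0 }
    };

    static void parse_twice(int argc, char **argv)
    {
            int c;

            /* First pass: only care about -f/--config. */
            while ((c = getopt_long(argc, argv, "f:v", opts, NULL)) != -1) {
                    if (c == 'f') {
                            printf("config file: %s\n", optarg);
                    }
            }

            /* Reset getopt's state, then handle every option for real. */
            optind = 1;
            while ((c = getopt_long(argc, argv, "f:v", opts, NULL)) != -1) {
                    /* Dispatch on c here. */
            }
    }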
+
+/*
+ * Create lockfile using the rundir and return its fd.
+ */
+static int create_lockfile(void)
+{
+       return utils_create_lock_file(the_config.lock_file_path.value);
+}
+
+/*
+ * Check if the global socket is available and if a daemon is answering on the
+ * other side. If so, an error is returned.
+ *
+ * Also attempts to create and hold the lock file.
+ */
+static int check_existing_daemon(void)
+{
+       int ret = 0;
+
+       /* Is there anybody out there ? */
+       if (lttng_session_daemon_alive()) {
+               ret = -EEXIST;
+               goto end;
+       }
+
+       lockfile_fd = create_lockfile();
+       if (lockfile_fd < 0) {
+               ret = -EEXIST;
+               goto end;
+       }
+end:
+       return ret;
+}
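check_existing_daemon() depends on utils_create_lock_file() holding a lock file for the lifetime of the daemon. That helper is internal to lttng-tools; a generic sketch of how such a single-instance lock file can be implemented with flock() (illustrative, not the actual implementation):

    #include <fcntl.h>
    #include <sys/file.h>
    #include <unistd.h>

    /* Returns a held lock fd, or -1 if another instance owns the lock. */
    static int acquire_lock_file(const char *path)
    {
            int fd;

            fd = open(path, O_CREAT | O_RDWR, 0600);
            if (fd < 0) {
                    return -1;
            }

            /* Non-blocking exclusive lock: fails if already held. */
            if (flock(fd, LOCK_EX | LOCK_NB) < 0) {
                    close(fd);
                    return -1;
            }

            /* Keep the fd open; closing it releases the lock. */
            return fd;
    }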
+
+static void sessiond_cleanup_lock_file(void)
+{
+       int ret;
+
+       /*
+        * Clean up the lock file by deleting it and finally closing it, which
+        * releases the file system lock.
+        */
+       if (lockfile_fd >= 0) {
+               ret = remove(the_config.lock_file_path.value);
+               if (ret < 0) {
+                       PERROR("remove lock file");
+               }
+               ret = close(lockfile_fd);
+               if (ret < 0) {
+                       PERROR("close lock file");
+               }
+       }
+}
+
+/*
+ * Set the tracing group gid onto the client socket.
+ *
+ * Race window between mkdir and chown is OK because we are going from more
+ * permissive (root.root) to less permissive (root.tracing).
+ */
+static int set_permissions(char *rundir)
+{
+       int ret;
+       gid_t gid;
+
+       ret = utils_get_group_id(
+                       the_config.tracing_group_name.value, true, &gid);
+       if (ret) {
+               /* Default to root group. */
+               gid = 0;
+       }
+
+       /* Set lttng run dir */
+       ret = chown(rundir, 0, gid);
+       if (ret < 0) {
+               ERR("Unable to set group on %s", rundir);
+               PERROR("chown");
+       }
+
+       /*
+        * Ensure all applications and tracing group can search the run
+        * dir. Allow everyone to read the directory, since it does not
+        * buy us anything to hide its content.
+        */
+       ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
+       if (ret < 0) {
+               ERR("Unable to set permissions on %s", rundir);
+               PERROR("chmod");
+       }
+
+       /* lttng client socket path */
+       ret = chown(the_config.client_unix_sock_path.value, 0, gid);
+       if (ret < 0) {
+               ERR("Unable to set group on %s",
+                               the_config.client_unix_sock_path.value);
+               PERROR("chown");
+       }
+
+       /* kconsumer error socket path */
+       ret = chown(the_kconsumer_data.err_unix_sock_path, 0, 0);
+       if (ret < 0) {
+               ERR("Unable to set group on %s",
+                               the_kconsumer_data.err_unix_sock_path);
+               PERROR("chown");
+       }
+
+       /* 64-bit ustconsumer error socket path */
+       ret = chown(the_ustconsumer64_data.err_unix_sock_path, 0, 0);
+       if (ret < 0) {
+               ERR("Unable to set group on %s",
+                               the_ustconsumer64_data.err_unix_sock_path);
+               PERROR("chown");
+       }
+
+       /* 32-bit ustconsumer compat32 error socket path */
+       ret = chown(the_ustconsumer32_data.err_unix_sock_path, 0, 0);
+       if (ret < 0) {
+               ERR("Unable to set group on %s",
+                               the_ustconsumer32_data.err_unix_sock_path);
+               PERROR("chown");
+       }
+
+       DBG("All permissions are set");
+
+       return ret;
+}
+
+/*
+ * Create the lttng run directory needed for all global sockets and pipe.
+ */
+static int create_lttng_rundir(void)
+{
+       int ret;
+
+       DBG3("Creating LTTng run directory: %s", the_config.rundir.value);
+
+       ret = mkdir(the_config.rundir.value, S_IRWXU);
+       if (ret < 0) {
+               if (errno != EEXIST) {
+                       ERR("Unable to create %s", the_config.rundir.value);
+                       goto error;
+               } else {
+                       ret = 0;
+               }
+       }
+
+error:
+       return ret;
+}
+
+/*
+ * Setup sockets and directory needed by the consumerds' communication with the
+ * session daemon.
+ */
+static int set_consumer_sockets(struct consumer_data *consumer_data)
+{
+       int ret;
+       char *path = NULL;
+
+       switch (consumer_data->type) {
+       case LTTNG_CONSUMER_KERNEL:
+               path = the_config.kconsumerd_path.value;
+               break;
+       case LTTNG_CONSUMER64_UST:
+               path = the_config.consumerd64_path.value;
+               break;
+       case LTTNG_CONSUMER32_UST:
+               path = the_config.consumerd32_path.value;
+               break;
+       default:
+               ERR("Consumer type unknown");
+               ret = -EINVAL;
+               goto error;
+       }
+       LTTNG_ASSERT(path);
+
+       DBG2("Creating consumer directory: %s", path);
+
+       ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
+       if (ret < 0 && errno != EEXIST) {
+               PERROR("mkdir");
+               ERR("Failed to create %s", path);
+               goto error;
+       }
+       if (is_root) {
+               gid_t gid;
+
+               ret = utils_get_group_id(the_config.tracing_group_name.value,
+                               true, &gid);
+               if (ret) {
+                       /* Default to root group. */
+                       gid = 0;
+               }
+
+               ret = chown(path, 0, gid);
+               if (ret < 0) {
+                       ERR("Unable to set group on %s", path);
+                       PERROR("chown");
+                       goto error;
+               }
+       }
+
+       /* Create the consumerd error unix socket */
+       consumer_data->err_sock =
+               lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
+       if (consumer_data->err_sock < 0) {
+               ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
+               ret = -1;
+               goto error;
+       }
+
+       /*
+        * Set the CLOEXEC flag. Return code is useless because either way, the
+        * show must go on.
+        */
+       ret = utils_set_fd_cloexec(consumer_data->err_sock);
+       if (ret < 0) {
+               PERROR("utils_set_fd_cloexec");
+               /* continue anyway */
+       }
+
+       /* File permission MUST be 660 */
+       ret = chmod(consumer_data->err_unix_sock_path,
+                       S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+       if (ret < 0) {
+               ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
+               PERROR("chmod");
+               goto error;
+       }
+
+error:
+       return ret;
+}
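lttcomm_create_unix_sock() is an lttng-tools helper; for reference, a minimal sketch of what creating a listening unix socket with close-on-exec and 0660 permissions generally looks like (illustrative, not the library's implementation):

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/stat.h>
    #include <sys/un.h>
    #include <unistd.h>

    static int create_listening_unix_sock(const char *path)
    {
            struct sockaddr_un addr;
            int fd;

            fd = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
            if (fd < 0) {
                    return -1;
            }

            memset(&addr, 0, sizeof(addr));
            addr.sun_family = AF_UNIX;
            strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);

            unlink(path); /* Remove a stale socket, if any. */
            if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0 ||
                            listen(fd, 10) < 0 ||
                            chmod(path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0) {
                    close(fd);
                    return -1;
            }

            return fd;
    }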
+
+/*
+ * Signal handler for the daemon
+ *
+ * Simply stop all worker threads, letting main() return gracefully after
+ * joining all threads and calling cleanup().
+ */
+static void sighandler(int sig, siginfo_t *siginfo, void *arg)
+{
+       switch (sig) {
+       case SIGINT:
+               DBG("SIGINT caught");
+               stop_threads();
+               break;
+       case SIGTERM:
+               DBG("SIGTERM caught");
+               stop_threads();
+               break;
+       case SIGUSR1:
+               CMM_STORE_SHARED(recv_child_signal, 1);
+               break;
+       case SIGBUS:
+       {
+               int write_ret;
+               const char msg[] = "Received SIGBUS, aborting program.\n";
+
+               lttng_ust_handle_sigbus(siginfo->si_addr);
+               /*
+                * If ustctl did not catch this signal (triggering a
+                * siglongjmp), abort the program. Otherwise, the execution
+                * will resume from the ust-ctl call which caused this error.
+                *
+                * The return value is ignored since the program aborts anyhow.
+                */
+               write_ret = write(STDERR_FILENO, msg, sizeof(msg));
+               (void) write_ret;
+               abort();
+       }
+       default:
+               break;
+       }
+}
+
+/*
+ * Set up signal handlers for:
+ *             SIGINT, SIGTERM, SIGUSR1, SIGBUS, and SIGPIPE (ignored)
+ */
+static int set_signal_handler(void)
+{
+       int ret = 0;
+       struct sigaction sa;
+       sigset_t sigset;
+
+       if ((ret = sigemptyset(&sigset)) < 0) {
+               PERROR("sigemptyset");
+               return ret;
+       }
+
+       sa.sa_mask = sigset;
+       sa.sa_flags = SA_SIGINFO;
+
+       sa.sa_sigaction = sighandler;
+       if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
+               PERROR("sigaction");
+               return ret;
+       }
+
+       if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
+               PERROR("sigaction");
+               return ret;
+       }
+
+       if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
+               PERROR("sigaction");
+               return ret;
+       }
+
+       if ((ret = sigaction(SIGBUS, &sa, NULL)) < 0) {
+               PERROR("sigaction");
+               return ret;
+       }
+
+       sa.sa_flags = 0;
+       sa.sa_handler = SIG_IGN;
+       if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
+               PERROR("sigaction");
+               return ret;
+       }
+
+       DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE, SIGINT, and SIGBUS");
+
+       return ret;
+}
+
+/*
+ * Set open files limit to unlimited. This daemon can open a large number of
+ * file descriptors in order to consume multiple kernel traces.
+ */
+static void set_ulimit(void)
+{
+       int ret;
+       struct rlimit lim;
+
+       /* The kernel does not allow an infinite limit for open files */
+       lim.rlim_cur = 65535;
+       lim.rlim_max = 65535;
+
+       ret = setrlimit(RLIMIT_NOFILE, &lim);
+       if (ret < 0) {
+               PERROR("failed to set open files limit");
+       }
+}
+
+static int write_pidfile(void)
+{
+       return utils_create_pid_file(getpid(), the_config.pid_file_path.value);
+}
+
+static int set_clock_plugin_env(void)
+{
+       int ret = 0;
+       char *env_value = NULL;
+
+       if (!the_config.lttng_ust_clock_plugin.value) {
+               goto end;
+       }
+
+       ret = asprintf(&env_value, "LTTNG_UST_CLOCK_PLUGIN=%s",
+                       the_config.lttng_ust_clock_plugin.value);
+       if (ret < 0) {
+               PERROR("asprintf");
+               goto end;
+       }
+
+       ret = putenv(env_value);
+       if (ret) {
+               free(env_value);
+               PERROR("putenv of LTTNG_UST_CLOCK_PLUGIN");
+               goto end;
+       }
+
+       DBG("Updated LTTNG_UST_CLOCK_PLUGIN environment variable to \"%s\"",
+                       the_config.lttng_ust_clock_plugin.value);
+end:
+       return ret;
+}
+
+static void destroy_all_sessions_and_wait(void)
+{
+       struct ltt_session *session, *tmp;
+       struct ltt_session_list *session_list;
+
+       session_list = session_get_list();
+       DBG("Initiating destruction of all sessions");
+
+       if (!session_list) {
+               return;
+       }
+
+       session_lock_list();
+       /* Initiate the destruction of all sessions. */
+       cds_list_for_each_entry_safe(session, tmp,
+                       &session_list->head, list) {
+               if (!session_get(session)) {
+                       continue;
+               }
+
+               session_lock(session);
+               if (session->destroyed) {
+                       goto unlock_session;
+               }
+               (void) cmd_stop_trace(session);
+               (void) cmd_destroy_session(
+                               session, the_notification_thread_handle, NULL);
+       unlock_session:
+               session_unlock(session);
+               session_put(session);
+       }
+       session_unlock_list();
+
+       /* Wait for the destruction of all sessions to complete. */
+       DBG("Waiting for the destruction of all sessions to complete");
+       session_list_wait_empty();
+       DBG("Destruction of all sessions completed");
+}
+
+static void unregister_all_triggers(void)
+{
+       enum lttng_error_code ret_code;
+       enum lttng_trigger_status trigger_status;
+       struct lttng_triggers *triggers = NULL;
+       unsigned int trigger_count, i;
+       const struct lttng_credentials creds = {
+               .uid = LTTNG_OPTIONAL_INIT_VALUE(0),
+       };
+
+       DBG("Unregistering all triggers");
+
+       /*
+        * List all triggers as "root" since we wish to unregister all triggers.
+        */
+       ret_code = notification_thread_command_list_triggers(
+                       the_notification_thread_handle, creds.uid.value,
+                       &triggers);
+       if (ret_code != LTTNG_OK) {
+               ERR("Failed to list triggers while unregistering all triggers");
+               goto end;
+       }
+
+       trigger_status = lttng_triggers_get_count(triggers, &trigger_count);
+       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
+
+       for (i = 0; i < trigger_count; i++) {
+               uid_t trigger_owner;
+               const char *trigger_name;
+               const struct lttng_trigger *trigger =
+                               lttng_triggers_get_at_index(triggers, i);
+
+               LTTNG_ASSERT(trigger);
+
+               trigger_status = lttng_trigger_get_owner_uid(
+                               trigger, &trigger_owner);
+               LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
+
+               trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
+               trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ?
+                               trigger_name : "(anonymous)";
+
+               DBG("Unregistering trigger: trigger owner uid = %d, trigger name = '%s'",
+                               (int) trigger_owner, trigger_name);
+
+               ret_code = cmd_unregister_trigger(&creds, trigger,
+                               the_notification_thread_handle);
+               if (ret_code != LTTNG_OK) {
+                       ERR("Failed to unregister trigger: trigger owner uid = %d, trigger name = '%s', error: '%s'",
+                                       (int) trigger_owner, trigger_name,
+                                       lttng_strerror(-ret_code));
+                       /* Continue to unregister the remaining triggers. */
+               }
+       }
+end:
+       lttng_triggers_destroy(triggers);
+}
+
+static int run_as_worker_post_fork_cleanup(void *data)
+{
+       struct sessiond_config *sessiond_config = (struct sessiond_config *) data;
+
+       sessiond_config_fini(sessiond_config);
+       return 0;
+}
+
+static int launch_run_as_worker(const char *procname)
+{
+       /*
+        * Clean up before forking the run-as worker. Any dynamically
+        * allocated memory of which the worker is not aware will
+        * be leaked as the process forks a run-as worker (and performs
+        * no exec*()). The same applies to any open file descriptor.
+        */
+       return run_as_create_worker(
+                       procname, run_as_worker_post_fork_cleanup, &the_config);
+}
+
+static void sessiond_uuid_log(void)
+{
+       char uuid_str[LTTNG_UUID_STR_LEN];
+
+       lttng_uuid_to_str(the_sessiond_uuid, uuid_str);
+       DBG("Starting lttng-sessiond {%s}", uuid_str);
+}
+
+/*
+ * main
+ */
+int main(int argc, char **argv)
+{
+       int ret = 0, retval = 0;
+       const char *env_app_timeout;
+       struct lttng_pipe *ust32_channel_monitor_pipe = NULL,
+                       *ust64_channel_monitor_pipe = NULL,
+                       *kernel_channel_monitor_pipe = NULL;
+       struct lttng_thread *ht_cleanup_thread = NULL;
+       struct timer_thread_parameters timer_thread_parameters;
+       /* Rotation thread handle. */
+       struct rotation_thread_handle *rotation_thread_handle = NULL;
+       /* Queue of rotation jobs populated by the sessiond-timer. */
+       struct rotation_thread_timer_queue *rotation_timer_queue = NULL;
+       struct lttng_thread *client_thread = NULL;
+       struct lttng_thread *notification_thread = NULL;
+       struct lttng_thread *register_apps_thread = NULL;
+       enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
+
+       logger_set_thread_name("Main", false);
+       init_kernel_workarounds();
+
+       rcu_register_thread();
+
+       if (set_signal_handler()) {
+               retval = -1;
+               goto exit_set_signal_handler;
+       }
+
+       if (timer_signal_init()) {
+               retval = -1;
+               goto exit_set_signal_handler;
+       }
+
+       the_page_size = sysconf(_SC_PAGE_SIZE);
+       if (the_page_size < 0) {
+               PERROR("sysconf _SC_PAGE_SIZE");
+               the_page_size = LONG_MAX;
+               WARN("Fallback page size to %ld", the_page_size);
+       }
+
+       ret = sessiond_config_init(&the_config);
+       if (ret) {
+               retval = -1;
+               goto exit_set_signal_handler;
+       }
+
+       /*
+        * Initialize the configuration from environment variables.
+        * Command line options override the environment configuration, as
+        * documented, so the environment is applied first.
+        */
+       sessiond_config_apply_env_config(&the_config);
+
+       /*
+        * Parse arguments and load the daemon configuration file.
+        *
+        * We have an exit_options exit path to free memory reserved by
+        * set_options. This is needed because the rest of sessiond_cleanup()
+        * depends on ht_cleanup_thread, which depends on lttng_daemonize, which
+        * depends on set_options.
+        */
+       progname = argv[0];
+       if (set_options(argc, argv)) {
+               retval = -1;
+               goto exit_options;
+       }
+
+       /*
+        * Resolve all paths received as arguments, configuration option, or
+        * through environment variable as absolute paths. This is necessary
+        * since daemonizing causes the sessiond's current working directory
+        * to '/'.
+        */
+       ret = sessiond_config_resolve_paths(&the_config);
+       if (ret) {
+               goto exit_options;
+       }
+
+       /* Apply config. */
+       lttng_opt_verbose = the_config.verbose;
+       lttng_opt_quiet = the_config.quiet;
+       the_kconsumer_data.err_unix_sock_path =
+                       the_config.kconsumerd_err_unix_sock_path.value;
+       the_kconsumer_data.cmd_unix_sock_path =
+                       the_config.kconsumerd_cmd_unix_sock_path.value;
+       the_ustconsumer32_data.err_unix_sock_path =
+                       the_config.consumerd32_err_unix_sock_path.value;
+       the_ustconsumer32_data.cmd_unix_sock_path =
+                       the_config.consumerd32_cmd_unix_sock_path.value;
+       the_ustconsumer64_data.err_unix_sock_path =
+                       the_config.consumerd64_err_unix_sock_path.value;
+       the_ustconsumer64_data.cmd_unix_sock_path =
+                       the_config.consumerd64_cmd_unix_sock_path.value;
+       set_clock_plugin_env();
+
+       sessiond_config_log(&the_config);
+       sessiond_uuid_log();
+
+       if (opt_print_version) {
+               print_version();
+               retval = 0;
+               goto exit_options;
+       }
+
+       if (create_lttng_rundir()) {
+               retval = -1;
+               goto exit_options;
+       }
+
+       /* Abort launch if a session daemon is already running. */
+       if (check_existing_daemon()) {
+               ERR("A session daemon is already running.");
+               retval = -1;
+               goto exit_options;
+       }
+
+       /* Daemonize */
+       if (the_config.daemonize || the_config.background) {
+               int i;
+
+               ret = lttng_daemonize(&the_child_ppid, &recv_child_signal,
+                               !the_config.background);
+               if (ret < 0) {
+                       retval = -1;
+                       goto exit_options;
+               }
+
+               /*
+                * We are in the child. Make sure all other file descriptors are
+                * closed, in case we were started with more open file
+                * descriptors than the standard ones and the lock file.
+                */
+               for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
+                       if (i == lockfile_fd) {
+                               continue;
+                       }
+                       (void) close(i);
+               }
+       }
+
+       if (launch_run_as_worker(argv[0]) < 0) {
+               goto exit_create_run_as_worker_cleanup;
+       }
+
+       /*
+        * Starting from here, we can create threads. This needs to be after
+        * lttng_daemonize due to RCU.
+        */
+
+       /*
+        * Initialize the health check subsystem. This call should set the
+        * appropriate time values.
+        */
+       the_health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
+       if (!the_health_sessiond) {
+               PERROR("health_app_create error");
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Create thread to clean up RCU hash tables */
+       ht_cleanup_thread = launch_ht_cleanup_thread();
+       if (!ht_cleanup_thread) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Create thread quit pipe */
+       if (sessiond_init_thread_quit_pipe()) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Check if daemon is UID = 0 */
+       is_root = !getuid();
+       if (is_root) {
+               /* Create global run dir with root access */
+
+               kernel_channel_monitor_pipe = lttng_pipe_open(0);
+               if (!kernel_channel_monitor_pipe) {
+                       ERR("Failed to create kernel consumer channel monitor pipe");
+                       retval = -1;
+                       goto stop_threads;
+               }
+               the_kconsumer_data.channel_monitor_pipe =
+                               lttng_pipe_release_writefd(
+                                               kernel_channel_monitor_pipe);
+               if (the_kconsumer_data.channel_monitor_pipe < 0) {
+                       retval = -1;
+                       goto stop_threads;
+               }
+       }
+
+       /* Set consumer initial state */
+       the_kernel_consumerd_state = CONSUMER_STOPPED;
+       the_ust_consumerd_state = CONSUMER_STOPPED;
+
+       ust32_channel_monitor_pipe = lttng_pipe_open(0);
+       if (!ust32_channel_monitor_pipe) {
+               ERR("Failed to create 32-bit user space consumer channel monitor pipe");
+               retval = -1;
+               goto stop_threads;
+       }
+       the_ustconsumer32_data.channel_monitor_pipe =
+                       lttng_pipe_release_writefd(ust32_channel_monitor_pipe);
+       if (the_ustconsumer32_data.channel_monitor_pipe < 0) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /*
+        * The rotation_thread_timer_queue structure is shared between the
+        * sessiond timer thread and the rotation thread. The main thread keeps
+        * its ownership and destroys it when both threads have been joined.
+        */
+       rotation_timer_queue = rotation_thread_timer_queue_create();
+       if (!rotation_timer_queue) {
+               retval = -1;
+               goto stop_threads;
+       }
+       timer_thread_parameters.rotation_thread_job_queue =
+                       rotation_timer_queue;
+
+       ust64_channel_monitor_pipe = lttng_pipe_open(0);
+       if (!ust64_channel_monitor_pipe) {
+               ERR("Failed to create 64-bit user space consumer channel monitor pipe");
+               retval = -1;
+               goto stop_threads;
+       }
+       the_ustconsumer64_data.channel_monitor_pipe =
+                       lttng_pipe_release_writefd(ust64_channel_monitor_pipe);
+       if (the_ustconsumer64_data.channel_monitor_pipe < 0) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /*
+        * Init UST app hash table. Alloc hash table before this point since
+        * cleanup() can get called after that point.
+        */
+       if (ust_app_ht_alloc()) {
+               ERR("Failed to allocate UST app hash table");
+               retval = -1;
+               goto stop_threads;
+       }
+
+       event_notifier_error_accounting_status = event_notifier_error_accounting_init(
+                       the_config.event_notifier_buffer_size_kernel,
+                       the_config.event_notifier_buffer_size_userspace);
+       if (event_notifier_error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+               ERR("Failed to initialize event notifier error accounting system");
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /*
+        * Initialize agent app hash table. We allocate the hash table here
+        * since cleanup() can get called after this point.
+        */
+       if (agent_app_ht_alloc()) {
+               ERR("Failed to allocate Agent app hash table");
+               retval = -1;
+               goto stop_threads;
+       }
+
+       if (agent_by_event_notifier_domain_ht_create()) {
+               ERR("Failed to allocate per-event notifier domain agent hash table");
+               retval = -1;
+               goto stop_threads;
+       }
+       /*
+        * These actions must be executed as root. We do them *after* setting up
+        * the socket paths because we MUST check for another daemon using those
+        * paths *before* trying to set up the kernel consumer sockets and
+        * initialize the kernel tracer.
+        */
+       if (is_root) {
+               if (set_consumer_sockets(&the_kconsumer_data)) {
+                       retval = -1;
+                       goto stop_threads;
+               }
+
+               /* Setup kernel tracer */
+               if (!the_config.no_kernel) {
+                       init_kernel_tracer();
+               }
+
+               /* Set ulimit for open files */
+               set_ulimit();
+       }
+       /* init lttng_fd tracking must be done after set_ulimit. */
+       lttng_fd_init();
+
+       if (set_consumer_sockets(&the_ustconsumer64_data)) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       if (set_consumer_sockets(&the_ustconsumer32_data)) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Get parent pid if -S, --sig-parent is specified. */
+       if (the_config.sig_parent) {
+               the_ppid = getppid();
+       }
+
+       /* Setup the kernel pipe for waking up the kernel thread */
+       if (is_root && !the_config.no_kernel) {
+               if (utils_create_pipe_cloexec(the_kernel_poll_pipe)) {
+                       retval = -1;
+                       goto stop_threads;
+               }
+       }
+
+       /* Setup the thread apps communication pipe. */
+       if (utils_create_pipe_cloexec(apps_cmd_pipe)) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Setup the thread apps notify communication pipe. */
+       if (utils_create_pipe_cloexec(apps_cmd_notify_pipe)) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Initialize the global per-UID and per-PID buffer registries. */
+       buffer_reg_init_uid_registry();
+       buffer_reg_init_pid_registry();
+
+       /* Init UST command queue. */
+       cds_wfcq_init(&ust_cmd_queue.head, &ust_cmd_queue.tail);
+
+       cmd_init();
+
+       /* Check for the application socket timeout env variable. */
+       env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
+       if (env_app_timeout) {
+               the_config.app_socket_timeout = atoi(env_app_timeout);
+       } else {
+               the_config.app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
+       }
+
+       ret = write_pidfile();
+       if (ret) {
+               ERR("Error in write_pidfile");
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Initialize communication library */
+       lttcomm_init();
+       /* Initialize TCP timeout values */
+       lttcomm_inet_init();
+
+       /* Create health-check thread. */
+       if (!launch_health_management_thread()) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* notification_thread_data acquires the pipes' read side. */
+       the_notification_thread_handle = notification_thread_handle_create(
+                       ust32_channel_monitor_pipe, ust64_channel_monitor_pipe,
+                       kernel_channel_monitor_pipe);
+       if (!the_notification_thread_handle) {
+               retval = -1;
+               ERR("Failed to create notification thread shared data");
+               goto stop_threads;
+       }
+
+       /* Create notification thread. */
+       notification_thread = launch_notification_thread(
+                       the_notification_thread_handle);
+       if (!notification_thread) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Create timer thread. */
+       if (!launch_timer_thread(&timer_thread_parameters)) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* rotation_thread_data acquires the pipes' read side. */
+       rotation_thread_handle = rotation_thread_handle_create(
+                       rotation_timer_queue, the_notification_thread_handle);
+       if (!rotation_thread_handle) {
+               retval = -1;
+               ERR("Failed to create rotation thread shared data");
+               stop_threads();
+               goto stop_threads;
+       }
+
+       /* Create rotation thread. */
+       if (!launch_rotation_thread(rotation_thread_handle)) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Create thread to manage the client socket */
+       client_thread = launch_client_thread();
+       if (!client_thread) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Set credentials of the client socket and rundir */
+       if (is_root && set_permissions(the_config.rundir.value)) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       if (!launch_ust_dispatch_thread(&ust_cmd_queue, apps_cmd_pipe[1],
+                       apps_cmd_notify_pipe[1])) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Create thread to manage application registration. */
+       register_apps_thread = launch_application_registration_thread(
+                       &ust_cmd_queue);
+       if (!register_apps_thread) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Create thread to manage application socket */
+       if (!launch_application_management_thread(apps_cmd_pipe[0])) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Create thread to manage application notify socket */
+       if (!launch_application_notification_thread(apps_cmd_notify_pipe[0])) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Create agent management thread. */
+       if (!launch_agent_management_thread()) {
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Don't start this thread if kernel tracing is not requested nor root */
+       if (is_root && !the_config.no_kernel) {
+               /* Create kernel thread to manage kernel event */
+               if (!launch_kernel_management_thread(the_kernel_poll_pipe[0])) {
+                       retval = -1;
+                       goto stop_threads;
+               }
+
+               if (kernel_get_notification_fd() >= 0) {
+                       ret = notification_thread_command_add_tracer_event_source(
+                                       the_notification_thread_handle,
+                                       kernel_get_notification_fd(),
+                                       LTTNG_DOMAIN_KERNEL);
+                       if (ret != LTTNG_OK) {
+                               ERR("Failed to add kernel trigger event source to notification thread");
+                               retval = -1;
+                               goto stop_threads;
+                       }
+               }
+       }
+
+       /* Load sessions. */
+       ret = config_load_session(
+                       the_config.load_session_path.value, NULL, 1, 1, NULL);
+       if (ret) {
+               ERR("Session load failed: %s", error_get_str(ret));
+               retval = -1;
+               goto stop_threads;
+       }
+
+       /* Initialization completed. */
+       sessiond_signal_parents();
+
+       /*
+        * This is where we start awaiting program completion (e.g. through a
+        * signal that asks the threads to tear down).
+        */
+
+       /* Initiate teardown once activity occurs on the quit pipe. */
+       sessiond_wait_for_quit_pipe(-1);
+
+stop_threads:
+
+       /*
+        * Ensure that the client thread is no longer accepting new commands,
+        * which could cause new sessions to be created.
+        */
+       if (client_thread) {
+               lttng_thread_shutdown(client_thread);
+               lttng_thread_put(client_thread);
+       }
+
+       destroy_all_sessions_and_wait();
+
+       /*
+        * At this point no new trigger can be registered (no sessions are
+        * running/rotating) and clients can't connect to the session daemon
+        * anymore. Unregister all triggers.
+        */
+       unregister_all_triggers();
+
+       if (register_apps_thread) {
+               lttng_thread_shutdown(register_apps_thread);
+               lttng_thread_put(register_apps_thread);
+       }
+       lttng_thread_list_shutdown_orphans();
+
+       /*
+        * Wait for all pending call_rcu work to complete before tearing
+        * down data structures. call_rcu worker may be trying to
+        * perform lookups in those structures.
+        */
+       rcu_barrier();
+       /*
+        * sessiond_cleanup() is called when no other thread is running, except
+        * the ht_cleanup thread, which is needed to destroy the hash tables.
+        */
+       rcu_thread_online();
+       sessiond_cleanup();
+
+       /*
+        * Wait for all pending call_rcu work to complete before shutting down
+        * the notification thread. This call_rcu work includes shutting down
+        * UST apps and event notifier pipes.
+        */
+       rcu_barrier();
+
+       if (notification_thread) {
+               lttng_thread_shutdown(notification_thread);
+               lttng_thread_put(notification_thread);
+       }
+
+       /*
+        * Error accounting teardown has to be done after the teardown of all
+        * event notifier pipes to ensure that no tracer may try to use the
+        * error accounting facilities.
+        */
+       event_notifier_error_accounting_fini();
+
+       /*
+        * Unloading the kernel modules needs to be done after all kernel
+        * resources have been released. In our case, this includes the
+        * notification fd, the event notifier group fd, error accounting fd,
+        * all event and event notifier fds, etc.
+        *
+        * In short, at this point, we need to have called close() on all fds
+        * received from the kernel tracer.
+        */
+       if (is_root && !the_config.no_kernel) {
+               DBG("Unloading kernel modules");
+               modprobe_remove_lttng_all();
+       }
+
+       /*
+        * Ensure all prior call_rcu are done. call_rcu callbacks may push
+        * hash tables to the ht_cleanup thread. Therefore, we ensure that
+        * the queue is empty before shutting down the clean-up thread.
+        */
+       rcu_barrier();
+
+       if (ht_cleanup_thread) {
+               lttng_thread_shutdown(ht_cleanup_thread);
+               lttng_thread_put(ht_cleanup_thread);
+       }
+
+       rcu_thread_offline();
+       rcu_unregister_thread();
+
+       if (rotation_thread_handle) {
+               rotation_thread_handle_destroy(rotation_thread_handle);
+       }
+
+       /*
+        * After the rotation and timer thread have quit, we can safely destroy
+        * the rotation_timer_queue.
+        */
+       rotation_thread_timer_queue_destroy(rotation_timer_queue);
+       /*
+        * The teardown of the notification system is performed after the
+        * session daemon's teardown in order to allow it to be notified
+        * of the active session and channels at the moment of the teardown.
+        */
+       if (the_notification_thread_handle) {
+               notification_thread_handle_destroy(
+                               the_notification_thread_handle);
+       }
+       lttng_pipe_destroy(ust32_channel_monitor_pipe);
+       lttng_pipe_destroy(ust64_channel_monitor_pipe);
+       lttng_pipe_destroy(kernel_channel_monitor_pipe);
+
+       if (the_health_sessiond) {
+               health_app_destroy(the_health_sessiond);
+       }
+exit_create_run_as_worker_cleanup:
+exit_options:
+       sessiond_cleanup_lock_file();
+       sessiond_cleanup_options();
+
+exit_set_signal_handler:
+       if (!retval) {
+               exit(EXIT_SUCCESS);
+       } else {
+               exit(EXIT_FAILURE);
+       }
+}
diff --git a/src/bin/lttng-sessiond/manage-apps.c b/src/bin/lttng-sessiond/manage-apps.c
deleted file mode 100644 (file)
index e2f3ed2..0000000
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include "manage-apps.h"
-#include "testpoint.h"
-#include "health-sessiond.h"
-#include "utils.h"
-#include "thread.h"
-
-struct thread_notifiers {
-       struct lttng_pipe *quit_pipe;
-       int apps_cmd_pipe_read_fd;
-};
-
-static void cleanup_application_management_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-
-       lttng_pipe_destroy(notifiers->quit_pipe);
-       free(notifiers);
-}
-
-/*
- * This thread receives application command sockets (FDs) on the
- * apps_cmd_pipe and waits (polls) on them until they are closed
- * or an error occurs.
- *
- * At that point, it flushes the data (tracing and metadata) associated
- * with this application and tears down ust app sessions and other
- * associated data structures through ust_app_unregister().
- *
- * Note that this thread never sends commands to the applications
- * through the command sockets; it merely listens for hang-ups
- * and errors on those sockets and cleans-up as they occur.
- */
-static void *thread_application_management(void *data)
-{
-       int i, ret, pollfd, err = -1;
-       ssize_t size_ret;
-       uint32_t revents, nb_fd;
-       struct lttng_poll_event events;
-       struct thread_notifiers *notifiers = data;
-       const int quit_pipe_read_fd = lttng_pipe_get_readfd(
-                       notifiers->quit_pipe);
-
-       DBG("[thread] Manage application started");
-
-       rcu_register_thread();
-       rcu_thread_online();
-
-       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);
-
-       if (testpoint(sessiond_thread_manage_apps)) {
-               goto error_testpoint;
-       }
-
-       health_code_update();
-
-       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               goto error_poll_create;
-       }
-
-       ret = lttng_poll_add(&events, notifiers->apps_cmd_pipe_read_fd,
-                       LPOLLIN | LPOLLRDHUP);
-       if (ret < 0) {
-               goto error;
-       }
-
-       ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               goto error;
-       }
-
-       if (testpoint(sessiond_thread_manage_apps_before_loop)) {
-               goto error;
-       }
-
-       health_code_update();
-
-       while (1) {
-               DBG("Apps thread polling");
-
-               /* Inifinite blocking call, waiting for transmission */
-       restart:
-               health_poll_entry();
-               ret = lttng_poll_wait(&events, -1);
-               DBG("Apps thread return from poll on %d fds",
-                               LTTNG_POLL_GETNB(&events));
-               health_poll_exit();
-               if (ret < 0) {
-                       /*
-                        * Restart interrupted system call.
-                        */
-                       if (errno == EINTR) {
-                               goto restart;
-                       }
-                       goto error;
-               }
-
-               nb_fd = ret;
-
-               for (i = 0; i < nb_fd; i++) {
-                       /* Fetch once the poll data */
-                       revents = LTTNG_POLL_GETEV(&events, i);
-                       pollfd = LTTNG_POLL_GETFD(&events, i);
-
-                       health_code_update();
-
-                       if (pollfd == quit_pipe_read_fd) {
-                               err = 0;
-                               goto exit;
-                       } else if (pollfd == notifiers->apps_cmd_pipe_read_fd) {
-                               /* Inspect the apps cmd pipe */
-                               if (revents & LPOLLIN) {
-                                       int sock;
-
-                                       /* Empty pipe */
-                                       size_ret = lttng_read(
-                                                       notifiers->apps_cmd_pipe_read_fd,
-                                                       &sock, sizeof(sock));
-                                       if (size_ret < sizeof(sock)) {
-                                               PERROR("read apps cmd pipe");
-                                               goto error;
-                                       }
-
-                                       health_code_update();
-
-                                       /*
-                                        * Since this is a command socket (write then read),
-                                        * we only monitor the error events of the socket.
-                                        */
-                                       ret = lttng_poll_add(&events, sock,
-                                                       LPOLLERR | LPOLLHUP | LPOLLRDHUP);
-                                       if (ret < 0) {
-                                               goto error;
-                                       }
-
-                                       DBG("Apps with sock %d added to poll set", sock);
-                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                                       ERR("Apps command pipe error");
-                                       goto error;
-                               } else {
-                                       ERR("Unknown poll events %u for sock %d", revents, pollfd);
-                                       goto error;
-                               }
-                       } else {
-                               /*
-                                * At this point, we know that a registered application made
-                                * the event at poll_wait.
-                                */
-                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                                       /* Removing from the poll set */
-                                       ret = lttng_poll_del(&events, pollfd);
-                                       if (ret < 0) {
-                                               goto error;
-                                       }
-
-                                       /* Socket closed on remote end. */
-                                       ust_app_unregister(pollfd);
-                               } else {
-                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                                       goto error;
-                               }
-                       }
-
-                       health_code_update();
-               }
-       }
-
-exit:
-error:
-       lttng_poll_clean(&events);
-error_poll_create:
-error_testpoint:
-
-       /*
-        * We don't clean the UST app hash table here since already registered
-        * applications can still be controlled so let them be until the session
-        * daemon dies or the applications stop.
-        */
-
-       if (err) {
-               health_error();
-               ERR("Health error occurred in %s", __func__);
-       }
-       health_unregister(the_health_sessiond);
-       DBG("Application communication apps thread cleanup complete");
-       rcu_thread_offline();
-       rcu_unregister_thread();
-       return NULL;
-}
-
-static bool shutdown_application_management_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
-
-       return notify_thread_pipe(write_fd) == 1;
-}
-
-bool launch_application_management_thread(int apps_cmd_pipe_read_fd)
-{
-       struct lttng_pipe *quit_pipe;
-       struct thread_notifiers *notifiers = NULL;
-       struct lttng_thread *thread;
-
-       notifiers = zmalloc(sizeof(*notifiers));
-       if (!notifiers) {
-               goto error_alloc;
-       }
-       quit_pipe = lttng_pipe_open(FD_CLOEXEC);
-       if (!quit_pipe) {
-               goto error;
-       }
-       notifiers->quit_pipe = quit_pipe;
-       notifiers->apps_cmd_pipe_read_fd = apps_cmd_pipe_read_fd;
-
-       thread = lttng_thread_create("UST application management",
-                       thread_application_management,
-                       shutdown_application_management_thread,
-                       cleanup_application_management_thread,
-                       notifiers);
-       if (!thread) {
-               goto error;
-       }
-
-       lttng_thread_put(thread);
-       return true;
-error:
-       cleanup_application_management_thread(notifiers);
-error_alloc:
-       return false;
-}
diff --git a/src/bin/lttng-sessiond/manage-apps.cpp b/src/bin/lttng-sessiond/manage-apps.cpp
new file mode 100644 (file)
index 0000000..cff56cb
--- /dev/null
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include "manage-apps.h"
+#include "testpoint.h"
+#include "health-sessiond.h"
+#include "utils.h"
+#include "thread.h"
+
+struct thread_notifiers {
+       struct lttng_pipe *quit_pipe;
+       int apps_cmd_pipe_read_fd;
+};
+
+static void cleanup_application_management_thread(void *data)
+{
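+       /* C++ does not implicitly convert void *; cast the opaque data pointer explicitly. */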
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+
+       lttng_pipe_destroy(notifiers->quit_pipe);
+       free(notifiers);
+}
+
+/*
+ * This thread receives application command sockets (FDs) on the
+ * apps_cmd_pipe and waits (polls) on them until they are closed
+ * or an error occurs.
+ *
+ * At that point, it flushes the data (tracing and metadata) associated
+ * with this application and tears down ust app sessions and other
+ * associated data structures through ust_app_unregister().
+ *
+ * Note that this thread never sends commands to the applications
+ * through the command sockets; it merely listens for hang-ups
+ * and errors on those sockets and cleans up as they occur.
+ */
+static void *thread_application_management(void *data)
+{
+       int i, ret, pollfd, err = -1;
+       ssize_t size_ret;
+       uint32_t revents, nb_fd;
+       struct lttng_poll_event events;
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       const int quit_pipe_read_fd = lttng_pipe_get_readfd(
+                       notifiers->quit_pipe);
+
+       DBG("[thread] Manage application started");
+
+       rcu_register_thread();
+       rcu_thread_online();
+
+       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);
+
+       if (testpoint(sessiond_thread_manage_apps)) {
+               goto error_testpoint;
+       }
+
+       health_code_update();
+
+       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error_poll_create;
+       }
+
+       ret = lttng_poll_add(&events, notifiers->apps_cmd_pipe_read_fd,
+                       LPOLLIN | LPOLLRDHUP);
+       if (ret < 0) {
+               goto error;
+       }
+
+       ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               goto error;
+       }
+
+       if (testpoint(sessiond_thread_manage_apps_before_loop)) {
+               goto error;
+       }
+
+       health_code_update();
+
+       while (1) {
+               DBG("Apps thread polling");
+
+               /* Infinite blocking call, waiting for transmission */
+       restart:
+               health_poll_entry();
+               ret = lttng_poll_wait(&events, -1);
+               DBG("Apps thread return from poll on %d fds",
+                               LTTNG_POLL_GETNB(&events));
+               health_poll_exit();
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               goto restart;
+                       }
+                       goto error;
+               }
+
+               nb_fd = ret;
+
+               for (i = 0; i < nb_fd; i++) {
+                       /* Fetch once the poll data */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
+
+                       health_code_update();
+
+                       if (pollfd == quit_pipe_read_fd) {
+                               err = 0;
+                               goto exit;
+                       } else if (pollfd == notifiers->apps_cmd_pipe_read_fd) {
+                               /* Inspect the apps cmd pipe */
+                               if (revents & LPOLLIN) {
+                                       int sock;
+
+                                       /* Empty pipe */
+                                       size_ret = lttng_read(
+                                                       notifiers->apps_cmd_pipe_read_fd,
+                                                       &sock, sizeof(sock));
+                                       if (size_ret < sizeof(sock)) {
+                                               PERROR("read apps cmd pipe");
+                                               goto error;
+                                       }
+
+                                       health_code_update();
+
+                                       /*
+                                        * Since this is a command socket (write then read),
+                                        * we only monitor the error events of the socket.
+                                        */
+                                       ret = lttng_poll_add(&events, sock,
+                                                       LPOLLERR | LPOLLHUP | LPOLLRDHUP);
+                                       if (ret < 0) {
+                                               goto error;
+                                       }
+
+                                       DBG("Apps with sock %d added to poll set", sock);
+                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       ERR("Apps command pipe error");
+                                       goto error;
+                               } else {
+                                       ERR("Unknown poll events %u for sock %d", revents, pollfd);
+                                       goto error;
+                               }
+                       } else {
+                               /*
+                                * At this point, we know that a registered application made
+                                * the event at poll_wait.
+                                */
+                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       /* Removing from the poll set */
+                                       ret = lttng_poll_del(&events, pollfd);
+                                       if (ret < 0) {
+                                               goto error;
+                                       }
+
+                                       /* Socket closed on remote end. */
+                                       ust_app_unregister(pollfd);
+                               } else {
+                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                                       goto error;
+                               }
+                       }
+
+                       health_code_update();
+               }
+       }
+
+exit:
+error:
+       lttng_poll_clean(&events);
+error_poll_create:
+error_testpoint:
+
+       /*
+        * We don't clean the UST app hash table here since already registered
+        * applications can still be controlled so let them be until the session
+        * daemon dies or the applications stop.
+        */
+
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister(the_health_sessiond);
+       DBG("Application communication apps thread cleanup complete");
+       rcu_thread_offline();
+       rcu_unregister_thread();
+       return NULL;
+}
+
+static bool shutdown_application_management_thread(void *data)
+{
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
+
+       return notify_thread_pipe(write_fd) == 1;
+}
+
+bool launch_application_management_thread(int apps_cmd_pipe_read_fd)
+{
+       struct lttng_pipe *quit_pipe;
+       struct thread_notifiers *notifiers = NULL;
+       struct lttng_thread *thread;
+
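+       /* zmalloc() returns void *, which C++ will not convert implicitly; cast it explicitly. */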
+       notifiers = (thread_notifiers *) zmalloc(sizeof(*notifiers));
+       if (!notifiers) {
+               goto error_alloc;
+       }
+       quit_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!quit_pipe) {
+               goto error;
+       }
+       notifiers->quit_pipe = quit_pipe;
+       notifiers->apps_cmd_pipe_read_fd = apps_cmd_pipe_read_fd;
+
+       thread = lttng_thread_create("UST application management",
+                       thread_application_management,
+                       shutdown_application_management_thread,
+                       cleanup_application_management_thread,
+                       notifiers);
+       if (!thread) {
+               goto error;
+       }
+
+       lttng_thread_put(thread);
+       return true;
+error:
+       cleanup_application_management_thread(notifiers);
+error_alloc:
+       return false;
+}
diff --git a/src/bin/lttng-sessiond/manage-consumer.c b/src/bin/lttng-sessiond/manage-consumer.c
deleted file mode 100644 (file)
index 719d42b..0000000
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <signal.h>
-
-#include <common/pipe.h>
-#include <common/utils.h>
-
-#include "manage-consumer.h"
-#include "testpoint.h"
-#include "health-sessiond.h"
-#include "utils.h"
-#include "thread.h"
-#include "ust-consumer.h"
-
-struct thread_notifiers {
-       struct lttng_pipe *quit_pipe;
-       struct consumer_data *consumer_data;
-       sem_t ready;
-       int initialization_result;
-};
-
-static void mark_thread_as_ready(struct thread_notifiers *notifiers)
-{
-       DBG("Marking consumer management thread as ready");
-       notifiers->initialization_result = 0;
-       sem_post(&notifiers->ready);
-}
-
-static void mark_thread_intialization_as_failed(
-               struct thread_notifiers *notifiers)
-{
-       ERR("Consumer management thread entering error state");
-       notifiers->initialization_result = -1;
-       sem_post(&notifiers->ready);
-}
-
-static void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
-{
-       DBG("Waiting for consumer management thread to be ready");
-       sem_wait(&notifiers->ready);
-       DBG("Consumer management thread is ready");
-}
-
-/*
- * This thread manage the consumer error sent back to the session daemon.
- */
-static void *thread_consumer_management(void *data)
-{
-       int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
-       uint32_t revents, nb_fd;
-       enum lttcomm_return_code code;
-       struct lttng_poll_event events;
-       struct thread_notifiers *notifiers = data;
-       struct consumer_data *consumer_data = notifiers->consumer_data;
-       const int quit_pipe_read_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);
-       struct consumer_socket *cmd_socket_wrapper = NULL;
-
-       DBG("[thread] Manage consumer started");
-
-       rcu_register_thread();
-       rcu_thread_online();
-
-       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);
-
-       health_code_update();
-
-       /*
-        * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
-        * metadata_sock. Nothing more will be added to this poll set.
-        */
-       ret = lttng_poll_create(&events, 3, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error_poll;
-       }
-
-       ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       /*
-        * The error socket here is already in a listening state which was done
-        * just before spawning this thread to avoid a race between the consumer
-        * daemon exec trying to connect and the listen() call.
-        */
-       ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
-       if (ret < 0) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       health_code_update();
-
-       /* Infinite blocking call, waiting for transmission */
-       health_poll_entry();
-
-       if (testpoint(sessiond_thread_manage_consumer)) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       ret = lttng_poll_wait(&events, -1);
-       health_poll_exit();
-       if (ret < 0) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       nb_fd = ret;
-
-       for (i = 0; i < nb_fd; i++) {
-               /* Fetch once the poll data */
-               revents = LTTNG_POLL_GETEV(&events, i);
-               pollfd = LTTNG_POLL_GETFD(&events, i);
-
-               health_code_update();
-
-               /* Thread quit pipe has been closed. Killing thread. */
-               if (pollfd == quit_pipe_read_fd) {
-                       err = 0;
-                       mark_thread_intialization_as_failed(notifiers);
-                       goto exit;
-               } else if (pollfd == consumer_data->err_sock) {
-                       /* Event on the registration socket */
-                       if (revents & LPOLLIN) {
-                               continue;
-                       } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                               ERR("consumer err socket poll error");
-                               mark_thread_intialization_as_failed(notifiers);
-                               goto error;
-                       } else {
-                               ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                               mark_thread_intialization_as_failed(notifiers);
-                               goto error;
-                       }
-               }
-       }
-
-       sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
-       if (sock < 0) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       /*
-        * Set the CLOEXEC flag. Return code is useless because either way, the
-        * show must go on.
-        */
-       (void) utils_set_fd_cloexec(sock);
-
-       health_code_update();
-
-       DBG2("Receiving code from consumer err_sock");
-
-       /* Getting status code from kconsumerd */
-       ret = lttcomm_recv_unix_sock(sock, &code,
-                       sizeof(enum lttcomm_return_code));
-       if (ret <= 0) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       health_code_update();
-       if (code != LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
-               ERR("consumer error when waiting for SOCK_READY : %s",
-                               lttcomm_get_readable_code(-code));
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       /* Connect both command and metadata sockets. */
-       consumer_data->cmd_sock =
-                       lttcomm_connect_unix_sock(
-                               consumer_data->cmd_unix_sock_path);
-       consumer_data->metadata_fd =
-                       lttcomm_connect_unix_sock(
-                               consumer_data->cmd_unix_sock_path);
-       if (consumer_data->cmd_sock < 0 || consumer_data->metadata_fd < 0) {
-               PERROR("consumer connect cmd socket");
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
-
-       /* Create metadata socket lock. */
-       consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
-       if (consumer_data->metadata_sock.lock == NULL) {
-               PERROR("zmalloc pthread mutex");
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-       pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);
-
-       DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
-       DBG("Consumer metadata socket ready (fd: %d)",
-                       consumer_data->metadata_fd);
-
-       /*
-        * Remove the consumerd error sock since we've established a connection.
-        */
-       ret = lttng_poll_del(&events, consumer_data->err_sock);
-       if (ret < 0) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       /* Add new accepted error socket. */
-       ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
-       if (ret < 0) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       /* Add metadata socket that is successfully connected. */
-       ret = lttng_poll_add(&events, consumer_data->metadata_fd,
-                       LPOLLIN | LPOLLRDHUP);
-       if (ret < 0) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       health_code_update();
-
-       /*
-        * Transfer the write-end of the channel monitoring pipe to the consumer
-        * by issuing a SET_CHANNEL_MONITOR_PIPE command.
-        */
-       cmd_socket_wrapper = consumer_allocate_socket(&consumer_data->cmd_sock);
-       if (!cmd_socket_wrapper) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-       cmd_socket_wrapper->lock = &consumer_data->lock;
-
-       pthread_mutex_lock(cmd_socket_wrapper->lock);
-       ret = consumer_init(cmd_socket_wrapper, the_sessiond_uuid);
-       if (ret) {
-               ERR("Failed to send sessiond uuid to consumer daemon");
-               mark_thread_intialization_as_failed(notifiers);
-               pthread_mutex_unlock(cmd_socket_wrapper->lock);
-               goto error;
-       }
-       pthread_mutex_unlock(cmd_socket_wrapper->lock);
-
-       ret = consumer_send_channel_monitor_pipe(cmd_socket_wrapper,
-                       consumer_data->channel_monitor_pipe);
-       if (ret) {
-               mark_thread_intialization_as_failed(notifiers);
-               goto error;
-       }
-
-       /* Discard the socket wrapper as it is no longer needed. */
-       consumer_destroy_socket(cmd_socket_wrapper);
-       cmd_socket_wrapper = NULL;
-
-       /* The thread is completely initialized, signal that it is ready. */
-       mark_thread_as_ready(notifiers);
-
-       /* Infinite blocking call, waiting for transmission */
-       while (1) {
-               health_code_update();
-
-               /* Exit the thread because the thread quit pipe has been triggered. */
-               if (should_quit) {
-                       /* Not a health error. */
-                       err = 0;
-                       goto exit;
-               }
-
-               health_poll_entry();
-               ret = lttng_poll_wait(&events, -1);
-               health_poll_exit();
-               if (ret < 0) {
-                       goto error;
-               }
-
-               nb_fd = ret;
-
-               for (i = 0; i < nb_fd; i++) {
-                       /* Fetch once the poll data */
-                       revents = LTTNG_POLL_GETEV(&events, i);
-                       pollfd = LTTNG_POLL_GETFD(&events, i);
-
-                       health_code_update();
-
-                       /*
-                        * Thread quit pipe has been triggered, flag that we should stop
-                        * but continue the current loop to handle potential data from
-                        * consumer.
-                        */
-                       if (pollfd == quit_pipe_read_fd) {
-                               should_quit = 1;
-                       } else if (pollfd == sock) {
-                               /* Event on the consumerd socket */
-                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
-                                               && !(revents & LPOLLIN)) {
-                                       ERR("consumer err socket second poll error");
-                                       goto error;
-                               }
-                               health_code_update();
-                               /* Wait for any kconsumerd error */
-                               ret = lttcomm_recv_unix_sock(sock, &code,
-                                               sizeof(enum lttcomm_return_code));
-                               if (ret <= 0) {
-                                       ERR("consumer closed the command socket");
-                                       goto error;
-                               }
-
-                               ERR("consumer return code : %s",
-                                               lttcomm_get_readable_code(-code));
-
-                               goto exit;
-                       } else if (pollfd == consumer_data->metadata_fd) {
-                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
-                                               && !(revents & LPOLLIN)) {
-                                       ERR("consumer err metadata socket second poll error");
-                                       goto error;
-                               }
-                               /* UST metadata requests */
-                               ret = ust_consumer_metadata_request(
-                                               &consumer_data->metadata_sock);
-                               if (ret < 0) {
-                                       ERR("Handling metadata request");
-                                       goto error;
-                               }
-                       }
-                       /* No need for an else branch all FDs are tested prior. */
-               }
-               health_code_update();
-       }
-
-exit:
-error:
-       /*
-        * We lock here because we are about to close the sockets and some other
-        * thread might be using them so get exclusive access which will abort all
-        * other consumer command by other threads.
-        */
-       pthread_mutex_lock(&consumer_data->lock);
-
-       /* Immediately set the consumerd state to stopped */
-       if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
-               uatomic_set(&the_kernel_consumerd_state, CONSUMER_ERROR);
-       } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
-                       consumer_data->type == LTTNG_CONSUMER32_UST) {
-               uatomic_set(&the_ust_consumerd_state, CONSUMER_ERROR);
-       } else {
-               /* Code flow error... */
-               abort();
-       }
-
-       if (consumer_data->err_sock >= 0) {
-               ret = close(consumer_data->err_sock);
-               if (ret) {
-                       PERROR("close");
-               }
-               consumer_data->err_sock = -1;
-       }
-       if (consumer_data->cmd_sock >= 0) {
-               ret = close(consumer_data->cmd_sock);
-               if (ret) {
-                       PERROR("close");
-               }
-               consumer_data->cmd_sock = -1;
-       }
-       if (consumer_data->metadata_sock.fd_ptr &&
-           *consumer_data->metadata_sock.fd_ptr >= 0) {
-               ret = close(*consumer_data->metadata_sock.fd_ptr);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-       if (sock >= 0) {
-               ret = close(sock);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-
-       unlink(consumer_data->err_unix_sock_path);
-       unlink(consumer_data->cmd_unix_sock_path);
-       pthread_mutex_unlock(&consumer_data->lock);
-
-       /* Cleanup metadata socket mutex. */
-       if (consumer_data->metadata_sock.lock) {
-               pthread_mutex_destroy(consumer_data->metadata_sock.lock);
-               free(consumer_data->metadata_sock.lock);
-       }
-       lttng_poll_clean(&events);
-
-       if (cmd_socket_wrapper) {
-               consumer_destroy_socket(cmd_socket_wrapper);
-       }
-error_poll:
-       if (err) {
-               health_error();
-               ERR("Health error occurred in %s", __func__);
-       }
-       health_unregister(the_health_sessiond);
-       DBG("consumer thread cleanup completed");
-
-       rcu_thread_offline();
-       rcu_unregister_thread();
-
-       return NULL;
-}
-
-static bool shutdown_consumer_management_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
-
-       return notify_thread_pipe(write_fd) == 1;
-}
-
-static void cleanup_consumer_management_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-
-       lttng_pipe_destroy(notifiers->quit_pipe);
-       free(notifiers);
-}
-
-bool launch_consumer_management_thread(struct consumer_data *consumer_data)
-{
-       struct lttng_pipe *quit_pipe;
-       struct thread_notifiers *notifiers = NULL;
-       struct lttng_thread *thread;
-
-       notifiers = zmalloc(sizeof(*notifiers));
-       if (!notifiers) {
-               goto error_alloc;
-       }
-
-       quit_pipe = lttng_pipe_open(FD_CLOEXEC);
-       if (!quit_pipe) {
-               goto error;
-       }
-       notifiers->quit_pipe = quit_pipe;
-       notifiers->consumer_data = consumer_data;
-       sem_init(&notifiers->ready, 0, 0);
-
-       thread = lttng_thread_create("Consumer management",
-                       thread_consumer_management,
-                       shutdown_consumer_management_thread,
-                       cleanup_consumer_management_thread,
-                       notifiers);
-       if (!thread) {
-               goto error;
-       }
-       wait_until_thread_is_ready(notifiers);
-       lttng_thread_put(thread);
-       if (notifiers->initialization_result) {
-               return false;
-       }
-       return true;
-error:
-       cleanup_consumer_management_thread(notifiers);
-error_alloc:
-       return false;
-}
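
The manage-consumer.cpp file that follows re-creates the same consumer management thread in C++. One pattern worth noting before reading it is the initialization handshake: the launcher blocks on a semaphore until the thread either marks itself as ready or marks its initialization as failed, and only then reports success or failure to its caller. Below is a minimal, self-contained sketch of that handshake; the names (worker_state, worker_main) are illustrative stand-ins, not types from the tree.

    #include <pthread.h>
    #include <semaphore.h>
    #include <cstdio>

    /* Hypothetical stand-in for the per-thread notifier structure. */
    struct worker_state {
            sem_t ready;
            int initialization_result;
    };

    static void *worker_main(void *data)
    {
            /* C++ requires the explicit cast from void *. */
            worker_state *state = (worker_state *) data;

            /* Pretend initialization succeeded; a failure path would store -1. */
            state->initialization_result = 0;
            sem_post(&state->ready);
            return nullptr;
    }

    int main()
    {
            worker_state state;
            pthread_t thread;

            sem_init(&state.ready, 0, 0);
            state.initialization_result = -1;

            if (pthread_create(&thread, nullptr, worker_main, &state) != 0) {
                    return 1;
            }

            /* Only report once the worker has signalled readiness or failure. */
            sem_wait(&state.ready);
            std::printf("worker initialization %s\n",
                            state.initialization_result == 0 ? "succeeded" : "failed");

            pthread_join(thread, nullptr);
            sem_destroy(&state.ready);
            return state.initialization_result == 0 ? 0 : 1;
    }
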
diff --git a/src/bin/lttng-sessiond/manage-consumer.cpp b/src/bin/lttng-sessiond/manage-consumer.cpp
new file mode 100644 (file)
index 0000000..d7df6ce
--- /dev/null
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <signal.h>
+
+#include <common/pipe.h>
+#include <common/utils.h>
+
+#include "manage-consumer.h"
+#include "testpoint.h"
+#include "health-sessiond.h"
+#include "utils.h"
+#include "thread.h"
+#include "ust-consumer.h"
+
+struct thread_notifiers {
+       struct lttng_pipe *quit_pipe;
+       struct consumer_data *consumer_data;
+       sem_t ready;
+       int initialization_result;
+};
+
+static void mark_thread_as_ready(struct thread_notifiers *notifiers)
+{
+       DBG("Marking consumer management thread as ready");
+       notifiers->initialization_result = 0;
+       sem_post(&notifiers->ready);
+}
+
+static void mark_thread_intialization_as_failed(
+               struct thread_notifiers *notifiers)
+{
+       ERR("Consumer management thread entering error state");
+       notifiers->initialization_result = -1;
+       sem_post(&notifiers->ready);
+}
+
+static void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
+{
+       DBG("Waiting for consumer management thread to be ready");
+       sem_wait(&notifiers->ready);
+       DBG("Consumer management thread is ready");
+}
+
+/*
+ * This thread manages the consumer errors sent back to the session daemon.
+ */
+static void *thread_consumer_management(void *data)
+{
+       int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
+       uint32_t revents, nb_fd;
+       enum lttcomm_return_code code;
+       struct lttng_poll_event events;
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       struct consumer_data *consumer_data = notifiers->consumer_data;
+       const int quit_pipe_read_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);
+       struct consumer_socket *cmd_socket_wrapper = NULL;
+
+       DBG("[thread] Manage consumer started");
+
+       rcu_register_thread();
+       rcu_thread_online();
+
+       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);
+
+       health_code_update();
+
+       /*
+        * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
+        * metadata_sock. Nothing more will be added to this poll set.
+        */
+       ret = lttng_poll_create(&events, 3, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error_poll;
+       }
+
+       ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       /*
+        * The error socket was put in a listening state just before this thread
+        * was spawned, to avoid a race between the consumer daemon's exec trying
+        * to connect and the listen() call.
+        */
+       ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
+       if (ret < 0) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       health_code_update();
+
+       /* Infinite blocking call, waiting for transmission */
+       health_poll_entry();
+
+       if (testpoint(sessiond_thread_manage_consumer)) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       ret = lttng_poll_wait(&events, -1);
+       health_poll_exit();
+       if (ret < 0) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       nb_fd = ret;
+
+       for (i = 0; i < nb_fd; i++) {
+               /* Fetch once the poll data */
+               revents = LTTNG_POLL_GETEV(&events, i);
+               pollfd = LTTNG_POLL_GETFD(&events, i);
+
+               health_code_update();
+
+               /* Thread quit pipe has been closed. Killing thread. */
+               if (pollfd == quit_pipe_read_fd) {
+                       err = 0;
+                       mark_thread_intialization_as_failed(notifiers);
+                       goto exit;
+               } else if (pollfd == consumer_data->err_sock) {
+                       /* Event on the registration socket */
+                       if (revents & LPOLLIN) {
+                               continue;
+                       } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                               ERR("consumer err socket poll error");
+                               mark_thread_intialization_as_failed(notifiers);
+                               goto error;
+                       } else {
+                               ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                               mark_thread_intialization_as_failed(notifiers);
+                               goto error;
+                       }
+               }
+       }
+
+       sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
+       if (sock < 0) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       /*
+        * Set the CLOEXEC flag. Return code is useless because either way, the
+        * show must go on.
+        */
+       (void) utils_set_fd_cloexec(sock);
+
+       health_code_update();
+
+       DBG2("Receiving code from consumer err_sock");
+
+       /* Getting status code from kconsumerd */
+       ret = lttcomm_recv_unix_sock(sock, &code,
+                       sizeof(enum lttcomm_return_code));
+       if (ret <= 0) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       health_code_update();
+       if (code != LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
+               ERR("consumer error when waiting for SOCK_READY : %s",
+                               lttcomm_get_readable_code((lttcomm_return_code) -code));
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       /* Connect both command and metadata sockets. */
+       consumer_data->cmd_sock =
+                       lttcomm_connect_unix_sock(
+                               consumer_data->cmd_unix_sock_path);
+       consumer_data->metadata_fd =
+                       lttcomm_connect_unix_sock(
+                               consumer_data->cmd_unix_sock_path);
+       if (consumer_data->cmd_sock < 0 || consumer_data->metadata_fd < 0) {
+               PERROR("consumer connect cmd socket");
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
+
+       /* Create metadata socket lock. */
+       consumer_data->metadata_sock.lock = (pthread_mutex_t *) zmalloc(sizeof(pthread_mutex_t));
+       if (consumer_data->metadata_sock.lock == NULL) {
+               PERROR("zmalloc pthread mutex");
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+       pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);
+
+       DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
+       DBG("Consumer metadata socket ready (fd: %d)",
+                       consumer_data->metadata_fd);
+
+       /*
+        * Remove the consumerd error sock since we've established a connection.
+        */
+       ret = lttng_poll_del(&events, consumer_data->err_sock);
+       if (ret < 0) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       /* Add new accepted error socket. */
+       ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
+       if (ret < 0) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       /* Add metadata socket that is successfully connected. */
+       ret = lttng_poll_add(&events, consumer_data->metadata_fd,
+                       LPOLLIN | LPOLLRDHUP);
+       if (ret < 0) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       health_code_update();
+
+       /*
+        * Transfer the write-end of the channel monitoring pipe to the consumer
+        * by issuing a SET_CHANNEL_MONITOR_PIPE command.
+        */
+       cmd_socket_wrapper = consumer_allocate_socket(&consumer_data->cmd_sock);
+       if (!cmd_socket_wrapper) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+       cmd_socket_wrapper->lock = &consumer_data->lock;
+
+       pthread_mutex_lock(cmd_socket_wrapper->lock);
+       ret = consumer_init(cmd_socket_wrapper, the_sessiond_uuid);
+       if (ret) {
+               ERR("Failed to send sessiond uuid to consumer daemon");
+               mark_thread_intialization_as_failed(notifiers);
+               pthread_mutex_unlock(cmd_socket_wrapper->lock);
+               goto error;
+       }
+       pthread_mutex_unlock(cmd_socket_wrapper->lock);
+
+       ret = consumer_send_channel_monitor_pipe(cmd_socket_wrapper,
+                       consumer_data->channel_monitor_pipe);
+       if (ret) {
+               mark_thread_intialization_as_failed(notifiers);
+               goto error;
+       }
+
+       /* Discard the socket wrapper as it is no longer needed. */
+       consumer_destroy_socket(cmd_socket_wrapper);
+       cmd_socket_wrapper = NULL;
+
+       /* The thread is completely initialized, signal that it is ready. */
+       mark_thread_as_ready(notifiers);
+
+       /* Infinite blocking call, waiting for transmission */
+       while (1) {
+               health_code_update();
+
+               /* Exit the thread because the thread quit pipe has been triggered. */
+               if (should_quit) {
+                       /* Not a health error. */
+                       err = 0;
+                       goto exit;
+               }
+
+               health_poll_entry();
+               ret = lttng_poll_wait(&events, -1);
+               health_poll_exit();
+               if (ret < 0) {
+                       goto error;
+               }
+
+               nb_fd = ret;
+
+               for (i = 0; i < nb_fd; i++) {
+                       /* Fetch once the poll data */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
+
+                       health_code_update();
+
+                       /*
+                        * Thread quit pipe has been triggered, flag that we should stop
+                        * but continue the current loop to handle potential data from
+                        * consumer.
+                        */
+                       if (pollfd == quit_pipe_read_fd) {
+                               should_quit = 1;
+                       } else if (pollfd == sock) {
+                               /* Event on the consumerd socket */
+                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
+                                               && !(revents & LPOLLIN)) {
+                                       ERR("consumer err socket second poll error");
+                                       goto error;
+                               }
+                               health_code_update();
+                               /* Wait for any kconsumerd error */
+                               ret = lttcomm_recv_unix_sock(sock, &code,
+                                               sizeof(enum lttcomm_return_code));
+                               if (ret <= 0) {
+                                       ERR("consumer closed the command socket");
+                                       goto error;
+                               }
+
+                               ERR("consumer return code : %s",
+                                               lttcomm_get_readable_code((lttcomm_return_code) -code));
+
+                               goto exit;
+                       } else if (pollfd == consumer_data->metadata_fd) {
+                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
+                                               && !(revents & LPOLLIN)) {
+                                       ERR("consumer err metadata socket second poll error");
+                                       goto error;
+                               }
+                               /* UST metadata requests */
+                               ret = ust_consumer_metadata_request(
+                                               &consumer_data->metadata_sock);
+                               if (ret < 0) {
+                                       ERR("Handling metadata request");
+                                       goto error;
+                               }
+                       }
+                       /* No need for an else branch; all FDs are tested above. */
+               }
+               health_code_update();
+       }
+
+exit:
+error:
+       /*
+        * We lock here because we are about to close the sockets and another
+        * thread might be using them; taking exclusive access aborts all other
+        * consumer commands issued by other threads.
+        */
+       pthread_mutex_lock(&consumer_data->lock);
+
+       /* Immediately set the consumerd state to stopped */
+       if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
+               uatomic_set(&the_kernel_consumerd_state, CONSUMER_ERROR);
+       } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
+                       consumer_data->type == LTTNG_CONSUMER32_UST) {
+               uatomic_set(&the_ust_consumerd_state, CONSUMER_ERROR);
+       } else {
+               /* Code flow error... */
+               abort();
+       }
+
+       if (consumer_data->err_sock >= 0) {
+               ret = close(consumer_data->err_sock);
+               if (ret) {
+                       PERROR("close");
+               }
+               consumer_data->err_sock = -1;
+       }
+       if (consumer_data->cmd_sock >= 0) {
+               ret = close(consumer_data->cmd_sock);
+               if (ret) {
+                       PERROR("close");
+               }
+               consumer_data->cmd_sock = -1;
+       }
+       if (consumer_data->metadata_sock.fd_ptr &&
+           *consumer_data->metadata_sock.fd_ptr >= 0) {
+               ret = close(*consumer_data->metadata_sock.fd_ptr);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+       if (sock >= 0) {
+               ret = close(sock);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+
+       unlink(consumer_data->err_unix_sock_path);
+       unlink(consumer_data->cmd_unix_sock_path);
+       pthread_mutex_unlock(&consumer_data->lock);
+
+       /* Cleanup metadata socket mutex. */
+       if (consumer_data->metadata_sock.lock) {
+               pthread_mutex_destroy(consumer_data->metadata_sock.lock);
+               free(consumer_data->metadata_sock.lock);
+       }
+       lttng_poll_clean(&events);
+
+       if (cmd_socket_wrapper) {
+               consumer_destroy_socket(cmd_socket_wrapper);
+       }
+error_poll:
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister(the_health_sessiond);
+       DBG("consumer thread cleanup completed");
+
+       rcu_thread_offline();
+       rcu_unregister_thread();
+
+       return NULL;
+}
+
+static bool shutdown_consumer_management_thread(void *data)
+{
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
+
+       return notify_thread_pipe(write_fd) == 1;
+}
+
+static void cleanup_consumer_management_thread(void *data)
+{
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+
+       lttng_pipe_destroy(notifiers->quit_pipe);
+       free(notifiers);
+}
+
+bool launch_consumer_management_thread(struct consumer_data *consumer_data)
+{
+       struct lttng_pipe *quit_pipe;
+       struct thread_notifiers *notifiers = NULL;
+       struct lttng_thread *thread;
+
+       notifiers = (thread_notifiers *) zmalloc(sizeof(*notifiers));
+       if (!notifiers) {
+               goto error_alloc;
+       }
+
+       quit_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!quit_pipe) {
+               goto error;
+       }
+       notifiers->quit_pipe = quit_pipe;
+       notifiers->consumer_data = consumer_data;
+       sem_init(&notifiers->ready, 0, 0);
+
+       thread = lttng_thread_create("Consumer management",
+                       thread_consumer_management,
+                       shutdown_consumer_management_thread,
+                       cleanup_consumer_management_thread,
+                       notifiers);
+       if (!thread) {
+               goto error;
+       }
+       wait_until_thread_is_ready(notifiers);
+       lttng_thread_put(thread);
+       if (notifiers->initialization_result) {
+               return false;
+       }
+       return true;
+error:
+       cleanup_consumer_management_thread(notifiers);
+error_alloc:
+       return false;
+}
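
Comparing the deleted manage-consumer.c with the new manage-consumer.cpp above, most of the delta is mechanical: C++ does not implicitly convert void * to other object pointer types, nor int to an enumeration type, which is why the data arguments, the zmalloc() results and the negated wire codes all gain explicit casts. The short sketch below shows both rules in isolation; every name in it is illustrative and nothing is taken from the tree.

    #include <cstdlib>

    /* Illustrative stand-ins; these are not sessiond types. */
    struct notifiers {
            int quit_fd;
    };

    enum return_code {
            CODE_OK = 0,
            CODE_ERR = 1,
    };

    static void *thread_func(void *data)
    {
            /* Accepted as-is by a C compiler; C++ requires the cast from void *. */
            notifiers *state = (notifiers *) data;

            state->quit_fd = -1;
            return nullptr;
    }

    int main()
    {
            /* calloc() returns void *: assigning it needs a cast in C++, not in C. */
            notifiers *state = (notifiers *) std::calloc(1, sizeof(*state));

            if (!state) {
                    return 1;
            }

            /* An int does not implicitly convert back to an enum in C++ either. */
            const int wire_value = -CODE_ERR;
            const return_code code = (return_code) -wire_value;

            thread_func(state);
            std::free(state);
            return code == CODE_ERR ? 0 : 1;
    }
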
diff --git a/src/bin/lttng-sessiond/manage-kernel.c b/src/bin/lttng-sessiond/manage-kernel.c
deleted file mode 100644 (file)
index 55ecbf8..0000000
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <common/pipe.h>
-#include <common/utils.h>
-
-#include "manage-kernel.h"
-#include "testpoint.h"
-#include "health-sessiond.h"
-#include "utils.h"
-#include "thread.h"
-#include "kernel.h"
-#include "kernel-consumer.h"
-
-struct thread_notifiers {
-       struct lttng_pipe *quit_pipe;
-       int kernel_poll_pipe_read_fd;
-};
-
-/*
- * Update the kernel poll set of all channel fd available over all tracing
- * session. Add the wakeup pipe at the end of the set.
- */
-static int update_kernel_poll(struct lttng_poll_event *events)
-{
-       int ret;
-       struct ltt_kernel_channel *channel;
-       struct ltt_session *session;
-       const struct ltt_session_list *session_list = session_get_list();
-
-       DBG("Updating kernel poll set");
-
-       session_lock_list();
-       cds_list_for_each_entry(session, &session_list->head, list) {
-               if (!session_get(session)) {
-                       continue;
-               }
-               session_lock(session);
-               if (session->kernel_session == NULL) {
-                       session_unlock(session);
-                       session_put(session);
-                       continue;
-               }
-
-               cds_list_for_each_entry(channel,
-                               &session->kernel_session->channel_list.head, list) {
-                       /* Add channel fd to the kernel poll set */
-                       ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
-                       if (ret < 0) {
-                               session_unlock(session);
-                               session_put(session);
-                               goto error;
-                       }
-                       DBG("Channel fd %d added to kernel set", channel->fd);
-               }
-               session_unlock(session);
-               session_put(session);
-       }
-       session_unlock_list();
-
-       return 0;
-
-error:
-       session_unlock_list();
-       return -1;
-}
-
-/*
- * Find the channel fd from 'fd' over all tracing session. When found, check
- * for new channel stream and send those stream fds to the kernel consumer.
- *
- * Useful for CPU hotplug feature.
- */
-static int update_kernel_stream(int fd)
-{
-       int ret = 0;
-       struct ltt_session *session;
-       struct ltt_kernel_session *ksess;
-       struct ltt_kernel_channel *channel;
-       const struct ltt_session_list *session_list = session_get_list();
-
-       DBG("Updating kernel streams for channel fd %d", fd);
-
-       session_lock_list();
-       cds_list_for_each_entry(session, &session_list->head, list) {
-               if (!session_get(session)) {
-                       continue;
-               }
-               session_lock(session);
-               if (session->kernel_session == NULL) {
-                       session_unlock(session);
-                       session_put(session);
-                       continue;
-               }
-               ksess = session->kernel_session;
-
-               cds_list_for_each_entry(channel,
-                               &ksess->channel_list.head, list) {
-                       struct lttng_ht_iter iter;
-                       struct consumer_socket *socket;
-
-                       if (channel->fd != fd) {
-                               continue;
-                       }
-                       DBG("Channel found, updating kernel streams");
-                       ret = kernel_open_channel_stream(channel);
-                       if (ret < 0) {
-                               goto error;
-                       }
-                       /* Update the stream global counter */
-                       ksess->stream_count_global += ret;
-
-                       /*
-                        * Have we already sent fds to the consumer? If yes, it
-                        * means that tracing is started so it is safe to send
-                        * our updated stream fds.
-                        */
-                       if (ksess->consumer_fds_sent != 1
-                                       || ksess->consumer == NULL) {
-                               ret = -1;
-                               goto error;
-                       }
-
-                       rcu_read_lock();
-                       cds_lfht_for_each_entry(ksess->consumer->socks->ht,
-                                       &iter.iter, socket, node.node) {
-                               pthread_mutex_lock(socket->lock);
-                               ret = kernel_consumer_send_channel_streams(socket,
-                                               channel, ksess,
-                                               session->output_traces ? 1 : 0);
-                               pthread_mutex_unlock(socket->lock);
-                               if (ret < 0) {
-                                       rcu_read_unlock();
-                                       goto error;
-                               }
-                       }
-                       rcu_read_unlock();
-               }
-               session_unlock(session);
-               session_put(session);
-       }
-       session_unlock_list();
-       return ret;
-
-error:
-       session_unlock(session);
-       session_put(session);
-       session_unlock_list();
-       return ret;
-}
-
-/*
- * This thread manage event coming from the kernel.
- *
- * Features supported in this thread:
- *    -) CPU Hotplug
- */
-static void *thread_kernel_management(void *data)
-{
-       int ret, i, pollfd, update_poll_flag = 1, err = -1;
-       uint32_t revents, nb_fd;
-       char tmp;
-       struct lttng_poll_event events;
-       struct thread_notifiers *notifiers = data;
-       const int quit_pipe_read_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);
-
-       DBG("[thread] Thread manage kernel started");
-
-       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);
-
-       /*
-        * This first step of the while is to clean this structure which could free
-        * non NULL pointers so initialize it before the loop.
-        */
-       lttng_poll_init(&events);
-
-       if (testpoint(sessiond_thread_manage_kernel)) {
-               goto error_testpoint;
-       }
-
-       health_code_update();
-
-       if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
-               goto error_testpoint;
-       }
-
-       while (1) {
-               health_code_update();
-
-               if (update_poll_flag == 1) {
-                       /* Clean events object. We are about to populate it again. */
-                       lttng_poll_clean(&events);
-
-                       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
-                       if (ret < 0) {
-                               goto error_poll_create;
-                       }
-
-                       ret = lttng_poll_add(&events,
-                                       notifiers->kernel_poll_pipe_read_fd,
-                                       LPOLLIN);
-                       if (ret < 0) {
-                               goto error;
-                       }
-
-                       ret = lttng_poll_add(&events,
-                                       quit_pipe_read_fd,
-                                       LPOLLIN);
-                       if (ret < 0) {
-                               goto error;
-                       }
-
-                       /* This will add the available kernel channel if any. */
-                       ret = update_kernel_poll(&events);
-                       if (ret < 0) {
-                               goto error;
-                       }
-                       update_poll_flag = 0;
-               }
-
-               DBG("Thread kernel polling");
-
-               /* Poll infinite value of time */
-       restart:
-               health_poll_entry();
-               ret = lttng_poll_wait(&events, -1);
-               DBG("Thread kernel return from poll on %d fds",
-                               LTTNG_POLL_GETNB(&events));
-               health_poll_exit();
-               if (ret < 0) {
-                       /*
-                        * Restart interrupted system call.
-                        */
-                       if (errno == EINTR) {
-                               goto restart;
-                       }
-                       goto error;
-               } else if (ret == 0) {
-                       /* Should not happen since timeout is infinite */
-                       ERR("Return value of poll is 0 with an infinite timeout.\n"
-                               "This should not have happened! Continuing...");
-                       continue;
-               }
-
-               nb_fd = ret;
-
-               for (i = 0; i < nb_fd; i++) {
-                       /* Fetch once the poll data */
-                       revents = LTTNG_POLL_GETEV(&events, i);
-                       pollfd = LTTNG_POLL_GETFD(&events, i);
-
-                       health_code_update();
-
-                       if (pollfd == quit_pipe_read_fd) {
-                               err = 0;
-                               goto exit;
-                       }
-
-                       /* Check for data on kernel pipe */
-                       if (revents & LPOLLIN) {
-                               if (pollfd == notifiers->kernel_poll_pipe_read_fd) {
-                                       (void) lttng_read(notifiers->kernel_poll_pipe_read_fd,
-                                               &tmp, 1);
-                                       /*
-                                        * Ret value is useless here, if this pipe gets any actions an
-                                        * update is required anyway.
-                                        */
-                                       update_poll_flag = 1;
-                                       continue;
-                               } else {
-                                       /*
-                                        * New CPU detected by the kernel. Adding kernel stream to
-                                        * kernel session and updating the kernel consumer
-                                        */
-                                       ret = update_kernel_stream(pollfd);
-                                       if (ret < 0) {
-                                               continue;
-                                       }
-                                       break;
-                               }
-                       } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                               update_poll_flag = 1;
-                               continue;
-                       } else {
-                               ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                               goto error;
-                       }
-               }
-       }
-
-exit:
-error:
-       lttng_poll_clean(&events);
-error_poll_create:
-error_testpoint:
-       if (err) {
-               health_error();
-               ERR("Health error occurred in %s", __func__);
-               WARN("Kernel thread died unexpectedly. "
-                               "Kernel tracing can continue but CPU hotplug is disabled.");
-       }
-       health_unregister(the_health_sessiond);
-       DBG("Kernel thread dying");
-       return NULL;
-}
-
-static bool shutdown_kernel_management_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
-
-       return notify_thread_pipe(write_fd) == 1;
-}
-
-static void cleanup_kernel_management_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-
-       lttng_pipe_destroy(notifiers->quit_pipe);
-       free(notifiers);
-}
-
-bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd)
-{
-       struct lttng_pipe *quit_pipe;
-       struct thread_notifiers *notifiers = NULL;
-       struct lttng_thread *thread;
-
-       notifiers = zmalloc(sizeof(*notifiers));
-       if (!notifiers) {
-               goto error_alloc;
-       }
-       quit_pipe = lttng_pipe_open(FD_CLOEXEC);
-       if (!quit_pipe) {
-               goto error;
-       }
-       notifiers->quit_pipe = quit_pipe;
-       notifiers->kernel_poll_pipe_read_fd = kernel_poll_pipe_read_fd;
-
-       thread = lttng_thread_create("Kernel management",
-                       thread_kernel_management,
-                       shutdown_kernel_management_thread,
-                       cleanup_kernel_management_thread,
-                       notifiers);
-       if (!thread) {
-               goto error;
-       }
-       lttng_thread_put(thread);
-       return true;
-error:
-       cleanup_kernel_management_thread(notifiers);
-error_alloc:
-       return false;
-}
diff --git a/src/bin/lttng-sessiond/manage-kernel.cpp b/src/bin/lttng-sessiond/manage-kernel.cpp
new file mode 100644 (file)
index 0000000..2ffe609
--- /dev/null
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <common/pipe.h>
+#include <common/utils.h>
+
+#include "manage-kernel.h"
+#include "testpoint.h"
+#include "health-sessiond.h"
+#include "utils.h"
+#include "thread.h"
+#include "kernel.h"
+#include "kernel-consumer.h"
+
+struct thread_notifiers {
+       struct lttng_pipe *quit_pipe;
+       int kernel_poll_pipe_read_fd;
+};
+
+/*
+ * Update the kernel poll set with all channel fds available over all tracing
+ * sessions. Add the wakeup pipe at the end of the set.
+ */
+static int update_kernel_poll(struct lttng_poll_event *events)
+{
+       int ret;
+       struct ltt_kernel_channel *channel;
+       struct ltt_session *session;
+       const struct ltt_session_list *session_list = session_get_list();
+
+       DBG("Updating kernel poll set");
+
+       session_lock_list();
+       cds_list_for_each_entry(session, &session_list->head, list) {
+               if (!session_get(session)) {
+                       continue;
+               }
+               session_lock(session);
+               if (session->kernel_session == NULL) {
+                       session_unlock(session);
+                       session_put(session);
+                       continue;
+               }
+
+               cds_list_for_each_entry(channel,
+                               &session->kernel_session->channel_list.head, list) {
+                       /* Add channel fd to the kernel poll set */
+                       ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
+                       if (ret < 0) {
+                               session_unlock(session);
+                               session_put(session);
+                               goto error;
+                       }
+                       DBG("Channel fd %d added to kernel set", channel->fd);
+               }
+               session_unlock(session);
+               session_put(session);
+       }
+       session_unlock_list();
+
+       return 0;
+
+error:
+       session_unlock_list();
+       return -1;
+}
+
+/*
+ * Find the channel fd matching 'fd' over all tracing sessions. When found, check
+ * for new channel streams and send those stream fds to the kernel consumer.
+ *
+ * Useful for CPU hotplug feature.
+ */
+static int update_kernel_stream(int fd)
+{
+       int ret = 0;
+       struct ltt_session *session;
+       struct ltt_kernel_session *ksess;
+       struct ltt_kernel_channel *channel;
+       const struct ltt_session_list *session_list = session_get_list();
+
+       DBG("Updating kernel streams for channel fd %d", fd);
+
+       session_lock_list();
+       cds_list_for_each_entry(session, &session_list->head, list) {
+               if (!session_get(session)) {
+                       continue;
+               }
+               session_lock(session);
+               if (session->kernel_session == NULL) {
+                       session_unlock(session);
+                       session_put(session);
+                       continue;
+               }
+               ksess = session->kernel_session;
+
+               cds_list_for_each_entry(channel,
+                               &ksess->channel_list.head, list) {
+                       struct lttng_ht_iter iter;
+                       struct consumer_socket *socket;
+
+                       if (channel->fd != fd) {
+                               continue;
+                       }
+                       DBG("Channel found, updating kernel streams");
+                       ret = kernel_open_channel_stream(channel);
+                       if (ret < 0) {
+                               goto error;
+                       }
+                       /* Update the stream global counter */
+                       ksess->stream_count_global += ret;
+
+                       /*
+                        * Have we already sent fds to the consumer? If yes, it
+                        * means that tracing is started so it is safe to send
+                        * our updated stream fds.
+                        */
+                       if (ksess->consumer_fds_sent != 1
+                                       || ksess->consumer == NULL) {
+                               ret = -1;
+                               goto error;
+                       }
+
+                       rcu_read_lock();
+                       cds_lfht_for_each_entry(ksess->consumer->socks->ht,
+                                       &iter.iter, socket, node.node) {
+                               pthread_mutex_lock(socket->lock);
+                               ret = kernel_consumer_send_channel_streams(socket,
+                                               channel, ksess,
+                                               session->output_traces ? 1 : 0);
+                               pthread_mutex_unlock(socket->lock);
+                               if (ret < 0) {
+                                       rcu_read_unlock();
+                                       goto error;
+                               }
+                       }
+                       rcu_read_unlock();
+               }
+               session_unlock(session);
+               session_put(session);
+       }
+       session_unlock_list();
+       return ret;
+
+error:
+       session_unlock(session);
+       session_put(session);
+       session_unlock_list();
+       return ret;
+}
+
+/*
+ * This thread manages events coming from the kernel.
+ *
+ * Features supported in this thread:
+ *    -) CPU Hotplug
+ */
+static void *thread_kernel_management(void *data)
+{
+       int ret, i, pollfd, update_poll_flag = 1, err = -1;
+       uint32_t revents, nb_fd;
+       char tmp;
+       struct lttng_poll_event events;
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       const int quit_pipe_read_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);
+
+       DBG("[thread] Thread manage kernel started");
+
+       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);
+
+       /*
+        * The first step of the while loop cleans this structure, which could
+        * free non-NULL pointers, so initialize it before the loop.
+        */
+       lttng_poll_init(&events);
+
+       if (testpoint(sessiond_thread_manage_kernel)) {
+               goto error_testpoint;
+       }
+
+       health_code_update();
+
+       if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
+               goto error_testpoint;
+       }
+
+       while (1) {
+               health_code_update();
+
+               if (update_poll_flag == 1) {
+                       /* Clean events object. We are about to populate it again. */
+                       lttng_poll_clean(&events);
+
+                       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
+                       if (ret < 0) {
+                               goto error_poll_create;
+                       }
+
+                       ret = lttng_poll_add(&events,
+                                       notifiers->kernel_poll_pipe_read_fd,
+                                       LPOLLIN);
+                       if (ret < 0) {
+                               goto error;
+                       }
+
+                       ret = lttng_poll_add(&events,
+                                       quit_pipe_read_fd,
+                                       LPOLLIN);
+                       if (ret < 0) {
+                               goto error;
+                       }
+
+                       /* This will add the available kernel channels, if any. */
+                       ret = update_kernel_poll(&events);
+                       if (ret < 0) {
+                               goto error;
+                       }
+                       update_poll_flag = 0;
+               }
+
+               DBG("Thread kernel polling");
+
+               /* Poll with an infinite timeout */
+       restart:
+               health_poll_entry();
+               ret = lttng_poll_wait(&events, -1);
+               DBG("Thread kernel return from poll on %d fds",
+                               LTTNG_POLL_GETNB(&events));
+               health_poll_exit();
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               goto restart;
+                       }
+                       goto error;
+               } else if (ret == 0) {
+                       /* Should not happen since timeout is infinite */
+                       ERR("Return value of poll is 0 with an infinite timeout.\n"
+                               "This should not have happened! Continuing...");
+                       continue;
+               }
+
+               nb_fd = ret;
+
+               for (i = 0; i < nb_fd; i++) {
+                       /* Fetch once the poll data */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
+
+                       health_code_update();
+
+                       if (pollfd == quit_pipe_read_fd) {
+                               err = 0;
+                               goto exit;
+                       }
+
+                       /* Check for data on kernel pipe */
+                       if (revents & LPOLLIN) {
+                               if (pollfd == notifiers->kernel_poll_pipe_read_fd) {
+                                       (void) lttng_read(notifiers->kernel_poll_pipe_read_fd,
+                                               &tmp, 1);
+                                       /*
+                                        * The return value is useless here; if this pipe sees any
+                                        * activity, an update is required anyway.
+                                        */
+                                       update_poll_flag = 1;
+                                       continue;
+                               } else {
+                                       /*
+                                        * A new CPU was detected by the kernel. Add the kernel stream
+                                        * to the kernel session and update the kernel consumer.
+                                        */
+                                       ret = update_kernel_stream(pollfd);
+                                       if (ret < 0) {
+                                               continue;
+                                       }
+                                       break;
+                               }
+                       } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                               update_poll_flag = 1;
+                               continue;
+                       } else {
+                               ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                               goto error;
+                       }
+               }
+       }
+
+exit:
+error:
+       lttng_poll_clean(&events);
+error_poll_create:
+error_testpoint:
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+               WARN("Kernel thread died unexpectedly. "
+                               "Kernel tracing can continue but CPU hotplug is disabled.");
+       }
+       health_unregister(the_health_sessiond);
+       DBG("Kernel thread dying");
+       return NULL;
+}
+
+static bool shutdown_kernel_management_thread(void *data)
+{
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
+
+       return notify_thread_pipe(write_fd) == 1;
+}
+
+static void cleanup_kernel_management_thread(void *data)
+{
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+
+       lttng_pipe_destroy(notifiers->quit_pipe);
+       free(notifiers);
+}
+
+bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd)
+{
+       struct lttng_pipe *quit_pipe;
+       struct thread_notifiers *notifiers = NULL;
+       struct lttng_thread *thread;
+
+       notifiers = (thread_notifiers *) zmalloc(sizeof(*notifiers));
+       if (!notifiers) {
+               goto error_alloc;
+       }
+       quit_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!quit_pipe) {
+               goto error;
+       }
+       notifiers->quit_pipe = quit_pipe;
+       notifiers->kernel_poll_pipe_read_fd = kernel_poll_pipe_read_fd;
+
+       thread = lttng_thread_create("Kernel management",
+                       thread_kernel_management,
+                       shutdown_kernel_management_thread,
+                       cleanup_kernel_management_thread,
+                       notifiers);
+       if (!thread) {
+               goto error;
+       }
+       lttng_thread_put(thread);
+       return true;
+error:
+       cleanup_kernel_management_thread(notifiers);
+error_alloc:
+       return false;
+}
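
The same cast pattern repeats throughout manage-kernel.cpp above, for instance in (thread_notifiers *) data and (thread_notifiers *) zmalloc(sizeof(*notifiers)). If those repeated casts ever become a maintenance burden, a typed allocation helper is one conventional C++ way to hide them; the sketch below is purely an editorial illustration of that idea, under the assumption that behaviour must stay equivalent to a zero-initializing allocator, and it is not something this commit introduces.

    #include <cstdlib>

    /*
     * Hypothetical helper, not part of this commit: performs a zero-initialized
     * allocation and returns it with the requested type, so call sites no longer
     * spell out the (T *) cast that C++ requires on void *.
     */
    template <typename T>
    static T *zmalloc_typed()
    {
            return static_cast<T *>(std::calloc(1, sizeof(T)));
    }

    /* Illustrative structure standing in for a per-thread notifier. */
    struct kernel_thread_state {
            int poll_pipe_read_fd;
    };

    int main()
    {
            kernel_thread_state *state = zmalloc_typed<kernel_thread_state>();

            if (!state) {
                    return 1;
            }

            state->poll_pipe_read_fd = -1;
            std::free(state);
            return 0;
    }
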
diff --git a/src/bin/lttng-sessiond/modprobe.c b/src/bin/lttng-sessiond/modprobe.c
deleted file mode 100644 (file)
index 24b34d0..0000000
+++ /dev/null
@@ -1,776 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <dgoulet@efficios.com>
- * Copyright (C) 2014 Jan Glauber <jan.glauber@gmail.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-/**
- * @file modprobe.c
- *
- * @brief modprobe related functions.
- *
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/wait.h>
-
-#include <common/common.h>
-#include <common/utils.h>
-
-#include "modprobe.h"
-#include "kern-modules.h"
-#include "lttng-sessiond.h"
-
-/* LTTng kernel tracer mandatory core modules list */
-struct kern_modules_param kern_modules_control_core[] = {
-       {
-               .name = (char *) "lttng-ring-buffer-client-discard",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
-       },
-       {
-               .name = (char *) "lttng-ring-buffer-client-overwrite",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
-       },
-       {
-               .name = (char *) "lttng-ring-buffer-metadata-client",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
-       },
-       {
-               .name = (char *) "lttng-ring-buffer-client-mmap-discard",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
-       },
-       {
-               .name = (char *) "lttng-ring-buffer-client-mmap-overwrite",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
-       },
-       {
-               .name = (char *) "lttng-ring-buffer-metadata-mmap-client",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
-       },
-       {
-               .name = (char *) "lttng-ring-buffer-event_notifier-client",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-counter-client-percpu-64-modular",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-counter-client-percpu-32-modular",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-};
-
-/* LTTng kerneltracer probe modules list */
-struct kern_modules_param kern_modules_probes_default[] = {
-       {
-               .name = (char *) "lttng-probe-asoc",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-block",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-btrfs",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-compaction",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-ext3",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-ext4",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-gpio",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-i2c",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-irq",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-jbd",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-jbd2",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-kmem",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-kvm",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-kvm-x86",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-kvm-x86-mmu",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-lock",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-module",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-napi",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-net",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-power",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-preemptirq",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-printk",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-random",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-rcu",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-regmap",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-regulator",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-rpm",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-sched",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-scsi",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-signal",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-skb",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-sock",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-statedump",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-sunrpc",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-timer",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-udp",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-vmscan",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-v4l2",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-workqueue",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-writeback",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-x86-irq-vectors",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-       {
-               .name = (char *) "lttng-probe-x86-exceptions",
-               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
-       },
-};
-
-/* dynamic probe modules list */
-static struct kern_modules_param *probes;
-static int nr_probes;
-static int probes_capacity;
-
-#if HAVE_KMOD
-#include <libkmod.h>
-
-/**
- * @brief Logging function for libkmod integration.
- */
-static void log_kmod(void *data, int priority, const char *file, int line,
-               const char *fn, const char *format, va_list args)
-{
-       char *str;
-
-       if (vasprintf(&str, format, args) < 0) {
-               return;
-       }
-
-       DBG("libkmod: %s", str);
-       free(str);
-}
-
-/**
- * @brief Setup the libkmod context.
- *
- * Create the context, add a custom logging function and preload the
- * ressources for faster operation.
- *
- * @returns    \c 0 on success
- *             \c < 0 on error
- */
-static int setup_kmod_ctx(struct kmod_ctx **ctx)
-{
-       int ret = 0;
-
-       *ctx = kmod_new(NULL, NULL);
-       if (!ctx) {
-               PERROR("Unable to create kmod library context");
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       kmod_set_log_fn(*ctx, log_kmod, NULL);
-       ret = kmod_load_resources(*ctx);
-       if (ret < 0) {
-               ERR("Failed to load kmod library resources");
-               goto error;
-       }
-
-error:
-       return ret;
-}
-
-/**
- * @brief Loads the kernel modules in \p modules
- *
- * @param modules      List of modules to load
- * @param entries      Number of modules in the list
- *
- * If the modules are required, we will return with error after the
- * first failed module load, otherwise we continue loading.
- *
- * @returns            \c 0 on success
- *                     \c < 0 on error
- */
-static int modprobe_lttng(struct kern_modules_param *modules,
-               int entries)
-{
-       int ret = 0, i;
-       struct kmod_ctx *ctx;
-
-       ret = setup_kmod_ctx(&ctx);
-       if (ret < 0) {
-               goto error;
-       }
-
-       for (i = 0; i < entries; i++) {
-               struct kmod_module *mod = NULL;
-
-               ret = kmod_module_new_from_name(ctx, modules[i].name, &mod);
-               if (ret < 0) {
-                       PERROR("Failed to create kmod module for %s", modules[i].name);
-                       goto error;
-               }
-
-               ret = kmod_module_probe_insert_module(mod, 0,
-                               NULL, NULL, NULL, NULL);
-               if (ret == -EEXIST) {
-                       DBG("Module %s is already loaded", modules[i].name);
-                       ret = 0;
-               } else if (ret < 0) {
-                       if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED) {
-                               ERR("Unable to load required module %s",
-                                               modules[i].name);
-                               goto error;
-                       } else {
-                               DBG("Unable to load optional module %s; continuing",
-                                               modules[i].name);
-                               ret = 0;
-                       }
-               } else {
-                       DBG("Modprobe successfully %s", modules[i].name);
-                       modules[i].loaded = true;
-               }
-
-               kmod_module_unref(mod);
-       }
-
-error:
-       if (ctx) {
-               kmod_unref(ctx);
-       }
-       return ret;
-}
-
-/**
- * @brief Recursively unload modules.
- *
- * This function implements the same modules unloading behavior as
- * 'modprobe -r' or rmmod, it will recursevily go trought the \p module
- * dependencies and unload modules with a refcount of 0.
- *
- * @param mod          The module to unload
- *
- * @returns            \c 0 on success
- *                     \c < 0 on error
- */
-static int rmmod_recurse(struct kmod_module *mod) {
-       int ret = 0;
-       struct kmod_list *deps, *itr;
-
-       if (kmod_module_get_initstate(mod) == KMOD_MODULE_BUILTIN) {
-               DBG("Module %s is builtin", kmod_module_get_name(mod));
-               return ret;
-       }
-
-       ret = kmod_module_remove_module(mod, 0);
-
-       deps = kmod_module_get_dependencies(mod);
-       if (deps != NULL) {
-               kmod_list_foreach(itr, deps) {
-                       struct kmod_module *dep = kmod_module_get_module(itr);
-                       if (kmod_module_get_refcnt(dep) == 0) {
-                               DBG("Recursive remove module %s",
-                                               kmod_module_get_name(dep));
-                               rmmod_recurse(dep);
-                       }
-                       kmod_module_unref(dep);
-               }
-               kmod_module_unref_list(deps);
-       }
-
-       return ret;
-}
-
-/**
- * @brief Unloads the kernel modules in \p modules
- *
- * @param modules      List of modules to unload
- * @param entries      Number of modules in the list
- *
- */
-static void modprobe_remove_lttng(const struct kern_modules_param *modules,
-               int entries)
-{
-       int ret = 0, i;
-       struct kmod_ctx *ctx;
-
-       ret = setup_kmod_ctx(&ctx);
-       if (ret < 0) {
-               goto error;
-       }
-
-       for (i = entries - 1; i >= 0; i--) {
-               struct kmod_module *mod = NULL;
-
-               if (!modules[i].loaded) {
-                       continue;
-               }
-
-               ret = kmod_module_new_from_name(ctx, modules[i].name, &mod);
-               if (ret < 0) {
-                       PERROR("Failed to create kmod module for %s", modules[i].name);
-                       goto error;
-               }
-
-               ret = rmmod_recurse(mod);
-               if (ret == -EEXIST) {
-                       DBG("Module %s is not in kernel.", modules[i].name);
-               } else if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED && ret < 0) {
-                       ERR("Unable to remove module %s", modules[i].name);
-               } else {
-                       DBG("Modprobe removal successful %s",
-                               modules[i].name);
-               }
-
-               kmod_module_unref(mod);
-       }
-
-error:
-       if (ctx) {
-               kmod_unref(ctx);
-       }
-}
-
-#else /* HAVE_KMOD */
-
-static int modprobe_lttng(struct kern_modules_param *modules,
-               int entries)
-{
-       int ret = 0, i;
-       char modprobe[256];
-
-       for (i = 0; i < entries; i++) {
-               ret = snprintf(modprobe, sizeof(modprobe),
-                               "/sbin/modprobe %s%s",
-                               modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED ? "" : "-q ",
-                               modules[i].name);
-               if (ret < 0) {
-                       PERROR("snprintf modprobe");
-                       goto error;
-               }
-               modprobe[sizeof(modprobe) - 1] = '\0';
-               ret = system(modprobe);
-               if (ret == -1) {
-                       if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED) {
-                               ERR("Unable to launch modprobe for required module %s",
-                                               modules[i].name);
-                               goto error;
-                       } else {
-                               DBG("Unable to launch modprobe for optional module %s; continuing",
-                                               modules[i].name);
-                               ret = 0;
-                       }
-               } else if (WEXITSTATUS(ret) != 0) {
-                       if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED) {
-                               ERR("Unable to load required module %s",
-                                               modules[i].name);
-                               goto error;
-                       } else {
-                               DBG("Unable to load optional module %s; continuing",
-                                               modules[i].name);
-                               ret = 0;
-                       }
-               } else {
-                       DBG("Modprobe successfully %s", modules[i].name);
-                       modules[i].loaded = true;
-               }
-       }
-
-error:
-       return ret;
-}
-
-static void modprobe_remove_lttng(const struct kern_modules_param *modules,
-               int entries)
-{
-       int ret = 0, i;
-       char modprobe[256];
-
-       for (i = entries - 1; i >= 0; i--) {
-               if (!modules[i].loaded) {
-                       continue;
-               }
-               ret = snprintf(modprobe, sizeof(modprobe),
-                               "/sbin/modprobe -r -q %s",
-                               modules[i].name);
-               if (ret < 0) {
-                       PERROR("snprintf modprobe -r");
-                       return;
-               }
-               modprobe[sizeof(modprobe) - 1] = '\0';
-               ret = system(modprobe);
-               if (ret == -1) {
-                       if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED) {
-                               ERR("Unable to launch modprobe -r for required module %s",
-                                               modules[i].name);
-                       } else {
-                               DBG("Unable to launch modprobe -r for optional module %s",
-                                               modules[i].name);
-                       }
-               } else if (WEXITSTATUS(ret) != 0) {
-                       if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED) {
-                               ERR("Unable to remove required module %s",
-                                               modules[i].name);
-                       } else {
-                               DBG("Unable to remove optional module %s",
-                                               modules[i].name);
-                       }
-               } else {
-                       DBG("Modprobe removal successful %s", modules[i].name);
-               }
-       }
-}
-
-#endif /* HAVE_KMOD */
-
-/*
- * Remove control kernel module(s) in reverse load order.
- */
-void modprobe_remove_lttng_control(void)
-{
-       modprobe_remove_lttng(kern_modules_control_core,
-                       ARRAY_SIZE(kern_modules_control_core));
-}
-
-static void free_probes(void)
-{
-       int i;
-
-       if (!probes) {
-               return;
-       }
-       for (i = 0; i < nr_probes; ++i) {
-               free(probes[i].name);
-       }
-       free(probes);
-       probes = NULL;
-       nr_probes = 0;
-}
-
-/*
- * Remove data kernel modules in reverse load order.
- */
-void modprobe_remove_lttng_data(void)
-{
-       if (!probes) {
-               return;
-       }
-
-       modprobe_remove_lttng(probes, nr_probes);
-       free_probes();
-}
-
-/*
- * Remove all kernel modules in reverse order.
- */
-void modprobe_remove_lttng_all(void)
-{
-       modprobe_remove_lttng_data();
-       modprobe_remove_lttng_control();
-}
-
-/*
- * Load control kernel module(s).
- */
-int modprobe_lttng_control(void)
-{
-       return modprobe_lttng(kern_modules_control_core,
-                       ARRAY_SIZE(kern_modules_control_core));
-}
-
-/**
- * Grow global list of probes (double capacity or set it to 1 if
- * currently 0 and copy existing data).
- */
-static int grow_probes(void)
-{
-       int i;
-       struct kern_modules_param *tmp_probes;
-
-       /* Initialize capacity to 1 if 0. */
-       if (probes_capacity == 0) {
-               probes = zmalloc(sizeof(*probes));
-               if (!probes) {
-                       PERROR("malloc probe list");
-                       return -ENOMEM;
-               }
-
-               probes_capacity = 1;
-               return 0;
-       }
-
-       /* Double size. */
-       probes_capacity *= 2;
-
-       tmp_probes = zmalloc(sizeof(*tmp_probes) * probes_capacity);
-       if (!tmp_probes) {
-               PERROR("malloc probe list");
-               return -ENOMEM;
-       }
-
-       for (i = 0; i < nr_probes; ++i) {
-               /* Ownership of 'name' field is transferred. */
-               tmp_probes[i] = probes[i];
-       }
-
-       /* Replace probes with larger copy. */
-       free(probes);
-       probes = tmp_probes;
-
-       return 0;
-}
-
-/*
- * Appends a comma-separated list of probes to the global list
- * of probes.
- */
-static int append_list_to_probes(const char *list)
-{
-       char *next;
-       int ret;
-       char *tmp_list, *cur_list, *saveptr;
-
-       LTTNG_ASSERT(list);
-
-       cur_list = tmp_list = strdup(list);
-       if (!tmp_list) {
-               PERROR("strdup temp list");
-               return -ENOMEM;
-       }
-
-       for (;;) {
-               size_t name_len;
-               struct kern_modules_param *cur_mod;
-
-               next = strtok_r(cur_list, ",", &saveptr);
-               if (!next) {
-                       break;
-               }
-               cur_list = NULL;
-
-               /* filter leading spaces */
-               while (*next == ' ') {
-                       next++;
-               }
-
-               if (probes_capacity <= nr_probes) {
-                       ret = grow_probes();
-                       if (ret) {
-                               goto error;
-                       }
-               }
-
-               /* Length 13 is "lttng-probe-" + \0 */
-               name_len = strlen(next) + 13;
-
-               cur_mod = &probes[nr_probes];
-               cur_mod->name = zmalloc(name_len);
-               if (!cur_mod->name) {
-                       PERROR("malloc probe list");
-                       ret = -ENOMEM;
-                       goto error;
-               }
-
-               ret = snprintf(cur_mod->name, name_len, "lttng-probe-%s", next);
-               if (ret < 0) {
-                       PERROR("snprintf modprobe name");
-                       ret = -ENOMEM;
-                       goto error;
-               }
-
-               cur_mod->load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL;
-
-               nr_probes++;
-       }
-
-       free(tmp_list);
-       return 0;
-
-error:
-       free(tmp_list);
-       free_probes();
-       return ret;
-}
-
-/*
- * Load data kernel module(s).
- */
-int modprobe_lttng_data(void)
-{
-       int ret, i;
-       char *list;
-
-       /*
-        * Base probes: either from command line option, environment
-        * variable or default list.
-        */
-       list = the_config.kmod_probes_list.value;
-       if (list) {
-               /* User-specified probes. */
-               ret = append_list_to_probes(list);
-               if (ret) {
-                       return ret;
-               }
-       } else {
-               /* Default probes. */
-               int def_len = ARRAY_SIZE(kern_modules_probes_default);
-
-               probes = zmalloc(sizeof(*probes) * def_len);
-               if (!probes) {
-                       PERROR("malloc probe list");
-                       return -ENOMEM;
-               }
-
-               nr_probes = probes_capacity = def_len;
-
-               for (i = 0; i < def_len; ++i) {
-                       char* name = strdup(kern_modules_probes_default[i].name);
-
-                       if (!name) {
-                               PERROR("strdup probe item");
-                               ret = -ENOMEM;
-                               goto error;
-                       }
-
-                       probes[i].name = name;
-                       probes[i].load_policy = kern_modules_probes_default[i].load_policy;
-               }
-       }
-
-       /*
-        * Extra modules? Append them to current probes list.
-        */
-       list = the_config.kmod_extra_probes_list.value;
-       if (list) {
-               ret = append_list_to_probes(list);
-               if (ret) {
-                       goto error;
-               }
-       }
-
-       /*
-        * Load probes modules now.
-        */
-       ret = modprobe_lttng(probes, nr_probes);
-       if (ret) {
-               goto error;
-       }
-       return ret;
-
-error:
-       free_probes();
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/modprobe.cpp b/src/bin/lttng-sessiond/modprobe.cpp
new file mode 100644 (file)
index 0000000..7e27a02
--- /dev/null
@@ -0,0 +1,776 @@
+/*
+ * Copyright (C) 2011 David Goulet <dgoulet@efficios.com>
+ * Copyright (C) 2014 Jan Glauber <jan.glauber@gmail.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+/**
+ * @file modprobe.cpp
+ *
+ * @brief modprobe-related functions.
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/wait.h>
+
+#include <common/common.h>
+#include <common/utils.h>
+
+#include "modprobe.h"
+#include "kern-modules.h"
+#include "lttng-sessiond.h"
+
+/* LTTng kernel tracer mandatory core modules list */
+struct kern_modules_param kern_modules_control_core[] = {
+       {
+               .name = (char *) "lttng-ring-buffer-client-discard",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
+       },
+       {
+               .name = (char *) "lttng-ring-buffer-client-overwrite",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
+       },
+       {
+               .name = (char *) "lttng-ring-buffer-metadata-client",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
+       },
+       {
+               .name = (char *) "lttng-ring-buffer-client-mmap-discard",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
+       },
+       {
+               .name = (char *) "lttng-ring-buffer-client-mmap-overwrite",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
+       },
+       {
+               .name = (char *) "lttng-ring-buffer-metadata-mmap-client",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED,
+       },
+       {
+               .name = (char *) "lttng-ring-buffer-event_notifier-client",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-counter-client-percpu-64-modular",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-counter-client-percpu-32-modular",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+};
+
+/* LTTng kernel tracer probe modules list */
+struct kern_modules_param kern_modules_probes_default[] = {
+       {
+               .name = (char *) "lttng-probe-asoc",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-block",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-btrfs",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-compaction",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-ext3",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-ext4",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-gpio",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-i2c",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-irq",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-jbd",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-jbd2",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-kmem",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-kvm",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-kvm-x86",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-kvm-x86-mmu",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-lock",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-module",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-napi",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-net",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-power",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-preemptirq",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-printk",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-random",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-rcu",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-regmap",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-regulator",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-rpm",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-sched",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-scsi",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-signal",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-skb",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-sock",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-statedump",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-sunrpc",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-timer",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-udp",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-vmscan",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-v4l2",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-workqueue",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-writeback",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-x86-irq-vectors",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+       {
+               .name = (char *) "lttng-probe-x86-exceptions",
+               .load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL,
+       },
+};
+
+/* dynamic probe modules list */
+static struct kern_modules_param *probes;
+static int nr_probes;
+static int probes_capacity;
+
+#if HAVE_KMOD
+#include <libkmod.h>
+
+/**
+ * @brief Logging function for libkmod integration.
+ */
+static void log_kmod(void *data, int priority, const char *file, int line,
+               const char *fn, const char *format, va_list args)
+{
+       char *str;
+
+       if (vasprintf(&str, format, args) < 0) {
+               return;
+       }
+
+       DBG("libkmod: %s", str);
+       free(str);
+}
+
+/**
+ * @brief Set up the libkmod context.
+ *
+ * Create the context, install a custom logging function, and preload the
+ * libkmod resources for faster operation.
+ *
+ * @returns    \c 0 on success
+ *             \c < 0 on error
+ */
+static int setup_kmod_ctx(struct kmod_ctx **ctx)
+{
+       int ret = 0;
+
+       *ctx = kmod_new(NULL, NULL);
+       if (!*ctx) {
+               PERROR("Unable to create kmod library context");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       kmod_set_log_fn(*ctx, log_kmod, NULL);
+       ret = kmod_load_resources(*ctx);
+       if (ret < 0) {
+               ERR("Failed to load kmod library resources");
+               goto error;
+       }
+
+error:
+       return ret;
+}
+
+/**
+ * @brief Loads the kernel modules in \p modules
+ *
+ * @param modules      List of modules to load
+ * @param entries      Number of modules in the list
+ *
+ * If a module is required, we return with an error after the first
+ * failed module load; otherwise we continue loading.
+ *
+ * @returns            \c 0 on success
+ *                     \c < 0 on error
+ */
+static int modprobe_lttng(struct kern_modules_param *modules,
+               int entries)
+{
+       int ret = 0, i;
+       struct kmod_ctx *ctx;
+
+       ret = setup_kmod_ctx(&ctx);
+       if (ret < 0) {
+               goto error;
+       }
+
+       for (i = 0; i < entries; i++) {
+               struct kmod_module *mod = NULL;
+
+               ret = kmod_module_new_from_name(ctx, modules[i].name, &mod);
+               if (ret < 0) {
+                       PERROR("Failed to create kmod module for %s", modules[i].name);
+                       goto error;
+               }
+
+               ret = kmod_module_probe_insert_module(mod, 0,
+                               NULL, NULL, NULL, NULL);
+               if (ret == -EEXIST) {
+                       DBG("Module %s is already loaded", modules[i].name);
+                       ret = 0;
+               } else if (ret < 0) {
+                       if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED) {
+                               ERR("Unable to load required module %s",
+                                               modules[i].name);
+                               goto error;
+                       } else {
+                               DBG("Unable to load optional module %s; continuing",
+                                               modules[i].name);
+                               ret = 0;
+                       }
+               } else {
+                       DBG("Modprobe successfully %s", modules[i].name);
+                       modules[i].loaded = true;
+               }
+
+               kmod_module_unref(mod);
+       }
+
+error:
+       if (ctx) {
+               kmod_unref(ctx);
+       }
+       return ret;
+}
+
+/**
+ * @brief Recursively unload modules.
+ *
+ * This function implements the same module unloading behavior as
+ * 'modprobe -r' or rmmod: it recursively goes through the \p mod
+ * dependencies and unloads modules with a refcount of 0.
+ *
+ * @param mod          The module to unload
+ *
+ * @returns            \c 0 on success
+ *                     \c < 0 on error
+ */
+static int rmmod_recurse(struct kmod_module *mod) {
+       int ret = 0;
+       struct kmod_list *deps, *itr;
+
+       if (kmod_module_get_initstate(mod) == KMOD_MODULE_BUILTIN) {
+               DBG("Module %s is builtin", kmod_module_get_name(mod));
+               return ret;
+       }
+
+       ret = kmod_module_remove_module(mod, 0);
+
+       deps = kmod_module_get_dependencies(mod);
+       if (deps != NULL) {
+               kmod_list_foreach(itr, deps) {
+                       struct kmod_module *dep = kmod_module_get_module(itr);
+                       if (kmod_module_get_refcnt(dep) == 0) {
+                               DBG("Recursive remove module %s",
+                                               kmod_module_get_name(dep));
+                               rmmod_recurse(dep);
+                       }
+                       kmod_module_unref(dep);
+               }
+               kmod_module_unref_list(deps);
+       }
+
+       return ret;
+}
+
+/**
+ * @brief Unloads the kernel modules in \p modules
+ *
+ * @param modules      List of modules to unload
+ * @param entries      Number of modules in the list
+ *
+ */
+static void modprobe_remove_lttng(const struct kern_modules_param *modules,
+               int entries)
+{
+       int ret = 0, i;
+       struct kmod_ctx *ctx;
+
+       ret = setup_kmod_ctx(&ctx);
+       if (ret < 0) {
+               goto error;
+       }
+
+       for (i = entries - 1; i >= 0; i--) {
+               struct kmod_module *mod = NULL;
+
+               if (!modules[i].loaded) {
+                       continue;
+               }
+
+               ret = kmod_module_new_from_name(ctx, modules[i].name, &mod);
+               if (ret < 0) {
+                       PERROR("Failed to create kmod module for %s", modules[i].name);
+                       goto error;
+               }
+
+               ret = rmmod_recurse(mod);
+               if (ret == -EEXIST) {
+                       DBG("Module %s is not in kernel.", modules[i].name);
+               } else if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED && ret < 0) {
+                       ERR("Unable to remove module %s", modules[i].name);
+               } else {
+                       DBG("Modprobe removal successful %s",
+                               modules[i].name);
+               }
+
+               kmod_module_unref(mod);
+       }
+
+error:
+       if (ctx) {
+               kmod_unref(ctx);
+       }
+}
+
+#else /* HAVE_KMOD */
+
+static int modprobe_lttng(struct kern_modules_param *modules,
+               int entries)
+{
+       int ret = 0, i;
+       char modprobe[256];
+
+       for (i = 0; i < entries; i++) {
+               ret = snprintf(modprobe, sizeof(modprobe),
+                               "/sbin/modprobe %s%s",
+                               modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED ? "" : "-q ",
+                               modules[i].name);
+               if (ret < 0) {
+                       PERROR("snprintf modprobe");
+                       goto error;
+               }
+               modprobe[sizeof(modprobe) - 1] = '\0';
+               ret = system(modprobe);
+               if (ret == -1) {
+                       if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED) {
+                               ERR("Unable to launch modprobe for required module %s",
+                                               modules[i].name);
+                               goto error;
+                       } else {
+                               DBG("Unable to launch modprobe for optional module %s; continuing",
+                                               modules[i].name);
+                               ret = 0;
+                       }
+               } else if (WEXITSTATUS(ret) != 0) {
+                       if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED) {
+                               ERR("Unable to load required module %s",
+                                               modules[i].name);
+                               goto error;
+                       } else {
+                               DBG("Unable to load optional module %s; continuing",
+                                               modules[i].name);
+                               ret = 0;
+                       }
+               } else {
+                       DBG("Modprobe successfully %s", modules[i].name);
+                       modules[i].loaded = true;
+               }
+       }
+
+error:
+       return ret;
+}
+
+static void modprobe_remove_lttng(const struct kern_modules_param *modules,
+               int entries)
+{
+       int ret = 0, i;
+       char modprobe[256];
+
+       for (i = entries - 1; i >= 0; i--) {
+               if (!modules[i].loaded) {
+                       continue;
+               }
+               ret = snprintf(modprobe, sizeof(modprobe),
+                               "/sbin/modprobe -r -q %s",
+                               modules[i].name);
+               if (ret < 0) {
+                       PERROR("snprintf modprobe -r");
+                       return;
+               }
+               modprobe[sizeof(modprobe) - 1] = '\0';
+               ret = system(modprobe);
+               if (ret == -1) {
+                       if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED) {
+                               ERR("Unable to launch modprobe -r for required module %s",
+                                               modules[i].name);
+                       } else {
+                               DBG("Unable to launch modprobe -r for optional module %s",
+                                               modules[i].name);
+                       }
+               } else if (WEXITSTATUS(ret) != 0) {
+                       if (modules[i].load_policy == KERNEL_MODULE_PROPERTY_LOAD_POLICY_REQUIRED) {
+                               ERR("Unable to remove required module %s",
+                                               modules[i].name);
+                       } else {
+                               DBG("Unable to remove optional module %s",
+                                               modules[i].name);
+                       }
+               } else {
+                       DBG("Modprobe removal successful %s", modules[i].name);
+               }
+       }
+}
+
+#endif /* HAVE_KMOD */
+
+/*
+ * Remove control kernel module(s) in reverse load order.
+ */
+void modprobe_remove_lttng_control(void)
+{
+       modprobe_remove_lttng(kern_modules_control_core,
+                       ARRAY_SIZE(kern_modules_control_core));
+}
+
+static void free_probes(void)
+{
+       int i;
+
+       if (!probes) {
+               return;
+       }
+       for (i = 0; i < nr_probes; ++i) {
+               free(probes[i].name);
+       }
+       free(probes);
+       probes = NULL;
+       nr_probes = 0;
+}
+
+/*
+ * Remove data kernel modules in reverse load order.
+ */
+void modprobe_remove_lttng_data(void)
+{
+       if (!probes) {
+               return;
+       }
+
+       modprobe_remove_lttng(probes, nr_probes);
+       free_probes();
+}
+
+/*
+ * Remove all kernel modules in reverse order.
+ */
+void modprobe_remove_lttng_all(void)
+{
+       modprobe_remove_lttng_data();
+       modprobe_remove_lttng_control();
+}
+
+/*
+ * Load control kernel module(s).
+ */
+int modprobe_lttng_control(void)
+{
+       return modprobe_lttng(kern_modules_control_core,
+                       ARRAY_SIZE(kern_modules_control_core));
+}
+
+/**
+ * Grow the global list of probes: double its capacity (or set it to 1 if
+ * it is currently 0) and copy the existing entries over.
+ */
+static int grow_probes(void)
+{
+       int i;
+       struct kern_modules_param *tmp_probes;
+
+       /* Initialize capacity to 1 if 0. */
+       if (probes_capacity == 0) {
+               probes = (kern_modules_param *) zmalloc(sizeof(*probes));
+               if (!probes) {
+                       PERROR("malloc probe list");
+                       return -ENOMEM;
+               }
+
+               probes_capacity = 1;
+               return 0;
+       }
+
+       /* Double size. */
+       probes_capacity *= 2;
+
+       tmp_probes = (kern_modules_param *) zmalloc(sizeof(*tmp_probes) * probes_capacity);
+       if (!tmp_probes) {
+               PERROR("malloc probe list");
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < nr_probes; ++i) {
+               /* Ownership of 'name' field is transferred. */
+               tmp_probes[i] = probes[i];
+       }
+
+       /* Replace probes with larger copy. */
+       free(probes);
+       probes = tmp_probes;
+
+       return 0;
+}
+
+/*
+ * Appends a comma-separated list of probes to the global list
+ * of probes.
+ */
+static int append_list_to_probes(const char *list)
+{
+       char *next;
+       int ret;
+       char *tmp_list, *cur_list, *saveptr;
+
+       LTTNG_ASSERT(list);
+
+       cur_list = tmp_list = strdup(list);
+       if (!tmp_list) {
+               PERROR("strdup temp list");
+               return -ENOMEM;
+       }
+
+       for (;;) {
+               size_t name_len;
+               struct kern_modules_param *cur_mod;
+
+               next = strtok_r(cur_list, ",", &saveptr);
+               if (!next) {
+                       break;
+               }
+               cur_list = NULL;
+
+               /* filter leading spaces */
+               while (*next == ' ') {
+                       next++;
+               }
+
+               if (probes_capacity <= nr_probes) {
+                       ret = grow_probes();
+                       if (ret) {
+                               goto error;
+                       }
+               }
+
+               /* Length 13 is "lttng-probe-" + \0 */
+               name_len = strlen(next) + 13;
+
+               cur_mod = &probes[nr_probes];
+               cur_mod->name = (char *) zmalloc(name_len);
+               if (!cur_mod->name) {
+                       PERROR("malloc probe list");
+                       ret = -ENOMEM;
+                       goto error;
+               }
+
+               ret = snprintf(cur_mod->name, name_len, "lttng-probe-%s", next);
+               if (ret < 0) {
+                       PERROR("snprintf modprobe name");
+                       ret = -ENOMEM;
+                       goto error;
+               }
+
+               cur_mod->load_policy = KERNEL_MODULE_PROPERTY_LOAD_POLICY_OPTIONAL;
+
+               nr_probes++;
+       }
+
+       free(tmp_list);
+       return 0;
+
+error:
+       free(tmp_list);
+       free_probes();
+       return ret;
+}
+
+/*
+ * Load data kernel module(s).
+ */
+int modprobe_lttng_data(void)
+{
+       int ret, i;
+       char *list;
+
+       /*
+        * Base probes: either from command line option, environment
+        * variable or default list.
+        */
+       list = the_config.kmod_probes_list.value;
+       if (list) {
+               /* User-specified probes. */
+               ret = append_list_to_probes(list);
+               if (ret) {
+                       return ret;
+               }
+       } else {
+               /* Default probes. */
+               int def_len = ARRAY_SIZE(kern_modules_probes_default);
+
+               probes = (kern_modules_param *) zmalloc(sizeof(*probes) * def_len);
+               if (!probes) {
+                       PERROR("malloc probe list");
+                       return -ENOMEM;
+               }
+
+               nr_probes = probes_capacity = def_len;
+
+               for (i = 0; i < def_len; ++i) {
+                       char* name = strdup(kern_modules_probes_default[i].name);
+
+                       if (!name) {
+                               PERROR("strdup probe item");
+                               ret = -ENOMEM;
+                               goto error;
+                       }
+
+                       probes[i].name = name;
+                       probes[i].load_policy = kern_modules_probes_default[i].load_policy;
+               }
+       }
+
+       /*
+        * Extra modules? Append them to current probes list.
+        */
+       list = the_config.kmod_extra_probes_list.value;
+       if (list) {
+               ret = append_list_to_probes(list);
+               if (ret) {
+                       goto error;
+               }
+       }
+
+       /*
+        * Load probes modules now.
+        */
+       ret = modprobe_lttng(probes, nr_probes);
+       if (ret) {
+               goto error;
+       }
+       return ret;
+
+error:
+       free_probes();
+       return ret;
+}
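
A notable mechanical change between the removed modprobe.c and the new modprobe.cpp above is the explicit casts added at the zmalloc() call sites (in grow_probes(), append_list_to_probes() and modprobe_lttng_data()): C++ rejects the implicit void * to object-pointer conversion that C performs silently. A minimal, self-contained sketch of that rule follows; it is not part of the patch, and probe_entry, alloc_probe_entries() and the use of std::calloc() as a stand-in for zmalloc() are illustrative only.

    #include <cstdlib>

    struct probe_entry { char *name; };

    static probe_entry *alloc_probe_entries(std::size_t count)
    {
            /*
             * calloc() zero-initializes, mirroring zmalloc(); the explicit
             * conversion (here a static_cast) is what C++ requires and what
             * the converted file adds in front of each zmalloc() call.
             */
            return static_cast<probe_entry *>(
                            std::calloc(count, sizeof(probe_entry)));
    }

In C, the same assignment compiles without a cast, which is why the original modprobe.c could call zmalloc() directly.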
diff --git a/src/bin/lttng-sessiond/notification-thread-commands.c b/src/bin/lttng-sessiond/notification-thread-commands.c
deleted file mode 100644 (file)
index 2908ccb..0000000
+++ /dev/null
@@ -1,452 +0,0 @@
-/*
- * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <lttng/trigger/trigger.h>
-#include <lttng/lttng-error.h>
-#include "notification-thread.h"
-#include "notification-thread-commands.h"
-#include <common/error.h>
-#include <unistd.h>
-#include <stdint.h>
-#include <inttypes.h>
-
-static
-void init_notification_thread_command(struct notification_thread_command *cmd)
-{
-       CDS_INIT_LIST_HEAD(&cmd->cmd_list_node);
-       lttng_waiter_init(&cmd->reply_waiter);
-}
-
-static
-int run_command_wait(struct notification_thread_handle *handle,
-               struct notification_thread_command *cmd)
-{
-       int ret;
-       uint64_t notification_counter = 1;
-
-       pthread_mutex_lock(&handle->cmd_queue.lock);
-       /* Add to queue. */
-       cds_list_add_tail(&cmd->cmd_list_node,
-                       &handle->cmd_queue.list);
-       /* Wake-up thread. */
-       ret = lttng_write(lttng_pipe_get_writefd(handle->cmd_queue.event_pipe),
-                       &notification_counter, sizeof(notification_counter));
-       if (ret != sizeof(notification_counter)) {
-               PERROR("write to notification thread's queue event fd");
-               /*
-                * Remove the command from the list so the notification
-                * thread does not process it.
-                */
-               cds_list_del(&cmd->cmd_list_node);
-               goto error_unlock_queue;
-       }
-       pthread_mutex_unlock(&handle->cmd_queue.lock);
-
-       lttng_waiter_wait(&cmd->reply_waiter);
-       return 0;
-error_unlock_queue:
-       pthread_mutex_unlock(&handle->cmd_queue.lock);
-       return -1;
-}
-
-static
-struct notification_thread_command *notification_thread_command_copy(
-       const struct notification_thread_command *original_cmd)
-{
-       struct notification_thread_command *new_cmd;
-
-       new_cmd = zmalloc(sizeof(*new_cmd));
-       if (!new_cmd) {
-               goto end;
-       }
-
-       *new_cmd = *original_cmd;
-       init_notification_thread_command(new_cmd);
-end:
-       return new_cmd;
-}
-
-static
-int run_command_no_wait(struct notification_thread_handle *handle,
-               const struct notification_thread_command *in_cmd)
-{
-       int ret;
-       uint64_t notification_counter = 1;
-       struct notification_thread_command *new_cmd =
-                       notification_thread_command_copy(in_cmd);
-
-       if (!new_cmd) {
-               goto error;
-       }
-       new_cmd->is_async = true;
-
-       pthread_mutex_lock(&handle->cmd_queue.lock);
-       /* Add to queue. */
-       cds_list_add_tail(&new_cmd->cmd_list_node,
-                       &handle->cmd_queue.list);
-       /* Wake-up thread. */
-       ret = lttng_write(lttng_pipe_get_writefd(handle->cmd_queue.event_pipe),
-                       &notification_counter, sizeof(notification_counter));
-       if (ret != sizeof(notification_counter)) {
-               PERROR("write to notification thread's queue event fd");
-               /*
-                * Remove the command from the list so the notification
-                * thread does not process it.
-                */
-               cds_list_del(&new_cmd->cmd_list_node);
-               goto error_unlock_queue;
-       }
-       pthread_mutex_unlock(&handle->cmd_queue.lock);
-       return 0;
-error_unlock_queue:
-       free(new_cmd);
-       pthread_mutex_unlock(&handle->cmd_queue.lock);
-error:
-       return -1;
-}
-
-enum lttng_error_code notification_thread_command_register_trigger(
-               struct notification_thread_handle *handle,
-               struct lttng_trigger *trigger,
-               bool is_trigger_anonymous)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct notification_thread_command cmd = {};
-
-       LTTNG_ASSERT(trigger);
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_REGISTER_TRIGGER;
-       lttng_trigger_get(trigger);
-       cmd.parameters.register_trigger.trigger = trigger;
-       cmd.parameters.register_trigger.is_trigger_anonymous =
-                       is_trigger_anonymous;
-
-       ret = run_command_wait(handle, &cmd);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-       ret_code = cmd.reply_code;
-end:
-       return ret_code;
-}
-
-enum lttng_error_code notification_thread_command_unregister_trigger(
-               struct notification_thread_handle *handle,
-               const struct lttng_trigger *trigger)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct notification_thread_command cmd = {};
-
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_UNREGISTER_TRIGGER;
-       cmd.parameters.unregister_trigger.trigger = trigger;
-
-       ret = run_command_wait(handle, &cmd);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-       ret_code = cmd.reply_code;
-end:
-       return ret_code;
-}
-
-enum lttng_error_code notification_thread_command_add_channel(
-               struct notification_thread_handle *handle,
-               char *session_name, uid_t uid, gid_t gid,
-               char *channel_name, uint64_t key,
-               enum lttng_domain_type domain, uint64_t capacity)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct notification_thread_command cmd = {};
-
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_ADD_CHANNEL;
-       cmd.parameters.add_channel.session.name = session_name;
-       cmd.parameters.add_channel.session.uid = uid;
-       cmd.parameters.add_channel.session.gid = gid;
-       cmd.parameters.add_channel.channel.name = channel_name;
-       cmd.parameters.add_channel.channel.key = key;
-       cmd.parameters.add_channel.channel.domain = domain;
-       cmd.parameters.add_channel.channel.capacity = capacity;
-
-       ret = run_command_wait(handle, &cmd);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-       ret_code = cmd.reply_code;
-end:
-       return ret_code;
-}
-
-enum lttng_error_code notification_thread_command_remove_channel(
-               struct notification_thread_handle *handle,
-               uint64_t key, enum lttng_domain_type domain)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct notification_thread_command cmd = {};
-
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_REMOVE_CHANNEL;
-       cmd.parameters.remove_channel.key = key;
-       cmd.parameters.remove_channel.domain = domain;
-
-       ret = run_command_wait(handle, &cmd);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-       ret_code = cmd.reply_code;
-end:
-       return ret_code;
-}
-
-enum lttng_error_code notification_thread_command_session_rotation_ongoing(
-               struct notification_thread_handle *handle,
-               const char *session_name, uid_t uid, gid_t gid,
-               uint64_t trace_archive_chunk_id)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct notification_thread_command cmd = {};
-
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING;
-       cmd.parameters.session_rotation.session_name = session_name;
-       cmd.parameters.session_rotation.uid = uid;
-       cmd.parameters.session_rotation.gid = gid;
-       cmd.parameters.session_rotation.trace_archive_chunk_id =
-                       trace_archive_chunk_id;
-
-       ret = run_command_wait(handle, &cmd);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-       ret_code = cmd.reply_code;
-end:
-       return ret_code;
-}
-
-enum lttng_error_code notification_thread_command_session_rotation_completed(
-               struct notification_thread_handle *handle,
-               const char *session_name, uid_t uid, gid_t gid,
-               uint64_t trace_archive_chunk_id,
-               struct lttng_trace_archive_location *location)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct notification_thread_command cmd = {};
-
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED;
-       cmd.parameters.session_rotation.session_name = session_name;
-       cmd.parameters.session_rotation.uid = uid;
-       cmd.parameters.session_rotation.gid = gid;
-       cmd.parameters.session_rotation.trace_archive_chunk_id =
-                       trace_archive_chunk_id;
-       cmd.parameters.session_rotation.location = location;
-
-       ret = run_command_wait(handle, &cmd);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-       ret_code = cmd.reply_code;
-end:
-       return ret_code;
-}
-
-enum lttng_error_code notification_thread_command_add_tracer_event_source(
-               struct notification_thread_handle *handle,
-               int tracer_event_source_fd,
-               enum lttng_domain_type domain)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct notification_thread_command cmd = {};
-
-       LTTNG_ASSERT(tracer_event_source_fd >= 0);
-
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_ADD_TRACER_EVENT_SOURCE;
-       cmd.parameters.tracer_event_source.tracer_event_source_fd =
-                       tracer_event_source_fd;
-       cmd.parameters.tracer_event_source.domain = domain;
-
-       ret = run_command_wait(handle, &cmd);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-
-       ret_code = cmd.reply_code;
-end:
-       return ret_code;
-}
-
-enum lttng_error_code notification_thread_command_remove_tracer_event_source(
-               struct notification_thread_handle *handle,
-               int tracer_event_source_fd)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct notification_thread_command cmd = {};
-
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_REMOVE_TRACER_EVENT_SOURCE;
-       cmd.parameters.tracer_event_source.tracer_event_source_fd =
-                       tracer_event_source_fd;
-
-       ret = run_command_wait(handle, &cmd);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-
-       ret_code = cmd.reply_code;
-end:
-       return ret_code;
-}
-
-enum lttng_error_code notification_thread_command_list_triggers(
-               struct notification_thread_handle *handle,
-               uid_t uid,
-               struct lttng_triggers **triggers)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct notification_thread_command cmd = {};
-
-       LTTNG_ASSERT(handle);
-       LTTNG_ASSERT(triggers);
-
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS;
-       cmd.parameters.list_triggers.uid = uid;
-
-       ret = run_command_wait(handle, &cmd);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-
-       ret_code = cmd.reply_code;
-       *triggers = cmd.reply.list_triggers.triggers;
-
-end:
-       return ret_code;
-}
-
-void notification_thread_command_quit(
-               struct notification_thread_handle *handle)
-{
-       int ret;
-       struct notification_thread_command cmd = {};
-
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_QUIT;
-       ret = run_command_wait(handle, &cmd);
-       LTTNG_ASSERT(!ret && cmd.reply_code == LTTNG_OK);
-}
-
-int notification_thread_client_communication_update(
-               struct notification_thread_handle *handle,
-               notification_client_id id,
-               enum client_transmission_status transmission_status)
-{
-       struct notification_thread_command cmd = {};
-
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_CLIENT_COMMUNICATION_UPDATE;
-       cmd.parameters.client_communication_update.id = id;
-       cmd.parameters.client_communication_update.status = transmission_status;
-       return run_command_no_wait(handle, &cmd);
-}
-
-enum lttng_error_code notification_thread_command_get_trigger(
-               struct notification_thread_handle *handle,
-               const struct lttng_trigger *trigger,
-               struct lttng_trigger **real_trigger)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct notification_thread_command cmd = {};
-
-       init_notification_thread_command(&cmd);
-
-       cmd.type = NOTIFICATION_COMMAND_TYPE_GET_TRIGGER;
-       cmd.parameters.get_trigger.trigger = trigger;
-       ret = run_command_wait(handle, &cmd);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-
-       ret_code = cmd.reply_code;
-       *real_trigger = cmd.reply.get_trigger.trigger;
-
-end:
-       return ret_code;
-}
-
-/*
- * Takes ownership of the payload if present.
- */
-struct lttng_event_notifier_notification *lttng_event_notifier_notification_create(
-               uint64_t tracer_token,
-               enum lttng_domain_type domain,
-               char *payload,
-               size_t payload_size)
-{
-       struct lttng_event_notifier_notification *notification = NULL;
-
-       LTTNG_ASSERT(domain != LTTNG_DOMAIN_NONE);
-       LTTNG_ASSERT((payload && payload_size) || (!payload && !payload_size));
-
-       notification = zmalloc(sizeof(struct lttng_event_notifier_notification));
-       if (notification == NULL) {
-               ERR("Error allocating notification");
-               goto end;
-       }
-
-       notification->tracer_token = tracer_token;
-       notification->type = domain;
-       notification->capture_buffer = payload;
-       notification->capture_buf_size = payload_size;
-
-end:
-       return notification;
-}
-
-void lttng_event_notifier_notification_destroy(
-               struct lttng_event_notifier_notification *notification)
-{
-       if (!notification) {
-               return;
-       }
-
-       free(notification->capture_buffer);
-       free(notification);
-}
diff --git a/src/bin/lttng-sessiond/notification-thread-commands.cpp b/src/bin/lttng-sessiond/notification-thread-commands.cpp
new file mode 100644 (file)
index 0000000..6009511
--- /dev/null
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <lttng/trigger/trigger.h>
+#include <lttng/lttng-error.h>
+#include "notification-thread.h"
+#include "notification-thread-commands.h"
+#include <common/error.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+static
+void init_notification_thread_command(struct notification_thread_command *cmd)
+{
+       CDS_INIT_LIST_HEAD(&cmd->cmd_list_node);
+       lttng_waiter_init(&cmd->reply_waiter);
+}
+
+static
+int run_command_wait(struct notification_thread_handle *handle,
+               struct notification_thread_command *cmd)
+{
+       int ret;
+       uint64_t notification_counter = 1;
+
+       pthread_mutex_lock(&handle->cmd_queue.lock);
+       /* Add to queue. */
+       cds_list_add_tail(&cmd->cmd_list_node,
+                       &handle->cmd_queue.list);
+       /* Wake-up thread. */
+       ret = lttng_write(lttng_pipe_get_writefd(handle->cmd_queue.event_pipe),
+                       &notification_counter, sizeof(notification_counter));
+       if (ret != sizeof(notification_counter)) {
+               PERROR("write to notification thread's queue event fd");
+               /*
+                * Remove the command from the list so the notification
+                * thread does not process it.
+                */
+               cds_list_del(&cmd->cmd_list_node);
+               goto error_unlock_queue;
+       }
+       pthread_mutex_unlock(&handle->cmd_queue.lock);
+
+       lttng_waiter_wait(&cmd->reply_waiter);
+       return 0;
+error_unlock_queue:
+       pthread_mutex_unlock(&handle->cmd_queue.lock);
+       return -1;
+}
+
+static
+struct notification_thread_command *notification_thread_command_copy(
+       const struct notification_thread_command *original_cmd)
+{
+       struct notification_thread_command *new_cmd;
+
+       new_cmd = (notification_thread_command *) zmalloc(sizeof(*new_cmd));
+       if (!new_cmd) {
+               goto end;
+       }
+
+       *new_cmd = *original_cmd;
+       init_notification_thread_command(new_cmd);
+end:
+       return new_cmd;
+}
+
+static
+int run_command_no_wait(struct notification_thread_handle *handle,
+               const struct notification_thread_command *in_cmd)
+{
+       int ret;
+       uint64_t notification_counter = 1;
+       struct notification_thread_command *new_cmd =
+                       notification_thread_command_copy(in_cmd);
+
+       if (!new_cmd) {
+               goto error;
+       }
+       new_cmd->is_async = true;
+
+       pthread_mutex_lock(&handle->cmd_queue.lock);
+       /* Add to queue. */
+       cds_list_add_tail(&new_cmd->cmd_list_node,
+                       &handle->cmd_queue.list);
+       /* Wake-up thread. */
+       ret = lttng_write(lttng_pipe_get_writefd(handle->cmd_queue.event_pipe),
+                       &notification_counter, sizeof(notification_counter));
+       if (ret != sizeof(notification_counter)) {
+               PERROR("write to notification thread's queue event fd");
+               /*
+                * Remove the command from the list so the notification
+                * thread does not process it.
+                */
+               cds_list_del(&new_cmd->cmd_list_node);
+               goto error_unlock_queue;
+       }
+       pthread_mutex_unlock(&handle->cmd_queue.lock);
+       return 0;
+error_unlock_queue:
+       free(new_cmd);
+       pthread_mutex_unlock(&handle->cmd_queue.lock);
+error:
+       return -1;
+}
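+
+/*
+ * Command dispatch protocol implemented by the two helpers above: the command
+ * is queued under cmd_queue.lock and the notification thread is woken up by
+ * writing a counter to the queue's event pipe. run_command_wait() then blocks
+ * on cmd->reply_waiter until the notification thread has set cmd->reply_code,
+ * while run_command_no_wait() enqueues a heap-allocated copy of the command
+ * (is_async = true) and returns immediately, handing ownership of that copy
+ * over to the notification thread.
+ */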
+
+enum lttng_error_code notification_thread_command_register_trigger(
+               struct notification_thread_handle *handle,
+               struct lttng_trigger *trigger,
+               bool is_trigger_anonymous)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct notification_thread_command cmd = {};
+
+       LTTNG_ASSERT(trigger);
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_REGISTER_TRIGGER;
+       lttng_trigger_get(trigger);
+       cmd.parameters.register_trigger.trigger = trigger;
+       cmd.parameters.register_trigger.is_trigger_anonymous =
+                       is_trigger_anonymous;
+
+       ret = run_command_wait(handle, &cmd);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+       ret_code = cmd.reply_code;
+end:
+       return ret_code;
+}
+
+enum lttng_error_code notification_thread_command_unregister_trigger(
+               struct notification_thread_handle *handle,
+               const struct lttng_trigger *trigger)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct notification_thread_command cmd = {};
+
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_UNREGISTER_TRIGGER;
+       cmd.parameters.unregister_trigger.trigger = trigger;
+
+       ret = run_command_wait(handle, &cmd);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+       ret_code = cmd.reply_code;
+end:
+       return ret_code;
+}
+
+enum lttng_error_code notification_thread_command_add_channel(
+               struct notification_thread_handle *handle,
+               char *session_name, uid_t uid, gid_t gid,
+               char *channel_name, uint64_t key,
+               enum lttng_domain_type domain, uint64_t capacity)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct notification_thread_command cmd = {};
+
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_ADD_CHANNEL;
+       cmd.parameters.add_channel.session.name = session_name;
+       cmd.parameters.add_channel.session.uid = uid;
+       cmd.parameters.add_channel.session.gid = gid;
+       cmd.parameters.add_channel.channel.name = channel_name;
+       cmd.parameters.add_channel.channel.key = key;
+       cmd.parameters.add_channel.channel.domain = domain;
+       cmd.parameters.add_channel.channel.capacity = capacity;
+
+       ret = run_command_wait(handle, &cmd);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+       ret_code = cmd.reply_code;
+end:
+       return ret_code;
+}
+
+enum lttng_error_code notification_thread_command_remove_channel(
+               struct notification_thread_handle *handle,
+               uint64_t key, enum lttng_domain_type domain)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct notification_thread_command cmd = {};
+
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_REMOVE_CHANNEL;
+       cmd.parameters.remove_channel.key = key;
+       cmd.parameters.remove_channel.domain = domain;
+
+       ret = run_command_wait(handle, &cmd);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+       ret_code = cmd.reply_code;
+end:
+       return ret_code;
+}
+
+enum lttng_error_code notification_thread_command_session_rotation_ongoing(
+               struct notification_thread_handle *handle,
+               const char *session_name, uid_t uid, gid_t gid,
+               uint64_t trace_archive_chunk_id)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct notification_thread_command cmd = {};
+
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING;
+       cmd.parameters.session_rotation.session_name = session_name;
+       cmd.parameters.session_rotation.uid = uid;
+       cmd.parameters.session_rotation.gid = gid;
+       cmd.parameters.session_rotation.trace_archive_chunk_id =
+                       trace_archive_chunk_id;
+
+       ret = run_command_wait(handle, &cmd);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+       ret_code = cmd.reply_code;
+end:
+       return ret_code;
+}
+
+enum lttng_error_code notification_thread_command_session_rotation_completed(
+               struct notification_thread_handle *handle,
+               const char *session_name, uid_t uid, gid_t gid,
+               uint64_t trace_archive_chunk_id,
+               struct lttng_trace_archive_location *location)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct notification_thread_command cmd = {};
+
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED;
+       cmd.parameters.session_rotation.session_name = session_name;
+       cmd.parameters.session_rotation.uid = uid;
+       cmd.parameters.session_rotation.gid = gid;
+       cmd.parameters.session_rotation.trace_archive_chunk_id =
+                       trace_archive_chunk_id;
+       cmd.parameters.session_rotation.location = location;
+
+       ret = run_command_wait(handle, &cmd);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+       ret_code = cmd.reply_code;
+end:
+       return ret_code;
+}
+
+enum lttng_error_code notification_thread_command_add_tracer_event_source(
+               struct notification_thread_handle *handle,
+               int tracer_event_source_fd,
+               enum lttng_domain_type domain)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct notification_thread_command cmd = {};
+
+       LTTNG_ASSERT(tracer_event_source_fd >= 0);
+
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_ADD_TRACER_EVENT_SOURCE;
+       cmd.parameters.tracer_event_source.tracer_event_source_fd =
+                       tracer_event_source_fd;
+       cmd.parameters.tracer_event_source.domain = domain;
+
+       ret = run_command_wait(handle, &cmd);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+
+       ret_code = cmd.reply_code;
+end:
+       return ret_code;
+}
+
+enum lttng_error_code notification_thread_command_remove_tracer_event_source(
+               struct notification_thread_handle *handle,
+               int tracer_event_source_fd)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct notification_thread_command cmd = {};
+
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_REMOVE_TRACER_EVENT_SOURCE;
+       cmd.parameters.tracer_event_source.tracer_event_source_fd =
+                       tracer_event_source_fd;
+
+       ret = run_command_wait(handle, &cmd);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+
+       ret_code = cmd.reply_code;
+end:
+       return ret_code;
+}
+
+enum lttng_error_code notification_thread_command_list_triggers(
+               struct notification_thread_handle *handle,
+               uid_t uid,
+               struct lttng_triggers **triggers)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct notification_thread_command cmd = {};
+
+       LTTNG_ASSERT(handle);
+       LTTNG_ASSERT(triggers);
+
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS;
+       cmd.parameters.list_triggers.uid = uid;
+
+       ret = run_command_wait(handle, &cmd);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+
+       ret_code = cmd.reply_code;
+       *triggers = cmd.reply.list_triggers.triggers;
+
+end:
+       return ret_code;
+}
+
+void notification_thread_command_quit(
+               struct notification_thread_handle *handle)
+{
+       int ret;
+       struct notification_thread_command cmd = {};
+
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_QUIT;
+       ret = run_command_wait(handle, &cmd);
+       LTTNG_ASSERT(!ret && cmd.reply_code == LTTNG_OK);
+}
+
+int notification_thread_client_communication_update(
+               struct notification_thread_handle *handle,
+               notification_client_id id,
+               enum client_transmission_status transmission_status)
+{
+       struct notification_thread_command cmd = {};
+
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_CLIENT_COMMUNICATION_UPDATE;
+       cmd.parameters.client_communication_update.id = id;
+       cmd.parameters.client_communication_update.status = transmission_status;
+       return run_command_no_wait(handle, &cmd);
+}
+
+enum lttng_error_code notification_thread_command_get_trigger(
+               struct notification_thread_handle *handle,
+               const struct lttng_trigger *trigger,
+               struct lttng_trigger **real_trigger)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct notification_thread_command cmd = {};
+
+       init_notification_thread_command(&cmd);
+
+       cmd.type = NOTIFICATION_COMMAND_TYPE_GET_TRIGGER;
+       cmd.parameters.get_trigger.trigger = trigger;
+       ret = run_command_wait(handle, &cmd);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+
+       ret_code = cmd.reply_code;
+       *real_trigger = cmd.reply.get_trigger.trigger;
+
+end:
+       return ret_code;
+}
+
+/*
+ * Takes ownership of the payload if present.
+ */
+struct lttng_event_notifier_notification *lttng_event_notifier_notification_create(
+               uint64_t tracer_token,
+               enum lttng_domain_type domain,
+               char *payload,
+               size_t payload_size)
+{
+       struct lttng_event_notifier_notification *notification = NULL;
+
+       LTTNG_ASSERT(domain != LTTNG_DOMAIN_NONE);
+       LTTNG_ASSERT((payload && payload_size) || (!payload && !payload_size));
+
+       notification = (lttng_event_notifier_notification *) zmalloc(sizeof(struct lttng_event_notifier_notification));
+       if (notification == NULL) {
+               ERR("Error allocating notification");
+               goto end;
+       }
+
+       notification->tracer_token = tracer_token;
+       notification->type = domain;
+       notification->capture_buffer = payload;
+       notification->capture_buf_size = payload_size;
+
+end:
+       return notification;
+}
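+
+/*
+ * Note that the capture payload is released with free() in
+ * lttng_event_notifier_notification_destroy(), so callers must pass a
+ * heap-allocated buffer (or NULL).
+ */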
+
+void lttng_event_notifier_notification_destroy(
+               struct lttng_event_notifier_notification *notification)
+{
+       if (!notification) {
+               return;
+       }
+
+       free(notification->capture_buffer);
+       free(notification);
+}
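
As an aside on the conversion pattern above: C accepts the implicit conversion
from the void * returned by zmalloc() to any object pointer, but C++ does not,
so the new .cpp adds explicit casts at the allocation sites. Roughly, comparing
lttng_event_notifier_notification_create() in the deleted .c and the new .cpp:

    /* C (deleted file): implicit conversion from void * is accepted. */
    notification = zmalloc(sizeof(struct lttng_event_notifier_notification));

    /* C++ (new file): an explicit cast is required. */
    notification = (lttng_event_notifier_notification *) zmalloc(
            sizeof(struct lttng_event_notifier_notification));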
diff --git a/src/bin/lttng-sessiond/notification-thread-events.c b/src/bin/lttng-sessiond/notification-thread-events.c
deleted file mode 100644 (file)
index a295739..0000000
+++ /dev/null
@@ -1,4983 +0,0 @@
-/*
- * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include "lttng/action/action.h"
-#include "lttng/trigger/trigger-internal.h"
-#define _LGPL_SOURCE
-#include <urcu.h>
-#include <urcu/rculfhash.h>
-
-#include <common/defaults.h>
-#include <common/error.h>
-#include <common/futex.h>
-#include <common/unix.h>
-#include <common/dynamic-buffer.h>
-#include <common/hashtable/utils.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-#include <common/macros.h>
-#include <lttng/condition/condition.h>
-#include <lttng/action/action-internal.h>
-#include <lttng/action/list-internal.h>
-#include <lttng/domain-internal.h>
-#include <lttng/notification/notification-internal.h>
-#include <lttng/condition/condition-internal.h>
-#include <lttng/condition/buffer-usage-internal.h>
-#include <lttng/condition/session-consumed-size-internal.h>
-#include <lttng/condition/session-rotation-internal.h>
-#include <lttng/condition/event-rule-matches-internal.h>
-#include <lttng/domain-internal.h>
-#include <lttng/notification/channel-internal.h>
-#include <lttng/trigger/trigger-internal.h>
-#include <lttng/event-rule/event-rule-internal.h>
-
-#include <time.h>
-#include <unistd.h>
-#include <inttypes.h>
-#include <fcntl.h>
-
-#include "condition-internal.h"
-#include "event-notifier-error-accounting.h"
-#include "notification-thread.h"
-#include "notification-thread-events.h"
-#include "notification-thread-commands.h"
-#include "lttng-sessiond.h"
-#include "kernel.h"
-
-#define CLIENT_POLL_MASK_IN (LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP)
-#define CLIENT_POLL_MASK_IN_OUT (CLIENT_POLL_MASK_IN | LPOLLOUT)
-
-/* The tracers currently limit the capture size to PIPE_BUF (4kb on linux). */
-#define MAX_CAPTURE_SIZE (PIPE_BUF)
-
-enum lttng_object_type {
-       LTTNG_OBJECT_TYPE_UNKNOWN,
-       LTTNG_OBJECT_TYPE_NONE,
-       LTTNG_OBJECT_TYPE_CHANNEL,
-       LTTNG_OBJECT_TYPE_SESSION,
-};
-
-struct lttng_trigger_list_element {
-       /* No ownership of the trigger object is assumed. */
-       struct lttng_trigger *trigger;
-       struct cds_list_head node;
-};
-
-struct lttng_channel_trigger_list {
-       struct channel_key channel_key;
-       /* List of struct lttng_trigger_list_element. */
-       struct cds_list_head list;
-       /* Node in the channel_triggers_ht */
-       struct cds_lfht_node channel_triggers_ht_node;
-       /* call_rcu delayed reclaim. */
-       struct rcu_head rcu_node;
-};
-
-/*
- * List of triggers applying to a given session.
- *
- * See:
- *   - lttng_session_trigger_list_create()
- *   - lttng_session_trigger_list_build()
- *   - lttng_session_trigger_list_destroy()
- *   - lttng_session_trigger_list_add()
- */
-struct lttng_session_trigger_list {
-       /*
-        * Not owned by this; points to the session_info structure's
-        * session name.
-        */
-       const char *session_name;
-       /* List of struct lttng_trigger_list_element. */
-       struct cds_list_head list;
-       /* Node in the session_triggers_ht */
-       struct cds_lfht_node session_triggers_ht_node;
-       /*
-        * Weak reference to the notification system's session triggers
-        * hashtable.
-        *
-        * The session trigger list structure is owned by
-        * the session's session_info.
-        *
-        * The session_info is kept alive by the channel_infos holding a
-        * reference to it (reference counting). When those channels are
-        * destroyed (at runtime or on teardown), the references they hold
-        * to the session_info are released. On destruction of session_info,
-        * session_info_destroy() will remove the list of triggers applying
-        * to this session from the notification system's state.
-        *
-        * This implies that the session_triggers_ht must be destroyed
-        * after the channels.
-        */
-       struct cds_lfht *session_triggers_ht;
-       /* Used for delayed RCU reclaim. */
-       struct rcu_head rcu_node;
-};
-
-struct lttng_trigger_ht_element {
-       struct lttng_trigger *trigger;
-       struct cds_lfht_node node;
-       struct cds_lfht_node node_by_name_uid;
-       struct cds_list_head client_list_trigger_node;
-       /* call_rcu delayed reclaim. */
-       struct rcu_head rcu_node;
-};
-
-struct lttng_condition_list_element {
-       struct lttng_condition *condition;
-       struct cds_list_head node;
-};
-
-struct channel_state_sample {
-       struct channel_key key;
-       struct cds_lfht_node channel_state_ht_node;
-       uint64_t highest_usage;
-       uint64_t lowest_usage;
-       uint64_t channel_total_consumed;
-       /* call_rcu delayed reclaim. */
-       struct rcu_head rcu_node;
-};
-
-static unsigned long hash_channel_key(struct channel_key *key);
-static int evaluate_buffer_condition(const struct lttng_condition *condition,
-               struct lttng_evaluation **evaluation,
-               const struct notification_thread_state *state,
-               const struct channel_state_sample *previous_sample,
-               const struct channel_state_sample *latest_sample,
-               uint64_t previous_session_consumed_total,
-               uint64_t latest_session_consumed_total,
-               struct channel_info *channel_info);
-static
-int send_evaluation_to_clients(const struct lttng_trigger *trigger,
-               const struct lttng_evaluation *evaluation,
-               struct notification_client_list *client_list,
-               struct notification_thread_state *state,
-               uid_t channel_uid, gid_t channel_gid);
-
-
-/* session_info API */
-static
-void session_info_destroy(void *_data);
-static
-void session_info_get(struct session_info *session_info);
-static
-void session_info_put(struct session_info *session_info);
-static
-struct session_info *session_info_create(const char *name,
-               uid_t uid, gid_t gid,
-               struct lttng_session_trigger_list *trigger_list,
-               struct cds_lfht *sessions_ht);
-static
-void session_info_add_channel(struct session_info *session_info,
-               struct channel_info *channel_info);
-static
-void session_info_remove_channel(struct session_info *session_info,
-               struct channel_info *channel_info);
-
-/* lttng_session_trigger_list API */
-static
-struct lttng_session_trigger_list *lttng_session_trigger_list_create(
-               const char *session_name,
-               struct cds_lfht *session_triggers_ht);
-static
-struct lttng_session_trigger_list *lttng_session_trigger_list_build(
-               const struct notification_thread_state *state,
-               const char *session_name);
-static
-void lttng_session_trigger_list_destroy(
-               struct lttng_session_trigger_list *list);
-static
-int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
-               struct lttng_trigger *trigger);
-
-static
-int client_handle_transmission_status(
-               struct notification_client *client,
-               enum client_transmission_status transmission_status,
-               struct notification_thread_state *state);
-
-static
-int handle_one_event_notifier_notification(
-               struct notification_thread_state *state,
-               int pipe, enum lttng_domain_type domain);
-
-static
-void free_lttng_trigger_ht_element_rcu(struct rcu_head *node);
-
-static
-int match_client_socket(struct cds_lfht_node *node, const void *key)
-{
-       /* This double-cast is intended to suppress the pointer-to-integer cast warning. */
-       const int socket = (int) (intptr_t) key;
-       const struct notification_client *client = caa_container_of(node,
-                       struct notification_client, client_socket_ht_node);
-
-       return client->socket == socket;
-}
-
-static
-int match_client_id(struct cds_lfht_node *node, const void *key)
-{
-       /* This cast is intended to suppress the pointer-cast warning. */
-       const notification_client_id id = *((notification_client_id *) key);
-       const struct notification_client *client = caa_container_of(
-                       node, struct notification_client, client_id_ht_node);
-
-       return client->id == id;
-}
-
-static
-int match_channel_trigger_list(struct cds_lfht_node *node, const void *key)
-{
-       struct channel_key *channel_key = (struct channel_key *) key;
-       struct lttng_channel_trigger_list *trigger_list;
-
-       trigger_list = caa_container_of(node, struct lttng_channel_trigger_list,
-                       channel_triggers_ht_node);
-
-       return !!((channel_key->key == trigger_list->channel_key.key) &&
-                       (channel_key->domain == trigger_list->channel_key.domain));
-}
-
-static
-int match_session_trigger_list(struct cds_lfht_node *node, const void *key)
-{
-       const char *session_name = (const char *) key;
-       struct lttng_session_trigger_list *trigger_list;
-
-       trigger_list = caa_container_of(node, struct lttng_session_trigger_list,
-                       session_triggers_ht_node);
-
-       return !!(strcmp(trigger_list->session_name, session_name) == 0);
-}
-
-static
-int match_channel_state_sample(struct cds_lfht_node *node, const void *key)
-{
-       struct channel_key *channel_key = (struct channel_key *) key;
-       struct channel_state_sample *sample;
-
-       sample = caa_container_of(node, struct channel_state_sample,
-                       channel_state_ht_node);
-
-       return !!((channel_key->key == sample->key.key) &&
-                       (channel_key->domain == sample->key.domain));
-}
-
-static
-int match_channel_info(struct cds_lfht_node *node, const void *key)
-{
-       struct channel_key *channel_key = (struct channel_key *) key;
-       struct channel_info *channel_info;
-
-       channel_info = caa_container_of(node, struct channel_info,
-                       channels_ht_node);
-
-       return !!((channel_key->key == channel_info->key.key) &&
-                       (channel_key->domain == channel_info->key.domain));
-}
-
-static
-int match_trigger(struct cds_lfht_node *node, const void *key)
-{
-       struct lttng_trigger *trigger_key = (struct lttng_trigger *) key;
-       struct lttng_trigger_ht_element *trigger_ht_element;
-
-       trigger_ht_element = caa_container_of(node, struct lttng_trigger_ht_element,
-                       node);
-
-       return !!lttng_trigger_is_equal(trigger_key, trigger_ht_element->trigger);
-}
-
-static
-int match_trigger_token(struct cds_lfht_node *node, const void *key)
-{
-       const uint64_t *_key = key;
-       struct notification_trigger_tokens_ht_element *element;
-
-       element = caa_container_of(node,
-                       struct notification_trigger_tokens_ht_element, node);
-       return *_key == element->token;
-}
-
-static
-int match_client_list_condition(struct cds_lfht_node *node, const void *key)
-{
-       struct lttng_condition *condition_key = (struct lttng_condition *) key;
-       struct notification_client_list *client_list;
-       const struct lttng_condition *condition;
-
-       LTTNG_ASSERT(condition_key);
-
-       client_list = caa_container_of(node, struct notification_client_list,
-                       notification_trigger_clients_ht_node);
-       condition = client_list->condition;
-
-       return !!lttng_condition_is_equal(condition_key, condition);
-}
-
-static
-int match_session(struct cds_lfht_node *node, const void *key)
-{
-       const char *name = key;
-       struct session_info *session_info = caa_container_of(
-               node, struct session_info, sessions_ht_node);
-
-       return !strcmp(session_info->name, name);
-}
-
-static
-const char *notification_command_type_str(
-               enum notification_thread_command_type type)
-{
-       switch (type) {
-       case NOTIFICATION_COMMAND_TYPE_REGISTER_TRIGGER:
-               return "REGISTER_TRIGGER";
-       case NOTIFICATION_COMMAND_TYPE_UNREGISTER_TRIGGER:
-               return "UNREGISTER_TRIGGER";
-       case NOTIFICATION_COMMAND_TYPE_ADD_CHANNEL:
-               return "ADD_CHANNEL";
-       case NOTIFICATION_COMMAND_TYPE_REMOVE_CHANNEL:
-               return "REMOVE_CHANNEL";
-       case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING:
-               return "SESSION_ROTATION_ONGOING";
-       case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED:
-               return "SESSION_ROTATION_COMPLETED";
-       case NOTIFICATION_COMMAND_TYPE_ADD_TRACER_EVENT_SOURCE:
-               return "ADD_TRACER_EVENT_SOURCE";
-       case NOTIFICATION_COMMAND_TYPE_REMOVE_TRACER_EVENT_SOURCE:
-               return "REMOVE_TRACER_EVENT_SOURCE";
-       case NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS:
-               return "LIST_TRIGGERS";
-       case NOTIFICATION_COMMAND_TYPE_GET_TRIGGER:
-               return "GET_TRIGGER";
-       case NOTIFICATION_COMMAND_TYPE_QUIT:
-               return "QUIT";
-       case NOTIFICATION_COMMAND_TYPE_CLIENT_COMMUNICATION_UPDATE:
-               return "CLIENT_COMMUNICATION_UPDATE";
-       default:
-               abort();
-       }
-}
-
-/*
- * Match trigger based on name and credentials only.
- * Name duplication is NOT allowed for the same uid.
- */
-static
-int match_trigger_by_name_uid(struct cds_lfht_node *node,
-               const void *key)
-{
-       bool match = false;
-       const char *element_trigger_name;
-       const char *key_name;
-       enum lttng_trigger_status status;
-       const struct lttng_credentials *key_creds;
-       const struct lttng_credentials *node_creds;
-       const struct lttng_trigger *trigger_key =
-                       (const struct lttng_trigger *) key;
-       const struct lttng_trigger_ht_element *trigger_ht_element =
-                       caa_container_of(node,
-                               struct lttng_trigger_ht_element,
-                               node_by_name_uid);
-
-       status = lttng_trigger_get_name(trigger_ht_element->trigger,
-                       &element_trigger_name);
-       element_trigger_name = status == LTTNG_TRIGGER_STATUS_OK ?
-                       element_trigger_name : NULL;
-
-       status = lttng_trigger_get_name(trigger_key, &key_name);
-       key_name = status == LTTNG_TRIGGER_STATUS_OK ? key_name : NULL;
-
-       /*
-        * Compare the names.
-        * Consider null names as not equal. This is to maintain backwards
-        * compatibility with pre-2.13 anonymous triggers. Multiple anonymous
-        * triggers are allowed for a given user.
-        */
-       if (!element_trigger_name || !key_name) {
-               goto end;
-       }
-
-       if (strcmp(element_trigger_name, key_name) != 0) {
-               goto end;
-       }
-
-       /* Compare the owners' UIDs. */
-       key_creds = lttng_trigger_get_credentials(trigger_key);
-       node_creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
-
-       match = lttng_credentials_is_equal_uid(key_creds, node_creds);
-
-end:
-       return match;
-}
-
-/*
- * Hash trigger based on name and credentials only.
- */
-static
-unsigned long hash_trigger_by_name_uid(const struct lttng_trigger *trigger)
-{
-       unsigned long hash = 0;
-       const struct lttng_credentials *trigger_creds;
-       const char *trigger_name;
-       enum lttng_trigger_status status;
-
-       status = lttng_trigger_get_name(trigger, &trigger_name);
-       if (status == LTTNG_TRIGGER_STATUS_OK) {
-               hash = hash_key_str(trigger_name, lttng_ht_seed);
-       }
-
-       trigger_creds = lttng_trigger_get_credentials(trigger);
-       hash ^= hash_key_ulong((void *) (unsigned long) LTTNG_OPTIONAL_GET(trigger_creds->uid),
-                       lttng_ht_seed);
-
-       return hash;
-}
-
-static
-unsigned long hash_channel_key(struct channel_key *key)
-{
-       unsigned long key_hash = hash_key_u64(&key->key, lttng_ht_seed);
-       unsigned long domain_hash = hash_key_ulong(
-               (void *) (unsigned long) key->domain, lttng_ht_seed);
-
-       return key_hash ^ domain_hash;
-}
-
-static
-unsigned long hash_client_socket(int socket)
-{
-       return hash_key_ulong((void *) (unsigned long) socket, lttng_ht_seed);
-}
-
-static
-unsigned long hash_client_id(notification_client_id id)
-{
-       return hash_key_u64(&id, lttng_ht_seed);
-}
-
-/*
- * Get the type of object to which a given condition applies. Bindings let
- * the notification system evaluate a trigger's condition when a given
- * object's state is updated.
- *
- * For instance, a condition bound to a channel will be evaluated every time
- * the channel's state is changed by a channel monitoring sample.
- */
-static
-enum lttng_object_type get_condition_binding_object(
-               const struct lttng_condition *condition)
-{
-       switch (lttng_condition_get_type(condition)) {
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
-       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
-               return LTTNG_OBJECT_TYPE_CHANNEL;
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
-               return LTTNG_OBJECT_TYPE_SESSION;
-       case LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES:
-               return LTTNG_OBJECT_TYPE_NONE;
-       default:
-               return LTTNG_OBJECT_TYPE_UNKNOWN;
-       }
-}
-
-static
-void free_channel_info_rcu(struct rcu_head *node)
-{
-       free(caa_container_of(node, struct channel_info, rcu_node));
-}
-
-static
-void channel_info_destroy(struct channel_info *channel_info)
-{
-       if (!channel_info) {
-               return;
-       }
-
-       if (channel_info->session_info) {
-               session_info_remove_channel(channel_info->session_info,
-                               channel_info);
-               session_info_put(channel_info->session_info);
-       }
-       if (channel_info->name) {
-               free(channel_info->name);
-       }
-       call_rcu(&channel_info->rcu_node, free_channel_info_rcu);
-}
-
-static
-void free_session_info_rcu(struct rcu_head *node)
-{
-       free(caa_container_of(node, struct session_info, rcu_node));
-}
-
-/* Don't call directly, use the ref-counting mechanism. */
-static
-void session_info_destroy(void *_data)
-{
-       struct session_info *session_info = _data;
-       int ret;
-
-       LTTNG_ASSERT(session_info);
-       if (session_info->channel_infos_ht) {
-               ret = cds_lfht_destroy(session_info->channel_infos_ht, NULL);
-               if (ret) {
-                       ERR("Failed to destroy channel information hash table");
-               }
-       }
-       lttng_session_trigger_list_destroy(session_info->trigger_list);
-
-       rcu_read_lock();
-       cds_lfht_del(session_info->sessions_ht,
-                       &session_info->sessions_ht_node);
-       rcu_read_unlock();
-       free(session_info->name);
-       call_rcu(&session_info->rcu_node, free_session_info_rcu);
-}
-
-static
-void session_info_get(struct session_info *session_info)
-{
-       if (!session_info) {
-               return;
-       }
-       lttng_ref_get(&session_info->ref);
-}
-
-static
-void session_info_put(struct session_info *session_info)
-{
-       if (!session_info) {
-               return;
-       }
-       lttng_ref_put(&session_info->ref);
-}
-
-static
-struct session_info *session_info_create(const char *name, uid_t uid, gid_t gid,
-               struct lttng_session_trigger_list *trigger_list,
-               struct cds_lfht *sessions_ht)
-{
-       struct session_info *session_info;
-
-       LTTNG_ASSERT(name);
-
-       session_info = zmalloc(sizeof(*session_info));
-       if (!session_info) {
-               goto end;
-       }
-       lttng_ref_init(&session_info->ref, session_info_destroy);
-
-       session_info->channel_infos_ht = cds_lfht_new(DEFAULT_HT_SIZE,
-                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!session_info->channel_infos_ht) {
-               goto error;
-       }
-
-       cds_lfht_node_init(&session_info->sessions_ht_node);
-       session_info->name = strdup(name);
-       if (!session_info->name) {
-               goto error;
-       }
-       session_info->uid = uid;
-       session_info->gid = gid;
-       session_info->trigger_list = trigger_list;
-       session_info->sessions_ht = sessions_ht;
-end:
-       return session_info;
-error:
-       session_info_put(session_info);
-       return NULL;
-}
-
-static
-void session_info_add_channel(struct session_info *session_info,
-               struct channel_info *channel_info)
-{
-       rcu_read_lock();
-       cds_lfht_add(session_info->channel_infos_ht,
-                       hash_channel_key(&channel_info->key),
-                       &channel_info->session_info_channels_ht_node);
-       rcu_read_unlock();
-}
-
-static
-void session_info_remove_channel(struct session_info *session_info,
-               struct channel_info *channel_info)
-{
-       rcu_read_lock();
-       cds_lfht_del(session_info->channel_infos_ht,
-                       &channel_info->session_info_channels_ht_node);
-       rcu_read_unlock();
-}
-
-static
-struct channel_info *channel_info_create(const char *channel_name,
-               struct channel_key *channel_key, uint64_t channel_capacity,
-               struct session_info *session_info)
-{
-       struct channel_info *channel_info = zmalloc(sizeof(*channel_info));
-
-       if (!channel_info) {
-               goto end;
-       }
-
-       cds_lfht_node_init(&channel_info->channels_ht_node);
-       cds_lfht_node_init(&channel_info->session_info_channels_ht_node);
-       memcpy(&channel_info->key, channel_key, sizeof(*channel_key));
-       channel_info->capacity = channel_capacity;
-
-       channel_info->name = strdup(channel_name);
-       if (!channel_info->name) {
-               goto error;
-       }
-
-       /*
-        * Set the references between session and channel infos:
-        *   - channel_info holds a strong reference to session_info
-        *   - session_info holds a weak reference to channel_info
-        */
-       session_info_get(session_info);
-       session_info_add_channel(session_info, channel_info);
-       channel_info->session_info = session_info;
-end:
-       return channel_info;
-error:
-       channel_info_destroy(channel_info);
-       return NULL;
-}
-
-bool notification_client_list_get(struct notification_client_list *list)
-{
-       return urcu_ref_get_unless_zero(&list->ref);
-}
-
-static
-void free_notification_client_list_rcu(struct rcu_head *node)
-{
-       free(caa_container_of(node, struct notification_client_list,
-                       rcu_node));
-}
-
-static
-void notification_client_list_release(struct urcu_ref *list_ref)
-{
-       struct notification_client_list *list =
-                       container_of(list_ref, typeof(*list), ref);
-       struct notification_client_list_element *client_list_element, *tmp;
-
-       lttng_condition_put(list->condition);
-
-       if (list->notification_trigger_clients_ht) {
-               rcu_read_lock();
-
-               cds_lfht_del(list->notification_trigger_clients_ht,
-                               &list->notification_trigger_clients_ht_node);
-               rcu_read_unlock();
-               list->notification_trigger_clients_ht = NULL;
-       }
-       cds_list_for_each_entry_safe(client_list_element, tmp,
-                                    &list->clients_list, node) {
-               free(client_list_element);
-       }
-
-       LTTNG_ASSERT(cds_list_empty(&list->triggers_list));
-
-       pthread_mutex_destroy(&list->lock);
-       call_rcu(&list->rcu_node, free_notification_client_list_rcu);
-}
-
-static
-bool condition_applies_to_client(const struct lttng_condition *condition,
-               struct notification_client *client)
-{
-       bool applies = false;
-       struct lttng_condition_list_element *condition_list_element;
-
-       cds_list_for_each_entry(condition_list_element, &client->condition_list,
-                       node) {
-               applies = lttng_condition_is_equal(
-                               condition_list_element->condition,
-                               condition);
-               if (applies) {
-                       break;
-               }
-       }
-
-       return applies;
-}
-
-static
-struct notification_client_list *notification_client_list_create(
-               struct notification_thread_state *state,
-               const struct lttng_condition *condition)
-{
-       struct notification_client *client;
-       struct cds_lfht_iter iter;
-       struct notification_client_list *client_list;
-
-       client_list = zmalloc(sizeof(*client_list));
-       if (!client_list) {
-               PERROR("Failed to allocate notification client list");
-               goto end;
-       }
-
-       pthread_mutex_init(&client_list->lock, NULL);
-       /*
-        * The trigger that owns the condition has the first reference to this
-        * client list.
-        */
-       urcu_ref_init(&client_list->ref);
-       cds_lfht_node_init(&client_list->notification_trigger_clients_ht_node);
-       CDS_INIT_LIST_HEAD(&client_list->clients_list);
-       CDS_INIT_LIST_HEAD(&client_list->triggers_list);
-
-       /*
-        * Create a copy of the condition so that it's independent of any
-        * trigger. The client list may outlive the trigger object (which owns
-        * the condition) that is used to create it.
-        */
-       client_list->condition = lttng_condition_copy(condition);
-
-       /* Build a list of clients to which this new condition applies. */
-       cds_lfht_for_each_entry (state->client_socket_ht, &iter, client,
-                       client_socket_ht_node) {
-               struct notification_client_list_element *client_list_element;
-
-               if (!condition_applies_to_client(condition, client)) {
-                       continue;
-               }
-
-               client_list_element = zmalloc(sizeof(*client_list_element));
-               if (!client_list_element) {
-                       goto error_put_client_list;
-               }
-
-               CDS_INIT_LIST_HEAD(&client_list_element->node);
-               client_list_element->client = client;
-               cds_list_add(&client_list_element->node, &client_list->clients_list);
-       }
-
-       client_list->notification_trigger_clients_ht =
-                       state->notification_trigger_clients_ht;
-
-       rcu_read_lock();
-       /*
-        * Add the client list to the global list of client lists.
-        */
-       cds_lfht_add_unique(state->notification_trigger_clients_ht,
-                       lttng_condition_hash(client_list->condition),
-                       match_client_list_condition,
-                       client_list->condition,
-                       &client_list->notification_trigger_clients_ht_node);
-       rcu_read_unlock();
-       goto end;
-
-error_put_client_list:
-       notification_client_list_put(client_list);
-       client_list = NULL;
-
-end:
-       return client_list;
-}
-
-void notification_client_list_put(struct notification_client_list *list)
-{
-       if (!list) {
-               return;
-       }
-       return urcu_ref_put(&list->ref, notification_client_list_release);
-}
-
-/* Provides a reference to the returned list. */
-static
-struct notification_client_list *get_client_list_from_condition(
-       struct notification_thread_state *state,
-       const struct lttng_condition *condition)
-{
-       struct cds_lfht_node *node;
-       struct cds_lfht_iter iter;
-       struct notification_client_list *list = NULL;
-
-       rcu_read_lock();
-       cds_lfht_lookup(state->notification_trigger_clients_ht,
-                       lttng_condition_hash(condition),
-                       match_client_list_condition,
-                       condition,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       if (node) {
-               list = container_of(node, struct notification_client_list,
-                               notification_trigger_clients_ht_node);
-               list = notification_client_list_get(list) ? list : NULL;
-       }
-
-       rcu_read_unlock();
-       return list;
-}
-
-static
-int evaluate_channel_condition_for_client(
-               const struct lttng_condition *condition,
-               struct notification_thread_state *state,
-               struct lttng_evaluation **evaluation,
-               uid_t *session_uid, gid_t *session_gid)
-{
-       int ret;
-       struct cds_lfht_iter iter;
-       struct cds_lfht_node *node;
-       struct channel_info *channel_info = NULL;
-       struct channel_key *channel_key = NULL;
-       struct channel_state_sample *last_sample = NULL;
-       struct lttng_channel_trigger_list *channel_trigger_list = NULL;
-
-       rcu_read_lock();
-
-       /* Find the channel associated with the condition. */
-       cds_lfht_for_each_entry(state->channel_triggers_ht, &iter,
-                       channel_trigger_list, channel_triggers_ht_node) {
-               struct lttng_trigger_list_element *element;
-
-               cds_list_for_each_entry(element, &channel_trigger_list->list, node) {
-                       const struct lttng_condition *current_condition =
-                                       lttng_trigger_get_const_condition(
-                                               element->trigger);
-
-                       LTTNG_ASSERT(current_condition);
-                       if (!lttng_condition_is_equal(condition,
-                                       current_condition)) {
-                               continue;
-                       }
-
-                       /* Found the trigger, save the channel key. */
-                       channel_key = &channel_trigger_list->channel_key;
-                       break;
-               }
-               if (channel_key) {
-                       /* The channel key was found; stop iteration. */
-                       break;
-               }
-       }
-
-       if (!channel_key) {
-               /* No channel found; normal exit. */
-               DBG("No known channel associated with newly subscribed-to condition");
-               ret = 0;
-               goto end;
-       }
-
-       /* Fetch channel info for the matching channel. */
-       cds_lfht_lookup(state->channels_ht,
-                       hash_channel_key(channel_key),
-                       match_channel_info,
-                       channel_key,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       LTTNG_ASSERT(node);
-       channel_info = caa_container_of(node, struct channel_info,
-                       channels_ht_node);
-
-       /* Retrieve the channel's last sample, if it exists. */
-       cds_lfht_lookup(state->channel_state_ht,
-                       hash_channel_key(channel_key),
-                       match_channel_state_sample,
-                       channel_key,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       if (node) {
-               last_sample = caa_container_of(node,
-                               struct channel_state_sample,
-                               channel_state_ht_node);
-       } else {
-               /* Nothing to evaluate, no sample was ever taken. Normal exit */
-               DBG("No channel sample associated with newly subscribed-to condition");
-               ret = 0;
-               goto end;
-       }
-
-       ret = evaluate_buffer_condition(condition, evaluation, state,
-                       NULL, last_sample,
-                       0, channel_info->session_info->consumed_data_size,
-                       channel_info);
-       if (ret) {
-               WARN("Fatal error occurred while evaluating a newly subscribed-to condition");
-               goto end;
-       }
-
-       *session_uid = channel_info->session_info->uid;
-       *session_gid = channel_info->session_info->gid;
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-static
-const char *get_condition_session_name(const struct lttng_condition *condition)
-{
-       const char *session_name = NULL;
-       enum lttng_condition_status status;
-
-       switch (lttng_condition_get_type(condition)) {
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
-               status = lttng_condition_buffer_usage_get_session_name(
-                               condition, &session_name);
-               break;
-       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
-               status = lttng_condition_session_consumed_size_get_session_name(
-                               condition, &session_name);
-               break;
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
-               status = lttng_condition_session_rotation_get_session_name(
-                               condition, &session_name);
-               break;
-       default:
-               abort();
-       }
-       if (status != LTTNG_CONDITION_STATUS_OK) {
-               ERR("Failed to retrieve session rotation condition's session name");
-               goto end;
-       }
-end:
-       return session_name;
-}
-
-static
-int evaluate_session_condition_for_client(
-               const struct lttng_condition *condition,
-               struct notification_thread_state *state,
-               struct lttng_evaluation **evaluation,
-               uid_t *session_uid, gid_t *session_gid)
-{
-       int ret;
-       struct cds_lfht_iter iter;
-       struct cds_lfht_node *node;
-       const char *session_name;
-       struct session_info *session_info = NULL;
-
-       rcu_read_lock();
-       session_name = get_condition_session_name(condition);
-
-       /* Find the session associated with the trigger. */
-       cds_lfht_lookup(state->sessions_ht,
-                       hash_key_str(session_name, lttng_ht_seed),
-                       match_session,
-                       session_name,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       if (!node) {
-               DBG("No known session matching name \"%s\"",
-                               session_name);
-               ret = 0;
-               goto end;
-       }
-
-       session_info = caa_container_of(node, struct session_info,
-                       sessions_ht_node);
-       session_info_get(session_info);
-
-       /*
-        * Evaluation is performed in-line here since only one type of
-        * session-bound condition is handled for the moment.
-        */
-       switch (lttng_condition_get_type(condition)) {
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
-               if (!session_info->rotation.ongoing) {
-                       ret = 0;
-                       goto end_session_put;
-               }
-
-               *evaluation = lttng_evaluation_session_rotation_ongoing_create(
-                               session_info->rotation.id);
-               if (!*evaluation) {
-                       /* Fatal error. */
-                       ERR("Failed to create session rotation ongoing evaluation for session \"%s\"",
-                                       session_info->name);
-                       ret = -1;
-                       goto end_session_put;
-               }
-               ret = 0;
-               break;
-       default:
-               ret = 0;
-               goto end_session_put;
-       }
-
-       *session_uid = session_info->uid;
-       *session_gid = session_info->gid;
-
-end_session_put:
-       session_info_put(session_info);
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-static
-int evaluate_condition_for_client(const struct lttng_trigger *trigger,
-               const struct lttng_condition *condition,
-               struct notification_client *client,
-               struct notification_thread_state *state)
-{
-       int ret;
-       struct lttng_evaluation *evaluation = NULL;
-       struct notification_client_list client_list = {
-               .lock = PTHREAD_MUTEX_INITIALIZER,
-       };
-       struct notification_client_list_element client_list_element = { 0 };
-       uid_t object_uid = 0;
-       gid_t object_gid = 0;
-
-       LTTNG_ASSERT(trigger);
-       LTTNG_ASSERT(condition);
-       LTTNG_ASSERT(client);
-       LTTNG_ASSERT(state);
-
-       switch (get_condition_binding_object(condition)) {
-       case LTTNG_OBJECT_TYPE_SESSION:
-               ret = evaluate_session_condition_for_client(condition, state,
-                               &evaluation, &object_uid, &object_gid);
-               break;
-       case LTTNG_OBJECT_TYPE_CHANNEL:
-               ret = evaluate_channel_condition_for_client(condition, state,
-                               &evaluation, &object_uid, &object_gid);
-               break;
-       case LTTNG_OBJECT_TYPE_NONE:
-               DBG("Newly subscribed-to condition not bound to object, nothing to evaluate");
-               ret = 0;
-               goto end;
-       case LTTNG_OBJECT_TYPE_UNKNOWN:
-       default:
-               ret = -1;
-               goto end;
-       }
-       if (ret) {
-               /* Fatal error. */
-               goto end;
-       }
-       if (!evaluation) {
-               /* Evaluation yielded nothing. Normal exit. */
-               DBG("Newly subscribed-to condition evaluated to false, nothing to report to client");
-               ret = 0;
-               goto end;
-       }
-
-       /*
-        * Create a temporary client list with the client currently
-        * subscribing.
-        */
-       cds_lfht_node_init(&client_list.notification_trigger_clients_ht_node);
-       CDS_INIT_LIST_HEAD(&client_list.clients_list);
-
-       CDS_INIT_LIST_HEAD(&client_list_element.node);
-       client_list_element.client = client;
-       cds_list_add(&client_list_element.node, &client_list.clients_list);
-
-       /* Send evaluation result to the newly-subscribed client. */
-       DBG("Newly subscribed-to condition evaluated to true, notifying client");
-       ret = send_evaluation_to_clients(trigger, evaluation, &client_list,
-                       state, object_uid, object_gid);
-
-end:
-       return ret;
-}
-
-static
-int notification_thread_client_subscribe(struct notification_client *client,
-               struct lttng_condition *condition,
-               struct notification_thread_state *state,
-               enum lttng_notification_channel_status *_status)
-{
-       int ret = 0;
-       struct notification_client_list *client_list = NULL;
-       struct lttng_condition_list_element *condition_list_element = NULL;
-       struct notification_client_list_element *client_list_element = NULL;
-       struct lttng_trigger_ht_element *trigger_ht_element;
-       enum lttng_notification_channel_status status =
-                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
-
-       /*
-        * Ensure that the client has not already subscribed to this condition
-        * before.
-        */
-       cds_list_for_each_entry(condition_list_element, &client->condition_list, node) {
-               if (lttng_condition_is_equal(condition_list_element->condition,
-                               condition)) {
-                       status = LTTNG_NOTIFICATION_CHANNEL_STATUS_ALREADY_SUBSCRIBED;
-                       goto end;
-               }
-       }
-
-       condition_list_element = zmalloc(sizeof(*condition_list_element));
-       if (!condition_list_element) {
-               ret = -1;
-               goto error;
-       }
-       client_list_element = zmalloc(sizeof(*client_list_element));
-       if (!client_list_element) {
-               ret = -1;
-               goto error;
-       }
-
-       /*
-        * Add the newly-subscribed condition to the client's subscription list.
-        */
-       CDS_INIT_LIST_HEAD(&condition_list_element->node);
-       condition_list_element->condition = condition;
-       condition = NULL;
-       cds_list_add(&condition_list_element->node, &client->condition_list);
-
-       client_list = get_client_list_from_condition(
-                       state, condition_list_element->condition);
-       if (!client_list) {
-               /*
-                * No notification-emitting trigger is registered with this
-                * condition. We don't evaluate the condition right away
-                * since no such trigger is registered yet.
-                */
-               free(client_list_element);
-               goto end;
-       }
-
-       /*
-        * The condition to which the client just subscribed is evaluated
-        * at this point so that conditions that are already TRUE result
-        * in a notification being sent out.
-        *
-        * Note the iteration over all triggers that share a condition
-        * identical to the one to which the client is subscribing. This is
-        * done to ensure that the client receives a distinct notification for
-        * every trigger with a `notify` action that uses this condition.
-        */
-       pthread_mutex_lock(&client_list->lock);
-       cds_list_for_each_entry(trigger_ht_element,
-                       &client_list->triggers_list, client_list_trigger_node) {
-               if (evaluate_condition_for_client(trigger_ht_element->trigger,
-                               condition_list_element->condition,
-                               client, state)) {
-                       WARN("Evaluation of a condition on client subscription failed, aborting.");
-                       ret = -1;
-                       free(client_list_element);
-                       pthread_mutex_unlock(&client_list->lock);
-                       goto end;
-               }
-       }
-       pthread_mutex_unlock(&client_list->lock);
-
-       /*
-        * Add the client to the list of clients interested in a given trigger
-        * if a "notification" trigger with a corresponding condition was
-        * previously added.
-        */
-       client_list_element->client = client;
-       CDS_INIT_LIST_HEAD(&client_list_element->node);
-
-       pthread_mutex_lock(&client_list->lock);
-       cds_list_add(&client_list_element->node, &client_list->clients_list);
-       pthread_mutex_unlock(&client_list->lock);
-end:
-       if (_status) {
-               *_status = status;
-       }
-       if (client_list) {
-               notification_client_list_put(client_list);
-       }
-       lttng_condition_destroy(condition);
-       return ret;
-error:
-       free(condition_list_element);
-       free(client_list_element);
-       lttng_condition_destroy(condition);
-       return ret;
-}
-
-static
-int notification_thread_client_unsubscribe(
-               struct notification_client *client,
-               struct lttng_condition *condition,
-               struct notification_thread_state *state,
-               enum lttng_notification_channel_status *_status)
-{
-       struct notification_client_list *client_list;
-       struct lttng_condition_list_element *condition_list_element,
-                       *condition_tmp;
-       struct notification_client_list_element *client_list_element,
-                       *client_tmp;
-       bool condition_found = false;
-       enum lttng_notification_channel_status status =
-                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
-
-       /* Remove the condition from the client's condition list. */
-       cds_list_for_each_entry_safe(condition_list_element, condition_tmp,
-                       &client->condition_list, node) {
-               if (!lttng_condition_is_equal(condition_list_element->condition,
-                               condition)) {
-                       continue;
-               }
-
-               cds_list_del(&condition_list_element->node);
-               /*
-                * The caller may be iterating on the client's conditions to
-                * tear down a client's connection. In this case, the condition
-                * will be destroyed at the end.
-                */
-               if (condition != condition_list_element->condition) {
-                       lttng_condition_destroy(
-                                       condition_list_element->condition);
-               }
-               free(condition_list_element);
-               condition_found = true;
-               break;
-       }
-
-       if (!condition_found) {
-               status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNKNOWN_CONDITION;
-               goto end;
-       }
-
-       /*
-        * Remove the client from the list of clients interested in the trigger
-        * matching the condition.
-        */
-       client_list = get_client_list_from_condition(state, condition);
-       if (!client_list) {
-               goto end;
-       }
-
-       pthread_mutex_lock(&client_list->lock);
-       cds_list_for_each_entry_safe(client_list_element, client_tmp,
-                       &client_list->clients_list, node) {
-               if (client_list_element->client->id != client->id) {
-                       continue;
-               }
-               cds_list_del(&client_list_element->node);
-               free(client_list_element);
-               break;
-       }
-       pthread_mutex_unlock(&client_list->lock);
-       notification_client_list_put(client_list);
-       client_list = NULL;
-end:
-       lttng_condition_destroy(condition);
-       if (_status) {
-               *_status = status;
-       }
-       return 0;
-}
-
-static
-void free_notification_client_rcu(struct rcu_head *node)
-{
-       free(caa_container_of(node, struct notification_client, rcu_node));
-}
-
-static
-void notification_client_destroy(struct notification_client *client,
-               struct notification_thread_state *state)
-{
-       if (!client) {
-               return;
-       }
-
-       /*
-        * The client object is not reachable by other threads, no need to lock
-        * the client here.
-        */
-       if (client->socket >= 0) {
-               (void) lttcomm_close_unix_sock(client->socket);
-               client->socket = -1;
-       }
-       client->communication.active = false;
-       lttng_payload_reset(&client->communication.inbound.payload);
-       lttng_payload_reset(&client->communication.outbound.payload);
-       pthread_mutex_destroy(&client->lock);
-       call_rcu(&client->rcu_node, free_notification_client_rcu);
-}
-
-/*
- * Call with rcu_read_lock held (and hold for the lifetime of the returned
- * client pointer).
- */
-static
-struct notification_client *get_client_from_socket(int socket,
-               struct notification_thread_state *state)
-{
-       struct cds_lfht_iter iter;
-       struct cds_lfht_node *node;
-       struct notification_client *client = NULL;
-
-       cds_lfht_lookup(state->client_socket_ht,
-                       hash_client_socket(socket),
-                       match_client_socket,
-                       (void *) (unsigned long) socket,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       if (!node) {
-               goto end;
-       }
-
-       client = caa_container_of(node, struct notification_client,
-                       client_socket_ht_node);
-end:
-       return client;
-}
-
-/*
- * Call with rcu_read_lock held (and hold for the lifetime of the returned
- * client pointer).
- */
-static
-struct notification_client *get_client_from_id(notification_client_id id,
-               struct notification_thread_state *state)
-{
-       struct cds_lfht_iter iter;
-       struct cds_lfht_node *node;
-       struct notification_client *client = NULL;
-
-       cds_lfht_lookup(state->client_id_ht,
-                       hash_client_id(id),
-                       match_client_id,
-                       &id,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       if (!node) {
-               goto end;
-       }
-
-       client = caa_container_of(node, struct notification_client,
-                       client_id_ht_node);
-end:
-       return client;
-}
-
-static
-bool buffer_usage_condition_applies_to_channel(
-               const struct lttng_condition *condition,
-               const struct channel_info *channel_info)
-{
-       enum lttng_condition_status status;
-       enum lttng_domain_type condition_domain;
-       const char *condition_session_name = NULL;
-       const char *condition_channel_name = NULL;
-
-       status = lttng_condition_buffer_usage_get_domain_type(condition,
-                       &condition_domain);
-       LTTNG_ASSERT(status == LTTNG_CONDITION_STATUS_OK);
-       if (channel_info->key.domain != condition_domain) {
-               goto fail;
-       }
-
-       status = lttng_condition_buffer_usage_get_session_name(
-                       condition, &condition_session_name);
-       LTTNG_ASSERT((status == LTTNG_CONDITION_STATUS_OK) && condition_session_name);
-
-       status = lttng_condition_buffer_usage_get_channel_name(
-                       condition, &condition_channel_name);
-       LTTNG_ASSERT((status == LTTNG_CONDITION_STATUS_OK) && condition_channel_name);
-
-       if (strcmp(channel_info->session_info->name, condition_session_name)) {
-               goto fail;
-       }
-       if (strcmp(channel_info->name, condition_channel_name)) {
-               goto fail;
-       }
-
-       return true;
-fail:
-       return false;
-}
-
-static
-bool session_consumed_size_condition_applies_to_channel(
-               const struct lttng_condition *condition,
-               const struct channel_info *channel_info)
-{
-       enum lttng_condition_status status;
-       const char *condition_session_name = NULL;
-
-       status = lttng_condition_session_consumed_size_get_session_name(
-                       condition, &condition_session_name);
-       LTTNG_ASSERT((status == LTTNG_CONDITION_STATUS_OK) && condition_session_name);
-
-       if (strcmp(channel_info->session_info->name, condition_session_name)) {
-               goto fail;
-       }
-
-       return true;
-fail:
-       return false;
-}
-
-static
-bool trigger_applies_to_channel(const struct lttng_trigger *trigger,
-               const struct channel_info *channel_info)
-{
-       const struct lttng_condition *condition;
-       bool trigger_applies;
-
-       condition = lttng_trigger_get_const_condition(trigger);
-       if (!condition) {
-               goto fail;
-       }
-
-       switch (lttng_condition_get_type(condition)) {
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
-               trigger_applies = buffer_usage_condition_applies_to_channel(
-                               condition, channel_info);
-               break;
-       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
-               trigger_applies = session_consumed_size_condition_applies_to_channel(
-                               condition, channel_info);
-               break;
-       default:
-               goto fail;
-       }
-
-       return trigger_applies;
-fail:
-       return false;
-}
-
-/* Must be called with RCU read lock held. */
-static
-struct lttng_session_trigger_list *get_session_trigger_list(
-               struct notification_thread_state *state,
-               const char *session_name)
-{
-       struct lttng_session_trigger_list *list = NULL;
-       struct cds_lfht_node *node;
-       struct cds_lfht_iter iter;
-
-       cds_lfht_lookup(state->session_triggers_ht,
-                       hash_key_str(session_name, lttng_ht_seed),
-                       match_session_trigger_list,
-                       session_name,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       if (!node) {
-               /*
-                * Not an error, the list of triggers applying to that session
-                * will be initialized when the session is created.
-                */
-               DBG("No trigger list found for session \"%s\" as it is not yet known to the notification system",
-                               session_name);
-               goto end;
-       }
-
-       list = caa_container_of(node,
-                       struct lttng_session_trigger_list,
-                       session_triggers_ht_node);
-end:
-       return list;
-}
-
-/*
- * Allocate an empty lttng_session_trigger_list for the session named
- * 'session_name'.
- *
- * No ownership of 'session_name' is assumed by the session trigger list.
- * It is the caller's responsibility to ensure the session name is alive
- * for as long as this list is.
- */
-static
-struct lttng_session_trigger_list *lttng_session_trigger_list_create(
-               const char *session_name,
-               struct cds_lfht *session_triggers_ht)
-{
-       struct lttng_session_trigger_list *list;
-
-       list = zmalloc(sizeof(*list));
-       if (!list) {
-               goto end;
-       }
-       list->session_name = session_name;
-       CDS_INIT_LIST_HEAD(&list->list);
-       cds_lfht_node_init(&list->session_triggers_ht_node);
-       list->session_triggers_ht = session_triggers_ht;
-
-       rcu_read_lock();
-       /* Publish the list through the session_triggers_ht. */
-       cds_lfht_add(session_triggers_ht,
-                       hash_key_str(session_name, lttng_ht_seed),
-                       &list->session_triggers_ht_node);
-       rcu_read_unlock();
-end:
-       return list;
-}
-
-static
-void free_session_trigger_list_rcu(struct rcu_head *node)
-{
-       free(caa_container_of(node, struct lttng_session_trigger_list,
-                       rcu_node));
-}
-
-static
-void lttng_session_trigger_list_destroy(struct lttng_session_trigger_list *list)
-{
-       struct lttng_trigger_list_element *trigger_list_element, *tmp;
-
-       /* Empty the list element by element, and then free the list itself. */
-       cds_list_for_each_entry_safe(trigger_list_element, tmp,
-                       &list->list, node) {
-               cds_list_del(&trigger_list_element->node);
-               free(trigger_list_element);
-       }
-       rcu_read_lock();
-       /* Unpublish the list from the session_triggers_ht. */
-       cds_lfht_del(list->session_triggers_ht,
-                       &list->session_triggers_ht_node);
-       rcu_read_unlock();
-       call_rcu(&list->rcu_node, free_session_trigger_list_rcu);
-}
-
-static
-int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
-               struct lttng_trigger *trigger)
-{
-       int ret = 0;
-       struct lttng_trigger_list_element *new_element =
-                       zmalloc(sizeof(*new_element));
-
-       if (!new_element) {
-               ret = -1;
-               goto end;
-       }
-       CDS_INIT_LIST_HEAD(&new_element->node);
-       new_element->trigger = trigger;
-       cds_list_add(&new_element->node, &list->list);
-end:
-       return ret;
-}
-
-static
-bool trigger_applies_to_session(const struct lttng_trigger *trigger,
-               const char *session_name)
-{
-       bool applies = false;
-       const struct lttng_condition *condition;
-
-       condition = lttng_trigger_get_const_condition(trigger);
-       switch (lttng_condition_get_type(condition)) {
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
-       {
-               enum lttng_condition_status condition_status;
-               const char *condition_session_name;
-
-               condition_status = lttng_condition_session_rotation_get_session_name(
-                       condition, &condition_session_name);
-               if (condition_status != LTTNG_CONDITION_STATUS_OK) {
-                       ERR("Failed to retrieve session rotation condition's session name");
-                       goto end;
-               }
-
-               LTTNG_ASSERT(condition_session_name);
-               applies = !strcmp(condition_session_name, session_name);
-               break;
-       }
-       default:
-               goto end;
-       }
-end:
-       return applies;
-}
-
-/*
- * Allocate and initialize an lttng_session_trigger_list which contains
- * all triggers that apply to the session named 'session_name'.
- *
- * No ownership of 'session_name' is assumed by the session trigger list.
- * It is the caller's responsibility to ensure the session name is alive
- * for as long as this list is.
- */
-static
-struct lttng_session_trigger_list *lttng_session_trigger_list_build(
-               const struct notification_thread_state *state,
-               const char *session_name)
-{
-       int trigger_count = 0;
-       struct lttng_session_trigger_list *session_trigger_list = NULL;
-       struct lttng_trigger_ht_element *trigger_ht_element = NULL;
-       struct cds_lfht_iter iter;
-
-       session_trigger_list = lttng_session_trigger_list_create(session_name,
-                       state->session_triggers_ht);
-
-       /* Add all triggers applying to the session named 'session_name'. */
-       cds_lfht_for_each_entry(state->triggers_ht, &iter, trigger_ht_element,
-                       node) {
-               int ret;
-
-               if (!trigger_applies_to_session(trigger_ht_element->trigger,
-                               session_name)) {
-                       continue;
-               }
-
-               ret = lttng_session_trigger_list_add(session_trigger_list,
-                               trigger_ht_element->trigger);
-               if (ret) {
-                       goto error;
-               }
-
-               trigger_count++;
-       }
-
-       DBG("Found %i triggers that apply to newly created session",
-                       trigger_count);
-       return session_trigger_list;
-error:
-       lttng_session_trigger_list_destroy(session_trigger_list);
-       return NULL;
-}
-
-static
-struct session_info *find_or_create_session_info(
-               struct notification_thread_state *state,
-               const char *name, uid_t uid, gid_t gid)
-{
-       struct session_info *session = NULL;
-       struct cds_lfht_node *node;
-       struct cds_lfht_iter iter;
-       struct lttng_session_trigger_list *trigger_list;
-
-       rcu_read_lock();
-       cds_lfht_lookup(state->sessions_ht,
-                       hash_key_str(name, lttng_ht_seed),
-                       match_session,
-                       name,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       if (node) {
-               DBG("Found session info of session \"%s\" (uid = %i, gid = %i)",
-                               name, uid, gid);
-               session = caa_container_of(node, struct session_info,
-                               sessions_ht_node);
-               LTTNG_ASSERT(session->uid == uid);
-               LTTNG_ASSERT(session->gid == gid);
-               session_info_get(session);
-               goto end;
-       }
-
-       trigger_list = lttng_session_trigger_list_build(state, name);
-       if (!trigger_list) {
-               goto error;
-       }
-
-       session = session_info_create(name, uid, gid, trigger_list,
-                       state->sessions_ht);
-       if (!session) {
-               ERR("Failed to allocate session info for session \"%s\" (uid = %i, gid = %i)",
-                               name, uid, gid);
-               lttng_session_trigger_list_destroy(trigger_list);
-               goto error;
-       }
-       trigger_list = NULL;
-
-       cds_lfht_add(state->sessions_ht, hash_key_str(name, lttng_ht_seed),
-                       &session->sessions_ht_node);
-end:
-       rcu_read_unlock();
-       return session;
-error:
-       rcu_read_unlock();
-       session_info_put(session);
-       return NULL;
-}
-
-static
-int handle_notification_thread_command_add_channel(
-               struct notification_thread_state *state,
-               const char *session_name, uid_t session_uid, gid_t session_gid,
-               const char *channel_name, enum lttng_domain_type channel_domain,
-               uint64_t channel_key_int, uint64_t channel_capacity,
-               enum lttng_error_code *cmd_result)
-{
-       struct cds_list_head trigger_list;
-       struct channel_info *new_channel_info = NULL;
-       struct channel_key channel_key = {
-               .key = channel_key_int,
-               .domain = channel_domain,
-       };
-       struct lttng_channel_trigger_list *channel_trigger_list = NULL;
-       struct lttng_trigger_ht_element *trigger_ht_element = NULL;
-       int trigger_count = 0;
-       struct cds_lfht_iter iter;
-       struct session_info *session_info = NULL;
-
-       DBG("Adding channel %s from session %s, channel key = %" PRIu64 " in %s domain",
-                       channel_name, session_name, channel_key_int,
-                       lttng_domain_type_str(channel_domain));
-
-       CDS_INIT_LIST_HEAD(&trigger_list);
-
-       session_info = find_or_create_session_info(state, session_name,
-                       session_uid, session_gid);
-       if (!session_info) {
-               /* Allocation error or an internal error occurred. */
-               goto error;
-       }
-
-       new_channel_info = channel_info_create(channel_name, &channel_key,
-                       channel_capacity, session_info);
-       if (!new_channel_info) {
-               goto error;
-       }
-
-       rcu_read_lock();
-       /* Build a list of all triggers applying to the new channel. */
-       cds_lfht_for_each_entry(state->triggers_ht, &iter, trigger_ht_element,
-                       node) {
-               struct lttng_trigger_list_element *new_element;
-
-               if (!trigger_applies_to_channel(trigger_ht_element->trigger,
-                               new_channel_info)) {
-                       continue;
-               }
-
-               new_element = zmalloc(sizeof(*new_element));
-               if (!new_element) {
-                       rcu_read_unlock();
-                       goto error;
-               }
-               CDS_INIT_LIST_HEAD(&new_element->node);
-               new_element->trigger = trigger_ht_element->trigger;
-               cds_list_add(&new_element->node, &trigger_list);
-               trigger_count++;
-       }
-       rcu_read_unlock();
-
-       DBG("Found %i triggers that apply to newly added channel",
-                       trigger_count);
-       channel_trigger_list = zmalloc(sizeof(*channel_trigger_list));
-       if (!channel_trigger_list) {
-               goto error;
-       }
-       channel_trigger_list->channel_key = new_channel_info->key;
-       CDS_INIT_LIST_HEAD(&channel_trigger_list->list);
-       cds_lfht_node_init(&channel_trigger_list->channel_triggers_ht_node);
-       cds_list_splice(&trigger_list, &channel_trigger_list->list);
-
-       rcu_read_lock();
-       /* Add channel to the channel_ht which owns the channel_infos. */
-       cds_lfht_add(state->channels_ht,
-                       hash_channel_key(&new_channel_info->key),
-                       &new_channel_info->channels_ht_node);
-       /*
-        * Add the list of triggers associated with this channel to the
-        * channel_triggers_ht.
-        */
-       cds_lfht_add(state->channel_triggers_ht,
-                       hash_channel_key(&new_channel_info->key),
-                       &channel_trigger_list->channel_triggers_ht_node);
-       rcu_read_unlock();
-       session_info_put(session_info);
-       *cmd_result = LTTNG_OK;
-       return 0;
-error:
-       channel_info_destroy(new_channel_info);
-       session_info_put(session_info);
-       return 1;
-}
-
-static
-void free_channel_trigger_list_rcu(struct rcu_head *node)
-{
-       free(caa_container_of(node, struct lttng_channel_trigger_list,
-                       rcu_node));
-}
-
-static
-void free_channel_state_sample_rcu(struct rcu_head *node)
-{
-       free(caa_container_of(node, struct channel_state_sample,
-                       rcu_node));
-}
-
-static
-int handle_notification_thread_command_remove_channel(
-       struct notification_thread_state *state,
-       uint64_t channel_key, enum lttng_domain_type domain,
-       enum lttng_error_code *cmd_result)
-{
-       struct cds_lfht_node *node;
-       struct cds_lfht_iter iter;
-       struct lttng_channel_trigger_list *trigger_list;
-       struct lttng_trigger_list_element *trigger_list_element, *tmp;
-       struct channel_key key = { .key = channel_key, .domain = domain };
-       struct channel_info *channel_info;
-
-       DBG("Removing channel key = %" PRIu64 " in %s domain",
-                       channel_key, lttng_domain_type_str(domain));
-
-       rcu_read_lock();
-
-       cds_lfht_lookup(state->channel_triggers_ht,
-                       hash_channel_key(&key),
-                       match_channel_trigger_list,
-                       &key,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       /*
-        * Being asked to remove a channel that doesn't exist indicates a
-        * severe internal error.
-        */
-       if (!node) {
-               ERR("Channel being removed is unknown to the notification thread");
-               goto end;
-       }
-
-       /* Free the list of triggers associated with this channel. */
-       trigger_list = caa_container_of(node, struct lttng_channel_trigger_list,
-                       channel_triggers_ht_node);
-       cds_list_for_each_entry_safe(trigger_list_element, tmp,
-                       &trigger_list->list, node) {
-               cds_list_del(&trigger_list_element->node);
-               free(trigger_list_element);
-       }
-       cds_lfht_del(state->channel_triggers_ht, node);
-       call_rcu(&trigger_list->rcu_node, free_channel_trigger_list_rcu);
-
-       /* Free sampled channel state. */
-       cds_lfht_lookup(state->channel_state_ht,
-                       hash_channel_key(&key),
-                       match_channel_state_sample,
-                       &key,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       /*
-        * This is expected to be NULL if the channel is destroyed before we
-        * received a sample.
-        */
-       if (node) {
-               struct channel_state_sample *sample = caa_container_of(node,
-                               struct channel_state_sample,
-                               channel_state_ht_node);
-
-               cds_lfht_del(state->channel_state_ht, node);
-               call_rcu(&sample->rcu_node, free_channel_state_sample_rcu);
-       }
-
-       /* Remove the channel from the channels_ht and free it. */
-       cds_lfht_lookup(state->channels_ht,
-                       hash_channel_key(&key),
-                       match_channel_info,
-                       &key,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       LTTNG_ASSERT(node);
-       channel_info = caa_container_of(node, struct channel_info,
-                       channels_ht_node);
-       cds_lfht_del(state->channels_ht, node);
-       channel_info_destroy(channel_info);
-end:
-       rcu_read_unlock();
-       *cmd_result = LTTNG_OK;
-       return 0;
-}
-
-static
-int handle_notification_thread_command_session_rotation(
-       struct notification_thread_state *state,
-       enum notification_thread_command_type cmd_type,
-       const char *session_name, uid_t session_uid, gid_t session_gid,
-       uint64_t trace_archive_chunk_id,
-       struct lttng_trace_archive_location *location,
-       enum lttng_error_code *_cmd_result)
-{
-       int ret = 0;
-       enum lttng_error_code cmd_result = LTTNG_OK;
-       struct lttng_session_trigger_list *trigger_list;
-       struct lttng_trigger_list_element *trigger_list_element;
-       struct session_info *session_info;
-       const struct lttng_credentials session_creds = {
-               .uid = LTTNG_OPTIONAL_INIT_VALUE(session_uid),
-               .gid = LTTNG_OPTIONAL_INIT_VALUE(session_gid),
-       };
-
-       rcu_read_lock();
-
-       session_info = find_or_create_session_info(state, session_name,
-                       session_uid, session_gid);
-       if (!session_info) {
-               /* Allocation error or an internal error occurred. */
-               ret = -1;
-               cmd_result = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-       session_info->rotation.ongoing =
-                       cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING;
-       session_info->rotation.id = trace_archive_chunk_id;
-       trigger_list = get_session_trigger_list(state, session_name);
-       if (!trigger_list) {
-               DBG("No triggers applying to session \"%s\" found",
-                               session_name);
-               goto end;
-       }
-
-       cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
-                       node) {
-               const struct lttng_condition *condition;
-               struct lttng_trigger *trigger;
-               struct notification_client_list *client_list;
-               struct lttng_evaluation *evaluation = NULL;
-               enum lttng_condition_type condition_type;
-               enum action_executor_status executor_status;
-
-               trigger = trigger_list_element->trigger;
-               condition = lttng_trigger_get_const_condition(trigger);
-               LTTNG_ASSERT(condition);
-               condition_type = lttng_condition_get_type(condition);
-
-               if (condition_type == LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING &&
-                               cmd_type != NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
-                       continue;
-               } else if (condition_type == LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED &&
-                               cmd_type != NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED) {
-                       continue;
-               }
-
-               client_list = get_client_list_from_condition(state, condition);
-               if (cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
-                       evaluation = lttng_evaluation_session_rotation_ongoing_create(
-                                       trace_archive_chunk_id);
-               } else {
-                       evaluation = lttng_evaluation_session_rotation_completed_create(
-                                       trace_archive_chunk_id, location);
-               }
-
-               if (!evaluation) {
-                       /* Internal error */
-                       ret = -1;
-                       cmd_result = LTTNG_ERR_UNK;
-                       goto put_list;
-               }
-
-               /*
-                * Ownership of `evaluation` transferred to the action executor
-                * no matter the result.
-                */
-               executor_status = action_executor_enqueue_trigger(
-                               state->executor, trigger, evaluation,
-                               &session_creds, client_list);
-               evaluation = NULL;
-               switch (executor_status) {
-               case ACTION_EXECUTOR_STATUS_OK:
-                       break;
-               case ACTION_EXECUTOR_STATUS_ERROR:
-               case ACTION_EXECUTOR_STATUS_INVALID:
-                       /*
-                        * TODO Add trigger identification (name/id) when
-                        * it is added to the API.
-                        */
-                       ERR("Fatal error occurred while enqueuing action associated with session rotation trigger");
-                       ret = -1;
-                       goto put_list;
-               case ACTION_EXECUTOR_STATUS_OVERFLOW:
-                       /*
-                        * TODO Add trigger identification (name/id) when
-                        * it is added to the API.
-                        *
-                        * Not a fatal error.
-                        */
-                       WARN("No space left when enqueuing action associated with session rotation trigger");
-                       ret = 0;
-                       goto put_list;
-               default:
-                       abort();
-               }
-
-put_list:
-               notification_client_list_put(client_list);
-               if (caa_unlikely(ret)) {
-                       break;
-               }
-       }
-end:
-       session_info_put(session_info);
-       *_cmd_result = cmd_result;
-       rcu_read_unlock();
-       return ret;
-}
-
-static
-int handle_notification_thread_command_add_tracer_event_source(
-               struct notification_thread_state *state,
-               int tracer_event_source_fd,
-               enum lttng_domain_type domain_type,
-               enum lttng_error_code *_cmd_result)
-{
-       int ret = 0;
-       enum lttng_error_code cmd_result = LTTNG_OK;
-       struct notification_event_tracer_event_source_element *element = NULL;
-
-       element = zmalloc(sizeof(*element));
-       if (!element) {
-               cmd_result = LTTNG_ERR_NOMEM;
-               ret = -1;
-               goto end;
-       }
-
-       element->fd = tracer_event_source_fd;
-       element->domain = domain_type;
-
-       cds_list_add(&element->node, &state->tracer_event_sources_list);
-
-       DBG3("Adding tracer event source fd to poll set: tracer_event_source_fd = %d, domain = '%s'",
-                       tracer_event_source_fd,
-                       lttng_domain_type_str(domain_type));
-
-       /* Adding the read side pipe to the event poll. */
-       ret = lttng_poll_add(&state->events, tracer_event_source_fd, LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               ERR("Failed to add tracer event source to poll set: tracer_event_source_fd = %d, domain = '%s'",
-                               tracer_event_source_fd,
-                               lttng_domain_type_str(element->domain));
-               cds_list_del(&element->node);
-               free(element);
-               goto end;
-       }
-
-       element->is_fd_in_poll_set = true;
-
-end:
-       *_cmd_result = cmd_result;
-       return ret;
-}
-
-static
-int drain_event_notifier_notification_pipe(
-               struct notification_thread_state *state,
-               int pipe, enum lttng_domain_type domain)
-{
-       struct lttng_poll_event events = {0};
-       int ret;
-
-       ret = lttng_poll_create(&events, 1, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               ERR("Error creating lttng_poll_event");
-               goto end;
-       }
-
-       ret = lttng_poll_add(&events, pipe, LPOLLIN);
-       if (ret < 0) {
-               ERR("Error adding fd event notifier notification pipe to lttng_poll_event: fd = %d",
-                               pipe);
-               goto end;
-       }
-
-       while (true) {
-               /*
-                * Continue to consume notifications as long as there are new
-                * ones coming in. The tracer has been asked to stop producing
-                * them.
-                *
-                * LPOLLIN is explicitly checked since LPOLLHUP is implicitly
-                * monitored (on Linux, at least) and will be returned when
-                * the pipe is closed but empty.
-                */
-               ret = lttng_poll_wait_interruptible(&events, 0);
-               if (ret == 0 || (LTTNG_POLL_GETEV(&events, 0) & LPOLLIN) == 0) {
-                       /* No more notification to be read on this pipe. */
-                       ret = 0;
-                       goto end;
-               } else if (ret < 0) {
-                       PERROR("Failed on lttng_poll_wait_interruptible() call");
-                       ret = -1;
-                       goto end;
-               }
-
-               ret = handle_one_event_notifier_notification(state, pipe, domain);
-               if (ret) {
-                       ERR("Error consuming an event notifier notification from pipe: fd = %d",
-                                       pipe);
-               }
-       }
-end:
-       lttng_poll_clean(&events);
-       return ret;
-}
-
-static
-struct notification_event_tracer_event_source_element *
-find_tracer_event_source_element(struct notification_thread_state *state,
-               int tracer_event_source_fd)
-{
-       struct notification_event_tracer_event_source_element *source_element;
-
-       cds_list_for_each_entry(source_element,
-                       &state->tracer_event_sources_list, node) {
-               if (source_element->fd == tracer_event_source_fd) {
-                       goto end;
-               }
-       }
-
-       source_element = NULL;
-end:
-       return source_element;
-}
-
-static
-int remove_tracer_event_source_from_pollset(
-               struct notification_thread_state *state,
-               struct notification_event_tracer_event_source_element *source_element)
-{
-       int ret = 0;
-
-       LTTNG_ASSERT(source_element->is_fd_in_poll_set);
-
-       DBG3("Removing tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
-                       source_element->fd,
-                       lttng_domain_type_str(source_element->domain));
-
-       /* Removing the fd from the event poll set. */
-       ret = lttng_poll_del(&state->events, source_element->fd);
-       if (ret < 0) {
-               ERR("Failed to remove tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
-                               source_element->fd,
-                               lttng_domain_type_str(source_element->domain));
-               ret = -1;
-               goto end;
-       }
-
-       source_element->is_fd_in_poll_set = false;
-
-       /*
-        * Force the notification thread to restart the poll() loop to ensure
-        * that any pending events from the removed fd are discarded.
-        */
-       state->restart_poll = true;
-
-       ret = drain_event_notifier_notification_pipe(state, source_element->fd,
-                       source_element->domain);
-       if (ret) {
-               ERR("Error draining event notifier notification: tracer_event_source_fd = %d, domain = %s",
-                               source_element->fd,
-                               lttng_domain_type_str(source_element->domain));
-               ret = -1;
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-int handle_notification_thread_tracer_event_source_died(
-               struct notification_thread_state *state,
-               int tracer_event_source_fd)
-{
-       int ret = 0;
-       struct notification_event_tracer_event_source_element *source_element;
-
-       source_element = find_tracer_event_source_element(state,
-                       tracer_event_source_fd);
-
-       LTTNG_ASSERT(source_element);
-
-       ret = remove_tracer_event_source_from_pollset(state, source_element);
-       if (ret) {
-               ERR("Failed to remove dead tracer event source from poll set");
-       }
-
-       return ret;
-}
-
-static
-int handle_notification_thread_command_remove_tracer_event_source(
-               struct notification_thread_state *state,
-               int tracer_event_source_fd,
-               enum lttng_error_code *_cmd_result)
-{
-       int ret = 0;
-       enum lttng_error_code cmd_result = LTTNG_OK;
-       struct notification_event_tracer_event_source_element *source_element = NULL;
-
-       source_element = find_tracer_event_source_element(state,
-                       tracer_event_source_fd);
-
-       LTTNG_ASSERT(source_element);
-
-       /* Remove the tracer source from the list. */
-       cds_list_del(&source_element->node);
-
-       if (!source_element->is_fd_in_poll_set) {
-               /* Skip the poll set removal. */
-               goto end;
-       }
-
-       ret = remove_tracer_event_source_from_pollset(state, source_element);
-       if (ret) {
-               ERR("Failed to remove tracer event source from poll set");
-               cmd_result = LTTNG_ERR_FATAL;
-       }
-
-end:
-       free(source_element);
-       *_cmd_result = cmd_result;
-       return ret;
-}
-
-static int handle_notification_thread_command_list_triggers(
-               struct notification_thread_handle *handle,
-               struct notification_thread_state *state,
-               uid_t client_uid,
-               struct lttng_triggers **triggers,
-               enum lttng_error_code *_cmd_result)
-{
-       int ret = 0;
-       enum lttng_error_code cmd_result = LTTNG_OK;
-       struct cds_lfht_iter iter;
-       struct lttng_trigger_ht_element *trigger_ht_element;
-       struct lttng_triggers *local_triggers = NULL;
-       const struct lttng_credentials *creds;
-
-       rcu_read_lock();
-
-       local_triggers = lttng_triggers_create();
-       if (!local_triggers) {
-               /* Not a fatal error. */
-               cmd_result = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-       cds_lfht_for_each_entry(state->triggers_ht, &iter,
-                       trigger_ht_element, node) {
-               /*
-                * Only return the triggers to which the client has access.
-                * The root user has visibility over all triggers.
-                */
-               creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
-               if (client_uid != lttng_credentials_get_uid(creds) && client_uid != 0) {
-                       continue;
-               }
-
-               ret = lttng_triggers_add(local_triggers,
-                               trigger_ht_element->trigger);
-               if (ret < 0) {
-                       /* Not a fatal error. */
-                       ret = 0;
-                       cmd_result = LTTNG_ERR_NOMEM;
-                       goto end;
-               }
-       }
-
-       /* Transferring ownership to the caller. */
-       *triggers = local_triggers;
-       local_triggers = NULL;
-
-end:
-       rcu_read_unlock();
-       lttng_triggers_destroy(local_triggers);
-       *_cmd_result = cmd_result;
-       return ret;
-}
-
-static inline void get_trigger_info_for_log(const struct lttng_trigger *trigger,
-               const char **trigger_name,
-               uid_t *trigger_owner_uid)
-{
-       enum lttng_trigger_status trigger_status;
-
-       trigger_status = lttng_trigger_get_name(trigger, trigger_name);
-       switch (trigger_status) {
-       case LTTNG_TRIGGER_STATUS_OK:
-               break;
-       case LTTNG_TRIGGER_STATUS_UNSET:
-               *trigger_name = "(anonymous)";
-               break;
-       default:
-               abort();
-       }
-
-       trigger_status = lttng_trigger_get_owner_uid(trigger,
-                       trigger_owner_uid);
-       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
-}
-
-static int handle_notification_thread_command_get_trigger(
-               struct notification_thread_state *state,
-               const struct lttng_trigger *trigger,
-               struct lttng_trigger **registered_trigger,
-               enum lttng_error_code *_cmd_result)
-{
-       int ret = -1;
-       struct cds_lfht_iter iter;
-       struct lttng_trigger_ht_element *trigger_ht_element;
-       enum lttng_error_code cmd_result = LTTNG_ERR_TRIGGER_NOT_FOUND;
-       const char *trigger_name;
-       uid_t trigger_owner_uid;
-
-       rcu_read_lock();
-
-       cds_lfht_for_each_entry(
-                       state->triggers_ht, &iter, trigger_ht_element, node) {
-               if (lttng_trigger_is_equal(
-                                   trigger, trigger_ht_element->trigger)) {
-                       /* Take one reference on the return trigger. */
-                       *registered_trigger = trigger_ht_element->trigger;
-                       lttng_trigger_get(*registered_trigger);
-                       ret = 0;
-                       cmd_result = LTTNG_OK;
-                       goto end;
-               }
-       }
-
-       /* Not a fatal error if the trigger is not found. */
-       get_trigger_info_for_log(trigger, &trigger_name, &trigger_owner_uid);
-       DBG("Failed to retrieve registered version of trigger: trigger name = '%s', trigger owner uid = %d",
-                       trigger_name, (int) trigger_owner_uid);
-
-       ret = 0;
-
-end:
-       rcu_read_unlock();
-       *_cmd_result = cmd_result;
-       return ret;
-}
-
-static
-bool condition_is_supported(struct lttng_condition *condition)
-{
-       bool is_supported;
-
-       switch (lttng_condition_get_type(condition)) {
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
-       {
-               int ret;
-               enum lttng_domain_type domain;
-
-               ret = lttng_condition_buffer_usage_get_domain_type(condition,
-                               &domain);
-               LTTNG_ASSERT(ret == 0);
-
-               if (domain != LTTNG_DOMAIN_KERNEL) {
-                       is_supported = true;
-                       goto end;
-               }
-
-               /*
-                * Older kernel tracers don't expose the API to monitor their
-                * buffers. Therefore, we reject triggers that require that
-                * mechanism in order to be evaluated.
-                *
-                * Assume unsupported on error.
-                */
-               is_supported = kernel_supports_ring_buffer_snapshot_sample_positions() == 1;
-               break;
-       }
-       case LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES:
-       {
-               const struct lttng_event_rule *event_rule;
-               enum lttng_domain_type domain;
-               const enum lttng_condition_status status =
-                               lttng_condition_event_rule_matches_get_rule(
-                                               condition, &event_rule);
-
-               LTTNG_ASSERT(status == LTTNG_CONDITION_STATUS_OK);
-
-               domain = lttng_event_rule_get_domain_type(event_rule);
-               if (domain != LTTNG_DOMAIN_KERNEL) {
-                       is_supported = true;
-                       goto end;
-               }
-
-               /*
-                * Older kernel tracers can't emit notifications. Therefore, we
-                * reject triggers that require that mechanism in order to be
-                * evaluated.
-                *
-                * Assume unsupported on error.
-                */
-               is_supported = kernel_supports_event_notifiers() == 1;
-               break;
-       }
-       default:
-               is_supported = true;
-       }
-end:
-       return is_supported;
-}
-
-/* Must be called with RCU read lock held. */
-static
-int bind_trigger_to_matching_session(struct lttng_trigger *trigger,
-               struct notification_thread_state *state)
-{
-       int ret = 0;
-       const struct lttng_condition *condition;
-       const char *session_name;
-       struct lttng_session_trigger_list *trigger_list;
-
-       condition = lttng_trigger_get_const_condition(trigger);
-       switch (lttng_condition_get_type(condition)) {
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
-       {
-               enum lttng_condition_status status;
-
-               status = lttng_condition_session_rotation_get_session_name(
-                               condition, &session_name);
-               if (status != LTTNG_CONDITION_STATUS_OK) {
-                       ERR("Failed to bind trigger to session: unable to get 'session_rotation' condition's session name");
-                       ret = -1;
-                       goto end;
-               }
-               break;
-       }
-       default:
-               ret = -1;
-               goto end;
-       }
-
-       trigger_list = get_session_trigger_list(state, session_name);
-       if (!trigger_list) {
-               DBG("Unable to bind trigger applying to session \"%s\" as it is not yet known to the notification system",
-                               session_name);
-               goto end;
-       }
-
-       DBG("Newly registered trigger bound to session \"%s\"",
-                       session_name);
-       ret = lttng_session_trigger_list_add(trigger_list, trigger);
-end:
-       return ret;
-}
-
-/* Must be called with RCU read lock held. */
-static
-int bind_trigger_to_matching_channels(struct lttng_trigger *trigger,
-               struct notification_thread_state *state)
-{
-       int ret = 0;
-       struct cds_lfht_node *node;
-       struct cds_lfht_iter iter;
-       struct channel_info *channel;
-
-       cds_lfht_for_each_entry(state->channels_ht, &iter, channel,
-                       channels_ht_node) {
-               struct lttng_trigger_list_element *trigger_list_element;
-               struct lttng_channel_trigger_list *trigger_list;
-               struct cds_lfht_iter lookup_iter;
-
-               if (!trigger_applies_to_channel(trigger, channel)) {
-                       continue;
-               }
-
-               cds_lfht_lookup(state->channel_triggers_ht,
-                               hash_channel_key(&channel->key),
-                               match_channel_trigger_list,
-                               &channel->key,
-                               &lookup_iter);
-               node = cds_lfht_iter_get_node(&lookup_iter);
-               LTTNG_ASSERT(node);
-               trigger_list = caa_container_of(node,
-                               struct lttng_channel_trigger_list,
-                               channel_triggers_ht_node);
-
-               trigger_list_element = zmalloc(sizeof(*trigger_list_element));
-               if (!trigger_list_element) {
-                       ret = -1;
-                       goto end;
-               }
-               CDS_INIT_LIST_HEAD(&trigger_list_element->node);
-               trigger_list_element->trigger = trigger;
-               cds_list_add(&trigger_list_element->node, &trigger_list->list);
-               DBG("Newly registered trigger bound to channel \"%s\"",
-                               channel->name);
-       }
-end:
-       return ret;
-}
-
-static
-bool is_trigger_action_notify(const struct lttng_trigger *trigger)
-{
-       bool is_notify = false;
-       unsigned int i, count;
-       enum lttng_action_status action_status;
-       const struct lttng_action *action =
-                       lttng_trigger_get_const_action(trigger);
-       enum lttng_action_type action_type;
-
-       LTTNG_ASSERT(action);
-       action_type = lttng_action_get_type(action);
-       if (action_type == LTTNG_ACTION_TYPE_NOTIFY) {
-               is_notify = true;
-               goto end;
-       } else if (action_type != LTTNG_ACTION_TYPE_LIST) {
-               goto end;
-       }
-
-       action_status = lttng_action_list_get_count(action, &count);
-       LTTNG_ASSERT(action_status == LTTNG_ACTION_STATUS_OK);
-
-       for (i = 0; i < count; i++) {
-               const struct lttng_action *inner_action =
-                               lttng_action_list_get_at_index(
-                                               action, i);
-
-               action_type = lttng_action_get_type(inner_action);
-               if (action_type == LTTNG_ACTION_TYPE_NOTIFY) {
-                       is_notify = true;
-                       goto end;
-               }
-       }
-
-end:
-       return is_notify;
-}
-
-static bool trigger_name_taken(struct notification_thread_state *state,
-               const struct lttng_trigger *trigger)
-{
-       struct cds_lfht_iter iter;
-
-       /*
-        * No duplicates are allowed in the triggers_by_name_uid_ht.
-        * The match is done against the trigger name and uid.
-        */
-       cds_lfht_lookup(state->triggers_by_name_uid_ht,
-                       hash_trigger_by_name_uid(trigger),
-                       match_trigger_by_name_uid,
-                       trigger,
-                       &iter);
-       return !!cds_lfht_iter_get_node(&iter);
-}
-
-static
-enum lttng_error_code generate_trigger_name(
-               struct notification_thread_state *state,
-               struct lttng_trigger *trigger, const char **name)
-{
-       enum lttng_error_code ret_code = LTTNG_OK;
-       bool taken = false;
-       enum lttng_trigger_status status;
-
-       do {
-               const int ret = lttng_trigger_generate_name(trigger,
-                               state->trigger_id.name_offset++);
-               if (ret) {
-                       /* Running out of memory is the only reason this can fail right now. */
-                       ret_code = LTTNG_ERR_NOMEM;
-                       break;
-               }
-
-               status = lttng_trigger_get_name(trigger, name);
-               LTTNG_ASSERT(status == LTTNG_TRIGGER_STATUS_OK);
-
-               taken = trigger_name_taken(state, trigger);
-       } while (taken || state->trigger_id.name_offset == UINT64_MAX);
-
-       return ret_code;
-}
-
-static inline
-void notif_thread_state_remove_trigger_ht_elem(
-               struct notification_thread_state *state,
-               struct lttng_trigger_ht_element *trigger_ht_element)
-{
-       LTTNG_ASSERT(state);
-       LTTNG_ASSERT(trigger_ht_element);
-
-       cds_lfht_del(state->triggers_ht, &trigger_ht_element->node);
-       cds_lfht_del(state->triggers_by_name_uid_ht, &trigger_ht_element->node_by_name_uid);
-}
-
-static
-enum lttng_error_code setup_tracer_notifier(
-               struct notification_thread_state *state,
-               struct lttng_trigger *trigger)
-{
-       enum lttng_error_code ret;
-       enum event_notifier_error_accounting_status error_accounting_status;
-       struct cds_lfht_node *node;
-       uint64_t error_counter_index = 0;
-       struct lttng_condition *condition = lttng_trigger_get_condition(trigger);
-       struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element = NULL;
-
-       trigger_tokens_ht_element = zmalloc(sizeof(*trigger_tokens_ht_element));
-       if (!trigger_tokens_ht_element) {
-               ret = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-       /* Add trigger token to the trigger_tokens_ht. */
-       cds_lfht_node_init(&trigger_tokens_ht_element->node);
-       trigger_tokens_ht_element->token = LTTNG_OPTIONAL_GET(trigger->tracer_token);
-       trigger_tokens_ht_element->trigger = trigger;
-
-       node = cds_lfht_add_unique(state->trigger_tokens_ht,
-                       hash_key_u64(&trigger_tokens_ht_element->token, lttng_ht_seed),
-                       match_trigger_token,
-                       &trigger_tokens_ht_element->token,
-                       &trigger_tokens_ht_element->node);
-       if (node != &trigger_tokens_ht_element->node) {
-               ret = LTTNG_ERR_TRIGGER_EXISTS;
-               goto error_free_ht_element;
-       }
-
-       error_accounting_status = event_notifier_error_accounting_register_event_notifier(
-                       trigger, &error_counter_index);
-       if (error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-               if (error_accounting_status == EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NO_INDEX_AVAILABLE) {
-                       DBG("Trigger list error accounting counter full.");
-                       ret = LTTNG_ERR_EVENT_NOTIFIER_ERROR_ACCOUNTING_FULL;
-               } else {
-                       ERR("Error registering trigger for error accounting");
-                       ret = LTTNG_ERR_EVENT_NOTIFIER_REGISTRATION;
-               }
-
-               goto error_remove_ht_element;
-       }
-
-       lttng_condition_event_rule_matches_set_error_counter_index(
-                       condition, error_counter_index);
-
-       ret = LTTNG_OK;
-       goto end;
-
-error_remove_ht_element:
-       cds_lfht_del(state->trigger_tokens_ht, &trigger_tokens_ht_element->node);
-error_free_ht_element:
-       free(trigger_tokens_ht_element);
-end:
-       return ret;
-}
-
-/*
- * FIXME A client's credentials are not checked when registering a trigger.
- *
- * The effects of this are benign since:
- *     - The client will succeed in registering the trigger, as it is valid,
- *     - The trigger will, internally, be bound to the channel/session,
- *     - The notifications will not be sent since the client's credentials
- *       are checked against the channel at that moment.
- *
- * If this function returns a non-zero value, it means something is
- * fundamentally broken and the whole subsystem/thread will be torn down.
- *
- * If a non-fatal error occurs, just set the cmd_result to the appropriate
- * error code.
- */
-static
-int handle_notification_thread_command_register_trigger(
-               struct notification_thread_state *state,
-               struct lttng_trigger *trigger,
-               bool is_trigger_anonymous,
-               enum lttng_error_code *cmd_result)
-{
-       int ret = 0;
-       struct lttng_condition *condition;
-       struct notification_client_list *client_list = NULL;
-       struct lttng_trigger_ht_element *trigger_ht_element = NULL;
-       struct cds_lfht_node *node;
-       const char* trigger_name;
-       bool free_trigger = true;
-       struct lttng_evaluation *evaluation = NULL;
-       struct lttng_credentials object_creds;
-       uid_t object_uid;
-       gid_t object_gid;
-       enum action_executor_status executor_status;
-       const uint64_t trigger_tracer_token =
-                       state->trigger_id.next_tracer_token++;
-
-       rcu_read_lock();
-
-       /* Set the trigger's tracer token. */
-       lttng_trigger_set_tracer_token(trigger, trigger_tracer_token);
-
-       if (!is_trigger_anonymous) {
-               if (lttng_trigger_get_name(trigger, &trigger_name) ==
-                               LTTNG_TRIGGER_STATUS_UNSET) {
-                       const enum lttng_error_code ret_code =
-                                       generate_trigger_name(state, trigger,
-                                                       &trigger_name);
-
-                       if (ret_code != LTTNG_OK) {
-                               /* Fatal error. */
-                               ret = -1;
-                               *cmd_result = ret_code;
-                               goto error;
-                       }
-               } else if (trigger_name_taken(state, trigger)) {
-                       /* Not a fatal error. */
-                       *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
-                       ret = 0;
-                       goto error;
-               }
-       } else {
-               trigger_name = "(anonymous)";
-       }
-
-       condition = lttng_trigger_get_condition(trigger);
-       LTTNG_ASSERT(condition);
-
-       /* Some conditions require tracers to implement a minimal ABI version. */
-       if (!condition_is_supported(condition)) {
-               *cmd_result = LTTNG_ERR_NOT_SUPPORTED;
-               goto error;
-       }
-
-       trigger_ht_element = zmalloc(sizeof(*trigger_ht_element));
-       if (!trigger_ht_element) {
-               ret = -1;
-               goto error;
-       }
-
-       /* Add trigger to the trigger_ht. */
-       cds_lfht_node_init(&trigger_ht_element->node);
-       cds_lfht_node_init(&trigger_ht_element->node_by_name_uid);
-       trigger_ht_element->trigger = trigger;
-
-       node = cds_lfht_add_unique(state->triggers_ht,
-                       lttng_condition_hash(condition),
-                       match_trigger,
-                       trigger,
-                       &trigger_ht_element->node);
-       if (node != &trigger_ht_element->node) {
-               /* Not a fatal error, simply report it to the client. */
-               *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
-               goto error_free_ht_element;
-       }
-
-       node = cds_lfht_add_unique(state->triggers_by_name_uid_ht,
-                       hash_trigger_by_name_uid(trigger),
-                       match_trigger_by_name_uid,
-                       trigger,
-                       &trigger_ht_element->node_by_name_uid);
-       if (node != &trigger_ht_element->node_by_name_uid) {
-               /* Internal error: the insertion into triggers_ht should have already failed. */
-               ret = -1;
-               goto error_free_ht_element;
-       }
-
-       /* From this point consider the trigger registered. */
-       lttng_trigger_set_as_registered(trigger);
-
-       /*
-        * Some triggers might need a tracer notifier depending on their
-        * condition and actions.
-        */
-       if (lttng_trigger_needs_tracer_notifier(trigger)) {
-               enum lttng_error_code error_code;
-
-               error_code = setup_tracer_notifier(state, trigger);
-               if (error_code != LTTNG_OK) {
-                       notif_thread_state_remove_trigger_ht_elem(state,
-                                       trigger_ht_element);
-                       if (error_code == LTTNG_ERR_NOMEM) {
-                               ret = -1;
-                       } else {
-                               *cmd_result = error_code;
-                               ret = 0;
-                       }
-
-                       goto error_free_ht_element;
-               }
-       }
-
-       /*
-        * The rest only applies to triggers that have a "notify" action.
-        * In practice, it is never skipped since "notify" is the only
-        * action type currently supported.
-        */
-       if (is_trigger_action_notify(trigger)) {
-               /*
-                * Find or create the client list of this condition. It may
-                * already be present if another trigger is already registered
-                * with the same condition.
-                */
-               client_list = get_client_list_from_condition(state, condition);
-               if (!client_list) {
-                       /*
-                        * No client list for this condition yet. We create a
-                        * new one and build it up.
-                        */
-                       client_list = notification_client_list_create(state, condition);
-                       if (!client_list) {
-                               ERR("Error creating notification client list for trigger %s", trigger->name);
-                               goto error_free_ht_element;
-                       }
-               }
-
-               CDS_INIT_LIST_HEAD(&trigger_ht_element->client_list_trigger_node);
-
-               pthread_mutex_lock(&client_list->lock);
-               cds_list_add(&trigger_ht_element->client_list_trigger_node, &client_list->triggers_list);
-               pthread_mutex_unlock(&client_list->lock);
-       }
-
-       /*
-        * Ownership of the trigger and of its wrapper was transferred to
-        * the triggers_ht. Same for the token ht element, if necessary.
-        */
-       trigger_ht_element = NULL;
-       free_trigger = false;
-
-       switch (get_condition_binding_object(condition)) {
-       case LTTNG_OBJECT_TYPE_SESSION:
-               /* Add the trigger to the list if it matches a known session. */
-               ret = bind_trigger_to_matching_session(trigger, state);
-               if (ret) {
-                       goto error_free_ht_element;
-               }
-               break;
-       case LTTNG_OBJECT_TYPE_CHANNEL:
-               /*
-                * Add the trigger to list of triggers bound to the channels
-                * currently known.
-                */
-               ret = bind_trigger_to_matching_channels(trigger, state);
-               if (ret) {
-                       goto error_free_ht_element;
-               }
-               break;
-       case LTTNG_OBJECT_TYPE_NONE:
-               break;
-       default:
-               ERR("Unknown object type on which to bind a newly registered trigger was encountered");
-               ret = -1;
-               goto error_free_ht_element;
-       }
-
-       /*
-        * The new trigger's condition must be evaluated against the current
-        * state.
-        *
-        * In the case of a `notify` action, since nothing prevents clients
-        * from subscribing to a condition before the corresponding trigger
-        * is registered, we have to evaluate this new condition right away.
-        *
-        * At some point, we were waiting for the next "evaluation" (e.g. on
-        * reception of a channel sample) to evaluate this new condition, but
-        * that was broken.
-        *
-        * The reason it was broken is that waiting for the next sample
-        * does not allow us to properly handle transitions for edge-triggered
-        * conditions.
-        *
-        * Consider this example: when we handle a new channel sample, we
-        * evaluate each condition twice: once with the previous state, and
-        * again with the newest state. We then use those two results to
-        * determine whether a state change happened: a condition was false and
-        * became true. If a state change happened, we have to notify clients.
-        *
-        * Now, if a client subscribes to a given notification and registers
-        * a trigger *after* that subscription, we have to make sure the
-        * condition is evaluated at this point while considering only the
-        * current state. Otherwise, the next evaluation cycle may only see
-        * that the evaluations remain the same (true for samples n-1 and n) and
-        * the client will never know that the condition has been met.
-        */
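-       /*
-        * A minimal sketch of the edge detection described above; the names
-        * used here are hypothetical and only serve as an illustration:
-        *
-        *   prev = evaluate(condition, sample[n - 1]);
-        *   cur = evaluate(condition, sample[n]);
-        *   if (!prev && cur) {
-        *           notify_subscribed_clients();
-        *   }
-        *
-        * A condition that is already true when its trigger is registered
-        * never produces such an edge; hence the immediate evaluation below.
-        */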
-       switch (get_condition_binding_object(condition)) {
-       case LTTNG_OBJECT_TYPE_SESSION:
-               ret = evaluate_session_condition_for_client(condition, state,
-                               &evaluation, &object_uid,
-                               &object_gid);
-               LTTNG_OPTIONAL_SET(&object_creds.uid, object_uid);
-               LTTNG_OPTIONAL_SET(&object_creds.gid, object_gid);
-               break;
-       case LTTNG_OBJECT_TYPE_CHANNEL:
-               ret = evaluate_channel_condition_for_client(condition, state,
-                               &evaluation, &object_uid,
-                               &object_gid);
-               LTTNG_OPTIONAL_SET(&object_creds.uid, object_uid);
-               LTTNG_OPTIONAL_SET(&object_creds.gid, object_gid);
-               break;
-       case LTTNG_OBJECT_TYPE_NONE:
-               ret = 0;
-               break;
-       case LTTNG_OBJECT_TYPE_UNKNOWN:
-       default:
-               ret = -1;
-               break;
-       }
-
-       if (ret) {
-               /* Fatal error. */
-               goto error_free_ht_element;
-       }
-
-       DBG("Newly registered trigger's condition evaluated to %s",
-                       evaluation ? "true" : "false");
-       if (!evaluation) {
-               /* Evaluation yielded nothing. Normal exit. */
-               ret = 0;
-               goto success;
-       }
-
-       /*
-        * Ownership of `evaluation` transferred to the action executor
-        * no matter the result.
-        */
-       executor_status = action_executor_enqueue_trigger(state->executor,
-                       trigger, evaluation, &object_creds, client_list);
-       evaluation = NULL;
-       switch (executor_status) {
-       case ACTION_EXECUTOR_STATUS_OK:
-               break;
-       case ACTION_EXECUTOR_STATUS_ERROR:
-       case ACTION_EXECUTOR_STATUS_INVALID:
-               /*
-                * TODO Add trigger identification (name/id) when
-                * it is added to the API.
-                */
-               ERR("Fatal error occurred while enqueuing action associated to newly registered trigger");
-               ret = -1;
-               goto error_free_ht_element;
-       case ACTION_EXECUTOR_STATUS_OVERFLOW:
-               /*
-                * TODO Add trigger identification (name/id) when
-                * it is added to the API.
-                *
-                * Not a fatal error.
-                */
-               WARN("No space left when enqueuing action associated to newly registered trigger");
-               ret = 0;
-               goto success;
-       default:
-               abort();
-       }
-
-success:
-       *cmd_result = LTTNG_OK;
-       DBG("Registered trigger: name = `%s`, tracer token = %" PRIu64,
-                       trigger_name, trigger_tracer_token);
-       goto end;
-
-error_free_ht_element:
-       if (trigger_ht_element) {
-               /* Delayed removal due to RCU constraint on delete. */
-               call_rcu(&trigger_ht_element->rcu_node,
-                               free_lttng_trigger_ht_element_rcu);
-       }
-error:
-       if (free_trigger) {
-               /*
-                * Other objects might have a reference to the trigger; mark it
-                * as unregistered.
-                */
-               lttng_trigger_set_as_unregistered(trigger);
-               lttng_trigger_destroy(trigger);
-       }
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-static
-void free_lttng_trigger_ht_element_rcu(struct rcu_head *node)
-{
-       free(caa_container_of(node, struct lttng_trigger_ht_element,
-                       rcu_node));
-}
-
-static
-void free_notification_trigger_tokens_ht_element_rcu(struct rcu_head *node)
-{
-       free(caa_container_of(node, struct notification_trigger_tokens_ht_element,
-                       rcu_node));
-}
-
-static
-void teardown_tracer_notifier(struct notification_thread_state *state,
-               const struct lttng_trigger *trigger)
-{
-       struct cds_lfht_iter iter;
-       struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element;
-
-       cds_lfht_for_each_entry(state->trigger_tokens_ht, &iter,
-                       trigger_tokens_ht_element, node) {
-
-               if (!lttng_trigger_is_equal(trigger,
-                                       trigger_tokens_ht_element->trigger)) {
-                       continue;
-               }
-
-               event_notifier_error_accounting_unregister_event_notifier(
-                               trigger_tokens_ht_element->trigger);
-
-               /* TODO: talk to all apps and remove it. */
-               DBG("Removed trigger from tokens_ht");
-               cds_lfht_del(state->trigger_tokens_ht,
-                               &trigger_tokens_ht_element->node);
-
-               call_rcu(&trigger_tokens_ht_element->rcu_node,
-                               free_notification_trigger_tokens_ht_element_rcu);
-
-               break;
-       }
-}
-
-static
-int handle_notification_thread_command_unregister_trigger(
-               struct notification_thread_state *state,
-               const struct lttng_trigger *trigger,
-               enum lttng_error_code *_cmd_reply)
-{
-       struct cds_lfht_iter iter;
-       struct cds_lfht_node *triggers_ht_node;
-       struct lttng_channel_trigger_list *trigger_list;
-       struct notification_client_list *client_list;
-       struct lttng_trigger_ht_element *trigger_ht_element = NULL;
-       const struct lttng_condition *condition = lttng_trigger_get_const_condition(
-                       trigger);
-       enum lttng_error_code cmd_reply;
-
-       rcu_read_lock();
-
-       cds_lfht_lookup(state->triggers_ht,
-                       lttng_condition_hash(condition),
-                       match_trigger,
-                       trigger,
-                       &iter);
-       triggers_ht_node = cds_lfht_iter_get_node(&iter);
-       if (!triggers_ht_node) {
-               cmd_reply = LTTNG_ERR_TRIGGER_NOT_FOUND;
-               goto end;
-       } else {
-               cmd_reply = LTTNG_OK;
-       }
-
-       trigger_ht_element = caa_container_of(triggers_ht_node,
-                       struct lttng_trigger_ht_element, node);
-
-       /* Remove trigger from channel_triggers_ht. */
-       cds_lfht_for_each_entry(state->channel_triggers_ht, &iter, trigger_list,
-                       channel_triggers_ht_node) {
-               struct lttng_trigger_list_element *trigger_element, *tmp;
-
-               cds_list_for_each_entry_safe(trigger_element, tmp,
-                               &trigger_list->list, node) {
-                       if (!lttng_trigger_is_equal(trigger, trigger_element->trigger)) {
-                               continue;
-                       }
-
-                       DBG("Removed trigger from channel_triggers_ht");
-                       cds_list_del(&trigger_element->node);
-                       /* A trigger can only appear once per channel */
-                       break;
-               }
-       }
-
-       if (lttng_trigger_needs_tracer_notifier(trigger)) {
-               teardown_tracer_notifier(state, trigger);
-       }
-
-       if (is_trigger_action_notify(trigger)) {
-               /*
-                * Remove and release the client list from
-                * notification_trigger_clients_ht.
-                */
-               client_list = get_client_list_from_condition(state, condition);
-               LTTNG_ASSERT(client_list);
-
-               pthread_mutex_lock(&client_list->lock);
-               cds_list_del(&trigger_ht_element->client_list_trigger_node);
-               pthread_mutex_unlock(&client_list->lock);
-
-               /* Put the reference acquired above and the hash table's reference. */
-               notification_client_list_put(client_list);
-               notification_client_list_put(client_list);
-               client_list = NULL;
-       }
-
-       /* Remove trigger from triggers_ht. */
-       notif_thread_state_remove_trigger_ht_elem(state, trigger_ht_element);
-
-       /* Release the ownership of the trigger. */
-       lttng_trigger_destroy(trigger_ht_element->trigger);
-       call_rcu(&trigger_ht_element->rcu_node, free_lttng_trigger_ht_element_rcu);
-end:
-       rcu_read_unlock();
-       if (_cmd_reply) {
-               *_cmd_reply = cmd_reply;
-       }
-       return 0;
-}
-
-/* Returns 0 on success, 1 on exit requested, negative value on error. */
-int handle_notification_thread_command(
-               struct notification_thread_handle *handle,
-               struct notification_thread_state *state)
-{
-       int ret;
-       uint64_t counter;
-       struct notification_thread_command *cmd;
-
-       /* Read the event pipe to put it back into a quiescent state. */
-       ret = lttng_read(lttng_pipe_get_readfd(handle->cmd_queue.event_pipe), &counter,
-                       sizeof(counter));
-       if (ret != sizeof(counter)) {
-               goto error;
-       }
-
-       pthread_mutex_lock(&handle->cmd_queue.lock);
-       cmd = cds_list_first_entry(&handle->cmd_queue.list,
-                       struct notification_thread_command, cmd_list_node);
-       cds_list_del(&cmd->cmd_list_node);
-       pthread_mutex_unlock(&handle->cmd_queue.lock);
-
-       DBG("Received `%s` command",
-                       notification_command_type_str(cmd->type));
-       switch (cmd->type) {
-       case NOTIFICATION_COMMAND_TYPE_REGISTER_TRIGGER:
-               ret = handle_notification_thread_command_register_trigger(state,
-                               cmd->parameters.register_trigger.trigger,
-                               cmd->parameters.register_trigger.is_trigger_anonymous,
-                               &cmd->reply_code);
-               break;
-       case NOTIFICATION_COMMAND_TYPE_UNREGISTER_TRIGGER:
-               ret = handle_notification_thread_command_unregister_trigger(
-                               state,
-                               cmd->parameters.unregister_trigger.trigger,
-                               &cmd->reply_code);
-               break;
-       case NOTIFICATION_COMMAND_TYPE_ADD_CHANNEL:
-               ret = handle_notification_thread_command_add_channel(
-                               state,
-                               cmd->parameters.add_channel.session.name,
-                               cmd->parameters.add_channel.session.uid,
-                               cmd->parameters.add_channel.session.gid,
-                               cmd->parameters.add_channel.channel.name,
-                               cmd->parameters.add_channel.channel.domain,
-                               cmd->parameters.add_channel.channel.key,
-                               cmd->parameters.add_channel.channel.capacity,
-                               &cmd->reply_code);
-               break;
-       case NOTIFICATION_COMMAND_TYPE_REMOVE_CHANNEL:
-               ret = handle_notification_thread_command_remove_channel(
-                               state, cmd->parameters.remove_channel.key,
-                               cmd->parameters.remove_channel.domain,
-                               &cmd->reply_code);
-               break;
-       case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING:
-       case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED:
-               ret = handle_notification_thread_command_session_rotation(
-                               state,
-                               cmd->type,
-                               cmd->parameters.session_rotation.session_name,
-                               cmd->parameters.session_rotation.uid,
-                               cmd->parameters.session_rotation.gid,
-                               cmd->parameters.session_rotation.trace_archive_chunk_id,
-                               cmd->parameters.session_rotation.location,
-                               &cmd->reply_code);
-               break;
-       case NOTIFICATION_COMMAND_TYPE_ADD_TRACER_EVENT_SOURCE:
-               ret = handle_notification_thread_command_add_tracer_event_source(
-                               state,
-                               cmd->parameters.tracer_event_source.tracer_event_source_fd,
-                               cmd->parameters.tracer_event_source.domain,
-                               &cmd->reply_code);
-               break;
-       case NOTIFICATION_COMMAND_TYPE_REMOVE_TRACER_EVENT_SOURCE:
-               ret = handle_notification_thread_command_remove_tracer_event_source(
-                               state,
-                               cmd->parameters.tracer_event_source.tracer_event_source_fd,
-                               &cmd->reply_code);
-               break;
-       case NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS:
-       {
-               struct lttng_triggers *triggers = NULL;
-
-               ret = handle_notification_thread_command_list_triggers(
-                               handle,
-                               state,
-                               cmd->parameters.list_triggers.uid,
-                               &triggers,
-                               &cmd->reply_code);
-               cmd->reply.list_triggers.triggers = triggers;
-               ret = 0;
-               break;
-       }
-       case NOTIFICATION_COMMAND_TYPE_QUIT:
-               cmd->reply_code = LTTNG_OK;
-               ret = 1;
-               goto end;
-       case NOTIFICATION_COMMAND_TYPE_GET_TRIGGER:
-       {
-               struct lttng_trigger *trigger = NULL;
-
-               ret = handle_notification_thread_command_get_trigger(state,
-                               cmd->parameters.get_trigger.trigger, &trigger,
-                               &cmd->reply_code);
-               cmd->reply.get_trigger.trigger = trigger;
-               break;
-       }
-       case NOTIFICATION_COMMAND_TYPE_CLIENT_COMMUNICATION_UPDATE:
-       {
-               const enum client_transmission_status client_status =
-                               cmd->parameters.client_communication_update
-                                               .status;
-               const notification_client_id client_id =
-                               cmd->parameters.client_communication_update.id;
-               struct notification_client *client;
-
-               rcu_read_lock();
-               client = get_client_from_id(client_id, state);
-
-               if (!client) {
-                       /*
-                        * The client error was probably already picked up by
-                        * the notification thread, or the client disconnected
-                        * gracefully while this command was queued.
-                        */
-                       DBG("Failed to find notification client to update communication status, client id = %" PRIu64,
-                                       client_id);
-                       ret = 0;
-               } else {
-                       ret = client_handle_transmission_status(
-                                       client, client_status, state);
-               }
-               rcu_read_unlock();
-               break;
-       }
-       default:
-               ERR("Unknown internal command received");
-               goto error_unlock;
-       }
-
-       if (ret) {
-               goto error_unlock;
-       }
-end:
-       if (cmd->is_async) {
-               free(cmd);
-               cmd = NULL;
-       } else {
-               lttng_waiter_wake_up(&cmd->reply_waiter);
-       }
-       return ret;
-error_unlock:
-       /* Wake up the calling thread and return a fatal error. */
-       lttng_waiter_wake_up(&cmd->reply_waiter);
-       cmd->reply_code = LTTNG_ERR_FATAL;
-error:
-       /* Indicate a fatal error to the caller. */
-       return -1;
-}
-
-static
-int socket_set_non_blocking(int socket)
-{
-       int ret, flags;
-
-       /* Set the socket as non-blocking. */
-       ret = fcntl(socket, F_GETFL, 0);
-       if (ret == -1) {
-               PERROR("fcntl get socket flags");
-               goto end;
-       }
-       flags = ret;
-
-       ret = fcntl(socket, F_SETFL, flags | O_NONBLOCK);
-       if (ret == -1) {
-               PERROR("fcntl set O_NONBLOCK socket flag");
-               goto end;
-       }
-       DBG("Client socket (fd = %i) set as non-blocking", socket);
-end:
-       return ret;
-}
-
-static
-int client_reset_inbound_state(struct notification_client *client)
-{
-       int ret;
-
-       lttng_payload_clear(&client->communication.inbound.payload);
-
-       client->communication.inbound.bytes_to_receive =
-                       sizeof(struct lttng_notification_channel_message);
-       client->communication.inbound.msg_type =
-                       LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN;
-       LTTNG_SOCK_SET_UID_CRED(&client->communication.inbound.creds, -1);
-       LTTNG_SOCK_SET_GID_CRED(&client->communication.inbound.creds, -1);
-       ret = lttng_dynamic_buffer_set_size(
-                       &client->communication.inbound.payload.buffer,
-                       client->communication.inbound.bytes_to_receive);
-
-       return ret;
-}
-
-int handle_notification_thread_client_connect(
-               struct notification_thread_state *state)
-{
-       int ret;
-       struct notification_client *client;
-
-       DBG("Handling new notification channel client connection");
-
-       client = zmalloc(sizeof(*client));
-       if (!client) {
-               /* Fatal error. */
-               ret = -1;
-               goto error;
-       }
-
-       pthread_mutex_init(&client->lock, NULL);
-       client->id = state->next_notification_client_id++;
-       CDS_INIT_LIST_HEAD(&client->condition_list);
-       lttng_payload_init(&client->communication.inbound.payload);
-       lttng_payload_init(&client->communication.outbound.payload);
-       client->communication.inbound.expect_creds = true;
-
-       ret = client_reset_inbound_state(client);
-       if (ret) {
-               ERR("Failed to reset client communication's inbound state");
-               ret = 0;
-               goto error;
-       }
-
-       ret = lttcomm_accept_unix_sock(state->notification_channel_socket);
-       if (ret < 0) {
-               ERR("Failed to accept new notification channel client connection");
-               ret = 0;
-               goto error;
-       }
-
-       client->socket = ret;
-
-       ret = socket_set_non_blocking(client->socket);
-       if (ret) {
-               ERR("Failed to set new notification channel client connection socket as non-blocking");
-               goto error;
-       }
-
-       ret = lttcomm_setsockopt_creds_unix_sock(client->socket);
-       if (ret < 0) {
-               ERR("Failed to set socket options on new notification channel client socket");
-               ret = 0;
-               goto error;
-       }
-
-       ret = lttng_poll_add(&state->events, client->socket,
-                       LPOLLIN | LPOLLERR |
-                       LPOLLHUP | LPOLLRDHUP);
-       if (ret < 0) {
-               ERR("Failed to add notification channel client socket to poll set");
-               ret = 0;
-               goto error;
-       }
-       DBG("Added new notification channel client socket (%i) to poll set",
-                       client->socket);
-
-       rcu_read_lock();
-       cds_lfht_add(state->client_socket_ht,
-                       hash_client_socket(client->socket),
-                       &client->client_socket_ht_node);
-       cds_lfht_add(state->client_id_ht,
-                       hash_client_id(client->id),
-                       &client->client_id_ht_node);
-       rcu_read_unlock();
-
-       return ret;
-
-error:
-       notification_client_destroy(client, state);
-       return ret;
-}
-
-/*
- * RCU read-lock must be held by the caller.
- * Client lock must _not_ be held by the caller.
- */
-static
-int notification_thread_client_disconnect(
-               struct notification_client *client,
-               struct notification_thread_state *state)
-{
-       int ret;
-       struct lttng_condition_list_element *condition_list_element, *tmp;
-
-       /* Acquire the client lock to disable its communication atomically. */
-       pthread_mutex_lock(&client->lock);
-       client->communication.active = false;
-       cds_lfht_del(state->client_socket_ht, &client->client_socket_ht_node);
-       cds_lfht_del(state->client_id_ht, &client->client_id_ht_node);
-       pthread_mutex_unlock(&client->lock);
-
-       ret = lttng_poll_del(&state->events, client->socket);
-       if (ret) {
-               ERR("Failed to remove client socket %d from poll set",
-                               client->socket);
-       }
-
-       /* Release all conditions to which the client was subscribed. */
-       cds_list_for_each_entry_safe(condition_list_element, tmp,
-                       &client->condition_list, node) {
-               (void) notification_thread_client_unsubscribe(client,
-                               condition_list_element->condition, state, NULL);
-       }
-
-       /*
-        * Client no longer accessible to other threads (through the
-        * client lists).
-        */
-       notification_client_destroy(client, state);
-       return ret;
-}
-
-int handle_notification_thread_client_disconnect(
-               int client_socket, struct notification_thread_state *state)
-{
-       int ret = 0;
-       struct notification_client *client;
-
-       rcu_read_lock();
-       DBG("Closing client connection (socket fd = %i)",
-                       client_socket);
-       client = get_client_from_socket(client_socket, state);
-       if (!client) {
-               /* Internal state corruption, fatal error. */
-               ERR("Unable to find client (socket fd = %i)",
-                               client_socket);
-               ret = -1;
-               goto end;
-       }
-
-       ret = notification_thread_client_disconnect(client, state);
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-int handle_notification_thread_client_disconnect_all(
-               struct notification_thread_state *state)
-{
-       struct cds_lfht_iter iter;
-       struct notification_client *client;
-       bool error_encountered = false;
-
-       rcu_read_lock();
-       DBG("Closing all client connections");
-       cds_lfht_for_each_entry(state->client_socket_ht, &iter, client,
-                       client_socket_ht_node) {
-               int ret;
-
-               ret = notification_thread_client_disconnect(
-                               client, state);
-               if (ret) {
-                       error_encountered = true;
-               }
-       }
-       rcu_read_unlock();
-       return error_encountered ? 1 : 0;
-}
-
-int handle_notification_thread_trigger_unregister_all(
-               struct notification_thread_state *state)
-{
-       bool error_occurred = false;
-       struct cds_lfht_iter iter;
-       struct lttng_trigger_ht_element *trigger_ht_element;
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(state->triggers_ht, &iter, trigger_ht_element,
-                       node) {
-               int ret = handle_notification_thread_command_unregister_trigger(
-                               state, trigger_ht_element->trigger, NULL);
-               if (ret) {
-                       error_occurred = true;
-               }
-       }
-       rcu_read_unlock();
-       return error_occurred ? -1 : 0;
-}
-
-static
-int client_handle_transmission_status(
-               struct notification_client *client,
-               enum client_transmission_status transmission_status,
-               struct notification_thread_state *state)
-{
-       int ret = 0;
-
-       switch (transmission_status) {
-       case CLIENT_TRANSMISSION_STATUS_COMPLETE:
-               ret = lttng_poll_mod(&state->events, client->socket,
-                               CLIENT_POLL_MASK_IN);
-               if (ret) {
-                       goto end;
-               }
-
-               break;
-       case CLIENT_TRANSMISSION_STATUS_QUEUED:
-               /*
-                * We want to be notified whenever there is buffer space
-                * available to send the rest of the payload.
-                */
-               ret = lttng_poll_mod(&state->events, client->socket,
-                               CLIENT_POLL_MASK_IN_OUT);
-               if (ret) {
-                       goto end;
-               }
-               break;
-       case CLIENT_TRANSMISSION_STATUS_FAIL:
-               ret = notification_thread_client_disconnect(client, state);
-               if (ret) {
-                       goto end;
-               }
-               break;
-       case CLIENT_TRANSMISSION_STATUS_ERROR:
-               ret = -1;
-               goto end;
-       default:
-               abort();
-       }
-end:
-       return ret;
-}
-
-/* Client lock must be acquired by caller. */
-static
-enum client_transmission_status client_flush_outgoing_queue(
-               struct notification_client *client)
-{
-       ssize_t ret;
-       size_t to_send_count;
-       enum client_transmission_status status;
-       struct lttng_payload_view pv = lttng_payload_view_from_payload(
-                       &client->communication.outbound.payload, 0, -1);
-       const int fds_to_send_count =
-                       lttng_payload_view_get_fd_handle_count(&pv);
-
-       ASSERT_LOCKED(client->lock);
-
-       if (!client->communication.active) {
-               status = CLIENT_TRANSMISSION_STATUS_FAIL;
-               goto end;
-       }
-
-       if (pv.buffer.size == 0) {
-               /*
-                * If both data and fds are equal to zero, we are in an invalid
-                * state.
-                */
-               LTTNG_ASSERT(fds_to_send_count != 0);
-               goto send_fds;
-       }
-
-       /* Send data. */
-       to_send_count = pv.buffer.size;
-       DBG("Flushing client (socket fd = %i) outgoing queue",
-                       client->socket);
-
-       ret = lttcomm_send_unix_sock_non_block(client->socket,
-                       pv.buffer.data,
-                       to_send_count);
-       if (ret >= 0 && ret < to_send_count) {
-               DBG("Client (socket fd = %i) outgoing queue could not be completely flushed",
-                               client->socket);
-               to_send_count -= max(ret, 0);
-
-               memmove(client->communication.outbound.payload.buffer.data,
-                               pv.buffer.data +
-                               pv.buffer.size - to_send_count,
-                               to_send_count);
-               ret = lttng_dynamic_buffer_set_size(
-                               &client->communication.outbound.payload.buffer,
-                               to_send_count);
-               if (ret) {
-                       goto error;
-               }
-
-               status = CLIENT_TRANSMISSION_STATUS_QUEUED;
-               goto end;
-       } else if (ret < 0) {
-               /* Generic error, disable the client's communication. */
-               ERR("Failed to flush outgoing queue, disconnecting client (socket fd = %i)",
-                               client->socket);
-               client->communication.active = false;
-               status = CLIENT_TRANSMISSION_STATUS_FAIL;
-               goto end;
-       } else {
-               /*
-                * No error and flushed the queue completely.
-                *
-                * The payload buffer size is used later to
-                * check if there are notifications queued. So although the
-                * direct caller knows that the transmission is complete, we
-                * need to set the buffer size to zero.
-                */
-               ret = lttng_dynamic_buffer_set_size(
-                               &client->communication.outbound.payload.buffer, 0);
-               if (ret) {
-                       goto error;
-               }
-       }
-
-send_fds:
-       /* No fds to send, transmission is complete. */
-       if (fds_to_send_count == 0) {
-               status = CLIENT_TRANSMISSION_STATUS_COMPLETE;
-               goto end;
-       }
-
-       ret = lttcomm_send_payload_view_fds_unix_sock_non_block(
-                       client->socket, &pv);
-       if (ret < 0) {
-               /* Generic error, disable the client's communication. */
-               ERR("Failed to flush outgoing fds queue, disconnecting client (socket fd = %i)",
-                               client->socket);
-               client->communication.active = false;
-               status = CLIENT_TRANSMISSION_STATUS_FAIL;
-               goto end;
-       } else if (ret == 0) {
-               /* Nothing could be sent. */
-               status = CLIENT_TRANSMISSION_STATUS_QUEUED;
-       } else {
-               /* Fd passing is an all or nothing kind of thing. */
-               status = CLIENT_TRANSMISSION_STATUS_COMPLETE;
-               /*
-                * The payload _fd_array count is used later to
-                * check if there are notifications queued. So although the
-                * direct caller knows that the transmission is complete, we
-                * need to clear the _fd_array for the queuing check.
-                */
-               lttng_dynamic_pointer_array_clear(
-                               &client->communication.outbound.payload
-                                                ._fd_handles);
-       }
-
-end:
-       if (status == CLIENT_TRANSMISSION_STATUS_COMPLETE) {
-               client->communication.outbound.queued_command_reply = false;
-               client->communication.outbound.dropped_notification = false;
-               lttng_payload_clear(&client->communication.outbound.payload);
-       }
-
-       return status;
-error:
-       return CLIENT_TRANSMISSION_STATUS_ERROR;
-}
-
-static
-bool client_has_outbound_data_left(
-               const struct notification_client *client)
-{
-       const struct lttng_payload_view pv = lttng_payload_view_from_payload(
-                       &client->communication.outbound.payload, 0, -1);
-       const bool has_data = pv.buffer.size != 0;
-       const bool has_fds = lttng_payload_view_get_fd_handle_count(&pv);
-
-       return has_data || has_fds;
-}
-
-/* Client lock must _not_ be held by the caller. */
-static
-int client_send_command_reply(struct notification_client *client,
-               struct notification_thread_state *state,
-               enum lttng_notification_channel_status status)
-{
-       int ret;
-       struct lttng_notification_channel_command_reply reply = {
-               .status = (int8_t) status,
-       };
-       struct lttng_notification_channel_message msg = {
-               .type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_COMMAND_REPLY,
-               .size = sizeof(reply),
-       };
-       char buffer[sizeof(msg) + sizeof(reply)];
-       enum client_transmission_status transmission_status;
-
-       memcpy(buffer, &msg, sizeof(msg));
-       memcpy(buffer + sizeof(msg), &reply, sizeof(reply));
-       DBG("Send command reply (%i)", (int) status);
-
-       pthread_mutex_lock(&client->lock);
-       if (client->communication.outbound.queued_command_reply) {
-               /* Protocol error. */
-               goto error_unlock;
-       }
-
-       /* Enqueue buffer to outgoing queue and flush it. */
-       ret = lttng_dynamic_buffer_append(
-                       &client->communication.outbound.payload.buffer,
-                       buffer, sizeof(buffer));
-       if (ret) {
-               goto error_unlock;
-       }
-
-       transmission_status = client_flush_outgoing_queue(client);
-
-       if (client_has_outbound_data_left(client)) {
-               /* Queue could not be emptied. */
-               client->communication.outbound.queued_command_reply = true;
-       }
-
-       pthread_mutex_unlock(&client->lock);
-       ret = client_handle_transmission_status(
-                       client, transmission_status, state);
-       if (ret) {
-               goto error;
-       }
-
-       return 0;
-error_unlock:
-       pthread_mutex_unlock(&client->lock);
-error:
-       return -1;
-}
-
-static
-int client_handle_message_unknown(struct notification_client *client,
-               struct notification_thread_state *state)
-{
-       int ret;
-       /*
-        * Receiving message header. The function will be called again
-        * once the rest of the message has been received and can be
-        * interpreted.
-        */
-       const struct lttng_notification_channel_message *msg;
-
-       LTTNG_ASSERT(sizeof(*msg) == client->communication.inbound.payload.buffer.size);
-       msg = (const struct lttng_notification_channel_message *)
-                             client->communication.inbound.payload.buffer.data;
-
-       if (msg->size == 0 ||
-                       msg->size > DEFAULT_MAX_NOTIFICATION_CLIENT_MESSAGE_PAYLOAD_SIZE) {
-               ERR("Invalid notification channel message: length = %u",
-                               msg->size);
-               ret = -1;
-               goto end;
-       }
-
-       switch (msg->type) {
-       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE:
-       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE:
-       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE:
-               break;
-       default:
-               ret = -1;
-               ERR("Invalid notification channel message: unexpected message type");
-               goto end;
-       }
-
-       client->communication.inbound.bytes_to_receive = msg->size;
-       client->communication.inbound.fds_to_receive = msg->fds;
-       client->communication.inbound.msg_type =
-                       (enum lttng_notification_channel_message_type) msg->type;
-       ret = lttng_dynamic_buffer_set_size(
-                       &client->communication.inbound.payload.buffer, msg->size);
-
-       /* msg is not valid anymore due to lttng_dynamic_buffer_set_size. */
-       msg = NULL;
-end:
-       return ret;
-}
-
-static
-int client_handle_message_handshake(struct notification_client *client,
-               struct notification_thread_state *state)
-{
-       int ret;
-       struct lttng_notification_channel_command_handshake *handshake_client;
-       const struct lttng_notification_channel_command_handshake handshake_reply = {
-                       .major = LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR,
-                       .minor = LTTNG_NOTIFICATION_CHANNEL_VERSION_MINOR,
-       };
-       const struct lttng_notification_channel_message msg_header = {
-                       .type = LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE,
-                       .size = sizeof(handshake_reply),
-       };
-       enum lttng_notification_channel_status status =
-                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
-       char send_buffer[sizeof(msg_header) + sizeof(handshake_reply)];
-
-       memcpy(send_buffer, &msg_header, sizeof(msg_header));
-       memcpy(send_buffer + sizeof(msg_header), &handshake_reply,
-                       sizeof(handshake_reply));
-
-       handshake_client =
-                       (struct lttng_notification_channel_command_handshake *)
-                                       client->communication.inbound.payload.buffer
-                                                       .data;
-       client->major = handshake_client->major;
-       client->minor = handshake_client->minor;
-       if (!client->communication.inbound.creds_received) {
-               ERR("No credentials received from client");
-               ret = -1;
-               goto end;
-       }
-
-       client->uid = LTTNG_SOCK_GET_UID_CRED(
-                       &client->communication.inbound.creds);
-       client->gid = LTTNG_SOCK_GET_GID_CRED(
-                       &client->communication.inbound.creds);
-       client->is_sessiond = LTTNG_SOCK_GET_PID_CRED(&client->communication.inbound.creds) == getpid();
-       DBG("Received handshake from client: uid = %u, gid = %u, protocol version = %i.%i, client is sessiond = %s",
-                       client->uid, client->gid, (int) client->major,
-                       (int) client->minor,
-                       client->is_sessiond ? "true" : "false");
-
-       if (handshake_client->major !=
-                       LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR) {
-               status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNSUPPORTED_VERSION;
-       }
-
-       pthread_mutex_lock(&client->lock);
-       /* Outgoing queue will be flushed when the command reply is sent. */
-       ret = lttng_dynamic_buffer_append(
-                       &client->communication.outbound.payload.buffer, send_buffer,
-                       sizeof(send_buffer));
-       if (ret) {
-               ERR("Failed to send protocol version to notification channel client");
-               goto end_unlock;
-       }
-
-       client->validated = true;
-       client->communication.active = true;
-       pthread_mutex_unlock(&client->lock);
-
-       /* Set reception state to receive the next message header. */
-       ret = client_reset_inbound_state(client);
-       if (ret) {
-               ERR("Failed to reset client communication's inbound state");
-               goto end;
-       }
-
-       /* Flushes the outgoing queue. */
-       ret = client_send_command_reply(client, state, status);
-       if (ret) {
-               ERR("Failed to send reply to notification channel client");
-               goto end;
-       }
-
-       goto end;
-end_unlock:
-       pthread_mutex_unlock(&client->lock);
-end:
-       return ret;
-}
-
-static
-int client_handle_message_subscription(
-               struct notification_client *client,
-               enum lttng_notification_channel_message_type msg_type,
-               struct notification_thread_state *state)
-{
-       int ret;
-       struct lttng_condition *condition;
-       enum lttng_notification_channel_status status =
-                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
-       struct lttng_payload_view condition_view =
-                       lttng_payload_view_from_payload(
-                                       &client->communication.inbound.payload,
-                                       0, -1);
-       size_t expected_condition_size;
-
-       /*
-        * No need to lock the client to sample the inbound state, as the
-        * only other thread accessing clients (the action executor) only
-        * uses the outbound state.
-        */
-       expected_condition_size = client->communication.inbound.payload.buffer.size;
-       ret = lttng_condition_create_from_payload(&condition_view, &condition);
-       if (ret != expected_condition_size) {
-               ERR("Malformed condition received from client");
-               goto end;
-       }
-
-       /* Ownership of condition is always transferred. */
-       if (msg_type == LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE) {
-               ret = notification_thread_client_subscribe(
-                               client, condition, state, &status);
-       } else {
-               ret = notification_thread_client_unsubscribe(
-                               client, condition, state, &status);
-       }
-
-       if (ret) {
-               goto end;
-       }
-
-       /* Set reception state to receive the next message header. */
-       ret = client_reset_inbound_state(client);
-       if (ret) {
-               ERR("Failed to reset client communication's inbound state");
-               goto end;
-       }
-
-       ret = client_send_command_reply(client, state, status);
-       if (ret) {
-               ERR("Failed to send reply to notification channel client");
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-static
-int client_dispatch_message(struct notification_client *client,
-               struct notification_thread_state *state)
-{
-       int ret = 0;
-
-       if (client->communication.inbound.msg_type !=
-                       LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE &&
-                       client->communication.inbound.msg_type !=
-                               LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN &&
-                       !client->validated) {
-               WARN("client attempted a command before handshake");
-               ret = -1;
-               goto end;
-       }
-
-       switch (client->communication.inbound.msg_type) {
-       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN:
-       {
-               ret = client_handle_message_unknown(client, state);
-               break;
-       }
-       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE:
-       {
-               ret = client_handle_message_handshake(client, state);
-               break;
-       }
-       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE:
-       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE:
-       {
-               ret = client_handle_message_subscription(client,
-                               client->communication.inbound.msg_type, state);
-               break;
-       }
-       default:
-               abort();
-       }
-end:
-       return ret;
-}
-
-/* Incoming data from client. */
-int handle_notification_thread_client_in(
-               struct notification_thread_state *state, int socket)
-{
-       int ret = 0;
-       struct notification_client *client;
-       ssize_t recv_ret;
-       size_t offset;
-
-       rcu_read_lock();
-       client = get_client_from_socket(socket, state);
-       if (!client) {
-               /* Internal error, abort. */
-               ret = -1;
-               goto end;
-       }
-
-       if (client->communication.inbound.bytes_to_receive == 0 &&
-                       client->communication.inbound.fds_to_receive != 0) {
-               /* Only FDs left to receive. */
-               goto receive_fds;
-       }
-
-       offset = client->communication.inbound.payload.buffer.size -
-                       client->communication.inbound.bytes_to_receive;
-       if (client->communication.inbound.expect_creds) {
-               recv_ret = lttcomm_recv_creds_unix_sock(socket,
-                               client->communication.inbound.payload.buffer.data + offset,
-                               client->communication.inbound.bytes_to_receive,
-                               &client->communication.inbound.creds);
-               if (recv_ret > 0) {
-                       client->communication.inbound.expect_creds = false;
-                       client->communication.inbound.creds_received = true;
-               }
-       } else {
-               recv_ret = lttcomm_recv_unix_sock_non_block(socket,
-                               client->communication.inbound.payload.buffer.data + offset,
-                               client->communication.inbound.bytes_to_receive);
-       }
-       if (recv_ret >= 0) {
-               client->communication.inbound.bytes_to_receive -= recv_ret;
-       } else {
-               goto error_disconnect_client;
-       }
-
-       if (client->communication.inbound.bytes_to_receive != 0) {
-               /* Message incomplete; wait for more data. */
-               ret = 0;
-               goto end;
-       }
-
-receive_fds:
-       LTTNG_ASSERT(client->communication.inbound.bytes_to_receive == 0);
-
-       /* Receive fds. */
-       if (client->communication.inbound.fds_to_receive != 0) {
-               ret = lttcomm_recv_payload_fds_unix_sock_non_block(
-                               client->socket,
-                               client->communication.inbound.fds_to_receive,
-                               &client->communication.inbound.payload);
-               if (ret > 0) {
-                       /*
-                        * Fds received. Non-blocking fd passing is all
-                        * or nothing.
-                        */
-                       ssize_t expected_size;
-
-                       expected_size = sizeof(int) *
-                                       client->communication.inbound
-                                                       .fds_to_receive;
-                       LTTNG_ASSERT(ret == expected_size);
-                       client->communication.inbound.fds_to_receive = 0;
-               } else if (ret == 0) {
-                       /* Received nothing. */
-                       ret = 0;
-                       goto end;
-               } else {
-                       goto error_disconnect_client;
-               }
-       }
-
-       /* At this point, the message is complete. */
-       LTTNG_ASSERT(client->communication.inbound.bytes_to_receive == 0 &&
-                       client->communication.inbound.fds_to_receive == 0);
-       ret = client_dispatch_message(client, state);
-       if (ret) {
-               /*
-                * Only returns an error if this client must be
-                * disconnected.
-                */
-               goto error_disconnect_client;
-       }
-
-end:
-       rcu_read_unlock();
-       return ret;
-
-error_disconnect_client:
-       ret = notification_thread_client_disconnect(client, state);
-       goto end;
-}
-
-/* Client ready to receive outgoing data. */
-int handle_notification_thread_client_out(
-               struct notification_thread_state *state, int socket)
-{
-       int ret;
-       struct notification_client *client;
-       enum client_transmission_status transmission_status;
-
-       rcu_read_lock();
-       client = get_client_from_socket(socket, state);
-       if (!client) {
-               /* Internal error, abort. */
-               ret = -1;
-               goto end;
-       }
-
-       pthread_mutex_lock(&client->lock);
-       transmission_status = client_flush_outgoing_queue(client);
-       pthread_mutex_unlock(&client->lock);
-
-       ret = client_handle_transmission_status(
-                       client, transmission_status, state);
-       if (ret) {
-               goto end;
-       }
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-static
-bool evaluate_buffer_usage_condition(const struct lttng_condition *condition,
-               const struct channel_state_sample *sample,
-               uint64_t buffer_capacity)
-{
-       bool result = false;
-       uint64_t threshold;
-       enum lttng_condition_type condition_type;
-       const struct lttng_condition_buffer_usage *use_condition = container_of(
-                       condition, struct lttng_condition_buffer_usage,
-                       parent);
-
-       if (use_condition->threshold_bytes.set) {
-               threshold = use_condition->threshold_bytes.value;
-       } else {
-               /*
-                * Threshold was expressed as a ratio.
-                *
-                * TODO the threshold (in bytes) of conditions expressed
-                * as a ratio of total buffer size could be cached to
-                * forgo this double-multiplication, or it could be performed
-                * as fixed-point math.
-                *
-                * Note that caching should accommodate the case where the
-                * condition applies to multiple channels (i.e. don't assume
-                * that all channels matching my_chann* have the same size...)
-                */
-               threshold = (uint64_t) (use_condition->threshold_ratio.value *
-                               (double) buffer_capacity);
-       }
-
-       condition_type = lttng_condition_get_type(condition);
-       if (condition_type == LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW) {
-               DBG("Low buffer usage condition being evaluated: threshold = %" PRIu64 ", highest usage = %" PRIu64,
-                               threshold, sample->highest_usage);
-
-               /*
-                * The low condition should only be triggered once _all_ of the
-                * streams in a channel have gone below the "low" threshold.
-                */
-               if (sample->highest_usage <= threshold) {
-                       result = true;
-               }
-       } else {
-               DBG("High buffer usage condition being evaluated: threshold = %" PRIu64 ", highest usage = %" PRIu64,
-                               threshold, sample->highest_usage);
-
-               /*
-                * For high buffer usage scenarios, we want to trigger whenever
-                * _any_ of the streams has reached the "high" threshold.
-                */
-               if (sample->highest_usage >= threshold) {
-                       result = true;
-               }
-       }
-
-       return result;
-}
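
For reference, the ratio-to-bytes conversion performed above (when a
buffer-usage threshold is expressed as a ratio of the buffer capacity) boils
down to a single multiplication. A minimal, standalone sketch, with a
hypothetical helper name that is not part of the actual source:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the ratio-to-bytes math used above. */
    static uint64_t usage_ratio_to_bytes(double ratio, uint64_t buffer_capacity)
    {
            return (uint64_t) (ratio * (double) buffer_capacity);
    }

    int main(void)
    {
            /* A 0.75 usage ratio on a 4 MiB channel yields a 3 MiB threshold. */
            printf("threshold = %" PRIu64 " bytes\n",
                            usage_ratio_to_bytes(0.75, 4 * 1024 * 1024));
            return 0;
    }
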
-
-static
-bool evaluate_session_consumed_size_condition(
-               const struct lttng_condition *condition,
-               uint64_t session_consumed_size)
-{
-       uint64_t threshold;
-       const struct lttng_condition_session_consumed_size *size_condition =
-                       container_of(condition,
-                               struct lttng_condition_session_consumed_size,
-                               parent);
-
-       threshold = size_condition->consumed_threshold_bytes.value;
-       DBG("Session consumed size condition being evaluated: threshold = %" PRIu64 ", current size = %" PRIu64,
-                       threshold, session_consumed_size);
-       return session_consumed_size >= threshold;
-}
-
-static
-int evaluate_buffer_condition(const struct lttng_condition *condition,
-               struct lttng_evaluation **evaluation,
-               const struct notification_thread_state *state,
-               const struct channel_state_sample *previous_sample,
-               const struct channel_state_sample *latest_sample,
-               uint64_t previous_session_consumed_total,
-               uint64_t latest_session_consumed_total,
-               struct channel_info *channel_info)
-{
-       int ret = 0;
-       enum lttng_condition_type condition_type;
-       const bool previous_sample_available = !!previous_sample;
-       bool previous_sample_result = false;
-       bool latest_sample_result;
-
-       condition_type = lttng_condition_get_type(condition);
-
-       switch (condition_type) {
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
-               if (caa_likely(previous_sample_available)) {
-                       previous_sample_result =
-                               evaluate_buffer_usage_condition(condition,
-                                       previous_sample, channel_info->capacity);
-               }
-               latest_sample_result = evaluate_buffer_usage_condition(
-                               condition, latest_sample,
-                               channel_info->capacity);
-               break;
-       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
-               if (caa_likely(previous_sample_available)) {
-                       previous_sample_result =
-                               evaluate_session_consumed_size_condition(
-                                       condition,
-                                       previous_session_consumed_total);
-               }
-               latest_sample_result =
-                               evaluate_session_consumed_size_condition(
-                                       condition,
-                                       latest_session_consumed_total);
-               break;
-       default:
-               /* Unknown condition type; internal error. */
-               abort();
-       }
-
-       if (!latest_sample_result ||
-                       (previous_sample_result == latest_sample_result)) {
-               /*
-                * Only trigger on a condition evaluation transition.
-                *
-                * NOTE: This edge-triggered logic may not be appropriate for
-                * future condition types.
-                */
-               goto end;
-       }
-
-       if (!evaluation || !latest_sample_result) {
-               goto end;
-       }
-
-       switch (condition_type) {
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
-               *evaluation = lttng_evaluation_buffer_usage_create(
-                               condition_type,
-                               latest_sample->highest_usage,
-                               channel_info->capacity);
-               break;
-       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
-               *evaluation = lttng_evaluation_session_consumed_size_create(
-                               latest_session_consumed_total);
-               break;
-       default:
-               abort();
-       }
-
-       if (!*evaluation) {
-               ret = -1;
-               goto end;
-       }
-end:
-       return ret;
-}
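
The edge-triggered rule applied above can be summarized as: produce an
evaluation only when the latest sample satisfies the condition while the
previous one did not. A minimal sketch of that check, with a hypothetical
function name:

    #include <stdbool.h>

    /* True only on a false -> true transition of the condition's result. */
    static bool condition_result_is_rising_edge(bool previous_result,
                    bool latest_result)
    {
            return latest_result && !previous_result;
    }
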
-
-static
-int client_notification_overflow(struct notification_client *client)
-{
-       int ret = 0;
-       const struct lttng_notification_channel_message msg = {
-               .type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_NOTIFICATION_DROPPED,
-       };
-
-       ASSERT_LOCKED(client->lock);
-
-       DBG("Dropping notification addressed to client (socket fd = %i)",
-                       client->socket);
-       if (client->communication.outbound.dropped_notification) {
-               /*
-                * The client already has a "notification dropped" message
-                * in its outgoing queue. Nothing to do since all
-                * of those messages are coalesced.
-                */
-               goto end;
-       }
-
-       client->communication.outbound.dropped_notification = true;
-       ret = lttng_dynamic_buffer_append(
-                       &client->communication.outbound.payload.buffer, &msg,
-                       sizeof(msg));
-       if (ret) {
-               PERROR("Failed to enqueue \"dropped notification\" message in client's (socket fd = %i) outgoing queue",
-                               client->socket);
-       }
-end:
-       return ret;
-}
-
-static int client_handle_transmission_status_wrapper(
-               struct notification_client *client,
-               enum client_transmission_status status,
-               void *user_data)
-{
-       return client_handle_transmission_status(client, status,
-                       (struct notification_thread_state *) user_data);
-}
-
-static
-int send_evaluation_to_clients(const struct lttng_trigger *trigger,
-               const struct lttng_evaluation *evaluation,
-               struct notification_client_list *client_list,
-               struct notification_thread_state *state,
-               uid_t object_uid, gid_t object_gid)
-{
-       const struct lttng_credentials creds = {
-               .uid = LTTNG_OPTIONAL_INIT_VALUE(object_uid),
-               .gid = LTTNG_OPTIONAL_INIT_VALUE(object_gid),
-       };
-
-       return notification_client_list_send_evaluation(client_list,
-                       trigger, evaluation,
-                       &creds,
-                       client_handle_transmission_status_wrapper, state);
-}
-
-/*
- * Permission checks relative to notification channel clients are performed
- * here. Notice how object, client, and trigger credentials are involved in
- * this check.
- *
- * The `object` credentials are the credentials associated with the "subject"
- * of a condition. For instance, a `rotation completed` condition applies
- * to a session. When that condition is met, it will produce an evaluation
- * against a session. Hence, in this case, the `object` credentials are the
- * credentials of the "subject" session.
- *
- * The `trigger` credentials are the credentials of the user that registered the
- * trigger.
- *
- * The `client` credentials are the credentials of the user that created a given
- * notification channel.
- *
- * In terms of visibility, it is expected that non-privileged users can only
- * register triggers against "their" objects (their own sessions and
- * applications they are allowed to interact with). They can then open a
- * notification channel and subscribe to notifications associated with those
- * triggers.
- *
- * As for privileged users, they can register triggers against the objects of
- * other users. They can then subscribe to the notifications associated with
- * their triggers. Privileged users _can't_ subscribe to the notifications of
- * triggers owned by other users; they must create their own triggers.
- *
- * This is more a concern of usability than security. It would be difficult for
- * a root user to reliably subscribe to a specific set of conditions without
- * interference from external users (those could, for instance, unregister
- * their triggers).
- */
-int notification_client_list_send_evaluation(
-               struct notification_client_list *client_list,
-               const struct lttng_trigger *trigger,
-               const struct lttng_evaluation *evaluation,
-               const struct lttng_credentials *source_object_creds,
-               report_client_transmission_result_cb client_report,
-               void *user_data)
-{
-       int ret = 0;
-       struct lttng_payload msg_payload;
-       struct notification_client_list_element *client_list_element, *tmp;
-       const struct lttng_notification notification = {
-               .trigger = (struct lttng_trigger *) trigger,
-               .evaluation = (struct lttng_evaluation *) evaluation,
-       };
-       struct lttng_notification_channel_message msg_header = {
-               .type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_NOTIFICATION,
-       };
-       const struct lttng_credentials *trigger_creds =
-                       lttng_trigger_get_credentials(trigger);
-
-       lttng_payload_init(&msg_payload);
-
-       ret = lttng_dynamic_buffer_append(&msg_payload.buffer, &msg_header,
-                       sizeof(msg_header));
-       if (ret) {
-               goto end;
-       }
-
-       ret = lttng_notification_serialize(&notification, &msg_payload);
-       if (ret) {
-               ERR("Failed to serialize notification");
-               ret = -1;
-               goto end;
-       }
-
-       /* Update payload size. */
-       ((struct lttng_notification_channel_message *) msg_payload.buffer.data)
-                       ->size = (uint32_t)(
-                       msg_payload.buffer.size - sizeof(msg_header));
-
-       /* Update the payload number of fds. */
-       {
-               const struct lttng_payload_view pv = lttng_payload_view_from_payload(
-                               &msg_payload, 0, -1);
-
-               ((struct lttng_notification_channel_message *)
-                               msg_payload.buffer.data)->fds = (uint32_t)
-                               lttng_payload_view_get_fd_handle_count(&pv);
-       }
-
-       pthread_mutex_lock(&client_list->lock);
-       cds_list_for_each_entry_safe(client_list_element, tmp,
-                       &client_list->clients_list, node) {
-               enum client_transmission_status transmission_status;
-               struct notification_client *client =
-                               client_list_element->client;
-
-               ret = 0;
-               pthread_mutex_lock(&client->lock);
-               if (!client->communication.active) {
-                       /*
-                        * Skip inactive client (protocol error or
-                        * disconnecting).
-                        */
-                       DBG("Skipping client as it is marked as inactive");
-                       goto skip_client;
-               }
-
-               if (lttng_trigger_is_hidden(trigger) && !client->is_sessiond) {
-                       /*
-                        * Notifications resulting from a hidden trigger are
-                        * only sent to the session daemon.
-                        */
-                       DBG("Skipping client as the trigger is hidden and the client is not the session daemon");
-                       goto skip_client;
-               }
-
-               if (source_object_creds) {
-                       if (client->uid != lttng_credentials_get_uid(source_object_creds) &&
-                                       client->gid != lttng_credentials_get_gid(source_object_creds) &&
-                                       client->uid != 0) {
-                               /*
-                                * Client is not allowed to monitor this
-                                * object.
-                                */
-                               DBG("Skipping client as it does not have the object permission to receive notification for this trigger");
-                               goto skip_client;
-                       }
-               }
-
-               if (client->uid != lttng_credentials_get_uid(trigger_creds)) {
-                       DBG("Skipping client as it does not have the permission to receive notification for this trigger");
-                       goto skip_client;
-               }
-
-               DBG("Sending notification to client (fd = %i, %zu bytes)",
-                               client->socket, msg_payload.buffer.size);
-
-               if (client_has_outbound_data_left(client)) {
-                       /*
-                        * Outgoing data is already buffered for this client;
-                        * drop the notification and enqueue a "dropped
-                        * notification" message if this is the first dropped
-                        * notification since the socket spilled over to the
-                        * queue.
-                        */
-                       ret = client_notification_overflow(client);
-                       if (ret) {
-                               /* Fatal error. */
-                               goto skip_client;
-                       }
-               }
-
-               ret = lttng_payload_copy(&msg_payload, &client->communication.outbound.payload);
-               if (ret) {
-                       /* Fatal error. */
-                       goto skip_client;
-               }
-
-               transmission_status = client_flush_outgoing_queue(client);
-               pthread_mutex_unlock(&client->lock);
-               ret = client_report(client, transmission_status, user_data);
-               if (ret) {
-                       /* Fatal error. */
-                       goto end_unlock_list;
-               }
-
-               continue;
-
-skip_client:
-               pthread_mutex_unlock(&client->lock);
-               if (ret) {
-                       /* Fatal error. */
-                       goto end_unlock_list;
-               }
-       }
-       ret = 0;
-
-end_unlock_list:
-       pthread_mutex_unlock(&client_list->lock);
-end:
-       lttng_payload_reset(&msg_payload);
-       return ret;
-}
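
Leaving aside the hidden-trigger and inactive-client checks, and assuming
object credentials are provided, the per-client visibility rules enforced in
the loop above amount to the predicate sketched below. This is a reading aid
only; the function name and flat credential parameters are hypothetical:

    #include <stdbool.h>
    #include <sys/types.h>

    static bool client_may_receive_notification(uid_t client_uid,
                    gid_t client_gid, uid_t object_uid, gid_t object_gid,
                    uid_t trigger_uid)
    {
            /*
             * Root, the object's owner, and members of the object's group may
             * monitor the object.
             */
            if (client_uid != object_uid && client_gid != object_gid &&
                            client_uid != 0) {
                    return false;
            }

            /* A client only receives notifications for its own triggers. */
            return client_uid == trigger_uid;
    }
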
-
-static
-struct lttng_event_notifier_notification *recv_one_event_notifier_notification(
-               int notification_pipe_read_fd, enum lttng_domain_type domain)
-{
-       int ret;
-       uint64_t token;
-       struct lttng_event_notifier_notification *notification = NULL;
-       char *capture_buffer = NULL;
-       size_t capture_buffer_size;
-       void *reception_buffer;
-       size_t reception_size;
-
-       struct lttng_ust_abi_event_notifier_notification ust_notification;
-       struct lttng_kernel_abi_event_notifier_notification kernel_notification;
-
-       /* Init lttng_event_notifier_notification */
-       switch (domain) {
-       case LTTNG_DOMAIN_UST:
-               reception_buffer = (void *) &ust_notification;
-               reception_size = sizeof(ust_notification);
-               break;
-       case LTTNG_DOMAIN_KERNEL:
-               reception_buffer = (void *) &kernel_notification;
-               reception_size = sizeof(kernel_notification);
-               break;
-       default:
-               abort();
-       }
-
-       /*
-        * The monitoring pipe only holds messages smaller than PIPE_BUF,
-        * ensuring that reads and writes of tracer notifications are atomic.
-        */
-       ret = lttng_read(notification_pipe_read_fd, reception_buffer,
-                       reception_size);
-       if (ret != reception_size) {
-               PERROR("Failed to read from event source notification pipe: fd = %d, size to read = %zu, ret = %d",
-                               notification_pipe_read_fd, reception_size, ret);
-               ret = -1;
-               goto end;
-       }
-
-       switch (domain) {
-       case LTTNG_DOMAIN_UST:
-               token = ust_notification.token;
-               capture_buffer_size = ust_notification.capture_buf_size;
-               break;
-       case LTTNG_DOMAIN_KERNEL:
-               token = kernel_notification.token;
-               capture_buffer_size = kernel_notification.capture_buf_size;
-               break;
-       default:
-               abort();
-       }
-
-       if (capture_buffer_size == 0) {
-               capture_buffer = NULL;
-               goto skip_capture;
-       }
-
-       if (capture_buffer_size > MAX_CAPTURE_SIZE) {
-               ERR("Event notifier has a capture payload size which exceeds the maximum allowed size: capture_payload_size = %zu bytes, max allowed size = %d bytes",
-                               capture_buffer_size, MAX_CAPTURE_SIZE);
-               goto end;
-       }
-
-       capture_buffer = zmalloc(capture_buffer_size);
-       if (!capture_buffer) {
-               ERR("Failed to allocate capture buffer");
-               goto end;
-       }
-
-       /* Fetch additional payload (capture). */
-       ret = lttng_read(notification_pipe_read_fd, capture_buffer, capture_buffer_size);
-       if (ret != capture_buffer_size) {
-               ERR("Failed to read from event source pipe (fd = %i)",
-                               notification_pipe_read_fd);
-               goto end;
-       }
-
-skip_capture:
-       notification = lttng_event_notifier_notification_create(token, domain,
-                       capture_buffer, capture_buffer_size);
-       if (notification == NULL) {
-               goto end;
-       }
-
-       /*
-        * Ownership transferred to the lttng_event_notifier_notification object.
-        */
-       capture_buffer = NULL;
-
-end:
-       free(capture_buffer);
-       return notification;
-}
-
-static
-int dispatch_one_event_notifier_notification(struct notification_thread_state *state,
-               struct lttng_event_notifier_notification *notification)
-{
-       struct cds_lfht_node *node;
-       struct cds_lfht_iter iter;
-       struct notification_trigger_tokens_ht_element *element;
-       struct lttng_evaluation *evaluation = NULL;
-       enum action_executor_status executor_status;
-       struct notification_client_list *client_list = NULL;
-       int ret;
-       unsigned int capture_count = 0;
-
-       /* Find triggers associated with this token. */
-       rcu_read_lock();
-       cds_lfht_lookup(state->trigger_tokens_ht,
-                       hash_key_u64(&notification->tracer_token, lttng_ht_seed),
-                       match_trigger_token, &notification->tracer_token, &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       if (caa_unlikely(!node)) {
-               /*
-                * This is not an error: slow consumption of the tracer
-                * notifications can lead to situations where a trigger is
-                * removed but we still get tracer notifications matching a
-                * trigger that no longer exists.
-                */
-               ret = 0;
-               goto end_unlock;
-       }
-
-       element = caa_container_of(node,
-                       struct notification_trigger_tokens_ht_element,
-                       node);
-
-       if (lttng_condition_event_rule_matches_get_capture_descriptor_count(
-                           lttng_trigger_get_const_condition(element->trigger),
-                           &capture_count) != LTTNG_CONDITION_STATUS_OK) {
-               ERR("Failed to get capture count");
-               ret = -1;
-               goto end;
-       }
-
-       if (!notification->capture_buffer && capture_count != 0) {
-               ERR("Expected capture but capture buffer is null");
-               ret = -1;
-               goto end;
-       }
-
-       evaluation = lttng_evaluation_event_rule_matches_create(
-                       container_of(lttng_trigger_get_const_condition(
-                                                    element->trigger),
-                                       struct lttng_condition_event_rule_matches,
-                                       parent),
-                       notification->capture_buffer,
-                       notification->capture_buf_size, false);
-
-       if (evaluation == NULL) {
-               ERR("Failed to create event rule matches evaluation while creating and enqueuing action executor job");
-               ret = -1;
-               goto end_unlock;
-       }
-
-       client_list = get_client_list_from_condition(state,
-                       lttng_trigger_get_const_condition(element->trigger));
-       executor_status = action_executor_enqueue_trigger(state->executor,
-                       element->trigger, evaluation, NULL, client_list);
-       switch (executor_status) {
-       case ACTION_EXECUTOR_STATUS_OK:
-               ret = 0;
-               break;
-       case ACTION_EXECUTOR_STATUS_OVERFLOW:
-       {
-               struct notification_client_list_element *client_list_element,
-                               *tmp;
-
-               /*
-                * Not a fatal error; this is expected and simply means the
-                * executor has too much work queued already.
-                */
-               ret = 0;
-
-               /* No clients subscribed to notifications for this trigger. */
-               if (!client_list) {
-                       break;
-               }
-
-               /* Warn clients that a notification (or more) was dropped. */
-               pthread_mutex_lock(&client_list->lock);
-               cds_list_for_each_entry_safe(client_list_element, tmp,
-                               &client_list->clients_list, node) {
-                       enum client_transmission_status transmission_status;
-                       struct notification_client *client =
-                                       client_list_element->client;
-
-                       pthread_mutex_lock(&client->lock);
-                       ret = client_notification_overflow(client);
-                       if (ret) {
-                               /* Fatal error. */
-                               goto next_client;
-                       }
-
-                       transmission_status =
-                                       client_flush_outgoing_queue(client);
-                       ret = client_handle_transmission_status(
-                                       client, transmission_status, state);
-                       if (ret) {
-                               /* Fatal error. */
-                               goto next_client;
-                       }
-next_client:
-                       pthread_mutex_unlock(&client->lock);
-                       if (ret) {
-                               break;
-                       }
-               }
-
-               pthread_mutex_unlock(&client_list->lock);
-               break;
-       }
-       case ACTION_EXECUTOR_STATUS_INVALID:
-       case ACTION_EXECUTOR_STATUS_ERROR:
-               /* Fatal error, shut down everything. */
-               ERR("Fatal error encountered while enqueuing action to the action executor");
-               ret = -1;
-               goto end_unlock;
-       default:
-               /* Unhandled error. */
-               abort();
-       }
-
-end_unlock:
-       notification_client_list_put(client_list);
-       rcu_read_unlock();
-end:
-       return ret;
-}
-
-static
-int handle_one_event_notifier_notification(
-               struct notification_thread_state *state,
-               int pipe, enum lttng_domain_type domain)
-{
-       int ret = 0;
-       struct lttng_event_notifier_notification *notification = NULL;
-
-       notification = recv_one_event_notifier_notification(pipe, domain);
-       if (notification == NULL) {
-               /* Reception failed; don't consider it fatal. */
-               ERR("Error receiving an event notifier notification from tracer: fd = %i, domain = %s",
-                               pipe, lttng_domain_type_str(domain));
-               goto end;
-       }
-
-       ret = dispatch_one_event_notifier_notification(state, notification);
-       if (ret) {
-               ERR("Error dispatching an event notifier notification from tracer: fd = %i, domain = %s",
-                               pipe, lttng_domain_type_str(domain));
-               goto end;
-       }
-
-end:
-       lttng_event_notifier_notification_destroy(notification);
-       return ret;
-}
-
-int handle_notification_thread_event_notification(struct notification_thread_state *state,
-               int pipe, enum lttng_domain_type domain)
-{
-       return handle_one_event_notifier_notification(state, pipe, domain);
-}
-
-int handle_notification_thread_channel_sample(
-               struct notification_thread_state *state, int pipe,
-               enum lttng_domain_type domain)
-{
-       int ret = 0;
-       struct lttcomm_consumer_channel_monitor_msg sample_msg;
-       struct channel_info *channel_info;
-       struct cds_lfht_node *node;
-       struct cds_lfht_iter iter;
-       struct lttng_channel_trigger_list *trigger_list;
-       struct lttng_trigger_list_element *trigger_list_element;
-       bool previous_sample_available = false;
-       struct channel_state_sample previous_sample, latest_sample;
-       uint64_t previous_session_consumed_total, latest_session_consumed_total;
-       struct lttng_credentials channel_creds;
-
-       /*
-        * The monitoring pipe only holds messages smaller than PIPE_BUF,
-        * ensuring that reads and writes of sampling messages are atomic.
-        */
-       ret = lttng_read(pipe, &sample_msg, sizeof(sample_msg));
-       if (ret != sizeof(sample_msg)) {
-               ERR("Failed to read from monitoring pipe (fd = %i)",
-                               pipe);
-               ret = -1;
-               goto end;
-       }
-
-       ret = 0;
-       latest_sample.key.key = sample_msg.key;
-       latest_sample.key.domain = domain;
-       latest_sample.highest_usage = sample_msg.highest;
-       latest_sample.lowest_usage = sample_msg.lowest;
-       latest_sample.channel_total_consumed = sample_msg.total_consumed;
-
-       rcu_read_lock();
-
-       /* Retrieve the channel's information. */
-       cds_lfht_lookup(state->channels_ht,
-                       hash_channel_key(&latest_sample.key),
-                       match_channel_info,
-                       &latest_sample.key,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       if (caa_unlikely(!node)) {
-               /*
-                * Not an error since the consumer can push a sample to the pipe
-                * and the rest of the session daemon could notify us of the
-                * channel's destruction before we get a chance to process that
-                * sample.
-                */
-               DBG("Received a sample for an unknown channel from consumerd, key = %" PRIu64 " in %s domain",
-                               latest_sample.key.key,
-                               lttng_domain_type_str(domain));
-               goto end_unlock;
-       }
-       channel_info = caa_container_of(node, struct channel_info,
-                       channels_ht_node);
-       DBG("Handling channel sample for channel %s (key = %" PRIu64 ") in session %s (highest usage = %" PRIu64 ", lowest usage = %" PRIu64", total consumed = %" PRIu64")",
-                       channel_info->name,
-                       latest_sample.key.key,
-                       channel_info->session_info->name,
-                       latest_sample.highest_usage,
-                       latest_sample.lowest_usage,
-                       latest_sample.channel_total_consumed);
-
-       previous_session_consumed_total =
-                       channel_info->session_info->consumed_data_size;
-
-       /* Retrieve the channel's last sample, if it exists, and update it. */
-       cds_lfht_lookup(state->channel_state_ht,
-                       hash_channel_key(&latest_sample.key),
-                       match_channel_state_sample,
-                       &latest_sample.key,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       if (caa_likely(node)) {
-               struct channel_state_sample *stored_sample;
-
-               /* Update the sample stored. */
-               stored_sample = caa_container_of(node,
-                               struct channel_state_sample,
-                               channel_state_ht_node);
-
-               memcpy(&previous_sample, stored_sample,
-                               sizeof(previous_sample));
-               stored_sample->highest_usage = latest_sample.highest_usage;
-               stored_sample->lowest_usage = latest_sample.lowest_usage;
-               stored_sample->channel_total_consumed = latest_sample.channel_total_consumed;
-               previous_sample_available = true;
-
-               latest_session_consumed_total =
-                               previous_session_consumed_total +
-                               (latest_sample.channel_total_consumed - previous_sample.channel_total_consumed);
-       } else {
-               /*
-                * This is the channel's first sample; allocate space for and
-                * store the new sample.
-                */
-               struct channel_state_sample *stored_sample;
-
-               stored_sample = zmalloc(sizeof(*stored_sample));
-               if (!stored_sample) {
-                       ret = -1;
-                       goto end_unlock;
-               }
-
-               memcpy(stored_sample, &latest_sample, sizeof(*stored_sample));
-               cds_lfht_node_init(&stored_sample->channel_state_ht_node);
-               cds_lfht_add(state->channel_state_ht,
-                               hash_channel_key(&stored_sample->key),
-                               &stored_sample->channel_state_ht_node);
-
-               latest_session_consumed_total =
-                               previous_session_consumed_total +
-                               latest_sample.channel_total_consumed;
-       }
-
-       channel_info->session_info->consumed_data_size =
-                       latest_session_consumed_total;
-
-       /* Find triggers associated with this channel. */
-       cds_lfht_lookup(state->channel_triggers_ht,
-                       hash_channel_key(&latest_sample.key),
-                       match_channel_trigger_list,
-                       &latest_sample.key,
-                       &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       if (caa_likely(!node)) {
-               goto end_unlock;
-       }
-
-       channel_creds = (typeof(channel_creds)) {
-               .uid = LTTNG_OPTIONAL_INIT_VALUE(channel_info->session_info->uid),
-               .gid = LTTNG_OPTIONAL_INIT_VALUE(channel_info->session_info->gid),
-       };
-
-       trigger_list = caa_container_of(node, struct lttng_channel_trigger_list,
-                       channel_triggers_ht_node);
-       cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
-                       node) {
-               const struct lttng_condition *condition;
-               struct lttng_trigger *trigger;
-               struct notification_client_list *client_list = NULL;
-               struct lttng_evaluation *evaluation = NULL;
-               enum action_executor_status executor_status;
-
-               ret = 0;
-               trigger = trigger_list_element->trigger;
-               condition = lttng_trigger_get_const_condition(trigger);
-               LTTNG_ASSERT(condition);
-
-               /*
-                * Check if any client is subscribed to the result of this
-                * evaluation.
-                */
-               client_list = get_client_list_from_condition(state, condition);
-
-               ret = evaluate_buffer_condition(condition, &evaluation, state,
-                               previous_sample_available ? &previous_sample : NULL,
-                               &latest_sample,
-                               previous_session_consumed_total,
-                               latest_session_consumed_total,
-                               channel_info);
-               if (caa_unlikely(ret)) {
-                       goto put_list;
-               }
-
-               if (caa_likely(!evaluation)) {
-                       goto put_list;
-               }
-
-               /*
-                * Ownership of `evaluation` transferred to the action executor
-                * no matter the result.
-                */
-               executor_status = action_executor_enqueue_trigger(
-                               state->executor, trigger, evaluation,
-                               &channel_creds, client_list);
-               evaluation = NULL;
-               switch (executor_status) {
-               case ACTION_EXECUTOR_STATUS_OK:
-                       break;
-               case ACTION_EXECUTOR_STATUS_ERROR:
-               case ACTION_EXECUTOR_STATUS_INVALID:
-                       /*
-                        * TODO Add trigger identification (name/id) when
-                        * it is added to the API.
-                        */
-                       ERR("Fatal error occurred while enqueuing action associated with buffer-condition trigger");
-                       ret = -1;
-                       goto put_list;
-               case ACTION_EXECUTOR_STATUS_OVERFLOW:
-                       /*
-                        * TODO Add trigger identification (name/id) when
-                        * it is added to the API.
-                        *
-                        * Not a fatal error.
-                        */
-                       WARN("No space left when enqueuing action associated with buffer-condition trigger");
-                       ret = 0;
-                       goto put_list;
-               default:
-                       abort();
-               }
-
-put_list:
-               notification_client_list_put(client_list);
-               if (caa_unlikely(ret)) {
-                       break;
-               }
-       }
-end_unlock:
-       rcu_read_unlock();
-end:
-       return ret;
-}
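
The session consumed-size bookkeeping above follows one rule: the session
total grows by the sampled channel's consumption delta since its previous
sample, or by the channel's full total when this is its first sample. A
minimal sketch with a hypothetical helper name:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t update_session_consumed_total(
                    uint64_t previous_session_total,
                    uint64_t latest_channel_total,
                    bool have_previous_sample,
                    uint64_t previous_channel_total)
    {
            const uint64_t delta = have_previous_sample ?
                            latest_channel_total - previous_channel_total :
                            latest_channel_total;

            return previous_session_total + delta;
    }
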
diff --git a/src/bin/lttng-sessiond/notification-thread-events.cpp b/src/bin/lttng-sessiond/notification-thread-events.cpp
new file mode 100644 (file)
index 0000000..6f449fc
--- /dev/null
@@ -0,0 +1,4983 @@
+/*
+ * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include "lttng/action/action.h"
+#include "lttng/trigger/trigger-internal.h"
+#define _LGPL_SOURCE
+#include <urcu.h>
+#include <urcu/rculfhash.h>
+
+#include <common/defaults.h>
+#include <common/error.h>
+#include <common/futex.h>
+#include <common/unix.h>
+#include <common/dynamic-buffer.h>
+#include <common/hashtable/utils.h>
+#include <common/sessiond-comm/sessiond-comm.h>
+#include <common/macros.h>
+#include <lttng/condition/condition.h>
+#include <lttng/action/action-internal.h>
+#include <lttng/action/list-internal.h>
+#include <lttng/domain-internal.h>
+#include <lttng/notification/notification-internal.h>
+#include <lttng/condition/condition-internal.h>
+#include <lttng/condition/buffer-usage-internal.h>
+#include <lttng/condition/session-consumed-size-internal.h>
+#include <lttng/condition/session-rotation-internal.h>
+#include <lttng/condition/event-rule-matches-internal.h>
+#include <lttng/domain-internal.h>
+#include <lttng/notification/channel-internal.h>
+#include <lttng/trigger/trigger-internal.h>
+#include <lttng/event-rule/event-rule-internal.h>
+
+#include <time.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <fcntl.h>
+
+#include "condition-internal.h"
+#include "event-notifier-error-accounting.h"
+#include "notification-thread.h"
+#include "notification-thread-events.h"
+#include "notification-thread-commands.h"
+#include "lttng-sessiond.h"
+#include "kernel.h"
+
+#define CLIENT_POLL_MASK_IN (LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP)
+#define CLIENT_POLL_MASK_IN_OUT (CLIENT_POLL_MASK_IN | LPOLLOUT)
+
+/* The tracers currently limit the capture size to PIPE_BUF (4 KiB on Linux). */
+#define MAX_CAPTURE_SIZE (PIPE_BUF)
+
+enum lttng_object_type {
+       LTTNG_OBJECT_TYPE_UNKNOWN,
+       LTTNG_OBJECT_TYPE_NONE,
+       LTTNG_OBJECT_TYPE_CHANNEL,
+       LTTNG_OBJECT_TYPE_SESSION,
+};
+
+struct lttng_trigger_list_element {
+       /* No ownership of the trigger object is assumed. */
+       struct lttng_trigger *trigger;
+       struct cds_list_head node;
+};
+
+struct lttng_channel_trigger_list {
+       struct channel_key channel_key;
+       /* List of struct lttng_trigger_list_element. */
+       struct cds_list_head list;
+       /* Node in the channel_triggers_ht */
+       struct cds_lfht_node channel_triggers_ht_node;
+       /* call_rcu delayed reclaim. */
+       struct rcu_head rcu_node;
+};
+
+/*
+ * List of triggers applying to a given session.
+ *
+ * See:
+ *   - lttng_session_trigger_list_create()
+ *   - lttng_session_trigger_list_build()
+ *   - lttng_session_trigger_list_destroy()
+ *   - lttng_session_trigger_list_add()
+ */
+struct lttng_session_trigger_list {
+       /*
+        * Not owned by this; points to the session_info structure's
+        * session name.
+        */
+       const char *session_name;
+       /* List of struct lttng_trigger_list_element. */
+       struct cds_list_head list;
+       /* Node in the session_triggers_ht */
+       struct cds_lfht_node session_triggers_ht_node;
+       /*
+        * Weak reference to the notification system's session triggers
+        * hashtable.
+        *
+        * The session trigger list structure is owned by the session's
+        * session_info.
+        *
+        * The session_info is kept alive by the channel_infos holding a
+        * reference to it (reference counting). When those channels are
+        * destroyed (at runtime or on teardown), the references they hold
+        * to the session_info are released. On destruction of session_info,
+        * session_info_destroy() will remove the list of triggers applying
+        * to this session from the notification system's state.
+        *
+        * This implies that the session_triggers_ht must be destroyed
+        * after the channels.
+        */
+       struct cds_lfht *session_triggers_ht;
+       /* Used for delayed RCU reclaim. */
+       struct rcu_head rcu_node;
+};
+
+struct lttng_trigger_ht_element {
+       struct lttng_trigger *trigger;
+       struct cds_lfht_node node;
+       struct cds_lfht_node node_by_name_uid;
+       struct cds_list_head client_list_trigger_node;
+       /* call_rcu delayed reclaim. */
+       struct rcu_head rcu_node;
+};
+
+struct lttng_condition_list_element {
+       struct lttng_condition *condition;
+       struct cds_list_head node;
+};
+
+struct channel_state_sample {
+       struct channel_key key;
+       struct cds_lfht_node channel_state_ht_node;
+       uint64_t highest_usage;
+       uint64_t lowest_usage;
+       uint64_t channel_total_consumed;
+       /* call_rcu delayed reclaim. */
+       struct rcu_head rcu_node;
+};
+
+static unsigned long hash_channel_key(struct channel_key *key);
+static int evaluate_buffer_condition(const struct lttng_condition *condition,
+               struct lttng_evaluation **evaluation,
+               const struct notification_thread_state *state,
+               const struct channel_state_sample *previous_sample,
+               const struct channel_state_sample *latest_sample,
+               uint64_t previous_session_consumed_total,
+               uint64_t latest_session_consumed_total,
+               struct channel_info *channel_info);
+static
+int send_evaluation_to_clients(const struct lttng_trigger *trigger,
+               const struct lttng_evaluation *evaluation,
+               struct notification_client_list *client_list,
+               struct notification_thread_state *state,
+               uid_t channel_uid, gid_t channel_gid);
+
+
+/* session_info API */
+static
+void session_info_destroy(void *_data);
+static
+void session_info_get(struct session_info *session_info);
+static
+void session_info_put(struct session_info *session_info);
+static
+struct session_info *session_info_create(const char *name,
+               uid_t uid, gid_t gid,
+               struct lttng_session_trigger_list *trigger_list,
+               struct cds_lfht *sessions_ht);
+static
+void session_info_add_channel(struct session_info *session_info,
+               struct channel_info *channel_info);
+static
+void session_info_remove_channel(struct session_info *session_info,
+               struct channel_info *channel_info);
+
+/* lttng_session_trigger_list API */
+static
+struct lttng_session_trigger_list *lttng_session_trigger_list_create(
+               const char *session_name,
+               struct cds_lfht *session_triggers_ht);
+static
+struct lttng_session_trigger_list *lttng_session_trigger_list_build(
+               const struct notification_thread_state *state,
+               const char *session_name);
+static
+void lttng_session_trigger_list_destroy(
+               struct lttng_session_trigger_list *list);
+static
+int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
+               struct lttng_trigger *trigger);
+
+static
+int client_handle_transmission_status(
+               struct notification_client *client,
+               enum client_transmission_status transmission_status,
+               struct notification_thread_state *state);
+
+static
+int handle_one_event_notifier_notification(
+               struct notification_thread_state *state,
+               int pipe, enum lttng_domain_type domain);
+
+static
+void free_lttng_trigger_ht_element_rcu(struct rcu_head *node);
+
+static
+int match_client_socket(struct cds_lfht_node *node, const void *key)
+{
+       /* This double-cast is intended to suppress a pointer-to-integer cast warning. */
+       const int socket = (int) (intptr_t) key;
+       const struct notification_client *client = caa_container_of(node,
+                       struct notification_client, client_socket_ht_node);
+
+       return client->socket == socket;
+}
+
+static
+int match_client_id(struct cds_lfht_node *node, const void *key)
+{
+       /* This double-cast is intended to suppress a pointer-cast warning. */
+       const notification_client_id id = *((notification_client_id *) key);
+       const struct notification_client *client = caa_container_of(
+                       node, struct notification_client, client_id_ht_node);
+
+       return client->id == id;
+}
+
+static
+int match_channel_trigger_list(struct cds_lfht_node *node, const void *key)
+{
+       struct channel_key *channel_key = (struct channel_key *) key;
+       struct lttng_channel_trigger_list *trigger_list;
+
+       trigger_list = caa_container_of(node, struct lttng_channel_trigger_list,
+                       channel_triggers_ht_node);
+
+       return !!((channel_key->key == trigger_list->channel_key.key) &&
+                       (channel_key->domain == trigger_list->channel_key.domain));
+}
+
+static
+int match_session_trigger_list(struct cds_lfht_node *node, const void *key)
+{
+       const char *session_name = (const char *) key;
+       struct lttng_session_trigger_list *trigger_list;
+
+       trigger_list = caa_container_of(node, struct lttng_session_trigger_list,
+                       session_triggers_ht_node);
+
+       return !!(strcmp(trigger_list->session_name, session_name) == 0);
+}
+
+static
+int match_channel_state_sample(struct cds_lfht_node *node, const void *key)
+{
+       struct channel_key *channel_key = (struct channel_key *) key;
+       struct channel_state_sample *sample;
+
+       sample = caa_container_of(node, struct channel_state_sample,
+                       channel_state_ht_node);
+
+       return !!((channel_key->key == sample->key.key) &&
+                       (channel_key->domain == sample->key.domain));
+}
+
+static
+int match_channel_info(struct cds_lfht_node *node, const void *key)
+{
+       struct channel_key *channel_key = (struct channel_key *) key;
+       struct channel_info *channel_info;
+
+       channel_info = caa_container_of(node, struct channel_info,
+                       channels_ht_node);
+
+       return !!((channel_key->key == channel_info->key.key) &&
+                       (channel_key->domain == channel_info->key.domain));
+}
+
+static
+int match_trigger(struct cds_lfht_node *node, const void *key)
+{
+       struct lttng_trigger *trigger_key = (struct lttng_trigger *) key;
+       struct lttng_trigger_ht_element *trigger_ht_element;
+
+       trigger_ht_element = caa_container_of(node, struct lttng_trigger_ht_element,
+                       node);
+
+       return !!lttng_trigger_is_equal(trigger_key, trigger_ht_element->trigger);
+}
+
+static
+int match_trigger_token(struct cds_lfht_node *node, const void *key)
+{
+       const uint64_t *_key = (uint64_t *) key;
+       struct notification_trigger_tokens_ht_element *element;
+
+       element = caa_container_of(node,
+                       struct notification_trigger_tokens_ht_element, node);
+       return *_key == element->token;
+}
+
+static
+int match_client_list_condition(struct cds_lfht_node *node, const void *key)
+{
+       struct lttng_condition *condition_key = (struct lttng_condition *) key;
+       struct notification_client_list *client_list;
+       const struct lttng_condition *condition;
+
+       LTTNG_ASSERT(condition_key);
+
+       client_list = caa_container_of(node, struct notification_client_list,
+                       notification_trigger_clients_ht_node);
+       condition = client_list->condition;
+
+       return !!lttng_condition_is_equal(condition_key, condition);
+}
+
+static
+int match_session(struct cds_lfht_node *node, const void *key)
+{
+       const char *name = (const char *) key;
+       struct session_info *session_info = caa_container_of(
+               node, struct session_info, sessions_ht_node);
+
+       return !strcmp(session_info->name, name);
+}
+
+static
+const char *notification_command_type_str(
+               enum notification_thread_command_type type)
+{
+       switch (type) {
+       case NOTIFICATION_COMMAND_TYPE_REGISTER_TRIGGER:
+               return "REGISTER_TRIGGER";
+       case NOTIFICATION_COMMAND_TYPE_UNREGISTER_TRIGGER:
+               return "UNREGISTER_TRIGGER";
+       case NOTIFICATION_COMMAND_TYPE_ADD_CHANNEL:
+               return "ADD_CHANNEL";
+       case NOTIFICATION_COMMAND_TYPE_REMOVE_CHANNEL:
+               return "REMOVE_CHANNEL";
+       case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING:
+               return "SESSION_ROTATION_ONGOING";
+       case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED:
+               return "SESSION_ROTATION_COMPLETED";
+       case NOTIFICATION_COMMAND_TYPE_ADD_TRACER_EVENT_SOURCE:
+               return "ADD_TRACER_EVENT_SOURCE";
+       case NOTIFICATION_COMMAND_TYPE_REMOVE_TRACER_EVENT_SOURCE:
+               return "REMOVE_TRACER_EVENT_SOURCE";
+       case NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS:
+               return "LIST_TRIGGERS";
+       case NOTIFICATION_COMMAND_TYPE_GET_TRIGGER:
+               return "GET_TRIGGER";
+       case NOTIFICATION_COMMAND_TYPE_QUIT:
+               return "QUIT";
+       case NOTIFICATION_COMMAND_TYPE_CLIENT_COMMUNICATION_UPDATE:
+               return "CLIENT_COMMUNICATION_UPDATE";
+       default:
+               abort();
+       }
+}
+
+/*
+ * Match trigger based on name and credentials only.
+ * Name duplication is NOT allowed for the same uid.
+ */
+static
+int match_trigger_by_name_uid(struct cds_lfht_node *node,
+               const void *key)
+{
+       bool match = false;
+       const char *element_trigger_name;
+       const char *key_name;
+       enum lttng_trigger_status status;
+       const struct lttng_credentials *key_creds;
+       const struct lttng_credentials *node_creds;
+       const struct lttng_trigger *trigger_key =
+                       (const struct lttng_trigger *) key;
+       const struct lttng_trigger_ht_element *trigger_ht_element =
+                       caa_container_of(node,
+                               struct lttng_trigger_ht_element,
+                               node_by_name_uid);
+
+       status = lttng_trigger_get_name(trigger_ht_element->trigger,
+                       &element_trigger_name);
+       element_trigger_name = status == LTTNG_TRIGGER_STATUS_OK ?
+                       element_trigger_name : NULL;
+
+       status = lttng_trigger_get_name(trigger_key, &key_name);
+       key_name = status == LTTNG_TRIGGER_STATUS_OK ? key_name : NULL;
+
+       /*
+        * Compare the names.
+        * Consider null names as not equal. This is to maintain backwards
+        * compatibility with pre-2.13 anonymous triggers. Multiple anonymous
+        * triggers are allowed for a given user.
+        */
+       if (!element_trigger_name || !key_name) {
+               goto end;
+       }
+
+       if (strcmp(element_trigger_name, key_name) != 0) {
+               goto end;
+       }
+
+       /* Compare the owners' UIDs. */
+       key_creds = lttng_trigger_get_credentials(trigger_key);
+       node_creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
+
+       match = lttng_credentials_is_equal_uid(key_creds, node_creds);
+
+end:
+       return match;
+}
+
+/*
+ * Hash trigger based on name and credentials only.
+ */
+static
+unsigned long hash_trigger_by_name_uid(const struct lttng_trigger *trigger)
+{
+       unsigned long hash = 0;
+       const struct lttng_credentials *trigger_creds;
+       const char *trigger_name;
+       enum lttng_trigger_status status;
+
+       status = lttng_trigger_get_name(trigger, &trigger_name);
+       if (status == LTTNG_TRIGGER_STATUS_OK) {
+               hash = hash_key_str(trigger_name, lttng_ht_seed);
+       }
+
+       trigger_creds = lttng_trigger_get_credentials(trigger);
+       hash ^= hash_key_ulong((void *) (unsigned long) LTTNG_OPTIONAL_GET(trigger_creds->uid),
+                       lttng_ht_seed);
+
+       return hash;
+}
+
+static
+unsigned long hash_channel_key(struct channel_key *key)
+{
+       unsigned long key_hash = hash_key_u64(&key->key, lttng_ht_seed);
+       unsigned long domain_hash = hash_key_ulong(
+               (void *) (unsigned long) key->domain, lttng_ht_seed);
+
+       return key_hash ^ domain_hash;
+}
+
+static
+unsigned long hash_client_socket(int socket)
+{
+       return hash_key_ulong((void *) (unsigned long) socket, lttng_ht_seed);
+}
+
+static
+unsigned long hash_client_id(notification_client_id id)
+{
+       return hash_key_u64(&id, lttng_ht_seed);
+}
+
+/*
+ * Get the type of object to which a given condition applies. Bindings let
+ * the notification system evaluate a trigger's condition when a given
+ * object's state is updated.
+ *
+ * For instance, a condition bound to a channel will be evaluated every time
+ * the channel's state is changed by a channel monitoring sample.
+ */
+static
+enum lttng_object_type get_condition_binding_object(
+               const struct lttng_condition *condition)
+{
+       switch (lttng_condition_get_type(condition)) {
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
+               return LTTNG_OBJECT_TYPE_CHANNEL;
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
+               return LTTNG_OBJECT_TYPE_SESSION;
+       case LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES:
+               return LTTNG_OBJECT_TYPE_NONE;
+       default:
+               return LTTNG_OBJECT_TYPE_UNKNOWN;
+       }
+}
+
+static
+void free_channel_info_rcu(struct rcu_head *node)
+{
+       free(caa_container_of(node, struct channel_info, rcu_node));
+}
+
+static
+void channel_info_destroy(struct channel_info *channel_info)
+{
+       if (!channel_info) {
+               return;
+       }
+
+       if (channel_info->session_info) {
+               session_info_remove_channel(channel_info->session_info,
+                               channel_info);
+               session_info_put(channel_info->session_info);
+       }
+       if (channel_info->name) {
+               free(channel_info->name);
+       }
+       call_rcu(&channel_info->rcu_node, free_channel_info_rcu);
+}
+
+static
+void free_session_info_rcu(struct rcu_head *node)
+{
+       free(caa_container_of(node, struct session_info, rcu_node));
+}
+
+/* Don't call directly, use the ref-counting mechanism. */
+static
+void session_info_destroy(void *_data)
+{
+       struct session_info *session_info = (struct session_info *) _data;
+       int ret;
+
+       LTTNG_ASSERT(session_info);
+       if (session_info->channel_infos_ht) {
+               ret = cds_lfht_destroy(session_info->channel_infos_ht, NULL);
+               if (ret) {
+                       ERR("Failed to destroy channel information hash table");
+               }
+       }
+       lttng_session_trigger_list_destroy(session_info->trigger_list);
+
+       rcu_read_lock();
+       cds_lfht_del(session_info->sessions_ht,
+                       &session_info->sessions_ht_node);
+       rcu_read_unlock();
+       free(session_info->name);
+       call_rcu(&session_info->rcu_node, free_session_info_rcu);
+}
+
+static
+void session_info_get(struct session_info *session_info)
+{
+       if (!session_info) {
+               return;
+       }
+       lttng_ref_get(&session_info->ref);
+}
+
+static
+void session_info_put(struct session_info *session_info)
+{
+       if (!session_info) {
+               return;
+       }
+       lttng_ref_put(&session_info->ref);
+}
+
+static
+struct session_info *session_info_create(const char *name, uid_t uid, gid_t gid,
+               struct lttng_session_trigger_list *trigger_list,
+               struct cds_lfht *sessions_ht)
+{
+       struct session_info *session_info;
+
+       LTTNG_ASSERT(name);
+
+       session_info = (struct session_info *) zmalloc(sizeof(*session_info));
+       if (!session_info) {
+               goto end;
+       }
+       lttng_ref_init(&session_info->ref, session_info_destroy);
+
+       session_info->channel_infos_ht = cds_lfht_new(DEFAULT_HT_SIZE,
+                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!session_info->channel_infos_ht) {
+               goto error;
+       }
+
+       cds_lfht_node_init(&session_info->sessions_ht_node);
+       session_info->name = strdup(name);
+       if (!session_info->name) {
+               goto error;
+       }
+       session_info->uid = uid;
+       session_info->gid = gid;
+       session_info->trigger_list = trigger_list;
+       session_info->sessions_ht = sessions_ht;
+end:
+       return session_info;
+error:
+       session_info_put(session_info);
+       return NULL;
+}
+
+static
+void session_info_add_channel(struct session_info *session_info,
+               struct channel_info *channel_info)
+{
+       rcu_read_lock();
+       cds_lfht_add(session_info->channel_infos_ht,
+                       hash_channel_key(&channel_info->key),
+                       &channel_info->session_info_channels_ht_node);
+       rcu_read_unlock();
+}
+
+static
+void session_info_remove_channel(struct session_info *session_info,
+               struct channel_info *channel_info)
+{
+       rcu_read_lock();
+       cds_lfht_del(session_info->channel_infos_ht,
+                       &channel_info->session_info_channels_ht_node);
+       rcu_read_unlock();
+}
+
+static
+struct channel_info *channel_info_create(const char *channel_name,
+               struct channel_key *channel_key, uint64_t channel_capacity,
+               struct session_info *session_info)
+{
+       struct channel_info *channel_info = (struct channel_info *) zmalloc(sizeof(*channel_info));
+
+       if (!channel_info) {
+               goto end;
+       }
+
+       cds_lfht_node_init(&channel_info->channels_ht_node);
+       cds_lfht_node_init(&channel_info->session_info_channels_ht_node);
+       memcpy(&channel_info->key, channel_key, sizeof(*channel_key));
+       channel_info->capacity = channel_capacity;
+
+       channel_info->name = strdup(channel_name);
+       if (!channel_info->name) {
+               goto error;
+       }
+
+       /*
+        * Set the references between session and channel infos:
+        *   - channel_info holds a strong reference to session_info
+        *   - session_info holds a weak reference to channel_info
+        */
+       session_info_get(session_info);
+       session_info_add_channel(session_info, channel_info);
+       channel_info->session_info = session_info;
+end:
+       return channel_info;
+error:
+       channel_info_destroy(channel_info);
+       return NULL;
+}
+
+bool notification_client_list_get(struct notification_client_list *list)
+{
+       return urcu_ref_get_unless_zero(&list->ref);
+}
+
+static
+void free_notification_client_list_rcu(struct rcu_head *node)
+{
+       free(caa_container_of(node, struct notification_client_list,
+                       rcu_node));
+}
+
+static
+void notification_client_list_release(struct urcu_ref *list_ref)
+{
+       struct notification_client_list *list =
+                       container_of(list_ref, typeof(*list), ref);
+       struct notification_client_list_element *client_list_element, *tmp;
+
+       lttng_condition_put(list->condition);
+
+       if (list->notification_trigger_clients_ht) {
+               rcu_read_lock();
+
+               cds_lfht_del(list->notification_trigger_clients_ht,
+                               &list->notification_trigger_clients_ht_node);
+               rcu_read_unlock();
+               list->notification_trigger_clients_ht = NULL;
+       }
+       cds_list_for_each_entry_safe(client_list_element, tmp,
+                                    &list->clients_list, node) {
+               free(client_list_element);
+       }
+
+       LTTNG_ASSERT(cds_list_empty(&list->triggers_list));
+
+       pthread_mutex_destroy(&list->lock);
+       call_rcu(&list->rcu_node, free_notification_client_list_rcu);
+}
+
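+/*
+ * Return true if the client is subscribed to a condition equal to
+ * 'condition'.
+ */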
+static
+bool condition_applies_to_client(const struct lttng_condition *condition,
+               struct notification_client *client)
+{
+       bool applies = false;
+       struct lttng_condition_list_element *condition_list_element;
+
+       cds_list_for_each_entry(condition_list_element, &client->condition_list,
+                       node) {
+               applies = lttng_condition_is_equal(
+                               condition_list_element->condition,
+                               condition);
+               if (applies) {
+                       break;
+               }
+       }
+
+       return applies;
+}
+
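+/*
+ * Create a client list for the given condition, add the clients already
+ * subscribed to that condition and publish the list in the
+ * notification_trigger_clients_ht.
+ */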
+static
+struct notification_client_list *notification_client_list_create(
+               struct notification_thread_state *state,
+               const struct lttng_condition *condition)
+{
+       struct notification_client *client;
+       struct cds_lfht_iter iter;
+       struct notification_client_list *client_list;
+
+       client_list = (notification_client_list *) zmalloc(sizeof(*client_list));
+       if (!client_list) {
+               PERROR("Failed to allocate notification client list");
+               goto end;
+       }
+
+       pthread_mutex_init(&client_list->lock, NULL);
+       /*
+        * The trigger that owns the condition has the first reference to this
+        * client list.
+        */
+       urcu_ref_init(&client_list->ref);
+       cds_lfht_node_init(&client_list->notification_trigger_clients_ht_node);
+       CDS_INIT_LIST_HEAD(&client_list->clients_list);
+       CDS_INIT_LIST_HEAD(&client_list->triggers_list);
+
+       /*
+        * Create a copy of the condition so that it's independent of any
+        * trigger. The client list may outlive the trigger object (which owns
+        * the condition) that is used to create it.
+        */
+       client_list->condition = lttng_condition_copy(condition);
+
+       /* Build a list of clients to which this new condition applies. */
+       cds_lfht_for_each_entry (state->client_socket_ht, &iter, client,
+                       client_socket_ht_node) {
+               struct notification_client_list_element *client_list_element;
+
+               if (!condition_applies_to_client(condition, client)) {
+                       continue;
+               }
+
+               client_list_element = (notification_client_list_element *) zmalloc(sizeof(*client_list_element));
+               if (!client_list_element) {
+                       goto error_put_client_list;
+               }
+
+               CDS_INIT_LIST_HEAD(&client_list_element->node);
+               client_list_element->client = client;
+               cds_list_add(&client_list_element->node, &client_list->clients_list);
+       }
+
+       client_list->notification_trigger_clients_ht =
+                       state->notification_trigger_clients_ht;
+
+       rcu_read_lock();
+       /*
+        * Add the client list to the global list of client lists.
+        */
+       cds_lfht_add_unique(state->notification_trigger_clients_ht,
+                       lttng_condition_hash(client_list->condition),
+                       match_client_list_condition,
+                       client_list->condition,
+                       &client_list->notification_trigger_clients_ht_node);
+       rcu_read_unlock();
+       goto end;
+
+error_put_client_list:
+       notification_client_list_put(client_list);
+       client_list = NULL;
+
+end:
+       return client_list;
+}
+
+void notification_client_list_put(struct notification_client_list *list)
+{
+       if (!list) {
+               return;
+       }
+       urcu_ref_put(&list->ref, notification_client_list_release);
+}
+
+/* Provides a reference to the returned list. */
+static
+struct notification_client_list *get_client_list_from_condition(
+       struct notification_thread_state *state,
+       const struct lttng_condition *condition)
+{
+       struct cds_lfht_node *node;
+       struct cds_lfht_iter iter;
+       struct notification_client_list *list = NULL;
+
+       rcu_read_lock();
+       cds_lfht_lookup(state->notification_trigger_clients_ht,
+                       lttng_condition_hash(condition),
+                       match_client_list_condition,
+                       condition,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (node) {
+               list = container_of(node, struct notification_client_list,
+                               notification_trigger_clients_ht_node);
+               list = notification_client_list_get(list) ? list : NULL;
+       }
+
+       rcu_read_unlock();
+       return list;
+}
+
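+/*
+ * Evaluate a channel-bound condition against the channel's last state sample
+ * on behalf of a subscribing client. Returns 0 on success (even when no
+ * evaluation is produced) and a non-zero value on fatal error.
+ */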
+static
+int evaluate_channel_condition_for_client(
+               const struct lttng_condition *condition,
+               struct notification_thread_state *state,
+               struct lttng_evaluation **evaluation,
+               uid_t *session_uid, gid_t *session_gid)
+{
+       int ret;
+       struct cds_lfht_iter iter;
+       struct cds_lfht_node *node;
+       struct channel_info *channel_info = NULL;
+       struct channel_key *channel_key = NULL;
+       struct channel_state_sample *last_sample = NULL;
+       struct lttng_channel_trigger_list *channel_trigger_list = NULL;
+
+       rcu_read_lock();
+
+       /* Find the channel associated with the condition. */
+       cds_lfht_for_each_entry(state->channel_triggers_ht, &iter,
+                       channel_trigger_list, channel_triggers_ht_node) {
+               struct lttng_trigger_list_element *element;
+
+               cds_list_for_each_entry(element, &channel_trigger_list->list, node) {
+                       const struct lttng_condition *current_condition =
+                                       lttng_trigger_get_const_condition(
+                                               element->trigger);
+
+                       LTTNG_ASSERT(current_condition);
+                       if (!lttng_condition_is_equal(condition,
+                                       current_condition)) {
+                               continue;
+                       }
+
+                       /* Found the trigger, save the channel key. */
+                       channel_key = &channel_trigger_list->channel_key;
+                       break;
+               }
+               if (channel_key) {
+                       /* The channel key was found; stop iteration. */
+                       break;
+               }
+       }
+
+       if (!channel_key) {
+               /* No channel found; normal exit. */
+               DBG("No known channel associated with newly subscribed-to condition");
+               ret = 0;
+               goto end;
+       }
+
+       /* Fetch channel info for the matching channel. */
+       cds_lfht_lookup(state->channels_ht,
+                       hash_channel_key(channel_key),
+                       match_channel_info,
+                       channel_key,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       LTTNG_ASSERT(node);
+       channel_info = caa_container_of(node, struct channel_info,
+                       channels_ht_node);
+
+       /* Retrieve the channel's last sample, if it exists. */
+       cds_lfht_lookup(state->channel_state_ht,
+                       hash_channel_key(channel_key),
+                       match_channel_state_sample,
+                       channel_key,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (node) {
+               last_sample = caa_container_of(node,
+                               struct channel_state_sample,
+                               channel_state_ht_node);
+       } else {
+               /* Nothing to evaluate, no sample was ever taken. Normal exit. */
+               DBG("No channel sample associated with newly subscribed-to condition");
+               ret = 0;
+               goto end;
+       }
+
+       ret = evaluate_buffer_condition(condition, evaluation, state,
+                       NULL, last_sample,
+                       0, channel_info->session_info->consumed_data_size,
+                       channel_info);
+       if (ret) {
+               WARN("Fatal error occurred while evaluating a newly subscribed-to condition");
+               goto end;
+       }
+
+       *session_uid = channel_info->session_info->uid;
+       *session_gid = channel_info->session_info->gid;
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+static
+const char *get_condition_session_name(const struct lttng_condition *condition)
+{
+       const char *session_name = NULL;
+       enum lttng_condition_status status;
+
+       switch (lttng_condition_get_type(condition)) {
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+               status = lttng_condition_buffer_usage_get_session_name(
+                               condition, &session_name);
+               break;
+       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
+               status = lttng_condition_session_consumed_size_get_session_name(
+                               condition, &session_name);
+               break;
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
+               status = lttng_condition_session_rotation_get_session_name(
+                               condition, &session_name);
+               break;
+       default:
+               abort();
+       }
+       if (status != LTTNG_CONDITION_STATUS_OK) {
+               ERR("Failed to retrieve the condition's session name");
+               goto end;
+       }
+end:
+       return session_name;
+}
+
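+/*
+ * Evaluate a session-bound condition against the current state of the
+ * session it targets on behalf of a subscribing client.
+ */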
+static
+int evaluate_session_condition_for_client(
+               const struct lttng_condition *condition,
+               struct notification_thread_state *state,
+               struct lttng_evaluation **evaluation,
+               uid_t *session_uid, gid_t *session_gid)
+{
+       int ret;
+       struct cds_lfht_iter iter;
+       struct cds_lfht_node *node;
+       const char *session_name;
+       struct session_info *session_info = NULL;
+
+       rcu_read_lock();
+       session_name = get_condition_session_name(condition);
+
+       /* Find the session associated with the trigger. */
+       cds_lfht_lookup(state->sessions_ht,
+                       hash_key_str(session_name, lttng_ht_seed),
+                       match_session,
+                       session_name,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (!node) {
+               DBG("No known session matching name \"%s\"",
+                               session_name);
+               ret = 0;
+               goto end;
+       }
+
+       session_info = caa_container_of(node, struct session_info,
+                       sessions_ht_node);
+       session_info_get(session_info);
+
+       /*
+        * Evaluation is performed in-line here since only one type of
+        * session-bound condition is handled for the moment.
+        */
+       switch (lttng_condition_get_type(condition)) {
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
+               if (!session_info->rotation.ongoing) {
+                       ret = 0;
+                       goto end_session_put;
+               }
+
+               *evaluation = lttng_evaluation_session_rotation_ongoing_create(
+                               session_info->rotation.id);
+               if (!*evaluation) {
+                       /* Fatal error. */
+                       ERR("Failed to create session rotation ongoing evaluation for session \"%s\"",
+                                       session_info->name);
+                       ret = -1;
+                       goto end_session_put;
+               }
+               ret = 0;
+               break;
+       default:
+               ret = 0;
+               goto end_session_put;
+       }
+
+       *session_uid = session_info->uid;
+       *session_gid = session_info->gid;
+
+end_session_put:
+       session_info_put(session_info);
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
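+/*
+ * Evaluate a condition on behalf of a newly-subscribed client and, if it
+ * evaluates to true, notify only that client.
+ */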
+static
+int evaluate_condition_for_client(const struct lttng_trigger *trigger,
+               const struct lttng_condition *condition,
+               struct notification_client *client,
+               struct notification_thread_state *state)
+{
+       int ret;
+       struct lttng_evaluation *evaluation = NULL;
+       struct notification_client_list client_list = {
+               .lock = PTHREAD_MUTEX_INITIALIZER,
+       };
+       struct notification_client_list_element client_list_element = { 0 };
+       uid_t object_uid = 0;
+       gid_t object_gid = 0;
+
+       LTTNG_ASSERT(trigger);
+       LTTNG_ASSERT(condition);
+       LTTNG_ASSERT(client);
+       LTTNG_ASSERT(state);
+
+       switch (get_condition_binding_object(condition)) {
+       case LTTNG_OBJECT_TYPE_SESSION:
+               ret = evaluate_session_condition_for_client(condition, state,
+                               &evaluation, &object_uid, &object_gid);
+               break;
+       case LTTNG_OBJECT_TYPE_CHANNEL:
+               ret = evaluate_channel_condition_for_client(condition, state,
+                               &evaluation, &object_uid, &object_gid);
+               break;
+       case LTTNG_OBJECT_TYPE_NONE:
+               DBG("Newly subscribed-to condition not bound to object, nothing to evaluate");
+               ret = 0;
+               goto end;
+       case LTTNG_OBJECT_TYPE_UNKNOWN:
+       default:
+               ret = -1;
+               goto end;
+       }
+       if (ret) {
+               /* Fatal error. */
+               goto end;
+       }
+       if (!evaluation) {
+               /* Evaluation yielded nothing. Normal exit. */
+               DBG("Newly subscribed-to condition evaluated to false, nothing to report to client");
+               ret = 0;
+               goto end;
+       }
+
+       /*
+        * Create a temporary client list with the client currently
+        * subscribing.
+        */
+       cds_lfht_node_init(&client_list.notification_trigger_clients_ht_node);
+       CDS_INIT_LIST_HEAD(&client_list.clients_list);
+
+       CDS_INIT_LIST_HEAD(&client_list_element.node);
+       client_list_element.client = client;
+       cds_list_add(&client_list_element.node, &client_list.clients_list);
+
+       /* Send evaluation result to the newly-subscribed client. */
+       DBG("Newly subscribed-to condition evaluated to true, notifying client");
+       ret = send_evaluation_to_clients(trigger, evaluation, &client_list,
+                       state, object_uid, object_gid);
+
+end:
+       return ret;
+}
+
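+/*
+ * Subscribe a client to a condition: the condition is added to the client's
+ * subscription list and, if a trigger with a matching condition is already
+ * registered, the condition is evaluated immediately and the client is added
+ * to the condition's client list.
+ */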
+static
+int notification_thread_client_subscribe(struct notification_client *client,
+               struct lttng_condition *condition,
+               struct notification_thread_state *state,
+               enum lttng_notification_channel_status *_status)
+{
+       int ret = 0;
+       struct notification_client_list *client_list = NULL;
+       struct lttng_condition_list_element *condition_list_element = NULL;
+       struct notification_client_list_element *client_list_element = NULL;
+       struct lttng_trigger_ht_element *trigger_ht_element;
+       enum lttng_notification_channel_status status =
+                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
+
+       /*
+        * Ensure that the client has not already subscribed to this
+        * condition.
+        */
+       cds_list_for_each_entry(condition_list_element, &client->condition_list, node) {
+               if (lttng_condition_is_equal(condition_list_element->condition,
+                               condition)) {
+                       status = LTTNG_NOTIFICATION_CHANNEL_STATUS_ALREADY_SUBSCRIBED;
+                       goto end;
+               }
+       }
+
+       condition_list_element = (lttng_condition_list_element *) zmalloc(sizeof(*condition_list_element));
+       if (!condition_list_element) {
+               ret = -1;
+               goto error;
+       }
+       client_list_element = (notification_client_list_element *) zmalloc(sizeof(*client_list_element));
+       if (!client_list_element) {
+               ret = -1;
+               goto error;
+       }
+
+       /*
+        * Add the newly-subscribed condition to the client's subscription list.
+        */
+       CDS_INIT_LIST_HEAD(&condition_list_element->node);
+       condition_list_element->condition = condition;
+       condition = NULL;
+       cds_list_add(&condition_list_element->node, &client->condition_list);
+
+       client_list = get_client_list_from_condition(
+                       state, condition_list_element->condition);
+       if (!client_list) {
+               /*
+                * No notification-emitting trigger is registered with this
+                * condition. The condition is not evaluated right away since
+                * no such trigger exists yet.
+                */
+               free(client_list_element);
+               goto end;
+       }
+
+       /*
+        * The condition to which the client just subscribed is evaluated
+        * at this point so that conditions that are already TRUE result
+        * in a notification being sent out.
+        *
+        * Note the iteration over all triggers that share a `condition`
+        * identical to the one to which the client is subscribing. This is
+        * done to ensure that the client receives a distinct notification for
+        * every trigger that has a `notify` action using this condition.
+        */
+       pthread_mutex_lock(&client_list->lock);
+       cds_list_for_each_entry(trigger_ht_element,
+                       &client_list->triggers_list, client_list_trigger_node) {
+               if (evaluate_condition_for_client(trigger_ht_element->trigger,
+                               condition_list_element->condition, client,
+                               state)) {
+                       WARN("Evaluation of a condition on client subscription failed, aborting.");
+                       ret = -1;
+                       free(client_list_element);
+                       pthread_mutex_unlock(&client_list->lock);
+                       goto end;
+               }
+       }
+       pthread_mutex_unlock(&client_list->lock);
+
+       /*
+        * Add the client to the list of clients interested in a given trigger
+        * if a "notification" trigger with a corresponding condition was
+        * registered beforehand.
+        */
+       client_list_element->client = client;
+       CDS_INIT_LIST_HEAD(&client_list_element->node);
+
+       pthread_mutex_lock(&client_list->lock);
+       cds_list_add(&client_list_element->node, &client_list->clients_list);
+       pthread_mutex_unlock(&client_list->lock);
+end:
+       if (_status) {
+               *_status = status;
+       }
+       if (client_list) {
+               notification_client_list_put(client_list);
+       }
+       lttng_condition_destroy(condition);
+       return ret;
+error:
+       free(condition_list_element);
+       free(client_list_element);
+       lttng_condition_destroy(condition);
+       return ret;
+}
+
+static
+int notification_thread_client_unsubscribe(
+               struct notification_client *client,
+               struct lttng_condition *condition,
+               struct notification_thread_state *state,
+               enum lttng_notification_channel_status *_status)
+{
+       struct notification_client_list *client_list;
+       struct lttng_condition_list_element *condition_list_element,
+                       *condition_tmp;
+       struct notification_client_list_element *client_list_element,
+                       *client_tmp;
+       bool condition_found = false;
+       enum lttng_notification_channel_status status =
+                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
+
+       /* Remove the condition from the client's condition list. */
+       cds_list_for_each_entry_safe(condition_list_element, condition_tmp,
+                       &client->condition_list, node) {
+               if (!lttng_condition_is_equal(condition_list_element->condition,
+                               condition)) {
+                       continue;
+               }
+
+               cds_list_del(&condition_list_element->node);
+               /*
+                * The caller may be iterating on the client's conditions to
+                * tear down a client's connection. In this case, the condition
+                * will be destroyed at the end.
+                */
+               if (condition != condition_list_element->condition) {
+                       lttng_condition_destroy(
+                                       condition_list_element->condition);
+               }
+               free(condition_list_element);
+               condition_found = true;
+               break;
+       }
+
+       if (!condition_found) {
+               status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNKNOWN_CONDITION;
+               goto end;
+       }
+
+       /*
+        * Remove the client from the list of clients interested in the trigger
+        * matching the condition.
+        */
+       client_list = get_client_list_from_condition(state, condition);
+       if (!client_list) {
+               goto end;
+       }
+
+       pthread_mutex_lock(&client_list->lock);
+       cds_list_for_each_entry_safe(client_list_element, client_tmp,
+                       &client_list->clients_list, node) {
+               if (client_list_element->client->id != client->id) {
+                       continue;
+               }
+               cds_list_del(&client_list_element->node);
+               free(client_list_element);
+               break;
+       }
+       pthread_mutex_unlock(&client_list->lock);
+       notification_client_list_put(client_list);
+       client_list = NULL;
+end:
+       lttng_condition_destroy(condition);
+       if (_status) {
+               *_status = status;
+       }
+       return 0;
+}
+
+static
+void free_notification_client_rcu(struct rcu_head *node)
+{
+       free(caa_container_of(node, struct notification_client, rcu_node));
+}
+
+static
+void notification_client_destroy(struct notification_client *client,
+               struct notification_thread_state *state)
+{
+       if (!client) {
+               return;
+       }
+
+       /*
+        * The client object is not reachable by other threads, no need to lock
+        * the client here.
+        */
+       if (client->socket >= 0) {
+               (void) lttcomm_close_unix_sock(client->socket);
+               client->socket = -1;
+       }
+       client->communication.active = false;
+       lttng_payload_reset(&client->communication.inbound.payload);
+       lttng_payload_reset(&client->communication.outbound.payload);
+       pthread_mutex_destroy(&client->lock);
+       call_rcu(&client->rcu_node, free_notification_client_rcu);
+}
+
+/*
+ * Call with rcu_read_lock held (and hold for the lifetime of the returned
+ * client pointer).
+ */
+static
+struct notification_client *get_client_from_socket(int socket,
+               struct notification_thread_state *state)
+{
+       struct cds_lfht_iter iter;
+       struct cds_lfht_node *node;
+       struct notification_client *client = NULL;
+
+       cds_lfht_lookup(state->client_socket_ht,
+                       hash_client_socket(socket),
+                       match_client_socket,
+                       (void *) (unsigned long) socket,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (!node) {
+               goto end;
+       }
+
+       client = caa_container_of(node, struct notification_client,
+                       client_socket_ht_node);
+end:
+       return client;
+}
+
+/*
+ * Call with rcu_read_lock held (and hold for the lifetime of the returned
+ * client pointer).
+ */
+static
+struct notification_client *get_client_from_id(notification_client_id id,
+               struct notification_thread_state *state)
+{
+       struct cds_lfht_iter iter;
+       struct cds_lfht_node *node;
+       struct notification_client *client = NULL;
+
+       cds_lfht_lookup(state->client_id_ht,
+                       hash_client_id(id),
+                       match_client_id,
+                       &id,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (!node) {
+               goto end;
+       }
+
+       client = caa_container_of(node, struct notification_client,
+                       client_id_ht_node);
+end:
+       return client;
+}
+
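+/*
+ * Return true if a buffer usage condition applies to the given channel, that
+ * is, if the condition's domain, session name and channel name all match.
+ */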
+static
+bool buffer_usage_condition_applies_to_channel(
+               const struct lttng_condition *condition,
+               const struct channel_info *channel_info)
+{
+       enum lttng_condition_status status;
+       enum lttng_domain_type condition_domain;
+       const char *condition_session_name = NULL;
+       const char *condition_channel_name = NULL;
+
+       status = lttng_condition_buffer_usage_get_domain_type(condition,
+                       &condition_domain);
+       LTTNG_ASSERT(status == LTTNG_CONDITION_STATUS_OK);
+       if (channel_info->key.domain != condition_domain) {
+               goto fail;
+       }
+
+       status = lttng_condition_buffer_usage_get_session_name(
+                       condition, &condition_session_name);
+       LTTNG_ASSERT((status == LTTNG_CONDITION_STATUS_OK) && condition_session_name);
+
+       status = lttng_condition_buffer_usage_get_channel_name(
+                       condition, &condition_channel_name);
+       LTTNG_ASSERT((status == LTTNG_CONDITION_STATUS_OK) && condition_channel_name);
+
+       if (strcmp(channel_info->session_info->name, condition_session_name)) {
+               goto fail;
+       }
+       if (strcmp(channel_info->name, condition_channel_name)) {
+               goto fail;
+       }
+
+       return true;
+fail:
+       return false;
+}
+
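+/*
+ * Return true if a session consumed size condition applies to the session
+ * that owns the given channel.
+ */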
+static
+bool session_consumed_size_condition_applies_to_channel(
+               const struct lttng_condition *condition,
+               const struct channel_info *channel_info)
+{
+       enum lttng_condition_status status;
+       const char *condition_session_name = NULL;
+
+       status = lttng_condition_session_consumed_size_get_session_name(
+                       condition, &condition_session_name);
+       LTTNG_ASSERT((status == LTTNG_CONDITION_STATUS_OK) && condition_session_name);
+
+       if (strcmp(channel_info->session_info->name, condition_session_name)) {
+               goto fail;
+       }
+
+       return true;
+fail:
+       return false;
+}
+
+static
+bool trigger_applies_to_channel(const struct lttng_trigger *trigger,
+               const struct channel_info *channel_info)
+{
+       const struct lttng_condition *condition;
+       bool trigger_applies;
+
+       condition = lttng_trigger_get_const_condition(trigger);
+       if (!condition) {
+               goto fail;
+       }
+
+       switch (lttng_condition_get_type(condition)) {
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+               trigger_applies = buffer_usage_condition_applies_to_channel(
+                               condition, channel_info);
+               break;
+       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
+               trigger_applies = session_consumed_size_condition_applies_to_channel(
+                               condition, channel_info);
+               break;
+       default:
+               goto fail;
+       }
+
+       return trigger_applies;
+fail:
+       return false;
+}
+
+/* Must be called with RCU read lock held. */
+static
+struct lttng_session_trigger_list *get_session_trigger_list(
+               struct notification_thread_state *state,
+               const char *session_name)
+{
+       struct lttng_session_trigger_list *list = NULL;
+       struct cds_lfht_node *node;
+       struct cds_lfht_iter iter;
+
+       cds_lfht_lookup(state->session_triggers_ht,
+                       hash_key_str(session_name, lttng_ht_seed),
+                       match_session_trigger_list,
+                       session_name,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (!node) {
+               /*
+                * Not an error, the list of triggers applying to that session
+                * will be initialized when the session is created.
+                */
+               DBG("No trigger list found for session \"%s\" as it is not yet known to the notification system",
+                               session_name);
+               goto end;
+       }
+
+       list = caa_container_of(node,
+                       struct lttng_session_trigger_list,
+                       session_triggers_ht_node);
+end:
+       return list;
+}
+
+/*
+ * Allocate an empty lttng_session_trigger_list for the session named
+ * 'session_name'.
+ *
+ * No ownership of 'session_name' is assumed by the session trigger list.
+ * It is the caller's responsibility to ensure the session name is alive
+ * for as long as this list is.
+ */
+static
+struct lttng_session_trigger_list *lttng_session_trigger_list_create(
+               const char *session_name,
+               struct cds_lfht *session_triggers_ht)
+{
+       struct lttng_session_trigger_list *list;
+
+       list = (lttng_session_trigger_list *) zmalloc(sizeof(*list));
+       if (!list) {
+               goto end;
+       }
+       list->session_name = session_name;
+       CDS_INIT_LIST_HEAD(&list->list);
+       cds_lfht_node_init(&list->session_triggers_ht_node);
+       list->session_triggers_ht = session_triggers_ht;
+
+       rcu_read_lock();
+       /* Publish the list through the session_triggers_ht. */
+       cds_lfht_add(session_triggers_ht,
+                       hash_key_str(session_name, lttng_ht_seed),
+                       &list->session_triggers_ht_node);
+       rcu_read_unlock();
+end:
+       return list;
+}
+
+static
+void free_session_trigger_list_rcu(struct rcu_head *node)
+{
+       free(caa_container_of(node, struct lttng_session_trigger_list,
+                       rcu_node));
+}
+
+static
+void lttng_session_trigger_list_destroy(struct lttng_session_trigger_list *list)
+{
+       struct lttng_trigger_list_element *trigger_list_element, *tmp;
+
+       /* Empty the list element by element, and then free the list itself. */
+       cds_list_for_each_entry_safe(trigger_list_element, tmp,
+                       &list->list, node) {
+               cds_list_del(&trigger_list_element->node);
+               free(trigger_list_element);
+       }
+       rcu_read_lock();
+       /* Unpublish the list from the session_triggers_ht. */
+       cds_lfht_del(list->session_triggers_ht,
+                       &list->session_triggers_ht_node);
+       rcu_read_unlock();
+       call_rcu(&list->rcu_node, free_session_trigger_list_rcu);
+}
+
+static
+int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
+               struct lttng_trigger *trigger)
+{
+       int ret = 0;
+       struct lttng_trigger_list_element *new_element =
+                       (lttng_trigger_list_element *) zmalloc(sizeof(*new_element));
+
+       if (!new_element) {
+               ret = -1;
+               goto end;
+       }
+       CDS_INIT_LIST_HEAD(&new_element->node);
+       new_element->trigger = trigger;
+       cds_list_add(&new_element->node, &list->list);
+end:
+       return ret;
+}
+
+static
+bool trigger_applies_to_session(const struct lttng_trigger *trigger,
+               const char *session_name)
+{
+       bool applies = false;
+       const struct lttng_condition *condition;
+
+       condition = lttng_trigger_get_const_condition(trigger);
+       switch (lttng_condition_get_type(condition)) {
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
+       {
+               enum lttng_condition_status condition_status;
+               const char *condition_session_name;
+
+               condition_status = lttng_condition_session_rotation_get_session_name(
+                       condition, &condition_session_name);
+               if (condition_status != LTTNG_CONDITION_STATUS_OK) {
+                       ERR("Failed to retrieve session rotation condition's session name");
+                       goto end;
+               }
+
+               LTTNG_ASSERT(condition_session_name);
+               applies = !strcmp(condition_session_name, session_name);
+               break;
+       }
+       default:
+               goto end;
+       }
+end:
+       return applies;
+}
+
+/*
+ * Allocate and initialize an lttng_session_trigger_list which contains
+ * all triggers that apply to the session named 'session_name'.
+ *
+ * No ownership of 'session_name' is assumed by the session trigger list.
+ * It is the caller's responsibility to ensure the session name is alive
+ * for as long as this list is.
+ */
+static
+struct lttng_session_trigger_list *lttng_session_trigger_list_build(
+               const struct notification_thread_state *state,
+               const char *session_name)
+{
+       int trigger_count = 0;
+       struct lttng_session_trigger_list *session_trigger_list = NULL;
+       struct lttng_trigger_ht_element *trigger_ht_element = NULL;
+       struct cds_lfht_iter iter;
+
+       session_trigger_list = lttng_session_trigger_list_create(session_name,
+                       state->session_triggers_ht);
+
+       /* Add all triggers applying to the session named 'session_name'. */
+       cds_lfht_for_each_entry(state->triggers_ht, &iter, trigger_ht_element,
+                       node) {
+               int ret;
+
+               if (!trigger_applies_to_session(trigger_ht_element->trigger,
+                               session_name)) {
+                       continue;
+               }
+
+               ret = lttng_session_trigger_list_add(session_trigger_list,
+                               trigger_ht_element->trigger);
+               if (ret) {
+                       goto error;
+               }
+
+               trigger_count++;
+       }
+
+       DBG("Found %i triggers that apply to newly created session",
+                       trigger_count);
+       return session_trigger_list;
+error:
+       lttng_session_trigger_list_destroy(session_trigger_list);
+       return NULL;
+}
+
+static
+struct session_info *find_or_create_session_info(
+               struct notification_thread_state *state,
+               const char *name, uid_t uid, gid_t gid)
+{
+       struct session_info *session = NULL;
+       struct cds_lfht_node *node;
+       struct cds_lfht_iter iter;
+       struct lttng_session_trigger_list *trigger_list;
+
+       rcu_read_lock();
+       cds_lfht_lookup(state->sessions_ht,
+                       hash_key_str(name, lttng_ht_seed),
+                       match_session,
+                       name,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (node) {
+               DBG("Found session info of session \"%s\" (uid = %i, gid = %i)",
+                               name, uid, gid);
+               session = caa_container_of(node, struct session_info,
+                               sessions_ht_node);
+               LTTNG_ASSERT(session->uid == uid);
+               LTTNG_ASSERT(session->gid == gid);
+               session_info_get(session);
+               goto end;
+       }
+
+       trigger_list = lttng_session_trigger_list_build(state, name);
+       if (!trigger_list) {
+               goto error;
+       }
+
+       session = session_info_create(name, uid, gid, trigger_list,
+                       state->sessions_ht);
+       if (!session) {
+               ERR("Failed to allocate session info for session \"%s\" (uid = %i, gid = %i)",
+                               name, uid, gid);
+               lttng_session_trigger_list_destroy(trigger_list);
+               goto error;
+       }
+       trigger_list = NULL;
+
+       cds_lfht_add(state->sessions_ht, hash_key_str(name, lttng_ht_seed),
+                       &session->sessions_ht_node);
+end:
+       rcu_read_unlock();
+       return session;
+error:
+       rcu_read_unlock();
+       session_info_put(session);
+       return NULL;
+}
+
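+/*
+ * Register a newly-added channel with the notification thread: create its
+ * channel_info, associate it with its session and build the list of triggers
+ * that apply to it.
+ */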
+static
+int handle_notification_thread_command_add_channel(
+               struct notification_thread_state *state,
+               const char *session_name, uid_t session_uid, gid_t session_gid,
+               const char *channel_name, enum lttng_domain_type channel_domain,
+               uint64_t channel_key_int, uint64_t channel_capacity,
+               enum lttng_error_code *cmd_result)
+{
+       struct cds_list_head trigger_list;
+       struct channel_info *new_channel_info = NULL;
+       struct channel_key channel_key = {
+               .key = channel_key_int,
+               .domain = channel_domain,
+       };
+       struct lttng_channel_trigger_list *channel_trigger_list = NULL;
+       struct lttng_trigger_ht_element *trigger_ht_element = NULL;
+       int trigger_count = 0;
+       struct cds_lfht_iter iter;
+       struct session_info *session_info = NULL;
+
+       DBG("Adding channel %s from session %s, channel key = %" PRIu64 " in %s domain",
+                       channel_name, session_name, channel_key_int,
+                       lttng_domain_type_str(channel_domain));
+
+       CDS_INIT_LIST_HEAD(&trigger_list);
+
+       session_info = find_or_create_session_info(state, session_name,
+                       session_uid, session_gid);
+       if (!session_info) {
+               /* Allocation error or an internal error occurred. */
+               goto error;
+       }
+
+       new_channel_info = channel_info_create(channel_name, &channel_key,
+                       channel_capacity, session_info);
+       if (!new_channel_info) {
+               goto error;
+       }
+
+       rcu_read_lock();
+       /* Build a list of all triggers applying to the new channel. */
+       cds_lfht_for_each_entry(state->triggers_ht, &iter, trigger_ht_element,
+                       node) {
+               struct lttng_trigger_list_element *new_element;
+
+               if (!trigger_applies_to_channel(trigger_ht_element->trigger,
+                               new_channel_info)) {
+                       continue;
+               }
+
+               new_element = (lttng_trigger_list_element *) zmalloc(sizeof(*new_element));
+               if (!new_element) {
+                       rcu_read_unlock();
+                       goto error;
+               }
+               CDS_INIT_LIST_HEAD(&new_element->node);
+               new_element->trigger = trigger_ht_element->trigger;
+               cds_list_add(&new_element->node, &trigger_list);
+               trigger_count++;
+       }
+       rcu_read_unlock();
+
+       DBG("Found %i triggers that apply to newly added channel",
+                       trigger_count);
+       channel_trigger_list = (lttng_channel_trigger_list *) zmalloc(sizeof(*channel_trigger_list));
+       if (!channel_trigger_list) {
+               goto error;
+       }
+       channel_trigger_list->channel_key = new_channel_info->key;
+       CDS_INIT_LIST_HEAD(&channel_trigger_list->list);
+       cds_lfht_node_init(&channel_trigger_list->channel_triggers_ht_node);
+       cds_list_splice(&trigger_list, &channel_trigger_list->list);
+
+       rcu_read_lock();
+       /* Add the channel to the channels_ht, which owns the channel_infos. */
+       cds_lfht_add(state->channels_ht,
+                       hash_channel_key(&new_channel_info->key),
+                       &new_channel_info->channels_ht_node);
+       /*
+        * Add the list of triggers associated with this channel to the
+        * channel_triggers_ht.
+        */
+       cds_lfht_add(state->channel_triggers_ht,
+                       hash_channel_key(&new_channel_info->key),
+                       &channel_trigger_list->channel_triggers_ht_node);
+       rcu_read_unlock();
+       session_info_put(session_info);
+       *cmd_result = LTTNG_OK;
+       return 0;
+error:
+       channel_info_destroy(new_channel_info);
+       session_info_put(session_info);
+       return 1;
+}
+
+static
+void free_channel_trigger_list_rcu(struct rcu_head *node)
+{
+       free(caa_container_of(node, struct lttng_channel_trigger_list,
+                       rcu_node));
+}
+
+static
+void free_channel_state_sample_rcu(struct rcu_head *node)
+{
+       free(caa_container_of(node, struct channel_state_sample,
+                       rcu_node));
+}
+
+static
+int handle_notification_thread_command_remove_channel(
+       struct notification_thread_state *state,
+       uint64_t channel_key, enum lttng_domain_type domain,
+       enum lttng_error_code *cmd_result)
+{
+       struct cds_lfht_node *node;
+       struct cds_lfht_iter iter;
+       struct lttng_channel_trigger_list *trigger_list;
+       struct lttng_trigger_list_element *trigger_list_element, *tmp;
+       struct channel_key key = { .key = channel_key, .domain = domain };
+       struct channel_info *channel_info;
+
+       DBG("Removing channel key = %" PRIu64 " in %s domain",
+                       channel_key, lttng_domain_type_str(domain));
+
+       rcu_read_lock();
+
+       cds_lfht_lookup(state->channel_triggers_ht,
+                       hash_channel_key(&key),
+                       match_channel_trigger_list,
+                       &key,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       /*
+        * Being asked to remove a channel that doesn't exist indicates a
+        * severe internal error.
+        */
+       if (!node) {
+               ERR("Channel being removed is unknown to the notification thread");
+               goto end;
+       }
+
+       /* Free the list of triggers associated with this channel. */
+       trigger_list = caa_container_of(node, struct lttng_channel_trigger_list,
+                       channel_triggers_ht_node);
+       cds_list_for_each_entry_safe(trigger_list_element, tmp,
+                       &trigger_list->list, node) {
+               cds_list_del(&trigger_list_element->node);
+               free(trigger_list_element);
+       }
+       cds_lfht_del(state->channel_triggers_ht, node);
+       call_rcu(&trigger_list->rcu_node, free_channel_trigger_list_rcu);
+
+       /* Free sampled channel state. */
+       cds_lfht_lookup(state->channel_state_ht,
+                       hash_channel_key(&key),
+                       match_channel_state_sample,
+                       &key,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       /*
+        * This is expected to be NULL if the channel is destroyed before we
+        * received a sample.
+        */
+       if (node) {
+               struct channel_state_sample *sample = caa_container_of(node,
+                               struct channel_state_sample,
+                               channel_state_ht_node);
+
+               cds_lfht_del(state->channel_state_ht, node);
+               call_rcu(&sample->rcu_node, free_channel_state_sample_rcu);
+       }
+
+       /* Remove the channel from the channels_ht and free it. */
+       cds_lfht_lookup(state->channels_ht,
+                       hash_channel_key(&key),
+                       match_channel_info,
+                       &key,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       LTTNG_ASSERT(node);
+       channel_info = caa_container_of(node, struct channel_info,
+                       channels_ht_node);
+       cds_lfht_del(state->channels_ht, node);
+       channel_info_destroy(channel_info);
+end:
+       rcu_read_unlock();
+       *cmd_result = LTTNG_OK;
+       return 0;
+}
+
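+/*
+ * Update a session's rotation state and run the session rotation triggers
+ * that apply to it by enqueuing their actions on the action executor.
+ */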
+static
+int handle_notification_thread_command_session_rotation(
+       struct notification_thread_state *state,
+       enum notification_thread_command_type cmd_type,
+       const char *session_name, uid_t session_uid, gid_t session_gid,
+       uint64_t trace_archive_chunk_id,
+       struct lttng_trace_archive_location *location,
+       enum lttng_error_code *_cmd_result)
+{
+       int ret = 0;
+       enum lttng_error_code cmd_result = LTTNG_OK;
+       struct lttng_session_trigger_list *trigger_list;
+       struct lttng_trigger_list_element *trigger_list_element;
+       struct session_info *session_info;
+       const struct lttng_credentials session_creds = {
+               .uid = LTTNG_OPTIONAL_INIT_VALUE(session_uid),
+               .gid = LTTNG_OPTIONAL_INIT_VALUE(session_gid),
+       };
+
+       rcu_read_lock();
+
+       session_info = find_or_create_session_info(state, session_name,
+                       session_uid, session_gid);
+       if (!session_info) {
+               /* Allocation error or an internal error occurred. */
+               ret = -1;
+               cmd_result = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       session_info->rotation.ongoing =
+                       cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING;
+       session_info->rotation.id = trace_archive_chunk_id;
+       trigger_list = get_session_trigger_list(state, session_name);
+       if (!trigger_list) {
+               DBG("No triggers applying to session \"%s\" found",
+                               session_name);
+               goto end;
+       }
+
+       cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
+                       node) {
+               const struct lttng_condition *condition;
+               struct lttng_trigger *trigger;
+               struct notification_client_list *client_list;
+               struct lttng_evaluation *evaluation = NULL;
+               enum lttng_condition_type condition_type;
+               enum action_executor_status executor_status;
+
+               trigger = trigger_list_element->trigger;
+               condition = lttng_trigger_get_const_condition(trigger);
+               LTTNG_ASSERT(condition);
+               condition_type = lttng_condition_get_type(condition);
+
+               if (condition_type == LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING &&
+                               cmd_type != NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
+                       continue;
+               } else if (condition_type == LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED &&
+                               cmd_type != NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED) {
+                       continue;
+               }
+
+               client_list = get_client_list_from_condition(state, condition);
+               if (cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
+                       evaluation = lttng_evaluation_session_rotation_ongoing_create(
+                                       trace_archive_chunk_id);
+               } else {
+                       evaluation = lttng_evaluation_session_rotation_completed_create(
+                                       trace_archive_chunk_id, location);
+               }
+
+               if (!evaluation) {
+                       /* Internal error */
+                       ret = -1;
+                       cmd_result = LTTNG_ERR_UNK;
+                       goto put_list;
+               }
+
+               /*
+                * Ownership of `evaluation` is transferred to the action
+                * executor no matter the result.
+                */
+               executor_status = action_executor_enqueue_trigger(
+                               state->executor, trigger, evaluation,
+                               &session_creds, client_list);
+               evaluation = NULL;
+               switch (executor_status) {
+               case ACTION_EXECUTOR_STATUS_OK:
+                       break;
+               case ACTION_EXECUTOR_STATUS_ERROR:
+               case ACTION_EXECUTOR_STATUS_INVALID:
+                       /*
+                        * TODO Add trigger identification (name/id) when
+                        * it is added to the API.
+                        */
+                       ERR("Fatal error occurred while enqueuing action associated with session rotation trigger");
+                       ret = -1;
+                       goto put_list;
+               case ACTION_EXECUTOR_STATUS_OVERFLOW:
+                       /*
+                        * TODO Add trigger identification (name/id) when
+                        * it is added to the API.
+                        *
+                        * Not a fatal error.
+                        */
+                       WARN("No space left when enqueuing action associated with session rotation trigger");
+                       ret = 0;
+                       goto put_list;
+               default:
+                       abort();
+               }
+
+put_list:
+               notification_client_list_put(client_list);
+               if (caa_unlikely(ret)) {
+                       break;
+               }
+       }
+end:
+       session_info_put(session_info);
+       *_cmd_result = cmd_result;
+       rcu_read_unlock();
+       return ret;
+}
+
+static
+int handle_notification_thread_command_add_tracer_event_source(
+               struct notification_thread_state *state,
+               int tracer_event_source_fd,
+               enum lttng_domain_type domain_type,
+               enum lttng_error_code *_cmd_result)
+{
+       int ret = 0;
+       enum lttng_error_code cmd_result = LTTNG_OK;
+       struct notification_event_tracer_event_source_element *element = NULL;
+
+       element = (notification_event_tracer_event_source_element *) zmalloc(sizeof(*element));
+       if (!element) {
+               cmd_result = LTTNG_ERR_NOMEM;
+               ret = -1;
+               goto end;
+       }
+
+       element->fd = tracer_event_source_fd;
+       element->domain = domain_type;
+
+       cds_list_add(&element->node, &state->tracer_event_sources_list);
+
+       DBG3("Adding tracer event source fd to poll set: tracer_event_source_fd = %d, domain = '%s'",
+                       tracer_event_source_fd,
+                       lttng_domain_type_str(domain_type));
+
+       /* Adding the read side pipe to the event poll. */
+       ret = lttng_poll_add(&state->events, tracer_event_source_fd, LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               ERR("Failed to add tracer event source to poll set: tracer_event_source_fd = %d, domain = '%s'",
+                               tracer_event_source_fd,
+                               lttng_domain_type_str(element->domain));
+               cds_list_del(&element->node);
+               free(element);
+               goto end;
+       }
+
+       element->is_fd_in_poll_set = true;
+
+end:
+       *_cmd_result = cmd_result;
+       return ret;
+}
+
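+/*
+ * Drain all notifications pending on a tracer event source pipe so that none
+ * are left unconsumed once the source is removed from the poll set.
+ */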
+static
+int drain_event_notifier_notification_pipe(
+               struct notification_thread_state *state,
+               int pipe, enum lttng_domain_type domain)
+{
+       struct lttng_poll_event events = {0};
+       int ret;
+
+       ret = lttng_poll_create(&events, 1, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               ERR("Error creating lttng_poll_event");
+               goto end;
+       }
+
+       ret = lttng_poll_add(&events, pipe, LPOLLIN);
+       if (ret < 0) {
+               ERR("Error adding fd event notifier notification pipe to lttng_poll_event: fd = %d",
+                               pipe);
+               goto end;
+       }
+
+       while (true) {
+               /*
+                * Continue to consume notifications as long as there are new
+                * ones coming in. The tracer has been asked to stop producing
+                * them.
+                *
+                * LPOLLIN is explicitly checked since LPOLLHUP is implicitly
+                * monitored (on Linux, at least) and will be returned when
+                * the pipe is closed but empty.
+                */
+               ret = lttng_poll_wait_interruptible(&events, 0);
+               if (ret == 0 || (LTTNG_POLL_GETEV(&events, 0) & LPOLLIN) == 0) {
+                       /* No more notifications to be read on this pipe. */
+                       ret = 0;
+                       goto end;
+               } else if (ret < 0) {
+                       PERROR("Failed on lttng_poll_wait_interruptible() call");
+                       ret = -1;
+                       goto end;
+               }
+
+               ret = handle_one_event_notifier_notification(state, pipe, domain);
+               if (ret) {
+                       ERR("Error consuming an event notifier notification from pipe: fd = %d",
+                                       pipe);
+               }
+       }
+end:
+       lttng_poll_clean(&events);
+       return ret;
+}
+
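+/*
+ * Find the tracer event source element matching a file descriptor, or NULL
+ * if it is not known to the notification thread state.
+ */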
+static
+struct notification_event_tracer_event_source_element *
+find_tracer_event_source_element(struct notification_thread_state *state,
+               int tracer_event_source_fd)
+{
+       struct notification_event_tracer_event_source_element *source_element;
+
+       cds_list_for_each_entry(source_element,
+                       &state->tracer_event_sources_list, node) {
+               if (source_element->fd == tracer_event_source_fd) {
+                       goto end;
+               }
+       }
+
+       source_element = NULL;
+end:
+       return source_element;
+}
+
+static
+int remove_tracer_event_source_from_pollset(
+               struct notification_thread_state *state,
+               struct notification_event_tracer_event_source_element *source_element)
+{
+       int ret = 0;
+
+       LTTNG_ASSERT(source_element->is_fd_in_poll_set);
+
+       DBG3("Removing tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
+                       source_element->fd,
+                       lttng_domain_type_str(source_element->domain));
+
+       /* Remove the fd from the event poll set. */
+       ret = lttng_poll_del(&state->events, source_element->fd);
+       if (ret < 0) {
+               ERR("Failed to remove tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
+                               source_element->fd,
+                               lttng_domain_type_str(source_element->domain));
+               ret = -1;
+               goto end;
+       }
+
+       source_element->is_fd_in_poll_set = false;
+
+       /*
+        * Force the notification thread to restart the poll() loop to ensure
+        * that any pending events from the removed fd are not processed.
+        */
+       state->restart_poll = true;
+
+       ret = drain_event_notifier_notification_pipe(state, source_element->fd,
+                       source_element->domain);
+       if (ret) {
+               ERR("Error draining event notifier notification: tracer_event_source_fd = %d, domain = '%s'",
+                               source_element->fd,
+                               lttng_domain_type_str(source_element->domain));
+               ret = -1;
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
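+/*
+ * Handle the death of a tracer event source: remove its file descriptor from
+ * the poll set while keeping the element in the tracer event sources list.
+ */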
+int handle_notification_thread_tracer_event_source_died(
+               struct notification_thread_state *state,
+               int tracer_event_source_fd)
+{
+       int ret = 0;
+       struct notification_event_tracer_event_source_element *source_element;
+
+       source_element = find_tracer_event_source_element(state,
+                       tracer_event_source_fd);
+
+       LTTNG_ASSERT(source_element);
+
+       ret = remove_tracer_event_source_from_pollset(state, source_element);
+       if (ret) {
+               ERR("Failed to remove dead tracer event source from poll set");
+       }
+
+       return ret;
+}
+
+static
+int handle_notification_thread_command_remove_tracer_event_source(
+               struct notification_thread_state *state,
+               int tracer_event_source_fd,
+               enum lttng_error_code *_cmd_result)
+{
+       int ret = 0;
+       enum lttng_error_code cmd_result = LTTNG_OK;
+       struct notification_event_tracer_event_source_element *source_element = NULL;
+
+       source_element = find_tracer_event_source_element(state,
+                       tracer_event_source_fd);
+
+       LTTNG_ASSERT(source_element);
+
+       /* Remove the tracer event source from the list. */
+       cds_list_del(&source_element->node);
+
+       if (!source_element->is_fd_in_poll_set) {
+               /* Skip the poll set removal. */
+               goto end;
+       }
+
+       ret = remove_tracer_event_source_from_pollset(state, source_element);
+       if (ret) {
+               ERR("Failed to remove tracer event source from poll set");
+               cmd_result = LTTNG_ERR_FATAL;
+       }
+
+end:
+       free(source_element);
+       *_cmd_result = cmd_result;
+       return ret;
+}
+
+static int handle_notification_thread_command_list_triggers(
+               struct notification_thread_handle *handle,
+               struct notification_thread_state *state,
+               uid_t client_uid,
+               struct lttng_triggers **triggers,
+               enum lttng_error_code *_cmd_result)
+{
+       int ret = 0;
+       enum lttng_error_code cmd_result = LTTNG_OK;
+       struct cds_lfht_iter iter;
+       struct lttng_trigger_ht_element *trigger_ht_element;
+       struct lttng_triggers *local_triggers = NULL;
+       const struct lttng_credentials *creds;
+
+       rcu_read_lock();
+
+       local_triggers = lttng_triggers_create();
+       if (!local_triggers) {
+               /* Not a fatal error. */
+               cmd_result = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       cds_lfht_for_each_entry(state->triggers_ht, &iter,
+                       trigger_ht_element, node) {
+               /*
+                * Only return the triggers to which the client has access.
+                * The root user has visibility over all triggers.
+                */
+               creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
+               if (client_uid != lttng_credentials_get_uid(creds) && client_uid != 0) {
+                       continue;
+               }
+
+               ret = lttng_triggers_add(local_triggers,
+                               trigger_ht_element->trigger);
+               if (ret < 0) {
+                       /* Not a fatal error. */
+                       ret = 0;
+                       cmd_result = LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+       }
+
+       /* Transferring ownership to the caller. */
+       *triggers = local_triggers;
+       local_triggers = NULL;
+
+end:
+       rcu_read_unlock();
+       lttng_triggers_destroy(local_triggers);
+       *_cmd_result = cmd_result;
+       return ret;
+}
+
+static inline void get_trigger_info_for_log(const struct lttng_trigger *trigger,
+               const char **trigger_name,
+               uid_t *trigger_owner_uid)
+{
+       enum lttng_trigger_status trigger_status;
+
+       trigger_status = lttng_trigger_get_name(trigger, trigger_name);
+       switch (trigger_status) {
+       case LTTNG_TRIGGER_STATUS_OK:
+               break;
+       case LTTNG_TRIGGER_STATUS_UNSET:
+               *trigger_name = "(anonymous)";
+               break;
+       default:
+               abort();
+       }
+
+       trigger_status = lttng_trigger_get_owner_uid(trigger,
+                       trigger_owner_uid);
+       LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
+}
+
+static int handle_notification_thread_command_get_trigger(
+               struct notification_thread_state *state,
+               const struct lttng_trigger *trigger,
+               struct lttng_trigger **registered_trigger,
+               enum lttng_error_code *_cmd_result)
+{
+       int ret = -1;
+       struct cds_lfht_iter iter;
+       struct lttng_trigger_ht_element *trigger_ht_element;
+       enum lttng_error_code cmd_result = LTTNG_ERR_TRIGGER_NOT_FOUND;
+       const char *trigger_name;
+       uid_t trigger_owner_uid;
+
+       rcu_read_lock();
+
+       cds_lfht_for_each_entry(
+                       state->triggers_ht, &iter, trigger_ht_element, node) {
+               if (lttng_trigger_is_equal(
+                                   trigger, trigger_ht_element->trigger)) {
+                       /* Take one reference on the returned trigger. */
+                       *registered_trigger = trigger_ht_element->trigger;
+                       lttng_trigger_get(*registered_trigger);
+                       ret = 0;
+                       cmd_result = LTTNG_OK;
+                       goto end;
+               }
+       }
+
+       /* Not a fatal error if the trigger is not found. */
+       get_trigger_info_for_log(trigger, &trigger_name, &trigger_owner_uid);
+       DBG("Failed to retrieve registered version of trigger: trigger name = '%s', trigger owner uid = %d",
+                       trigger_name, (int) trigger_owner_uid);
+
+       ret = 0;
+
+end:
+       rcu_read_unlock();
+       *_cmd_result = cmd_result;
+       return ret;
+}
+
+static
+bool condition_is_supported(struct lttng_condition *condition)
+{
+       bool is_supported;
+
+       switch (lttng_condition_get_type(condition)) {
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+       {
+               int ret;
+               enum lttng_domain_type domain;
+
+               ret = lttng_condition_buffer_usage_get_domain_type(condition,
+                               &domain);
+               LTTNG_ASSERT(ret == 0);
+
+               if (domain != LTTNG_DOMAIN_KERNEL) {
+                       is_supported = true;
+                       goto end;
+               }
+
+               /*
+                * Older kernel tracers don't expose the API to monitor their
+                * buffers. Therefore, we reject triggers that require that
+                * mechanism to be available to be evaluated.
+                *
+                * Assume unsupported on error.
+                */
+               is_supported = kernel_supports_ring_buffer_snapshot_sample_positions() == 1;
+               break;
+       }
+       case LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES:
+       {
+               const struct lttng_event_rule *event_rule;
+               enum lttng_domain_type domain;
+               const enum lttng_condition_status status =
+                               lttng_condition_event_rule_matches_get_rule(
+                                               condition, &event_rule);
+
+               LTTNG_ASSERT(status == LTTNG_CONDITION_STATUS_OK);
+
+               domain = lttng_event_rule_get_domain_type(event_rule);
+               if (domain != LTTNG_DOMAIN_KERNEL) {
+                       is_supported = true;
+                       goto end;
+               }
+
+               /*
+                * Older kernel tracers can't emit notifications. Therefore, we
+                * reject triggers that require that mechanism to be available
+                * to be evaluated.
+                *
+                * Assume unsupported on error.
+                */
+               is_supported = kernel_supports_event_notifiers() == 1;
+               break;
+       }
+       default:
+               is_supported = true;
+       }
+end:
+       return is_supported;
+}
+
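+/*
+ * Bind a session rotation trigger to the trigger list of the session named in
+ * its condition, if that session is already known to the notification system.
+ */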
+/* Must be called with RCU read lock held. */
+static
+int bind_trigger_to_matching_session(struct lttng_trigger *trigger,
+               struct notification_thread_state *state)
+{
+       int ret = 0;
+       const struct lttng_condition *condition;
+       const char *session_name;
+       struct lttng_session_trigger_list *trigger_list;
+
+       condition = lttng_trigger_get_const_condition(trigger);
+       switch (lttng_condition_get_type(condition)) {
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
+       {
+               enum lttng_condition_status status;
+
+               status = lttng_condition_session_rotation_get_session_name(
+                               condition, &session_name);
+               if (status != LTTNG_CONDITION_STATUS_OK) {
+                       ERR("Failed to bind trigger to session: unable to get 'session_rotation' condition's session name");
+                       ret = -1;
+                       goto end;
+               }
+               break;
+       }
+       default:
+               ret = -1;
+               goto end;
+       }
+
+       trigger_list = get_session_trigger_list(state, session_name);
+       if (!trigger_list) {
+               DBG("Unable to bind trigger applying to session \"%s\" as it is not yet known to the notification system",
+                               session_name);
+               goto end;
+       }
+
+       DBG("Newly registered trigger bound to session \"%s\"",
+                       session_name);
+       ret = lttng_session_trigger_list_add(trigger_list, trigger);
+end:
+       return ret;
+}
+
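+/*
+ * Bind a newly registered trigger to every known channel to which its
+ * condition applies.
+ */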
+/* Must be called with RCU read lock held. */
+static
+int bind_trigger_to_matching_channels(struct lttng_trigger *trigger,
+               struct notification_thread_state *state)
+{
+       int ret = 0;
+       struct cds_lfht_node *node;
+       struct cds_lfht_iter iter;
+       struct channel_info *channel;
+
+       cds_lfht_for_each_entry(state->channels_ht, &iter, channel,
+                       channels_ht_node) {
+               struct lttng_trigger_list_element *trigger_list_element;
+               struct lttng_channel_trigger_list *trigger_list;
+               struct cds_lfht_iter lookup_iter;
+
+               if (!trigger_applies_to_channel(trigger, channel)) {
+                       continue;
+               }
+
+               cds_lfht_lookup(state->channel_triggers_ht,
+                               hash_channel_key(&channel->key),
+                               match_channel_trigger_list,
+                               &channel->key,
+                               &lookup_iter);
+               node = cds_lfht_iter_get_node(&lookup_iter);
+               LTTNG_ASSERT(node);
+               trigger_list = caa_container_of(node,
+                               struct lttng_channel_trigger_list,
+                               channel_triggers_ht_node);
+
+               trigger_list_element = (lttng_trigger_list_element *) zmalloc(sizeof(*trigger_list_element));
+               if (!trigger_list_element) {
+                       ret = -1;
+                       goto end;
+               }
+               CDS_INIT_LIST_HEAD(&trigger_list_element->node);
+               trigger_list_element->trigger = trigger;
+               cds_list_add(&trigger_list_element->node, &trigger_list->list);
+               DBG("Newly registered trigger bound to channel \"%s\"",
+                               channel->name);
+       }
+end:
+       return ret;
+}
+
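+/*
+ * Check whether a trigger's action is "notify", either directly or as one of
+ * the actions of an action list.
+ */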
+static
+bool is_trigger_action_notify(const struct lttng_trigger *trigger)
+{
+       bool is_notify = false;
+       unsigned int i, count;
+       enum lttng_action_status action_status;
+       const struct lttng_action *action =
+                       lttng_trigger_get_const_action(trigger);
+       enum lttng_action_type action_type;
+
+       LTTNG_ASSERT(action);
+       action_type = lttng_action_get_type(action);
+       if (action_type == LTTNG_ACTION_TYPE_NOTIFY) {
+               is_notify = true;
+               goto end;
+       } else if (action_type != LTTNG_ACTION_TYPE_LIST) {
+               goto end;
+       }
+
+       action_status = lttng_action_list_get_count(action, &count);
+       LTTNG_ASSERT(action_status == LTTNG_ACTION_STATUS_OK);
+
+       for (i = 0; i < count; i++) {
+               const struct lttng_action *inner_action =
+                               lttng_action_list_get_at_index(
+                                               action, i);
+
+               action_type = lttng_action_get_type(inner_action);
+               if (action_type == LTTNG_ACTION_TYPE_NOTIFY) {
+                       is_notify = true;
+                       goto end;
+               }
+       }
+
+end:
+       return is_notify;
+}
+
+static bool trigger_name_taken(struct notification_thread_state *state,
+               const struct lttng_trigger *trigger)
+{
+       struct cds_lfht_iter iter;
+
+       /*
+        * No duplicates are allowed in the triggers_by_name_uid_ht.
+        * The match is done against the trigger name and uid.
+        */
+       cds_lfht_lookup(state->triggers_by_name_uid_ht,
+                       hash_trigger_by_name_uid(trigger),
+                       match_trigger_by_name_uid,
+                       trigger,
+                       &iter);
+       return !!cds_lfht_iter_get_node(&iter);
+}
+
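+/*
+ * Generate a name for an unnamed trigger by incrementing the state's name
+ * offset until a name that is not already taken is produced.
+ */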
+static
+enum lttng_error_code generate_trigger_name(
+               struct notification_thread_state *state,
+               struct lttng_trigger *trigger, const char **name)
+{
+       enum lttng_error_code ret_code = LTTNG_OK;
+       bool taken = false;
+       enum lttng_trigger_status status;
+
+       do {
+               const int ret = lttng_trigger_generate_name(trigger,
+                               state->trigger_id.name_offset++);
+               if (ret) {
+                       /* Running out of memory is the only reason this can fail right now. */
+                       ret_code = LTTNG_ERR_NOMEM;
+                       break;
+               }
+
+               status = lttng_trigger_get_name(trigger, name);
+               LTTNG_ASSERT(status == LTTNG_TRIGGER_STATUS_OK);
+
+               taken = trigger_name_taken(state, trigger);
+       } while (taken || state->trigger_id.name_offset == UINT64_MAX);
+
+       return ret_code;
+}
+
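+/* Remove a trigger element from both trigger hash tables of the state. */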
+static inline
+void notif_thread_state_remove_trigger_ht_elem(
+               struct notification_thread_state *state,
+               struct lttng_trigger_ht_element *trigger_ht_element)
+{
+       LTTNG_ASSERT(state);
+       LTTNG_ASSERT(trigger_ht_element);
+
+       cds_lfht_del(state->triggers_ht, &trigger_ht_element->node);
+       cds_lfht_del(state->triggers_by_name_uid_ht, &trigger_ht_element->node_by_name_uid);
+}
+
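+/*
+ * Set up the tracer notifier of a trigger: add its token to the trigger
+ * tokens hash table, register it with event notifier error accounting and
+ * record the resulting error counter index in its condition.
+ */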
+static
+enum lttng_error_code setup_tracer_notifier(
+               struct notification_thread_state *state,
+               struct lttng_trigger *trigger)
+{
+       enum lttng_error_code ret;
+       enum event_notifier_error_accounting_status error_accounting_status;
+       struct cds_lfht_node *node;
+       uint64_t error_counter_index = 0;
+       struct lttng_condition *condition = lttng_trigger_get_condition(trigger);
+       struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element = NULL;
+
+       trigger_tokens_ht_element = (notification_trigger_tokens_ht_element *) zmalloc(sizeof(*trigger_tokens_ht_element));
+       if (!trigger_tokens_ht_element) {
+               ret = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       /* Add trigger token to the trigger_tokens_ht. */
+       cds_lfht_node_init(&trigger_tokens_ht_element->node);
+       trigger_tokens_ht_element->token = LTTNG_OPTIONAL_GET(trigger->tracer_token);
+       trigger_tokens_ht_element->trigger = trigger;
+
+       node = cds_lfht_add_unique(state->trigger_tokens_ht,
+                       hash_key_u64(&trigger_tokens_ht_element->token, lttng_ht_seed),
+                       match_trigger_token,
+                       &trigger_tokens_ht_element->token,
+                       &trigger_tokens_ht_element->node);
+       if (node != &trigger_tokens_ht_element->node) {
+               ret = LTTNG_ERR_TRIGGER_EXISTS;
+               goto error_free_ht_element;
+       }
+
+       error_accounting_status = event_notifier_error_accounting_register_event_notifier(
+                       trigger, &error_counter_index);
+       if (error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+               if (error_accounting_status == EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NO_INDEX_AVAILABLE) {
+                       DBG("Trigger list error accounting counter full.");
+                       ret = LTTNG_ERR_EVENT_NOTIFIER_ERROR_ACCOUNTING_FULL;
+               } else {
+                       ERR("Error registering trigger for error accounting");
+                       ret = LTTNG_ERR_EVENT_NOTIFIER_REGISTRATION;
+               }
+
+               goto error_remove_ht_element;
+       }
+
+       lttng_condition_event_rule_matches_set_error_counter_index(
+                       condition, error_counter_index);
+
+       ret = LTTNG_OK;
+       goto end;
+
+error_remove_ht_element:
+       cds_lfht_del(state->trigger_tokens_ht, &trigger_tokens_ht_element->node);
+error_free_ht_element:
+       free(trigger_tokens_ht_element);
+end:
+       return ret;
+}
+
+/*
+ * FIXME A client's credentials are not checked when registering a trigger.
+ *
+ * The effects of this are benign since:
+ *     - The client will succeed in registering the trigger, as it is valid,
+ *     - The trigger will, internally, be bound to the channel/session,
+ *     - The notifications will not be sent since the client's credentials
+ *       are checked against the channel at that moment.
+ *
+ * If this function returns a non-zero value, it means something is
+ * fundamentally broken and the whole subsystem/thread will be torn down.
+ *
+ * If a non-fatal error occurs, just set the cmd_result to the appropriate
+ * error code.
+ */
+static
+int handle_notification_thread_command_register_trigger(
+               struct notification_thread_state *state,
+               struct lttng_trigger *trigger,
+               bool is_trigger_anonymous,
+               enum lttng_error_code *cmd_result)
+{
+       int ret = 0;
+       struct lttng_condition *condition;
+       struct notification_client_list *client_list = NULL;
+       struct lttng_trigger_ht_element *trigger_ht_element = NULL;
+       struct cds_lfht_node *node;
+       const char *trigger_name;
+       bool free_trigger = true;
+       struct lttng_evaluation *evaluation = NULL;
+       struct lttng_credentials object_creds;
+       uid_t object_uid;
+       gid_t object_gid;
+       enum action_executor_status executor_status;
+       const uint64_t trigger_tracer_token =
+                       state->trigger_id.next_tracer_token++;
+
+       rcu_read_lock();
+
+       /* Set the trigger's tracer token. */
+       lttng_trigger_set_tracer_token(trigger, trigger_tracer_token);
+
+       if (!is_trigger_anonymous) {
+               if (lttng_trigger_get_name(trigger, &trigger_name) ==
+                               LTTNG_TRIGGER_STATUS_UNSET) {
+                       const enum lttng_error_code ret_code =
+                                       generate_trigger_name(state, trigger,
+                                                       &trigger_name);
+
+                       if (ret_code != LTTNG_OK) {
+                               /* Fatal error. */
+                               ret = -1;
+                               *cmd_result = ret_code;
+                               goto error;
+                       }
+               } else if (trigger_name_taken(state, trigger)) {
+                       /* Not a fatal error. */
+                       *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+                       ret = 0;
+                       goto error;
+               }
+       } else {
+               trigger_name = "(anonymous)";
+       }
+
+       condition = lttng_trigger_get_condition(trigger);
+       LTTNG_ASSERT(condition);
+
+       /* Some conditions require tracers to implement a minimal ABI version. */
+       if (!condition_is_supported(condition)) {
+               *cmd_result = LTTNG_ERR_NOT_SUPPORTED;
+               goto error;
+       }
+
+       trigger_ht_element = (lttng_trigger_ht_element *) zmalloc(sizeof(*trigger_ht_element));
+       if (!trigger_ht_element) {
+               ret = -1;
+               goto error;
+       }
+
+       /* Add trigger to the trigger_ht. */
+       cds_lfht_node_init(&trigger_ht_element->node);
+       cds_lfht_node_init(&trigger_ht_element->node_by_name_uid);
+       trigger_ht_element->trigger = trigger;
+
+       node = cds_lfht_add_unique(state->triggers_ht,
+                       lttng_condition_hash(condition),
+                       match_trigger,
+                       trigger,
+                       &trigger_ht_element->node);
+       if (node != &trigger_ht_element->node) {
+               /* Not a fatal error, simply report it to the client. */
+               *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+               goto error_free_ht_element;
+       }
+
+       node = cds_lfht_add_unique(state->triggers_by_name_uid_ht,
+                       hash_trigger_by_name_uid(trigger),
+                       match_trigger_by_name_uid,
+                       trigger,
+                       &trigger_ht_element->node_by_name_uid);
+       if (node != &trigger_ht_element->node_by_name_uid) {
+               /* Internal error: add to triggers_ht should have failed. */
+               ret = -1;
+               goto error_free_ht_element;
+       }
+
+       /* From this point consider the trigger registered. */
+       lttng_trigger_set_as_registered(trigger);
+
+       /*
+        * Some triggers might need a tracer notifier depending on their
+        * condition and actions.
+        */
+       if (lttng_trigger_needs_tracer_notifier(trigger)) {
+               enum lttng_error_code error_code;
+
+               error_code = setup_tracer_notifier(state, trigger);
+               if (error_code != LTTNG_OK) {
+                       notif_thread_state_remove_trigger_ht_elem(state,
+                                       trigger_ht_element);
+                       if (error_code == LTTNG_ERR_NOMEM) {
+                               ret = -1;
+                       } else {
+                               *cmd_result = error_code;
+                               ret = 0;
+                       }
+
+                       goto error_free_ht_element;
+               }
+       }
+
+       /*
+        * The rest only applies to triggers that have a "notify" action.
+        * It is not skipped as this is the only action type currently
+        * supported.
+        */
+       if (is_trigger_action_notify(trigger)) {
+               /*
+                * Find or create the client list of this condition. It may
+                * already be present if another trigger is already registered
+                * with the same condition.
+                */
+               client_list = get_client_list_from_condition(state, condition);
+               if (!client_list) {
+                       /*
+                        * No client list exists for this condition yet; create
+                        * a new one and build it up.
+                        */
+                       client_list = notification_client_list_create(state, condition);
+                       if (!client_list) {
+                               ERR("Error creating notification client list for trigger %s", trigger->name);
+                               goto error_free_ht_element;
+                       }
+               }
+
+               CDS_INIT_LIST_HEAD(&trigger_ht_element->client_list_trigger_node);
+
+               pthread_mutex_lock(&client_list->lock);
+               cds_list_add(&trigger_ht_element->client_list_trigger_node, &client_list->triggers_list);
+               pthread_mutex_unlock(&client_list->lock);
+       }
+
+       /*
+        * Ownership of the trigger and of its wrapper was transferred to
+        * the triggers_ht. The same applies to the token ht element, if any.
+        */
+       trigger_ht_element = NULL;
+       free_trigger = false;
+
+       switch (get_condition_binding_object(condition)) {
+       case LTTNG_OBJECT_TYPE_SESSION:
+               /* Add the trigger to the list if it matches a known session. */
+               ret = bind_trigger_to_matching_session(trigger, state);
+               if (ret) {
+                       goto error_free_ht_element;
+               }
+               break;
+       case LTTNG_OBJECT_TYPE_CHANNEL:
+               /*
+                * Add the trigger to list of triggers bound to the channels
+                * currently known.
+                */
+               ret = bind_trigger_to_matching_channels(trigger, state);
+               if (ret) {
+                       goto error_free_ht_element;
+               }
+               break;
+       case LTTNG_OBJECT_TYPE_NONE:
+               break;
+       default:
+               ERR("Unknown object type on which to bind a newly registered trigger was encountered");
+               ret = -1;
+               goto error_free_ht_element;
+       }
+
+       /*
+        * The new trigger's condition must be evaluated against the current
+        * state.
+        *
+        * In the case of a `notify` action, since nothing prevents clients
+        * from subscribing to a condition before the corresponding trigger is
+        * registered, we have to evaluate this new condition right away.
+        *
+        * At some point, we were waiting for the next "evaluation" (e.g. on
+        * reception of a channel sample) to evaluate this new condition, but
+        * that was broken.
+        *
+        * The reason it was broken is that waiting for the next sample
+        * does not allow us to properly handle transitions for edge-triggered
+        * conditions.
+        *
+        * Consider this example: when we handle a new channel sample, we
+        * evaluate each condition twice: once with the previous state, and
+        * again with the newest state. We then use those two results to
+        * determine whether a state change happened: a condition was false and
+        * became true. If a state change happened, we have to notify clients.
+        *
+        * Now, if a client subscribes to a given notification and registers
+        * a trigger *after* that subscription, we have to make sure the
+        * condition is evaluated at this point while considering only the
+        * current state. Otherwise, the next evaluation cycle may only see
+        * that the evaluations remain the same (true for samples n-1 and n) and
+        * the client will never know that the condition has been met.
+        */
+       switch (get_condition_binding_object(condition)) {
+       case LTTNG_OBJECT_TYPE_SESSION:
+               ret = evaluate_session_condition_for_client(condition, state,
+                               &evaluation, &object_uid,
+                               &object_gid);
+               LTTNG_OPTIONAL_SET(&object_creds.uid, object_uid);
+               LTTNG_OPTIONAL_SET(&object_creds.gid, object_gid);
+               break;
+       case LTTNG_OBJECT_TYPE_CHANNEL:
+               ret = evaluate_channel_condition_for_client(condition, state,
+                               &evaluation, &object_uid,
+                               &object_gid);
+               LTTNG_OPTIONAL_SET(&object_creds.uid, object_uid);
+               LTTNG_OPTIONAL_SET(&object_creds.gid, object_gid);
+               break;
+       case LTTNG_OBJECT_TYPE_NONE:
+               ret = 0;
+               break;
+       case LTTNG_OBJECT_TYPE_UNKNOWN:
+       default:
+               ret = -1;
+               break;
+       }
+
+       if (ret) {
+               /* Fatal error. */
+               goto error_free_ht_element;
+       }
+
+       DBG("Newly registered trigger's condition evaluated to %s",
+                       evaluation ? "true" : "false");
+       if (!evaluation) {
+               /* Evaluation yielded nothing. Normal exit. */
+               ret = 0;
+               goto success;
+       }
+
+       /*
+        * Ownership of `evaluation` transferred to the action executor
+        * no matter the result.
+        */
+       executor_status = action_executor_enqueue_trigger(state->executor,
+                       trigger, evaluation, &object_creds, client_list);
+       evaluation = NULL;
+       switch (executor_status) {
+       case ACTION_EXECUTOR_STATUS_OK:
+               break;
+       case ACTION_EXECUTOR_STATUS_ERROR:
+       case ACTION_EXECUTOR_STATUS_INVALID:
+               /*
+                * TODO Add trigger identification (name/id) when
+                * it is added to the API.
+                */
+               ERR("Fatal error occurred while enqueuing action associated with newly registered trigger");
+               ret = -1;
+               goto error_free_ht_element;
+       case ACTION_EXECUTOR_STATUS_OVERFLOW:
+               /*
+                * TODO Add trigger identification (name/id) when
+                * it is added to the API.
+                *
+                * Not a fatal error.
+                */
+               WARN("No space left when enqueuing action associated with newly registered trigger");
+               ret = 0;
+               goto success;
+       default:
+               abort();
+       }
+
+success:
+       *cmd_result = LTTNG_OK;
+       DBG("Registered trigger: name = `%s`, tracer token = %" PRIu64,
+                       trigger_name, trigger_tracer_token);
+       goto end;
+
+error_free_ht_element:
+       if (trigger_ht_element) {
+               /* Delayed removal due to RCU constraint on delete. */
+               call_rcu(&trigger_ht_element->rcu_node,
+                               free_lttng_trigger_ht_element_rcu);
+       }
+error:
+       if (free_trigger) {
+               /*
+                * Other objects might still hold a reference to the trigger,
+                * so mark it as unregistered before destroying it.
+                */
+               lttng_trigger_set_as_unregistered(trigger);
+               lttng_trigger_destroy(trigger);
+       }
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+static
+void free_lttng_trigger_ht_element_rcu(struct rcu_head *node)
+{
+       free(caa_container_of(node, struct lttng_trigger_ht_element,
+                       rcu_node));
+}
+
+static
+void free_notification_trigger_tokens_ht_element_rcu(struct rcu_head *node)
+{
+       free(caa_container_of(node, struct notification_trigger_tokens_ht_element,
+                       rcu_node));
+}
+
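+/*
+ * Tear down the tracer notifier of a trigger: unregister it from event
+ * notifier error accounting and remove its token from the trigger tokens
+ * hash table.
+ */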
+static
+void teardown_tracer_notifier(struct notification_thread_state *state,
+               const struct lttng_trigger *trigger)
+{
+       struct cds_lfht_iter iter;
+       struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element;
+
+       cds_lfht_for_each_entry(state->trigger_tokens_ht, &iter,
+                       trigger_tokens_ht_element, node) {
+
+               if (!lttng_trigger_is_equal(trigger,
+                                       trigger_tokens_ht_element->trigger)) {
+                       continue;
+               }
+
+               event_notifier_error_accounting_unregister_event_notifier(
+                               trigger_tokens_ht_element->trigger);
+
+               /* TODO: talk to all apps and remove it. */
+               DBG("Removed trigger from tokens_ht");
+               cds_lfht_del(state->trigger_tokens_ht,
+                               &trigger_tokens_ht_element->node);
+
+               call_rcu(&trigger_tokens_ht_element->rcu_node,
+                               free_notification_trigger_tokens_ht_element_rcu);
+
+               break;
+       }
+}
+
+static
+int handle_notification_thread_command_unregister_trigger(
+               struct notification_thread_state *state,
+               const struct lttng_trigger *trigger,
+               enum lttng_error_code *_cmd_reply)
+{
+       struct cds_lfht_iter iter;
+       struct cds_lfht_node *triggers_ht_node;
+       struct lttng_channel_trigger_list *trigger_list;
+       struct notification_client_list *client_list;
+       struct lttng_trigger_ht_element *trigger_ht_element = NULL;
+       const struct lttng_condition *condition = lttng_trigger_get_const_condition(
+                       trigger);
+       enum lttng_error_code cmd_reply;
+
+       rcu_read_lock();
+
+       cds_lfht_lookup(state->triggers_ht,
+                       lttng_condition_hash(condition),
+                       match_trigger,
+                       trigger,
+                       &iter);
+       triggers_ht_node = cds_lfht_iter_get_node(&iter);
+       if (!triggers_ht_node) {
+               cmd_reply = LTTNG_ERR_TRIGGER_NOT_FOUND;
+               goto end;
+       } else {
+               cmd_reply = LTTNG_OK;
+       }
+
+       trigger_ht_element = caa_container_of(triggers_ht_node,
+                       struct lttng_trigger_ht_element, node);
+
+       /* Remove trigger from channel_triggers_ht. */
+       cds_lfht_for_each_entry(state->channel_triggers_ht, &iter, trigger_list,
+                       channel_triggers_ht_node) {
+               struct lttng_trigger_list_element *trigger_element, *tmp;
+
+               cds_list_for_each_entry_safe(trigger_element, tmp,
+                               &trigger_list->list, node) {
+                       if (!lttng_trigger_is_equal(trigger, trigger_element->trigger)) {
+                               continue;
+                       }
+
+                       DBG("Removed trigger from channel_triggers_ht");
+                       cds_list_del(&trigger_element->node);
+                       /* A trigger can only appear once per channel */
+                       break;
+               }
+       }
+
+       if (lttng_trigger_needs_tracer_notifier(trigger)) {
+               teardown_tracer_notifier(state, trigger);
+       }
+
+       if (is_trigger_action_notify(trigger)) {
+               /*
+                * Remove and release the client list from
+                * notification_trigger_clients_ht.
+                */
+               client_list = get_client_list_from_condition(state, condition);
+               LTTNG_ASSERT(client_list);
+
+               pthread_mutex_lock(&client_list->lock);
+               cds_list_del(&trigger_ht_element->client_list_trigger_node);
+               pthread_mutex_unlock(&client_list->lock);
+
+               /* Put the reference taken above and the hash table's reference. */
+               notification_client_list_put(client_list);
+               notification_client_list_put(client_list);
+               client_list = NULL;
+       }
+
+       /* Remove trigger from triggers_ht. */
+       notif_thread_state_remove_trigger_ht_elem(state, trigger_ht_element);
+
+       /* Release the ownership of the trigger. */
+       lttng_trigger_destroy(trigger_ht_element->trigger);
+       call_rcu(&trigger_ht_element->rcu_node, free_lttng_trigger_ht_element_rcu);
+end:
+       rcu_read_unlock();
+       if (_cmd_reply) {
+               *_cmd_reply = cmd_reply;
+       }
+       return 0;
+}
+
+/* Returns 0 on success, 1 on exit requested, negative value on error. */
+int handle_notification_thread_command(
+               struct notification_thread_handle *handle,
+               struct notification_thread_state *state)
+{
+       int ret;
+       uint64_t counter;
+       struct notification_thread_command *cmd;
+
+       /* Read the event pipe to put it back into a quiescent state. */
+       ret = lttng_read(lttng_pipe_get_readfd(handle->cmd_queue.event_pipe), &counter,
+                       sizeof(counter));
+       if (ret != sizeof(counter)) {
+               goto error;
+       }
+
+       pthread_mutex_lock(&handle->cmd_queue.lock);
+       cmd = cds_list_first_entry(&handle->cmd_queue.list,
+                       struct notification_thread_command, cmd_list_node);
+       cds_list_del(&cmd->cmd_list_node);
+       pthread_mutex_unlock(&handle->cmd_queue.lock);
+
+       DBG("Received `%s` command",
+                       notification_command_type_str(cmd->type));
+       switch (cmd->type) {
+       case NOTIFICATION_COMMAND_TYPE_REGISTER_TRIGGER:
+               ret = handle_notification_thread_command_register_trigger(state,
+                               cmd->parameters.register_trigger.trigger,
+                               cmd->parameters.register_trigger.is_trigger_anonymous,
+                               &cmd->reply_code);
+               break;
+       case NOTIFICATION_COMMAND_TYPE_UNREGISTER_TRIGGER:
+               ret = handle_notification_thread_command_unregister_trigger(
+                               state,
+                               cmd->parameters.unregister_trigger.trigger,
+                               &cmd->reply_code);
+               break;
+       case NOTIFICATION_COMMAND_TYPE_ADD_CHANNEL:
+               ret = handle_notification_thread_command_add_channel(
+                               state,
+                               cmd->parameters.add_channel.session.name,
+                               cmd->parameters.add_channel.session.uid,
+                               cmd->parameters.add_channel.session.gid,
+                               cmd->parameters.add_channel.channel.name,
+                               cmd->parameters.add_channel.channel.domain,
+                               cmd->parameters.add_channel.channel.key,
+                               cmd->parameters.add_channel.channel.capacity,
+                               &cmd->reply_code);
+               break;
+       case NOTIFICATION_COMMAND_TYPE_REMOVE_CHANNEL:
+               ret = handle_notification_thread_command_remove_channel(
+                               state, cmd->parameters.remove_channel.key,
+                               cmd->parameters.remove_channel.domain,
+                               &cmd->reply_code);
+               break;
+       case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING:
+       case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED:
+               ret = handle_notification_thread_command_session_rotation(
+                               state,
+                               cmd->type,
+                               cmd->parameters.session_rotation.session_name,
+                               cmd->parameters.session_rotation.uid,
+                               cmd->parameters.session_rotation.gid,
+                               cmd->parameters.session_rotation.trace_archive_chunk_id,
+                               cmd->parameters.session_rotation.location,
+                               &cmd->reply_code);
+               break;
+       case NOTIFICATION_COMMAND_TYPE_ADD_TRACER_EVENT_SOURCE:
+               ret = handle_notification_thread_command_add_tracer_event_source(
+                               state,
+                               cmd->parameters.tracer_event_source.tracer_event_source_fd,
+                               cmd->parameters.tracer_event_source.domain,
+                               &cmd->reply_code);
+               break;
+       case NOTIFICATION_COMMAND_TYPE_REMOVE_TRACER_EVENT_SOURCE:
+               ret = handle_notification_thread_command_remove_tracer_event_source(
+                               state,
+                               cmd->parameters.tracer_event_source.tracer_event_source_fd,
+                               &cmd->reply_code);
+               break;
+       case NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS:
+       {
+               struct lttng_triggers *triggers = NULL;
+
+               ret = handle_notification_thread_command_list_triggers(
+                               handle,
+                               state,
+                               cmd->parameters.list_triggers.uid,
+                               &triggers,
+                               &cmd->reply_code);
+               cmd->reply.list_triggers.triggers = triggers;
+               ret = 0;
+               break;
+       }
+       case NOTIFICATION_COMMAND_TYPE_QUIT:
+               cmd->reply_code = LTTNG_OK;
+               ret = 1;
+               goto end;
+       case NOTIFICATION_COMMAND_TYPE_GET_TRIGGER:
+       {
+               struct lttng_trigger *trigger = NULL;
+
+               ret = handle_notification_thread_command_get_trigger(state,
+                               cmd->parameters.get_trigger.trigger, &trigger,
+                               &cmd->reply_code);
+               cmd->reply.get_trigger.trigger = trigger;
+               break;
+       }
+       case NOTIFICATION_COMMAND_TYPE_CLIENT_COMMUNICATION_UPDATE:
+       {
+               const enum client_transmission_status client_status =
+                               cmd->parameters.client_communication_update
+                                               .status;
+               const notification_client_id client_id =
+                               cmd->parameters.client_communication_update.id;
+               struct notification_client *client;
+
+               rcu_read_lock();
+               client = get_client_from_id(client_id, state);
+
+               if (!client) {
+                       /*
+                        * The client error was probably already picked up by
+                        * the notification thread, or the client disconnected
+                        * gracefully while this command was queued.
+                        */
+                       DBG("Failed to find notification client to update communication status, client id = %" PRIu64,
+                                       client_id);
+                       ret = 0;
+               } else {
+                       ret = client_handle_transmission_status(
+                                       client, client_status, state);
+               }
+               rcu_read_unlock();
+               break;
+       }
+       default:
+               ERR("Unknown internal command received");
+               goto error_unlock;
+       }
+
+       if (ret) {
+               goto error_unlock;
+       }
+end:
+       if (cmd->is_async) {
+               free(cmd);
+               cmd = NULL;
+       } else {
+               lttng_waiter_wake_up(&cmd->reply_waiter);
+       }
+       return ret;
+error_unlock:
+       /* Report a fatal error and wake up the calling thread. */
+       cmd->reply_code = LTTNG_ERR_FATAL;
+       lttng_waiter_wake_up(&cmd->reply_waiter);
+error:
+       /* Indicate a fatal error to the caller. */
+       return -1;
+}
+
+static
+int socket_set_non_blocking(int socket)
+{
+       int ret, flags;
+
+       /* Set the socket as non-blocking. */
+       ret = fcntl(socket, F_GETFL, 0);
+       if (ret == -1) {
+               PERROR("fcntl get socket flags");
+               goto end;
+       }
+       flags = ret;
+
+       ret = fcntl(socket, F_SETFL, flags | O_NONBLOCK);
+       if (ret == -1) {
+               PERROR("fcntl set O_NONBLOCK socket flag");
+               goto end;
+       }
+       DBG("Client socket (fd = %i) set as non-blocking", socket);
+end:
+       return ret;
+}
+
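+/*
+ * Reset a client's inbound communication state so that it expects the header
+ * of a new notification channel message.
+ */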
+static
+int client_reset_inbound_state(struct notification_client *client)
+{
+       int ret;
+
+       lttng_payload_clear(&client->communication.inbound.payload);
+
+       client->communication.inbound.bytes_to_receive =
+                       sizeof(struct lttng_notification_channel_message);
+       client->communication.inbound.msg_type =
+                       LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN;
+       LTTNG_SOCK_SET_UID_CRED(&client->communication.inbound.creds, -1);
+       LTTNG_SOCK_SET_GID_CRED(&client->communication.inbound.creds, -1);
+       ret = lttng_dynamic_buffer_set_size(
+                       &client->communication.inbound.payload.buffer,
+                       client->communication.inbound.bytes_to_receive);
+
+       return ret;
+}
+
+int handle_notification_thread_client_connect(
+               struct notification_thread_state *state)
+{
+       int ret;
+       struct notification_client *client;
+
+       DBG("Handling new notification channel client connection");
+
+       client = (notification_client *) zmalloc(sizeof(*client));
+       if (!client) {
+               /* Fatal error. */
+               ret = -1;
+               goto error;
+       }
+
+       pthread_mutex_init(&client->lock, NULL);
+       client->id = state->next_notification_client_id++;
+       CDS_INIT_LIST_HEAD(&client->condition_list);
+       lttng_payload_init(&client->communication.inbound.payload);
+       lttng_payload_init(&client->communication.outbound.payload);
+       client->communication.inbound.expect_creds = true;
+
+       ret = client_reset_inbound_state(client);
+       if (ret) {
+               ERR("Failed to reset client communication's inbound state");
+               ret = 0;
+               goto error;
+       }
+
+       ret = lttcomm_accept_unix_sock(state->notification_channel_socket);
+       if (ret < 0) {
+               ERR("Failed to accept new notification channel client connection");
+               ret = 0;
+               goto error;
+       }
+
+       client->socket = ret;
+
+       ret = socket_set_non_blocking(client->socket);
+       if (ret) {
+               ERR("Failed to set new notification channel client connection socket as non-blocking");
+               goto error;
+       }
+
+       ret = lttcomm_setsockopt_creds_unix_sock(client->socket);
+       if (ret < 0) {
+               ERR("Failed to set socket options on new notification channel client socket");
+               ret = 0;
+               goto error;
+       }
+
+       ret = lttng_poll_add(&state->events, client->socket,
+                       LPOLLIN | LPOLLERR |
+                       LPOLLHUP | LPOLLRDHUP);
+       if (ret < 0) {
+               ERR("Failed to add notification channel client socket to poll set");
+               ret = 0;
+               goto error;
+       }
+       DBG("Added new notification channel client socket (%i) to poll set",
+                       client->socket);
+
+       rcu_read_lock();
+       cds_lfht_add(state->client_socket_ht,
+                       hash_client_socket(client->socket),
+                       &client->client_socket_ht_node);
+       cds_lfht_add(state->client_id_ht,
+                       hash_client_id(client->id),
+                       &client->client_id_ht_node);
+       rcu_read_unlock();
+
+       return ret;
+
+error:
+       notification_client_destroy(client, state);
+       return ret;
+}
+
+/*
+ * RCU read-lock must be held by the caller.
+ * Client lock must _not_ be held by the caller.
+ */
+static
+int notification_thread_client_disconnect(
+               struct notification_client *client,
+               struct notification_thread_state *state)
+{
+       int ret;
+       struct lttng_condition_list_element *condition_list_element, *tmp;
+
+       /* Acquire the client lock to disable its communication atomically. */
+       pthread_mutex_lock(&client->lock);
+       client->communication.active = false;
+       cds_lfht_del(state->client_socket_ht, &client->client_socket_ht_node);
+       cds_lfht_del(state->client_id_ht, &client->client_id_ht_node);
+       pthread_mutex_unlock(&client->lock);
+
+       ret = lttng_poll_del(&state->events, client->socket);
+       if (ret) {
+               ERR("Failed to remove client socket %d from poll set",
+                               client->socket);
+       }
+
+       /* Release all conditions to which the client was subscribed. */
+       cds_list_for_each_entry_safe(condition_list_element, tmp,
+                       &client->condition_list, node) {
+               (void) notification_thread_client_unsubscribe(client,
+                               condition_list_element->condition, state, NULL);
+       }
+
+       /*
+        * Client no longer accessible to other threads (through the
+        * client lists).
+        */
+       notification_client_destroy(client, state);
+       return ret;
+}
+
+int handle_notification_thread_client_disconnect(
+               int client_socket, struct notification_thread_state *state)
+{
+       int ret = 0;
+       struct notification_client *client;
+
+       rcu_read_lock();
+       DBG("Closing client connection (socket fd = %i)",
+                       client_socket);
+       client = get_client_from_socket(client_socket, state);
+       if (!client) {
+               /* Internal state corruption, fatal error. */
+               ERR("Unable to find client (socket fd = %i)",
+                               client_socket);
+               ret = -1;
+               goto end;
+       }
+
+       ret = notification_thread_client_disconnect(client, state);
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+int handle_notification_thread_client_disconnect_all(
+               struct notification_thread_state *state)
+{
+       struct cds_lfht_iter iter;
+       struct notification_client *client;
+       bool error_encountered = false;
+
+       rcu_read_lock();
+       DBG("Closing all client connections");
+       cds_lfht_for_each_entry(state->client_socket_ht, &iter, client,
+                       client_socket_ht_node) {
+               int ret;
+
+               ret = notification_thread_client_disconnect(
+                               client, state);
+               if (ret) {
+                       error_encountered = true;
+               }
+       }
+       rcu_read_unlock();
+       return error_encountered ? 1 : 0;
+}
+
+int handle_notification_thread_trigger_unregister_all(
+               struct notification_thread_state *state)
+{
+       bool error_occurred = false;
+       struct cds_lfht_iter iter;
+       struct lttng_trigger_ht_element *trigger_ht_element;
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(state->triggers_ht, &iter, trigger_ht_element,
+                       node) {
+               int ret = handle_notification_thread_command_unregister_trigger(
+                               state, trigger_ht_element->trigger, NULL);
+               if (ret) {
+                       error_occurred = true;
+               }
+       }
+       rcu_read_unlock();
+       return error_occurred ? -1 : 0;
+}
+
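+/*
+ * Adjust the monitoring of a client's socket according to the status of the
+ * last transmission: monitor only for incoming data once the outgoing queue
+ * is flushed, also monitor for write readiness while data remains queued,
+ * and disconnect the client on failure.
+ */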
+static
+int client_handle_transmission_status(
+               struct notification_client *client,
+               enum client_transmission_status transmission_status,
+               struct notification_thread_state *state)
+{
+       int ret = 0;
+
+       switch (transmission_status) {
+       case CLIENT_TRANSMISSION_STATUS_COMPLETE:
+               ret = lttng_poll_mod(&state->events, client->socket,
+                               CLIENT_POLL_MASK_IN);
+               if (ret) {
+                       goto end;
+               }
+
+               break;
+       case CLIENT_TRANSMISSION_STATUS_QUEUED:
+               /*
+                * We want to be notified whenever there is buffer space
+                * available to send the rest of the payload.
+                */
+               ret = lttng_poll_mod(&state->events, client->socket,
+                               CLIENT_POLL_MASK_IN_OUT);
+               if (ret) {
+                       goto end;
+               }
+               break;
+       case CLIENT_TRANSMISSION_STATUS_FAIL:
+               ret = notification_thread_client_disconnect(client, state);
+               if (ret) {
+                       goto end;
+               }
+               break;
+       case CLIENT_TRANSMISSION_STATUS_ERROR:
+               ret = -1;
+               goto end;
+       default:
+               abort();
+       }
+end:
+       return ret;
+}
+
+/* Client lock must be acquired by caller. */
+static
+enum client_transmission_status client_flush_outgoing_queue(
+               struct notification_client *client)
+{
+       ssize_t ret;
+       size_t to_send_count;
+       enum client_transmission_status status;
+       struct lttng_payload_view pv = lttng_payload_view_from_payload(
+                       &client->communication.outbound.payload, 0, -1);
+       const int fds_to_send_count =
+                       lttng_payload_view_get_fd_handle_count(&pv);
+
+       ASSERT_LOCKED(client->lock);
+
+       if (!client->communication.active) {
+               status = CLIENT_TRANSMISSION_STATUS_FAIL;
+               goto end;
+       }
+
+       if (pv.buffer.size == 0) {
+               /*
+                * If both data and fds are equal to zero, we are in an invalid
+                * state.
+                */
+               LTTNG_ASSERT(fds_to_send_count != 0);
+               goto send_fds;
+       }
+
+       /* Send data. */
+       to_send_count = pv.buffer.size;
+       DBG("Flushing client (socket fd = %i) outgoing queue",
+                       client->socket);
+
+       ret = lttcomm_send_unix_sock_non_block(client->socket,
+                       pv.buffer.data,
+                       to_send_count);
+       if (ret >= 0 && ret < to_send_count) {
+               DBG("Client (socket fd = %i) outgoing queue could not be completely flushed",
+                               client->socket);
+               to_send_count -= std::max(ret, (ssize_t) 0);
+
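+               /*
+                * Compact the buffer: move the unsent remainder to the front
+                * and shrink the payload to the remaining size.
+                */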
+               memmove(client->communication.outbound.payload.buffer.data,
+                               pv.buffer.data +
+                               pv.buffer.size - to_send_count,
+                               to_send_count);
+               ret = lttng_dynamic_buffer_set_size(
+                               &client->communication.outbound.payload.buffer,
+                               to_send_count);
+               if (ret) {
+                       goto error;
+               }
+
+               status = CLIENT_TRANSMISSION_STATUS_QUEUED;
+               goto end;
+       } else if (ret < 0) {
+               /* Generic error, disable the client's communication. */
+               ERR("Failed to flush outgoing queue, disconnecting client (socket fd = %i)",
+                               client->socket);
+               client->communication.active = false;
+               status = CLIENT_TRANSMISSION_STATUS_FAIL;
+               goto end;
+       } else {
+               /*
+                * No error; the queue was flushed completely.
+                *
+                * The payload buffer size is used later to
+                * check whether notifications are queued. So although the
+                * direct caller knows that the transmission is complete, we
+                * need to set the buffer size to zero.
+                */
+               ret = lttng_dynamic_buffer_set_size(
+                               &client->communication.outbound.payload.buffer, 0);
+               if (ret) {
+                       goto error;
+               }
+       }
+
+send_fds:
+       /* No fds to send, transmission is complete. */
+       if (fds_to_send_count == 0) {
+               status = CLIENT_TRANSMISSION_STATUS_COMPLETE;
+               goto end;
+       }
+
+       ret = lttcomm_send_payload_view_fds_unix_sock_non_block(
+                       client->socket, &pv);
+       if (ret < 0) {
+               /* Generic error, disable the client's communication. */
+               ERR("Failed to flush outgoing fds queue, disconnecting client (socket fd = %i)",
+                               client->socket);
+               client->communication.active = false;
+               status = CLIENT_TRANSMISSION_STATUS_FAIL;
+               goto end;
+       } else if (ret == 0) {
+               /* Nothing could be sent. */
+               status = CLIENT_TRANSMISSION_STATUS_QUEUED;
+       } else {
+               /* Fd passing is all or nothing. */
+               status = CLIENT_TRANSMISSION_STATUS_COMPLETE;
+               /*
+                * The payload _fd_array count is used later to
+                * check whether notifications are queued. So although the
+                * direct caller knows that the transmission is complete, we
+                * need to clear the _fd_array for the queuing check.
+                */
+               lttng_dynamic_pointer_array_clear(
+                               &client->communication.outbound.payload
+                                                ._fd_handles);
+       }
+
+end:
+       if (status == CLIENT_TRANSMISSION_STATUS_COMPLETE) {
+               client->communication.outbound.queued_command_reply = false;
+               client->communication.outbound.dropped_notification = false;
+               lttng_payload_clear(&client->communication.outbound.payload);
+       }
+
+       return status;
+error:
+       return CLIENT_TRANSMISSION_STATUS_ERROR;
+}
+
+static
+bool client_has_outbound_data_left(
+               const struct notification_client *client)
+{
+       const struct lttng_payload_view pv = lttng_payload_view_from_payload(
+                       &client->communication.outbound.payload, 0, -1);
+       const bool has_data = pv.buffer.size != 0;
+       const bool has_fds = lttng_payload_view_get_fd_handle_count(&pv);
+
+       return has_data || has_fds;
+}
+
+/* Client lock must _not_ be held by the caller. */
+static
+int client_send_command_reply(struct notification_client *client,
+               struct notification_thread_state *state,
+               enum lttng_notification_channel_status status)
+{
+       int ret;
+       struct lttng_notification_channel_command_reply reply = {
+               .status = (int8_t) status,
+       };
+       struct lttng_notification_channel_message msg = {
+               .type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_COMMAND_REPLY,
+               .size = sizeof(reply),
+       };
+       char buffer[sizeof(msg) + sizeof(reply)];
+       enum client_transmission_status transmission_status;
+
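+       /* Pack the message header and the command reply into one buffer. */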
+       memcpy(buffer, &msg, sizeof(msg));
+       memcpy(buffer + sizeof(msg), &reply, sizeof(reply));
+       DBG("Send command reply (%i)", (int) status);
+
+       pthread_mutex_lock(&client->lock);
+       if (client->communication.outbound.queued_command_reply) {
+               /* Protocol error. */
+               goto error_unlock;
+       }
+
+       /* Enqueue buffer to outgoing queue and flush it. */
+       ret = lttng_dynamic_buffer_append(
+                       &client->communication.outbound.payload.buffer,
+                       buffer, sizeof(buffer));
+       if (ret) {
+               goto error_unlock;
+       }
+
+       transmission_status = client_flush_outgoing_queue(client);
+
+       if (client_has_outbound_data_left(client)) {
+               /* Queue could not be emptied. */
+               client->communication.outbound.queued_command_reply = true;
+       }
+
+       pthread_mutex_unlock(&client->lock);
+       ret = client_handle_transmission_status(
+                       client, transmission_status, state);
+       if (ret) {
+               goto error;
+       }
+
+       return 0;
+error_unlock:
+       pthread_mutex_unlock(&client->lock);
+error:
+       return -1;
+}
+
+static
+int client_handle_message_unknown(struct notification_client *client,
+               struct notification_thread_state *state)
+{
+       int ret;
+       /*
+        * Receiving message header. The function will be called again
+        * once the rest of the message has been received and can be
+        * interpreted.
+        */
+       const struct lttng_notification_channel_message *msg;
+
+       LTTNG_ASSERT(sizeof(*msg) == client->communication.inbound.payload.buffer.size);
+       msg = (const struct lttng_notification_channel_message *)
+                             client->communication.inbound.payload.buffer.data;
+
+       if (msg->size == 0 ||
+                       msg->size > DEFAULT_MAX_NOTIFICATION_CLIENT_MESSAGE_PAYLOAD_SIZE) {
+               ERR("Invalid notification channel message: length = %u",
+                               msg->size);
+               ret = -1;
+               goto end;
+       }
+
+       switch (msg->type) {
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE:
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE:
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE:
+               break;
+       default:
+               ret = -1;
+               ERR("Invalid notification channel message: unexpected message type");
+               goto end;
+       }
+
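+       /*
+        * Prepare to receive the message body: record its size, fd count and
+        * type, then resize the inbound buffer accordingly.
+        */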
+       client->communication.inbound.bytes_to_receive = msg->size;
+       client->communication.inbound.fds_to_receive = msg->fds;
+       client->communication.inbound.msg_type =
+                       (enum lttng_notification_channel_message_type) msg->type;
+       ret = lttng_dynamic_buffer_set_size(
+                       &client->communication.inbound.payload.buffer, msg->size);
+
+       /* msg is not valid anymore due to lttng_dynamic_buffer_set_size. */
+       msg = NULL;
+end:
+       return ret;
+}
+
+static
+int client_handle_message_handshake(struct notification_client *client,
+               struct notification_thread_state *state)
+{
+       int ret;
+       struct lttng_notification_channel_command_handshake *handshake_client;
+       const struct lttng_notification_channel_command_handshake handshake_reply = {
+                       .major = LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR,
+                       .minor = LTTNG_NOTIFICATION_CHANNEL_VERSION_MINOR,
+       };
+       const struct lttng_notification_channel_message msg_header = {
+                       .type = LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE,
+                       .size = sizeof(handshake_reply),
+       };
+       enum lttng_notification_channel_status status =
+                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
+       char send_buffer[sizeof(msg_header) + sizeof(handshake_reply)];
+
+       memcpy(send_buffer, &msg_header, sizeof(msg_header));
+       memcpy(send_buffer + sizeof(msg_header), &handshake_reply,
+                       sizeof(handshake_reply));
+
+       handshake_client =
+                       (struct lttng_notification_channel_command_handshake *)
+                                       client->communication.inbound.payload.buffer
+                                                       .data;
+       client->major = handshake_client->major;
+       client->minor = handshake_client->minor;
+       if (!client->communication.inbound.creds_received) {
+               ERR("No credentials received from client");
+               ret = -1;
+               goto end;
+       }
+
+       client->uid = LTTNG_SOCK_GET_UID_CRED(
+                       &client->communication.inbound.creds);
+       client->gid = LTTNG_SOCK_GET_GID_CRED(
+                       &client->communication.inbound.creds);
+       client->is_sessiond = LTTNG_SOCK_GET_PID_CRED(&client->communication.inbound.creds) == getpid();
+       DBG("Received handshake from client: uid = %u, gid = %u, protocol version = %i.%i, client is sessiond = %s",
+                       client->uid, client->gid, (int) client->major,
+                       (int) client->minor,
+                       client->is_sessiond ? "true" : "false");
+
+       if (handshake_client->major !=
+                       LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR) {
+               status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNSUPPORTED_VERSION;
+       }
+
+       pthread_mutex_lock(&client->lock);
+       /* Outgoing queue will be flushed when the command reply is sent. */
+       ret = lttng_dynamic_buffer_append(
+                       &client->communication.outbound.payload.buffer, send_buffer,
+                       sizeof(send_buffer));
+       if (ret) {
+               ERR("Failed to send protocol version to notification channel client");
+               goto end_unlock;
+       }
+
+       client->validated = true;
+       client->communication.active = true;
+       pthread_mutex_unlock(&client->lock);
+
+       /* Set reception state to receive the next message header. */
+       ret = client_reset_inbound_state(client);
+       if (ret) {
+               ERR("Failed to reset client communication's inbound state");
+               goto end;
+       }
+
+       /* Flushes the outgoing queue. */
+       ret = client_send_command_reply(client, state, status);
+       if (ret) {
+               ERR("Failed to send reply to notification channel client");
+               goto end;
+       }
+
+       goto end;
+end_unlock:
+       pthread_mutex_unlock(&client->lock);
+end:
+       return ret;
+}
+
+static
+int client_handle_message_subscription(
+               struct notification_client *client,
+               enum lttng_notification_channel_message_type msg_type,
+               struct notification_thread_state *state)
+{
+       int ret;
+       struct lttng_condition *condition;
+       enum lttng_notification_channel_status status =
+                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
+       struct lttng_payload_view condition_view =
+                       lttng_payload_view_from_payload(
+                                       &client->communication.inbound.payload,
+                                       0, -1);
+       size_t expected_condition_size;
+
+       /*
+        * No need to lock client to sample the inbound state as the only
+        * other thread accessing clients (action executor) only uses the
+        * outbound state.
+        */
+       expected_condition_size = client->communication.inbound.payload.buffer.size;
+       ret = lttng_condition_create_from_payload(&condition_view, &condition);
+       if (ret != expected_condition_size) {
+               ERR("Malformed condition received from client");
+               goto end;
+       }
+
+       /* Ownership of condition is always transferred. */
+       if (msg_type == LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE) {
+               ret = notification_thread_client_subscribe(
+                               client, condition, state, &status);
+       } else {
+               ret = notification_thread_client_unsubscribe(
+                               client, condition, state, &status);
+       }
+
+       if (ret) {
+               goto end;
+       }
+
+       /* Set reception state to receive the next message header. */
+       ret = client_reset_inbound_state(client);
+       if (ret) {
+               ERR("Failed to reset client communication's inbound state");
+               goto end;
+       }
+
+       ret = client_send_command_reply(client, state, status);
+       if (ret) {
+               ERR("Failed to send reply to notification channel client");
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+static
+int client_dispatch_message(struct notification_client *client,
+               struct notification_thread_state *state)
+{
+       int ret = 0;
+
+       if (client->communication.inbound.msg_type !=
+                       LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE &&
+                       client->communication.inbound.msg_type !=
+                               LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN &&
+                       !client->validated) {
+               WARN("client attempted a command before handshake");
+               ret = -1;
+               goto end;
+       }
+
+       switch (client->communication.inbound.msg_type) {
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN:
+       {
+               ret = client_handle_message_unknown(client, state);
+               break;
+       }
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE:
+       {
+               ret = client_handle_message_handshake(client, state);
+               break;
+       }
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE:
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE:
+       {
+               ret = client_handle_message_subscription(client,
+                               client->communication.inbound.msg_type, state);
+               break;
+       }
+       default:
+               abort();
+       }
+end:
+       return ret;
+}
+
+/* Incoming data from client. */
+int handle_notification_thread_client_in(
+               struct notification_thread_state *state, int socket)
+{
+       int ret = 0;
+       struct notification_client *client;
+       ssize_t recv_ret;
+       size_t offset;
+
+       rcu_read_lock();
+       client = get_client_from_socket(socket, state);
+       if (!client) {
+               /* Internal error, abort. */
+               ret = -1;
+               goto end;
+       }
+
+       if (client->communication.inbound.bytes_to_receive == 0 &&
+                       client->communication.inbound.fds_to_receive != 0) {
+               /* Only FDs left to receive. */
+               goto receive_fds;
+       }
+
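+       /*
+        * Receive the remaining bytes at the current offset within the
+        * inbound payload buffer.
+        */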
+       offset = client->communication.inbound.payload.buffer.size -
+                       client->communication.inbound.bytes_to_receive;
+       if (client->communication.inbound.expect_creds) {
+               recv_ret = lttcomm_recv_creds_unix_sock(socket,
+                               client->communication.inbound.payload.buffer.data + offset,
+                               client->communication.inbound.bytes_to_receive,
+                               &client->communication.inbound.creds);
+               if (recv_ret > 0) {
+                       client->communication.inbound.expect_creds = false;
+                       client->communication.inbound.creds_received = true;
+               }
+       } else {
+               recv_ret = lttcomm_recv_unix_sock_non_block(socket,
+                               client->communication.inbound.payload.buffer.data + offset,
+                               client->communication.inbound.bytes_to_receive);
+       }
+       if (recv_ret >= 0) {
+               client->communication.inbound.bytes_to_receive -= recv_ret;
+       } else {
+               goto error_disconnect_client;
+       }
+
+       if (client->communication.inbound.bytes_to_receive != 0) {
+               /* Message incomplete; wait for more data. */
+               ret = 0;
+               goto end;
+       }
+
+receive_fds:
+       LTTNG_ASSERT(client->communication.inbound.bytes_to_receive == 0);
+
+       /* Receive fds. */
+       if (client->communication.inbound.fds_to_receive != 0) {
+               ret = lttcomm_recv_payload_fds_unix_sock_non_block(
+                               client->socket,
+                               client->communication.inbound.fds_to_receive,
+                               &client->communication.inbound.payload);
+               if (ret > 0) {
+                       /*
+                        * Fds received. Non-blocking fd passing is all
+                        * or nothing.
+                        */
+                       ssize_t expected_size;
+
+                       expected_size = sizeof(int) *
+                                       client->communication.inbound
+                                                       .fds_to_receive;
+                       LTTNG_ASSERT(ret == expected_size);
+                       client->communication.inbound.fds_to_receive = 0;
+               } else if (ret == 0) {
+                       /* Received nothing. */
+                       ret = 0;
+                       goto end;
+               } else {
+                       goto error_disconnect_client;
+               }
+       }
+
+       /* At this point, the message is complete. */
+       LTTNG_ASSERT(client->communication.inbound.bytes_to_receive == 0 &&
+                       client->communication.inbound.fds_to_receive == 0);
+       ret = client_dispatch_message(client, state);
+       if (ret) {
+               /*
+                * Only returns an error if this client must be
+                * disconnected.
+                */
+               goto error_disconnect_client;
+       }
+
+end:
+       rcu_read_unlock();
+       return ret;
+
+error_disconnect_client:
+       ret = notification_thread_client_disconnect(client, state);
+       goto end;
+}
+
+/* Client ready to receive outgoing data. */
+int handle_notification_thread_client_out(
+               struct notification_thread_state *state, int socket)
+{
+       int ret;
+       struct notification_client *client;
+       enum client_transmission_status transmission_status;
+
+       rcu_read_lock();
+       client = get_client_from_socket(socket, state);
+       if (!client) {
+               /* Internal error, abort. */
+               ret = -1;
+               goto end;
+       }
+
+       pthread_mutex_lock(&client->lock);
+       transmission_status = client_flush_outgoing_queue(client);
+       pthread_mutex_unlock(&client->lock);
+
+       ret = client_handle_transmission_status(
+                       client, transmission_status, state);
+       if (ret) {
+               goto end;
+       }
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+static
+bool evaluate_buffer_usage_condition(const struct lttng_condition *condition,
+               const struct channel_state_sample *sample,
+               uint64_t buffer_capacity)
+{
+       bool result = false;
+       uint64_t threshold;
+       enum lttng_condition_type condition_type;
+       const struct lttng_condition_buffer_usage *use_condition = container_of(
+                       condition, struct lttng_condition_buffer_usage,
+                       parent);
+
+       if (use_condition->threshold_bytes.set) {
+               threshold = use_condition->threshold_bytes.value;
+       } else {
+               /*
+                * Threshold was expressed as a ratio.
+                *
+                * TODO the threshold (in bytes) of conditions expressed
+                * as a ratio of total buffer size could be cached to
+                * forgo this double multiplication, or it could be performed
+                * as fixed-point math.
+                *
+                * Note that caching should accommodate the case where the
+                * condition applies to multiple channels (i.e. don't assume
+                * that all channels matching my_chann* have the same size...)
+                */
+               threshold = (uint64_t) (use_condition->threshold_ratio.value *
+                               (double) buffer_capacity);
+       }
+
+       condition_type = lttng_condition_get_type(condition);
+       if (condition_type == LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW) {
+               DBG("Low buffer usage condition being evaluated: threshold = %" PRIu64 ", highest usage = %" PRIu64,
+                               threshold, sample->highest_usage);
+
+               /*
+                * The low condition should only be triggered once _all_ of the
+                * streams in a channel have gone below the "low" threshold.
+                */
+               if (sample->highest_usage <= threshold) {
+                       result = true;
+               }
+       } else {
+               DBG("High buffer usage condition being evaluated: threshold = %" PRIu64 ", highest usage = %" PRIu64,
+                               threshold, sample->highest_usage);
+
+               /*
+                * For high buffer usage scenarios, we want to trigger whenever
+                * _any_ of the streams has reached the "high" threshold.
+                */
+               if (sample->highest_usage >= threshold) {
+                       result = true;
+               }
+       }
+
+       return result;
+}
+
+static
+bool evaluate_session_consumed_size_condition(
+               const struct lttng_condition *condition,
+               uint64_t session_consumed_size)
+{
+       uint64_t threshold;
+       const struct lttng_condition_session_consumed_size *size_condition =
+                       container_of(condition,
+                               struct lttng_condition_session_consumed_size,
+                               parent);
+
+       threshold = size_condition->consumed_threshold_bytes.value;
+       DBG("Session consumed size condition being evaluated: threshold = %" PRIu64 ", current size = %" PRIu64,
+                       threshold, session_consumed_size);
+       return session_consumed_size >= threshold;
+}
+
+static
+int evaluate_buffer_condition(const struct lttng_condition *condition,
+               struct lttng_evaluation **evaluation,
+               const struct notification_thread_state *state,
+               const struct channel_state_sample *previous_sample,
+               const struct channel_state_sample *latest_sample,
+               uint64_t previous_session_consumed_total,
+               uint64_t latest_session_consumed_total,
+               struct channel_info *channel_info)
+{
+       int ret = 0;
+       enum lttng_condition_type condition_type;
+       const bool previous_sample_available = !!previous_sample;
+       bool previous_sample_result = false;
+       bool latest_sample_result;
+
+       condition_type = lttng_condition_get_type(condition);
+
+       switch (condition_type) {
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+               if (caa_likely(previous_sample_available)) {
+                       previous_sample_result =
+                               evaluate_buffer_usage_condition(condition,
+                                       previous_sample, channel_info->capacity);
+               }
+               latest_sample_result = evaluate_buffer_usage_condition(
+                               condition, latest_sample,
+                               channel_info->capacity);
+               break;
+       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
+               if (caa_likely(previous_sample_available)) {
+                       previous_sample_result =
+                               evaluate_session_consumed_size_condition(
+                                       condition,
+                                       previous_session_consumed_total);
+               }
+               latest_sample_result =
+                               evaluate_session_consumed_size_condition(
+                                       condition,
+                                       latest_session_consumed_total);
+               break;
+       default:
+               /* Unknown condition type; internal error. */
+               abort();
+       }
+
+       if (!latest_sample_result ||
+                       (previous_sample_result == latest_sample_result)) {
+               /*
+                * Only trigger on a condition evaluation transition.
+                *
+                * NOTE: This edge-triggered logic may not be appropriate for
+                * future condition types.
+                */
+               goto end;
+       }
+
+       if (!evaluation || !latest_sample_result) {
+               goto end;
+       }
+
+       switch (condition_type) {
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+               *evaluation = lttng_evaluation_buffer_usage_create(
+                               condition_type,
+                               latest_sample->highest_usage,
+                               channel_info->capacity);
+               break;
+       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
+               *evaluation = lttng_evaluation_session_consumed_size_create(
+                               latest_session_consumed_total);
+               break;
+       default:
+               abort();
+       }
+
+       if (!*evaluation) {
+               ret = -1;
+               goto end;
+       }
+end:
+       return ret;
+}
+
+static
+int client_notification_overflow(struct notification_client *client)
+{
+       int ret = 0;
+       const struct lttng_notification_channel_message msg = {
+               .type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_NOTIFICATION_DROPPED,
+       };
+
+       ASSERT_LOCKED(client->lock);
+
+       DBG("Dropping notification addressed to client (socket fd = %i)",
+                       client->socket);
+       if (client->communication.outbound.dropped_notification) {
+               /*
+                * The client already has a "notification dropped" message
+                * in its outgoing queue. Nothing to do since all
+                * of those messages are coalesced.
+                */
+               goto end;
+       }
+
+       client->communication.outbound.dropped_notification = true;
+       ret = lttng_dynamic_buffer_append(
+                       &client->communication.outbound.payload.buffer, &msg,
+                       sizeof(msg));
+       if (ret) {
+               PERROR("Failed to enqueue \"dropped notification\" message in client's (socket fd = %i) outgoing queue",
+                               client->socket);
+       }
+end:
+       return ret;
+}
+
+static int client_handle_transmission_status_wrapper(
+               struct notification_client *client,
+               enum client_transmission_status status,
+               void *user_data)
+{
+       return client_handle_transmission_status(client, status,
+                       (struct notification_thread_state *) user_data);
+}
+
+static
+int send_evaluation_to_clients(const struct lttng_trigger *trigger,
+               const struct lttng_evaluation *evaluation,
+               struct notification_client_list* client_list,
+               struct notification_thread_state *state,
+               uid_t object_uid, gid_t object_gid)
+{
+       const struct lttng_credentials creds = {
+               .uid = LTTNG_OPTIONAL_INIT_VALUE(object_uid),
+               .gid = LTTNG_OPTIONAL_INIT_VALUE(object_gid),
+       };
+
+       return notification_client_list_send_evaluation(client_list,
+                       trigger, evaluation,
+                       &creds,
+                       client_handle_transmission_status_wrapper, state);
+}
+
+/*
+ * Permission checks relative to notification channel clients are performed
+ * here. Notice how object, client, and trigger credentials are involved in
+ * this check.
+ *
+ * The `object` credentials are the credentials associated with the "subject"
+ * of a condition. For instance, a `rotation completed` condition applies
+ * to a session. When that condition is met, it will produce an evaluation
+ * against a session. Hence, in this case, the `object` credentials are the
+ * credentials of the "subject" session.
+ *
+ * The `trigger` credentials are the credentials of the user that registered the
+ * trigger.
+ *
+ * The `client` credentials are the credentials of the user that created a given
+ * notification channel.
+ *
+ * In terms of visibility, it is expected that non-privileged users can only
+ * register triggers against "their" objects (their own sessions and
+ * applications they are allowed to interact with). They can then open a
+ * notification channel and subscribe to notifications associated with those
+ * triggers.
+ *
+ * As for privileged users, they can register triggers against the objects of
+ * other users. They can then subscribe to the notifications associated to their
+ * triggers. Privileged users _can't_ subscribe to the notifications of
+ * triggers owned by other users; they must create their own triggers.
+ *
+ * This is more a concern of usability than security. It would be difficult for
+ * a root user to reliably subscribe to a specific set of conditions without
+ * interference from external users (those could, for instance, unregister
+ * their triggers).
+ */
+int notification_client_list_send_evaluation(
+               struct notification_client_list *client_list,
+               const struct lttng_trigger *trigger,
+               const struct lttng_evaluation *evaluation,
+               const struct lttng_credentials *source_object_creds,
+               report_client_transmission_result_cb client_report,
+               void *user_data)
+{
+       int ret = 0;
+       struct lttng_payload msg_payload;
+       struct notification_client_list_element *client_list_element, *tmp;
+       const struct lttng_notification notification = {
+               .trigger = (struct lttng_trigger *) trigger,
+               .evaluation = (struct lttng_evaluation *) evaluation,
+       };
+       struct lttng_notification_channel_message msg_header = {
+               .type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_NOTIFICATION,
+       };
+       const struct lttng_credentials *trigger_creds =
+                       lttng_trigger_get_credentials(trigger);
+
+       lttng_payload_init(&msg_payload);
+
+       ret = lttng_dynamic_buffer_append(&msg_payload.buffer, &msg_header,
+                       sizeof(msg_header));
+       if (ret) {
+               goto end;
+       }
+
+       ret = lttng_notification_serialize(&notification, &msg_payload);
+       if (ret) {
+               ERR("Failed to serialize notification");
+               ret = -1;
+               goto end;
+       }
+
+       /* Update payload size. */
+       ((struct lttng_notification_channel_message *) msg_payload.buffer.data)
+                       ->size = (uint32_t)(
+                       msg_payload.buffer.size - sizeof(msg_header));
+
+       /* Update the payload number of fds. */
+       {
+               const struct lttng_payload_view pv = lttng_payload_view_from_payload(
+                               &msg_payload, 0, -1);
+
+               ((struct lttng_notification_channel_message *)
+                               msg_payload.buffer.data)->fds = (uint32_t)
+                               lttng_payload_view_get_fd_handle_count(&pv);
+       }
+
+       pthread_mutex_lock(&client_list->lock);
+       cds_list_for_each_entry_safe(client_list_element, tmp,
+                       &client_list->clients_list, node) {
+               enum client_transmission_status transmission_status;
+               struct notification_client *client =
+                               client_list_element->client;
+
+               ret = 0;
+               pthread_mutex_lock(&client->lock);
+               if (!client->communication.active) {
+                       /*
+                        * Skip inactive client (protocol error or
+                        * disconnecting).
+                        */
+                       DBG("Skipping client at it is marked as inactive");
+                       goto skip_client;
+               }
+
+               if (lttng_trigger_is_hidden(trigger) && !client->is_sessiond) {
+                       /*
+                        * Notifications resulting from a hidden trigger are
+                        * only sent to the session daemon.
+                        */
+                       DBG("Skipping client as the trigger is hidden and the client is not the session daemon");
+                       goto skip_client;
+               }
+
+               if (source_object_creds) {
+                       if (client->uid != lttng_credentials_get_uid(source_object_creds) &&
+                                       client->gid != lttng_credentials_get_gid(source_object_creds) &&
+                                       client->uid != 0) {
+                               /*
+                                * Client is not allowed to monitor this
+                                * object.
+                                */
+                               DBG("Skipping client at it does not have the object permission to receive notification for this trigger");
+                               goto skip_client;
+                       }
+               }
+
+               if (client->uid != lttng_credentials_get_uid(trigger_creds)) {
+                       DBG("Skipping client at it does not have the permission to receive notification for this trigger");
+                       goto skip_client;
+               }
+
+               DBG("Sending notification to client (fd = %i, %zu bytes)",
+                               client->socket, msg_payload.buffer.size);
+
+               if (client_has_outbound_data_left(client)) {
+                       /*
+                        * Outgoing data is already buffered for this client;
+                        * drop the notification and enqueue a "dropped
+                        * notification" message if this is the first dropped
+                        * notification since the socket spilled-over to the
+                        * queue.
+                        */
+                       ret = client_notification_overflow(client);
+                       if (ret) {
+                               /* Fatal error. */
+                               goto skip_client;
+                       }
+               }
+
+               ret = lttng_payload_copy(&msg_payload, &client->communication.outbound.payload);
+               if (ret) {
+                       /* Fatal error. */
+                       goto skip_client;
+               }
+
+               transmission_status = client_flush_outgoing_queue(client);
+               pthread_mutex_unlock(&client->lock);
+               ret = client_report(client, transmission_status, user_data);
+               if (ret) {
+                       /* Fatal error. */
+                       goto end_unlock_list;
+               }
+
+               continue;
+
+skip_client:
+               pthread_mutex_unlock(&client->lock);
+               if (ret) {
+                       /* Fatal error. */
+                       goto end_unlock_list;
+               }
+       }
+       ret = 0;
+
+end_unlock_list:
+       pthread_mutex_unlock(&client_list->lock);
+end:
+       lttng_payload_reset(&msg_payload);
+       return ret;
+}
+
+static
+struct lttng_event_notifier_notification *recv_one_event_notifier_notification(
+               int notification_pipe_read_fd, enum lttng_domain_type domain)
+{
+       int ret;
+       uint64_t token;
+       struct lttng_event_notifier_notification *notification = NULL;
+       char *capture_buffer = NULL;
+       size_t capture_buffer_size;
+       void *reception_buffer;
+       size_t reception_size;
+
+       struct lttng_ust_abi_event_notifier_notification ust_notification;
+       struct lttng_kernel_abi_event_notifier_notification kernel_notification;
+
+       /* Select the reception buffer matching the tracer domain's ABI. */
+       switch(domain) {
+       case LTTNG_DOMAIN_UST:
+               reception_buffer = (void *) &ust_notification;
+               reception_size = sizeof(ust_notification);
+               break;
+       case LTTNG_DOMAIN_KERNEL:
+               reception_buffer = (void *) &kernel_notification;
+               reception_size = sizeof(kernel_notification);
+               break;
+       default:
+               abort();
+       }
+
+       /*
+        * The monitoring pipe only holds messages smaller than PIPE_BUF,
+        * ensuring that reads and writes of tracer notifications are atomic.
+        */
+       ret = lttng_read(notification_pipe_read_fd, reception_buffer,
+                       reception_size);
+       if (ret != reception_size) {
+               PERROR("Failed to read from event source notification pipe: fd = %d, size to read = %zu, ret = %d",
+                               notification_pipe_read_fd, reception_size, ret);
+               ret = -1;
+               goto end;
+       }
+
+       switch(domain) {
+       case LTTNG_DOMAIN_UST:
+               token = ust_notification.token;
+               capture_buffer_size = ust_notification.capture_buf_size;
+               break;
+       case LTTNG_DOMAIN_KERNEL:
+               token = kernel_notification.token;
+               capture_buffer_size = kernel_notification.capture_buf_size;
+               break;
+       default:
+               abort();
+       }
+
+       if (capture_buffer_size == 0) {
+               capture_buffer = NULL;
+               goto skip_capture;
+       }
+
+       if (capture_buffer_size > MAX_CAPTURE_SIZE) {
+               ERR("Event notifier has a capture payload size which exceeds the maximum allowed size: capture_payload_size = %zu bytes, max allowed size = %d bytes",
+                               capture_buffer_size, MAX_CAPTURE_SIZE);
+               goto end;
+       }
+
+       capture_buffer = (char *) zmalloc(capture_buffer_size);
+       if (!capture_buffer) {
+               ERR("Failed to allocate capture buffer");
+               goto end;
+       }
+
+       /* Fetch additional payload (capture). */
+       ret = lttng_read(notification_pipe_read_fd, capture_buffer, capture_buffer_size);
+       if (ret != capture_buffer_size) {
+               ERR("Failed to read from event source pipe (fd = %i)",
+                               notification_pipe_read_fd);
+               goto end;
+       }
+
+skip_capture:
+       notification = lttng_event_notifier_notification_create(token, domain,
+                       capture_buffer, capture_buffer_size);
+       if (notification == NULL) {
+               goto end;
+       }
+
+       /*
+        * Ownership transferred to the lttng_event_notifier_notification object.
+        */
+       capture_buffer = NULL;
+
+end:
+       free(capture_buffer);
+       return notification;
+}
+
+static
+int dispatch_one_event_notifier_notification(struct notification_thread_state *state,
+               struct lttng_event_notifier_notification *notification)
+{
+       struct cds_lfht_node *node;
+       struct cds_lfht_iter iter;
+       struct notification_trigger_tokens_ht_element *element;
+       struct lttng_evaluation *evaluation = NULL;
+       enum action_executor_status executor_status;
+       struct notification_client_list *client_list = NULL;
+       int ret;
+       unsigned int capture_count = 0;
+
+       /* Find triggers associated with this token. */
+       rcu_read_lock();
+       cds_lfht_lookup(state->trigger_tokens_ht,
+                       hash_key_u64(&notification->tracer_token, lttng_ht_seed),
+                       match_trigger_token, &notification->tracer_token, &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (caa_unlikely(!node)) {
+               /*
+                * This is not an error; slow consumption of the tracer
+                * notifications can lead to situations where a trigger is
+                * removed but we still get tracer notifications matching a
+                * trigger that no longer exists.
+                */
+               ret = 0;
+               goto end_unlock;
+       }
+
+       element = caa_container_of(node,
+                       struct notification_trigger_tokens_ht_element,
+                       node);
+
+       if (lttng_condition_event_rule_matches_get_capture_descriptor_count(
+                           lttng_trigger_get_const_condition(element->trigger),
+                           &capture_count) != LTTNG_CONDITION_STATUS_OK) {
+               ERR("Failed to get capture count");
+               ret = -1;
+               goto end;
+       }
+
+       if (!notification->capture_buffer && capture_count != 0) {
+               ERR("Expected capture but capture buffer is null");
+               ret = -1;
+               goto end;
+       }
+
+       evaluation = lttng_evaluation_event_rule_matches_create(
+                       container_of(lttng_trigger_get_const_condition(
+                                                    element->trigger),
+                                       struct lttng_condition_event_rule_matches,
+                                       parent),
+                       notification->capture_buffer,
+                       notification->capture_buf_size, false);
+
+       if (evaluation == NULL) {
+               ERR("Failed to create event rule matches evaluation while creating and enqueuing action executor job");
+               ret = -1;
+               goto end_unlock;
+       }
+
+       client_list = get_client_list_from_condition(state,
+                       lttng_trigger_get_const_condition(element->trigger));
+       executor_status = action_executor_enqueue_trigger(state->executor,
+                       element->trigger, evaluation, NULL, client_list);
+       switch (executor_status) {
+       case ACTION_EXECUTOR_STATUS_OK:
+               ret = 0;
+               break;
+       case ACTION_EXECUTOR_STATUS_OVERFLOW:
+       {
+               struct notification_client_list_element *client_list_element,
+                               *tmp;
+
+               /*
+                * Not a fatal error; this is expected and simply means the
+                * executor has too much work queued already.
+                */
+               ret = 0;
+
+               /* No clients subscribed to notifications for this trigger. */
+               if (!client_list) {
+                       break;
+               }
+
+               /* Warn clients that a notification (or more) was dropped. */
+               pthread_mutex_lock(&client_list->lock);
+               cds_list_for_each_entry_safe(client_list_element, tmp,
+                               &client_list->clients_list, node) {
+                       enum client_transmission_status transmission_status;
+                       struct notification_client *client =
+                                       client_list_element->client;
+
+                       pthread_mutex_lock(&client->lock);
+                       ret = client_notification_overflow(client);
+                       if (ret) {
+                               /* Fatal error. */
+                               goto next_client;
+                       }
+
+                       transmission_status =
+                                       client_flush_outgoing_queue(client);
+                       ret = client_handle_transmission_status(
+                                       client, transmission_status, state);
+                       if (ret) {
+                               /* Fatal error. */
+                               goto next_client;
+                       }
+next_client:
+                       pthread_mutex_unlock(&client->lock);
+                       if (ret) {
+                               break;
+                       }
+               }
+
+               pthread_mutex_unlock(&client_list->lock);
+               break;
+       }
+       case ACTION_EXECUTOR_STATUS_INVALID:
+       case ACTION_EXECUTOR_STATUS_ERROR:
+               /* Fatal error, shut down everything. */
+               ERR("Fatal error encoutered while enqueuing action to the action executor");
+               ret = -1;
+               goto end_unlock;
+       default:
+               /* Unhandled error. */
+               abort();
+       }
+
+end_unlock:
+       notification_client_list_put(client_list);
+       rcu_read_unlock();
+end:
+       return ret;
+}
+
+static
+int handle_one_event_notifier_notification(
+               struct notification_thread_state *state,
+               int pipe, enum lttng_domain_type domain)
+{
+       int ret = 0;
+       struct lttng_event_notifier_notification *notification = NULL;
+
+       notification = recv_one_event_notifier_notification(pipe, domain);
+       if (notification == NULL) {
+               /* Reception failed; don't consider it fatal. */
+               ERR("Error receiving an event notifier notification from tracer: fd = %i, domain = %s",
+                               pipe, lttng_domain_type_str(domain));
+               goto end;
+       }
+
+       ret = dispatch_one_event_notifier_notification(state, notification);
+       if (ret) {
+               ERR("Error dispatching an event notifier notification from tracer: fd = %i, domain = %s",
+                               pipe, lttng_domain_type_str(domain));
+               goto end;
+       }
+
+end:
+       lttng_event_notifier_notification_destroy(notification);
+       return ret;
+}
+
+int handle_notification_thread_event_notification(struct notification_thread_state *state,
+               int pipe, enum lttng_domain_type domain)
+{
+       return handle_one_event_notifier_notification(state, pipe, domain);
+}
+
+int handle_notification_thread_channel_sample(
+               struct notification_thread_state *state, int pipe,
+               enum lttng_domain_type domain)
+{
+       int ret = 0;
+       struct lttcomm_consumer_channel_monitor_msg sample_msg;
+       struct channel_info *channel_info;
+       struct cds_lfht_node *node;
+       struct cds_lfht_iter iter;
+       struct lttng_channel_trigger_list *trigger_list;
+       struct lttng_trigger_list_element *trigger_list_element;
+       bool previous_sample_available = false;
+       struct channel_state_sample previous_sample, latest_sample;
+       uint64_t previous_session_consumed_total, latest_session_consumed_total;
+       struct lttng_credentials channel_creds;
+
+       /*
+        * The monitoring pipe only holds messages smaller than PIPE_BUF,
+        * ensuring that reads and writes of sampling messages are atomic.
+        */
+       ret = lttng_read(pipe, &sample_msg, sizeof(sample_msg));
+       if (ret != sizeof(sample_msg)) {
+               ERR("Failed to read from monitoring pipe (fd = %i)",
+                               pipe);
+               ret = -1;
+               goto end;
+       }
+
+       ret = 0;
+       latest_sample.key.key = sample_msg.key;
+       latest_sample.key.domain = domain;
+       latest_sample.highest_usage = sample_msg.highest;
+       latest_sample.lowest_usage = sample_msg.lowest;
+       latest_sample.channel_total_consumed = sample_msg.total_consumed;
+
+       rcu_read_lock();
+
+       /* Retrieve the channel's information. */
+       cds_lfht_lookup(state->channels_ht,
+                       hash_channel_key(&latest_sample.key),
+                       match_channel_info,
+                       &latest_sample.key,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (caa_unlikely(!node)) {
+               /*
+                * Not an error since the consumer can push a sample to the pipe
+                * and the rest of the session daemon could notify us of the
+                * channel's destruction before we get a chance to process that
+                * sample.
+                */
+               DBG("Received a sample for an unknown channel from consumerd, key = %" PRIu64 " in %s domain",
+                               latest_sample.key.key,
+                               lttng_domain_type_str(domain));
+               goto end_unlock;
+       }
+       channel_info = caa_container_of(node, struct channel_info,
+                       channels_ht_node);
+       DBG("Handling channel sample for channel %s (key = %" PRIu64 ") in session %s (highest usage = %" PRIu64 ", lowest usage = %" PRIu64", total consumed = %" PRIu64")",
+                       channel_info->name,
+                       latest_sample.key.key,
+                       channel_info->session_info->name,
+                       latest_sample.highest_usage,
+                       latest_sample.lowest_usage,
+                       latest_sample.channel_total_consumed);
+
+       previous_session_consumed_total =
+                       channel_info->session_info->consumed_data_size;
+
+       /* Retrieve the channel's last sample, if it exists, and update it. */
+       cds_lfht_lookup(state->channel_state_ht,
+                       hash_channel_key(&latest_sample.key),
+                       match_channel_state_sample,
+                       &latest_sample.key,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (caa_likely(node)) {
+               struct channel_state_sample *stored_sample;
+
+               /* Update the sample stored. */
+               stored_sample = caa_container_of(node,
+                               struct channel_state_sample,
+                               channel_state_ht_node);
+
+               memcpy(&previous_sample, stored_sample,
+                               sizeof(previous_sample));
+               stored_sample->highest_usage = latest_sample.highest_usage;
+               stored_sample->lowest_usage = latest_sample.lowest_usage;
+               stored_sample->channel_total_consumed = latest_sample.channel_total_consumed;
+               previous_sample_available = true;
+
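+               /*
+                * Add the data consumed by this channel since the previous
+                * sample to the session-wide consumed total.
+                */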
+               latest_session_consumed_total =
+                               previous_session_consumed_total +
+                               (latest_sample.channel_total_consumed - previous_sample.channel_total_consumed);
+       } else {
+               /*
+                * This is the channel's first sample, allocate space for and
+                * store the new sample.
+                */
+               struct channel_state_sample *stored_sample;
+
+               stored_sample = (channel_state_sample *) zmalloc(sizeof(*stored_sample));
+               if (!stored_sample) {
+                       ret = -1;
+                       goto end_unlock;
+               }
+
+               memcpy(stored_sample, &latest_sample, sizeof(*stored_sample));
+               cds_lfht_node_init(&stored_sample->channel_state_ht_node);
+               cds_lfht_add(state->channel_state_ht,
+                               hash_channel_key(&stored_sample->key),
+                               &stored_sample->channel_state_ht_node);
+
+               latest_session_consumed_total =
+                               previous_session_consumed_total +
+                               latest_sample.channel_total_consumed;
+       }
+
+       channel_info->session_info->consumed_data_size =
+                       latest_session_consumed_total;
+
+       /* Find triggers associated with this channel. */
+       cds_lfht_lookup(state->channel_triggers_ht,
+                       hash_channel_key(&latest_sample.key),
+                       match_channel_trigger_list,
+                       &latest_sample.key,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (caa_likely(!node)) {
+               goto end_unlock;
+       }
+
+       channel_creds = (typeof(channel_creds)) {
+               .uid = LTTNG_OPTIONAL_INIT_VALUE(channel_info->session_info->uid),
+               .gid = LTTNG_OPTIONAL_INIT_VALUE(channel_info->session_info->gid),
+       };
+
+       trigger_list = caa_container_of(node, struct lttng_channel_trigger_list,
+                       channel_triggers_ht_node);
+       cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
+                       node) {
+               const struct lttng_condition *condition;
+               struct lttng_trigger *trigger;
+               struct notification_client_list *client_list = NULL;
+               struct lttng_evaluation *evaluation = NULL;
+               enum action_executor_status executor_status;
+
+               ret = 0;
+               trigger = trigger_list_element->trigger;
+               condition = lttng_trigger_get_const_condition(trigger);
+               LTTNG_ASSERT(condition);
+
+               /*
+                * Check if any client is subscribed to the result of this
+                * evaluation.
+                */
+               client_list = get_client_list_from_condition(state, condition);
+
+               ret = evaluate_buffer_condition(condition, &evaluation, state,
+                               previous_sample_available ? &previous_sample : NULL,
+                               &latest_sample,
+                               previous_session_consumed_total,
+                               latest_session_consumed_total,
+                               channel_info);
+               if (caa_unlikely(ret)) {
+                       goto put_list;
+               }
+
+               if (caa_likely(!evaluation)) {
+                       goto put_list;
+               }
+
+               /*
+                * Ownership of `evaluation` transferred to the action executor
+                * no matter the result.
+                */
+               executor_status = action_executor_enqueue_trigger(
+                               state->executor, trigger, evaluation,
+                               &channel_creds, client_list);
+               evaluation = NULL;
+               switch (executor_status) {
+               case ACTION_EXECUTOR_STATUS_OK:
+                       break;
+               case ACTION_EXECUTOR_STATUS_ERROR:
+               case ACTION_EXECUTOR_STATUS_INVALID:
+                       /*
+                        * TODO Add trigger identification (name/id) when
+                        * it is added to the API.
+                        */
+                       ERR("Fatal error occurred while enqueuing action associated with buffer-condition trigger");
+                       ret = -1;
+                       goto put_list;
+               case ACTION_EXECUTOR_STATUS_OVERFLOW:
+                       /*
+                        * TODO Add trigger identification (name/id) when
+                        * it is added to the API.
+                        *
+                        * Not a fatal error.
+                        */
+                       WARN("No space left when enqueuing action associated with buffer-condition trigger");
+                       ret = 0;
+                       goto put_list;
+               default:
+                       abort();
+               }
+
+put_list:
+               notification_client_list_put(client_list);
+               if (caa_unlikely(ret)) {
+                       break;
+               }
+       }
+end_unlock:
+       rcu_read_unlock();
+end:
+       return ret;
+}
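
The hunk above stores the latest sample in channel_state_ht and then looks up the channel's trigger list in channel_triggers_ht, both liburcu lock-free hash tables. The standalone C++ sketch below shows that same insert/lookup pattern: initialize the embedded node, add it under the RCU read-side lock using a hash of the key, then look it up with a hash, a match callback and a key pointer, and walk back to the enclosing struct with caa_container_of. All type and function names are illustrative, not code from this tree, and teardown is only hinted at in a comment.

    /*
     * Standalone sketch of the liburcu hash-table pattern used above.
     * Illustrative names only; links against liburcu and liburcu-cds.
     */
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    #include <urcu.h>               /* RCU thread registration and read-side lock. */
    #include <urcu/compiler.h>      /* caa_container_of. */
    #include <urcu/rculfhash.h>     /* cds_lfht API. */

    struct channel_entry {
            std::uint64_t key;
            std::uint64_t consumed;
            struct cds_lfht_node ht_node;
    };

    /* Match callback: return non-zero when `node` corresponds to `key`. */
    static int match_channel(struct cds_lfht_node *node, const void *key)
    {
            const struct channel_entry *entry =
                            caa_container_of(node, struct channel_entry, ht_node);

            return entry->key == *(const std::uint64_t *) key;
    }

    int main()
    {
            struct cds_lfht_iter iter;
            struct cds_lfht_node *node;
            const std::uint64_t key = 42;

            rcu_register_thread();

            struct cds_lfht *ht = cds_lfht_new(1, 1, 0,
                            CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
            if (!ht) {
                    return 1;
            }

            /* Same cast-on-allocation idiom as above: C++ has no implicit void * conversion. */
            struct channel_entry *entry =
                            (channel_entry *) std::calloc(1, sizeof(*entry));
            if (!entry) {
                    return 1;
            }

            entry->key = key;
            entry->consumed = 128;

            rcu_read_lock();
            cds_lfht_node_init(&entry->ht_node);
            /* The raw key stands in for a real hash function here. */
            cds_lfht_add(ht, (unsigned long) entry->key, &entry->ht_node);

            cds_lfht_lookup(ht, (unsigned long) key, match_channel, &key, &iter);
            node = cds_lfht_iter_get_node(&iter);
            if (node) {
                    const struct channel_entry *found = caa_container_of(
                                    node, struct channel_entry, ht_node);

                    std::printf("consumed = %lu\n", (unsigned long) found->consumed);
            }
            rcu_read_unlock();

            /*
             * A complete program would cds_lfht_del() the entries, free them
             * after a grace period, and cds_lfht_destroy() the table outside
             * any read-side critical section.
             */
            rcu_unregister_thread();
            return 0;
    }
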
diff --git a/src/bin/lttng-sessiond/notification-thread.c b/src/bin/lttng-sessiond/notification-thread.c
deleted file mode 100644
index c15e356..0000000
--- a/src/bin/lttng-sessiond/notification-thread.c
+++ /dev/null
@@ -1,817 +0,0 @@
-/*
- * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <lttng/trigger/trigger.h>
-#include <lttng/notification/channel-internal.h>
-#include <lttng/notification/notification-internal.h>
-#include <lttng/condition/condition-internal.h>
-#include <lttng/condition/buffer-usage-internal.h>
-#include <common/error.h>
-#include <common/config/session-config.h>
-#include <common/defaults.h>
-#include <common/utils.h>
-#include <common/align.h>
-#include <common/time.h>
-#include <sys/stat.h>
-#include <time.h>
-#include <signal.h>
-
-#include "notification-thread.h"
-#include "notification-thread-events.h"
-#include "notification-thread-commands.h"
-#include "lttng-sessiond.h"
-#include "health-sessiond.h"
-#include "thread.h"
-#include "testpoint.h"
-
-#include "kernel.h"
-#include <common/kernel-ctl/kernel-ctl.h>
-
-#include <urcu.h>
-#include <urcu/list.h>
-#include <urcu/rculfhash.h>
-
-
-int notifier_consumption_paused;
-/*
- * Destroy the thread data previously created by the init function.
- */
-void notification_thread_handle_destroy(
-               struct notification_thread_handle *handle)
-{
-       int ret;
-
-       if (!handle) {
-               goto end;
-       }
-
-       LTTNG_ASSERT(cds_list_empty(&handle->cmd_queue.list));
-       pthread_mutex_destroy(&handle->cmd_queue.lock);
-       sem_destroy(&handle->ready);
-
-       if (handle->cmd_queue.event_pipe) {
-               lttng_pipe_destroy(handle->cmd_queue.event_pipe);
-       }
-       if (handle->channel_monitoring_pipes.ust32_consumer >= 0) {
-               ret = close(handle->channel_monitoring_pipes.ust32_consumer);
-               if (ret) {
-                       PERROR("close 32-bit consumer channel monitoring pipe");
-               }
-       }
-       if (handle->channel_monitoring_pipes.ust64_consumer >= 0) {
-               ret = close(handle->channel_monitoring_pipes.ust64_consumer);
-               if (ret) {
-                       PERROR("close 64-bit consumer channel monitoring pipe");
-               }
-       }
-       if (handle->channel_monitoring_pipes.kernel_consumer >= 0) {
-               ret = close(handle->channel_monitoring_pipes.kernel_consumer);
-               if (ret) {
-                       PERROR("close kernel consumer channel monitoring pipe");
-               }
-       }
-
-end:
-       free(handle);
-}
-
-struct notification_thread_handle *notification_thread_handle_create(
-               struct lttng_pipe *ust32_channel_monitor_pipe,
-               struct lttng_pipe *ust64_channel_monitor_pipe,
-               struct lttng_pipe *kernel_channel_monitor_pipe)
-{
-       int ret;
-       struct notification_thread_handle *handle;
-       struct lttng_pipe *event_pipe = NULL;
-
-       handle = zmalloc(sizeof(*handle));
-       if (!handle) {
-               goto end;
-       }
-
-       sem_init(&handle->ready, 0, 0);
-
-       event_pipe = lttng_pipe_open(FD_CLOEXEC);
-       if (!event_pipe) {
-               ERR("event_pipe creation");
-               goto error;
-       }
-
-       handle->cmd_queue.event_pipe = event_pipe;
-       event_pipe = NULL;
-
-       CDS_INIT_LIST_HEAD(&handle->cmd_queue.list);
-       ret = pthread_mutex_init(&handle->cmd_queue.lock, NULL);
-       if (ret) {
-               goto error;
-       }
-
-       if (ust32_channel_monitor_pipe) {
-               handle->channel_monitoring_pipes.ust32_consumer =
-                               lttng_pipe_release_readfd(
-                                       ust32_channel_monitor_pipe);
-               if (handle->channel_monitoring_pipes.ust32_consumer < 0) {
-                       goto error;
-               }
-       } else {
-               handle->channel_monitoring_pipes.ust32_consumer = -1;
-       }
-       if (ust64_channel_monitor_pipe) {
-               handle->channel_monitoring_pipes.ust64_consumer =
-                               lttng_pipe_release_readfd(
-                                       ust64_channel_monitor_pipe);
-               if (handle->channel_monitoring_pipes.ust64_consumer < 0) {
-                       goto error;
-               }
-       } else {
-               handle->channel_monitoring_pipes.ust64_consumer = -1;
-       }
-       if (kernel_channel_monitor_pipe) {
-               handle->channel_monitoring_pipes.kernel_consumer =
-                               lttng_pipe_release_readfd(
-                                       kernel_channel_monitor_pipe);
-               if (handle->channel_monitoring_pipes.kernel_consumer < 0) {
-                       goto error;
-               }
-       } else {
-               handle->channel_monitoring_pipes.kernel_consumer = -1;
-       }
-
-end:
-       return handle;
-error:
-       lttng_pipe_destroy(event_pipe);
-       notification_thread_handle_destroy(handle);
-       return NULL;
-}
-
-static
-char *get_notification_channel_sock_path(void)
-{
-       int ret;
-       bool is_root = !getuid();
-       char *sock_path;
-
-       sock_path = zmalloc(LTTNG_PATH_MAX);
-       if (!sock_path) {
-               goto error;
-       }
-
-       if (is_root) {
-               ret = snprintf(sock_path, LTTNG_PATH_MAX,
-                               DEFAULT_GLOBAL_NOTIFICATION_CHANNEL_UNIX_SOCK);
-               if (ret < 0) {
-                       goto error;
-               }
-       } else {
-               const char *home_path = utils_get_home_dir();
-
-               if (!home_path) {
-                       ERR("Can't get HOME directory for socket creation");
-                       goto error;
-               }
-
-               ret = snprintf(sock_path, LTTNG_PATH_MAX,
-                               DEFAULT_HOME_NOTIFICATION_CHANNEL_UNIX_SOCK,
-                               home_path);
-               if (ret < 0) {
-                       goto error;
-               }
-       }
-
-       return sock_path;
-error:
-       free(sock_path);
-       return NULL;
-}
-
-static
-void notification_channel_socket_destroy(int fd)
-{
-       int ret;
-       char *sock_path = get_notification_channel_sock_path();
-
-       DBG("Destroying notification channel socket");
-
-       if (sock_path) {
-               ret = unlink(sock_path);
-               free(sock_path);
-               if (ret < 0) {
-                       PERROR("unlink notification channel socket");
-               }
-       }
-
-       ret = close(fd);
-       if (ret) {
-               PERROR("close notification channel socket");
-       }
-}
-
-static
-int notification_channel_socket_create(void)
-{
-       int fd = -1, ret;
-       char *sock_path = get_notification_channel_sock_path();
-
-       DBG("Creating notification channel UNIX socket at %s",
-                       sock_path);
-
-       ret = lttcomm_create_unix_sock(sock_path);
-       if (ret < 0) {
-               ERR("Failed to create notification socket");
-               goto error;
-       }
-       fd = ret;
-
-       ret = chmod(sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
-       if (ret < 0) {
-               ERR("Set file permissions failed: %s", sock_path);
-               PERROR("chmod notification channel socket");
-               goto error;
-       }
-
-       if (getuid() == 0) {
-               gid_t gid;
-
-               ret = utils_get_group_id(the_config.tracing_group_name.value,
-                               true, &gid);
-               if (ret) {
-                       /* Default to root group. */
-                       gid = 0;
-               }
-
-               ret = chown(sock_path, 0, gid);
-               if (ret) {
-                       ERR("Failed to set the notification channel socket's group");
-                       ret = -1;
-                       goto error;
-               }
-       }
-
-       DBG("Notification channel UNIX socket created (fd = %i)",
-                       fd);
-       free(sock_path);
-       return fd;
-error:
-       if (fd >= 0 && close(fd) < 0) {
-               PERROR("close notification channel socket");
-       }
-       free(sock_path);
-       return ret;
-}
-
-static
-int init_poll_set(struct lttng_poll_event *poll_set,
-               struct notification_thread_handle *handle,
-               int notification_channel_socket)
-{
-       int ret;
-
-       /*
-        * Create pollset with size 5:
-        *      - notification channel socket (listen for new connections),
-        *      - command queue event fd (internal sessiond commands),
-        *      - consumerd (32-bit user space) channel monitor pipe,
-        *      - consumerd (64-bit user space) channel monitor pipe,
-        *      - consumerd (kernel) channel monitor pipe.
-        */
-       ret = lttng_poll_create(poll_set, 5, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               goto end;
-       }
-
-       ret = lttng_poll_add(poll_set, notification_channel_socket,
-                       LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
-       if (ret < 0) {
-               ERR("Failed to add notification channel socket to pollset");
-               goto error;
-       }
-       ret = lttng_poll_add(poll_set, lttng_pipe_get_readfd(handle->cmd_queue.event_pipe),
-                       LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               ERR("Failed to add notification command queue event fd to pollset");
-               goto error;
-       }
-       ret = lttng_poll_add(poll_set,
-                       handle->channel_monitoring_pipes.ust32_consumer,
-                       LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               ERR("Failed to add ust-32 channel monitoring pipe fd to pollset");
-               goto error;
-       }
-       ret = lttng_poll_add(poll_set,
-                       handle->channel_monitoring_pipes.ust64_consumer,
-                       LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               ERR("Failed to add ust-64 channel monitoring pipe fd to pollset");
-               goto error;
-       }
-       if (handle->channel_monitoring_pipes.kernel_consumer < 0) {
-               goto end;
-       }
-       ret = lttng_poll_add(poll_set,
-                       handle->channel_monitoring_pipes.kernel_consumer,
-                       LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               ERR("Failed to add kernel channel monitoring pipe fd to pollset");
-               goto error;
-       }
-end:
-       return ret;
-error:
-       lttng_poll_clean(poll_set);
-       return ret;
-}
-
-static
-void fini_thread_state(struct notification_thread_state *state)
-{
-       int ret;
-
-       if (state->client_socket_ht) {
-               ret = handle_notification_thread_client_disconnect_all(state);
-               LTTNG_ASSERT(!ret);
-               ret = cds_lfht_destroy(state->client_socket_ht, NULL);
-               LTTNG_ASSERT(!ret);
-       }
-       if (state->client_id_ht) {
-               ret = cds_lfht_destroy(state->client_id_ht, NULL);
-               LTTNG_ASSERT(!ret);
-       }
-       if (state->triggers_ht) {
-               ret = handle_notification_thread_trigger_unregister_all(state);
-               LTTNG_ASSERT(!ret);
-               ret = cds_lfht_destroy(state->triggers_ht, NULL);
-               LTTNG_ASSERT(!ret);
-       }
-       if (state->channel_triggers_ht) {
-               ret = cds_lfht_destroy(state->channel_triggers_ht, NULL);
-               LTTNG_ASSERT(!ret);
-       }
-       if (state->channel_state_ht) {
-               ret = cds_lfht_destroy(state->channel_state_ht, NULL);
-               LTTNG_ASSERT(!ret);
-       }
-       if (state->notification_trigger_clients_ht) {
-               ret = cds_lfht_destroy(state->notification_trigger_clients_ht,
-                               NULL);
-               LTTNG_ASSERT(!ret);
-       }
-       if (state->channels_ht) {
-               ret = cds_lfht_destroy(state->channels_ht, NULL);
-               LTTNG_ASSERT(!ret);
-       }
-       if (state->sessions_ht) {
-               ret = cds_lfht_destroy(state->sessions_ht, NULL);
-               LTTNG_ASSERT(!ret);
-       }
-       if (state->triggers_by_name_uid_ht) {
-               ret = cds_lfht_destroy(state->triggers_by_name_uid_ht, NULL);
-               LTTNG_ASSERT(!ret);
-       }
-       if (state->trigger_tokens_ht) {
-               ret = cds_lfht_destroy(state->trigger_tokens_ht, NULL);
-               LTTNG_ASSERT(!ret);
-       }
-       /*
-        * Must be destroyed after all channels have been destroyed.
-        * See comment in struct lttng_session_trigger_list.
-        */
-       if (state->session_triggers_ht) {
-               ret = cds_lfht_destroy(state->session_triggers_ht, NULL);
-               LTTNG_ASSERT(!ret);
-       }
-       if (state->notification_channel_socket >= 0) {
-               notification_channel_socket_destroy(
-                               state->notification_channel_socket);
-       }
-
-       LTTNG_ASSERT(cds_list_empty(&state->tracer_event_sources_list));
-
-       if (state->executor) {
-               action_executor_destroy(state->executor);
-       }
-       lttng_poll_clean(&state->events);
-}
-
-static
-void mark_thread_as_ready(struct notification_thread_handle *handle)
-{
-       DBG("Marking notification thread as ready");
-       sem_post(&handle->ready);
-}
-
-static
-void wait_until_thread_is_ready(struct notification_thread_handle *handle)
-{
-       DBG("Waiting for notification thread to be ready");
-       sem_wait(&handle->ready);
-       DBG("Notification thread is ready");
-}
-
-static
-int init_thread_state(struct notification_thread_handle *handle,
-               struct notification_thread_state *state)
-{
-       int ret;
-
-       memset(state, 0, sizeof(*state));
-       state->notification_channel_socket = -1;
-       state->trigger_id.next_tracer_token = 1;
-       lttng_poll_init(&state->events);
-
-       ret = notification_channel_socket_create();
-       if (ret < 0) {
-               goto end;
-       }
-       state->notification_channel_socket = ret;
-
-       ret = init_poll_set(&state->events, handle,
-                       state->notification_channel_socket);
-       if (ret) {
-               goto end;
-       }
-
-       DBG("Listening on notification channel socket");
-       ret = lttcomm_listen_unix_sock(state->notification_channel_socket);
-       if (ret < 0) {
-               ERR("Listen failed on notification channel socket");
-               goto error;
-       }
-
-       state->client_socket_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
-                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!state->client_socket_ht) {
-               goto error;
-       }
-
-       state->client_id_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
-                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!state->client_id_ht) {
-               goto error;
-       }
-
-       state->channel_triggers_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
-                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!state->channel_triggers_ht) {
-               goto error;
-       }
-
-       state->session_triggers_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
-                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!state->session_triggers_ht) {
-               goto error;
-       }
-
-       state->channel_state_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
-                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!state->channel_state_ht) {
-               goto error;
-       }
-
-       state->notification_trigger_clients_ht = cds_lfht_new(DEFAULT_HT_SIZE,
-                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!state->notification_trigger_clients_ht) {
-               goto error;
-       }
-
-       state->channels_ht = cds_lfht_new(DEFAULT_HT_SIZE,
-                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!state->channels_ht) {
-               goto error;
-       }
-       state->sessions_ht = cds_lfht_new(DEFAULT_HT_SIZE,
-                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!state->sessions_ht) {
-               goto error;
-       }
-       state->triggers_ht = cds_lfht_new(DEFAULT_HT_SIZE,
-                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!state->triggers_ht) {
-               goto error;
-       }
-       state->triggers_by_name_uid_ht = cds_lfht_new(DEFAULT_HT_SIZE,
-                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!state->triggers_by_name_uid_ht) {
-               goto error;
-       }
-
-       state->trigger_tokens_ht = cds_lfht_new(DEFAULT_HT_SIZE,
-                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!state->trigger_tokens_ht) {
-               goto error;
-       }
-
-       CDS_INIT_LIST_HEAD(&state->tracer_event_sources_list);
-
-       state->executor = action_executor_create(handle);
-       if (!state->executor) {
-               goto error;
-       }
-
-       state->restart_poll = false;
-
-       mark_thread_as_ready(handle);
-end:
-       return 0;
-error:
-       fini_thread_state(state);
-       return -1;
-}
-
-static
-int handle_channel_monitoring_pipe(int fd, uint32_t revents,
-               struct notification_thread_handle *handle,
-               struct notification_thread_state *state)
-{
-       int ret = 0;
-       enum lttng_domain_type domain;
-
-       if (fd == handle->channel_monitoring_pipes.ust32_consumer ||
-                       fd == handle->channel_monitoring_pipes.ust64_consumer) {
-               domain = LTTNG_DOMAIN_UST;
-       } else if (fd == handle->channel_monitoring_pipes.kernel_consumer) {
-               domain = LTTNG_DOMAIN_KERNEL;
-       } else {
-               abort();
-       }
-
-       if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-               ret = lttng_poll_del(&state->events, fd);
-               if (ret) {
-                       ERR("Failed to remove consumer monitoring pipe from poll set");
-               }
-               goto end;
-       }
-
-       ret = handle_notification_thread_channel_sample(
-                       state, fd, domain);
-       if (ret) {
-               ERR("Consumer sample handling error occurred");
-               ret = -1;
-               goto end;
-       }
-end:
-       return ret;
-}
-
-static int handle_event_notification_pipe(int event_source_fd,
-               enum lttng_domain_type domain,
-               uint32_t revents,
-               struct notification_thread_state *state)
-{
-       int ret = 0;
-
-       if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-               ret = handle_notification_thread_tracer_event_source_died(
-                               state, event_source_fd);
-               if (ret) {
-                       ERR("Failed to remove event notification pipe from poll set: fd = %d",
-                                       event_source_fd);
-               }
-               goto end;
-       }
-
-       if (testpoint(sessiond_handle_notifier_event_pipe)) {
-               ret = 0;
-               goto end;
-       }
-
-       if (caa_unlikely(notifier_consumption_paused)) {
-               DBG("Event notifier notification consumption paused, sleeping...");
-               sleep(1);
-               goto end;
-       }
-
-       ret = handle_notification_thread_event_notification(
-                       state, event_source_fd, domain);
-       if (ret) {
-               ERR("Event notification handling error occurred for fd: %d",
-                               event_source_fd);
-               ret = -1;
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Return the event source domain type via parameter.
- */
-static bool fd_is_event_notification_source(const struct notification_thread_state *state,
-               int fd,
-               enum lttng_domain_type *domain)
-{
-       struct notification_event_tracer_event_source_element *source_element;
-
-       LTTNG_ASSERT(domain);
-
-       cds_list_for_each_entry(source_element,
-                       &state->tracer_event_sources_list, node) {
-               if (source_element->fd != fd) {
-                       continue;
-               }
-
-               *domain = source_element->domain;
-               return true;
-       }
-
-       return false;
-}
-
-/*
- * This thread services notification channel clients and commands received
- * from various lttng-sessiond components over a command queue.
- */
-static
-void *thread_notification(void *data)
-{
-       int ret;
-       struct notification_thread_handle *handle = data;
-       struct notification_thread_state state;
-       enum lttng_domain_type domain;
-
-       DBG("Started notification thread");
-
-       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_NOTIFICATION);
-       rcu_register_thread();
-       rcu_thread_online();
-
-       if (!handle) {
-               ERR("Invalid thread context provided");
-               goto end;
-       }
-
-       health_code_update();
-
-       ret = init_thread_state(handle, &state);
-       if (ret) {
-               goto end;
-       }
-
-       if (testpoint(sessiond_thread_notification)) {
-               goto end;
-       }
-
-       while (true) {
-               int fd_count, i;
-
-               health_poll_entry();
-               DBG("Entering poll wait");
-               ret = lttng_poll_wait(&state.events, -1);
-               DBG("Poll wait returned (%i)", ret);
-               health_poll_exit();
-               if (ret < 0) {
-                       /*
-                        * Restart interrupted system call.
-                        */
-                       if (errno == EINTR) {
-                               continue;
-                       }
-                       ERR("Error encountered during lttng_poll_wait (%i)", ret);
-                       goto error;
-               }
-
-               /*
-                * Reset restart_poll flag so that calls below might turn it
-                * on.
-                */
-               state.restart_poll = false;
-
-               fd_count = ret;
-               for (i = 0; i < fd_count; i++) {
-                       int fd = LTTNG_POLL_GETFD(&state.events, i);
-                       uint32_t revents = LTTNG_POLL_GETEV(&state.events, i);
-
-                       DBG("Handling fd (%i) activity (%u)", fd, revents);
-
-                       if (fd == state.notification_channel_socket) {
-                               if (revents & LPOLLIN) {
-                                       ret = handle_notification_thread_client_connect(
-                                                       &state);
-                                       if (ret < 0) {
-                                               goto error;
-                                       }
-                               } else if (revents &
-                                               (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                                       ERR("Notification socket poll error");
-                                       goto error;
-                               } else {
-                                       ERR("Unexpected poll events %u for notification socket %i", revents, fd);
-                                       goto error;
-                               }
-                       } else if (fd == lttng_pipe_get_readfd(handle->cmd_queue.event_pipe)) {
-                               ret = handle_notification_thread_command(handle,
-                                               &state);
-                               if (ret < 0) {
-                                       DBG("Error encountered while servicing command queue");
-                                       goto error;
-                               } else if (ret > 0) {
-                                       goto exit;
-                               }
-                       } else if (fd == handle->channel_monitoring_pipes.ust32_consumer ||
-                                       fd == handle->channel_monitoring_pipes.ust64_consumer ||
-                                       fd == handle->channel_monitoring_pipes.kernel_consumer) {
-                               ret = handle_channel_monitoring_pipe(fd,
-                                               revents, handle, &state);
-                               if (ret) {
-                                       goto error;
-                               }
-                       } else if (fd_is_event_notification_source(&state, fd, &domain)) {
-                               ret = handle_event_notification_pipe(fd, domain, revents, &state);
-                               if (ret) {
-                                       goto error;
-                               }
-                       } else {
-                               /* Activity on a client's socket. */
-                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                                       /*
-                                        * It doesn't matter if a command was
-                                        * pending on the client socket at this
-                                        * point since it now has no way to
-                                        * receive the notifications to which
-                                        * it was subscribing or unsubscribing.
-                                        */
-                                       ret = handle_notification_thread_client_disconnect(
-                                                       fd, &state);
-                                       if (ret) {
-                                               goto error;
-                                       }
-                               } else {
-                                       if (revents & LPOLLIN) {
-                                               ret = handle_notification_thread_client_in(
-                                                       &state, fd);
-                                               if (ret) {
-                                                       goto error;
-                                               }
-                                       }
-
-                                       if (revents & LPOLLOUT) {
-                                               ret = handle_notification_thread_client_out(
-                                                       &state, fd);
-                                               if (ret) {
-                                                       goto error;
-                                               }
-                                       }
-                               }
-                       }
-
-                       /*
-                        * Calls above might have changed the state of the
-                        * FDs in `state.events`. Call _poll_wait() again to
-                        * ensure we have a consistent state.
-                        */
-                       if (state.restart_poll) {
-                               break;
-                       }
-               }
-       }
-exit:
-error:
-       fini_thread_state(&state);
-end:
-       rcu_thread_offline();
-       rcu_unregister_thread();
-       health_unregister(the_health_sessiond);
-       return NULL;
-}
-
-static
-bool shutdown_notification_thread(void *thread_data)
-{
-       struct notification_thread_handle *handle = thread_data;
-
-       notification_thread_command_quit(handle);
-       return true;
-}
-
-struct lttng_thread *launch_notification_thread(
-               struct notification_thread_handle *handle)
-{
-       struct lttng_thread *thread;
-
-       thread = lttng_thread_create("Notification",
-                       thread_notification,
-                       shutdown_notification_thread,
-                       NULL,
-                       handle);
-       if (!thread) {
-               goto error;
-       }
-
-       /*
-        * Wait for the thread to be marked as "ready" before returning
-        * as other subsystems depend on the notification subsystem
-        * (e.g. rotation thread).
-        */
-       wait_until_thread_is_ready(handle);
-       return thread;
-error:
-       return NULL;
-}
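
The notification-thread.cpp re-added below appears line-for-line identical to the .c file removed above; the visible differences are the explicit conversions C++ requires, chiefly casting zmalloc()'s void * return to the destination pointer type. The short sketch that follows shows that cast together with the single-statement struct assignment used earlier in this diff (the `(typeof(x)) { ... }` cast); the names are illustrative, calloc stands in for zmalloc, and decltype is used as the portable spelling of that GNU extension.

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    struct sample {
            std::uint64_t key;
            std::uint64_t consumed;
    };

    int main()
    {
            /* C++ has no implicit void * -> T * conversion, hence the cast. */
            sample *stored = (sample *) std::calloc(1, sizeof(*stored));
            if (!stored) {
                    return 1;
            }

            /*
             * Assign an already-declared struct in one statement. The tree uses
             * the GNU compound-literal form `(typeof(x)) { ... }`, which g++ and
             * clang++ accept as an extension; decltype(x){ ... } is the portable
             * C++ equivalent.
             */
            sample latest;
            latest = decltype(latest){ 42, 128 };

            std::memcpy(stored, &latest, sizeof(*stored));
            std::free(stored);
            return 0;
    }
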
diff --git a/src/bin/lttng-sessiond/notification-thread.cpp b/src/bin/lttng-sessiond/notification-thread.cpp
new file mode 100644
index 0000000..e8a2ade
--- /dev/null
+++ b/src/bin/lttng-sessiond/notification-thread.cpp
@@ -0,0 +1,817 @@
+/*
+ * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <lttng/trigger/trigger.h>
+#include <lttng/notification/channel-internal.h>
+#include <lttng/notification/notification-internal.h>
+#include <lttng/condition/condition-internal.h>
+#include <lttng/condition/buffer-usage-internal.h>
+#include <common/error.h>
+#include <common/config/session-config.h>
+#include <common/defaults.h>
+#include <common/utils.h>
+#include <common/align.h>
+#include <common/time.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <signal.h>
+
+#include "notification-thread.h"
+#include "notification-thread-events.h"
+#include "notification-thread-commands.h"
+#include "lttng-sessiond.h"
+#include "health-sessiond.h"
+#include "thread.h"
+#include "testpoint.h"
+
+#include "kernel.h"
+#include <common/kernel-ctl/kernel-ctl.h>
+
+#include <urcu.h>
+#include <urcu/list.h>
+#include <urcu/rculfhash.h>
+
+
+int notifier_consumption_paused;
+/*
+ * Destroy the thread data previously created by the init function.
+ */
+void notification_thread_handle_destroy(
+               struct notification_thread_handle *handle)
+{
+       int ret;
+
+       if (!handle) {
+               goto end;
+       }
+
+       LTTNG_ASSERT(cds_list_empty(&handle->cmd_queue.list));
+       pthread_mutex_destroy(&handle->cmd_queue.lock);
+       sem_destroy(&handle->ready);
+
+       if (handle->cmd_queue.event_pipe) {
+               lttng_pipe_destroy(handle->cmd_queue.event_pipe);
+       }
+       if (handle->channel_monitoring_pipes.ust32_consumer >= 0) {
+               ret = close(handle->channel_monitoring_pipes.ust32_consumer);
+               if (ret) {
+                       PERROR("close 32-bit consumer channel monitoring pipe");
+               }
+       }
+       if (handle->channel_monitoring_pipes.ust64_consumer >= 0) {
+               ret = close(handle->channel_monitoring_pipes.ust64_consumer);
+               if (ret) {
+                       PERROR("close 64-bit consumer channel monitoring pipe");
+               }
+       }
+       if (handle->channel_monitoring_pipes.kernel_consumer >= 0) {
+               ret = close(handle->channel_monitoring_pipes.kernel_consumer);
+               if (ret) {
+                       PERROR("close kernel consumer channel monitoring pipe");
+               }
+       }
+
+end:
+       free(handle);
+}
+
+struct notification_thread_handle *notification_thread_handle_create(
+               struct lttng_pipe *ust32_channel_monitor_pipe,
+               struct lttng_pipe *ust64_channel_monitor_pipe,
+               struct lttng_pipe *kernel_channel_monitor_pipe)
+{
+       int ret;
+       struct notification_thread_handle *handle;
+       struct lttng_pipe *event_pipe = NULL;
+
+       handle = (notification_thread_handle *) zmalloc(sizeof(*handle));
+       if (!handle) {
+               goto end;
+       }
+
+       sem_init(&handle->ready, 0, 0);
+
+       event_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!event_pipe) {
+               ERR("event_pipe creation");
+               goto error;
+       }
+
+       handle->cmd_queue.event_pipe = event_pipe;
+       event_pipe = NULL;
+
+       CDS_INIT_LIST_HEAD(&handle->cmd_queue.list);
+       ret = pthread_mutex_init(&handle->cmd_queue.lock, NULL);
+       if (ret) {
+               goto error;
+       }
+
+       if (ust32_channel_monitor_pipe) {
+               handle->channel_monitoring_pipes.ust32_consumer =
+                               lttng_pipe_release_readfd(
+                                       ust32_channel_monitor_pipe);
+               if (handle->channel_monitoring_pipes.ust32_consumer < 0) {
+                       goto error;
+               }
+       } else {
+               handle->channel_monitoring_pipes.ust32_consumer = -1;
+       }
+       if (ust64_channel_monitor_pipe) {
+               handle->channel_monitoring_pipes.ust64_consumer =
+                               lttng_pipe_release_readfd(
+                                       ust64_channel_monitor_pipe);
+               if (handle->channel_monitoring_pipes.ust64_consumer < 0) {
+                       goto error;
+               }
+       } else {
+               handle->channel_monitoring_pipes.ust64_consumer = -1;
+       }
+       if (kernel_channel_monitor_pipe) {
+               handle->channel_monitoring_pipes.kernel_consumer =
+                               lttng_pipe_release_readfd(
+                                       kernel_channel_monitor_pipe);
+               if (handle->channel_monitoring_pipes.kernel_consumer < 0) {
+                       goto error;
+               }
+       } else {
+               handle->channel_monitoring_pipes.kernel_consumer = -1;
+       }
+
+end:
+       return handle;
+error:
+       lttng_pipe_destroy(event_pipe);
+       notification_thread_handle_destroy(handle);
+       return NULL;
+}
+
+static
+char *get_notification_channel_sock_path(void)
+{
+       int ret;
+       bool is_root = !getuid();
+       char *sock_path;
+
+       sock_path = (char *) zmalloc(LTTNG_PATH_MAX);
+       if (!sock_path) {
+               goto error;
+       }
+
+       if (is_root) {
+               ret = snprintf(sock_path, LTTNG_PATH_MAX,
+                               DEFAULT_GLOBAL_NOTIFICATION_CHANNEL_UNIX_SOCK);
+               if (ret < 0) {
+                       goto error;
+               }
+       } else {
+               const char *home_path = utils_get_home_dir();
+
+               if (!home_path) {
+                       ERR("Can't get HOME directory for socket creation");
+                       goto error;
+               }
+
+               ret = snprintf(sock_path, LTTNG_PATH_MAX,
+                               DEFAULT_HOME_NOTIFICATION_CHANNEL_UNIX_SOCK,
+                               home_path);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+       return sock_path;
+error:
+       free(sock_path);
+       return NULL;
+}
+
+static
+void notification_channel_socket_destroy(int fd)
+{
+       int ret;
+       char *sock_path = get_notification_channel_sock_path();
+
+       DBG("Destroying notification channel socket");
+
+       if (sock_path) {
+               ret = unlink(sock_path);
+               free(sock_path);
+               if (ret < 0) {
+                       PERROR("unlink notification channel socket");
+               }
+       }
+
+       ret = close(fd);
+       if (ret) {
+               PERROR("close notification channel socket");
+       }
+}
+
+static
+int notification_channel_socket_create(void)
+{
+       int fd = -1, ret;
+       char *sock_path = get_notification_channel_sock_path();
+
+       DBG("Creating notification channel UNIX socket at %s",
+                       sock_path);
+
+       ret = lttcomm_create_unix_sock(sock_path);
+       if (ret < 0) {
+               ERR("Failed to create notification socket");
+               goto error;
+       }
+       fd = ret;
+
+       ret = chmod(sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+       if (ret < 0) {
+               ERR("Set file permissions failed: %s", sock_path);
+               PERROR("chmod notification channel socket");
+               goto error;
+       }
+
+       if (getuid() == 0) {
+               gid_t gid;
+
+               ret = utils_get_group_id(the_config.tracing_group_name.value,
+                               true, &gid);
+               if (ret) {
+                       /* Default to root group. */
+                       gid = 0;
+               }
+
+               ret = chown(sock_path, 0, gid);
+               if (ret) {
+                       ERR("Failed to set the notification channel socket's group");
+                       ret = -1;
+                       goto error;
+               }
+       }
+
+       DBG("Notification channel UNIX socket created (fd = %i)",
+                       fd);
+       free(sock_path);
+       return fd;
+error:
+       if (fd >= 0 && close(fd) < 0) {
+               PERROR("close notification channel socket");
+       }
+       free(sock_path);
+       return ret;
+}
+
+static
+int init_poll_set(struct lttng_poll_event *poll_set,
+               struct notification_thread_handle *handle,
+               int notification_channel_socket)
+{
+       int ret;
+
+       /*
+        * Create pollset with size 5:
+        *      - notification channel socket (listen for new connections),
+        *      - command queue event fd (internal sessiond commands),
+        *      - consumerd (32-bit user space) channel monitor pipe,
+        *      - consumerd (64-bit user space) channel monitor pipe,
+        *      - consumerd (kernel) channel monitor pipe.
+        */
+       ret = lttng_poll_create(poll_set, 5, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto end;
+       }
+
+       ret = lttng_poll_add(poll_set, notification_channel_socket,
+                       LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
+       if (ret < 0) {
+               ERR("Failed to add notification channel socket to pollset");
+               goto error;
+       }
+       ret = lttng_poll_add(poll_set, lttng_pipe_get_readfd(handle->cmd_queue.event_pipe),
+                       LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               ERR("Failed to add notification command queue event fd to pollset");
+               goto error;
+       }
+       ret = lttng_poll_add(poll_set,
+                       handle->channel_monitoring_pipes.ust32_consumer,
+                       LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               ERR("Failed to add ust-32 channel monitoring pipe fd to pollset");
+               goto error;
+       }
+       ret = lttng_poll_add(poll_set,
+                       handle->channel_monitoring_pipes.ust64_consumer,
+                       LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               ERR("Failed to add ust-64 channel monitoring pipe fd to pollset");
+               goto error;
+       }
+       if (handle->channel_monitoring_pipes.kernel_consumer < 0) {
+               goto end;
+       }
+       ret = lttng_poll_add(poll_set,
+                       handle->channel_monitoring_pipes.kernel_consumer,
+                       LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               ERR("Failed to add kernel channel monitoring pipe fd to pollset");
+               goto error;
+       }
+end:
+       return ret;
+error:
+       lttng_poll_clean(poll_set);
+       return ret;
+}
+
+static
+void fini_thread_state(struct notification_thread_state *state)
+{
+       int ret;
+
+       if (state->client_socket_ht) {
+               ret = handle_notification_thread_client_disconnect_all(state);
+               LTTNG_ASSERT(!ret);
+               ret = cds_lfht_destroy(state->client_socket_ht, NULL);
+               LTTNG_ASSERT(!ret);
+       }
+       if (state->client_id_ht) {
+               ret = cds_lfht_destroy(state->client_id_ht, NULL);
+               LTTNG_ASSERT(!ret);
+       }
+       if (state->triggers_ht) {
+               ret = handle_notification_thread_trigger_unregister_all(state);
+               LTTNG_ASSERT(!ret);
+               ret = cds_lfht_destroy(state->triggers_ht, NULL);
+               LTTNG_ASSERT(!ret);
+       }
+       if (state->channel_triggers_ht) {
+               ret = cds_lfht_destroy(state->channel_triggers_ht, NULL);
+               LTTNG_ASSERT(!ret);
+       }
+       if (state->channel_state_ht) {
+               ret = cds_lfht_destroy(state->channel_state_ht, NULL);
+               LTTNG_ASSERT(!ret);
+       }
+       if (state->notification_trigger_clients_ht) {
+               ret = cds_lfht_destroy(state->notification_trigger_clients_ht,
+                               NULL);
+               LTTNG_ASSERT(!ret);
+       }
+       if (state->channels_ht) {
+               ret = cds_lfht_destroy(state->channels_ht, NULL);
+               LTTNG_ASSERT(!ret);
+       }
+       if (state->sessions_ht) {
+               ret = cds_lfht_destroy(state->sessions_ht, NULL);
+               LTTNG_ASSERT(!ret);
+       }
+       if (state->triggers_by_name_uid_ht) {
+               ret = cds_lfht_destroy(state->triggers_by_name_uid_ht, NULL);
+               LTTNG_ASSERT(!ret);
+       }
+       if (state->trigger_tokens_ht) {
+               ret = cds_lfht_destroy(state->trigger_tokens_ht, NULL);
+               LTTNG_ASSERT(!ret);
+       }
+       /*
+        * Must be destroyed after all channels have been destroyed.
+        * See comment in struct lttng_session_trigger_list.
+        */
+       if (state->session_triggers_ht) {
+               ret = cds_lfht_destroy(state->session_triggers_ht, NULL);
+               LTTNG_ASSERT(!ret);
+       }
+       if (state->notification_channel_socket >= 0) {
+               notification_channel_socket_destroy(
+                               state->notification_channel_socket);
+       }
+
+       LTTNG_ASSERT(cds_list_empty(&state->tracer_event_sources_list));
+
+       if (state->executor) {
+               action_executor_destroy(state->executor);
+       }
+       lttng_poll_clean(&state->events);
+}
+
+static
+void mark_thread_as_ready(struct notification_thread_handle *handle)
+{
+       DBG("Marking notification thread as ready");
+       sem_post(&handle->ready);
+}
+
+static
+void wait_until_thread_is_ready(struct notification_thread_handle *handle)
+{
+       DBG("Waiting for notification thread to be ready");
+       sem_wait(&handle->ready);
+       DBG("Notification thread is ready");
+}
+
+static
+int init_thread_state(struct notification_thread_handle *handle,
+               struct notification_thread_state *state)
+{
+       int ret;
+
+       memset(state, 0, sizeof(*state));
+       state->notification_channel_socket = -1;
+       state->trigger_id.next_tracer_token = 1;
+       lttng_poll_init(&state->events);
+
+       ret = notification_channel_socket_create();
+       if (ret < 0) {
+               goto end;
+       }
+       state->notification_channel_socket = ret;
+
+       ret = init_poll_set(&state->events, handle,
+                       state->notification_channel_socket);
+       if (ret) {
+               goto end;
+       }
+
+       DBG("Listening on notification channel socket");
+       ret = lttcomm_listen_unix_sock(state->notification_channel_socket);
+       if (ret < 0) {
+               ERR("Listen failed on notification channel socket");
+               goto error;
+       }
+
+       state->client_socket_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
+                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->client_socket_ht) {
+               goto error;
+       }
+
+       state->client_id_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
+                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->client_id_ht) {
+               goto error;
+       }
+
+       state->channel_triggers_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
+                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->channel_triggers_ht) {
+               goto error;
+       }
+
+       state->session_triggers_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
+                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->session_triggers_ht) {
+               goto error;
+       }
+
+       state->channel_state_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
+                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->channel_state_ht) {
+               goto error;
+       }
+
+       state->notification_trigger_clients_ht = cds_lfht_new(DEFAULT_HT_SIZE,
+                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->notification_trigger_clients_ht) {
+               goto error;
+       }
+
+       state->channels_ht = cds_lfht_new(DEFAULT_HT_SIZE,
+                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->channels_ht) {
+               goto error;
+       }
+       state->sessions_ht = cds_lfht_new(DEFAULT_HT_SIZE,
+                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->sessions_ht) {
+               goto error;
+       }
+       state->triggers_ht = cds_lfht_new(DEFAULT_HT_SIZE,
+                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->triggers_ht) {
+               goto error;
+       }
+       state->triggers_by_name_uid_ht = cds_lfht_new(DEFAULT_HT_SIZE,
+                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->triggers_by_name_uid_ht) {
+               goto error;
+       }
+
+       state->trigger_tokens_ht = cds_lfht_new(DEFAULT_HT_SIZE,
+                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->trigger_tokens_ht) {
+               goto error;
+       }
+
+       CDS_INIT_LIST_HEAD(&state->tracer_event_sources_list);
+
+       state->executor = action_executor_create(handle);
+       if (!state->executor) {
+               goto error;
+       }
+
+       state->restart_poll = false;
+
+       mark_thread_as_ready(handle);
+end:
+       return 0;
+error:
+       fini_thread_state(state);
+       return -1;
+}
+
+static
+int handle_channel_monitoring_pipe(int fd, uint32_t revents,
+               struct notification_thread_handle *handle,
+               struct notification_thread_state *state)
+{
+       int ret = 0;
+       enum lttng_domain_type domain;
+
+       if (fd == handle->channel_monitoring_pipes.ust32_consumer ||
+                       fd == handle->channel_monitoring_pipes.ust64_consumer) {
+               domain = LTTNG_DOMAIN_UST;
+       } else if (fd == handle->channel_monitoring_pipes.kernel_consumer) {
+               domain = LTTNG_DOMAIN_KERNEL;
+       } else {
+               abort();
+       }
+
+       if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+               ret = lttng_poll_del(&state->events, fd);
+               if (ret) {
+                       ERR("Failed to remove consumer monitoring pipe from poll set");
+               }
+               goto end;
+       }
+
+       ret = handle_notification_thread_channel_sample(
+                       state, fd, domain);
+       if (ret) {
+               ERR("Consumer sample handling error occurred");
+               ret = -1;
+               goto end;
+       }
+end:
+       return ret;
+}
+
+static int handle_event_notification_pipe(int event_source_fd,
+               enum lttng_domain_type domain,
+               uint32_t revents,
+               struct notification_thread_state *state)
+{
+       int ret = 0;
+
+       if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+               ret = handle_notification_thread_tracer_event_source_died(
+                               state, event_source_fd);
+               if (ret) {
+                       ERR("Failed to remove event notification pipe from poll set: fd = %d",
+                                       event_source_fd);
+               }
+               goto end;
+       }
+
+       if (testpoint(sessiond_handle_notifier_event_pipe)) {
+               ret = 0;
+               goto end;
+       }
+
+       if (caa_unlikely(notifier_consumption_paused)) {
+               DBG("Event notifier notification consumption paused, sleeping...");
+               sleep(1);
+               goto end;
+       }
+
+       ret = handle_notification_thread_event_notification(
+                       state, event_source_fd, domain);
+       if (ret) {
+               ERR("Event notification handling error occurred for fd: %d",
+                               event_source_fd);
+               ret = -1;
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Return the event source domain type via parameter.
+ */
+static bool fd_is_event_notification_source(const struct notification_thread_state *state,
+               int fd,
+               enum lttng_domain_type *domain)
+{
+       struct notification_event_tracer_event_source_element *source_element;
+
+       LTTNG_ASSERT(domain);
+
+       cds_list_for_each_entry(source_element,
+                       &state->tracer_event_sources_list, node) {
+               if (source_element->fd != fd) {
+                       continue;
+               }
+
+               *domain = source_element->domain;
+               return true;
+       }
+
+       return false;
+}
+
+/*
+ * This thread services notification channel clients and commands received
+ * from various lttng-sessiond components over a command queue.
+ */
+static
+void *thread_notification(void *data)
+{
+       int ret;
+       struct notification_thread_handle *handle = (notification_thread_handle *) data;
+       struct notification_thread_state state;
+       enum lttng_domain_type domain;
+
+       DBG("Started notification thread");
+
+       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_NOTIFICATION);
+       rcu_register_thread();
+       rcu_thread_online();
+
+       if (!handle) {
+               ERR("Invalid thread context provided");
+               goto end;
+       }
+
+       health_code_update();
+
+       ret = init_thread_state(handle, &state);
+       if (ret) {
+               goto end;
+       }
+
+       if (testpoint(sessiond_thread_notification)) {
+               goto end;
+       }
+
+       while (true) {
+               int fd_count, i;
+
+               health_poll_entry();
+               DBG("Entering poll wait");
+               ret = lttng_poll_wait(&state.events, -1);
+               DBG("Poll wait returned (%i)", ret);
+               health_poll_exit();
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               continue;
+                       }
+                       ERR("Error encountered during lttng_poll_wait (%i)", ret);
+                       goto error;
+               }
+
+               /*
+                * Reset restart_poll flag so that calls below might turn it
+                * on.
+                */
+               state.restart_poll = false;
+
+               fd_count = ret;
+               for (i = 0; i < fd_count; i++) {
+                       int fd = LTTNG_POLL_GETFD(&state.events, i);
+                       uint32_t revents = LTTNG_POLL_GETEV(&state.events, i);
+
+                       DBG("Handling fd (%i) activity (%u)", fd, revents);
+
+                       if (fd == state.notification_channel_socket) {
+                               if (revents & LPOLLIN) {
+                                       ret = handle_notification_thread_client_connect(
+                                                       &state);
+                                       if (ret < 0) {
+                                               goto error;
+                                       }
+                               } else if (revents &
+                                               (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       ERR("Notification socket poll error");
+                                       goto error;
+                               } else {
+                                       ERR("Unexpected poll events %u for notification socket %i", revents, fd);
+                                       goto error;
+                               }
+                       } else if (fd == lttng_pipe_get_readfd(handle->cmd_queue.event_pipe)) {
+                               ret = handle_notification_thread_command(handle,
+                                               &state);
+                               if (ret < 0) {
+                                       DBG("Error encountered while servicing command queue");
+                                       goto error;
+                               } else if (ret > 0) {
+                                       goto exit;
+                               }
+                       } else if (fd == handle->channel_monitoring_pipes.ust32_consumer ||
+                                       fd == handle->channel_monitoring_pipes.ust64_consumer ||
+                                       fd == handle->channel_monitoring_pipes.kernel_consumer) {
+                               ret = handle_channel_monitoring_pipe(fd,
+                                               revents, handle, &state);
+                               if (ret) {
+                                       goto error;
+                               }
+                       } else if (fd_is_event_notification_source(&state, fd, &domain)) {
+                               ret = handle_event_notification_pipe(fd, domain, revents, &state);
+                               if (ret) {
+                                       goto error;
+                               }
+                       } else {
+                               /* Activity on a client's socket. */
+                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       /*
+                                        * It doesn't matter if a command was
+                                        * pending on the client socket at this
+                                        * point since it now has no way to
+                                        * receive the notifications to which
+                                        * it was subscribing or unsubscribing.
+                                        */
+                                       ret = handle_notification_thread_client_disconnect(
+                                                       fd, &state);
+                                       if (ret) {
+                                               goto error;
+                                       }
+                               } else {
+                                       if (revents & LPOLLIN) {
+                                               ret = handle_notification_thread_client_in(
+                                                       &state, fd);
+                                               if (ret) {
+                                                       goto error;
+                                               }
+                                       }
+
+                                       if (revents & LPOLLOUT) {
+                                               ret = handle_notification_thread_client_out(
+                                                       &state, fd);
+                                               if (ret) {
+                                                       goto error;
+                                               }
+                                       }
+                               }
+                       }
+
+                       /*
+                        * Calls above might have changed the state of the
+                        * FDs in `state.events`. Call _poll_wait() again to
+                        * ensure we have a consistent state.
+                        */
+                       if (state.restart_poll) {
+                               break;
+                       }
+               }
+       }
+exit:
+error:
+       fini_thread_state(&state);
+end:
+       rcu_thread_offline();
+       rcu_unregister_thread();
+       health_unregister(the_health_sessiond);
+       return NULL;
+}
+
+static
+bool shutdown_notification_thread(void *thread_data)
+{
+       struct notification_thread_handle *handle = (notification_thread_handle *) thread_data;
+
+       notification_thread_command_quit(handle);
+       return true;
+}
+
+struct lttng_thread *launch_notification_thread(
+               struct notification_thread_handle *handle)
+{
+       struct lttng_thread *thread;
+
+       thread = lttng_thread_create("Notification",
+                       thread_notification,
+                       shutdown_notification_thread,
+                       NULL,
+                       handle);
+       if (!thread) {
+               goto error;
+       }
+
+       /*
+        * Wait for the thread to be marked as "ready" before returning
+        * as other subsystems depend on the notification subsystem
+        * (e.g. rotation thread).
+        */
+       wait_until_thread_is_ready(handle);
+       return thread;
+error:
+       return NULL;
+}
diff --git a/src/bin/lttng-sessiond/notify-apps.c b/src/bin/lttng-sessiond/notify-apps.c
deleted file mode 100644 (file)
index 7c86b57..0000000
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-
-#include <common/common.h>
-#include <common/utils.h>
-
-#include "fd-limit.h"
-#include "lttng-sessiond.h"
-#include "notify-apps.h"
-#include "health-sessiond.h"
-#include "testpoint.h"
-#include "utils.h"
-#include "thread.h"
-
-struct thread_notifiers {
-       struct lttng_pipe *quit_pipe;
-       int apps_cmd_notify_pipe_read_fd;
-};
-
-/*
- * This thread manage application notify communication.
- */
-static void *thread_application_notification(void *data)
-{
-       int i, ret, pollfd, err = -1;
-       ssize_t size_ret;
-       uint32_t revents, nb_fd;
-       struct lttng_poll_event events;
-       struct thread_notifiers *notifiers = data;
-       const int quit_pipe_read_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);
-
-       DBG("[ust-thread] Manage application notify command");
-
-       rcu_register_thread();
-       rcu_thread_online();
-
-       health_register(the_health_sessiond,
-                       HEALTH_SESSIOND_TYPE_APP_MANAGE_NOTIFY);
-
-       if (testpoint(sessiond_thread_app_manage_notify)) {
-               goto error_testpoint;
-       }
-
-       health_code_update();
-
-       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               goto error_poll_create;
-       }
-
-       /* Add notify pipe to the pollset. */
-       ret = lttng_poll_add(&events, notifiers->apps_cmd_notify_pipe_read_fd,
-                       LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
-       if (ret < 0) {
-               goto error;
-       }
-
-       ret = lttng_poll_add(&events, quit_pipe_read_fd,
-                       LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               goto error;
-       }
-
-       health_code_update();
-
-       while (1) {
-               DBG3("[ust-thread] Manage notify polling");
-
-               /* Inifinite blocking call, waiting for transmission */
-restart:
-               health_poll_entry();
-               ret = lttng_poll_wait(&events, -1);
-               DBG3("[ust-thread] Manage notify return from poll on %d fds",
-                               LTTNG_POLL_GETNB(&events));
-               health_poll_exit();
-               if (ret < 0) {
-                       /*
-                        * Restart interrupted system call.
-                        */
-                       if (errno == EINTR) {
-                               goto restart;
-                       }
-                       goto error;
-               }
-
-               nb_fd = ret;
-
-               for (i = 0; i < nb_fd; i++) {
-                       health_code_update();
-
-                       /* Fetch once the poll data */
-                       revents = LTTNG_POLL_GETEV(&events, i);
-                       pollfd = LTTNG_POLL_GETFD(&events, i);
-
-                       /* Thread quit pipe has been closed. Killing thread. */
-                       if (pollfd == quit_pipe_read_fd) {
-                               err = 0;
-                               goto exit;
-                       } else if (pollfd == notifiers->apps_cmd_notify_pipe_read_fd) {
-                               /* Inspect the apps cmd pipe */
-                               int sock;
-
-                               if (revents & LPOLLIN) {
-                                       /* Get socket from dispatch thread. */
-                                       size_ret = lttng_read(notifiers->apps_cmd_notify_pipe_read_fd,
-                                                       &sock, sizeof(sock));
-                                       if (size_ret < sizeof(sock)) {
-                                               PERROR("read apps notify pipe");
-                                               goto error;
-                                       }
-                                       health_code_update();
-
-                                       ret = lttng_poll_add(&events, sock,
-                                                       LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
-                                       if (ret < 0) {
-                                               /*
-                                                * It's possible we've reached the max poll fd allowed.
-                                                * Let's close the socket but continue normal execution.
-                                                */
-                                               ret = close(sock);
-                                               if (ret) {
-                                                       PERROR("close notify socket %d", sock);
-                                               }
-                                               lttng_fd_put(LTTNG_FD_APPS, 1);
-                                               continue;
-                                       }
-                                       DBG3("UST thread notify added sock %d to pollset", sock);
-                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                                       ERR("Apps notify command pipe error");
-                                       goto error;
-                               } else {
-                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                                       goto error;
-                               }
-                       } else {
-                               /*
-                                * At this point, we know that a registered application
-                                * triggered the event.
-                                */
-                               if (revents & (LPOLLIN | LPOLLPRI)) {
-                                       ret = ust_app_recv_notify(pollfd);
-                                       if (ret < 0) {
-                                               /* Removing from the poll set */
-                                               ret = lttng_poll_del(&events, pollfd);
-                                               if (ret < 0) {
-                                                       goto error;
-                                               }
-
-                                               /* The socket is closed after a grace period here. */
-                                               ust_app_notify_sock_unregister(pollfd);
-                                       }
-                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                                       /* Removing from the poll set */
-                                       ret = lttng_poll_del(&events, pollfd);
-                                       if (ret < 0) {
-                                               goto error;
-                                       }
-
-                                       /* The socket is closed after a grace period here. */
-                                       ust_app_notify_sock_unregister(pollfd);
-                               } else {
-                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                                       goto error;
-                               }
-                               health_code_update();
-                       }
-               }
-       }
-
-exit:
-error:
-       lttng_poll_clean(&events);
-error_poll_create:
-error_testpoint:
-
-       DBG("Application notify communication apps thread cleanup complete");
-       if (err) {
-               health_error();
-               ERR("Health error occurred in %s", __func__);
-       }
-       health_unregister(the_health_sessiond);
-       rcu_thread_offline();
-       rcu_unregister_thread();
-       return NULL;
-}
-
-static bool shutdown_application_notification_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
-
-       return notify_thread_pipe(write_fd) == 1;
-}
-
-static void cleanup_application_notification_thread(void *data)
-{
-       struct thread_notifiers *notifiers = data;
-
-       lttng_pipe_destroy(notifiers->quit_pipe);
-       free(notifiers);
-}
-
-bool launch_application_notification_thread(int apps_cmd_notify_pipe_read_fd)
-{
-       struct lttng_thread *thread;
-       struct thread_notifiers *notifiers;
-       struct lttng_pipe *quit_pipe;
-
-       notifiers = zmalloc(sizeof(*notifiers));
-       if (!notifiers) {
-               goto error_alloc;
-       }
-       notifiers->apps_cmd_notify_pipe_read_fd = apps_cmd_notify_pipe_read_fd;
-
-       quit_pipe = lttng_pipe_open(FD_CLOEXEC);
-       if (!quit_pipe) {
-               goto error;
-       }
-       notifiers->quit_pipe = quit_pipe;
-
-       thread = lttng_thread_create("Application notification",
-                       thread_application_notification,
-                       shutdown_application_notification_thread,
-                       cleanup_application_notification_thread,
-                       notifiers);
-       if (!thread) {
-               goto error;
-       }
-       lttng_thread_put(thread);
-       return true;
-error:
-       cleanup_application_notification_thread(notifiers);
-error_alloc:
-       return false;
-}
diff --git a/src/bin/lttng-sessiond/notify-apps.cpp b/src/bin/lttng-sessiond/notify-apps.cpp
new file mode 100644 (file)
index 0000000..0e799ff
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+
+#include <common/common.h>
+#include <common/utils.h>
+
+#include "fd-limit.h"
+#include "lttng-sessiond.h"
+#include "notify-apps.h"
+#include "health-sessiond.h"
+#include "testpoint.h"
+#include "utils.h"
+#include "thread.h"
+
+struct thread_notifiers {
+       struct lttng_pipe *quit_pipe;
+       int apps_cmd_notify_pipe_read_fd;
+};
+
+/*
+ * This thread manages application notify communication.
+ */
+static void *thread_application_notification(void *data)
+{
+       int i, ret, pollfd, err = -1;
+       ssize_t size_ret;
+       uint32_t revents, nb_fd;
+       struct lttng_poll_event events;
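+       /* Note: C++ requires an explicit cast from the void * thread argument. */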
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       const int quit_pipe_read_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);
+
+       DBG("[ust-thread] Manage application notify command");
+
+       rcu_register_thread();
+       rcu_thread_online();
+
+       health_register(the_health_sessiond,
+                       HEALTH_SESSIOND_TYPE_APP_MANAGE_NOTIFY);
+
+       if (testpoint(sessiond_thread_app_manage_notify)) {
+               goto error_testpoint;
+       }
+
+       health_code_update();
+
+       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error_poll_create;
+       }
+
+       /* Add notify pipe to the pollset. */
+       ret = lttng_poll_add(&events, notifiers->apps_cmd_notify_pipe_read_fd,
+                       LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
+       if (ret < 0) {
+               goto error;
+       }
+
+       ret = lttng_poll_add(&events, quit_pipe_read_fd,
+                       LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               goto error;
+       }
+
+       health_code_update();
+
+       while (1) {
+               DBG3("[ust-thread] Manage notify polling");
+
+               /* Infinite blocking call, waiting for transmission */
+restart:
+               health_poll_entry();
+               ret = lttng_poll_wait(&events, -1);
+               DBG3("[ust-thread] Manage notify return from poll on %d fds",
+                               LTTNG_POLL_GETNB(&events));
+               health_poll_exit();
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               goto restart;
+                       }
+                       goto error;
+               }
+
+               nb_fd = ret;
+
+               for (i = 0; i < nb_fd; i++) {
+                       health_code_update();
+
+                       /* Fetch the poll data once */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
+
+                       /* Thread quit pipe has been closed. Killing thread. */
+                       if (pollfd == quit_pipe_read_fd) {
+                               err = 0;
+                               goto exit;
+                       } else if (pollfd == notifiers->apps_cmd_notify_pipe_read_fd) {
+                               /* Inspect the apps cmd pipe */
+                               int sock;
+
+                               if (revents & LPOLLIN) {
+                                       /* Get socket from dispatch thread. */
+                                       size_ret = lttng_read(notifiers->apps_cmd_notify_pipe_read_fd,
+                                                       &sock, sizeof(sock));
+                                       if (size_ret < sizeof(sock)) {
+                                               PERROR("read apps notify pipe");
+                                               goto error;
+                                       }
+                                       health_code_update();
+
+                                       ret = lttng_poll_add(&events, sock,
+                                                       LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
+                                       if (ret < 0) {
+                                               /*
+                                                * It's possible we've reached the max poll fd allowed.
+                                                * Let's close the socket but continue normal execution.
+                                                */
+                                               ret = close(sock);
+                                               if (ret) {
+                                                       PERROR("close notify socket %d", sock);
+                                               }
+                                               lttng_fd_put(LTTNG_FD_APPS, 1);
+                                               continue;
+                                       }
+                                       DBG3("UST thread notify added sock %d to pollset", sock);
+                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       ERR("Apps notify command pipe error");
+                                       goto error;
+                               } else {
+                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                                       goto error;
+                               }
+                       } else {
+                               /*
+                                * At this point, we know that a registered application
+                                * triggered the event.
+                                */
+                               if (revents & (LPOLLIN | LPOLLPRI)) {
+                                       ret = ust_app_recv_notify(pollfd);
+                                       if (ret < 0) {
+                                               /* Removing from the poll set */
+                                               ret = lttng_poll_del(&events, pollfd);
+                                               if (ret < 0) {
+                                                       goto error;
+                                               }
+
+                                               /* The socket is closed after a grace period here. */
+                                               ust_app_notify_sock_unregister(pollfd);
+                                       }
+                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       /* Removing from the poll set */
+                                       ret = lttng_poll_del(&events, pollfd);
+                                       if (ret < 0) {
+                                               goto error;
+                                       }
+
+                                       /* The socket is closed after a grace period here. */
+                                       ust_app_notify_sock_unregister(pollfd);
+                               } else {
+                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                                       goto error;
+                               }
+                               health_code_update();
+                       }
+               }
+       }
+
+exit:
+error:
+       lttng_poll_clean(&events);
+error_poll_create:
+error_testpoint:
+
+       DBG("Application notify communication apps thread cleanup complete");
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister(the_health_sessiond);
+       rcu_thread_offline();
+       rcu_unregister_thread();
+       return NULL;
+}
+
+static bool shutdown_application_notification_thread(void *data)
+{
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+       const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
+
+       return notify_thread_pipe(write_fd) == 1;
+}
+
+static void cleanup_application_notification_thread(void *data)
+{
+       struct thread_notifiers *notifiers = (thread_notifiers *) data;
+
+       lttng_pipe_destroy(notifiers->quit_pipe);
+       free(notifiers);
+}
+
+bool launch_application_notification_thread(int apps_cmd_notify_pipe_read_fd)
+{
+       struct lttng_thread *thread;
+       struct thread_notifiers *notifiers;
+       struct lttng_pipe *quit_pipe;
+
+       notifiers = (thread_notifiers *) zmalloc(sizeof(*notifiers));
+       if (!notifiers) {
+               goto error_alloc;
+       }
+       notifiers->apps_cmd_notify_pipe_read_fd = apps_cmd_notify_pipe_read_fd;
+
+       quit_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!quit_pipe) {
+               goto error;
+       }
+       notifiers->quit_pipe = quit_pipe;
+
+       thread = lttng_thread_create("Application notification",
+                       thread_application_notification,
+                       shutdown_application_notification_thread,
+                       cleanup_application_notification_thread,
+                       notifiers);
+       if (!thread) {
+               goto error;
+       }
+       lttng_thread_put(thread);
+       return true;
+error:
+       cleanup_application_notification_thread(notifiers);
+error_alloc:
+       return false;
+}
diff --git a/src/bin/lttng-sessiond/process-utils.c b/src/bin/lttng-sessiond/process-utils.c
deleted file mode 100644 (file)
index 2d629b5..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <signal.h>
-#include "lttng-sessiond.h"
-
-/* Notify parents that we are ready for cmd and health check */
-void sessiond_signal_parents(void)
-{
-       /*
-        * Notify parent pid that we are ready to accept command
-        * for client side.  This ppid is the one from the
-        * external process that spawned us.
-        */
-       if (the_config.sig_parent) {
-               kill(the_ppid, SIGUSR1);
-       }
-
-       /*
-        * Notify the parent of the fork() process that we are
-        * ready.
-        */
-       if (the_config.daemonize || the_config.background) {
-               kill(the_child_ppid, SIGUSR1);
-       }
-}
diff --git a/src/bin/lttng-sessiond/process-utils.cpp b/src/bin/lttng-sessiond/process-utils.cpp
new file mode 100644 (file)
index 0000000..2d629b5
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <signal.h>
+#include "lttng-sessiond.h"
+
+/* Notify parents that we are ready for cmd and health check */
+void sessiond_signal_parents(void)
+{
+       /*
+        * Notify the parent pid that we are ready to accept commands
+        * on the client side. This ppid is the one from the
+        * external process that spawned us.
+        */
+       if (the_config.sig_parent) {
+               kill(the_ppid, SIGUSR1);
+       }
+
+       /*
+        * Notify the parent of the fork() process that we are
+        * ready.
+        */
+       if (the_config.daemonize || the_config.background) {
+               kill(the_child_ppid, SIGUSR1);
+       }
+}
diff --git a/src/bin/lttng-sessiond/register.c b/src/bin/lttng-sessiond/register.c
deleted file mode 100644 (file)
index bd6cd52..0000000
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <stddef.h>
-#include <stdlib.h>
-#include <urcu.h>
-#include <common/futex.h>
-#include <common/macros.h>
-#include <common/shm.h>
-#include <common/utils.h>
-#include <sys/stat.h>
-
-#include "register.h"
-#include "lttng-sessiond.h"
-#include "testpoint.h"
-#include "health-sessiond.h"
-#include "fd-limit.h"
-#include "utils.h"
-#include "thread.h"
-
-struct thread_state {
-       struct lttng_pipe *quit_pipe;
-       struct ust_cmd_queue *ust_cmd_queue;
-       sem_t ready;
-       bool running;
-       int application_socket;
-};
-
-/*
- * Creates the application socket.
- */
-static int create_application_socket(void)
-{
-       int ret = 0;
-       int apps_sock;
-       const mode_t old_umask = umask(0);
-
-       /* Create the application unix socket */
-       apps_sock = lttcomm_create_unix_sock(
-                       the_config.apps_unix_sock_path.value);
-       if (apps_sock < 0) {
-               ERR("Create unix sock failed: %s",
-                               the_config.apps_unix_sock_path.value);
-               ret = -1;
-               goto end;
-       }
-
-       /* Set the cloexec flag */
-       ret = utils_set_fd_cloexec(apps_sock);
-       if (ret < 0) {
-               ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
-                               "Continuing but note that the consumer daemon will have a "
-                               "reference to this socket on exec()", apps_sock);
-       }
-
-       /* File permission MUST be 666 */
-       ret = chmod(the_config.apps_unix_sock_path.value,
-                       S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH |
-                                       S_IWOTH);
-       if (ret < 0) {
-               PERROR("Set file permissions failed on %s",
-                               the_config.apps_unix_sock_path.value);
-               goto error_close_socket;
-       }
-
-       DBG3("Session daemon application socket created (fd = %d) ", apps_sock);
-       ret = apps_sock;
-end:
-       umask(old_umask);
-       return ret;
-error_close_socket:
-       if (close(apps_sock)) {
-               PERROR("Failed to close application socket in error path");
-       }
-       apps_sock = -1;
-       ret = -1;
-       goto end;
-}
-
-/*
- * Notify UST applications using the shm mmap futex.
- */
-static int notify_ust_apps(int active, bool is_root)
-{
-       char *wait_shm_mmap;
-
-       DBG("Notifying applications of session daemon state: %d", active);
-
-       /* See shm.c for this call implying mmap, shm and futex calls */
-       wait_shm_mmap = shm_ust_get_mmap(
-                       the_config.wait_shm_path.value, is_root);
-       if (wait_shm_mmap == NULL) {
-               goto error;
-       }
-
-       /* Wake waiting process */
-       futex_wait_update((int32_t *) wait_shm_mmap, active);
-
-       /* Apps notified successfully */
-       return 0;
-
-error:
-       return -1;
-}
-
-static void cleanup_application_registration_thread(void *data)
-{
-       struct thread_state *thread_state = data;
-
-       if (!data) {
-               return;
-       }
-
-       lttng_pipe_destroy(thread_state->quit_pipe);
-       free(thread_state);
-}
-
-static void set_thread_status(struct thread_state *thread_state, bool running)
-{
-       DBG("Marking application registration thread's state as %s", running ? "running" : "error");
-       thread_state->running = running;
-       sem_post(&thread_state->ready);
-}
-
-static bool wait_thread_status(struct thread_state *thread_state)
-{
-       DBG("Waiting for application registration thread to be ready");
-       sem_wait(&thread_state->ready);
-       if (thread_state->running) {
-               DBG("Application registration thread is ready");
-       } else {
-               ERR("Initialization of application registration thread failed");
-       }
-
-       return thread_state->running;
-}
-
-static void thread_init_cleanup(void *data)
-{
-       struct thread_state *thread_state = data;
-
-       set_thread_status(thread_state, false);
-}
-
-/*
- * This thread manage application registration.
- */
-static void *thread_application_registration(void *data)
-{
-       int sock = -1, i, ret, pollfd, err = -1;
-       uint32_t revents, nb_fd;
-       struct lttng_poll_event events;
-       /*
-        * Gets allocated in this thread, enqueued to a global queue, dequeued
-        * and freed in the manage apps thread.
-        */
-       struct ust_command *ust_cmd = NULL;
-       const bool is_root = (getuid() == 0);
-       struct thread_state *thread_state = data;
-       const int application_socket = thread_state->application_socket;
-       const int quit_pipe_read_fd = lttng_pipe_get_readfd(
-                       thread_state->quit_pipe);
-
-       DBG("[thread] Manage application registration started");
-
-       pthread_cleanup_push(thread_init_cleanup, thread_state);
-       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);
-
-       ret = lttcomm_listen_unix_sock(application_socket);
-       if (ret < 0) {
-               goto error_listen;
-       }
-
-       /*
-        * Pass 2 as size here for the thread quit pipe and apps_sock. Nothing
-        * more will be added to this poll set.
-        */
-       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               goto error_create_poll;
-       }
-
-       /* Add the application registration socket */
-       ret = lttng_poll_add(&events, application_socket, LPOLLIN | LPOLLRDHUP);
-       if (ret < 0) {
-               goto error_poll_add;
-       }
-
-       /* Add the application registration socket */
-       ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLRDHUP);
-       if (ret < 0) {
-               goto error_poll_add;
-       }
-
-       set_thread_status(thread_state, true);
-       pthread_cleanup_pop(0);
-
-       if (testpoint(sessiond_thread_registration_apps)) {
-               goto error_poll_add;
-       }
-
-       while (1) {
-               DBG("Accepting application registration");
-
-               /* Inifinite blocking call, waiting for transmission */
-       restart:
-               health_poll_entry();
-               ret = lttng_poll_wait(&events, -1);
-               health_poll_exit();
-               if (ret < 0) {
-                       /*
-                        * Restart interrupted system call.
-                        */
-                       if (errno == EINTR) {
-                               goto restart;
-                       }
-                       goto error;
-               }
-
-               nb_fd = ret;
-
-               for (i = 0; i < nb_fd; i++) {
-                       health_code_update();
-
-                       /* Fetch once the poll data */
-                       revents = LTTNG_POLL_GETEV(&events, i);
-                       pollfd = LTTNG_POLL_GETFD(&events, i);
-
-                       /* Thread quit pipe has been closed. Killing thread. */
-                       if (pollfd == quit_pipe_read_fd) {
-                               err = 0;
-                               goto exit;
-                       } else {
-                               /* Event on the registration socket */
-                               if (revents & LPOLLIN) {
-                                       sock = lttcomm_accept_unix_sock(application_socket);
-                                       if (sock < 0) {
-                                               goto error;
-                                       }
-
-                                       /*
-                                        * Set socket timeout for both receiving and ending.
-                                        * app_socket_timeout is in seconds, whereas
-                                        * lttcomm_setsockopt_rcv_timeout and
-                                        * lttcomm_setsockopt_snd_timeout expect msec as
-                                        * parameter.
-                                        */
-                                       if (the_config.app_socket_timeout >= 0) {
-                                               (void) lttcomm_setsockopt_rcv_timeout(sock,
-                                                               the_config.app_socket_timeout * 1000);
-                                               (void) lttcomm_setsockopt_snd_timeout(sock,
-                                                               the_config.app_socket_timeout * 1000);
-                                       }
-
-                                       /*
-                                        * Set the CLOEXEC flag. Return code is useless because
-                                        * either way, the show must go on.
-                                        */
-                                       (void) utils_set_fd_cloexec(sock);
-
-                                       /* Create UST registration command for enqueuing */
-                                       ust_cmd = zmalloc(sizeof(struct ust_command));
-                                       if (ust_cmd == NULL) {
-                                               PERROR("ust command zmalloc");
-                                               ret = close(sock);
-                                               if (ret) {
-                                                       PERROR("close");
-                                               }
-                                               sock = -1;
-                                               goto error;
-                                       }
-
-                                       /*
-                                        * Using message-based transmissions to ensure we don't
-                                        * have to deal with partially received messages.
-                                        */
-                                       ret = lttng_fd_get(LTTNG_FD_APPS, 1);
-                                       if (ret < 0) {
-                                               ERR("Exhausted file descriptors allowed for applications.");
-                                               free(ust_cmd);
-                                               ret = close(sock);
-                                               if (ret) {
-                                                       PERROR("close");
-                                               }
-                                               sock = -1;
-                                               continue;
-                                       }
-
-                                       health_code_update();
-                                       ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
-                                       if (ret < 0) {
-                                               free(ust_cmd);
-                                               /* Close socket of the application. */
-                                               ret = close(sock);
-                                               if (ret) {
-                                                       PERROR("close");
-                                               }
-                                               lttng_fd_put(LTTNG_FD_APPS, 1);
-                                               sock = -1;
-                                               continue;
-                                       }
-                                       health_code_update();
-
-                                       ust_cmd->sock = sock;
-                                       sock = -1;
-
-                                       DBG("UST registration received with pid:%d ppid:%d uid:%d"
-                                                       " gid:%d sock:%d name:%s (version %d.%d)",
-                                                       ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
-                                                       ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
-                                                       ust_cmd->sock, ust_cmd->reg_msg.name,
-                                                       ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
-
-                                       /*
-                                        * Lock free enqueue the registration request. The red pill
-                                        * has been taken! This apps will be part of the *system*.
-                                        */
-                                       cds_wfcq_enqueue(&thread_state->ust_cmd_queue->head,
-                                                       &thread_state->ust_cmd_queue->tail,
-                                                       &ust_cmd->node);
-
-                                       /*
-                                        * Wake the registration queue futex. Implicit memory
-                                        * barrier with the exchange in cds_wfcq_enqueue.
-                                        */
-                                       futex_nto1_wake(&thread_state->ust_cmd_queue->futex);
-                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                                       ERR("Register apps socket poll error");
-                                       goto error;
-                               } else {
-                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                                       goto error;
-                               }
-                       }
-               }
-       }
-
-exit:
-error:
-       /* Notify that the registration thread is gone */
-       notify_ust_apps(0, is_root);
-
-       ret = close(application_socket);
-       if (ret) {
-               PERROR("Failed to close application registration socket");
-       }
-       if (sock >= 0) {
-               ret = close(sock);
-               if (ret) {
-                       PERROR("Failed to close application socket");
-               }
-               lttng_fd_put(LTTNG_FD_APPS, 1);
-       }
-       unlink(the_config.apps_unix_sock_path.value);
-
-error_poll_add:
-       lttng_poll_clean(&events);
-error_listen:
-error_create_poll:
-       DBG("UST Registration thread cleanup complete");
-       if (err) {
-               health_error();
-               ERR("Health error occurred in %s", __func__);
-       }
-       health_unregister(the_health_sessiond);
-       return NULL;
-}
-
-static bool shutdown_application_registration_thread(void *data)
-{
-       struct thread_state *thread_state = data;
-       const int write_fd = lttng_pipe_get_writefd(thread_state->quit_pipe);
-
-       return notify_thread_pipe(write_fd) == 1;
-}
-
-struct lttng_thread *launch_application_registration_thread(
-               struct ust_cmd_queue *cmd_queue)
-{
-       int ret;
-       struct lttng_pipe *quit_pipe;
-       struct thread_state *thread_state = NULL;
-       struct lttng_thread *thread = NULL;
-       const bool is_root = (getuid() == 0);
-       int application_socket = -1;
-
-       thread_state = zmalloc(sizeof(*thread_state));
-       if (!thread_state) {
-               goto error_alloc;
-       }
-       quit_pipe = lttng_pipe_open(FD_CLOEXEC);
-       if (!quit_pipe) {
-               goto error;
-       }
-       thread_state->quit_pipe = quit_pipe;
-       thread_state->ust_cmd_queue = cmd_queue;
-       application_socket = create_application_socket();
-       if (application_socket < 0) {
-               goto error;
-       }
-       thread_state->application_socket = application_socket;
-       sem_init(&thread_state->ready, 0, 0);
-
-       thread = lttng_thread_create("UST application registration",
-                       thread_application_registration,
-                       shutdown_application_registration_thread,
-                       cleanup_application_registration_thread,
-                       thread_state);
-       if (!thread) {
-               goto error;
-       }
-       /*
-        * The application registration thread now owns the application socket
-        * and the global thread state. The thread state is used to wait for
-        * the thread's status, but its ownership now belongs to the thread.
-        */
-       application_socket = -1;
-       if (!wait_thread_status(thread_state)) {
-               thread_state = NULL;
-               goto error;
-       }
-
-       /* Notify all applications to register. */
-       ret = notify_ust_apps(1, is_root);
-       if (ret < 0) {
-               ERR("Failed to notify applications or create the wait shared memory.\n"
-                       "Execution continues but there might be problems for already\n"
-                       "running applications that wishes to register.");
-       }
-
-       return thread;
-error:
-       lttng_thread_put(thread);
-       cleanup_application_registration_thread(thread_state);
-       if (application_socket >= 0) {
-               if (close(application_socket)) {
-                       PERROR("Failed to close application registration socket");
-               }
-       }
-error_alloc:
-       return NULL;
-}
diff --git a/src/bin/lttng-sessiond/register.cpp b/src/bin/lttng-sessiond/register.cpp
new file mode 100644 (file)
index 0000000..5bcc637
--- /dev/null
@@ -0,0 +1,450 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <urcu.h>
+#include <common/futex.h>
+#include <common/macros.h>
+#include <common/shm.h>
+#include <common/utils.h>
+#include <sys/stat.h>
+
+#include "register.h"
+#include "lttng-sessiond.h"
+#include "testpoint.h"
+#include "health-sessiond.h"
+#include "fd-limit.h"
+#include "utils.h"
+#include "thread.h"
+
+struct thread_state {
+       struct lttng_pipe *quit_pipe;
+       struct ust_cmd_queue *ust_cmd_queue;
+       sem_t ready;
+       bool running;
+       int application_socket;
+};
+
+/*
+ * Creates the application socket.
+ */
+static int create_application_socket(void)
+{
+       int ret = 0;
+       int apps_sock;
+       const mode_t old_umask = umask(0);
+
+       /* Create the application unix socket */
+       apps_sock = lttcomm_create_unix_sock(
+                       the_config.apps_unix_sock_path.value);
+       if (apps_sock < 0) {
+               ERR("Create unix sock failed: %s",
+                               the_config.apps_unix_sock_path.value);
+               ret = -1;
+               goto end;
+       }
+
+       /* Set the cloexec flag */
+       ret = utils_set_fd_cloexec(apps_sock);
+       if (ret < 0) {
+               ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
+                               "Continuing but note that the consumer daemon will have a "
+                               "reference to this socket on exec()", apps_sock);
+       }
+
+       /* File permission MUST be 666 */
+       ret = chmod(the_config.apps_unix_sock_path.value,
+                       S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH |
+                                       S_IWOTH);
+       if (ret < 0) {
+               PERROR("Set file permissions failed on %s",
+                               the_config.apps_unix_sock_path.value);
+               goto error_close_socket;
+       }
+
+       DBG3("Session daemon application socket created (fd = %d) ", apps_sock);
+       ret = apps_sock;
+end:
+       umask(old_umask);
+       return ret;
+error_close_socket:
+       if (close(apps_sock)) {
+               PERROR("Failed to close application socket in error path");
+       }
+       apps_sock = -1;
+       ret = -1;
+       goto end;
+}
+
+/*
+ * Notify UST applications using the shm mmap futex.
+ */
+static int notify_ust_apps(int active, bool is_root)
+{
+       char *wait_shm_mmap;
+
+       DBG("Notifying applications of session daemon state: %d", active);
+
+       /* See shm.c for this call implying mmap, shm and futex calls */
+       wait_shm_mmap = shm_ust_get_mmap(
+                       the_config.wait_shm_path.value, is_root);
+       if (wait_shm_mmap == NULL) {
+               goto error;
+       }
+
+       /* Wake waiting process */
+       futex_wait_update((int32_t *) wait_shm_mmap, active);
+
+       /* Apps notified successfully */
+       return 0;
+
+error:
+       return -1;
+}
+
+static void cleanup_application_registration_thread(void *data)
+{
+       struct thread_state *thread_state = (struct thread_state *) data;
+
+       if (!data) {
+               return;
+       }
+
+       lttng_pipe_destroy(thread_state->quit_pipe);
+       free(thread_state);
+}
+
+static void set_thread_status(struct thread_state *thread_state, bool running)
+{
+       DBG("Marking application registration thread's state as %s", running ? "running" : "error");
+       thread_state->running = running;
+       sem_post(&thread_state->ready);
+}
+
+static bool wait_thread_status(struct thread_state *thread_state)
+{
+       DBG("Waiting for application registration thread to be ready");
+       sem_wait(&thread_state->ready);
+       if (thread_state->running) {
+               DBG("Application registration thread is ready");
+       } else {
+               ERR("Initialization of application registration thread failed");
+       }
+
+       return thread_state->running;
+}
+
+static void thread_init_cleanup(void *data)
+{
+       struct thread_state *thread_state = (struct thread_state *) data;
+
+       set_thread_status(thread_state, false);
+}
+
+/*
+ * This thread manages application registration.
+ */
+static void *thread_application_registration(void *data)
+{
+       int sock = -1, i, ret, pollfd, err = -1;
+       uint32_t revents, nb_fd;
+       struct lttng_poll_event events;
+       /*
+        * Gets allocated in this thread, enqueued to a global queue, dequeued
+        * and freed in the manage apps thread.
+        */
+       struct ust_command *ust_cmd = NULL;
+       const bool is_root = (getuid() == 0);
+       struct thread_state *thread_state = (struct thread_state *) data;
+       const int application_socket = thread_state->application_socket;
+       const int quit_pipe_read_fd = lttng_pipe_get_readfd(
+                       thread_state->quit_pipe);
+
+       DBG("[thread] Manage application registration started");
+
+       pthread_cleanup_push(thread_init_cleanup, thread_state);
+       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);
+
+       ret = lttcomm_listen_unix_sock(application_socket);
+       if (ret < 0) {
+               goto error_listen;
+       }
+
+       /*
+        * Pass 2 as size here for the thread quit pipe and apps_sock. Nothing
+        * more will be added to this poll set.
+        */
+       ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error_create_poll;
+       }
+
+       /* Add the application registration socket */
+       ret = lttng_poll_add(&events, application_socket, LPOLLIN | LPOLLRDHUP);
+       if (ret < 0) {
+               goto error_poll_add;
+       }
+
+       /* Add the application registration socket */
+       ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLRDHUP);
+       if (ret < 0) {
+               goto error_poll_add;
+       }
+
+       set_thread_status(thread_state, true);
+       pthread_cleanup_pop(0);
+
+       if (testpoint(sessiond_thread_registration_apps)) {
+               goto error_poll_add;
+       }
+
+       while (1) {
+               DBG("Accepting application registration");
+
+               /* Infinite blocking call, waiting for transmission */
+       restart:
+               health_poll_entry();
+               ret = lttng_poll_wait(&events, -1);
+               health_poll_exit();
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               goto restart;
+                       }
+                       goto error;
+               }
+
+               nb_fd = ret;
+
+               for (i = 0; i < nb_fd; i++) {
+                       health_code_update();
+
+                       /* Fetch the poll data once */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
+
+                       /* Thread quit pipe has been closed. Killing thread. */
+                       if (pollfd == quit_pipe_read_fd) {
+                               err = 0;
+                               goto exit;
+                       } else {
+                               /* Event on the registration socket */
+                               if (revents & LPOLLIN) {
+                                       sock = lttcomm_accept_unix_sock(application_socket);
+                                       if (sock < 0) {
+                                               goto error;
+                                       }
+
+                                       /*
+                                        * Set socket timeout for both receiving and sending.
+                                        * app_socket_timeout is in seconds, whereas
+                                        * lttcomm_setsockopt_rcv_timeout and
+                                        * lttcomm_setsockopt_snd_timeout expect msec as
+                                        * parameter.
+                                        */
+                                       if (the_config.app_socket_timeout >= 0) {
+                                               (void) lttcomm_setsockopt_rcv_timeout(sock,
+                                                               the_config.app_socket_timeout * 1000);
+                                               (void) lttcomm_setsockopt_snd_timeout(sock,
+                                                               the_config.app_socket_timeout * 1000);
+                                       }
+
+                                       /*
+                                        * Set the CLOEXEC flag. Return code is useless because
+                                        * either way, the show must go on.
+                                        */
+                                       (void) utils_set_fd_cloexec(sock);
+
+                                       /* Create UST registration command for enqueuing */
+                                       ust_cmd = (ust_command *) zmalloc(sizeof(struct ust_command));
+                                       if (ust_cmd == NULL) {
+                                               PERROR("ust command zmalloc");
+                                               ret = close(sock);
+                                               if (ret) {
+                                                       PERROR("close");
+                                               }
+                                               sock = -1;
+                                               goto error;
+                                       }
+
+                                       /*
+                                        * Using message-based transmissions to ensure we don't
+                                        * have to deal with partially received messages.
+                                        */
+                                       ret = lttng_fd_get(LTTNG_FD_APPS, 1);
+                                       if (ret < 0) {
+                                               ERR("Exhausted file descriptors allowed for applications.");
+                                               free(ust_cmd);
+                                               ret = close(sock);
+                                               if (ret) {
+                                                       PERROR("close");
+                                               }
+                                               sock = -1;
+                                               continue;
+                                       }
+
+                                       health_code_update();
+                                       ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
+                                       if (ret < 0) {
+                                               free(ust_cmd);
+                                               /* Close socket of the application. */
+                                               ret = close(sock);
+                                               if (ret) {
+                                                       PERROR("close");
+                                               }
+                                               lttng_fd_put(LTTNG_FD_APPS, 1);
+                                               sock = -1;
+                                               continue;
+                                       }
+                                       health_code_update();
+
+                                       ust_cmd->sock = sock;
+                                       sock = -1;
+
+                                       DBG("UST registration received with pid:%d ppid:%d uid:%d"
+                                                       " gid:%d sock:%d name:%s (version %d.%d)",
+                                                       ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
+                                                       ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
+                                                       ust_cmd->sock, ust_cmd->reg_msg.name,
+                                                       ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
+
+                                       /*
+                                        * Lock free enqueue the registration request. The red pill
+                                        * has been taken! This app will be part of the *system*.
+                                        */
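+                                       /*
+                                        * C++ rejects the implicit conversion through the
+                                        * cds_wfcq_head_ptr_t transparent union, so build
+                                        * the union explicitly before enqueueing.
+                                        */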
+                                       cds_wfcq_head_ptr_t head;
+                                       head.h = &thread_state->ust_cmd_queue->head;
+                                       cds_wfcq_enqueue(head,
+                                                       &thread_state->ust_cmd_queue->tail,
+                                                       &ust_cmd->node);
+
+                                       /*
+                                        * Wake the registration queue futex. Implicit memory
+                                        * barrier with the exchange in cds_wfcq_enqueue.
+                                        */
+                                       futex_nto1_wake(&thread_state->ust_cmd_queue->futex);
+                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       ERR("Register apps socket poll error");
+                                       goto error;
+                               } else {
+                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                                       goto error;
+                               }
+                       }
+               }
+       }
+
+exit:
+error:
+       /* Notify that the registration thread is gone */
+       notify_ust_apps(0, is_root);
+
+       ret = close(application_socket);
+       if (ret) {
+               PERROR("Failed to close application registration socket");
+       }
+       if (sock >= 0) {
+               ret = close(sock);
+               if (ret) {
+                       PERROR("Failed to close application socket");
+               }
+               lttng_fd_put(LTTNG_FD_APPS, 1);
+       }
+       unlink(the_config.apps_unix_sock_path.value);
+
+error_poll_add:
+       lttng_poll_clean(&events);
+error_listen:
+error_create_poll:
+       DBG("UST Registration thread cleanup complete");
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister(the_health_sessiond);
+       return NULL;
+}
+
+static bool shutdown_application_registration_thread(void *data)
+{
+       struct thread_state *thread_state = (struct thread_state *) data;
+       const int write_fd = lttng_pipe_get_writefd(thread_state->quit_pipe);
+
+       return notify_thread_pipe(write_fd) == 1;
+}
+
+struct lttng_thread *launch_application_registration_thread(
+               struct ust_cmd_queue *cmd_queue)
+{
+       int ret;
+       struct lttng_pipe *quit_pipe;
+       struct thread_state *thread_state = NULL;
+       struct lttng_thread *thread = NULL;
+       const bool is_root = (getuid() == 0);
+       int application_socket = -1;
+
+       thread_state = (struct thread_state *) zmalloc(sizeof(*thread_state));
+       if (!thread_state) {
+               goto error_alloc;
+       }
+       quit_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!quit_pipe) {
+               goto error;
+       }
+       thread_state->quit_pipe = quit_pipe;
+       thread_state->ust_cmd_queue = cmd_queue;
+       application_socket = create_application_socket();
+       if (application_socket < 0) {
+               goto error;
+       }
+       thread_state->application_socket = application_socket;
+       sem_init(&thread_state->ready, 0, 0);
+
+       thread = lttng_thread_create("UST application registration",
+                       thread_application_registration,
+                       shutdown_application_registration_thread,
+                       cleanup_application_registration_thread,
+                       thread_state);
+       if (!thread) {
+               goto error;
+       }
+       /*
+        * The application registration thread now owns the application socket
+        * and the global thread state. The thread state is used to wait for
+        * the thread's status, but its ownership now belongs to the thread.
+        */
+       application_socket = -1;
+       if (!wait_thread_status(thread_state)) {
+               thread_state = NULL;
+               goto error;
+       }
+
+       /* Notify all applications to register. */
+       ret = notify_ust_apps(1, is_root);
+       if (ret < 0) {
+               ERR("Failed to notify applications or create the wait shared memory.\n"
+                       "Execution continues but there might be problems for already\n"
+                       "running applications that wish to register.");
+       }
+
+       return thread;
+error:
+       lttng_thread_put(thread);
+       cleanup_application_registration_thread(thread_state);
+       if (application_socket >= 0) {
+               if (close(application_socket)) {
+                       PERROR("Failed to close application registration socket");
+               }
+       }
+error_alloc:
+       return NULL;
+}
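
Note on the enqueue above: liburcu's cds_wfcq_enqueue() takes a cds_wfcq_head_ptr_t, a transparent union that C callers can populate implicitly but C++ cannot, hence the explicit head.h assignment in thread_application_registration(). A minimal sketch of that pattern, using a hypothetical helper name and only the liburcu API already used above:

    #include <urcu/wfcqueue.h>

    /*
     * Hypothetical helper (not part of this change): a C caller could pass
     * queue_head directly and let the transparent union do the work; in C++
     * the union member has to be set explicitly before the call.
     */
    static void enqueue_registration_node(struct cds_wfcq_head *queue_head,
                    struct cds_wfcq_tail *queue_tail,
                    struct cds_wfcq_node *node)
    {
            cds_wfcq_head_ptr_t head;

            head.h = queue_head;
            cds_wfcq_enqueue(head, queue_tail, node);
    }
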
diff --git a/src/bin/lttng-sessiond/rotate.c b/src/bin/lttng-sessiond/rotate.c
deleted file mode 100644 (file)
index cdf95f3..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (C) 2017 Julien Desfossez <jdesfossez@efficios.com>
- * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <lttng/trigger/trigger.h>
-#include <common/error.h>
-#include <common/config/session-config.h>
-#include <common/defaults.h>
-#include <common/utils.h>
-#include <common/futex.h>
-#include <common/align.h>
-#include <common/time.h>
-#include <common/hashtable/utils.h>
-#include <common/kernel-ctl/kernel-ctl.h>
-#include <common/credentials.h>
-#include <sys/stat.h>
-#include <time.h>
-#include <signal.h>
-#include <inttypes.h>
-
-#include <lttng/notification/channel-internal.h>
-#include <lttng/rotate-internal.h>
-
-#include "session.h"
-#include "rotate.h"
-#include "rotation-thread.h"
-#include "lttng-sessiond.h"
-#include "health-sessiond.h"
-#include "cmd.h"
-#include "utils.h"
-#include "notification-thread-commands.h"
-
-#include <urcu.h>
-#include <urcu/list.h>
-#include <urcu/rculfhash.h>
-
-int subscribe_session_consumed_size_rotation(struct ltt_session *session, uint64_t size,
-               struct notification_thread_handle *notification_thread_handle)
-{
-       int ret;
-       enum lttng_condition_status condition_status;
-       enum lttng_notification_channel_status nc_status;
-       struct lttng_action *action;
-       const struct lttng_credentials session_creds = {
-               .uid = LTTNG_OPTIONAL_INIT_VALUE(session->uid),
-               .gid = LTTNG_OPTIONAL_INIT_VALUE(session->gid),
-       };
-
-       session->rotate_condition = lttng_condition_session_consumed_size_create();
-       if (!session->rotate_condition) {
-               ERR("Failed to create session consumed size condition object");
-               ret = -1;
-               goto end;
-       }
-
-       condition_status = lttng_condition_session_consumed_size_set_threshold(
-                       session->rotate_condition, size);
-       if (condition_status != LTTNG_CONDITION_STATUS_OK) {
-               ERR("Could not set session consumed size condition threshold (size = %" PRIu64 ")",
-                               size);
-               ret = -1;
-               goto end;
-       }
-
-       condition_status =
-                       lttng_condition_session_consumed_size_set_session_name(
-                               session->rotate_condition, session->name);
-       if (condition_status != LTTNG_CONDITION_STATUS_OK) {
-               ERR("Could not set session consumed size condition session name (name = %s)",
-                               session->name);
-               ret = -1;
-               goto end;
-       }
-
-       action = lttng_action_notify_create();
-       if (!action) {
-               ERR("Could not create notify action");
-               ret = -1;
-               goto end;
-       }
-
-       session->rotate_trigger = lttng_trigger_create(session->rotate_condition,
-                       action);
-       if (!session->rotate_trigger) {
-               ERR("Could not create size-based rotation trigger");
-               ret = -1;
-               goto end;
-       }
-
-       /* Ensure this trigger is not visible to external users. */
-       lttng_trigger_set_hidden(session->rotate_trigger);
-       lttng_trigger_set_credentials(
-                       session->rotate_trigger, &session_creds);
-
-       nc_status = lttng_notification_channel_subscribe(
-                       rotate_notification_channel, session->rotate_condition);
-       if (nc_status != LTTNG_NOTIFICATION_CHANNEL_STATUS_OK) {
-               ERR("Could not subscribe to session consumed size notification");
-               ret = -1;
-               goto end;
-       }
-
-       ret = notification_thread_command_register_trigger(
-                       notification_thread_handle, session->rotate_trigger,
-                       true);
-       if (ret < 0 && ret != -LTTNG_ERR_TRIGGER_EXISTS) {
-               ERR("Register trigger, %s", lttng_strerror(ret));
-               ret = -1;
-               goto end;
-       }
-
-       ret = 0;
-
-end:
-       return ret;
-}
-
-int unsubscribe_session_consumed_size_rotation(struct ltt_session *session,
-               struct notification_thread_handle *notification_thread_handle)
-{
-       int ret = 0;
-       enum lttng_notification_channel_status status;
-
-       status = lttng_notification_channel_unsubscribe(
-                       rotate_notification_channel,
-                       session->rotate_condition);
-       if (status != LTTNG_NOTIFICATION_CHANNEL_STATUS_OK) {
-               ERR("Session unsubscribe error: %d", (int) status);
-               ret = -1;
-               goto end;
-       }
-
-       ret = notification_thread_command_unregister_trigger(
-                       notification_thread_handle, session->rotate_trigger);
-       if (ret != LTTNG_OK) {
-               ERR("Session unregister trigger error: %d", ret);
-               goto end;
-       }
-
-       ret = 0;
-end:
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/rotate.cpp b/src/bin/lttng-sessiond/rotate.cpp
new file mode 100644 (file)
index 0000000..cdf95f3
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2017 Julien Desfossez <jdesfossez@efficios.com>
+ * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <lttng/trigger/trigger.h>
+#include <common/error.h>
+#include <common/config/session-config.h>
+#include <common/defaults.h>
+#include <common/utils.h>
+#include <common/futex.h>
+#include <common/align.h>
+#include <common/time.h>
+#include <common/hashtable/utils.h>
+#include <common/kernel-ctl/kernel-ctl.h>
+#include <common/credentials.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <signal.h>
+#include <inttypes.h>
+
+#include <lttng/notification/channel-internal.h>
+#include <lttng/rotate-internal.h>
+
+#include "session.h"
+#include "rotate.h"
+#include "rotation-thread.h"
+#include "lttng-sessiond.h"
+#include "health-sessiond.h"
+#include "cmd.h"
+#include "utils.h"
+#include "notification-thread-commands.h"
+
+#include <urcu.h>
+#include <urcu/list.h>
+#include <urcu/rculfhash.h>
+
+int subscribe_session_consumed_size_rotation(struct ltt_session *session, uint64_t size,
+               struct notification_thread_handle *notification_thread_handle)
+{
+       int ret;
+       enum lttng_condition_status condition_status;
+       enum lttng_notification_channel_status nc_status;
+       struct lttng_action *action;
+       const struct lttng_credentials session_creds = {
+               .uid = LTTNG_OPTIONAL_INIT_VALUE(session->uid),
+               .gid = LTTNG_OPTIONAL_INIT_VALUE(session->gid),
+       };
+
+       session->rotate_condition = lttng_condition_session_consumed_size_create();
+       if (!session->rotate_condition) {
+               ERR("Failed to create session consumed size condition object");
+               ret = -1;
+               goto end;
+       }
+
+       condition_status = lttng_condition_session_consumed_size_set_threshold(
+                       session->rotate_condition, size);
+       if (condition_status != LTTNG_CONDITION_STATUS_OK) {
+               ERR("Could not set session consumed size condition threshold (size = %" PRIu64 ")",
+                               size);
+               ret = -1;
+               goto end;
+       }
+
+       condition_status =
+                       lttng_condition_session_consumed_size_set_session_name(
+                               session->rotate_condition, session->name);
+       if (condition_status != LTTNG_CONDITION_STATUS_OK) {
+               ERR("Could not set session consumed size condition session name (name = %s)",
+                               session->name);
+               ret = -1;
+               goto end;
+       }
+
+       action = lttng_action_notify_create();
+       if (!action) {
+               ERR("Could not create notify action");
+               ret = -1;
+               goto end;
+       }
+
+       session->rotate_trigger = lttng_trigger_create(session->rotate_condition,
+                       action);
+       if (!session->rotate_trigger) {
+               ERR("Could not create size-based rotation trigger");
+               ret = -1;
+               goto end;
+       }
+
+       /* Ensure this trigger is not visible to external users. */
+       lttng_trigger_set_hidden(session->rotate_trigger);
+       lttng_trigger_set_credentials(
+                       session->rotate_trigger, &session_creds);
+
+       nc_status = lttng_notification_channel_subscribe(
+                       rotate_notification_channel, session->rotate_condition);
+       if (nc_status != LTTNG_NOTIFICATION_CHANNEL_STATUS_OK) {
+               ERR("Could not subscribe to session consumed size notification");
+               ret = -1;
+               goto end;
+       }
+
+       ret = notification_thread_command_register_trigger(
+                       notification_thread_handle, session->rotate_trigger,
+                       true);
+       if (ret < 0 && ret != -LTTNG_ERR_TRIGGER_EXISTS) {
+               ERR("Register trigger, %s", lttng_strerror(ret));
+               ret = -1;
+               goto end;
+       }
+
+       ret = 0;
+
+end:
+       return ret;
+}
+
+int unsubscribe_session_consumed_size_rotation(struct ltt_session *session,
+               struct notification_thread_handle *notification_thread_handle)
+{
+       int ret = 0;
+       enum lttng_notification_channel_status status;
+
+       status = lttng_notification_channel_unsubscribe(
+                       rotate_notification_channel,
+                       session->rotate_condition);
+       if (status != LTTNG_NOTIFICATION_CHANNEL_STATUS_OK) {
+               ERR("Session unsubscribe error: %d", (int) status);
+               ret = -1;
+               goto end;
+       }
+
+       ret = notification_thread_command_unregister_trigger(
+                       notification_thread_handle, session->rotate_trigger);
+       if (ret != LTTNG_OK) {
+               ERR("Session unregister trigger error: %d", ret);
+               goto end;
+       }
+
+       ret = 0;
+end:
+       return ret;
+}
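
For readers unfamiliar with the trigger plumbing used by subscribe_session_consumed_size_rotation() above: the condition, action and trigger objects come from the public lttng-ctl API. A hypothetical, condensed sketch of the same wiring, reusing only the calls that appear above (cleanup on error and the sessiond-internal hidden-trigger, credentials and subscription steps are left out):

    #include <lttng/lttng.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical sketch; mirrors the calls made above and nothing more. */
    static struct lttng_trigger *make_consumed_size_trigger(
                    const char *session_name, uint64_t threshold_bytes)
    {
            struct lttng_trigger *trigger = NULL;
            struct lttng_condition *condition =
                            lttng_condition_session_consumed_size_create();
            struct lttng_action *action = lttng_action_notify_create();

            if (!condition || !action) {
                    goto end;
            }

            if (lttng_condition_session_consumed_size_set_threshold(condition,
                            threshold_bytes) != LTTNG_CONDITION_STATUS_OK) {
                    goto end;
            }

            if (lttng_condition_session_consumed_size_set_session_name(condition,
                            session_name) != LTTNG_CONDITION_STATUS_OK) {
                    goto end;
            }

            /* The trigger associates the consumed-size condition with a notify action. */
            trigger = lttng_trigger_create(condition, action);
    end:
            return trigger;
    }
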
diff --git a/src/bin/lttng-sessiond/rotation-thread.c b/src/bin/lttng-sessiond/rotation-thread.c
deleted file mode 100644 (file)
index 3373443..0000000
+++ /dev/null
@@ -1,896 +0,0 @@
-/*
- * Copyright (C) 2017 Julien Desfossez <jdesfossez@efficios.com>
- * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <lttng/trigger/trigger.h>
-#include <common/error.h>
-#include <common/config/session-config.h>
-#include <common/defaults.h>
-#include <common/utils.h>
-#include <common/futex.h>
-#include <common/align.h>
-#include <common/time.h>
-#include <common/hashtable/utils.h>
-#include <sys/stat.h>
-#include <time.h>
-#include <signal.h>
-#include <inttypes.h>
-
-#include <common/kernel-ctl/kernel-ctl.h>
-#include <lttng/notification/channel-internal.h>
-#include <lttng/rotate-internal.h>
-#include <lttng/location-internal.h>
-
-#include "rotation-thread.h"
-#include "lttng-sessiond.h"
-#include "health-sessiond.h"
-#include "rotate.h"
-#include "cmd.h"
-#include "session.h"
-#include "timer.h"
-#include "notification-thread-commands.h"
-#include "utils.h"
-#include "thread.h"
-
-#include <urcu.h>
-#include <urcu/list.h>
-
-struct lttng_notification_channel *rotate_notification_channel = NULL;
-
-struct rotation_thread {
-       struct lttng_poll_event events;
-};
-
-struct rotation_thread_job {
-       enum rotation_thread_job_type type;
-       struct ltt_session *session;
-       /* List member in struct rotation_thread_timer_queue. */
-       struct cds_list_head head;
-};
-
-/*
- * The timer thread enqueues jobs and wakes up the rotation thread.
- * When the rotation thread wakes up, it empties the queue.
- */
-struct rotation_thread_timer_queue {
-       struct lttng_pipe *event_pipe;
-       struct cds_list_head list;
-       pthread_mutex_t lock;
-};
-
-struct rotation_thread_handle {
-       struct rotation_thread_timer_queue *rotation_timer_queue;
-       /* Access to the notification thread cmd_queue */
-       struct notification_thread_handle *notification_thread_handle;
-       /* Thread-specific quit pipe. */
-       struct lttng_pipe *quit_pipe;
-};
-
-static
-const char *get_job_type_str(enum rotation_thread_job_type job_type)
-{
-       switch (job_type) {
-       case ROTATION_THREAD_JOB_TYPE_CHECK_PENDING_ROTATION:
-               return "CHECK_PENDING_ROTATION";
-       case ROTATION_THREAD_JOB_TYPE_SCHEDULED_ROTATION:
-               return "SCHEDULED_ROTATION";
-       default:
-               abort();
-       }
-}
-
-struct rotation_thread_timer_queue *rotation_thread_timer_queue_create(void)
-{
-       struct rotation_thread_timer_queue *queue = NULL;
-
-       queue = zmalloc(sizeof(*queue));
-       if (!queue) {
-               PERROR("Failed to allocate timer rotate queue");
-               goto end;
-       }
-
-       queue->event_pipe = lttng_pipe_open(FD_CLOEXEC | O_NONBLOCK);
-       CDS_INIT_LIST_HEAD(&queue->list);
-       pthread_mutex_init(&queue->lock, NULL);
-end:
-       return queue;
-}
-
-void rotation_thread_timer_queue_destroy(
-               struct rotation_thread_timer_queue *queue)
-{
-       if (!queue) {
-               return;
-       }
-
-       lttng_pipe_destroy(queue->event_pipe);
-
-       pthread_mutex_lock(&queue->lock);
-       LTTNG_ASSERT(cds_list_empty(&queue->list));
-       pthread_mutex_unlock(&queue->lock);
-       pthread_mutex_destroy(&queue->lock);
-       free(queue);
-}
-
-/*
- * Destroy the thread data previously created by the init function.
- */
-void rotation_thread_handle_destroy(
-               struct rotation_thread_handle *handle)
-{
-       lttng_pipe_destroy(handle->quit_pipe);
-       free(handle);
-}
-
-struct rotation_thread_handle *rotation_thread_handle_create(
-               struct rotation_thread_timer_queue *rotation_timer_queue,
-               struct notification_thread_handle *notification_thread_handle)
-{
-       struct rotation_thread_handle *handle;
-
-       handle = zmalloc(sizeof(*handle));
-       if (!handle) {
-               goto end;
-       }
-
-       handle->rotation_timer_queue = rotation_timer_queue;
-       handle->notification_thread_handle = notification_thread_handle;
-       handle->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
-       if (!handle->quit_pipe) {
-               goto error;
-       }
-
-end:
-       return handle;
-error:
-       rotation_thread_handle_destroy(handle);
-       return NULL;
-}
-
-/*
- * Called with the rotation_thread_timer_queue lock held.
- * Return true if the same timer job already exists in the queue, false if not.
- */
-static
-bool timer_job_exists(const struct rotation_thread_timer_queue *queue,
-               enum rotation_thread_job_type job_type,
-               struct ltt_session *session)
-{
-       bool exists = false;
-       struct rotation_thread_job *job;
-
-       cds_list_for_each_entry(job, &queue->list, head) {
-               if (job->session == session && job->type == job_type) {
-                       exists = true;
-                       goto end;
-               }
-       }
-end:
-       return exists;
-}
-
-void rotation_thread_enqueue_job(struct rotation_thread_timer_queue *queue,
-               enum rotation_thread_job_type job_type,
-               struct ltt_session *session)
-{
-       int ret;
-       const char dummy = '!';
-       struct rotation_thread_job *job = NULL;
-       const char *job_type_str = get_job_type_str(job_type);
-
-       pthread_mutex_lock(&queue->lock);
-       if (timer_job_exists(queue, job_type, session)) {
-               /*
-                * This timer job is already pending, we don't need to add
-                * it.
-                */
-               goto end;
-       }
-
-       job = zmalloc(sizeof(struct rotation_thread_job));
-       if (!job) {
-               PERROR("Failed to allocate rotation thread job of type \"%s\" for session \"%s\"",
-                               job_type_str, session->name);
-               goto end;
-       }
-       /* No reason for this to fail as the caller must hold a reference. */
-       (void) session_get(session);
-
-       job->session = session;
-       job->type = job_type;
-       cds_list_add_tail(&job->head, &queue->list);
-
-       ret = lttng_write(lttng_pipe_get_writefd(queue->event_pipe), &dummy,
-                       sizeof(dummy));
-       if (ret < 0) {
-               /*
-                * We do not want to block in the timer handler, the job has
-                * been enqueued in the list, the wakeup pipe is probably full,
-                * the job will be processed when the rotation_thread catches
-                * up.
-                */
-               if (errno == EAGAIN || errno == EWOULDBLOCK) {
-                       /*
-                        * Not an error, but would be surprising and indicate
-                        * that the rotation thread can't keep up with the
-                        * current load.
-                        */
-                       DBG("Wake-up pipe of rotation thread job queue is full");
-                       goto end;
-               }
-               PERROR("Failed to wake-up the rotation thread after pushing a job of type \"%s\" for session \"%s\"",
-                               job_type_str, session->name);
-               goto end;
-       }
-
-end:
-       pthread_mutex_unlock(&queue->lock);
-}
-
-static
-int init_poll_set(struct lttng_poll_event *poll_set,
-               struct rotation_thread_handle *handle)
-{
-       int ret;
-
-       /*
-        * Create pollset with size 3:
-        *      - rotation thread quit pipe,
-        *      - rotation thread timer queue pipe,
-        *      - notification channel sock,
-        */
-       ret = lttng_poll_create(poll_set, 5, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               goto error;
-       }
-
-       ret = lttng_poll_add(poll_set,
-                       lttng_pipe_get_readfd(handle->quit_pipe),
-                       LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               ERR("Failed to add quit pipe read fd to poll set");
-               goto error;
-       }
-
-       ret = lttng_poll_add(poll_set,
-                       lttng_pipe_get_readfd(handle->rotation_timer_queue->event_pipe),
-                       LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               ERR("Failed to add rotate_pending fd to poll set");
-               goto error;
-       }
-
-       return ret;
-error:
-       lttng_poll_clean(poll_set);
-       return ret;
-}
-
-static
-void fini_thread_state(struct rotation_thread *state)
-{
-       lttng_poll_clean(&state->events);
-       if (rotate_notification_channel) {
-               lttng_notification_channel_destroy(rotate_notification_channel);
-       }
-}
-
-static
-int init_thread_state(struct rotation_thread_handle *handle,
-               struct rotation_thread *state)
-{
-       int ret;
-
-       memset(state, 0, sizeof(*state));
-       lttng_poll_init(&state->events);
-
-       ret = init_poll_set(&state->events, handle);
-       if (ret) {
-               ERR("Failed to initialize rotation thread poll set");
-               goto end;
-       }
-
-       rotate_notification_channel = lttng_notification_channel_create(
-                       lttng_session_daemon_notification_endpoint);
-       if (!rotate_notification_channel) {
-               ERR("Could not create notification channel");
-               ret = -1;
-               goto end;
-       }
-       ret = lttng_poll_add(&state->events, rotate_notification_channel->socket,
-                       LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               ERR("Failed to add notification fd to pollset");
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-static
-void check_session_rotation_pending_on_consumers(struct ltt_session *session,
-               bool *_rotation_completed)
-{
-       int ret = 0;
-       struct consumer_socket *socket;
-       struct cds_lfht_iter iter;
-       enum consumer_trace_chunk_exists_status exists_status;
-       uint64_t relayd_id;
-       bool chunk_exists_on_peer = false;
-       enum lttng_trace_chunk_status chunk_status;
-
-       LTTNG_ASSERT(session->chunk_being_archived);
-
-       /*
-        * Check for a local pending rotation on all consumers (32-bit
-        * user space, 64-bit user space, and kernel).
-        */
-       rcu_read_lock();
-       if (!session->ust_session) {
-               goto skip_ust;
-       }
-       cds_lfht_for_each_entry(session->ust_session->consumer->socks->ht,
-                       &iter, socket, node.node) {
-               relayd_id = session->ust_session->consumer->type == CONSUMER_DST_LOCAL ?
-                               -1ULL :
-                               session->ust_session->consumer->net_seq_index;
-
-               pthread_mutex_lock(socket->lock);
-               ret = consumer_trace_chunk_exists(socket,
-                               relayd_id,
-                               session->id, session->chunk_being_archived,
-                               &exists_status);
-               if (ret) {
-                       pthread_mutex_unlock(socket->lock);
-                       ERR("Error occurred while checking rotation status on consumer daemon");
-                       goto end;
-               }
-
-               if (exists_status != CONSUMER_TRACE_CHUNK_EXISTS_STATUS_UNKNOWN_CHUNK) {
-                       pthread_mutex_unlock(socket->lock);
-                       chunk_exists_on_peer = true;
-                       goto end;
-               }
-               pthread_mutex_unlock(socket->lock);
-       }
-
-skip_ust:
-       if (!session->kernel_session) {
-               goto skip_kernel;
-       }
-       cds_lfht_for_each_entry(session->kernel_session->consumer->socks->ht,
-                               &iter, socket, node.node) {
-               pthread_mutex_lock(socket->lock);
-               relayd_id = session->kernel_session->consumer->type == CONSUMER_DST_LOCAL ?
-                               -1ULL :
-                               session->kernel_session->consumer->net_seq_index;
-
-               ret = consumer_trace_chunk_exists(socket,
-                               relayd_id,
-                               session->id, session->chunk_being_archived,
-                               &exists_status);
-               if (ret) {
-                       pthread_mutex_unlock(socket->lock);
-                       ERR("Error occurred while checking rotation status on consumer daemon");
-                       goto end;
-               }
-
-               if (exists_status != CONSUMER_TRACE_CHUNK_EXISTS_STATUS_UNKNOWN_CHUNK) {
-                       pthread_mutex_unlock(socket->lock);
-                       chunk_exists_on_peer = true;
-                       goto end;
-               }
-               pthread_mutex_unlock(socket->lock);
-       }
-skip_kernel:
-end:
-       rcu_read_unlock();
-
-       if (!chunk_exists_on_peer) {
-               uint64_t chunk_being_archived_id;
-
-               chunk_status = lttng_trace_chunk_get_id(
-                               session->chunk_being_archived,
-                               &chunk_being_archived_id);
-               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-               DBG("Rotation of trace archive %" PRIu64 " of session \"%s\" is complete on all consumers",
-                               chunk_being_archived_id,
-                               session->name);
-       }
-       *_rotation_completed = !chunk_exists_on_peer;
-       if (ret) {
-               ret = session_reset_rotation_state(session,
-                               LTTNG_ROTATION_STATE_ERROR);
-               if (ret) {
-                       ERR("Failed to reset rotation state of session \"%s\"",
-                                       session->name);
-               }
-       }
-}
-
-/*
- * Check if the last rotation was completed, called with session lock held.
- * Should only return non-zero in the event of a fatal error. Doing so will
- * shutdown the thread.
- */
-static
-int check_session_rotation_pending(struct ltt_session *session,
-               struct notification_thread_handle *notification_thread_handle)
-{
-       int ret;
-       struct lttng_trace_archive_location *location;
-       enum lttng_trace_chunk_status chunk_status;
-       bool rotation_completed = false;
-       const char *archived_chunk_name;
-       uint64_t chunk_being_archived_id;
-
-       if (!session->chunk_being_archived) {
-               ret = 0;
-               goto end;
-       }
-
-       chunk_status = lttng_trace_chunk_get_id(session->chunk_being_archived,
-                       &chunk_being_archived_id);
-       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-
-       DBG("Checking for pending rotation on session \"%s\", trace archive %" PRIu64,
-                       session->name, chunk_being_archived_id);
-
-       /*
-        * The rotation-pending check timer of a session is launched in
-        * one-shot mode. If the rotation is incomplete, the rotation
-        * thread will re-enable the pending-check timer.
-        *
-        * The timer thread can't stop the timer itself since it is involved
-        * in the check for the timer's quiescence.
-        */
-       ret = timer_session_rotation_pending_check_stop(session);
-       if (ret) {
-               goto check_ongoing_rotation;
-       }
-
-       check_session_rotation_pending_on_consumers(session,
-                       &rotation_completed);
-       if (!rotation_completed ||
-                       session->rotation_state == LTTNG_ROTATION_STATE_ERROR) {
-               goto check_ongoing_rotation;
-       }
-
-       /*
-        * Now we can clear the "ONGOING" state in the session. New
-        * rotations can start now.
-        */
-       chunk_status = lttng_trace_chunk_get_name(session->chunk_being_archived,
-                       &archived_chunk_name, NULL);
-       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-       free(session->last_archived_chunk_name);
-       session->last_archived_chunk_name = strdup(archived_chunk_name);
-       if (!session->last_archived_chunk_name) {
-               PERROR("Failed to duplicate archived chunk name");
-       }
-       session_reset_rotation_state(session, LTTNG_ROTATION_STATE_COMPLETED);
-
-       if (!session->quiet_rotation) {
-               location = session_get_trace_archive_location(session);
-               ret = notification_thread_command_session_rotation_completed(
-                               notification_thread_handle,
-                               session->name,
-                               session->uid,
-                               session->gid,
-                               session->last_archived_chunk_id.value,
-                               location);
-               lttng_trace_archive_location_put(location);
-               if (ret != LTTNG_OK) {
-                       ERR("Failed to notify notification thread of completed rotation for session %s",
-                                       session->name);
-               }
-       }
-
-       ret = 0;
-check_ongoing_rotation:
-       if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
-               chunk_status = lttng_trace_chunk_get_id(
-                               session->chunk_being_archived,
-                               &chunk_being_archived_id);
-               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-
-               DBG("Rotation of trace archive %" PRIu64 " is still pending for session %s",
-                               chunk_being_archived_id, session->name);
-               ret = timer_session_rotation_pending_check_start(session,
-                               DEFAULT_ROTATE_PENDING_TIMER);
-               if (ret) {
-                       ERR("Failed to re-enable rotation pending timer");
-                       ret = -1;
-                       goto end;
-               }
-       }
-
-end:
-       return ret;
-}
-
-/* Call with the session and session_list locks held. */
-static
-int launch_session_rotation(struct ltt_session *session)
-{
-       int ret;
-       struct lttng_rotate_session_return rotation_return;
-
-       DBG("Launching scheduled time-based rotation on session \"%s\"",
-                       session->name);
-
-       ret = cmd_rotate_session(session, &rotation_return, false,
-               LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
-       if (ret == LTTNG_OK) {
-               DBG("Scheduled time-based rotation successfully launched on session \"%s\"",
-                               session->name);
-       } else {
-               /* Don't consider errors as fatal. */
-               DBG("Scheduled time-based rotation aborted for session %s: %s",
-                               session->name, lttng_strerror(ret));
-       }
-       return 0;
-}
-
-static
-int run_job(struct rotation_thread_job *job, struct ltt_session *session,
-               struct notification_thread_handle *notification_thread_handle)
-{
-       int ret;
-
-       switch (job->type) {
-       case ROTATION_THREAD_JOB_TYPE_SCHEDULED_ROTATION:
-               ret = launch_session_rotation(session);
-               break;
-       case ROTATION_THREAD_JOB_TYPE_CHECK_PENDING_ROTATION:
-               ret = check_session_rotation_pending(session,
-                               notification_thread_handle);
-               break;
-       default:
-               abort();
-       }
-       return ret;
-}
-
-static
-int handle_job_queue(struct rotation_thread_handle *handle,
-               struct rotation_thread *state,
-               struct rotation_thread_timer_queue *queue)
-{
-       int ret = 0;
-
-       for (;;) {
-               struct ltt_session *session;
-               struct rotation_thread_job *job;
-
-               /* Take the queue lock only to pop an element from the list. */
-               pthread_mutex_lock(&queue->lock);
-               if (cds_list_empty(&queue->list)) {
-                       pthread_mutex_unlock(&queue->lock);
-                       break;
-               }
-               job = cds_list_first_entry(&queue->list,
-                               typeof(*job), head);
-               cds_list_del(&job->head);
-               pthread_mutex_unlock(&queue->lock);
-
-               session_lock_list();
-               session = job->session;
-               if (!session) {
-                       DBG("Session \"%s\" not found",
-                                       session->name);
-                       /*
-                        * This is a non-fatal error, and we cannot report it to
-                        * the user (timer), so just print the error and
-                        * continue the processing.
-                        *
-                        * While the timer thread will purge pending signals for
-                        * a session on the session's destruction, it is
-                        * possible for a job targeting that session to have
-                        * already been queued before it was destroyed.
-                        */
-                       free(job);
-                       session_put(session);
-                       session_unlock_list();
-                       continue;
-               }
-
-               session_lock(session);
-               ret = run_job(job, session, handle->notification_thread_handle);
-               session_unlock(session);
-               /* Release reference held by the job. */
-               session_put(session);
-               session_unlock_list();
-               free(job);
-               if (ret) {
-                       goto end;
-               }
-       }
-
-       ret = 0;
-
-end:
-       return ret;
-}
-
-static
-int handle_condition(const struct lttng_condition *condition,
-               const struct lttng_evaluation *evaluation,
-               struct notification_thread_handle *notification_thread_handle)
-{
-       int ret = 0;
-       const char *condition_session_name = NULL;
-       enum lttng_condition_type condition_type;
-       enum lttng_condition_status condition_status;
-       enum lttng_evaluation_status evaluation_status;
-       uint64_t consumed;
-       struct ltt_session *session;
-
-       condition_type = lttng_condition_get_type(condition);
-
-       if (condition_type != LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE) {
-               ret = -1;
-               ERR("Condition type and session usage type are not the same");
-               goto end;
-       }
-
-       /* Fetch info to test */
-       condition_status = lttng_condition_session_consumed_size_get_session_name(
-                       condition, &condition_session_name);
-       if (condition_status != LTTNG_CONDITION_STATUS_OK) {
-               ERR("Session name could not be fetched");
-               ret = -1;
-               goto end;
-       }
-       evaluation_status = lttng_evaluation_session_consumed_size_get_consumed_size(evaluation,
-                       &consumed);
-       if (evaluation_status != LTTNG_EVALUATION_STATUS_OK) {
-               ERR("Failed to get evaluation");
-               ret = -1;
-               goto end;
-       }
-
-       session_lock_list();
-       session = session_find_by_name(condition_session_name);
-       if (!session) {
-               ret = -1;
-               session_unlock_list();
-               ERR("Session \"%s\" not found",
-                               condition_session_name);
-               goto end;
-       }
-       session_lock(session);
-
-       ret = unsubscribe_session_consumed_size_rotation(session,
-                       notification_thread_handle);
-       if (ret) {
-               goto end_unlock;
-       }
-
-       ret = cmd_rotate_session(session, NULL, false,
-               LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
-       if (ret == -LTTNG_ERR_ROTATION_PENDING) {
-               DBG("Rotate already pending, subscribe to the next threshold value");
-       } else if (ret != LTTNG_OK) {
-               ERR("Failed to rotate on size notification with error: %s",
-                               lttng_strerror(ret));
-               ret = -1;
-               goto end_unlock;
-       }
-       ret = subscribe_session_consumed_size_rotation(session,
-                       consumed + session->rotate_size,
-                       notification_thread_handle);
-       if (ret) {
-               ERR("Failed to subscribe to session consumed size condition");
-               goto end_unlock;
-       }
-       ret = 0;
-
-end_unlock:
-       session_unlock(session);
-       session_put(session);
-       session_unlock_list();
-end:
-       return ret;
-}
-
-static
-int handle_notification_channel(int fd,
-               struct rotation_thread_handle *handle,
-               struct rotation_thread *state)
-{
-       int ret;
-       bool notification_pending;
-       struct lttng_notification *notification = NULL;
-       enum lttng_notification_channel_status status;
-       const struct lttng_evaluation *notification_evaluation;
-       const struct lttng_condition *notification_condition;
-
-       status = lttng_notification_channel_has_pending_notification(
-                       rotate_notification_channel, &notification_pending);
-       if (status != LTTNG_NOTIFICATION_CHANNEL_STATUS_OK) {
-               ERR("Error occurred while checking for pending notification");
-               ret = -1;
-               goto end;
-       }
-
-       if (!notification_pending) {
-               ret = 0;
-               goto end;
-       }
-
-       /* Receive the next notification. */
-       status = lttng_notification_channel_get_next_notification(
-                       rotate_notification_channel,
-                       &notification);
-
-       switch (status) {
-       case LTTNG_NOTIFICATION_CHANNEL_STATUS_OK:
-               break;
-       case LTTNG_NOTIFICATION_CHANNEL_STATUS_NOTIFICATIONS_DROPPED:
-               /* Not an error, we will wait for the next one */
-               ret = 0;
-               goto end;;
-       case LTTNG_NOTIFICATION_CHANNEL_STATUS_CLOSED:
-               ERR("Notification channel was closed");
-               ret = -1;
-               goto end;
-       default:
-               /* Unhandled conditions / errors. */
-               ERR("Unknown notification channel status");
-               ret = -1;
-               goto end;
-       }
-
-       notification_condition = lttng_notification_get_condition(notification);
-       notification_evaluation = lttng_notification_get_evaluation(notification);
-
-       ret = handle_condition(notification_condition, notification_evaluation,
-                       handle->notification_thread_handle);
-
-end:
-       lttng_notification_destroy(notification);
-       return ret;
-}
-
-static
-void *thread_rotation(void *data)
-{
-       int ret;
-       struct rotation_thread_handle *handle = data;
-       struct rotation_thread thread;
-       int queue_pipe_fd;
-
-       DBG("Started rotation thread");
-       rcu_register_thread();
-       rcu_thread_online();
-       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_ROTATION);
-       health_code_update();
-
-       if (!handle) {
-               ERR("Invalid thread context provided");
-               goto end;
-       }
-
-       queue_pipe_fd = lttng_pipe_get_readfd(
-                       handle->rotation_timer_queue->event_pipe);
-
-
-       ret = init_thread_state(handle, &thread);
-       if (ret) {
-               goto error;
-       }
-
-       while (true) {
-               int fd_count, i;
-
-               health_poll_entry();
-               DBG("Entering poll wait");
-               ret = lttng_poll_wait(&thread.events, -1);
-               DBG("Poll wait returned (%i)", ret);
-               health_poll_exit();
-               if (ret < 0) {
-                       /*
-                        * Restart interrupted system call.
-                        */
-                       if (errno == EINTR) {
-                               continue;
-                       }
-                       ERR("Error encountered during lttng_poll_wait (%i)", ret);
-                       goto error;
-               }
-
-               fd_count = ret;
-               for (i = 0; i < fd_count; i++) {
-                       int fd = LTTNG_POLL_GETFD(&thread.events, i);
-                       uint32_t revents = LTTNG_POLL_GETEV(&thread.events, i);
-
-                       DBG("Handling fd (%i) activity (%u)",
-                                       fd, revents);
-
-                       if (revents & LPOLLERR) {
-                               ERR("Polling returned an error on fd %i", fd);
-                               goto error;
-                       }
-
-                       if (fd == rotate_notification_channel->socket) {
-                               ret = handle_notification_channel(fd, handle,
-                                               &thread);
-                               if (ret) {
-                                       ERR("Error occurred while handling activity on notification channel socket");
-                                       goto error;
-                               }
-                       } else {
-                               /* Job queue or quit pipe activity. */
-
-                               /*
-                                * The job queue is serviced if there is
-                                * activity on the quit pipe to ensure it is
-                                * flushed and all references held in the queue
-                                * are released.
-                                */
-                               ret = handle_job_queue(handle, &thread,
-                                               handle->rotation_timer_queue);
-                               if (ret) {
-                                       ERR("Failed to handle rotation timer pipe event");
-                                       goto error;
-                               }
-
-                               if (fd == queue_pipe_fd) {
-                                       char buf;
-
-                                       ret = lttng_read(fd, &buf, 1);
-                                       if (ret != 1) {
-                                               ERR("Failed to read from wakeup pipe (fd = %i)", fd);
-                                               goto error;
-                                       }
-                               } else {
-                                       DBG("Quit pipe activity");
-                                       goto exit;
-                               }
-                       }
-               }
-       }
-exit:
-error:
-       DBG("Thread exit");
-       fini_thread_state(&thread);
-end:
-       health_unregister(the_health_sessiond);
-       rcu_thread_offline();
-       rcu_unregister_thread();
-       return NULL;
-}
-
-static
-bool shutdown_rotation_thread(void *thread_data)
-{
-       struct rotation_thread_handle *handle = thread_data;
-       const int write_fd = lttng_pipe_get_writefd(handle->quit_pipe);
-
-       return notify_thread_pipe(write_fd) == 1;
-}
-
-bool launch_rotation_thread(struct rotation_thread_handle *handle)
-{
-       struct lttng_thread *thread;
-
-       thread = lttng_thread_create("Rotation",
-                       thread_rotation,
-                       shutdown_rotation_thread,
-                       NULL,
-                       handle);
-       if (!thread) {
-               goto error;
-       }
-       lttng_thread_put(thread);
-       return true;
-error:
-       return false;
-}
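
Beyond the rename, the rotation-thread.cpp added below differs from the file deleted above mainly in its allocations: C++ does not implicitly convert the void * returned by zmalloc(), so rotation_thread_timer_queue_create(), rotation_thread_handle_create() and rotation_thread_enqueue_job() gain explicit casts. A hypothetical, self-contained illustration of the pattern (calloc stands in for the internal zmalloc() wrapper):

    #include <stdlib.h>

    struct example_queue {
            int wakeup_fd;
    };

    /*
     * In C the implicit conversion from void * is enough; in C++ the explicit
     * cast is required, which is what the hunks below add around zmalloc().
     */
    static struct example_queue *example_queue_create(void)
    {
            return (struct example_queue *) calloc(1, sizeof(struct example_queue));
    }
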
diff --git a/src/bin/lttng-sessiond/rotation-thread.cpp b/src/bin/lttng-sessiond/rotation-thread.cpp
new file mode 100644 (file)
index 0000000..8754d48
--- /dev/null
@@ -0,0 +1,896 @@
+/*
+ * Copyright (C) 2017 Julien Desfossez <jdesfossez@efficios.com>
+ * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <lttng/trigger/trigger.h>
+#include <common/error.h>
+#include <common/config/session-config.h>
+#include <common/defaults.h>
+#include <common/utils.h>
+#include <common/futex.h>
+#include <common/align.h>
+#include <common/time.h>
+#include <common/hashtable/utils.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <signal.h>
+#include <inttypes.h>
+
+#include <common/kernel-ctl/kernel-ctl.h>
+#include <lttng/notification/channel-internal.h>
+#include <lttng/rotate-internal.h>
+#include <lttng/location-internal.h>
+
+#include "rotation-thread.h"
+#include "lttng-sessiond.h"
+#include "health-sessiond.h"
+#include "rotate.h"
+#include "cmd.h"
+#include "session.h"
+#include "timer.h"
+#include "notification-thread-commands.h"
+#include "utils.h"
+#include "thread.h"
+
+#include <urcu.h>
+#include <urcu/list.h>
+
+struct lttng_notification_channel *rotate_notification_channel = NULL;
+
+struct rotation_thread {
+       struct lttng_poll_event events;
+};
+
+struct rotation_thread_job {
+       enum rotation_thread_job_type type;
+       struct ltt_session *session;
+       /* List member in struct rotation_thread_timer_queue. */
+       struct cds_list_head head;
+};
+
+/*
+ * The timer thread enqueues jobs and wakes up the rotation thread.
+ * When the rotation thread wakes up, it empties the queue.
+ */
+struct rotation_thread_timer_queue {
+       struct lttng_pipe *event_pipe;
+       struct cds_list_head list;
+       pthread_mutex_t lock;
+};
+
+struct rotation_thread_handle {
+       struct rotation_thread_timer_queue *rotation_timer_queue;
+       /* Access to the notification thread cmd_queue */
+       struct notification_thread_handle *notification_thread_handle;
+       /* Thread-specific quit pipe. */
+       struct lttng_pipe *quit_pipe;
+};
+
+static
+const char *get_job_type_str(enum rotation_thread_job_type job_type)
+{
+       switch (job_type) {
+       case ROTATION_THREAD_JOB_TYPE_CHECK_PENDING_ROTATION:
+               return "CHECK_PENDING_ROTATION";
+       case ROTATION_THREAD_JOB_TYPE_SCHEDULED_ROTATION:
+               return "SCHEDULED_ROTATION";
+       default:
+               abort();
+       }
+}
+
+struct rotation_thread_timer_queue *rotation_thread_timer_queue_create(void)
+{
+       struct rotation_thread_timer_queue *queue = NULL;
+
+       queue = (rotation_thread_timer_queue *) zmalloc(sizeof(*queue));
+       if (!queue) {
+               PERROR("Failed to allocate timer rotate queue");
+               goto end;
+       }
+
+       queue->event_pipe = lttng_pipe_open(FD_CLOEXEC | O_NONBLOCK);
+       CDS_INIT_LIST_HEAD(&queue->list);
+       pthread_mutex_init(&queue->lock, NULL);
+end:
+       return queue;
+}
+
+void rotation_thread_timer_queue_destroy(
+               struct rotation_thread_timer_queue *queue)
+{
+       if (!queue) {
+               return;
+       }
+
+       lttng_pipe_destroy(queue->event_pipe);
+
+       pthread_mutex_lock(&queue->lock);
+       LTTNG_ASSERT(cds_list_empty(&queue->list));
+       pthread_mutex_unlock(&queue->lock);
+       pthread_mutex_destroy(&queue->lock);
+       free(queue);
+}
+
+/*
+ * Destroy the thread data previously created by the init function.
+ */
+void rotation_thread_handle_destroy(
+               struct rotation_thread_handle *handle)
+{
+       lttng_pipe_destroy(handle->quit_pipe);
+       free(handle);
+}
+
+struct rotation_thread_handle *rotation_thread_handle_create(
+               struct rotation_thread_timer_queue *rotation_timer_queue,
+               struct notification_thread_handle *notification_thread_handle)
+{
+       struct rotation_thread_handle *handle;
+
+       handle = (rotation_thread_handle *) zmalloc(sizeof(*handle));
+       if (!handle) {
+               goto end;
+       }
+
+       handle->rotation_timer_queue = rotation_timer_queue;
+       handle->notification_thread_handle = notification_thread_handle;
+       handle->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!handle->quit_pipe) {
+               goto error;
+       }
+
+end:
+       return handle;
+error:
+       rotation_thread_handle_destroy(handle);
+       return NULL;
+}
+
+/*
+ * Called with the rotation_thread_timer_queue lock held.
+ * Return true if the same timer job already exists in the queue, false if not.
+ */
+static
+bool timer_job_exists(const struct rotation_thread_timer_queue *queue,
+               enum rotation_thread_job_type job_type,
+               struct ltt_session *session)
+{
+       bool exists = false;
+       struct rotation_thread_job *job;
+
+       cds_list_for_each_entry(job, &queue->list, head) {
+               if (job->session == session && job->type == job_type) {
+                       exists = true;
+                       goto end;
+               }
+       }
+end:
+       return exists;
+}
+
+void rotation_thread_enqueue_job(struct rotation_thread_timer_queue *queue,
+               enum rotation_thread_job_type job_type,
+               struct ltt_session *session)
+{
+       int ret;
+       const char dummy = '!';
+       struct rotation_thread_job *job = NULL;
+       const char *job_type_str = get_job_type_str(job_type);
+
+       pthread_mutex_lock(&queue->lock);
+       if (timer_job_exists(queue, job_type, session)) {
+               /*
+                * This timer job is already pending, we don't need to add
+                * it.
+                */
+               goto end;
+       }
+
+       job = (rotation_thread_job *) zmalloc(sizeof(struct rotation_thread_job));
+       if (!job) {
+               PERROR("Failed to allocate rotation thread job of type \"%s\" for session \"%s\"",
+                               job_type_str, session->name);
+               goto end;
+       }
+       /* No reason for this to fail as the caller must hold a reference. */
+       (void) session_get(session);
+
+       job->session = session;
+       job->type = job_type;
+       cds_list_add_tail(&job->head, &queue->list);
+
+       ret = lttng_write(lttng_pipe_get_writefd(queue->event_pipe), &dummy,
+                       sizeof(dummy));
+       if (ret < 0) {
+               /*
+                * We do not want to block in the timer handler, the job has
+                * been enqueued in the list, the wakeup pipe is probably full,
+                * the job will be processed when the rotation_thread catches
+                * up.
+                */
+               if (errno == EAGAIN || errno == EWOULDBLOCK) {
+                       /*
+                        * Not an error, but would be surprising and indicate
+                        * that the rotation thread can't keep up with the
+                        * current load.
+                        */
+                       DBG("Wake-up pipe of rotation thread job queue is full");
+                       goto end;
+               }
+               PERROR("Failed to wake-up the rotation thread after pushing a job of type \"%s\" for session \"%s\"",
+                               job_type_str, session->name);
+               goto end;
+       }
+
+end:
+       pthread_mutex_unlock(&queue->lock);
+}
+
+static
+int init_poll_set(struct lttng_poll_event *poll_set,
+               struct rotation_thread_handle *handle)
+{
+       int ret;
+
+       /*
+        * Create pollset with size 3:
+        *      - rotation thread quit pipe,
+        *      - rotation thread timer queue pipe,
+        *      - notification channel sock,
+        */
+       ret = lttng_poll_create(poll_set, 5, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error;
+       }
+
+       ret = lttng_poll_add(poll_set,
+                       lttng_pipe_get_readfd(handle->quit_pipe),
+                       LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               ERR("Failed to add quit pipe read fd to poll set");
+               goto error;
+       }
+
+       ret = lttng_poll_add(poll_set,
+                       lttng_pipe_get_readfd(handle->rotation_timer_queue->event_pipe),
+                       LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               ERR("Failed to add rotate_pending fd to poll set");
+               goto error;
+       }
+
+       return ret;
+error:
+       lttng_poll_clean(poll_set);
+       return ret;
+}
+
+static
+void fini_thread_state(struct rotation_thread *state)
+{
+       lttng_poll_clean(&state->events);
+       if (rotate_notification_channel) {
+               lttng_notification_channel_destroy(rotate_notification_channel);
+       }
+}
+
+static
+int init_thread_state(struct rotation_thread_handle *handle,
+               struct rotation_thread *state)
+{
+       int ret;
+
+       memset(state, 0, sizeof(*state));
+       lttng_poll_init(&state->events);
+
+       ret = init_poll_set(&state->events, handle);
+       if (ret) {
+               ERR("Failed to initialize rotation thread poll set");
+               goto end;
+       }
+
+       rotate_notification_channel = lttng_notification_channel_create(
+                       lttng_session_daemon_notification_endpoint);
+       if (!rotate_notification_channel) {
+               ERR("Could not create notification channel");
+               ret = -1;
+               goto end;
+       }
+       ret = lttng_poll_add(&state->events, rotate_notification_channel->socket,
+                       LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               ERR("Failed to add notification fd to pollset");
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+static
+void check_session_rotation_pending_on_consumers(struct ltt_session *session,
+               bool *_rotation_completed)
+{
+       int ret = 0;
+       struct consumer_socket *socket;
+       struct cds_lfht_iter iter;
+       enum consumer_trace_chunk_exists_status exists_status;
+       uint64_t relayd_id;
+       bool chunk_exists_on_peer = false;
+       enum lttng_trace_chunk_status chunk_status;
+
+       LTTNG_ASSERT(session->chunk_being_archived);
+
+       /*
+        * Check for a local pending rotation on all consumers (32-bit
+        * user space, 64-bit user space, and kernel).
+        */
+       rcu_read_lock();
+       if (!session->ust_session) {
+               goto skip_ust;
+       }
+       cds_lfht_for_each_entry(session->ust_session->consumer->socks->ht,
+                       &iter, socket, node.node) {
+               relayd_id = session->ust_session->consumer->type == CONSUMER_DST_LOCAL ?
+                               -1ULL :
+                               session->ust_session->consumer->net_seq_index;
+
+               pthread_mutex_lock(socket->lock);
+               ret = consumer_trace_chunk_exists(socket,
+                               relayd_id,
+                               session->id, session->chunk_being_archived,
+                               &exists_status);
+               if (ret) {
+                       pthread_mutex_unlock(socket->lock);
+                       ERR("Error occurred while checking rotation status on consumer daemon");
+                       goto end;
+               }
+
+               if (exists_status != CONSUMER_TRACE_CHUNK_EXISTS_STATUS_UNKNOWN_CHUNK) {
+                       pthread_mutex_unlock(socket->lock);
+                       chunk_exists_on_peer = true;
+                       goto end;
+               }
+               pthread_mutex_unlock(socket->lock);
+       }
+
+skip_ust:
+       if (!session->kernel_session) {
+               goto skip_kernel;
+       }
+       cds_lfht_for_each_entry(session->kernel_session->consumer->socks->ht,
+                               &iter, socket, node.node) {
+               pthread_mutex_lock(socket->lock);
+               relayd_id = session->kernel_session->consumer->type == CONSUMER_DST_LOCAL ?
+                               -1ULL :
+                               session->kernel_session->consumer->net_seq_index;
+
+               ret = consumer_trace_chunk_exists(socket,
+                               relayd_id,
+                               session->id, session->chunk_being_archived,
+                               &exists_status);
+               if (ret) {
+                       pthread_mutex_unlock(socket->lock);
+                       ERR("Error occurred while checking rotation status on consumer daemon");
+                       goto end;
+               }
+
+               if (exists_status != CONSUMER_TRACE_CHUNK_EXISTS_STATUS_UNKNOWN_CHUNK) {
+                       pthread_mutex_unlock(socket->lock);
+                       chunk_exists_on_peer = true;
+                       goto end;
+               }
+               pthread_mutex_unlock(socket->lock);
+       }
+skip_kernel:
+end:
+       rcu_read_unlock();
+
+       if (!chunk_exists_on_peer) {
+               uint64_t chunk_being_archived_id;
+
+               chunk_status = lttng_trace_chunk_get_id(
+                               session->chunk_being_archived,
+                               &chunk_being_archived_id);
+               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+               DBG("Rotation of trace archive %" PRIu64 " of session \"%s\" is complete on all consumers",
+                               chunk_being_archived_id,
+                               session->name);
+       }
+       *_rotation_completed = !chunk_exists_on_peer;
+       if (ret) {
+               ret = session_reset_rotation_state(session,
+                               LTTNG_ROTATION_STATE_ERROR);
+               if (ret) {
+                       ERR("Failed to reset rotation state of session \"%s\"",
+                                       session->name);
+               }
+       }
+}
+
+/*
+ * Check if the last rotation was completed; called with the session lock held.
+ * Should only return non-zero in the event of a fatal error, as doing so will
+ * shut down the thread.
+ */
+static
+int check_session_rotation_pending(struct ltt_session *session,
+               struct notification_thread_handle *notification_thread_handle)
+{
+       int ret;
+       struct lttng_trace_archive_location *location;
+       enum lttng_trace_chunk_status chunk_status;
+       bool rotation_completed = false;
+       const char *archived_chunk_name;
+       uint64_t chunk_being_archived_id;
+
+       if (!session->chunk_being_archived) {
+               ret = 0;
+               goto end;
+       }
+
+       chunk_status = lttng_trace_chunk_get_id(session->chunk_being_archived,
+                       &chunk_being_archived_id);
+       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+
+       DBG("Checking for pending rotation on session \"%s\", trace archive %" PRIu64,
+                       session->name, chunk_being_archived_id);
+
+       /*
+        * The rotation-pending check timer of a session is launched in
+        * one-shot mode. If the rotation is incomplete, the rotation
+        * thread will re-enable the pending-check timer.
+        *
+        * The timer thread can't stop the timer itself since it is involved
+        * in the check for the timer's quiescence.
+        */
+       ret = timer_session_rotation_pending_check_stop(session);
+       if (ret) {
+               goto check_ongoing_rotation;
+       }
+
+       check_session_rotation_pending_on_consumers(session,
+                       &rotation_completed);
+       if (!rotation_completed ||
+                       session->rotation_state == LTTNG_ROTATION_STATE_ERROR) {
+               goto check_ongoing_rotation;
+       }
+
+       /*
+        * Now we can clear the "ONGOING" state in the session. New
+        * rotations can start now.
+        */
+       chunk_status = lttng_trace_chunk_get_name(session->chunk_being_archived,
+                       &archived_chunk_name, NULL);
+       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+       free(session->last_archived_chunk_name);
+       session->last_archived_chunk_name = strdup(archived_chunk_name);
+       if (!session->last_archived_chunk_name) {
+               PERROR("Failed to duplicate archived chunk name");
+       }
+       session_reset_rotation_state(session, LTTNG_ROTATION_STATE_COMPLETED);
+
+       if (!session->quiet_rotation) {
+               location = session_get_trace_archive_location(session);
+               ret = notification_thread_command_session_rotation_completed(
+                               notification_thread_handle,
+                               session->name,
+                               session->uid,
+                               session->gid,
+                               session->last_archived_chunk_id.value,
+                               location);
+               lttng_trace_archive_location_put(location);
+               if (ret != LTTNG_OK) {
+                       ERR("Failed to notify notification thread of completed rotation for session %s",
+                                       session->name);
+               }
+       }
+
+       ret = 0;
+check_ongoing_rotation:
+       if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
+               chunk_status = lttng_trace_chunk_get_id(
+                               session->chunk_being_archived,
+                               &chunk_being_archived_id);
+               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+
+               DBG("Rotation of trace archive %" PRIu64 " is still pending for session %s",
+                               chunk_being_archived_id, session->name);
+               ret = timer_session_rotation_pending_check_start(session,
+                               DEFAULT_ROTATE_PENDING_TIMER);
+               if (ret) {
+                       ERR("Failed to re-enable rotation pending timer");
+                       ret = -1;
+                       goto end;
+               }
+       }
+
+end:
+       return ret;
+}
+
+/* Call with the session and session_list locks held. */
+static
+int launch_session_rotation(struct ltt_session *session)
+{
+       int ret;
+       struct lttng_rotate_session_return rotation_return;
+
+       DBG("Launching scheduled time-based rotation on session \"%s\"",
+                       session->name);
+
+       ret = cmd_rotate_session(session, &rotation_return, false,
+               LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
+       if (ret == LTTNG_OK) {
+               DBG("Scheduled time-based rotation successfully launched on session \"%s\"",
+                               session->name);
+       } else {
+               /* Don't consider errors as fatal. */
+               DBG("Scheduled time-based rotation aborted for session %s: %s",
+                               session->name, lttng_strerror(ret));
+       }
+       return 0;
+}
+
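+/*
+ * Dispatch a queued rotation job to the appropriate handler. Called with
+ * the session and session list locks held.
+ */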
+static
+int run_job(struct rotation_thread_job *job, struct ltt_session *session,
+               struct notification_thread_handle *notification_thread_handle)
+{
+       int ret;
+
+       switch (job->type) {
+       case ROTATION_THREAD_JOB_TYPE_SCHEDULED_ROTATION:
+               ret = launch_session_rotation(session);
+               break;
+       case ROTATION_THREAD_JOB_TYPE_CHECK_PENDING_ROTATION:
+               ret = check_session_rotation_pending(session,
+                               notification_thread_handle);
+               break;
+       default:
+               abort();
+       }
+       return ret;
+}
+
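+/*
+ * Drain the rotation timer job queue. Jobs are popped under the queue lock
+ * and run with the session list and session locks held; the session
+ * reference held by each job is released once it has run.
+ */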
+static
+int handle_job_queue(struct rotation_thread_handle *handle,
+               struct rotation_thread *state,
+               struct rotation_thread_timer_queue *queue)
+{
+       int ret = 0;
+
+       for (;;) {
+               struct ltt_session *session;
+               struct rotation_thread_job *job;
+
+               /* Take the queue lock only to pop an element from the list. */
+               pthread_mutex_lock(&queue->lock);
+               if (cds_list_empty(&queue->list)) {
+                       pthread_mutex_unlock(&queue->lock);
+                       break;
+               }
+               job = cds_list_first_entry(&queue->list,
+                               typeof(*job), head);
+               cds_list_del(&job->head);
+               pthread_mutex_unlock(&queue->lock);
+
+               session_lock_list();
+               session = job->session;
+               if (!session) {
+                       DBG("Session of queued rotation job is no longer available");
+                       /*
+                        * This is a non-fatal error, and we cannot report it to
+                        * the user (timer), so just print the error and
+                        * continue the processing.
+                        *
+                        * While the timer thread will purge pending signals for
+                        * a session on the session's destruction, it is
+                        * possible for a job targeting that session to have
+                        * already been queued before it was destroyed.
+                        */
+                       free(job);
+                       session_put(session);
+                       session_unlock_list();
+                       continue;
+               }
+
+               session_lock(session);
+               ret = run_job(job, session, handle->notification_thread_handle);
+               session_unlock(session);
+               /* Release reference held by the job. */
+               session_put(session);
+               session_unlock_list();
+               free(job);
+               if (ret) {
+                       goto end;
+               }
+       }
+
+       ret = 0;
+
+end:
+       return ret;
+}
+
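+/*
+ * Handle a "session consumed size" notification: unsubscribe from the
+ * current threshold, trigger a size-based rotation of the session and
+ * re-subscribe at the next threshold (current consumed size plus the
+ * session's rotation size).
+ */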
+static
+int handle_condition(const struct lttng_condition *condition,
+               const struct lttng_evaluation *evaluation,
+               struct notification_thread_handle *notification_thread_handle)
+{
+       int ret = 0;
+       const char *condition_session_name = NULL;
+       enum lttng_condition_type condition_type;
+       enum lttng_condition_status condition_status;
+       enum lttng_evaluation_status evaluation_status;
+       uint64_t consumed;
+       struct ltt_session *session;
+
+       condition_type = lttng_condition_get_type(condition);
+
+       if (condition_type != LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE) {
+               ret = -1;
+               ERR("Unexpected condition type; expected a session consumed size condition");
+               goto end;
+       }
+
+       /* Fetch info to test */
+       condition_status = lttng_condition_session_consumed_size_get_session_name(
+                       condition, &condition_session_name);
+       if (condition_status != LTTNG_CONDITION_STATUS_OK) {
+               ERR("Session name could not be fetched");
+               ret = -1;
+               goto end;
+       }
+       evaluation_status = lttng_evaluation_session_consumed_size_get_consumed_size(evaluation,
+                       &consumed);
+       if (evaluation_status != LTTNG_EVALUATION_STATUS_OK) {
+               ERR("Failed to get evaluation");
+               ret = -1;
+               goto end;
+       }
+
+       session_lock_list();
+       session = session_find_by_name(condition_session_name);
+       if (!session) {
+               ret = -1;
+               session_unlock_list();
+               ERR("Session \"%s\" not found",
+                               condition_session_name);
+               goto end;
+       }
+       session_lock(session);
+
+       ret = unsubscribe_session_consumed_size_rotation(session,
+                       notification_thread_handle);
+       if (ret) {
+               goto end_unlock;
+       }
+
+       ret = cmd_rotate_session(session, NULL, false,
+               LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
+       if (ret == -LTTNG_ERR_ROTATION_PENDING) {
+               DBG("Rotate already pending, subscribe to the next threshold value");
+       } else if (ret != LTTNG_OK) {
+               ERR("Failed to rotate on size notification with error: %s",
+                               lttng_strerror(ret));
+               ret = -1;
+               goto end_unlock;
+       }
+       ret = subscribe_session_consumed_size_rotation(session,
+                       consumed + session->rotate_size,
+                       notification_thread_handle);
+       if (ret) {
+               ERR("Failed to subscribe to session consumed size condition");
+               goto end_unlock;
+       }
+       ret = 0;
+
+end_unlock:
+       session_unlock(session);
+       session_put(session);
+       session_unlock_list();
+end:
+       return ret;
+}
+
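+/*
+ * Service the rotation thread's notification channel: if a notification is
+ * pending, receive it and pass its condition and evaluation to
+ * handle_condition(). Dropped notifications are not treated as errors.
+ */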
+static
+int handle_notification_channel(int fd,
+               struct rotation_thread_handle *handle,
+               struct rotation_thread *state)
+{
+       int ret;
+       bool notification_pending;
+       struct lttng_notification *notification = NULL;
+       enum lttng_notification_channel_status status;
+       const struct lttng_evaluation *notification_evaluation;
+       const struct lttng_condition *notification_condition;
+
+       status = lttng_notification_channel_has_pending_notification(
+                       rotate_notification_channel, &notification_pending);
+       if (status != LTTNG_NOTIFICATION_CHANNEL_STATUS_OK) {
+               ERR("Error occurred while checking for pending notification");
+               ret = -1;
+               goto end;
+       }
+
+       if (!notification_pending) {
+               ret = 0;
+               goto end;
+       }
+
+       /* Receive the next notification. */
+       status = lttng_notification_channel_get_next_notification(
+                       rotate_notification_channel,
+                       &notification);
+
+       switch (status) {
+       case LTTNG_NOTIFICATION_CHANNEL_STATUS_OK:
+               break;
+       case LTTNG_NOTIFICATION_CHANNEL_STATUS_NOTIFICATIONS_DROPPED:
+               /* Not an error, we will wait for the next one */
+               ret = 0;
+               goto end;
+       case LTTNG_NOTIFICATION_CHANNEL_STATUS_CLOSED:
+               ERR("Notification channel was closed");
+               ret = -1;
+               goto end;
+       default:
+               /* Unhandled conditions / errors. */
+               ERR("Unknown notification channel status");
+               ret = -1;
+               goto end;
+       }
+
+       notification_condition = lttng_notification_get_condition(notification);
+       notification_evaluation = lttng_notification_get_evaluation(notification);
+
+       ret = handle_condition(notification_condition, notification_evaluation,
+                       handle->notification_thread_handle);
+
+end:
+       lttng_notification_destroy(notification);
+       return ret;
+}
+
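+/*
+ * Rotation thread entry point. Waits for activity on the job queue pipe,
+ * the quit pipe and the notification channel socket, dispatching to the
+ * handlers above until asked to quit or a fatal error occurs.
+ */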
+static
+void *thread_rotation(void *data)
+{
+       int ret;
+       struct rotation_thread_handle *handle = (rotation_thread_handle *) data;
+       struct rotation_thread thread;
+       int queue_pipe_fd;
+
+       DBG("Started rotation thread");
+       rcu_register_thread();
+       rcu_thread_online();
+       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_ROTATION);
+       health_code_update();
+
+       if (!handle) {
+               ERR("Invalid thread context provided");
+               goto end;
+       }
+
+       queue_pipe_fd = lttng_pipe_get_readfd(
+                       handle->rotation_timer_queue->event_pipe);
+
+       ret = init_thread_state(handle, &thread);
+       if (ret) {
+               goto error;
+       }
+
+       while (true) {
+               int fd_count, i;
+
+               health_poll_entry();
+               DBG("Entering poll wait");
+               ret = lttng_poll_wait(&thread.events, -1);
+               DBG("Poll wait returned (%i)", ret);
+               health_poll_exit();
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               continue;
+                       }
+                       ERR("Error encountered during lttng_poll_wait (%i)", ret);
+                       goto error;
+               }
+
+               fd_count = ret;
+               for (i = 0; i < fd_count; i++) {
+                       int fd = LTTNG_POLL_GETFD(&thread.events, i);
+                       uint32_t revents = LTTNG_POLL_GETEV(&thread.events, i);
+
+                       DBG("Handling fd (%i) activity (%u)",
+                                       fd, revents);
+
+                       if (revents & LPOLLERR) {
+                               ERR("Polling returned an error on fd %i", fd);
+                               goto error;
+                       }
+
+                       if (fd == rotate_notification_channel->socket) {
+                               ret = handle_notification_channel(fd, handle,
+                                               &thread);
+                               if (ret) {
+                                       ERR("Error occurred while handling activity on notification channel socket");
+                                       goto error;
+                               }
+                       } else {
+                               /* Job queue or quit pipe activity. */
+
+                               /*
+                                * The job queue is serviced even when the
+                                * activity is on the quit pipe, to ensure it is
+                                * flushed and that all references held in the
+                                * queue are released.
+                                */
+                               ret = handle_job_queue(handle, &thread,
+                                               handle->rotation_timer_queue);
+                               if (ret) {
+                                       ERR("Failed to handle rotation timer pipe event");
+                                       goto error;
+                               }
+
+                               if (fd == queue_pipe_fd) {
+                                       char buf;
+
+                                       ret = lttng_read(fd, &buf, 1);
+                                       if (ret != 1) {
+                                               ERR("Failed to read from wakeup pipe (fd = %i)", fd);
+                                               goto error;
+                                       }
+                               } else {
+                                       DBG("Quit pipe activity");
+                                       goto exit;
+                               }
+                       }
+               }
+       }
+exit:
+error:
+       DBG("Thread exit");
+       fini_thread_state(&thread);
+end:
+       health_unregister(the_health_sessiond);
+       rcu_thread_offline();
+       rcu_unregister_thread();
+       return NULL;
+}
+
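+/* Ask the rotation thread to exit by notifying its quit pipe. */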
+static
+bool shutdown_rotation_thread(void *thread_data)
+{
+       struct rotation_thread_handle *handle = (rotation_thread_handle *) thread_data;
+       const int write_fd = lttng_pipe_get_writefd(handle->quit_pipe);
+
+       return notify_thread_pipe(write_fd) == 1;
+}
+
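+/* Create and launch the rotation thread; returns true on success. */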
+bool launch_rotation_thread(struct rotation_thread_handle *handle)
+{
+       struct lttng_thread *thread;
+
+       thread = lttng_thread_create("Rotation",
+                       thread_rotation,
+                       shutdown_rotation_thread,
+                       NULL,
+                       handle);
+       if (!thread) {
+               goto error;
+       }
+       lttng_thread_put(thread);
+       return true;
+error:
+       return false;
+}
diff --git a/src/bin/lttng-sessiond/save.c b/src/bin/lttng-sessiond/save.c
deleted file mode 100644 (file)
index f0a6e43..0000000
+++ /dev/null
@@ -1,2897 +0,0 @@
-/*
- * Copyright (C) 2014 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <inttypes.h>
-#include <string.h>
-#include <urcu/uatomic.h>
-#include <unistd.h>
-
-#include <common/defaults.h>
-#include <common/error.h>
-#include <common/config/session-config.h>
-#include <common/utils.h>
-#include <common/runas.h>
-#include <lttng/save-internal.h>
-
-#include "kernel.h"
-#include "save.h"
-#include "session.h"
-#include "lttng-syscall.h"
-#include "trace-ust.h"
-#include "agent.h"
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_kernel_channel_attributes(struct config_writer *writer,
-       struct lttng_channel_attr *attr)
-{
-       int ret;
-
-       ret = config_writer_write_element_string(writer,
-               config_element_overwrite_mode,
-               attr->overwrite ? config_overwrite_mode_overwrite :
-                       config_overwrite_mode_discard);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_subbuf_size, attr->subbuf_size);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_num_subbuf,
-               attr->num_subbuf);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_switch_timer_interval,
-               attr->switch_timer_interval);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_read_timer_interval,
-               attr->read_timer_interval);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-               config_element_output_type,
-               attr->output == LTTNG_EVENT_SPLICE ?
-               config_output_type_splice : config_output_type_mmap);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_tracefile_size, attr->tracefile_size);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_tracefile_count,
-               attr->tracefile_count);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_live_timer_interval,
-               attr->live_timer_interval);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (attr->extended.ptr) {
-               struct lttng_channel_extended *ext = NULL;
-
-               ext = (struct lttng_channel_extended *) attr->extended.ptr;
-               ret = config_writer_write_element_unsigned_int(writer,
-                               config_element_monitor_timer_interval,
-                               ext->monitor_timer_interval);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               ret = config_writer_write_element_signed_int(writer,
-                               config_element_blocking_timeout,
-                               ext->blocking_timeout);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_ust_channel_attributes(struct config_writer *writer,
-       struct lttng_ust_abi_channel_attr *attr)
-{
-       int ret;
-       struct ltt_ust_channel *channel = NULL;
-
-       ret = config_writer_write_element_string(writer,
-               config_element_overwrite_mode,
-               attr->overwrite ? config_overwrite_mode_overwrite :
-                       config_overwrite_mode_discard);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_subbuf_size, attr->subbuf_size);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_num_subbuf,
-               attr->num_subbuf);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_switch_timer_interval,
-               attr->switch_timer_interval);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_read_timer_interval,
-               attr->read_timer_interval);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-               config_element_output_type,
-               attr->output == LTTNG_UST_ABI_MMAP ?
-               config_output_type_mmap : config_output_type_splice);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_signed_int(writer,
-                       config_element_blocking_timeout,
-                       attr->u.s.blocking_timeout);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /*
-        * Fetch the monitor timer which is located in the parent of
-        * lttng_ust_channel_attr
-        */
-       channel = caa_container_of(attr, struct ltt_ust_channel, attr);
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_monitor_timer_interval,
-               channel->monitor_timer_interval);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-static
-const char *get_kernel_instrumentation_string(
-       enum lttng_kernel_abi_instrumentation instrumentation)
-{
-       const char *instrumentation_string;
-
-       switch (instrumentation) {
-       case LTTNG_KERNEL_ABI_ALL:
-               instrumentation_string = config_event_type_all;
-               break;
-       case LTTNG_KERNEL_ABI_TRACEPOINT:
-               instrumentation_string = config_event_type_tracepoint;
-               break;
-       case LTTNG_KERNEL_ABI_KPROBE:
-               instrumentation_string = config_event_type_probe;
-               break;
-       case LTTNG_KERNEL_ABI_UPROBE:
-               instrumentation_string = config_event_type_userspace_probe;
-               break;
-       case LTTNG_KERNEL_ABI_FUNCTION:
-               instrumentation_string = config_event_type_function_entry;
-               break;
-       case LTTNG_KERNEL_ABI_KRETPROBE:
-               instrumentation_string = config_event_type_function;
-               break;
-       case LTTNG_KERNEL_ABI_NOOP:
-               instrumentation_string = config_event_type_noop;
-               break;
-       case LTTNG_KERNEL_ABI_SYSCALL:
-               instrumentation_string = config_event_type_syscall;
-               break;
-       default:
-               instrumentation_string = NULL;
-       }
-
-       return instrumentation_string;
-}
-
-static
-const char *get_kernel_context_type_string(
-       enum lttng_kernel_abi_context_type context_type)
-{
-       const char *context_type_string;
-
-       switch (context_type) {
-       case LTTNG_KERNEL_ABI_CONTEXT_PID:
-               context_type_string = config_event_context_pid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_PROCNAME:
-               context_type_string = config_event_context_procname;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_PRIO:
-               context_type_string = config_event_context_prio;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_NICE:
-               context_type_string = config_event_context_nice;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_VPID:
-               context_type_string = config_event_context_vpid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_TID:
-               context_type_string = config_event_context_tid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_VTID:
-               context_type_string = config_event_context_vtid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_PPID:
-               context_type_string = config_event_context_ppid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_VPPID:
-               context_type_string = config_event_context_vppid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_HOSTNAME:
-               context_type_string = config_event_context_hostname;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_INTERRUPTIBLE:
-               context_type_string = config_event_context_interruptible;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_PREEMPTIBLE:
-               context_type_string = config_event_context_preemptible;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_NEED_RESCHEDULE:
-               context_type_string = config_event_context_need_reschedule;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_MIGRATABLE:
-               context_type_string = config_event_context_migratable;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_USER:
-               context_type_string = config_event_context_callstack_user;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_KERNEL:
-               context_type_string = config_event_context_callstack_kernel;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_CGROUP_NS:
-               context_type_string = config_event_context_cgroup_ns;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_IPC_NS:
-               context_type_string = config_event_context_ipc_ns;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_MNT_NS:
-               context_type_string = config_event_context_mnt_ns;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_NET_NS:
-               context_type_string = config_event_context_net_ns;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_PID_NS:
-               context_type_string = config_event_context_pid_ns;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_TIME_NS:
-               context_type_string = config_event_context_time_ns;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_USER_NS:
-               context_type_string = config_event_context_user_ns;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_UTS_NS:
-               context_type_string = config_event_context_uts_ns;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_UID:
-               context_type_string = config_event_context_uid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_EUID:
-               context_type_string = config_event_context_euid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_SUID:
-               context_type_string = config_event_context_suid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_GID:
-               context_type_string = config_event_context_gid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_EGID:
-               context_type_string = config_event_context_egid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_SGID:
-               context_type_string = config_event_context_sgid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_VUID:
-               context_type_string = config_event_context_vuid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_VEUID:
-               context_type_string = config_event_context_veuid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_VSUID:
-               context_type_string = config_event_context_vsuid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_VGID:
-               context_type_string = config_event_context_vgid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_VEGID:
-               context_type_string = config_event_context_vegid;
-               break;
-       case LTTNG_KERNEL_ABI_CONTEXT_VSGID:
-               context_type_string = config_event_context_vsgid;
-               break;
-       default:
-               context_type_string = NULL;
-       }
-
-       return context_type_string;
-}
-
-static
-const char *get_ust_context_type_string(
-       enum lttng_ust_abi_context_type context_type)
-{
-       const char *context_type_string;
-
-       switch (context_type) {
-       case LTTNG_UST_ABI_CONTEXT_PROCNAME:
-               context_type_string = config_event_context_procname;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_VPID:
-               context_type_string = config_event_context_vpid;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_VTID:
-               context_type_string = config_event_context_vtid;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_IP:
-               context_type_string = config_event_context_ip;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_PTHREAD_ID:
-               context_type_string = config_event_context_pthread_id;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
-               context_type_string = config_event_context_app;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_CGROUP_NS:
-               context_type_string = config_event_context_cgroup_ns;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_IPC_NS:
-               context_type_string = config_event_context_ipc_ns;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_MNT_NS:
-               context_type_string = config_event_context_mnt_ns;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_NET_NS:
-               context_type_string = config_event_context_net_ns;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_TIME_NS:
-               context_type_string = config_event_context_time_ns;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_PID_NS:
-               context_type_string = config_event_context_pid_ns;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_USER_NS:
-               context_type_string = config_event_context_user_ns;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_UTS_NS:
-               context_type_string = config_event_context_uts_ns;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_VUID:
-               context_type_string = config_event_context_vuid;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_VEUID:
-               context_type_string = config_event_context_veuid;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_VSUID:
-               context_type_string = config_event_context_vsuid;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_VGID:
-               context_type_string = config_event_context_vgid;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_VEGID:
-               context_type_string = config_event_context_vegid;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_VSGID:
-               context_type_string = config_event_context_vsgid;
-               break;
-       case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
-               /*
-                * Error, should not be stored in the XML, perf contexts
-                * are stored as a node of type event_perf_context_type.
-                */
-       default:
-               context_type_string = NULL;
-               break;
-       }
-
-       return context_type_string;
-}
-
-static
-const char *get_buffer_type_string(
-       enum lttng_buffer_type buffer_type)
-{
-       const char *buffer_type_string;
-
-       switch (buffer_type) {
-       case LTTNG_BUFFER_PER_PID:
-               buffer_type_string = config_buffer_type_per_pid;
-               break;
-       case LTTNG_BUFFER_PER_UID:
-               buffer_type_string = config_buffer_type_per_uid;
-               break;
-       case LTTNG_BUFFER_GLOBAL:
-               buffer_type_string = config_buffer_type_global;
-               break;
-       default:
-               buffer_type_string = NULL;
-       }
-
-       return buffer_type_string;
-}
-
-static
-const char *get_loglevel_type_string(
-       enum lttng_ust_abi_loglevel_type loglevel_type)
-{
-       const char *loglevel_type_string;
-
-       switch (loglevel_type) {
-       case LTTNG_UST_ABI_LOGLEVEL_ALL:
-               loglevel_type_string = config_loglevel_type_all;
-               break;
-       case LTTNG_UST_ABI_LOGLEVEL_RANGE:
-               loglevel_type_string = config_loglevel_type_range;
-               break;
-       case LTTNG_UST_ABI_LOGLEVEL_SINGLE:
-               loglevel_type_string = config_loglevel_type_single;
-               break;
-       default:
-               loglevel_type_string = NULL;
-       }
-
-       return loglevel_type_string;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_kernel_function_event(struct config_writer *writer,
-               struct ltt_kernel_event *event)
-{
-       int ret;
-
-       ret = config_writer_open_element(writer, config_element_function_attributes);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer, config_element_name,
-                       event->event->u.ftrace.symbol_name);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* /function attributes */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-end:
-       return ret;
-}
-
-static
-int save_kernel_kprobe_event(struct config_writer *writer,
-               struct ltt_kernel_event *event)
-{
-       int ret;
-       const char *symbol_name;
-       uint64_t addr;
-       uint64_t offset;
-
-       switch (event->event->instrumentation) {
-       case LTTNG_KERNEL_ABI_KPROBE:
-               /*
-                * Comments in lttng-kernel.h mention that
-                * either addr or symbol_name are set, not both.
-                */
-               addr = event->event->u.kprobe.addr;
-               offset = event->event->u.kprobe.offset;
-               symbol_name = addr ? NULL : event->event->u.kprobe.symbol_name;
-               break;
-       case LTTNG_KERNEL_ABI_KRETPROBE:
-               addr = event->event->u.kretprobe.addr;
-               offset = event->event->u.kretprobe.offset;
-               symbol_name = addr ? NULL : event->event->u.kretprobe.symbol_name;
-               break;
-       default:
-               LTTNG_ASSERT(1);
-               ERR("Unsupported kernel instrumentation type.");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer, config_element_probe_attributes);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (addr) {
-               ret = config_writer_write_element_unsigned_int( writer,
-                               config_element_address, addr);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       } else if (symbol_name) {
-               ret = config_writer_write_element_string(writer,
-                                config_element_symbol_name, symbol_name);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-               /* If the offset is non-zero, write it.*/
-               if (offset) {
-                       ret = config_writer_write_element_unsigned_int(writer,
-                               config_element_offset, offset);
-                       if (ret) {
-                               ret = LTTNG_ERR_SAVE_IO_FAIL;
-                               goto end;
-                       }
-               }
-       } else {
-               /*
-                * This really should not happen as we are either setting the
-                * address or the symbol above.
-                */
-               ERR("Invalid probe/function description.");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-end:
-       return ret;
-}
-
-/*
- * Save the userspace probe tracepoint event associated with the event to the
- * config writer.
- */
-static
-int save_kernel_userspace_probe_tracepoint_event(struct config_writer *writer,
-               struct ltt_kernel_event *event)
-{
-       int ret = 0;
-       const char *probe_name, *provider_name, *binary_path;
-       const struct lttng_userspace_probe_location *userspace_probe_location;
-       const struct lttng_userspace_probe_location_lookup_method *lookup_method;
-       enum lttng_userspace_probe_location_lookup_method_type lookup_type;
-
-       /* Get userspace probe location from the event. */
-       userspace_probe_location = event->userspace_probe_location;
-       if (!userspace_probe_location) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* Get lookup method and lookup method type. */
-       lookup_method = lttng_userspace_probe_location_get_lookup_method(userspace_probe_location);
-       if (!lookup_method) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       lookup_type = lttng_userspace_probe_location_lookup_method_get_type(lookup_method);
-
-       /* Get the binary path, probe name and provider name. */
-       binary_path =
-               lttng_userspace_probe_location_tracepoint_get_binary_path(
-                               userspace_probe_location);
-       if (!binary_path) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       probe_name =
-               lttng_userspace_probe_location_tracepoint_get_probe_name(
-                               userspace_probe_location);
-       if (!probe_name) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       provider_name =
-               lttng_userspace_probe_location_tracepoint_get_provider_name(
-                               userspace_probe_location);
-       if (!provider_name) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* Open a userspace probe tracepoint attribute. */
-       ret = config_writer_open_element(writer, config_element_userspace_probe_tracepoint_attributes);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       switch (lookup_type) {
-       case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT:
-               ret = config_writer_write_element_string(writer,
-                               config_element_userspace_probe_lookup,
-                               config_element_userspace_probe_lookup_tracepoint_sdt);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-               break;
-       default:
-               ERR("Unsupported kernel userspace probe tracepoint lookup method.");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       /* Write the binary path, provider name and the probe name. */
-       ret = config_writer_write_element_string(writer,
-                       config_element_userspace_probe_location_binary_path,
-                       binary_path);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-                       config_element_userspace_probe_tracepoint_location_provider_name,
-                       provider_name);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-                       config_element_userspace_probe_tracepoint_location_probe_name,
-                       probe_name);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* Close the userspace probe tracepoint attribute. */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Save the userspace probe function event associated with the event to the
- * config writer.
- */
-static
-int save_kernel_userspace_probe_function_event(struct config_writer *writer,
-               struct ltt_kernel_event *event)
-{
-       int ret = 0;
-       const char *function_name, *binary_path;
-       const struct lttng_userspace_probe_location *userspace_probe_location;
-       const struct lttng_userspace_probe_location_lookup_method *lookup_method;
-       enum lttng_userspace_probe_location_lookup_method_type lookup_type;
-
-       /* Get userspace probe location from the event. */
-       userspace_probe_location = event->userspace_probe_location;
-       if (!userspace_probe_location) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* Get lookup method and lookup method type. */
-       lookup_method = lttng_userspace_probe_location_get_lookup_method(
-                       userspace_probe_location);
-       if (!lookup_method) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* Get the binary path and the function name. */
-       binary_path =
-               lttng_userspace_probe_location_function_get_binary_path(
-                               userspace_probe_location);
-       if (!binary_path) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       function_name =
-               lttng_userspace_probe_location_function_get_function_name(
-                               userspace_probe_location);
-       if (!function_name) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* Open a userspace probe function attribute. */
-       ret = config_writer_open_element(writer,
-                       config_element_userspace_probe_function_attributes);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       lookup_type = lttng_userspace_probe_location_lookup_method_get_type(lookup_method);
-       switch (lookup_type) {
-       case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF:
-               ret = config_writer_write_element_string(writer,
-                               config_element_userspace_probe_lookup,
-                               config_element_userspace_probe_lookup_function_elf);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-               break;
-       case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_DEFAULT:
-               ret = config_writer_write_element_string(writer,
-                               config_element_userspace_probe_lookup,
-                               config_element_userspace_probe_lookup_function_default);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-               break;
-       default:
-               ERR("Unsupported kernel userspace probe function lookup method.");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       /* Write the binary path and the function name. */
-       ret = config_writer_write_element_string(writer,
-                       config_element_userspace_probe_location_binary_path,
-                       binary_path);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-                       config_element_userspace_probe_function_location_function_name,
-                       function_name);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* Close the userspace probe function attribute. */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-static
-int save_kernel_userspace_probe_event(struct config_writer *writer,
-               struct ltt_kernel_event *event)
-{
-       int ret;
-       struct lttng_userspace_probe_location *userspace_probe_location;
-
-       /* Get userspace probe location from the event. */
-       userspace_probe_location = event->userspace_probe_location;
-       if (!userspace_probe_location) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       switch(lttng_userspace_probe_location_get_type(userspace_probe_location)) {
-       case LTTNG_USERSPACE_PROBE_LOCATION_TYPE_FUNCTION:
-       {
-               ret = save_kernel_userspace_probe_function_event(writer, event);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-               break;
-       }
-       case LTTNG_USERSPACE_PROBE_LOCATION_TYPE_TRACEPOINT:
-       {
-               ret = save_kernel_userspace_probe_tracepoint_event(writer, event);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-               break;
-       }
-       case LTTNG_USERSPACE_PROBE_LOCATION_TYPE_UNKNOWN:
-       default:
-               ERR("Unsupported kernel userspace probe location type.");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-static
-int save_kernel_event(struct config_writer *writer,
-               struct ltt_kernel_event *event)
-{
-       int ret;
-       const char *instrumentation_type;
-
-       ret = config_writer_open_element(writer, config_element_event);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (event->event->name[0]) {
-               ret = config_writer_write_element_string(writer,
-                       config_element_name, event->event->name);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       ret = config_writer_write_element_bool(writer, config_element_enabled,
-               event->enabled);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       instrumentation_type = get_kernel_instrumentation_string(
-               event->event->instrumentation);
-       if (!instrumentation_type) {
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer, config_element_type,
-               instrumentation_type);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (event->filter_expression) {
-               ret = config_writer_write_element_string(writer,
-                               config_element_filter,
-                               event->filter_expression);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       if (event->event->instrumentation == LTTNG_KERNEL_ABI_FUNCTION ||
-               event->event->instrumentation == LTTNG_KERNEL_ABI_KPROBE ||
-               event->event->instrumentation == LTTNG_KERNEL_ABI_UPROBE ||
-               event->event->instrumentation == LTTNG_KERNEL_ABI_KRETPROBE) {
-
-               ret = config_writer_open_element(writer,
-                       config_element_attributes);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               switch (event->event->instrumentation) {
-               case LTTNG_KERNEL_ABI_SYSCALL:
-               case LTTNG_KERNEL_ABI_FUNCTION:
-                       ret = save_kernel_function_event(writer, event);
-                       if (ret) {
-                               goto end;
-                       }
-                       break;
-               case LTTNG_KERNEL_ABI_KPROBE:
-               case LTTNG_KERNEL_ABI_KRETPROBE:
-                       ret = save_kernel_kprobe_event(writer, event);
-                       if (ret) {
-                               goto end;
-                       }
-                       break;
-               case LTTNG_KERNEL_ABI_UPROBE:
-                       ret = save_kernel_userspace_probe_event(writer, event);
-                       if (ret) {
-                               goto end;
-                       }
-                       break;
-               default:
-                       ERR("Unsupported kernel instrumentation type.");
-                       ret = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-
-               /* /attributes */
-               ret = config_writer_close_element(writer);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       /* /event */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_kernel_events(struct config_writer *writer,
-       struct ltt_kernel_channel *kchan)
-{
-       int ret;
-       struct ltt_kernel_event *event;
-
-       ret = config_writer_open_element(writer, config_element_events);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       cds_list_for_each_entry(event, &kchan->events_list.head, list) {
-               ret = save_kernel_event(writer, event);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-
-       /* /events */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_ust_event(struct config_writer *writer,
-       struct ltt_ust_event *event)
-{
-       int ret;
-       const char *loglevel_type_string;
-
-       ret = config_writer_open_element(writer, config_element_event);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (event->attr.name[0]) {
-               ret = config_writer_write_element_string(writer,
-                       config_element_name, event->attr.name);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       ret = config_writer_write_element_bool(writer, config_element_enabled,
-               event->enabled);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (event->attr.instrumentation != LTTNG_UST_ABI_TRACEPOINT) {
-               ERR("Unsupported UST instrumentation type.");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-       ret = config_writer_write_element_string(writer, config_element_type,
-               config_event_type_tracepoint);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       loglevel_type_string = get_loglevel_type_string(
-               event->attr.loglevel_type);
-       if (!loglevel_type_string) {
-               ERR("Unsupported UST loglevel type.");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-               config_element_loglevel_type, loglevel_type_string);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* The log level is irrelevant if no "filtering" is enabled */
-       if (event->attr.loglevel_type != LTTNG_UST_ABI_LOGLEVEL_ALL) {
-               ret = config_writer_write_element_signed_int(writer,
-                               config_element_loglevel, event->attr.loglevel);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       if (event->filter_expression) {
-               ret = config_writer_write_element_string(writer,
-                       config_element_filter, event->filter_expression);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       if (event->exclusion && event->exclusion->count) {
-               uint32_t i;
-
-               ret = config_writer_open_element(writer,
-                       config_element_exclusions);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               for (i = 0; i < event->exclusion->count; i++) {
-                       ret = config_writer_write_element_string(writer,
-                               config_element_exclusion,
-                               LTTNG_EVENT_EXCLUSION_NAME_AT(
-                                       event->exclusion, i));
-                       if (ret) {
-                               ret = LTTNG_ERR_SAVE_IO_FAIL;
-                               goto end;
-                       }
-               }
-
-               /* /exclusions */
-               ret = config_writer_close_element(writer);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       /* /event */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_ust_events(struct config_writer *writer,
-       struct lttng_ht *events)
-{
-       int ret;
-       struct ltt_ust_event *event;
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-
-       ret = config_writer_open_element(writer, config_element_events);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(events->ht, &iter.iter, node, node) {
-               event = caa_container_of(node, struct ltt_ust_event, node);
-
-               if (event->internal) {
-                       /* Internal events must not be exposed to clients */
-                       continue;
-               }
-               ret = save_ust_event(writer, event);
-               if (ret != LTTNG_OK) {
-                       rcu_read_unlock();
-                       goto end;
-               }
-       }
-       rcu_read_unlock();
-
-       /* /events */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int init_ust_event_from_agent_event(struct ltt_ust_event *ust_event,
-               struct agent_event *agent_event)
-{
-       int ret;
-       enum lttng_ust_abi_loglevel_type ust_loglevel_type;
-
-       ust_event->enabled = AGENT_EVENT_IS_ENABLED(agent_event);
-       ust_event->attr.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
-       if (lttng_strncpy(ust_event->attr.name, agent_event->name,
-                       LTTNG_SYMBOL_NAME_LEN)) {
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-       switch (agent_event->loglevel_type) {
-       case LTTNG_EVENT_LOGLEVEL_ALL:
-               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
-               break;
-       case LTTNG_EVENT_LOGLEVEL_SINGLE:
-               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
-               break;
-       case LTTNG_EVENT_LOGLEVEL_RANGE:
-               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
-               break;
-       default:
-               ERR("Invalid agent_event loglevel_type.");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       ust_event->attr.loglevel_type = ust_loglevel_type;
-       ust_event->attr.loglevel = agent_event->loglevel_value;
-       ust_event->filter_expression = agent_event->filter_expression;
-       ust_event->exclusion = agent_event->exclusion;
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_agent_events(struct config_writer *writer,
-               struct agent *agent)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_str *node;
-
-       ret = config_writer_open_element(writer, config_element_events);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(agent->events->ht, &iter.iter, node, node) {
-               struct agent_event *agent_event;
-               struct ltt_ust_event fake_event;
-
-               memset(&fake_event, 0, sizeof(fake_event));
-               agent_event = caa_container_of(node, struct agent_event, node);
-
-               /*
-                * Initialize a fake ust event to reuse the same serialization
-                * function since UST and agent events contain the same info
-                * (and one could wonder why they don't reuse the same
-                * structures...).
-                */
-               ret = init_ust_event_from_agent_event(&fake_event, agent_event);
-               if (ret != LTTNG_OK) {
-                       rcu_read_unlock();
-                       goto end;
-               }
-               ret = save_ust_event(writer, &fake_event);
-               if (ret != LTTNG_OK) {
-                       rcu_read_unlock();
-                       goto end;
-               }
-       }
-       rcu_read_unlock();
-
-       /* /events */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_kernel_context(struct config_writer *writer,
-       struct lttng_kernel_abi_context *ctx)
-{
-       int ret = LTTNG_OK;
-
-       if (!ctx) {
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer, config_element_context);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (ctx->ctx == LTTNG_KERNEL_ABI_CONTEXT_PERF_CPU_COUNTER) {
-               ret = config_writer_open_element(writer,
-                               config_element_context_perf);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               ret = config_writer_write_element_unsigned_int(writer,
-                       config_element_type, ctx->u.perf_counter.type);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               ret = config_writer_write_element_unsigned_int(writer,
-                       config_element_config, ctx->u.perf_counter.config);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               ret = config_writer_write_element_string(writer,
-                       config_element_name, ctx->u.perf_counter.name);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               /* /perf */
-               ret = config_writer_close_element(writer);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       } else {
-               const char *context_type_string =
-                       get_kernel_context_type_string(ctx->ctx);
-
-               if (!context_type_string) {
-                       ERR("Unsupported kernel context type.");
-                       ret = LTTNG_ERR_INVALID;
-                       goto end;
-               }
-
-               ret = config_writer_write_element_string(writer,
-                       config_element_type, context_type_string);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       /* /context */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_kernel_contexts(struct config_writer *writer,
-               struct ltt_kernel_channel *kchan)
-{
-       int ret;
-       struct ltt_kernel_context *ctx;
-
-       if (cds_list_empty(&kchan->ctx_list)) {
-               ret = LTTNG_OK;
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer, config_element_contexts);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       cds_list_for_each_entry(ctx, &kchan->ctx_list, list) {
-               ret = save_kernel_context(writer, &ctx->ctx);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-
-       /* /contexts */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_ust_context_perf_thread_counter(struct config_writer *writer,
-               struct ltt_ust_context *ctx)
-{
-       int ret;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(ctx);
-
-       /* Perf contexts are saved as event_perf_context_type */
-       ret = config_writer_open_element(writer, config_element_context_perf);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-                       config_element_type, ctx->ctx.u.perf_counter.type);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-                       config_element_config, ctx->ctx.u.perf_counter.config);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer, config_element_name,
-                       ctx->ctx.u.perf_counter.name);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* /perf */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_ust_context_app_ctx(struct config_writer *writer,
-               struct ltt_ust_context *ctx)
-{
-       int ret;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(ctx);
-
-       /* Application contexts are saved as application_context_type */
-       ret = config_writer_open_element(writer, config_element_context_app);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-                       config_element_context_app_provider_name,
-                       ctx->ctx.u.app_ctx.provider_name);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-                       config_element_context_app_ctx_name,
-                       ctx->ctx.u.app_ctx.ctx_name);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* /app */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_ust_context_generic(struct config_writer *writer,
-               struct ltt_ust_context *ctx)
-{
-       int ret;
-       const char *context_type_string;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(ctx);
-
-       /* Save context as event_context_type_type */
-       context_type_string = get_ust_context_type_string(
-                       ctx->ctx.ctx);
-       if (!context_type_string) {
-               ERR("Unsupported UST context type.");
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-                       config_element_type, context_type_string);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_ust_context(struct config_writer *writer,
-       struct cds_list_head *ctx_list)
-{
-       int ret;
-       struct ltt_ust_context *ctx;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(ctx_list);
-
-       ret = config_writer_open_element(writer, config_element_contexts);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       cds_list_for_each_entry(ctx, ctx_list, list) {
-               ret = config_writer_open_element(writer,
-                       config_element_context);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               switch (ctx->ctx.ctx) {
-               case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
-                       ret = save_ust_context_perf_thread_counter(writer, ctx);
-                       break;
-               case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
-                       ret = save_ust_context_app_ctx(writer, ctx);
-                       break;
-               default:
-                       /* Save generic context. */
-                       ret = save_ust_context_generic(writer, ctx);
-               }
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-
-               /* /context */
-               ret = config_writer_close_element(writer);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       /* /contexts */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_kernel_channel(struct config_writer *writer,
-       struct ltt_kernel_channel *kchan)
-{
-       int ret;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(kchan);
-
-       ret = config_writer_open_element(writer, config_element_channel);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer, config_element_name,
-               kchan->channel->name);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_bool(writer, config_element_enabled,
-               kchan->channel->enabled);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = save_kernel_channel_attributes(writer, &kchan->channel->attr);
-       if (ret != LTTNG_OK) {
-               goto end;
-       }
-
-       ret = save_kernel_events(writer, kchan);
-       if (ret != LTTNG_OK) {
-               goto end;
-       }
-
-       ret = save_kernel_contexts(writer, kchan);
-       if (ret != LTTNG_OK) {
-               goto end;
-       }
-
-       /* /channel */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_ust_channel(struct config_writer *writer,
-       struct ltt_ust_channel *ust_chan,
-       struct ltt_ust_session *session)
-{
-       int ret;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(ust_chan);
-       LTTNG_ASSERT(session);
-
-       ret = config_writer_open_element(writer, config_element_channel);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer, config_element_name,
-               ust_chan->name);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_bool(writer, config_element_enabled,
-               ust_chan->enabled);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = save_ust_channel_attributes(writer, &ust_chan->attr);
-       if (ret != LTTNG_OK) {
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_tracefile_size, ust_chan->tracefile_size);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_tracefile_count, ust_chan->tracefile_count);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-               config_element_live_timer_interval,
-               session->live_timer_interval);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (ust_chan->domain == LTTNG_DOMAIN_UST) {
-               ret = save_ust_events(writer, ust_chan->events);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       } else {
-               struct agent *agent = NULL;
-
-               agent = trace_ust_find_agent(session, ust_chan->domain);
-               if (!agent) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       ERR("Could not find agent associated to UST subdomain");
-                       goto end;
-               }
-
-               /*
-                * Channels associated with a UST sub-domain (such as JUL, Log4j
-                * or Python) don't have any non-internal events. We retrieve
-                * the "agent" events associated with this channel and serialize
-                * them.
-                */
-               ret = save_agent_events(writer, agent);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-
-       ret = save_ust_context(writer, &ust_chan->ctx_list);
-       if (ret != LTTNG_OK) {
-               goto end;
-       }
-
-       /* /channel */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_kernel_session(struct config_writer *writer,
-       struct ltt_session *session)
-{
-       int ret;
-       struct ltt_kernel_channel *kchan;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(session);
-
-       ret = config_writer_write_element_string(writer, config_element_type,
-               config_domain_type_kernel);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-               config_element_buffer_type, config_buffer_type_global);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer,
-               config_element_channels);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       cds_list_for_each_entry(kchan, &session->kernel_session->channel_list.head,
-                       list) {
-               ret = save_kernel_channel(writer, kchan);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-
-       /* /channels */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-static
-const char *get_config_domain_str(enum lttng_domain_type domain)
-{
-       const char *str_dom;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               str_dom = config_domain_type_kernel;
-               break;
-       case LTTNG_DOMAIN_UST:
-               str_dom = config_domain_type_ust;
-               break;
-       case LTTNG_DOMAIN_JUL:
-               str_dom = config_domain_type_jul;
-               break;
-       case LTTNG_DOMAIN_LOG4J:
-               str_dom = config_domain_type_log4j;
-               break;
-       case LTTNG_DOMAIN_PYTHON:
-               str_dom = config_domain_type_python;
-               break;
-       default:
-               abort();
-       }
-
-       return str_dom;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static int save_process_attr_tracker(struct config_writer *writer,
-               struct ltt_session *sess,
-               int domain,
-               enum lttng_process_attr process_attr)
-{
-       int ret = LTTNG_OK;
-       const char *element_id_tracker, *element_target_id, *element_id;
-       const struct process_attr_tracker *tracker;
-       enum lttng_tracking_policy tracking_policy;
-       struct lttng_process_attr_values *values = NULL;
-
-       switch (process_attr) {
-       case LTTNG_PROCESS_ATTR_PROCESS_ID:
-               element_id_tracker = config_element_process_attr_tracker_pid;
-               element_target_id = config_element_process_attr_pid_value;
-               element_id = config_element_process_attr_id;
-               break;
-       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
-               element_id_tracker = config_element_process_attr_tracker_vpid;
-               element_target_id = config_element_process_attr_vpid_value;
-               element_id = config_element_process_attr_id;
-               break;
-       case LTTNG_PROCESS_ATTR_USER_ID:
-               element_id_tracker = config_element_process_attr_tracker_uid;
-               element_target_id = config_element_process_attr_uid_value;
-               element_id = config_element_process_attr_id;
-               break;
-       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
-               element_id_tracker = config_element_process_attr_tracker_vuid;
-               element_target_id = config_element_process_attr_vuid_value;
-               element_id = config_element_process_attr_id;
-               break;
-       case LTTNG_PROCESS_ATTR_GROUP_ID:
-               element_id_tracker = config_element_process_attr_tracker_gid;
-               element_target_id = config_element_process_attr_gid_value;
-               element_id = config_element_process_attr_id;
-               break;
-       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
-               element_id_tracker = config_element_process_attr_tracker_vgid;
-               element_target_id = config_element_process_attr_vgid_value;
-               element_id = config_element_process_attr_id;
-               break;
-       default:
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               tracker = kernel_get_process_attr_tracker(
-                               sess->kernel_session, process_attr);
-               LTTNG_ASSERT(tracker);
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-       {
-               tracker = trace_ust_get_process_attr_tracker(
-                               sess->ust_session, process_attr);
-               LTTNG_ASSERT(tracker);
-               break;
-       }
-       case LTTNG_DOMAIN_JUL:
-       case LTTNG_DOMAIN_LOG4J:
-       case LTTNG_DOMAIN_PYTHON:
-       default:
-               ret = LTTNG_ERR_UNSUPPORTED_DOMAIN;
-               goto end;
-       }
-
-       tracking_policy = process_attr_tracker_get_tracking_policy(tracker);
-       if (tracking_policy == LTTNG_TRACKING_POLICY_INCLUDE_ALL) {
-               /* Tracking all, nothing to output. */
-               ret = LTTNG_OK;
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer, element_id_tracker);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_open_element(
-                       writer, config_element_process_attr_values);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (tracking_policy == LTTNG_TRACKING_POLICY_INCLUDE_SET) {
-               unsigned int i, count;
-               enum process_attr_tracker_status status =
-                               process_attr_tracker_get_inclusion_set(
-                                               tracker, &values);
-
-               if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto end;
-               }
-
-               count = _lttng_process_attr_values_get_count(values);
-
-               for (i = 0; i < count; i++) {
-                       unsigned int integral_value = UINT_MAX;
-                       const char *name = NULL;
-                       const struct process_attr_value *value =
-                                       lttng_process_attr_tracker_values_get_at_index(
-                                                       values, i);
-
-                       LTTNG_ASSERT(value);
-                       ret = config_writer_open_element(
-                                       writer, element_target_id);
-                       if (ret) {
-                               ret = LTTNG_ERR_SAVE_IO_FAIL;
-                               goto end;
-                       }
-
-                       switch (value->type) {
-                       case LTTNG_PROCESS_ATTR_VALUE_TYPE_PID:
-                               integral_value =
-                                               (unsigned int) value->value.pid;
-                               break;
-                       case LTTNG_PROCESS_ATTR_VALUE_TYPE_UID:
-                               integral_value =
-                                               (unsigned int) value->value.uid;
-                               break;
-                       case LTTNG_PROCESS_ATTR_VALUE_TYPE_GID:
-                               integral_value =
-                                               (unsigned int) value->value.gid;
-                               break;
-                       case LTTNG_PROCESS_ATTR_VALUE_TYPE_USER_NAME:
-                               name = value->value.user_name;
-                               LTTNG_ASSERT(name);
-                               break;
-                       case LTTNG_PROCESS_ATTR_VALUE_TYPE_GROUP_NAME:
-                               name = value->value.group_name;
-                               LTTNG_ASSERT(name);
-                               break;
-                       default:
-                               abort();
-                       }
-
-                       if (name) {
-                               ret = config_writer_write_element_string(writer,
-                                               config_element_name, name);
-                       } else {
-                               ret = config_writer_write_element_unsigned_int(
-                                               writer, element_id,
-                                               integral_value);
-                       }
-
-                       if (ret) {
-                               ret = LTTNG_ERR_SAVE_IO_FAIL;
-                               goto end;
-                       }
-
-                       /* /$element_target_id */
-                       ret = config_writer_close_element(writer);
-                       if (ret) {
-                               ret = LTTNG_ERR_SAVE_IO_FAIL;
-                               goto end;
-                       }
-               }
-       }
-
-       /* /values */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* /$element_id_tracker */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       lttng_process_attr_values_destroy(values);
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static int save_process_attr_trackers(struct config_writer *writer,
-               struct ltt_session *sess,
-               int domain)
-{
-       int ret;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               ret = save_process_attr_tracker(writer, sess, domain,
-                               LTTNG_PROCESS_ATTR_PROCESS_ID);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-               ret = save_process_attr_tracker(writer, sess, domain,
-                               LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-               ret = save_process_attr_tracker(writer, sess, domain,
-                               LTTNG_PROCESS_ATTR_USER_ID);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-               ret = save_process_attr_tracker(writer, sess, domain,
-                               LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-               ret = save_process_attr_tracker(writer, sess, domain,
-                               LTTNG_PROCESS_ATTR_GROUP_ID);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-               ret = save_process_attr_tracker(writer, sess, domain,
-                               LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-               break;
-       case LTTNG_DOMAIN_UST:
-               ret = save_process_attr_tracker(writer, sess, domain,
-                               LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-               ret = save_process_attr_tracker(writer, sess, domain,
-                               LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-               ret = save_process_attr_tracker(writer, sess, domain,
-                               LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-               break;
-       default:
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_ust_domain(struct config_writer *writer,
-       struct ltt_session *session, enum lttng_domain_type domain)
-{
-       int ret;
-       struct ltt_ust_channel *ust_chan;
-       const char *buffer_type_string;
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-       const char *config_domain_name;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(session);
-
-       ret = config_writer_open_element(writer,
-                       config_element_domain);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       config_domain_name = get_config_domain_str(domain);
-       if (!config_domain_name) {
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-                       config_element_type, config_domain_name);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       buffer_type_string = get_buffer_type_string(
-                       session->ust_session->buffer_type);
-       if (!buffer_type_string) {
-               ERR("Unsupported buffer type.");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer,
-                       config_element_buffer_type, buffer_type_string);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer, config_element_channels);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
-                       &iter.iter, node, node) {
-               ust_chan = caa_container_of(node, struct ltt_ust_channel, node);
-               if (domain == ust_chan->domain) {
-                       ret = save_ust_channel(writer, ust_chan, session->ust_session);
-                       if (ret != LTTNG_OK) {
-                               rcu_read_unlock();
-                               goto end;
-                       }
-               }
-       }
-       rcu_read_unlock();
-
-       /* /channels */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (domain == LTTNG_DOMAIN_UST) {
-               ret = config_writer_open_element(
-                               writer, config_element_process_attr_trackers);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               ret = save_process_attr_trackers(
-                               writer, session, LTTNG_DOMAIN_UST);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-
-               /* /trackers */
-               ret = config_writer_close_element(writer);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       /* /domain */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_domains(struct config_writer *writer, struct ltt_session *session)
-{
-       int ret = LTTNG_OK;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(session);
-
-       if (!session->kernel_session && !session->ust_session) {
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer, config_element_domains);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (session->kernel_session) {
-               ret = config_writer_open_element(writer,
-                       config_element_domain);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               ret = save_kernel_session(writer, session);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-
-               ret = config_writer_open_element(
-                               writer, config_element_process_attr_trackers);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               ret = save_process_attr_trackers(
-                               writer, session, LTTNG_DOMAIN_KERNEL);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-
-               /* /trackers */
-               ret = config_writer_close_element(writer);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-               /* /domain */
-               ret = config_writer_close_element(writer);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       if (session->ust_session) {
-               ret = save_ust_domain(writer, session, LTTNG_DOMAIN_UST);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-
-               ret = save_ust_domain(writer, session, LTTNG_DOMAIN_JUL);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-
-               ret = save_ust_domain(writer, session, LTTNG_DOMAIN_LOG4J);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-
-               ret = save_ust_domain(writer, session, LTTNG_DOMAIN_PYTHON);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-
-       /* /domains */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_consumer_output(struct config_writer *writer,
-       struct consumer_output *output)
-{
-       int ret;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(output);
-
-       ret = config_writer_open_element(writer, config_element_consumer_output);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_bool(writer, config_element_enabled,
-                       output->enabled);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer, config_element_destination);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       switch (output->type) {
-       case CONSUMER_DST_LOCAL:
-               ret = config_writer_write_element_string(writer,
-                       config_element_path, output->dst.session_root_path);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-               break;
-       case CONSUMER_DST_NET:
-       {
-               char *uri;
-
-               uri = zmalloc(PATH_MAX);
-               if (!uri) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto end;
-               }
-
-               ret = config_writer_open_element(writer, config_element_net_output);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end_net_output;
-               }
-
-               if (output->dst.net.control_isset &&
-                       output->dst.net.data_isset) {
-                       ret = uri_to_str_url(&output->dst.net.control, uri, PATH_MAX);
-                       if (ret < 0) {
-                               ret = LTTNG_ERR_INVALID;
-                               goto end_net_output;
-                       }
-
-                       ret = config_writer_write_element_string(writer,
-                                       config_element_control_uri, uri);
-                       if (ret) {
-                               ret = LTTNG_ERR_SAVE_IO_FAIL;
-                               goto end_net_output;
-                       }
-
-                       ret = uri_to_str_url(&output->dst.net.data, uri, PATH_MAX);
-                       if (ret < 0) {
-                               ret = LTTNG_ERR_INVALID;
-                               goto end_net_output;
-                       }
-
-                       ret = config_writer_write_element_string(writer,
-                                       config_element_data_uri, uri);
-                       if (ret) {
-                               ret = LTTNG_ERR_SAVE_IO_FAIL;
-                               goto end_net_output;
-                       }
-                       ret = LTTNG_OK;
-end_net_output:
-                       free(uri);
-                       if (ret != LTTNG_OK) {
-                               goto end;
-                       }
-               } else {
-                       ret = !output->dst.net.control_isset ?
-                               LTTNG_ERR_URL_CTRL_MISS :
-                               LTTNG_ERR_URL_DATA_MISS;
-                       free(uri);
-                       goto end;
-               }
-
-               ret = config_writer_close_element(writer);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-               break;
-       }
-       default:
-               ERR("Unsupported consumer output type.");
-               ret = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       /* /destination */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* /consumer_output */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_snapshot_outputs(struct config_writer *writer,
-       struct snapshot *snapshot)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-       struct snapshot_output *output;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(snapshot);
-
-       ret = config_writer_open_element(writer, config_element_snapshot_outputs);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(snapshot->output_ht->ht, &iter.iter, output,
-                       node.node) {
-               ret = config_writer_open_element(writer,
-                       config_element_output);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end_unlock;
-               }
-
-               ret = config_writer_write_element_string(writer,
-                       config_element_name, output->name);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end_unlock;
-               }
-
-               ret = config_writer_write_element_unsigned_int(writer,
-                       config_element_max_size, output->max_size);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end_unlock;
-               }
-
-               ret = save_consumer_output(writer, output->consumer);
-               if (ret != LTTNG_OK) {
-                       goto end_unlock;
-               }
-
-               /* /output */
-               ret = config_writer_close_element(writer);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end_unlock;
-               }
-       }
-       rcu_read_unlock();
-
-       /* /snapshot_outputs */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       return ret;
-end_unlock:
-       rcu_read_unlock();
-       return ret;
-}
-
-/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
-static
-int save_session_output(struct config_writer *writer,
-       struct ltt_session *session)
-{
-       int ret;
-
-       LTTNG_ASSERT(writer);
-       LTTNG_ASSERT(session);
-
-       if ((session->snapshot_mode && session->snapshot.nb_output == 0) ||
-               (!session->snapshot_mode && !session->consumer)) {
-               /* Session is in no output mode */
-               ret = LTTNG_OK;
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer, config_element_output);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (session->snapshot_mode) {
-               ret = save_snapshot_outputs(writer, &session->snapshot);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       } else {
-               if (session->consumer) {
-                       ret = save_consumer_output(writer, session->consumer);
-                       if (ret != LTTNG_OK) {
-                               goto end;
-                       }
-               }
-       }
-
-       /* /output */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-       ret = LTTNG_OK;
-end:
-       return ret;
-}
-
-static
-int save_session_rotation_schedule(struct config_writer *writer,
-               enum lttng_rotation_schedule_type type, uint64_t value)
-{
-       int ret = 0;
-       const char *element_name;
-       const char *value_name;
-
-       switch (type) {
-       case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
-               element_name = config_element_rotation_schedule_periodic;
-               value_name = config_element_rotation_schedule_periodic_time_us;
-               break;
-       case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
-               element_name = config_element_rotation_schedule_size_threshold;
-               value_name = config_element_rotation_schedule_size_threshold_bytes;
-               break;
-       default:
-               ret = -1;
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer, element_name);
-       if (ret) {
-               goto end;
-       }
-
-       ret = config_writer_write_element_unsigned_int(writer,
-                       value_name, value);
-       if (ret) {
-               goto end;
-       }
-
-       /* Close schedule descriptor element. */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               goto end;
-       }
-end:
-       return ret;
-}
-
-static
-int save_session_rotation_schedules(struct config_writer *writer,
-       struct ltt_session *session)
-{
-       int ret;
-
-       ret = config_writer_open_element(writer,
-                       config_element_rotation_schedules);
-       if (ret) {
-               goto end;
-       }
-       if (session->rotate_timer_period) {
-               ret = save_session_rotation_schedule(writer,
-                               LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC,
-                               session->rotate_timer_period);
-               if (ret) {
-                       goto close_schedules;
-               }
-       }
-       if (session->rotate_size) {
-               ret = save_session_rotation_schedule(writer,
-                               LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD,
-                               session->rotate_size);
-               if (ret) {
-                       goto close_schedules;
-               }
-       }
-
-close_schedules:
-       /* Close rotation schedules element. */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               goto end;
-       }
-end:
-       return ret;
-}
-
-/*
- * Save the given session.
- *
- * Return LTTNG_OK on success else a LTTNG_ERR* code.
- */
-static
-int save_session(struct ltt_session *session,
-       struct lttng_save_session_attr *attr, lttng_sock_cred *creds)
-{
-       int ret, fd = -1;
-       char config_file_path[LTTNG_PATH_MAX];
-       size_t len;
-       struct config_writer *writer = NULL;
-       size_t session_name_len;
-       const char *provided_path;
-       int file_open_flags = O_CREAT | O_WRONLY | O_TRUNC;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(attr);
-       LTTNG_ASSERT(creds);
-
-       session_name_len = strlen(session->name);
-       memset(config_file_path, 0, sizeof(config_file_path));
-
-       if (!session_access_ok(session,
-               LTTNG_SOCK_GET_UID_CRED(creds)) || session->destroyed) {
-               ret = LTTNG_ERR_EPERM;
-               goto end;
-       }
-
-       provided_path = lttng_save_session_attr_get_output_url(attr);
-       if (provided_path) {
-               DBG3("Save session in provided path %s", provided_path);
-               len = strlen(provided_path);
-               if (len >= sizeof(config_file_path)) {
-                       ret = LTTNG_ERR_SET_URL;
-                       goto end;
-               }
-               strncpy(config_file_path, provided_path, sizeof(config_file_path));
-       } else {
-               ssize_t ret_len;
-               char *home_dir = utils_get_user_home_dir(
-                       LTTNG_SOCK_GET_UID_CRED(creds));
-               if (!home_dir) {
-                       ret = LTTNG_ERR_SET_URL;
-                       goto end;
-               }
-
-               ret_len = snprintf(config_file_path, sizeof(config_file_path),
-                               DEFAULT_SESSION_HOME_CONFIGPATH, home_dir);
-               free(home_dir);
-               if (ret_len < 0) {
-                       PERROR("snprintf save session");
-                       ret = LTTNG_ERR_SET_URL;
-                       goto end;
-               }
-               len = ret_len;
-       }
-
-       /*
-        * Check the path fits in the config file path dst including the '/'
-        * followed by trailing .lttng extension and the NULL terminated string.
-        */
-       if ((len + session_name_len + 2 +
-                       sizeof(DEFAULT_SESSION_CONFIG_FILE_EXTENSION))
-                       > sizeof(config_file_path)) {
-               ret = LTTNG_ERR_SET_URL;
-               goto end;
-       }
-
-       ret = run_as_mkdir_recursive(config_file_path, S_IRWXU | S_IRWXG,
-                       LTTNG_SOCK_GET_UID_CRED(creds), LTTNG_SOCK_GET_GID_CRED(creds));
-       if (ret) {
-               ret = LTTNG_ERR_SET_URL;
-               goto end;
-       }
-
-       /*
-        * At this point, we know that everything fits in the buffer. Validation
-        * was done just above.
-        */
-       config_file_path[len++] = '/';
-       strncpy(config_file_path + len, session->name, sizeof(config_file_path) - len);
-       len += session_name_len;
-       strcpy(config_file_path + len, DEFAULT_SESSION_CONFIG_FILE_EXTENSION);
-       len += sizeof(DEFAULT_SESSION_CONFIG_FILE_EXTENSION);
-       config_file_path[len] = '\0';
-
-       if (!attr->overwrite) {
-               file_open_flags |= O_EXCL;
-       }
-
-       fd = run_as_open(config_file_path, file_open_flags,
-               S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP,
-               LTTNG_SOCK_GET_UID_CRED(creds), LTTNG_SOCK_GET_GID_CRED(creds));
-       if (fd < 0) {
-               PERROR("Could not create configuration file");
-               switch (errno) {
-               case EEXIST:
-                       ret = LTTNG_ERR_SAVE_FILE_EXIST;
-                       break;
-               case EACCES:
-                       ret = LTTNG_ERR_EPERM;
-                       break;
-               default:
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       break;
-               }
-               goto end;
-       }
-
-       writer = config_writer_create(fd, 1);
-       if (!writer) {
-               ret = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer, config_element_sessions);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_open_element(writer, config_element_session);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = config_writer_write_element_string(writer, config_element_name,
-                       session->name);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (session->shm_path[0] != '\0') {
-               ret = config_writer_write_element_string(writer,
-                               config_element_shared_memory_path,
-                               session->shm_path);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       ret = save_domains(writer, session);
-       if (ret != LTTNG_OK) {
-               goto end;
-       }
-
-       ret = config_writer_write_element_bool(writer, config_element_started,
-                       session->active);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       if (session->snapshot_mode || session->live_timer ||
-                       session->rotate_timer_period || session->rotate_size) {
-               ret = config_writer_open_element(writer, config_element_attributes);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-
-               if (session->snapshot_mode) {
-                       ret = config_writer_write_element_bool(writer,
-                                       config_element_snapshot_mode, 1);
-                       if (ret) {
-                               ret = LTTNG_ERR_SAVE_IO_FAIL;
-                               goto end;
-                       }
-               } else if (session->live_timer) {
-                       ret = config_writer_write_element_unsigned_int(writer,
-                                       config_element_live_timer_interval, session->live_timer);
-                       if (ret) {
-                               ret = LTTNG_ERR_SAVE_IO_FAIL;
-                               goto end;
-                       }
-               }
-               if (session->rotate_timer_period || session->rotate_size) {
-                       ret = save_session_rotation_schedules(writer,
-                                       session);
-                       if (ret) {
-                               ret = LTTNG_ERR_SAVE_IO_FAIL;
-                               goto end;
-                       }
-               }
-
-               /* /attributes */
-               ret = config_writer_close_element(writer);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end;
-               }
-       }
-
-       ret = save_session_output(writer, session);
-       if (ret != LTTNG_OK) {
-               goto end;
-       }
-
-       /* /session */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       /* /sessions */
-       ret = config_writer_close_element(writer);
-       if (ret) {
-               ret = LTTNG_ERR_SAVE_IO_FAIL;
-               goto end;
-       }
-
-       ret = LTTNG_OK;
-end:
-       if (writer && config_writer_destroy(writer)) {
-               /* Preserve the original error code */
-               ret = ret != LTTNG_OK ? ret : LTTNG_ERR_SAVE_IO_FAIL;
-       }
-       if (ret != LTTNG_OK) {
-               /* Delete file in case of error */
-               if ((fd >= 0) && unlink(config_file_path)) {
-                       PERROR("Unlinking XML session configuration.");
-               }
-       }
-
-       if (fd >= 0) {
-               int closeret;
-
-               closeret = close(fd);
-               if (closeret) {
-                       PERROR("Closing XML session configuration");
-               }
-       }
-
-       return ret;
-}
-
-int cmd_save_sessions(struct lttng_save_session_attr *attr,
-       lttng_sock_cred *creds)
-{
-       int ret;
-       const char *session_name;
-       struct ltt_session *session;
-
-       session_lock_list();
-
-       session_name = lttng_save_session_attr_get_session_name(attr);
-       if (session_name) {
-               session = session_find_by_name(session_name);
-               if (!session) {
-                       ret = LTTNG_ERR_SESS_NOT_FOUND;
-                       goto end;
-               }
-
-               session_lock(session);
-               ret = save_session(session, attr, creds);
-               session_unlock(session);
-               session_put(session);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       } else {
-               struct ltt_session_list *list = session_get_list();
-
-               cds_list_for_each_entry(session, &list->head, list) {
-                       if (!session_get(session)) {
-                               continue;
-                       }
-                       session_lock(session);
-                       ret = save_session(session, attr, creds);
-                       session_unlock(session);
-                       session_put(session);
-                       /* Don't abort if we don't have the required permissions. */
-                       if (ret != LTTNG_OK && ret != LTTNG_ERR_EPERM) {
-                               goto end;
-                       }
-               }
-       }
-       ret = LTTNG_OK;
-
-end:
-       session_unlock_list();
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/save.cpp b/src/bin/lttng-sessiond/save.cpp
new file mode 100644 (file)
index 0000000..2055e86
--- /dev/null
@@ -0,0 +1,2897 @@
+/*
+ * Copyright (C) 2014 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <inttypes.h>
+#include <string.h>
+#include <urcu/uatomic.h>
+#include <unistd.h>
+
+#include <common/defaults.h>
+#include <common/error.h>
+#include <common/config/session-config.h>
+#include <common/utils.h>
+#include <common/runas.h>
+#include <lttng/save-internal.h>
+
+#include "kernel.h"
+#include "save.h"
+#include "session.h"
+#include "lttng-syscall.h"
+#include "trace-ust.h"
+#include "agent.h"
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_kernel_channel_attributes(struct config_writer *writer,
+       struct lttng_channel_attr *attr)
+{
+       int ret;
+
+       ret = config_writer_write_element_string(writer,
+               config_element_overwrite_mode,
+               attr->overwrite ? config_overwrite_mode_overwrite :
+                       config_overwrite_mode_discard);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_subbuf_size, attr->subbuf_size);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_num_subbuf,
+               attr->num_subbuf);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_switch_timer_interval,
+               attr->switch_timer_interval);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_read_timer_interval,
+               attr->read_timer_interval);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+               config_element_output_type,
+               attr->output == LTTNG_EVENT_SPLICE ?
+               config_output_type_splice : config_output_type_mmap);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_tracefile_size, attr->tracefile_size);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_tracefile_count,
+               attr->tracefile_count);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_live_timer_interval,
+               attr->live_timer_interval);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (attr->extended.ptr) {
+               struct lttng_channel_extended *ext = NULL;
+
+               ext = (struct lttng_channel_extended *) attr->extended.ptr;
+               ret = config_writer_write_element_unsigned_int(writer,
+                               config_element_monitor_timer_interval,
+                               ext->monitor_timer_interval);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               ret = config_writer_write_element_signed_int(writer,
+                               config_element_blocking_timeout,
+                               ext->blocking_timeout);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_ust_channel_attributes(struct config_writer *writer,
+       struct lttng_ust_abi_channel_attr *attr)
+{
+       int ret;
+       struct ltt_ust_channel *channel = NULL;
+
+       ret = config_writer_write_element_string(writer,
+               config_element_overwrite_mode,
+               attr->overwrite ? config_overwrite_mode_overwrite :
+                       config_overwrite_mode_discard);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_subbuf_size, attr->subbuf_size);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_num_subbuf,
+               attr->num_subbuf);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_switch_timer_interval,
+               attr->switch_timer_interval);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_read_timer_interval,
+               attr->read_timer_interval);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+               config_element_output_type,
+               attr->output == LTTNG_UST_ABI_MMAP ?
+               config_output_type_mmap : config_output_type_splice);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_signed_int(writer,
+                       config_element_blocking_timeout,
+                       attr->u.s.blocking_timeout);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /*
+        * Fetch the monitor timer, which is located in the parent
+        * (struct ltt_ust_channel) of the lttng_ust_abi_channel_attr.
+        */
+       channel = caa_container_of(attr, struct ltt_ust_channel, attr);
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_monitor_timer_interval,
+               channel->monitor_timer_interval);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
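+/* Return the config string of an instrumentation type, NULL if unknown. */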
+static
+const char *get_kernel_instrumentation_string(
+       enum lttng_kernel_abi_instrumentation instrumentation)
+{
+       const char *instrumentation_string;
+
+       switch (instrumentation) {
+       case LTTNG_KERNEL_ABI_ALL:
+               instrumentation_string = config_event_type_all;
+               break;
+       case LTTNG_KERNEL_ABI_TRACEPOINT:
+               instrumentation_string = config_event_type_tracepoint;
+               break;
+       case LTTNG_KERNEL_ABI_KPROBE:
+               instrumentation_string = config_event_type_probe;
+               break;
+       case LTTNG_KERNEL_ABI_UPROBE:
+               instrumentation_string = config_event_type_userspace_probe;
+               break;
+       case LTTNG_KERNEL_ABI_FUNCTION:
+               instrumentation_string = config_event_type_function_entry;
+               break;
+       case LTTNG_KERNEL_ABI_KRETPROBE:
+               instrumentation_string = config_event_type_function;
+               break;
+       case LTTNG_KERNEL_ABI_NOOP:
+               instrumentation_string = config_event_type_noop;
+               break;
+       case LTTNG_KERNEL_ABI_SYSCALL:
+               instrumentation_string = config_event_type_syscall;
+               break;
+       default:
+               instrumentation_string = NULL;
+       }
+
+       return instrumentation_string;
+}
+
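+/* Return the config string of a kernel context type, NULL if unknown. */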
+static
+const char *get_kernel_context_type_string(
+       enum lttng_kernel_abi_context_type context_type)
+{
+       const char *context_type_string;
+
+       switch (context_type) {
+       case LTTNG_KERNEL_ABI_CONTEXT_PID:
+               context_type_string = config_event_context_pid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_PROCNAME:
+               context_type_string = config_event_context_procname;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_PRIO:
+               context_type_string = config_event_context_prio;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_NICE:
+               context_type_string = config_event_context_nice;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_VPID:
+               context_type_string = config_event_context_vpid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_TID:
+               context_type_string = config_event_context_tid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_VTID:
+               context_type_string = config_event_context_vtid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_PPID:
+               context_type_string = config_event_context_ppid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_VPPID:
+               context_type_string = config_event_context_vppid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_HOSTNAME:
+               context_type_string = config_event_context_hostname;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_INTERRUPTIBLE:
+               context_type_string = config_event_context_interruptible;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_PREEMPTIBLE:
+               context_type_string = config_event_context_preemptible;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_NEED_RESCHEDULE:
+               context_type_string = config_event_context_need_reschedule;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_MIGRATABLE:
+               context_type_string = config_event_context_migratable;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_USER:
+               context_type_string = config_event_context_callstack_user;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_KERNEL:
+               context_type_string = config_event_context_callstack_kernel;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_CGROUP_NS:
+               context_type_string = config_event_context_cgroup_ns;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_IPC_NS:
+               context_type_string = config_event_context_ipc_ns;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_MNT_NS:
+               context_type_string = config_event_context_mnt_ns;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_NET_NS:
+               context_type_string = config_event_context_net_ns;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_PID_NS:
+               context_type_string = config_event_context_pid_ns;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_TIME_NS:
+               context_type_string = config_event_context_time_ns;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_USER_NS:
+               context_type_string = config_event_context_user_ns;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_UTS_NS:
+               context_type_string = config_event_context_uts_ns;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_UID:
+               context_type_string = config_event_context_uid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_EUID:
+               context_type_string = config_event_context_euid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_SUID:
+               context_type_string = config_event_context_suid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_GID:
+               context_type_string = config_event_context_gid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_EGID:
+               context_type_string = config_event_context_egid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_SGID:
+               context_type_string = config_event_context_sgid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_VUID:
+               context_type_string = config_event_context_vuid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_VEUID:
+               context_type_string = config_event_context_veuid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_VSUID:
+               context_type_string = config_event_context_vsuid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_VGID:
+               context_type_string = config_event_context_vgid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_VEGID:
+               context_type_string = config_event_context_vegid;
+               break;
+       case LTTNG_KERNEL_ABI_CONTEXT_VSGID:
+               context_type_string = config_event_context_vsgid;
+               break;
+       default:
+               context_type_string = NULL;
+       }
+
+       return context_type_string;
+}
+
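+/* Return the config string of a UST context type, NULL if unsupported. */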
+static
+const char *get_ust_context_type_string(
+       enum lttng_ust_abi_context_type context_type)
+{
+       const char *context_type_string;
+
+       switch (context_type) {
+       case LTTNG_UST_ABI_CONTEXT_PROCNAME:
+               context_type_string = config_event_context_procname;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_VPID:
+               context_type_string = config_event_context_vpid;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_VTID:
+               context_type_string = config_event_context_vtid;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_IP:
+               context_type_string = config_event_context_ip;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_PTHREAD_ID:
+               context_type_string = config_event_context_pthread_id;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
+               context_type_string = config_event_context_app;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_CGROUP_NS:
+               context_type_string = config_event_context_cgroup_ns;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_IPC_NS:
+               context_type_string = config_event_context_ipc_ns;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_MNT_NS:
+               context_type_string = config_event_context_mnt_ns;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_NET_NS:
+               context_type_string = config_event_context_net_ns;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_TIME_NS:
+               context_type_string = config_event_context_time_ns;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_PID_NS:
+               context_type_string = config_event_context_pid_ns;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_USER_NS:
+               context_type_string = config_event_context_user_ns;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_UTS_NS:
+               context_type_string = config_event_context_uts_ns;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_VUID:
+               context_type_string = config_event_context_vuid;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_VEUID:
+               context_type_string = config_event_context_veuid;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_VSUID:
+               context_type_string = config_event_context_vsuid;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_VGID:
+               context_type_string = config_event_context_vgid;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_VEGID:
+               context_type_string = config_event_context_vegid;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_VSGID:
+               context_type_string = config_event_context_vsgid;
+               break;
+       case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
+               /*
+                * Error: should not be stored in the XML; perf contexts
+                * are stored as a node of type event_perf_context_type.
+                */
+       default:
+               context_type_string = NULL;
+               break;
+       }
+
+       return context_type_string;
+}
+
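+/* Return the config string of a buffer type, NULL if unknown. */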
+static
+const char *get_buffer_type_string(
+       enum lttng_buffer_type buffer_type)
+{
+       const char *buffer_type_string;
+
+       switch (buffer_type) {
+       case LTTNG_BUFFER_PER_PID:
+               buffer_type_string = config_buffer_type_per_pid;
+               break;
+       case LTTNG_BUFFER_PER_UID:
+               buffer_type_string = config_buffer_type_per_uid;
+               break;
+       case LTTNG_BUFFER_GLOBAL:
+               buffer_type_string = config_buffer_type_global;
+               break;
+       default:
+               buffer_type_string = NULL;
+       }
+
+       return buffer_type_string;
+}
+
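+/* Return the config string of a UST loglevel type, NULL if unknown. */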
+static
+const char *get_loglevel_type_string(
+       enum lttng_ust_abi_loglevel_type loglevel_type)
+{
+       const char *loglevel_type_string;
+
+       switch (loglevel_type) {
+       case LTTNG_UST_ABI_LOGLEVEL_ALL:
+               loglevel_type_string = config_loglevel_type_all;
+               break;
+       case LTTNG_UST_ABI_LOGLEVEL_RANGE:
+               loglevel_type_string = config_loglevel_type_range;
+               break;
+       case LTTNG_UST_ABI_LOGLEVEL_SINGLE:
+               loglevel_type_string = config_loglevel_type_single;
+               break;
+       default:
+               loglevel_type_string = NULL;
+       }
+
+       return loglevel_type_string;
+}
+
+/* Return 0 on success else a LTTNG_ERR* code. */
+static
+int save_kernel_function_event(struct config_writer *writer,
+               struct ltt_kernel_event *event)
+{
+       int ret;
+
+       ret = config_writer_open_element(writer, config_element_function_attributes);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer, config_element_name,
+                       event->event->u.ftrace.symbol_name);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* /function attributes */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+end:
+       return ret;
+}
+
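+/* Return 0 on success else a LTTNG_ERR* code. */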
+static
+int save_kernel_kprobe_event(struct config_writer *writer,
+               struct ltt_kernel_event *event)
+{
+       int ret;
+       const char *symbol_name;
+       uint64_t addr;
+       uint64_t offset;
+
+       switch (event->event->instrumentation) {
+       case LTTNG_KERNEL_ABI_KPROBE:
+               /*
+                * Comments in lttng-kernel.h mention that
+                * either addr or symbol_name is set, not both.
+                */
+               addr = event->event->u.kprobe.addr;
+               offset = event->event->u.kprobe.offset;
+               symbol_name = addr ? NULL : event->event->u.kprobe.symbol_name;
+               break;
+       case LTTNG_KERNEL_ABI_KRETPROBE:
+               addr = event->event->u.kretprobe.addr;
+               offset = event->event->u.kretprobe.offset;
+               symbol_name = addr ? NULL : event->event->u.kretprobe.symbol_name;
+               break;
+       default:
+               LTTNG_ASSERT(0);
+               ERR("Unsupported kernel instrumentation type.");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       ret = config_writer_open_element(writer, config_element_probe_attributes);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (addr) {
+               ret = config_writer_write_element_unsigned_int(writer,
+                               config_element_address, addr);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       } else if (symbol_name) {
+               ret = config_writer_write_element_string(writer,
+                                config_element_symbol_name, symbol_name);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+               /* If the offset is non-zero, write it. */
+               if (offset) {
+                       ret = config_writer_write_element_unsigned_int(writer,
+                               config_element_offset, offset);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end;
+                       }
+               }
+       } else {
+               /*
+                * This really should not happen as we are either setting the
+                * address or the symbol above.
+                */
+               ERR("Invalid probe/function description.");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+end:
+       return ret;
+}
+
+/*
+ * Save the tracepoint-type userspace probe location associated with the
+ * event to the config writer.
+ */
+static
+int save_kernel_userspace_probe_tracepoint_event(struct config_writer *writer,
+               struct ltt_kernel_event *event)
+{
+       int ret = 0;
+       const char *probe_name, *provider_name, *binary_path;
+       const struct lttng_userspace_probe_location *userspace_probe_location;
+       const struct lttng_userspace_probe_location_lookup_method *lookup_method;
+       enum lttng_userspace_probe_location_lookup_method_type lookup_type;
+
+       /* Get userspace probe location from the event. */
+       userspace_probe_location = event->userspace_probe_location;
+       if (!userspace_probe_location) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* Get lookup method and lookup method type. */
+       lookup_method = lttng_userspace_probe_location_get_lookup_method(userspace_probe_location);
+       if (!lookup_method) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       lookup_type = lttng_userspace_probe_location_lookup_method_get_type(lookup_method);
+
+       /* Get the binary path, probe name and provider name. */
+       binary_path =
+               lttng_userspace_probe_location_tracepoint_get_binary_path(
+                               userspace_probe_location);
+       if (!binary_path) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       probe_name =
+               lttng_userspace_probe_location_tracepoint_get_probe_name(
+                               userspace_probe_location);
+       if (!probe_name) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       provider_name =
+               lttng_userspace_probe_location_tracepoint_get_provider_name(
+                               userspace_probe_location);
+       if (!provider_name) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* Open a userspace probe tracepoint attribute. */
+       ret = config_writer_open_element(writer, config_element_userspace_probe_tracepoint_attributes);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       switch (lookup_type) {
+       case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT:
+               ret = config_writer_write_element_string(writer,
+                               config_element_userspace_probe_lookup,
+                               config_element_userspace_probe_lookup_tracepoint_sdt);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+               break;
+       default:
+               ERR("Unsupported kernel userspace probe tracepoint lookup method.");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       /* Write the binary path, provider name and the probe name. */
+       ret = config_writer_write_element_string(writer,
+                       config_element_userspace_probe_location_binary_path,
+                       binary_path);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+                       config_element_userspace_probe_tracepoint_location_provider_name,
+                       provider_name);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+                       config_element_userspace_probe_tracepoint_location_probe_name,
+                       probe_name);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* Close the userspace probe tracepoint attribute. */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Save the function-type userspace probe location associated with the
+ * event to the config writer.
+ */
+static
+int save_kernel_userspace_probe_function_event(struct config_writer *writer,
+               struct ltt_kernel_event *event)
+{
+       int ret = 0;
+       const char *function_name, *binary_path;
+       const struct lttng_userspace_probe_location *userspace_probe_location;
+       const struct lttng_userspace_probe_location_lookup_method *lookup_method;
+       enum lttng_userspace_probe_location_lookup_method_type lookup_type;
+
+       /* Get userspace probe location from the event. */
+       userspace_probe_location = event->userspace_probe_location;
+       if (!userspace_probe_location) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* Get lookup method and lookup method type. */
+       lookup_method = lttng_userspace_probe_location_get_lookup_method(
+                       userspace_probe_location);
+       if (!lookup_method) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* Get the binary path and the function name. */
+       binary_path =
+               lttng_userspace_probe_location_function_get_binary_path(
+                               userspace_probe_location);
+       if (!binary_path) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       function_name =
+               lttng_userspace_probe_location_function_get_function_name(
+                               userspace_probe_location);
+       if (!function_name) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* Open a userspace probe function attribute. */
+       ret = config_writer_open_element(writer,
+                       config_element_userspace_probe_function_attributes);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       lookup_type = lttng_userspace_probe_location_lookup_method_get_type(lookup_method);
+       switch (lookup_type) {
+       case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF:
+               ret = config_writer_write_element_string(writer,
+                               config_element_userspace_probe_lookup,
+                               config_element_userspace_probe_lookup_function_elf);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+               break;
+       case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_DEFAULT:
+               ret = config_writer_write_element_string(writer,
+                               config_element_userspace_probe_lookup,
+                               config_element_userspace_probe_lookup_function_default);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+               break;
+       default:
+               ERR("Unsupported kernel userspace probe function lookup method.");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       /* Write the binary path and the function name. */
+       ret = config_writer_write_element_string(writer,
+                       config_element_userspace_probe_location_binary_path,
+                       binary_path);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+                       config_element_userspace_probe_function_location_function_name,
+                       function_name);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* Close the userspace probe function attribute. */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
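+/* Return 0 on success else a LTTNG_ERR* code. */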
+static
+int save_kernel_userspace_probe_event(struct config_writer *writer,
+               struct ltt_kernel_event *event)
+{
+       int ret;
+       struct lttng_userspace_probe_location *userspace_probe_location;
+
+       /* Get userspace probe location from the event. */
+       userspace_probe_location = event->userspace_probe_location;
+       if (!userspace_probe_location) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       switch (lttng_userspace_probe_location_get_type(userspace_probe_location)) {
+       case LTTNG_USERSPACE_PROBE_LOCATION_TYPE_FUNCTION:
+       {
+               ret = save_kernel_userspace_probe_function_event(writer, event);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+               break;
+       }
+       case LTTNG_USERSPACE_PROBE_LOCATION_TYPE_TRACEPOINT:
+       {
+               ret = save_kernel_userspace_probe_tracepoint_event(writer, event);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+               break;
+       }
+       case LTTNG_USERSPACE_PROBE_LOCATION_TYPE_UNKNOWN:
+       default:
+               ERR("Unsupported kernel userspace probe location type.");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
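+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */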
+static
+int save_kernel_event(struct config_writer *writer,
+               struct ltt_kernel_event *event)
+{
+       int ret;
+       const char *instrumentation_type;
+
+       ret = config_writer_open_element(writer, config_element_event);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (event->event->name[0]) {
+               ret = config_writer_write_element_string(writer,
+                       config_element_name, event->event->name);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       ret = config_writer_write_element_bool(writer, config_element_enabled,
+               event->enabled);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       instrumentation_type = get_kernel_instrumentation_string(
+               event->event->instrumentation);
+       if (!instrumentation_type) {
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer, config_element_type,
+               instrumentation_type);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (event->filter_expression) {
+               ret = config_writer_write_element_string(writer,
+                               config_element_filter,
+                               event->filter_expression);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       if (event->event->instrumentation == LTTNG_KERNEL_ABI_FUNCTION ||
+               event->event->instrumentation == LTTNG_KERNEL_ABI_KPROBE ||
+               event->event->instrumentation == LTTNG_KERNEL_ABI_UPROBE ||
+               event->event->instrumentation == LTTNG_KERNEL_ABI_KRETPROBE) {
+
+               ret = config_writer_open_element(writer,
+                       config_element_attributes);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               switch (event->event->instrumentation) {
+               case LTTNG_KERNEL_ABI_SYSCALL:
+               case LTTNG_KERNEL_ABI_FUNCTION:
+                       ret = save_kernel_function_event(writer, event);
+                       if (ret) {
+                               goto end;
+                       }
+                       break;
+               case LTTNG_KERNEL_ABI_KPROBE:
+               case LTTNG_KERNEL_ABI_KRETPROBE:
+                       ret = save_kernel_kprobe_event(writer, event);
+                       if (ret) {
+                               goto end;
+                       }
+                       break;
+               case LTTNG_KERNEL_ABI_UPROBE:
+                       ret = save_kernel_userspace_probe_event(writer, event);
+                       if (ret) {
+                               goto end;
+                       }
+                       break;
+               default:
+                       ERR("Unsupported kernel instrumentation type.");
+                       ret = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+
+               /* /attributes */
+               ret = config_writer_close_element(writer);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       /* /event */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_kernel_events(struct config_writer *writer,
+       struct ltt_kernel_channel *kchan)
+{
+       int ret;
+       struct ltt_kernel_event *event;
+
+       ret = config_writer_open_element(writer, config_element_events);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       cds_list_for_each_entry(event, &kchan->events_list.head, list) {
+               ret = save_kernel_event(writer, event);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+
+       /* /events */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_ust_event(struct config_writer *writer,
+       struct ltt_ust_event *event)
+{
+       int ret;
+       const char *loglevel_type_string;
+
+       ret = config_writer_open_element(writer, config_element_event);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (event->attr.name[0]) {
+               ret = config_writer_write_element_string(writer,
+                       config_element_name, event->attr.name);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       ret = config_writer_write_element_bool(writer, config_element_enabled,
+               event->enabled);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (event->attr.instrumentation != LTTNG_UST_ABI_TRACEPOINT) {
+               ERR("Unsupported UST instrumentation type.");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+       ret = config_writer_write_element_string(writer, config_element_type,
+               config_event_type_tracepoint);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       loglevel_type_string = get_loglevel_type_string(
+               (lttng_ust_abi_loglevel_type) event->attr.loglevel_type);
+       if (!loglevel_type_string) {
+               ERR("Unsupported UST loglevel type.");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+               config_element_loglevel_type, loglevel_type_string);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* The log level is irrelevant if no "filtering" is enabled */
+       if (event->attr.loglevel_type != LTTNG_UST_ABI_LOGLEVEL_ALL) {
+               ret = config_writer_write_element_signed_int(writer,
+                               config_element_loglevel, event->attr.loglevel);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       if (event->filter_expression) {
+               ret = config_writer_write_element_string(writer,
+                       config_element_filter, event->filter_expression);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       if (event->exclusion && event->exclusion->count) {
+               uint32_t i;
+
+               ret = config_writer_open_element(writer,
+                       config_element_exclusions);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               for (i = 0; i < event->exclusion->count; i++) {
+                       ret = config_writer_write_element_string(writer,
+                               config_element_exclusion,
+                               LTTNG_EVENT_EXCLUSION_NAME_AT(
+                                       event->exclusion, i));
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end;
+                       }
+               }
+
+               /* /exclusions */
+               ret = config_writer_close_element(writer);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       /* /event */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_ust_events(struct config_writer *writer,
+       struct lttng_ht *events)
+{
+       int ret;
+       struct ltt_ust_event *event;
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+
+       ret = config_writer_open_element(writer, config_element_events);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(events->ht, &iter.iter, node, node) {
+               event = caa_container_of(node, struct ltt_ust_event, node);
+
+               if (event->internal) {
+                       /* Internal events must not be exposed to clients */
+                       continue;
+               }
+               ret = save_ust_event(writer, event);
+               if (ret != LTTNG_OK) {
+                       rcu_read_unlock();
+                       goto end;
+               }
+       }
+       rcu_read_unlock();
+
+       /* /events */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int init_ust_event_from_agent_event(struct ltt_ust_event *ust_event,
+               struct agent_event *agent_event)
+{
+       int ret;
+       enum lttng_ust_abi_loglevel_type ust_loglevel_type;
+
+       ust_event->enabled = AGENT_EVENT_IS_ENABLED(agent_event);
+       ust_event->attr.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
+       if (lttng_strncpy(ust_event->attr.name, agent_event->name,
+                       LTTNG_SYMBOL_NAME_LEN)) {
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+       switch (agent_event->loglevel_type) {
+       case LTTNG_EVENT_LOGLEVEL_ALL:
+               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
+               break;
+       case LTTNG_EVENT_LOGLEVEL_SINGLE:
+               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
+               break;
+       case LTTNG_EVENT_LOGLEVEL_RANGE:
+               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
+               break;
+       default:
+               ERR("Invalid agent_event loglevel_type.");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       ust_event->attr.loglevel_type = ust_loglevel_type;
+       ust_event->attr.loglevel = agent_event->loglevel_value;
+       ust_event->filter_expression = agent_event->filter_expression;
+       ust_event->exclusion = agent_event->exclusion;
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_agent_events(struct config_writer *writer,
+               struct agent *agent)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_str *node;
+
+       ret = config_writer_open_element(writer, config_element_events);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(agent->events->ht, &iter.iter, node, node) {
+               struct agent_event *agent_event;
+               struct ltt_ust_event fake_event;
+
+               memset(&fake_event, 0, sizeof(fake_event));
+               agent_event = caa_container_of(node, struct agent_event, node);
+
+               /*
+                * Initialize a fake ust event to reuse the same serialization
+                * function since UST and agent events contain the same info
+                * (and one could wonder why they don't reuse the same
+                * structures...).
+                */
+               ret = init_ust_event_from_agent_event(&fake_event, agent_event);
+               if (ret != LTTNG_OK) {
+                       rcu_read_unlock();
+                       goto end;
+               }
+               ret = save_ust_event(writer, &fake_event);
+               if (ret != LTTNG_OK) {
+                       rcu_read_unlock();
+                       goto end;
+               }
+       }
+       rcu_read_unlock();
+
+       /* /events */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_kernel_context(struct config_writer *writer,
+       struct lttng_kernel_abi_context *ctx)
+{
+       int ret = LTTNG_OK;
+
+       if (!ctx) {
+               goto end;
+       }
+
+       ret = config_writer_open_element(writer, config_element_context);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (ctx->ctx == LTTNG_KERNEL_ABI_CONTEXT_PERF_CPU_COUNTER) {
+               ret = config_writer_open_element(writer,
+                               config_element_context_perf);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               ret = config_writer_write_element_unsigned_int(writer,
+                       config_element_type, ctx->u.perf_counter.type);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               ret = config_writer_write_element_unsigned_int(writer,
+                       config_element_config, ctx->u.perf_counter.config);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               ret = config_writer_write_element_string(writer,
+                       config_element_name, ctx->u.perf_counter.name);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               /* /perf */
+               ret = config_writer_close_element(writer);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       } else {
+               const char *context_type_string =
+                       get_kernel_context_type_string(ctx->ctx);
+
+               if (!context_type_string) {
+                       ERR("Unsupported kernel context type.");
+                       ret = LTTNG_ERR_INVALID;
+                       goto end;
+               }
+
+               ret = config_writer_write_element_string(writer,
+                       config_element_type, context_type_string);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       /* /context */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_kernel_contexts(struct config_writer *writer,
+               struct ltt_kernel_channel *kchan)
+{
+       int ret;
+       struct ltt_kernel_context *ctx;
+
+       if (cds_list_empty(&kchan->ctx_list)) {
+               ret = LTTNG_OK;
+               goto end;
+       }
+
+       ret = config_writer_open_element(writer, config_element_contexts);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       cds_list_for_each_entry(ctx, &kchan->ctx_list, list) {
+               ret = save_kernel_context(writer, &ctx->ctx);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+
+       /* /contexts */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_ust_context_perf_thread_counter(struct config_writer *writer,
+               struct ltt_ust_context *ctx)
+{
+       int ret;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(ctx);
+
+       /* Perf contexts are saved as event_perf_context_type */
+       ret = config_writer_open_element(writer, config_element_context_perf);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+                       config_element_type, ctx->ctx.u.perf_counter.type);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+                       config_element_config, ctx->ctx.u.perf_counter.config);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer, config_element_name,
+                       ctx->ctx.u.perf_counter.name);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* /perf */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_ust_context_app_ctx(struct config_writer *writer,
+               struct ltt_ust_context *ctx)
+{
+       int ret;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(ctx);
+
+       /* Application contexts are saved as application_context_type */
+       ret = config_writer_open_element(writer, config_element_context_app);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+                       config_element_context_app_provider_name,
+                       ctx->ctx.u.app_ctx.provider_name);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+                       config_element_context_app_ctx_name,
+                       ctx->ctx.u.app_ctx.ctx_name);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* /app */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_ust_context_generic(struct config_writer *writer,
+               struct ltt_ust_context *ctx)
+{
+       int ret;
+       const char *context_type_string;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(ctx);
+
+       /* Save context as event_context_type_type */
+       context_type_string = get_ust_context_type_string(
+                       ctx->ctx.ctx);
+       if (!context_type_string) {
+               ERR("Unsupported UST context type.");
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+                       config_element_type, context_type_string);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_ust_context(struct config_writer *writer,
+       struct cds_list_head *ctx_list)
+{
+       int ret;
+       struct ltt_ust_context *ctx;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(ctx_list);
+
+       ret = config_writer_open_element(writer, config_element_contexts);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       cds_list_for_each_entry(ctx, ctx_list, list) {
+               ret = config_writer_open_element(writer,
+                       config_element_context);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               switch (ctx->ctx.ctx) {
+               case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
+                       ret = save_ust_context_perf_thread_counter(writer, ctx);
+                       break;
+               case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
+                       ret = save_ust_context_app_ctx(writer, ctx);
+                       break;
+               default:
+                       /* Save generic context. */
+                       ret = save_ust_context_generic(writer, ctx);
+               }
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+
+               /* /context */
+               ret = config_writer_close_element(writer);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       /* /contexts */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_kernel_channel(struct config_writer *writer,
+       struct ltt_kernel_channel *kchan)
+{
+       int ret;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(kchan);
+
+       ret = config_writer_open_element(writer, config_element_channel);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer, config_element_name,
+               kchan->channel->name);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_bool(writer, config_element_enabled,
+               kchan->channel->enabled);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = save_kernel_channel_attributes(writer, &kchan->channel->attr);
+       if (ret != LTTNG_OK) {
+               goto end;
+       }
+
+       ret = save_kernel_events(writer, kchan);
+       if (ret != LTTNG_OK) {
+               goto end;
+       }
+
+       ret = save_kernel_contexts(writer, kchan);
+       if (ret != LTTNG_OK) {
+               goto end;
+       }
+
+       /* /channel */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_ust_channel(struct config_writer *writer,
+       struct ltt_ust_channel *ust_chan,
+       struct ltt_ust_session *session)
+{
+       int ret;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(ust_chan);
+       LTTNG_ASSERT(session);
+
+       ret = config_writer_open_element(writer, config_element_channel);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer, config_element_name,
+               ust_chan->name);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_bool(writer, config_element_enabled,
+               ust_chan->enabled);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = save_ust_channel_attributes(writer, &ust_chan->attr);
+       if (ret != LTTNG_OK) {
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_tracefile_size, ust_chan->tracefile_size);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_tracefile_count, ust_chan->tracefile_count);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+               config_element_live_timer_interval,
+               session->live_timer_interval);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (ust_chan->domain == LTTNG_DOMAIN_UST) {
+               ret = save_ust_events(writer, ust_chan->events);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       } else {
+               struct agent *agent = NULL;
+
+               agent = trace_ust_find_agent(session, ust_chan->domain);
+               if (!agent) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       ERR("Could not find agent associated with the UST sub-domain");
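+                       /* A UST sub-domain channel always has a matching agent. */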
+                       goto end;
+               }
+
+               /*
+                * Channels associated with a UST sub-domain (such as JUL, Log4j
+                * or Python) don't have any non-internal events. We retrieve
+                * the "agent" events associated with this channel and serialize
+                * them.
+                */
+               ret = save_agent_events(writer, agent);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+
+       ret = save_ust_context(writer, &ust_chan->ctx_list);
+       if (ret != LTTNG_OK) {
+               goto end;
+       }
+
+       /* /channel */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_kernel_session(struct config_writer *writer,
+       struct ltt_session *session)
+{
+       int ret;
+       struct ltt_kernel_channel *kchan;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(session);
+
+       ret = config_writer_write_element_string(writer, config_element_type,
+               config_domain_type_kernel);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+               config_element_buffer_type, config_buffer_type_global);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_open_element(writer,
+               config_element_channels);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       cds_list_for_each_entry(kchan, &session->kernel_session->channel_list.head,
+                       list) {
+               ret = save_kernel_channel(writer, kchan);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+
+       /* /channels */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+static
+const char *get_config_domain_str(enum lttng_domain_type domain)
+{
+       const char *str_dom;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               str_dom = config_domain_type_kernel;
+               break;
+       case LTTNG_DOMAIN_UST:
+               str_dom = config_domain_type_ust;
+               break;
+       case LTTNG_DOMAIN_JUL:
+               str_dom = config_domain_type_jul;
+               break;
+       case LTTNG_DOMAIN_LOG4J:
+               str_dom = config_domain_type_log4j;
+               break;
+       case LTTNG_DOMAIN_PYTHON:
+               str_dom = config_domain_type_python;
+               break;
+       default:
+               abort();
+       }
+
+       return str_dom;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
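+/*
+ * A tracker is serialized as its tracker element wrapping a list of values;
+ * with an inclusion-set policy, each value is written either by name
+ * (user/group names) or as an integral id. Nothing is emitted when the
+ * tracking policy is "include all".
+ */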
+static int save_process_attr_tracker(struct config_writer *writer,
+               struct ltt_session *sess,
+               int domain,
+               enum lttng_process_attr process_attr)
+{
+       int ret = LTTNG_OK;
+       const char *element_id_tracker, *element_target_id, *element_id;
+       const struct process_attr_tracker *tracker;
+       enum lttng_tracking_policy tracking_policy;
+       struct lttng_process_attr_values *values = NULL;
+
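+       /*
+        * Resolve the element names used to serialize this process attribute;
+        * the integral id element is shared by every attribute type.
+        */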
+       switch (process_attr) {
+       case LTTNG_PROCESS_ATTR_PROCESS_ID:
+               element_id_tracker = config_element_process_attr_tracker_pid;
+               element_target_id = config_element_process_attr_pid_value;
+               element_id = config_element_process_attr_id;
+               break;
+       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
+               element_id_tracker = config_element_process_attr_tracker_vpid;
+               element_target_id = config_element_process_attr_vpid_value;
+               element_id = config_element_process_attr_id;
+               break;
+       case LTTNG_PROCESS_ATTR_USER_ID:
+               element_id_tracker = config_element_process_attr_tracker_uid;
+               element_target_id = config_element_process_attr_uid_value;
+               element_id = config_element_process_attr_id;
+               break;
+       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
+               element_id_tracker = config_element_process_attr_tracker_vuid;
+               element_target_id = config_element_process_attr_vuid_value;
+               element_id = config_element_process_attr_id;
+               break;
+       case LTTNG_PROCESS_ATTR_GROUP_ID:
+               element_id_tracker = config_element_process_attr_tracker_gid;
+               element_target_id = config_element_process_attr_gid_value;
+               element_id = config_element_process_attr_id;
+               break;
+       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
+               element_id_tracker = config_element_process_attr_tracker_vgid;
+               element_target_id = config_element_process_attr_vgid_value;
+               element_id = config_element_process_attr_id;
+               break;
+       default:
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+       {
+               tracker = kernel_get_process_attr_tracker(
+                               sess->kernel_session, process_attr);
+               LTTNG_ASSERT(tracker);
+               break;
+       }
+       case LTTNG_DOMAIN_UST:
+       {
+               tracker = trace_ust_get_process_attr_tracker(
+                               sess->ust_session, process_attr);
+               LTTNG_ASSERT(tracker);
+               break;
+       }
+       case LTTNG_DOMAIN_JUL:
+       case LTTNG_DOMAIN_LOG4J:
+       case LTTNG_DOMAIN_PYTHON:
+       default:
+               ret = LTTNG_ERR_UNSUPPORTED_DOMAIN;
+               goto end;
+       }
+
+       tracking_policy = process_attr_tracker_get_tracking_policy(tracker);
+       if (tracking_policy == LTTNG_TRACKING_POLICY_INCLUDE_ALL) {
+               /* Tracking all, nothing to output. */
+               ret = LTTNG_OK;
+               goto end;
+       }
+
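+       /*
+        * An exclude-all policy results in an empty value list, while an
+        * inclusion set lists every tracked value below.
+        */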
+       ret = config_writer_open_element(writer, element_id_tracker);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_open_element(
+                       writer, config_element_process_attr_values);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (tracking_policy == LTTNG_TRACKING_POLICY_INCLUDE_SET) {
+               unsigned int i, count;
+               enum process_attr_tracker_status status =
+                               process_attr_tracker_get_inclusion_set(
+                                               tracker, &values);
+
+               if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+
+               count = _lttng_process_attr_values_get_count(values);
+
+               for (i = 0; i < count; i++) {
+                       unsigned int integral_value = UINT_MAX;
+                       const char *name = NULL;
+                       const struct process_attr_value *value =
+                                       lttng_process_attr_tracker_values_get_at_index(
+                                                       values, i);
+
+                       LTTNG_ASSERT(value);
+                       ret = config_writer_open_element(
+                                       writer, element_target_id);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end;
+                       }
+
+                       switch (value->type) {
+                       case LTTNG_PROCESS_ATTR_VALUE_TYPE_PID:
+                               integral_value =
+                                               (unsigned int) value->value.pid;
+                               break;
+                       case LTTNG_PROCESS_ATTR_VALUE_TYPE_UID:
+                               integral_value =
+                                               (unsigned int) value->value.uid;
+                               break;
+                       case LTTNG_PROCESS_ATTR_VALUE_TYPE_GID:
+                               integral_value =
+                                               (unsigned int) value->value.gid;
+                               break;
+                       case LTTNG_PROCESS_ATTR_VALUE_TYPE_USER_NAME:
+                               name = value->value.user_name;
+                               LTTNG_ASSERT(name);
+                               break;
+                       case LTTNG_PROCESS_ATTR_VALUE_TYPE_GROUP_NAME:
+                               name = value->value.group_name;
+                               LTTNG_ASSERT(name);
+                               break;
+                       default:
+                               abort();
+                       }
+
+                       if (name) {
+                               ret = config_writer_write_element_string(writer,
+                                               config_element_name, name);
+                       } else {
+                               ret = config_writer_write_element_unsigned_int(
+                                               writer, element_id,
+                                               integral_value);
+                       }
+
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end;
+                       }
+
+                       /* /$element_target_id */
+                       ret = config_writer_close_element(writer);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end;
+                       }
+               }
+       }
+
+       /* /values */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* /$element_id_tracker */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       lttng_process_attr_values_destroy(values);
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static int save_process_attr_trackers(struct config_writer *writer,
+               struct ltt_session *sess,
+               int domain)
+{
+       int ret;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               ret = save_process_attr_tracker(writer, sess, domain,
+                               LTTNG_PROCESS_ATTR_PROCESS_ID);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+               ret = save_process_attr_tracker(writer, sess, domain,
+                               LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+               ret = save_process_attr_tracker(writer, sess, domain,
+                               LTTNG_PROCESS_ATTR_USER_ID);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+               ret = save_process_attr_tracker(writer, sess, domain,
+                               LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+               ret = save_process_attr_tracker(writer, sess, domain,
+                               LTTNG_PROCESS_ATTR_GROUP_ID);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+               ret = save_process_attr_tracker(writer, sess, domain,
+                               LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+               break;
+       case LTTNG_DOMAIN_UST:
+               ret = save_process_attr_tracker(writer, sess, domain,
+                               LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+               ret = save_process_attr_tracker(writer, sess, domain,
+                               LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+               ret = save_process_attr_tracker(writer, sess, domain,
+                               LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+               break;
+       default:
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_ust_domain(struct config_writer *writer,
+       struct ltt_session *session, enum lttng_domain_type domain)
+{
+       int ret;
+       struct ltt_ust_channel *ust_chan;
+       const char *buffer_type_string;
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+       const char *config_domain_name;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(session);
+
+       ret = config_writer_open_element(writer,
+                       config_element_domain);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       config_domain_name = get_config_domain_str(domain);
+       if (!config_domain_name) {
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+                       config_element_type, config_domain_name);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       buffer_type_string = get_buffer_type_string(
+                       session->ust_session->buffer_type);
+       if (!buffer_type_string) {
+               ERR("Unsupported buffer type.");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer,
+                       config_element_buffer_type, buffer_type_string);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_open_element(writer, config_element_channels);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
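+       /*
+        * The channel hash table is RCU-protected; hold the read-side lock
+        * while iterating over it.
+        */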
+       rcu_read_lock();
+       cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
+                       &iter.iter, node, node) {
+               ust_chan = caa_container_of(node, struct ltt_ust_channel, node);
+               if (domain == ust_chan->domain) {
+                       ret = save_ust_channel(writer, ust_chan, session->ust_session);
+                       if (ret != LTTNG_OK) {
+                               rcu_read_unlock();
+                               goto end;
+                       }
+               }
+       }
+       rcu_read_unlock();
+
+       /* /channels */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (domain == LTTNG_DOMAIN_UST) {
+               ret = config_writer_open_element(
+                               writer, config_element_process_attr_trackers);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               ret = save_process_attr_trackers(
+                               writer, session, LTTNG_DOMAIN_UST);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+
+               /* /trackers */
+               ret = config_writer_close_element(writer);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       /* /domain */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_domains(struct config_writer *writer, struct ltt_session *session)
+{
+       int ret = LTTNG_OK;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(session);
+
+       if (!session->kernel_session && !session->ust_session) {
+               goto end;
+       }
+
+       ret = config_writer_open_element(writer, config_element_domains);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
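+       /*
+        * The kernel domain element is opened and closed here; each UST
+        * (sub-)domain opens its own domain element in save_ust_domain().
+        */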
+       if (session->kernel_session) {
+               ret = config_writer_open_element(writer,
+                       config_element_domain);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               ret = save_kernel_session(writer, session);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+
+               ret = config_writer_open_element(
+                               writer, config_element_process_attr_trackers);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               ret = save_process_attr_trackers(
+                               writer, session, LTTNG_DOMAIN_KERNEL);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+
+               /* /trackers */
+               ret = config_writer_close_element(writer);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+               /* /domain */
+               ret = config_writer_close_element(writer);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       if (session->ust_session) {
+               ret = save_ust_domain(writer, session, LTTNG_DOMAIN_UST);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+
+               ret = save_ust_domain(writer, session, LTTNG_DOMAIN_JUL);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+
+               ret = save_ust_domain(writer, session, LTTNG_DOMAIN_LOG4J);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+
+               ret = save_ust_domain(writer, session, LTTNG_DOMAIN_PYTHON);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+
+       /* /domains */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_consumer_output(struct config_writer *writer,
+       struct consumer_output *output)
+{
+       int ret;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(output);
+
+       ret = config_writer_open_element(writer, config_element_consumer_output);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_bool(writer, config_element_enabled,
+                       output->enabled);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_open_element(writer, config_element_destination);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       switch (output->type) {
+       case CONSUMER_DST_LOCAL:
+               ret = config_writer_write_element_string(writer,
+                       config_element_path, output->dst.session_root_path);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+               break;
+       case CONSUMER_DST_NET:
+       {
+               char *uri;
+
+               uri = (char *) zmalloc(PATH_MAX);
+               if (!uri) {
+                       ret = LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+
+               ret = config_writer_open_element(writer, config_element_net_output);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end_net_output;
+               }
+
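+               /*
+                * A network output is only serialized when both the control
+                * and data URIs are set.
+                */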
+               if (output->dst.net.control_isset &&
+                       output->dst.net.data_isset) {
+                       ret = uri_to_str_url(&output->dst.net.control, uri, PATH_MAX);
+                       if (ret < 0) {
+                               ret = LTTNG_ERR_INVALID;
+                               goto end_net_output;
+                       }
+
+                       ret = config_writer_write_element_string(writer,
+                                       config_element_control_uri, uri);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end_net_output;
+                       }
+
+                       ret = uri_to_str_url(&output->dst.net.data, uri, PATH_MAX);
+                       if (ret < 0) {
+                               ret = LTTNG_ERR_INVALID;
+                               goto end_net_output;
+                       }
+
+                       ret = config_writer_write_element_string(writer,
+                                       config_element_data_uri, uri);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end_net_output;
+                       }
+                       ret = LTTNG_OK;
+end_net_output:
+                       free(uri);
+                       if (ret != LTTNG_OK) {
+                               goto end;
+                       }
+               } else {
+                       ret = !output->dst.net.control_isset ?
+                               LTTNG_ERR_URL_CTRL_MISS :
+                               LTTNG_ERR_URL_DATA_MISS;
+                       free(uri);
+                       goto end;
+               }
+
+               ret = config_writer_close_element(writer);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+               break;
+       }
+       default:
+               ERR("Unsupported consumer output type.");
+               ret = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       /* /destination */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* /consumer_output */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_snapshot_outputs(struct config_writer *writer,
+       struct snapshot *snapshot)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+       struct snapshot_output *output;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(snapshot);
+
+       ret = config_writer_open_element(writer, config_element_snapshot_outputs);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(snapshot->output_ht->ht, &iter.iter, output,
+                       node.node) {
+               ret = config_writer_open_element(writer,
+                       config_element_output);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end_unlock;
+               }
+
+               ret = config_writer_write_element_string(writer,
+                       config_element_name, output->name);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end_unlock;
+               }
+
+               ret = config_writer_write_element_unsigned_int(writer,
+                       config_element_max_size, output->max_size);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end_unlock;
+               }
+
+               ret = save_consumer_output(writer, output->consumer);
+               if (ret != LTTNG_OK) {
+                       goto end_unlock;
+               }
+
+               /* /output */
+               ret = config_writer_close_element(writer);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end_unlock;
+               }
+       }
+       rcu_read_unlock();
+
+       /* /snapshot_outputs */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       return ret;
+end_unlock:
+       rcu_read_unlock();
+       return ret;
+}
+
+/* Return LTTNG_OK on success else a LTTNG_ERR* code. */
+static
+int save_session_output(struct config_writer *writer,
+       struct ltt_session *session)
+{
+       int ret;
+
+       LTTNG_ASSERT(writer);
+       LTTNG_ASSERT(session);
+
+       if ((session->snapshot_mode && session->snapshot.nb_output == 0) ||
+               (!session->snapshot_mode && !session->consumer)) {
+               /* Session is in no output mode */
+               ret = LTTNG_OK;
+               goto end;
+       }
+
+       ret = config_writer_open_element(writer, config_element_output);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (session->snapshot_mode) {
+               ret = save_snapshot_outputs(writer, &session->snapshot);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       } else {
+               if (session->consumer) {
+                       ret = save_consumer_output(writer, session->consumer);
+                       if (ret != LTTNG_OK) {
+                               goto end;
+                       }
+               }
+       }
+
+       /* /output */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+       ret = LTTNG_OK;
+end:
+       return ret;
+}
+
+static
+int save_session_rotation_schedule(struct config_writer *writer,
+               enum lttng_rotation_schedule_type type, uint64_t value)
+{
+       int ret = 0;
+       const char *element_name;
+       const char *value_name;
+
+       switch (type) {
+       case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
+               element_name = config_element_rotation_schedule_periodic;
+               value_name = config_element_rotation_schedule_periodic_time_us;
+               break;
+       case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
+               element_name = config_element_rotation_schedule_size_threshold;
+               value_name = config_element_rotation_schedule_size_threshold_bytes;
+               break;
+       default:
+               ret = -1;
+               goto end;
+       }
+
+       ret = config_writer_open_element(writer, element_name);
+       if (ret) {
+               goto end;
+       }
+
+       ret = config_writer_write_element_unsigned_int(writer,
+                       value_name, value);
+       if (ret) {
+               goto end;
+       }
+
+       /* Close schedule descriptor element. */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               goto end;
+       }
+end:
+       return ret;
+}
+
+static
+int save_session_rotation_schedules(struct config_writer *writer,
+       struct ltt_session *session)
+{
+       int ret;
+
+       ret = config_writer_open_element(writer,
+                       config_element_rotation_schedules);
+       if (ret) {
+               goto end;
+       }
+       if (session->rotate_timer_period) {
+               ret = save_session_rotation_schedule(writer,
+                               LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC,
+                               session->rotate_timer_period);
+               if (ret) {
+                       goto close_schedules;
+               }
+       }
+       if (session->rotate_size) {
+               ret = save_session_rotation_schedule(writer,
+                               LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD,
+                               session->rotate_size);
+               if (ret) {
+                       goto close_schedules;
+               }
+       }
+
+close_schedules:
+       /* Close rotation schedules element. */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               goto end;
+       }
+end:
+       return ret;
+}
+
+/*
+ * Save the given session.
+ *
+ * Return LTTNG_OK on success else a LTTNG_ERR* code.
+ */
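+/*
+ * The resulting document is laid out roughly as follows (element names come
+ * from the config_element_* constants):
+ *
+ *   sessions
+ *     session
+ *       name
+ *       [shared memory path]
+ *       domains
+ *       started
+ *       [attributes (snapshot mode, live timer, rotation schedules)]
+ *       [output]
+ */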
+static
+int save_session(struct ltt_session *session,
+       struct lttng_save_session_attr *attr, lttng_sock_cred *creds)
+{
+       int ret, fd = -1;
+       char config_file_path[LTTNG_PATH_MAX];
+       size_t len;
+       struct config_writer *writer = NULL;
+       size_t session_name_len;
+       const char *provided_path;
+       int file_open_flags = O_CREAT | O_WRONLY | O_TRUNC;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(attr);
+       LTTNG_ASSERT(creds);
+
+       session_name_len = strlen(session->name);
+       memset(config_file_path, 0, sizeof(config_file_path));
+
+       if (!session_access_ok(session,
+               LTTNG_SOCK_GET_UID_CRED(creds)) || session->destroyed) {
+               ret = LTTNG_ERR_EPERM;
+               goto end;
+       }
+
+       provided_path = lttng_save_session_attr_get_output_url(attr);
+       if (provided_path) {
+               DBG3("Save session in provided path %s", provided_path);
+               len = strlen(provided_path);
+               if (len >= sizeof(config_file_path)) {
+                       ret = LTTNG_ERR_SET_URL;
+                       goto end;
+               }
+               strncpy(config_file_path, provided_path, sizeof(config_file_path));
+       } else {
+               ssize_t ret_len;
+               char *home_dir = utils_get_user_home_dir(
+                       LTTNG_SOCK_GET_UID_CRED(creds));
+               if (!home_dir) {
+                       ret = LTTNG_ERR_SET_URL;
+                       goto end;
+               }
+
+               ret_len = snprintf(config_file_path, sizeof(config_file_path),
+                               DEFAULT_SESSION_HOME_CONFIGPATH, home_dir);
+               free(home_dir);
+               if (ret_len < 0) {
+                       PERROR("snprintf save session");
+                       ret = LTTNG_ERR_SET_URL;
+                       goto end;
+               }
+               len = ret_len;
+       }
+
+       /*
+        * Check that the path fits in the config file path buffer, including
+        * the '/', the trailing .lttng extension and the NUL terminator.
+        */
+       if ((len + session_name_len + 2 +
+                       sizeof(DEFAULT_SESSION_CONFIG_FILE_EXTENSION))
+                       > sizeof(config_file_path)) {
+               ret = LTTNG_ERR_SET_URL;
+               goto end;
+       }
+
+       ret = run_as_mkdir_recursive(config_file_path, S_IRWXU | S_IRWXG,
+                       LTTNG_SOCK_GET_UID_CRED(creds), LTTNG_SOCK_GET_GID_CRED(creds));
+       if (ret) {
+               ret = LTTNG_ERR_SET_URL;
+               goto end;
+       }
+
+       /*
+        * At this point, we know that everything fits in the buffer. Validation
+        * was done just above.
+        */
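+       /*
+        * The resulting path is "<destination>/<session name>.lttng"; with the
+        * default destination this is typically
+        * "$HOME/.lttng/sessions/<session name>.lttng".
+        */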
+       config_file_path[len++] = '/';
+       strncpy(config_file_path + len, session->name, sizeof(config_file_path) - len);
+       len += session_name_len;
+       strcpy(config_file_path + len, DEFAULT_SESSION_CONFIG_FILE_EXTENSION);
+       len += sizeof(DEFAULT_SESSION_CONFIG_FILE_EXTENSION);
+       config_file_path[len] = '\0';
+
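+       /*
+        * Without an explicit overwrite request, O_EXCL makes the open fail
+        * with EEXIST, which is reported as LTTNG_ERR_SAVE_FILE_EXIST below.
+        */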
+       if (!attr->overwrite) {
+               file_open_flags |= O_EXCL;
+       }
+
+       fd = run_as_open(config_file_path, file_open_flags,
+               S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP,
+               LTTNG_SOCK_GET_UID_CRED(creds), LTTNG_SOCK_GET_GID_CRED(creds));
+       if (fd < 0) {
+               PERROR("Could not create configuration file");
+               switch (errno) {
+               case EEXIST:
+                       ret = LTTNG_ERR_SAVE_FILE_EXIST;
+                       break;
+               case EACCES:
+                       ret = LTTNG_ERR_EPERM;
+                       break;
+               default:
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       break;
+               }
+               goto end;
+       }
+
+       writer = config_writer_create(fd, 1);
+       if (!writer) {
+               ret = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       ret = config_writer_open_element(writer, config_element_sessions);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_open_element(writer, config_element_session);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = config_writer_write_element_string(writer, config_element_name,
+                       session->name);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (session->shm_path[0] != '\0') {
+               ret = config_writer_write_element_string(writer,
+                               config_element_shared_memory_path,
+                               session->shm_path);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       ret = save_domains(writer, session);
+       if (ret != LTTNG_OK) {
+               goto end;
+       }
+
+       ret = config_writer_write_element_bool(writer, config_element_started,
+                       session->active);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       if (session->snapshot_mode || session->live_timer ||
+                       session->rotate_timer_period || session->rotate_size) {
+               ret = config_writer_open_element(writer, config_element_attributes);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+
+               if (session->snapshot_mode) {
+                       ret = config_writer_write_element_bool(writer,
+                                       config_element_snapshot_mode, 1);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end;
+                       }
+               } else if (session->live_timer) {
+                       ret = config_writer_write_element_unsigned_int(writer,
+                                       config_element_live_timer_interval, session->live_timer);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end;
+                       }
+               }
+               if (session->rotate_timer_period || session->rotate_size) {
+                       ret = save_session_rotation_schedules(writer,
+                                       session);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end;
+                       }
+               }
+
+               /* /attributes */
+               ret = config_writer_close_element(writer);
+               if (ret) {
+                       ret = LTTNG_ERR_SAVE_IO_FAIL;
+                       goto end;
+               }
+       }
+
+       ret = save_session_output(writer, session);
+       if (ret != LTTNG_OK) {
+               goto end;
+       }
+
+       /* /session */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       /* /sessions */
+       ret = config_writer_close_element(writer);
+       if (ret) {
+               ret = LTTNG_ERR_SAVE_IO_FAIL;
+               goto end;
+       }
+
+       ret = LTTNG_OK;
+end:
+       if (writer && config_writer_destroy(writer)) {
+               /* Preserve the original error code */
+               ret = ret != LTTNG_OK ? ret : LTTNG_ERR_SAVE_IO_FAIL;
+       }
+       if (ret != LTTNG_OK) {
+               /* Delete file in case of error */
+               if ((fd >= 0) && unlink(config_file_path)) {
+                       PERROR("Unlinking XML session configuration.");
+               }
+       }
+
+       if (fd >= 0) {
+               int closeret;
+
+               closeret = close(fd);
+               if (closeret) {
+                       PERROR("Closing XML session configuration");
+               }
+       }
+
+       return ret;
+}
+
+int cmd_save_sessions(struct lttng_save_session_attr *attr,
+       lttng_sock_cred *creds)
+{
+       int ret;
+       const char *session_name;
+       struct ltt_session *session;
+
+       session_lock_list();
+
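+       /*
+        * A session name restricts the save to that session; otherwise every
+        * session in the list is saved, skipping those the caller may not
+        * access.
+        */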
+       session_name = lttng_save_session_attr_get_session_name(attr);
+       if (session_name) {
+               session = session_find_by_name(session_name);
+               if (!session) {
+                       ret = LTTNG_ERR_SESS_NOT_FOUND;
+                       goto end;
+               }
+
+               session_lock(session);
+               ret = save_session(session, attr, creds);
+               session_unlock(session);
+               session_put(session);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       } else {
+               struct ltt_session_list *list = session_get_list();
+
+               cds_list_for_each_entry(session, &list->head, list) {
+                       if (!session_get(session)) {
+                               continue;
+                       }
+                       session_lock(session);
+                       ret = save_session(session, attr, creds);
+                       session_unlock(session);
+                       session_put(session);
+                       /* Don't abort if we don't have the required permissions. */
+                       if (ret != LTTNG_OK && ret != LTTNG_ERR_EPERM) {
+                               goto end;
+                       }
+               }
+       }
+       ret = LTTNG_OK;
+
+end:
+       session_unlock_list();
+       return ret;
+}
diff --git a/src/bin/lttng-sessiond/session.c b/src/bin/lttng-sessiond/session.c
deleted file mode 100644 (file)
index dc6dea4..0000000
+++ /dev/null
@@ -1,1427 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <urcu.h>
-#include <dirent.h>
-#include <sys/types.h>
-#include <pthread.h>
-
-#include <common/common.h>
-#include <common/utils.h>
-#include <common/trace-chunk.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-#include <lttng/location-internal.h>
-#include "lttng-sessiond.h"
-#include "kernel.h"
-
-#include "session.h"
-#include "utils.h"
-#include "trace-ust.h"
-#include "timer.h"
-#include "cmd.h"
-
-struct ltt_session_destroy_notifier_element {
-       ltt_session_destroy_notifier notifier;
-       void *user_data;
-};
-
-struct ltt_session_clear_notifier_element {
-       ltt_session_clear_notifier notifier;
-       void *user_data;
-};
-
-/*
- * NOTES:
- *
- * No ltt_session.lock is taken here because those data structure are widely
- * spread across the lttng-tools code base so before calling functions below
- * that can read/write a session, the caller MUST acquire the session lock
- * using session_lock() and session_unlock().
- */
-
-/*
- * Init tracing session list.
- *
- * Please see session.h for more explanation and correct usage of the list.
- */
-static struct ltt_session_list ltt_session_list = {
-       .head = CDS_LIST_HEAD_INIT(ltt_session_list.head),
-       .lock = PTHREAD_MUTEX_INITIALIZER,
-       .removal_cond = PTHREAD_COND_INITIALIZER,
-       .next_uuid = 0,
-};
-
-/* These characters are forbidden in a session name. Used by validate_name. */
-static const char *forbidden_name_chars = "/";
-
-/* Global hash table to keep the sessions, indexed by id. */
-static struct lttng_ht *ltt_sessions_ht_by_id = NULL;
-/* Global hash table to keep the sessions, indexed by name. */
-static struct lttng_ht *ltt_sessions_ht_by_name = NULL;
-
-/*
- * Validate the session name for forbidden characters.
- *
- * Return 0 on success else -1 meaning a forbidden char. has been found.
- */
-static int validate_name(const char *name)
-{
-       int ret;
-       char *tok, *tmp_name;
-
-       LTTNG_ASSERT(name);
-
-       tmp_name = strdup(name);
-       if (!tmp_name) {
-               /* ENOMEM here. */
-               ret = -1;
-               goto error;
-       }
-
-       tok = strpbrk(tmp_name, forbidden_name_chars);
-       if (tok) {
-               DBG("Session name %s contains a forbidden character", name);
-               /* Forbidden character has been found. */
-               ret = -1;
-               goto error;
-       }
-       ret = 0;
-
-error:
-       free(tmp_name);
-       return ret;
-}
-
-/*
- * Add a ltt_session structure to the global list.
- *
- * The caller MUST acquire the session list lock before.
- * Returns the unique identifier for the session.
- */
-static uint64_t add_session_list(struct ltt_session *ls)
-{
-       LTTNG_ASSERT(ls);
-
-       cds_list_add(&ls->list, &ltt_session_list.head);
-       return ltt_session_list.next_uuid++;
-}
-
-/*
- * Delete a ltt_session structure to the global list.
- *
- * The caller MUST acquire the session list lock before.
- */
-static void del_session_list(struct ltt_session *ls)
-{
-       LTTNG_ASSERT(ls);
-
-       cds_list_del(&ls->list);
-}
-
-/*
- * Return a pointer to the session list.
- */
-struct ltt_session_list *session_get_list(void)
-{
-       return &ltt_session_list;
-}
-
-/*
- * Returns once the session list is empty.
- */
-void session_list_wait_empty(void)
-{
-       pthread_mutex_lock(&ltt_session_list.lock);
-       while (!cds_list_empty(&ltt_session_list.head)) {
-               pthread_cond_wait(&ltt_session_list.removal_cond,
-                               &ltt_session_list.lock);
-       }
-       pthread_mutex_unlock(&ltt_session_list.lock);
-}
-
-/*
- * Acquire session list lock
- */
-void session_lock_list(void)
-{
-       pthread_mutex_lock(&ltt_session_list.lock);
-}
-
-/*
- * Try to acquire session list lock
- */
-int session_trylock_list(void)
-{
-       return pthread_mutex_trylock(&ltt_session_list.lock);
-}
-
-/*
- * Release session list lock
- */
-void session_unlock_list(void)
-{
-       pthread_mutex_unlock(&ltt_session_list.lock);
-}
-
-/*
- * Get the session's consumer destination type.
- *
- * The caller must hold the session lock.
- */
-enum consumer_dst_type session_get_consumer_destination_type(
-               const struct ltt_session *session)
-{
-       /*
-        * The output information is duplicated in both of those session types.
-        * Hence, it doesn't matter from which it is retrieved. However, it is
-        * possible for only one of them to be set.
-        */
-       return session->kernel_session ?
-                       session->kernel_session->consumer->type :
-                       session->ust_session->consumer->type;
-}
-
-/*
- * Get the session's consumer network hostname.
- * The caller must ensure that the destination is of type "net".
- *
- * The caller must hold the session lock.
- */
-const char *session_get_net_consumer_hostname(const struct ltt_session *session)
-{
-       const char *hostname = NULL;
-       const struct consumer_output *output;
-
-       output = session->kernel_session ?
-                       session->kernel_session->consumer :
-                       session->ust_session->consumer;
-
-       /*
-        * hostname is assumed to be the same for both control and data
-        * connections.
-        */
-       switch (output->dst.net.control.dtype) {
-       case LTTNG_DST_IPV4:
-               hostname = output->dst.net.control.dst.ipv4;
-               break;
-       case LTTNG_DST_IPV6:
-               hostname = output->dst.net.control.dst.ipv6;
-               break;
-       default:
-               abort();
-       }
-       return hostname;
-}
-
-/*
- * Get the session's consumer network control and data ports.
- * The caller must ensure that the destination is of type "net".
- *
- * The caller must hold the session lock.
- */
-void session_get_net_consumer_ports(const struct ltt_session *session,
-               uint16_t *control_port, uint16_t *data_port)
-{
-       const struct consumer_output *output;
-
-       output = session->kernel_session ?
-                       session->kernel_session->consumer :
-                       session->ust_session->consumer;
-       *control_port = output->dst.net.control.port;
-       *data_port = output->dst.net.data.port;
-}
-
-/*
- * Get the location of the latest trace archive produced by a rotation.
- *
- * The caller must hold the session lock.
- */
-struct lttng_trace_archive_location *session_get_trace_archive_location(
-               const struct ltt_session *session)
-{
-       int ret;
-       struct lttng_trace_archive_location *location = NULL;
-       char *chunk_path = NULL;
-
-       if (session->rotation_state != LTTNG_ROTATION_STATE_COMPLETED ||
-                       !session->last_archived_chunk_name) {
-               goto end;
-       }
-
-       switch (session_get_consumer_destination_type(session)) {
-       case CONSUMER_DST_LOCAL:
-               ret = asprintf(&chunk_path,
-                               "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY "/%s",
-                               session_get_base_path(session),
-                               session->last_archived_chunk_name);
-               if (ret == -1) {
-                       goto end;
-               }
-               location = lttng_trace_archive_location_local_create(
-                               chunk_path);
-               break;
-       case CONSUMER_DST_NET:
-       {
-               const char *hostname;
-               uint16_t control_port, data_port;
-
-               hostname = session_get_net_consumer_hostname(session);
-               session_get_net_consumer_ports(session,
-                               &control_port,
-                               &data_port);
-               location = lttng_trace_archive_location_relay_create(
-                               hostname,
-                               LTTNG_TRACE_ARCHIVE_LOCATION_RELAY_PROTOCOL_TYPE_TCP,
-                               control_port, data_port, session->last_chunk_path);
-               break;
-       }
-       default:
-               abort();
-       }
-end:
-       free(chunk_path);
-       return location;
-}
-
-/*
- * Allocate the ltt_sessions_ht_by_id and ltt_sessions_ht_by_name HT.
- *
- * The session list lock must be held.
- */
-static int ltt_sessions_ht_alloc(void)
-{
-       int ret = 0;
-
-       DBG("Allocating ltt_sessions_ht_by_id");
-       ltt_sessions_ht_by_id = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-       if (!ltt_sessions_ht_by_id) {
-               ret = -1;
-               ERR("Failed to allocate ltt_sessions_ht_by_id");
-               goto end;
-       }
-
-       DBG("Allocating ltt_sessions_ht_by_name");
-       ltt_sessions_ht_by_name = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
-       if (!ltt_sessions_ht_by_name) {
-               ret = -1;
-               ERR("Failed to allocate ltt_sessions_ht_by_name");
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Destroy the ltt_sessions_ht_by_id HT.
- *
- * The session list lock must be held.
- */
-static void ltt_sessions_ht_destroy(void)
-{
-       if (ltt_sessions_ht_by_id) {
-               ht_cleanup_push(ltt_sessions_ht_by_id);
-               ltt_sessions_ht_by_id = NULL;
-       }
-
-       if (ltt_sessions_ht_by_name) {
-               ht_cleanup_push(ltt_sessions_ht_by_name);
-               ltt_sessions_ht_by_name = NULL;
-       }
-
-       return;
-}
-
-/*
- * Add a ltt_session to the ltt_sessions_ht_by_id and ltt_sessions_ht_by_name.
- * If unallocated, the ltt_sessions_ht_by_id and ltt_sessions_ht_by_name. HTs
- * are allocated. The session list lock must be held.
- */
-static void add_session_ht(struct ltt_session *ls)
-{
-       int ret;
-
-       LTTNG_ASSERT(ls);
-
-       if (!ltt_sessions_ht_by_id) {
-               ret = ltt_sessions_ht_alloc();
-               if (ret) {
-                       ERR("Error allocating the sessions HT");
-                       goto end;
-               }
-       }
-
-       /* Should always be present with ltt_sessions_ht_by_id. */
-       LTTNG_ASSERT(ltt_sessions_ht_by_name);
-
-       lttng_ht_node_init_u64(&ls->node, ls->id);
-       lttng_ht_add_unique_u64(ltt_sessions_ht_by_id, &ls->node);
-
-       lttng_ht_node_init_str(&ls->node_by_name, ls->name);
-       lttng_ht_add_unique_str(ltt_sessions_ht_by_name, &ls->node_by_name);
-
-end:
-       return;
-}
-
-/*
- * Test if ltt_sessions_ht_by_id/name are empty.
- * Return 1 if empty, 0 if not empty.
- * The session list lock must be held.
- */
-static int ltt_sessions_ht_empty(void)
-{
-       unsigned long count;
-
-       if (!ltt_sessions_ht_by_id) {
-               count = 0;
-               goto end;
-       }
-
-       LTTNG_ASSERT(ltt_sessions_ht_by_name);
-
-       count = lttng_ht_get_count(ltt_sessions_ht_by_id);
-       LTTNG_ASSERT(count == lttng_ht_get_count(ltt_sessions_ht_by_name));
-end:
-       return count ? 0 : 1;
-}
-
-/*
- * Remove a ltt_session from the ltt_sessions_ht_by_id/name.
- * If empty, the ltt_sessions_ht_by_id/name HTs are freed.
- * The session list lock must be held.
- */
-static void del_session_ht(struct ltt_session *ls)
-{
-       struct lttng_ht_iter iter;
-       int ret;
-
-       LTTNG_ASSERT(ls);
-       LTTNG_ASSERT(ltt_sessions_ht_by_id);
-       LTTNG_ASSERT(ltt_sessions_ht_by_name);
-
-       iter.iter.node = &ls->node.node;
-       ret = lttng_ht_del(ltt_sessions_ht_by_id, &iter);
-       LTTNG_ASSERT(!ret);
-
-       iter.iter.node = &ls->node_by_name.node;
-       ret = lttng_ht_del(ltt_sessions_ht_by_name, &iter);
-       LTTNG_ASSERT(!ret);
-
-       if (ltt_sessions_ht_empty()) {
-               DBG("Empty ltt_sessions_ht_by_id/name, destroying hash tables");
-               ltt_sessions_ht_destroy();
-       }
-}
-
-/*
- * Acquire session lock
- */
-void session_lock(struct ltt_session *session)
-{
-       LTTNG_ASSERT(session);
-
-       pthread_mutex_lock(&session->lock);
-}
-
-/*
- * Release session lock
- */
-void session_unlock(struct ltt_session *session)
-{
-       LTTNG_ASSERT(session);
-
-       pthread_mutex_unlock(&session->lock);
-}
-
-static
-int _session_set_trace_chunk_no_lock_check(struct ltt_session *session,
-               struct lttng_trace_chunk *new_trace_chunk,
-               struct lttng_trace_chunk **_current_trace_chunk)
-{
-       int ret = 0;
-       unsigned int i, refs_to_acquire = 0, refs_acquired = 0, refs_to_release = 0;
-       struct cds_lfht_iter iter;
-       struct consumer_socket *socket;
-       struct lttng_trace_chunk *current_trace_chunk;
-       uint64_t chunk_id;
-       enum lttng_trace_chunk_status chunk_status;
-
-       rcu_read_lock();
-       /*
-        * Ownership of current trace chunk is transferred to
-        * `current_trace_chunk`.
-        */
-       current_trace_chunk = session->current_trace_chunk;
-       session->current_trace_chunk = NULL;
-       if (session->ust_session) {
-               lttng_trace_chunk_put(
-                               session->ust_session->current_trace_chunk);
-               session->ust_session->current_trace_chunk = NULL;
-       }
-       if (session->kernel_session) {
-               lttng_trace_chunk_put(
-                               session->kernel_session->current_trace_chunk);
-               session->kernel_session->current_trace_chunk = NULL;
-       }
-       if (!new_trace_chunk) {
-               ret = 0;
-               goto end;
-       }
-       chunk_status = lttng_trace_chunk_get_id(new_trace_chunk, &chunk_id);
-       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-
-       refs_to_acquire = 1;
-       refs_to_acquire += !!session->ust_session;
-       refs_to_acquire += !!session->kernel_session;
-
-       for (refs_acquired = 0; refs_acquired < refs_to_acquire;
-                       refs_acquired++) {
-               if (!lttng_trace_chunk_get(new_trace_chunk)) {
-                       ERR("Failed to acquire reference to new trace chunk of session \"%s\"",
-                                       session->name);
-                       goto error;
-               }
-       }
-
-       if (session->ust_session) {
-               const uint64_t relayd_id =
-                               session->ust_session->consumer->net_seq_index;
-               const bool is_local_trace =
-                               session->ust_session->consumer->type ==
-                               CONSUMER_DST_LOCAL;
-
-               session->ust_session->current_trace_chunk = new_trace_chunk;
-               if (is_local_trace) {
-                       enum lttng_error_code ret_error_code;
-
-                       ret_error_code = ust_app_create_channel_subdirectories(
-                                       session->ust_session);
-                       if (ret_error_code != LTTNG_OK) {
-                               goto error;
-                       }
-               }
-               cds_lfht_for_each_entry(
-                               session->ust_session->consumer->socks->ht,
-                               &iter, socket, node.node) {
-                       pthread_mutex_lock(socket->lock);
-                       ret = consumer_create_trace_chunk(socket,
-                                       relayd_id,
-                                       session->id, new_trace_chunk,
-                                       DEFAULT_UST_TRACE_DIR);
-                       pthread_mutex_unlock(socket->lock);
-                       if (ret) {
-                               goto error;
-                       }
-               }
-       }
-       if (session->kernel_session) {
-               const uint64_t relayd_id =
-                               session->kernel_session->consumer->net_seq_index;
-               const bool is_local_trace =
-                               session->kernel_session->consumer->type ==
-                               CONSUMER_DST_LOCAL;
-
-               session->kernel_session->current_trace_chunk = new_trace_chunk;
-               if (is_local_trace) {
-                       enum lttng_error_code ret_error_code;
-
-                       ret_error_code = kernel_create_channel_subdirectories(
-                                       session->kernel_session);
-                       if (ret_error_code != LTTNG_OK) {
-                               goto error;
-                       }
-               }
-               cds_lfht_for_each_entry(
-                               session->kernel_session->consumer->socks->ht,
-                               &iter, socket, node.node) {
-                       pthread_mutex_lock(socket->lock);
-                       ret = consumer_create_trace_chunk(socket,
-                                       relayd_id,
-                                       session->id, new_trace_chunk,
-                                       DEFAULT_KERNEL_TRACE_DIR);
-                       pthread_mutex_unlock(socket->lock);
-                       if (ret) {
-                               goto error;
-                       }
-               }
-       }
-
-       /*
-        * Update local current trace chunk state last, only if all remote
-        * creations succeeded.
-        */
-       session->current_trace_chunk = new_trace_chunk;
-       LTTNG_OPTIONAL_SET(&session->most_recent_chunk_id, chunk_id);
-end:
-       if (_current_trace_chunk) {
-               *_current_trace_chunk = current_trace_chunk;
-               current_trace_chunk = NULL;
-       }
-end_no_move:
-       rcu_read_unlock();
-       lttng_trace_chunk_put(current_trace_chunk);
-       return ret;
-error:
-       if (session->ust_session) {
-               session->ust_session->current_trace_chunk = NULL;
-       }
-       if (session->kernel_session) {
-               session->kernel_session->current_trace_chunk = NULL;
-       }
-       /*
-        * Release references taken in the case where all references could not
-        * be acquired.
-        */
-       refs_to_release = refs_to_acquire - refs_acquired;
-       for (i = 0; i < refs_to_release; i++) {
-               lttng_trace_chunk_put(new_trace_chunk);
-       }
-       ret = -1;
-       goto end_no_move;
-}
-
-struct lttng_trace_chunk *session_create_new_trace_chunk(
-               const struct ltt_session *session,
-               const struct consumer_output *consumer_output_override,
-               const char *session_base_path_override,
-               const char *chunk_name_override)
-{
-       int ret;
-       struct lttng_trace_chunk *trace_chunk = NULL;
-       enum lttng_trace_chunk_status chunk_status;
-       const time_t chunk_creation_ts = time(NULL);
-       bool is_local_trace;
-       const char *base_path;
-       struct lttng_directory_handle *session_output_directory = NULL;
-       const struct lttng_credentials session_credentials = {
-               .uid = LTTNG_OPTIONAL_INIT_VALUE(session->uid),
-               .gid = LTTNG_OPTIONAL_INIT_VALUE(session->gid),
-       };
-       uint64_t next_chunk_id;
-       const struct consumer_output *output;
-       const char *new_path;
-
-       if (consumer_output_override) {
-               output = consumer_output_override;
-       } else {
-               LTTNG_ASSERT(session->ust_session || session->kernel_session);
-               output = session->ust_session ?
-                                        session->ust_session->consumer :
-                                        session->kernel_session->consumer;
-       }
-
-       is_local_trace = output->type == CONSUMER_DST_LOCAL;
-       base_path = session_base_path_override ? :
-                       consumer_output_get_base_path(output);
-
-       if (chunk_creation_ts == (time_t) -1) {
-               PERROR("Failed to sample time while creating session \"%s\" trace chunk",
-                               session->name);
-               goto error;
-       }
-
-       next_chunk_id = session->most_recent_chunk_id.is_set ?
-                       session->most_recent_chunk_id.value + 1 : 0;
-
-       if (session->current_trace_chunk &&
-                       !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) {
-               chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk,
-                                       DEFAULT_CHUNK_TMP_OLD_DIRECTORY);
-               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       goto error;
-               }
-       }
-       if (!session->current_trace_chunk) {
-               if (!session->rotated) {
-                       new_path = "";
-               } else {
-                       new_path = NULL;
-               }
-       } else {
-               new_path = DEFAULT_CHUNK_TMP_NEW_DIRECTORY;
-       }
-
-       trace_chunk = lttng_trace_chunk_create(next_chunk_id,
-                       chunk_creation_ts, new_path);
-       if (!trace_chunk) {
-               goto error;
-       }
-
-       if (chunk_name_override) {
-               chunk_status = lttng_trace_chunk_override_name(trace_chunk,
-                               chunk_name_override);
-               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       goto error;
-               }
-       }
-
-       if (!is_local_trace) {
-               /*
-                * No need to set credentials and output directory
-                * for remote trace chunks.
-                */
-               goto end;
-       }
-
-       chunk_status = lttng_trace_chunk_set_credentials(trace_chunk,
-                       &session_credentials);
-       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-               goto error;
-       }
-
-       DBG("Creating base output directory of session \"%s\" at %s",
-                       session->name, base_path);
-       ret = utils_mkdir_recursive(base_path, S_IRWXU | S_IRWXG,
-                       session->uid, session->gid);
-       if (ret) {
-               goto error;
-       }
-       session_output_directory = lttng_directory_handle_create(base_path);
-       if (!session_output_directory) {
-               goto error;
-       }
-       chunk_status = lttng_trace_chunk_set_as_owner(trace_chunk,
-                       session_output_directory);
-       lttng_directory_handle_put(session_output_directory);
-       session_output_directory = NULL;
-       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-               goto error;
-       }
-end:
-       return trace_chunk;
-error:
-       lttng_directory_handle_put(session_output_directory);
-       lttng_trace_chunk_put(trace_chunk);
-       trace_chunk = NULL;
-       goto end;
-}
-
-int session_close_trace_chunk(struct ltt_session *session,
-               struct lttng_trace_chunk *trace_chunk,
-               enum lttng_trace_chunk_command_type close_command,
-               char *closed_trace_chunk_path)
-{
-       int ret = 0;
-       bool error_occurred = false;
-       struct cds_lfht_iter iter;
-       struct consumer_socket *socket;
-       enum lttng_trace_chunk_status chunk_status;
-       const time_t chunk_close_timestamp = time(NULL);
-       const char *new_path;
-
-       chunk_status = lttng_trace_chunk_set_close_command(
-                       trace_chunk, close_command);
-       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-               ret = -1;
-               goto end;
-       }
-
-       if (chunk_close_timestamp == (time_t) -1) {
-               ERR("Failed to sample the close timestamp of the current trace chunk of session \"%s\"",
-                               session->name);
-               ret = -1;
-               goto end;
-       }
-
-       if (close_command == LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE && !session->rotated) {
-               /* New chunk stays in session output directory. */
-               new_path = "";
-       } else {
-               /* Use chunk name for new chunk. */
-               new_path = NULL;
-       }
-       if (session->current_trace_chunk &&
-                       !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) {
-               /* Rename new chunk path. */
-               chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk,
-                                       new_path);
-               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       ret = -1;
-                       goto end;
-               }
-       }
-       if (!lttng_trace_chunk_get_name_overridden(trace_chunk) &&
-                       close_command == LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION) {
-               const char *old_path;
-
-               if (!session->rotated) {
-                       old_path = "";
-               } else {
-                       old_path = NULL;
-               }
-               /* We need to move back the .tmp_old_chunk to its rightful place. */
-               chunk_status = lttng_trace_chunk_rename_path(trace_chunk,
-                                       old_path);
-               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       ret = -1;
-                       goto end;
-               }
-       }
-       if (close_command == LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED) {
-               session->rotated = true;
-       }
-       chunk_status = lttng_trace_chunk_set_close_timestamp(trace_chunk,
-                       chunk_close_timestamp);
-       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-               ERR("Failed to set the close timestamp of the current trace chunk of session \"%s\"",
-                               session->name);
-               ret = -1;
-               goto end;
-       }
-
-       if (session->ust_session) {
-               const uint64_t relayd_id =
-                               session->ust_session->consumer->net_seq_index;
-
-               cds_lfht_for_each_entry(
-                               session->ust_session->consumer->socks->ht,
-                               &iter, socket, node.node) {
-                       pthread_mutex_lock(socket->lock);
-                       ret = consumer_close_trace_chunk(socket,
-                                       relayd_id,
-                                       session->id,
-                                       trace_chunk, closed_trace_chunk_path);
-                       pthread_mutex_unlock(socket->lock);
-                       if (ret) {
-                               ERR("Failed to close trace chunk on user space consumer");
-                               error_occurred = true;
-                       }
-               }
-       }
-       if (session->kernel_session) {
-               const uint64_t relayd_id =
-                               session->kernel_session->consumer->net_seq_index;
-
-               cds_lfht_for_each_entry(
-                               session->kernel_session->consumer->socks->ht,
-                               &iter, socket, node.node) {
-                       pthread_mutex_lock(socket->lock);
-                       ret = consumer_close_trace_chunk(socket,
-                                       relayd_id,
-                                       session->id,
-                                       trace_chunk, closed_trace_chunk_path);
-                       pthread_mutex_unlock(socket->lock);
-                       if (ret) {
-                               ERR("Failed to close trace chunk on kernel consumer");
-                               error_occurred = true;
-                       }
-               }
-       }
-       ret = error_occurred ? -1 : 0;
-end:
-       return ret;
-}
-
-/*
- * This function skips the metadata channel as the begin/end timestamps of a
- * metadata packet are useless.
- *
- * Moreover, opening a packet after a "clear" will cause problems for live
- * sessions as it will introduce padding that was not part of the first trace
- * chunk. The relay daemon expects the content of the metadata stream of
- * successive metadata trace chunks to be strict supersets of one another.
- *
- * For example, flushing a packet at the beginning of the metadata stream of
- * a trace chunk resulting from a "clear" session command will cause the
- * size of the metadata stream of the new trace chunk to not match the size of
- * the metadata stream of the original chunk. This will confuse the relay
- * daemon as the same "offset" in a metadata stream will no longer point
- * to the same content.
- */
-static
-enum lttng_error_code session_kernel_open_packets(struct ltt_session *session)
-{
-       enum lttng_error_code ret = LTTNG_OK;
-       struct consumer_socket *socket;
-       struct lttng_ht_iter iter;
-       struct cds_lfht_node *node;
-       struct ltt_kernel_channel *chan;
-
-       rcu_read_lock();
-
-       cds_lfht_first(session->kernel_session->consumer->socks->ht, &iter.iter);
-       node = cds_lfht_iter_get_node(&iter.iter);
-       socket = container_of(node, typeof(*socket), node.node);
-
-       cds_list_for_each_entry(chan,
-                       &session->kernel_session->channel_list.head, list) {
-               int open_ret;
-
-               DBG("Open packet of kernel channel: channel key = %" PRIu64
-                               ", session name = %s, session_id = %" PRIu64,
-                               chan->key, session->name, session->id);
-
-               open_ret = consumer_open_channel_packets(socket, chan->key);
-               if (open_ret < 0) {
-                       /* General error (no known error expected). */
-                       ret = LTTNG_ERR_UNK;
-                       goto end;
-               }
-       }
-
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-enum lttng_error_code session_open_packets(struct ltt_session *session)
-{
-       enum lttng_error_code ret = LTTNG_OK;
-
-       DBG("Opening packets of session channels: session name = %s, session id = %" PRIu64,
-                       session->name, session->id);
-
-       if (session->ust_session) {
-               ret = ust_app_open_packets(session);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-
-       if (session->kernel_session) {
-               ret = session_kernel_open_packets(session);
-               if (ret != LTTNG_OK) {
-                       goto end;
-               }
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Set a session's current trace chunk.
- *
- * Must be called with the session lock held.
- */
-int session_set_trace_chunk(struct ltt_session *session,
-               struct lttng_trace_chunk *new_trace_chunk,
-               struct lttng_trace_chunk **current_trace_chunk)
-{
-       ASSERT_LOCKED(session->lock);
-       return _session_set_trace_chunk_no_lock_check(session, new_trace_chunk,
-                       current_trace_chunk);
-}
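
For orientation, here is a minimal sketch of how a caller that already holds the session lock (and whatever other locks its command path requires) might chain these chunk primitives during a rotation. The function name is hypothetical, error handling is reduced to early returns, and passing NULL as the closed chunk path assumes the caller does not need it; this is not the daemon's actual rotation path.

    static int rotate_to_new_chunk_sketch(struct ltt_session *session)
    {
            int ret;
            struct lttng_trace_chunk *new_chunk, *old_chunk = NULL;

            new_chunk = session_create_new_trace_chunk(session, NULL, NULL, NULL);
            if (!new_chunk) {
                    return -1;
            }

            /* Publish the new chunk; ownership of the previous one is handed back. */
            ret = session_set_trace_chunk(session, new_chunk, &old_chunk);
            /* The session now holds its own references; drop this function's. */
            lttng_trace_chunk_put(new_chunk);
            if (ret) {
                    return -1;
            }

            if (old_chunk) {
                    ret = session_close_trace_chunk(session, old_chunk,
                                    LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED,
                                    NULL);
                    lttng_trace_chunk_put(old_chunk);
            }
            return ret;
    }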
-
-static
-void session_notify_destruction(const struct ltt_session *session)
-{
-       size_t i;
-       const size_t count = lttng_dynamic_array_get_count(
-                       &session->destroy_notifiers);
-
-       for (i = 0; i < count; i++) {
-               const struct ltt_session_destroy_notifier_element *element =
-                       lttng_dynamic_array_get_element(
-                                       &session->destroy_notifiers, i);
-
-               element->notifier(session, element->user_data);
-       }
-}
-
-/*
- * Fire each clear notifier once, and remove them from the array.
- */
-void session_notify_clear(struct ltt_session *session)
-{
-       size_t i;
-       const size_t count = lttng_dynamic_array_get_count(
-                       &session->clear_notifiers);
-
-       for (i = 0; i < count; i++) {
-               const struct ltt_session_clear_notifier_element *element =
-                       lttng_dynamic_array_get_element(
-                                       &session->clear_notifiers, i);
-
-               element->notifier(session, element->user_data);
-       }
-       lttng_dynamic_array_clear(&session->clear_notifiers);
-}
-
-static
-void session_release(struct urcu_ref *ref)
-{
-       int ret;
-       struct ltt_ust_session *usess;
-       struct ltt_kernel_session *ksess;
-       struct ltt_session *session = container_of(ref, typeof(*session), ref);
-       const bool session_published = session->published;
-
-       LTTNG_ASSERT(!session->chunk_being_archived);
-
-       usess = session->ust_session;
-       ksess = session->kernel_session;
-
-       /* Clean kernel session teardown, keeping data for destroy notifier. */
-       kernel_destroy_session(ksess);
-
-       /* UST session teardown, keeping data for destroy notifier. */
-       if (usess) {
-               /* Close any relayd session */
-               consumer_output_send_destroy_relayd(usess->consumer);
-
-               /* Destroy every UST application related to this session. */
-               ret = ust_app_destroy_trace_all(usess);
-               if (ret) {
-                       ERR("Error in ust_app_destroy_trace_all");
-               }
-
-               /* Clean up the rest, keeping destroy notifier data. */
-               trace_ust_destroy_session(usess);
-       }
-
-       /*
-        * Must notify the kernel thread here to update its poll set in order to
-        * remove the file descriptor(s) of the channel(s) just destroyed.
-        */
-       ret = notify_thread_pipe(the_kernel_poll_pipe[1]);
-       if (ret < 0) {
-               PERROR("write kernel poll pipe");
-       }
-
-       DBG("Destroying session %s (id %" PRIu64 ")", session->name, session->id);
-
-       snapshot_destroy(&session->snapshot);
-
-       pthread_mutex_destroy(&session->lock);
-
-       if (session_published) {
-               ASSERT_LOCKED(ltt_session_list.lock);
-               del_session_list(session);
-               del_session_ht(session);
-       }
-       session_notify_destruction(session);
-
-       consumer_output_put(session->consumer);
-       kernel_free_session(ksess);
-       session->kernel_session = NULL;
-       if (usess) {
-               trace_ust_free_session(usess);
-               session->ust_session = NULL;
-       }
-       lttng_dynamic_array_reset(&session->destroy_notifiers);
-       lttng_dynamic_array_reset(&session->clear_notifiers);
-       free(session->last_archived_chunk_name);
-       free(session->base_path);
-       free(session);
-       if (session_published) {
-               /*
-                * Broadcast after freeing to ensure the memory is
-                * reclaimed before the main thread exits.
-                */
-               ASSERT_LOCKED(ltt_session_list.lock);
-               pthread_cond_broadcast(&ltt_session_list.removal_cond);
-       }
-}
-
-/*
- * Acquire a reference to a session.
- * This function may fail (return false); its return value must be checked.
- */
-bool session_get(struct ltt_session *session)
-{
-       return urcu_ref_get_unless_zero(&session->ref);
-}
-
-/*
- * Release a reference to a session.
- */
-void session_put(struct ltt_session *session)
-{
-       if (!session) {
-               return;
-       }
-       /*
-        * The session list lock must be held as any session_put()
-        * may cause the removal of the session from the session_list.
-        */
-       ASSERT_LOCKED(ltt_session_list.lock);
-       LTTNG_ASSERT(session->ref.refcount);
-       urcu_ref_put(&session->ref, session_release);
-}
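
To illustrate the contract documented above, a short sketch of extending the lifetime of a session pointer that was handed to us: session_get() may fail once the last reference is gone, and the matching session_put() is done with the session list lock held. do_work_with_session() is a hypothetical helper.

    session_lock_list();
    if (session_get(session)) {
            /* Safe to use `session` until the matching put. */
            do_work_with_session(session); /* hypothetical */
            session_put(session);
    } else {
            /* The session was already being torn down; do not touch it. */
    }
    session_unlock_list();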
-
-/*
- * Destroy a session.
- *
- * This method does not immediately release/free the session as other
- * components may still hold a reference to the session. However,
- * the session should no longer be presented to the user.
- *
- * Releases the session list's reference to the session
- * and marks it as destroyed. Iterations on the session list should be
- * mindful of the "destroyed" flag.
- */
-void session_destroy(struct ltt_session *session)
-{
-       LTTNG_ASSERT(!session->destroyed);
-       session->destroyed = true;
-       session_put(session);
-}
-
-int session_add_destroy_notifier(struct ltt_session *session,
-               ltt_session_destroy_notifier notifier, void *user_data)
-{
-       const struct ltt_session_destroy_notifier_element element = {
-               .notifier = notifier,
-               .user_data = user_data
-       };
-
-       return lttng_dynamic_array_add_element(&session->destroy_notifiers,
-                       &element);
-}
-
-int session_add_clear_notifier(struct ltt_session *session,
-               ltt_session_clear_notifier notifier, void *user_data)
-{
-       const struct ltt_session_clear_notifier_element element = {
-               .notifier = notifier,
-               .user_data = user_data
-       };
-
-       return lttng_dynamic_array_add_element(&session->clear_notifiers,
-                       &element);
-}
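
As an illustration, a destroy notifier could be registered as below. The callback signature is an assumption inferred from how session_notify_destruction() invokes the registered elements, and on_session_destroyed() is a hypothetical function.

    static void on_session_destroyed(const struct ltt_session *session,
                    void *user_data)
    {
            DBG("Session \"%s\" released (user_data = %p)", session->name, user_data);
    }

    /* Typically done while the caller holds a reference and the session lock. */
    if (session_add_destroy_notifier(session, on_session_destroyed, NULL)) {
            ERR("Failed to register destroy notifier");
    }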
-
-/*
- * Return a ltt_session structure ptr that matches name. If no session is found,
- * NULL is returned. This must be called with the session list lock held using
- * session_lock_list and session_unlock_list.
- * A reference to the session is implicitly acquired by this function.
- */
-struct ltt_session *session_find_by_name(const char *name)
-{
-       struct ltt_session *iter;
-
-       LTTNG_ASSERT(name);
-       ASSERT_LOCKED(ltt_session_list.lock);
-
-       DBG2("Trying to find session by name %s", name);
-
-       cds_list_for_each_entry(iter, &ltt_session_list.head, list) {
-               if (!strncmp(iter->name, name, NAME_MAX) &&
-                               !iter->destroyed) {
-                       goto found;
-               }
-       }
-
-       return NULL;
-found:
-       return session_get(iter) ? iter : NULL;
-}
-
-/*
- * Return an ltt_session that matches the id. If no session is found,
- * NULL is returned. This must be called with rcu_read_lock and
- * session list lock held (to guarantee the lifetime of the session).
- */
-struct ltt_session *session_find_by_id(uint64_t id)
-{
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-       struct ltt_session *ls;
-
-       ASSERT_LOCKED(ltt_session_list.lock);
-
-       if (!ltt_sessions_ht_by_id) {
-               goto end;
-       }
-
-       lttng_ht_lookup(ltt_sessions_ht_by_id, &id, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (node == NULL) {
-               goto end;
-       }
-       ls = caa_container_of(node, struct ltt_session, node);
-
-       DBG3("Session %" PRIu64 " found by id.", id);
-       return session_get(ls) ? ls : NULL;
-
-end:
-       DBG3("Session %" PRIu64 " NOT found by id", id);
-       return NULL;
-}
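
A minimal lookup sketch that follows the documented requirements (session list lock and RCU read-side lock held across the lookup, reference released when done); the session id is an arbitrary placeholder.

    struct ltt_session *session;

    session_lock_list();
    rcu_read_lock();
    session = session_find_by_id(42); /* placeholder id */
    rcu_read_unlock();
    if (session) {
            session_lock(session);
            /* ... read or modify the session ... */
            session_unlock(session);
            /* Release the reference acquired by the lookup. */
            session_put(session);
    }
    session_unlock_list();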
-
-/*
- * Create a new session and add it to the session list.
- * Session list lock must be held by the caller.
- */
-enum lttng_error_code session_create(const char *name, uid_t uid, gid_t gid,
-               struct ltt_session **out_session)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct ltt_session *new_session = NULL;
-
-       ASSERT_LOCKED(ltt_session_list.lock);
-       if (name) {
-               struct ltt_session *clashing_session;
-
-               clashing_session = session_find_by_name(name);
-               if (clashing_session) {
-                       session_put(clashing_session);
-                       ret_code = LTTNG_ERR_EXIST_SESS;
-                       goto error;
-               }
-       }
-       new_session = zmalloc(sizeof(struct ltt_session));
-       if (!new_session) {
-               PERROR("Failed to allocate an ltt_session structure");
-               ret_code = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       lttng_dynamic_array_init(&new_session->destroy_notifiers,
-                       sizeof(struct ltt_session_destroy_notifier_element),
-                       NULL);
-       lttng_dynamic_array_init(&new_session->clear_notifiers,
-                       sizeof(struct ltt_session_clear_notifier_element),
-                       NULL);
-       urcu_ref_init(&new_session->ref);
-       pthread_mutex_init(&new_session->lock, NULL);
-
-       new_session->creation_time = time(NULL);
-       if (new_session->creation_time == (time_t) -1) {
-               PERROR("Failed to sample session creation time");
-               ret_code = LTTNG_ERR_SESSION_FAIL;
-               goto error;
-       }
-
-       /* Create default consumer output. */
-       new_session->consumer = consumer_create_output(CONSUMER_DST_LOCAL);
-       if (new_session->consumer == NULL) {
-               ret_code = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       if (name) {
-               ret = lttng_strncpy(new_session->name, name, sizeof(new_session->name));
-               if (ret) {
-                       ret_code = LTTNG_ERR_SESSION_INVALID_CHAR;
-                       goto error;
-               }
-               ret = validate_name(name);
-               if (ret < 0) {
-                       ret_code = LTTNG_ERR_SESSION_INVALID_CHAR;
-                       goto error;
-               }
-       } else {
-               int i = 0;
-               bool found_name = false;
-               char datetime[16];
-               struct tm *timeinfo;
-
-               timeinfo = localtime(&new_session->creation_time);
-               if (!timeinfo) {
-                       ret_code = LTTNG_ERR_SESSION_FAIL;
-                       goto error;
-               }
-               strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
-               for (i = 0; i < INT_MAX; i++) {
-                       struct ltt_session *clashing_session;
-
-                       if (i == 0) {
-                               ret = snprintf(new_session->name,
-                                               sizeof(new_session->name),
-                                               "%s-%s",
-                                               DEFAULT_SESSION_NAME,
-                                               datetime);
-                       } else {
-                               ret = snprintf(new_session->name,
-                                               sizeof(new_session->name),
-                                               "%s%d-%s",
-                                               DEFAULT_SESSION_NAME, i,
-                                               datetime);
-                       }
-                       new_session->name_contains_creation_time = true;
-                       if (ret == -1 || ret >= sizeof(new_session->name)) {
-                               /*
-                                * Null-terminate in case the name is used
-                                * in logging statements.
-                                */
-                               new_session->name[sizeof(new_session->name) - 1] = '\0';
-                               ret_code = LTTNG_ERR_SESSION_FAIL;
-                               goto error;
-                       }
-
-                       clashing_session =
-                                       session_find_by_name(new_session->name);
-                       session_put(clashing_session);
-                       if (!clashing_session) {
-                               found_name = true;
-                               break;
-                       }
-               }
-               if (found_name) {
-                       DBG("Generated session name \"%s\"", new_session->name);
-                       new_session->has_auto_generated_name = true;
-               } else {
-                       ERR("Failed to auto-generate a session name");
-                       ret_code = LTTNG_ERR_SESSION_FAIL;
-                       goto error;
-               }
-       }
-
-       ret = gethostname(new_session->hostname, sizeof(new_session->hostname));
-       if (ret < 0) {
-               if (errno == ENAMETOOLONG) {
-                       new_session->hostname[sizeof(new_session->hostname) - 1] = '\0';
-                       ERR("Hostname exceeds the maximal permitted length and has been truncated to %s",
-                                       new_session->hostname);
-               } else {
-                       ret_code = LTTNG_ERR_SESSION_FAIL;
-                       goto error;
-               }
-       }
-
-       new_session->uid = uid;
-       new_session->gid = gid;
-
-       ret = snapshot_init(&new_session->snapshot);
-       if (ret < 0) {
-               ret_code = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       new_session->rotation_state = LTTNG_ROTATION_STATE_NO_ROTATION;
-
-       /* Add new session to the session list. */
-       new_session->id = add_session_list(new_session);
-
-       /*
-        * Add the new session to the ltt_sessions_ht_by_id.
-        * No ownership is taken by the hash table; it is merely
-        * a wrapper around the session list used for faster access
-        * by session id.
-        */
-       add_session_ht(new_session);
-       new_session->published = true;
-
-       /*
-        * Consumer is left to NULL since the create_session_uri command will
-        * set it up and, if valid, assign it to the session.
-        */
-       DBG("Tracing session %s created with ID %" PRIu64 " by uid = %d, gid = %d",
-                       new_session->name, new_session->id, new_session->uid,
-                       new_session->gid);
-       ret_code = LTTNG_OK;
-end:
-       if (new_session) {
-               (void) session_get(new_session);
-               *out_session = new_session;
-       }
-       return ret_code;
-error:
-       session_put(new_session);
-       new_session = NULL;
-       goto end;
-}
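
For reference, a hedged sketch of creating a session from a command handler; the session name is a placeholder and, per the code above, *out_session is only valid (and carries a reference owned by the caller) when LTTNG_OK is returned.

    struct ltt_session *session = NULL;
    enum lttng_error_code ret_code;

    session_lock_list();
    ret_code = session_create("demo-session", getuid(), getgid(), &session);
    if (ret_code == LTTNG_OK) {
            /* ... configure the session ... */
            session_put(session);
    }
    session_unlock_list();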
-
-/*
- * Check if the UID matches the session. Root user has access to all
- * sessions.
- */
-bool session_access_ok(struct ltt_session *session, uid_t uid)
-{
-       LTTNG_ASSERT(session);
-       return (uid == session->uid) || uid == 0;
-}
-
-/*
- * Set a session's rotation state and reset all associated state.
- *
- * This function resets the rotation state (check timers, pending
- * flags, etc.) and sets the result of the last rotation. The result
- * can be queried by a liblttng-ctl client.
- *
- * Be careful of the result passed to this function. For instance,
- * on failure to launch a rotation, a client will expect the rotation
- * state to be set to "NO_ROTATION". If an error occurred while the
- * rotation was "ONGOING", result should be set to "ERROR", which will
- * allow a client to report it.
- *
- * Must be called with the session and session_list locks held.
- */
-int session_reset_rotation_state(struct ltt_session *session,
-               enum lttng_rotation_state result)
-{
-       int ret = 0;
-
-       ASSERT_LOCKED(ltt_session_list.lock);
-       ASSERT_LOCKED(session->lock);
-
-       session->rotation_state = result;
-       if (session->rotation_pending_check_timer_enabled) {
-               ret = timer_session_rotation_pending_check_stop(session);
-       }
-       if (session->chunk_being_archived) {
-               uint64_t chunk_id;
-               enum lttng_trace_chunk_status chunk_status;
-
-               chunk_status = lttng_trace_chunk_get_id(
-                               session->chunk_being_archived,
-                               &chunk_id);
-               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
-               LTTNG_OPTIONAL_SET(&session->last_archived_chunk_id,
-                               chunk_id);
-               lttng_trace_chunk_put(session->chunk_being_archived);
-               session->chunk_being_archived = NULL;
-               /*
-                * Fire the clear reply notifiers if we are completing a clear
-                * rotation.
-                */
-               session_notify_clear(session);
-       }
-       return ret;
-}
-
-/*
- * Sample the id of a session looked up via its name.
- * Here, the term "sampling" hints to the caller that this returns the id at a
- * given point in time with no guarantee that the session for which the id was
- * sampled still exists at that point.
- *
- * Return false when the session is not found,
- * return true when the session is found, in which case `id` is set.
- */
-bool sample_session_id_by_name(const char *name, uint64_t *id)
-{
-       bool found = false;
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-       struct ltt_session *ls;
-
-       rcu_read_lock();
-
-       if (!ltt_sessions_ht_by_name) {
-               found = false;
-               goto end;
-       }
-
-       lttng_ht_lookup(ltt_sessions_ht_by_name, name, &iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-       if (node == NULL) {
-               found = false;
-               goto end;
-       }
-
-       ls = caa_container_of(node, struct ltt_session, node_by_name);
-       *id = ls->id;
-       found = true;
-
-       DBG3("Session id `%" PRIu64 "` sampled for session `%s`", *id, name);
-end:
-       rcu_read_unlock();
-       return found;
-}
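
Since no reference is taken, the sampled id is only a hint that must be re-validated before use; a short usage sketch follows (the session name is a placeholder).

    uint64_t id;

    if (sample_session_id_by_name("demo-session", &id)) {
            /*
             * `id` was valid at the time of the lookup, but the session may
             * have been destroyed since; re-validate it later, e.g. with
             * session_find_by_id() under the proper locks.
             */
            DBG("Sampled session id %" PRIu64, id);
    }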
diff --git a/src/bin/lttng-sessiond/session.cpp b/src/bin/lttng-sessiond/session.cpp
new file mode 100644 (file)
index 0000000..0d19042
--- /dev/null
@@ -0,0 +1,1427 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <urcu.h>
+#include <dirent.h>
+#include <sys/types.h>
+#include <pthread.h>
+
+#include <common/common.h>
+#include <common/utils.h>
+#include <common/trace-chunk.h>
+#include <common/sessiond-comm/sessiond-comm.h>
+#include <lttng/location-internal.h>
+#include "lttng-sessiond.h"
+#include "kernel.h"
+
+#include "session.h"
+#include "utils.h"
+#include "trace-ust.h"
+#include "timer.h"
+#include "cmd.h"
+
+struct ltt_session_destroy_notifier_element {
+       ltt_session_destroy_notifier notifier;
+       void *user_data;
+};
+
+struct ltt_session_clear_notifier_element {
+       ltt_session_clear_notifier notifier;
+       void *user_data;
+};
+
+/*
+ * NOTES:
+ *
+ * No ltt_session.lock is taken here because those data structures are widely
+ * spread across the lttng-tools code base so before calling functions below
+ * that can read/write a session, the caller MUST acquire the session lock
+ * using session_lock() and session_unlock().
+ */
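
Concretely, the locking contract stated above means that any access to a session's fields is expected to be wrapped as in this small sketch; the caller is assumed to already own a reference to `session`, and the field read is purely illustrative.

    session_lock(session);
    DBG("Session \"%s\" has%s been rotated", session->name,
                    session->rotated ? "" : " not");
    session_unlock(session);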
+
+/*
+ * Init tracing session list.
+ *
+ * Please see session.h for more explanation and correct usage of the list.
+ */
+static struct ltt_session_list ltt_session_list = {
+       .lock = PTHREAD_MUTEX_INITIALIZER,
+       .removal_cond = PTHREAD_COND_INITIALIZER,
+       .next_uuid = 0,
+       .head = CDS_LIST_HEAD_INIT(ltt_session_list.head),
+};
+
+/* These characters are forbidden in a session name. Used by validate_name. */
+static const char *forbidden_name_chars = "/";
+
+/* Global hash table to keep the sessions, indexed by id. */
+static struct lttng_ht *ltt_sessions_ht_by_id = NULL;
+/* Global hash table to keep the sessions, indexed by name. */
+static struct lttng_ht *ltt_sessions_ht_by_name = NULL;
+
+/*
+ * Validate the session name for forbidden characters.
+ *
+ * Return 0 on success, or -1 if a forbidden character has been found.
+ */
+static int validate_name(const char *name)
+{
+       int ret;
+       char *tok, *tmp_name;
+
+       LTTNG_ASSERT(name);
+
+       tmp_name = strdup(name);
+       if (!tmp_name) {
+               /* ENOMEM here. */
+               ret = -1;
+               goto error;
+       }
+
+       tok = strpbrk(tmp_name, forbidden_name_chars);
+       if (tok) {
+               DBG("Session name %s contains a forbidden character", name);
+               /* Forbidden character has been found. */
+               ret = -1;
+               goto error;
+       }
+       ret = 0;
+
+error:
+       free(tmp_name);
+       return ret;
+}
+
+/*
+ * Add a ltt_session structure to the global list.
+ *
+ * The caller MUST acquire the session list lock before.
+ * Returns the unique identifier for the session.
+ */
+static uint64_t add_session_list(struct ltt_session *ls)
+{
+       LTTNG_ASSERT(ls);
+
+       cds_list_add(&ls->list, &ltt_session_list.head);
+       return ltt_session_list.next_uuid++;
+}
+
+/*
+ * Delete a ltt_session structure from the global list.
+ *
+ * The caller MUST acquire the session list lock before.
+ */
+static void del_session_list(struct ltt_session *ls)
+{
+       LTTNG_ASSERT(ls);
+
+       cds_list_del(&ls->list);
+}
+
+/*
+ * Return a pointer to the session list.
+ */
+struct ltt_session_list *session_get_list(void)
+{
+       return &ltt_session_list;
+}
+
+/*
+ * Returns once the session list is empty.
+ */
+void session_list_wait_empty(void)
+{
+       pthread_mutex_lock(&ltt_session_list.lock);
+       while (!cds_list_empty(&ltt_session_list.head)) {
+               pthread_cond_wait(&ltt_session_list.removal_cond,
+                               &ltt_session_list.lock);
+       }
+       pthread_mutex_unlock(&ltt_session_list.lock);
+}
+
+/*
+ * Acquire session list lock
+ */
+void session_lock_list(void)
+{
+       pthread_mutex_lock(&ltt_session_list.lock);
+}
+
+/*
+ * Try to acquire session list lock
+ */
+int session_trylock_list(void)
+{
+       return pthread_mutex_trylock(&ltt_session_list.lock);
+}
+
+/*
+ * Release session list lock
+ */
+void session_unlock_list(void)
+{
+       pthread_mutex_unlock(&ltt_session_list.lock);
+}
+
+/*
+ * Get the session's consumer destination type.
+ *
+ * The caller must hold the session lock.
+ */
+enum consumer_dst_type session_get_consumer_destination_type(
+               const struct ltt_session *session)
+{
+       /*
+        * The output information is duplicated in both of those session types.
+        * Hence, it doesn't matter from which it is retrieved. However, it is
+        * possible for only one of them to be set.
+        */
+       return session->kernel_session ?
+                       session->kernel_session->consumer->type :
+                       session->ust_session->consumer->type;
+}
+
+/*
+ * Get the session's consumer network hostname.
+ * The caller must ensure that the destination is of type "net".
+ *
+ * The caller must hold the session lock.
+ */
+const char *session_get_net_consumer_hostname(const struct ltt_session *session)
+{
+       const char *hostname = NULL;
+       const struct consumer_output *output;
+
+       output = session->kernel_session ?
+                       session->kernel_session->consumer :
+                       session->ust_session->consumer;
+
+       /*
+        * hostname is assumed to be the same for both control and data
+        * connections.
+        */
+       switch (output->dst.net.control.dtype) {
+       case LTTNG_DST_IPV4:
+               hostname = output->dst.net.control.dst.ipv4;
+               break;
+       case LTTNG_DST_IPV6:
+               hostname = output->dst.net.control.dst.ipv6;
+               break;
+       default:
+               abort();
+       }
+       return hostname;
+}
+
+/*
+ * Get the session's consumer network control and data ports.
+ * The caller must ensure that the destination is of type "net".
+ *
+ * The caller must hold the session lock.
+ */
+void session_get_net_consumer_ports(const struct ltt_session *session,
+               uint16_t *control_port, uint16_t *data_port)
+{
+       const struct consumer_output *output;
+
+       output = session->kernel_session ?
+                       session->kernel_session->consumer :
+                       session->ust_session->consumer;
+       *control_port = output->dst.net.control.port;
+       *data_port = output->dst.net.data.port;
+}
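
A small sketch combining the two getters; it assumes the caller holds the session lock and has confirmed that the consumer destination is of type CONSUMER_DST_NET.

    if (session_get_consumer_destination_type(session) == CONSUMER_DST_NET) {
            uint16_t control_port, data_port;
            const char *hostname = session_get_net_consumer_hostname(session);

            session_get_net_consumer_ports(session, &control_port, &data_port);
            DBG("Relay output: host = %s, control port = %" PRIu16 ", data port = %" PRIu16,
                            hostname, control_port, data_port);
    }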
+
+/*
+ * Get the location of the latest trace archive produced by a rotation.
+ *
+ * The caller must hold the session lock.
+ */
+struct lttng_trace_archive_location *session_get_trace_archive_location(
+               const struct ltt_session *session)
+{
+       int ret;
+       struct lttng_trace_archive_location *location = NULL;
+       char *chunk_path = NULL;
+
+       if (session->rotation_state != LTTNG_ROTATION_STATE_COMPLETED ||
+                       !session->last_archived_chunk_name) {
+               goto end;
+       }
+
+       switch (session_get_consumer_destination_type(session)) {
+       case CONSUMER_DST_LOCAL:
+               ret = asprintf(&chunk_path,
+                               "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY "/%s",
+                               session_get_base_path(session),
+                               session->last_archived_chunk_name);
+               if (ret == -1) {
+                       goto end;
+               }
+               location = lttng_trace_archive_location_local_create(
+                               chunk_path);
+               break;
+       case CONSUMER_DST_NET:
+       {
+               const char *hostname;
+               uint16_t control_port, data_port;
+
+               hostname = session_get_net_consumer_hostname(session);
+               session_get_net_consumer_ports(session,
+                               &control_port,
+                               &data_port);
+               location = lttng_trace_archive_location_relay_create(
+                               hostname,
+                               LTTNG_TRACE_ARCHIVE_LOCATION_RELAY_PROTOCOL_TYPE_TCP,
+                               control_port, data_port, session->last_chunk_path);
+               break;
+       }
+       default:
+               abort();
+       }
+end:
+       free(chunk_path);
+       return location;
+}
+
+/*
+ * Allocate the ltt_sessions_ht_by_id and ltt_sessions_ht_by_name HT.
+ *
+ * The session list lock must be held.
+ */
+static int ltt_sessions_ht_alloc(void)
+{
+       int ret = 0;
+
+       DBG("Allocating ltt_sessions_ht_by_id");
+       ltt_sessions_ht_by_id = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       if (!ltt_sessions_ht_by_id) {
+               ret = -1;
+               ERR("Failed to allocate ltt_sessions_ht_by_id");
+               goto end;
+       }
+
+       DBG("Allocating ltt_sessions_ht_by_name");
+       ltt_sessions_ht_by_name = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+       if (!ltt_sessions_ht_by_name) {
+               ret = -1;
+               ERR("Failed to allocate ltt_sessions_ht_by_name");
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Destroy the ltt_sessions_ht_by_id and ltt_sessions_ht_by_name HTs.
+ *
+ * The session list lock must be held.
+ */
+static void ltt_sessions_ht_destroy(void)
+{
+       if (ltt_sessions_ht_by_id) {
+               ht_cleanup_push(ltt_sessions_ht_by_id);
+               ltt_sessions_ht_by_id = NULL;
+       }
+
+       if (ltt_sessions_ht_by_name) {
+               ht_cleanup_push(ltt_sessions_ht_by_name);
+               ltt_sessions_ht_by_name = NULL;
+       }
+
+       return;
+}
+
+/*
+ * Add a ltt_session to the ltt_sessions_ht_by_id and ltt_sessions_ht_by_name.
+ * If unallocated, the ltt_sessions_ht_by_id and ltt_sessions_ht_by_name HTs
+ * are allocated. The session list lock must be held.
+ */
+static void add_session_ht(struct ltt_session *ls)
+{
+       int ret;
+
+       LTTNG_ASSERT(ls);
+
+       if (!ltt_sessions_ht_by_id) {
+               ret = ltt_sessions_ht_alloc();
+               if (ret) {
+                       ERR("Error allocating the sessions HT");
+                       goto end;
+               }
+       }
+
+       /* Should always be present with ltt_sessions_ht_by_id. */
+       LTTNG_ASSERT(ltt_sessions_ht_by_name);
+
+       lttng_ht_node_init_u64(&ls->node, ls->id);
+       lttng_ht_add_unique_u64(ltt_sessions_ht_by_id, &ls->node);
+
+       lttng_ht_node_init_str(&ls->node_by_name, ls->name);
+       lttng_ht_add_unique_str(ltt_sessions_ht_by_name, &ls->node_by_name);
+
+end:
+       return;
+}
+
+/*
+ * Test if ltt_sessions_ht_by_id/name are empty.
+ * Return 1 if empty, 0 if not empty.
+ * The session list lock must be held.
+ */
+static int ltt_sessions_ht_empty(void)
+{
+       unsigned long count;
+
+       if (!ltt_sessions_ht_by_id) {
+               count = 0;
+               goto end;
+       }
+
+       LTTNG_ASSERT(ltt_sessions_ht_by_name);
+
+       count = lttng_ht_get_count(ltt_sessions_ht_by_id);
+       LTTNG_ASSERT(count == lttng_ht_get_count(ltt_sessions_ht_by_name));
+end:
+       return count ? 0 : 1;
+}
+
+/*
+ * Remove a ltt_session from the ltt_sessions_ht_by_id/name.
+ * If empty, the ltt_sessions_ht_by_id/name HTs are freed.
+ * The session list lock must be held.
+ */
+static void del_session_ht(struct ltt_session *ls)
+{
+       struct lttng_ht_iter iter;
+       int ret;
+
+       LTTNG_ASSERT(ls);
+       LTTNG_ASSERT(ltt_sessions_ht_by_id);
+       LTTNG_ASSERT(ltt_sessions_ht_by_name);
+
+       iter.iter.node = &ls->node.node;
+       ret = lttng_ht_del(ltt_sessions_ht_by_id, &iter);
+       LTTNG_ASSERT(!ret);
+
+       iter.iter.node = &ls->node_by_name.node;
+       ret = lttng_ht_del(ltt_sessions_ht_by_name, &iter);
+       LTTNG_ASSERT(!ret);
+
+       if (ltt_sessions_ht_empty()) {
+               DBG("Empty ltt_sessions_ht_by_id/name, destroying hash tables");
+               ltt_sessions_ht_destroy();
+       }
+}
+
+/*
+ * Acquire session lock
+ */
+void session_lock(struct ltt_session *session)
+{
+       LTTNG_ASSERT(session);
+
+       pthread_mutex_lock(&session->lock);
+}
+
+/*
+ * Release session lock
+ */
+void session_unlock(struct ltt_session *session)
+{
+       LTTNG_ASSERT(session);
+
+       pthread_mutex_unlock(&session->lock);
+}
+
+static
+int _session_set_trace_chunk_no_lock_check(struct ltt_session *session,
+               struct lttng_trace_chunk *new_trace_chunk,
+               struct lttng_trace_chunk **_current_trace_chunk)
+{
+       int ret = 0;
+       unsigned int i, refs_to_acquire = 0, refs_acquired = 0, refs_to_release = 0;
+       struct cds_lfht_iter iter;
+       struct consumer_socket *socket;
+       struct lttng_trace_chunk *current_trace_chunk;
+       uint64_t chunk_id;
+       enum lttng_trace_chunk_status chunk_status;
+
+       rcu_read_lock();
+       /*
+        * Ownership of current trace chunk is transferred to
+        * `current_trace_chunk`.
+        */
+       current_trace_chunk = session->current_trace_chunk;
+       session->current_trace_chunk = NULL;
+       if (session->ust_session) {
+               lttng_trace_chunk_put(
+                               session->ust_session->current_trace_chunk);
+               session->ust_session->current_trace_chunk = NULL;
+       }
+       if (session->kernel_session) {
+               lttng_trace_chunk_put(
+                               session->kernel_session->current_trace_chunk);
+               session->kernel_session->current_trace_chunk = NULL;
+       }
+       if (!new_trace_chunk) {
+               ret = 0;
+               goto end;
+       }
+       chunk_status = lttng_trace_chunk_get_id(new_trace_chunk, &chunk_id);
+       LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+
+       refs_to_acquire = 1;
+       refs_to_acquire += !!session->ust_session;
+       refs_to_acquire += !!session->kernel_session;
+
+       for (refs_acquired = 0; refs_acquired < refs_to_acquire;
+                       refs_acquired++) {
+               if (!lttng_trace_chunk_get(new_trace_chunk)) {
+                       ERR("Failed to acquire reference to new trace chunk of session \"%s\"",
+                                       session->name);
+                       goto error;
+               }
+       }
+
+       if (session->ust_session) {
+               const uint64_t relayd_id =
+                               session->ust_session->consumer->net_seq_index;
+               const bool is_local_trace =
+                               session->ust_session->consumer->type ==
+                               CONSUMER_DST_LOCAL;
+
+               session->ust_session->current_trace_chunk = new_trace_chunk;
+               if (is_local_trace) {
+                       enum lttng_error_code ret_error_code;
+
+                       ret_error_code = ust_app_create_channel_subdirectories(
+                                       session->ust_session);
+                       if (ret_error_code != LTTNG_OK) {
+                               goto error;
+                       }
+               }
+               cds_lfht_for_each_entry(
+                               session->ust_session->consumer->socks->ht,
+                               &iter, socket, node.node) {
+                       pthread_mutex_lock(socket->lock);
+                       ret = consumer_create_trace_chunk(socket,
+                                       relayd_id,
+                                       session->id, new_trace_chunk,
+                                       DEFAULT_UST_TRACE_DIR);
+                       pthread_mutex_unlock(socket->lock);
+                       if (ret) {
+                               goto error;
+                       }
+               }
+       }
+       if (session->kernel_session) {
+               const uint64_t relayd_id =
+                               session->kernel_session->consumer->net_seq_index;
+               const bool is_local_trace =
+                               session->kernel_session->consumer->type ==
+                               CONSUMER_DST_LOCAL;
+
+               session->kernel_session->current_trace_chunk = new_trace_chunk;
+               if (is_local_trace) {
+                       enum lttng_error_code ret_error_code;
+
+                       ret_error_code = kernel_create_channel_subdirectories(
+                                       session->kernel_session);
+                       if (ret_error_code != LTTNG_OK) {
+                               goto error;
+                       }
+               }
+               cds_lfht_for_each_entry(
+                               session->kernel_session->consumer->socks->ht,
+                               &iter, socket, node.node) {
+                       pthread_mutex_lock(socket->lock);
+                       ret = consumer_create_trace_chunk(socket,
+                                       relayd_id,
+                                       session->id, new_trace_chunk,
+                                       DEFAULT_KERNEL_TRACE_DIR);
+                       pthread_mutex_unlock(socket->lock);
+                       if (ret) {
+                               goto error;
+                       }
+               }
+       }
+
+       /*
+        * Update local current trace chunk state last, only if all remote
+        * creations succeeded.
+        */
+       session->current_trace_chunk = new_trace_chunk;
+       LTTNG_OPTIONAL_SET(&session->most_recent_chunk_id, chunk_id);
+end:
+       if (_current_trace_chunk) {
+               *_current_trace_chunk = current_trace_chunk;
+               current_trace_chunk = NULL;
+       }
+end_no_move:
+       rcu_read_unlock();
+       lttng_trace_chunk_put(current_trace_chunk);
+       return ret;
+error:
+       if (session->ust_session) {
+               session->ust_session->current_trace_chunk = NULL;
+       }
+       if (session->kernel_session) {
+               session->kernel_session->current_trace_chunk = NULL;
+       }
+       /*
+        * Release references taken in the case where all references could not
+        * be acquired.
+        */
+       refs_to_release = refs_to_acquire - refs_acquired;
+       for (i = 0; i < refs_to_release; i++) {
+               lttng_trace_chunk_put(new_trace_chunk);
+       }
+       ret = -1;
+       goto end_no_move;
+}
+
+struct lttng_trace_chunk *session_create_new_trace_chunk(
+               const struct ltt_session *session,
+               const struct consumer_output *consumer_output_override,
+               const char *session_base_path_override,
+               const char *chunk_name_override)
+{
+       int ret;
+       struct lttng_trace_chunk *trace_chunk = NULL;
+       enum lttng_trace_chunk_status chunk_status;
+       const time_t chunk_creation_ts = time(NULL);
+       bool is_local_trace;
+       const char *base_path;
+       struct lttng_directory_handle *session_output_directory = NULL;
+       const struct lttng_credentials session_credentials = {
+               .uid = LTTNG_OPTIONAL_INIT_VALUE(session->uid),
+               .gid = LTTNG_OPTIONAL_INIT_VALUE(session->gid),
+       };
+       uint64_t next_chunk_id;
+       const struct consumer_output *output;
+       const char *new_path;
+
+       if (consumer_output_override) {
+               output = consumer_output_override;
+       } else {
+               LTTNG_ASSERT(session->ust_session || session->kernel_session);
+               output = session->ust_session ?
+                                        session->ust_session->consumer :
+                                        session->kernel_session->consumer;
+       }
+
+       is_local_trace = output->type == CONSUMER_DST_LOCAL;
+       base_path = session_base_path_override ? :
+                       consumer_output_get_base_path(output);
+
+       if (chunk_creation_ts == (time_t) -1) {
+               PERROR("Failed to sample time while creating session \"%s\" trace chunk",
+                               session->name);
+               goto error;
+       }
+
+       next_chunk_id = session->most_recent_chunk_id.is_set ?
+                       session->most_recent_chunk_id.value + 1 : 0;
+
+       if (session->current_trace_chunk &&
+                       !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) {
+               chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk,
+                                       DEFAULT_CHUNK_TMP_OLD_DIRECTORY);
+               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       goto error;
+               }
+       }
+       if (!session->current_trace_chunk) {
+               if (!session->rotated) {
+                       new_path = "";
+               } else {
+                       new_path = NULL;
+               }
+       } else {
+               new_path = DEFAULT_CHUNK_TMP_NEW_DIRECTORY;
+       }
+
+       trace_chunk = lttng_trace_chunk_create(next_chunk_id,
+                       chunk_creation_ts, new_path);
+       if (!trace_chunk) {
+               goto error;
+       }
+
+       if (chunk_name_override) {
+               chunk_status = lttng_trace_chunk_override_name(trace_chunk,
+                               chunk_name_override);
+               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       goto error;
+               }
+       }
+
+       if (!is_local_trace) {
+               /*
+                * No need to set credentials and output directory
+                * for remote trace chunks.
+                */
+               goto end;
+       }
+
+       chunk_status = lttng_trace_chunk_set_credentials(trace_chunk,
+                       &session_credentials);
+       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+               goto error;
+       }
+
+       DBG("Creating base output directory of session \"%s\" at %s",
+                       session->name, base_path);
+       ret = utils_mkdir_recursive(base_path, S_IRWXU | S_IRWXG,
+                       session->uid, session->gid);
+       if (ret) {
+               goto error;
+       }
+       session_output_directory = lttng_directory_handle_create(base_path);
+       if (!session_output_directory) {
+               goto error;
+       }
+       chunk_status = lttng_trace_chunk_set_as_owner(trace_chunk,
+                       session_output_directory);
+       lttng_directory_handle_put(session_output_directory);
+       session_output_directory = NULL;
+       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+               goto error;
+       }
+end:
+       return trace_chunk;
+error:
+       lttng_directory_handle_put(session_output_directory);
+       lttng_trace_chunk_put(trace_chunk);
+       trace_chunk = NULL;
+       goto end;
+}
+
+int session_close_trace_chunk(struct ltt_session *session,
+               struct lttng_trace_chunk *trace_chunk,
+               enum lttng_trace_chunk_command_type close_command,
+               char *closed_trace_chunk_path)
+{
+       int ret = 0;
+       bool error_occurred = false;
+       struct cds_lfht_iter iter;
+       struct consumer_socket *socket;
+       enum lttng_trace_chunk_status chunk_status;
+       const time_t chunk_close_timestamp = time(NULL);
+       const char *new_path;
+
+       chunk_status = lttng_trace_chunk_set_close_command(
+                       trace_chunk, close_command);
+       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+               ret = -1;
+               goto end;
+       }
+
+       if (chunk_close_timestamp == (time_t) -1) {
+               ERR("Failed to sample the close timestamp of the current trace chunk of session \"%s\"",
+                               session->name);
+               ret = -1;
+               goto end;
+       }
+
+       if (close_command == LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE && !session->rotated) {
+               /* New chunk stays in session output directory. */
+               new_path = "";
+       } else {
+               /* Use chunk name for new chunk. */
+               new_path = NULL;
+       }
+       if (session->current_trace_chunk &&
+                       !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) {
+               /* Rename new chunk path. */
+               chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk,
+                                       new_path);
+               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       ret = -1;
+                       goto end;
+               }
+       }
+       if (!lttng_trace_chunk_get_name_overridden(trace_chunk) &&
+                       close_command == LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION) {
+               const char *old_path;
+
+               if (!session->rotated) {
+                       old_path = "";
+               } else {
+                       old_path = NULL;
+               }
+               /* We need to move back the .tmp_old_chunk to its rightful place. */
+               chunk_status = lttng_trace_chunk_rename_path(trace_chunk,
+                                       old_path);
+               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       ret = -1;
+                       goto end;
+               }
+       }
+       if (close_command == LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED) {
+               session->rotated = true;
+       }
+       chunk_status = lttng_trace_chunk_set_close_timestamp(trace_chunk,
+                       chunk_close_timestamp);
+       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+               ERR("Failed to set the close timestamp of the current trace chunk of session \"%s\"",
+                               session->name);
+               ret = -1;
+               goto end;
+       }
+
+       if (session->ust_session) {
+               const uint64_t relayd_id =
+                               session->ust_session->consumer->net_seq_index;
+
+               cds_lfht_for_each_entry(
+                               session->ust_session->consumer->socks->ht,
+                               &iter, socket, node.node) {
+                       pthread_mutex_lock(socket->lock);
+                       ret = consumer_close_trace_chunk(socket,
+                                       relayd_id,
+                                       session->id,
+                                       trace_chunk, closed_trace_chunk_path);
+                       pthread_mutex_unlock(socket->lock);
+                       if (ret) {
+                               ERR("Failed to close trace chunk on user space consumer");
+                               error_occurred = true;
+                       }
+               }
+       }
+       if (session->kernel_session) {
+               const uint64_t relayd_id =
+                               session->kernel_session->consumer->net_seq_index;
+
+               cds_lfht_for_each_entry(
+                               session->kernel_session->consumer->socks->ht,
+                               &iter, socket, node.node) {
+                       pthread_mutex_lock(socket->lock);
+                       ret = consumer_close_trace_chunk(socket,
+                                       relayd_id,
+                                       session->id,
+                                       trace_chunk, closed_trace_chunk_path);
+                       pthread_mutex_unlock(socket->lock);
+                       if (ret) {
+                               ERR("Failed to close trace chunk on kernel consumer");
+                               error_occurred = true;
+                       }
+               }
+       }
+       ret = error_occurred ? -1 : 0;
+end:
+       return ret;
+}
+
+/*
+ * This function skips the metadata channel as the begin/end timestamps of a
+ * metadata packet are useless.
+ *
+ * Moreover, opening a packet after a "clear" will cause problems for live
+ * sessions as it will introduce padding that was not part of the first trace
+ * chunk. The relay daemon expects the content of the metadata stream of
+ * successive metadata trace chunks to be strict supersets of one another.
+ *
+ * For example, flushing a packet at the beginning of the metadata stream of
+ * a trace chunk resulting from a "clear" session command will cause the
+ * size of the metadata stream of the new trace chunk to not match the size of
+ * the metadata stream of the original chunk. This will confuse the relay
+ * daemon as the same "offset" in a metadata stream will no longer point
+ * to the same content.
+ */
+static
+enum lttng_error_code session_kernel_open_packets(struct ltt_session *session)
+{
+       enum lttng_error_code ret = LTTNG_OK;
+       struct consumer_socket *socket;
+       struct lttng_ht_iter iter;
+       struct cds_lfht_node *node;
+       struct ltt_kernel_channel *chan;
+
+       rcu_read_lock();
+
+       cds_lfht_first(session->kernel_session->consumer->socks->ht, &iter.iter);
+       node = cds_lfht_iter_get_node(&iter.iter);
+       socket = container_of(node, typeof(*socket), node.node);
+
+       cds_list_for_each_entry(chan,
+                       &session->kernel_session->channel_list.head, list) {
+               int open_ret;
+
+               DBG("Open packet of kernel channel: channel key = %" PRIu64
+                               ", session name = %s, session_id = %" PRIu64,
+                               chan->key, session->name, session->id);
+
+               open_ret = consumer_open_channel_packets(socket, chan->key);
+               if (open_ret < 0) {
+                       /* General error (no known error expected). */
+                       ret = LTTNG_ERR_UNK;
+                       goto end;
+               }
+       }
+
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+enum lttng_error_code session_open_packets(struct ltt_session *session)
+{
+       enum lttng_error_code ret = LTTNG_OK;
+
+       DBG("Opening packets of session channels: session name = %s, session id = %" PRIu64,
+                       session->name, session->id);
+
+       if (session->ust_session) {
+               ret = ust_app_open_packets(session);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+
+       if (session->kernel_session) {
+               ret = session_kernel_open_packets(session);
+               if (ret != LTTNG_OK) {
+                       goto end;
+               }
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Set a session's current trace chunk.
+ *
+ * Must be called with the session lock held.
+ */
+int session_set_trace_chunk(struct ltt_session *session,
+               struct lttng_trace_chunk *new_trace_chunk,
+               struct lttng_trace_chunk **current_trace_chunk)
+{
+       ASSERT_LOCKED(session->lock);
+       return _session_set_trace_chunk_no_lock_check(session, new_trace_chunk,
+                       current_trace_chunk);
+}
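+
+/*
+ * Illustrative usage sketch (not part of the original change). It assumes the
+ * session_lock()/session_unlock() helpers declared in session.h; the chunk
+ * names are made up.
+ *
+ *   struct lttng_trace_chunk *previous_chunk = NULL;
+ *
+ *   session_lock(session);
+ *   if (session_set_trace_chunk(session, new_chunk, &previous_chunk)) {
+ *           ERR("Failed to set the session's trace chunk");
+ *   }
+ *   session_unlock(session);
+ *   lttng_trace_chunk_put(previous_chunk);
+ */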
+
+static
+void session_notify_destruction(const struct ltt_session *session)
+{
+       size_t i;
+       const size_t count = lttng_dynamic_array_get_count(
+                       &session->destroy_notifiers);
+
+       for (i = 0; i < count; i++) {
+               const struct ltt_session_destroy_notifier_element *element =
+                       (ltt_session_destroy_notifier_element *) lttng_dynamic_array_get_element(
+                                       &session->destroy_notifiers, i);
+
+               element->notifier(session, element->user_data);
+       }
+}
+
+/*
+ * Fire each clear notifier once, and remove them from the array.
+ */
+void session_notify_clear(struct ltt_session *session)
+{
+       size_t i;
+       const size_t count = lttng_dynamic_array_get_count(
+                       &session->clear_notifiers);
+
+       for (i = 0; i < count; i++) {
+               const struct ltt_session_clear_notifier_element *element =
+                       (ltt_session_clear_notifier_element *) lttng_dynamic_array_get_element(
+                                       &session->clear_notifiers, i);
+
+               element->notifier(session, element->user_data);
+       }
+       lttng_dynamic_array_clear(&session->clear_notifiers);
+}
+
+static
+void session_release(struct urcu_ref *ref)
+{
+       int ret;
+       struct ltt_ust_session *usess;
+       struct ltt_kernel_session *ksess;
+       struct ltt_session *session = container_of(ref, typeof(*session), ref);
+       const bool session_published = session->published;
+
+       LTTNG_ASSERT(!session->chunk_being_archived);
+
+       usess = session->ust_session;
+       ksess = session->kernel_session;
+
+       /* Clean kernel session teardown, keeping data for destroy notifier. */
+       kernel_destroy_session(ksess);
+
+       /* UST session teardown, keeping data for destroy notifier. */
+       if (usess) {
+               /* Close any relayd session */
+               consumer_output_send_destroy_relayd(usess->consumer);
+
+               /* Destroy every UST application related to this session. */
+               ret = ust_app_destroy_trace_all(usess);
+               if (ret) {
+                       ERR("Error in ust_app_destroy_trace_all");
+               }
+
+               /* Clean up the rest, keeping destroy notifier data. */
+               trace_ust_destroy_session(usess);
+       }
+
+       /*
+        * Must notify the kernel thread here to update its poll set in order to
+        * remove the fd(s) of the channel(s) that were just destroyed.
+        */
+       ret = notify_thread_pipe(the_kernel_poll_pipe[1]);
+       if (ret < 0) {
+               PERROR("write kernel poll pipe");
+       }
+
+       DBG("Destroying session %s (id %" PRIu64 ")", session->name, session->id);
+
+       snapshot_destroy(&session->snapshot);
+
+       pthread_mutex_destroy(&session->lock);
+
+       if (session_published) {
+               ASSERT_LOCKED(ltt_session_list.lock);
+               del_session_list(session);
+               del_session_ht(session);
+       }
+       session_notify_destruction(session);
+
+       consumer_output_put(session->consumer);
+       kernel_free_session(ksess);
+       session->kernel_session = NULL;
+       if (usess) {
+               trace_ust_free_session(usess);
+               session->ust_session = NULL;
+       }
+       lttng_dynamic_array_reset(&session->destroy_notifiers);
+       lttng_dynamic_array_reset(&session->clear_notifiers);
+       free(session->last_archived_chunk_name);
+       free(session->base_path);
+       free(session);
+       if (session_published) {
+               /*
+                * Broadcast after freeing to ensure the memory is
+                * reclaimed before the main thread exits.
+                */
+               ASSERT_LOCKED(ltt_session_list.lock);
+               pthread_cond_broadcast(&ltt_session_list.removal_cond);
+       }
+}
+
+/*
+ * Acquire a reference to a session.
+ * This function may fail (return false); its return value must be checked.
+ */
+bool session_get(struct ltt_session *session)
+{
+       return urcu_ref_get_unless_zero(&session->ref);
+}
+
+/*
+ * Release a reference to a session.
+ */
+void session_put(struct ltt_session *session)
+{
+       if (!session) {
+               return;
+       }
+       /*
+        * The session list lock must be held as any session_put()
+        * may cause the removal of the session from the session_list.
+        */
+       ASSERT_LOCKED(ltt_session_list.lock);
+       LTTNG_ASSERT(session->ref.refcount);
+       urcu_ref_put(&session->ref, session_release);
+}
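+
+/*
+ * Illustrative reference-counting sketch (not part of the original change).
+ * It assumes the session_lock_list()/session_unlock_list() helpers declared
+ * in session.h.
+ *
+ *   session_lock_list();
+ *   if (session_get(session)) {
+ *           ... use the session ...
+ *           session_put(session);
+ *   }
+ *   session_unlock_list();
+ */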
+
+/*
+ * Destroy a session.
+ *
+ * This method does not immediately release/free the session as other
+ * components may still hold a reference to the session. However,
+ * the session should no longer be presented to the user.
+ *
+ * Releases the session list's reference to the session
+ * and marks it as destroyed. Iterations on the session list should be
+ * mindful of the "destroyed" flag.
+ */
+void session_destroy(struct ltt_session *session)
+{
+       LTTNG_ASSERT(!session->destroyed);
+       session->destroyed = true;
+       session_put(session);
+}
+
+int session_add_destroy_notifier(struct ltt_session *session,
+               ltt_session_destroy_notifier notifier, void *user_data)
+{
+       const struct ltt_session_destroy_notifier_element element = {
+               .notifier = notifier,
+               .user_data = user_data
+       };
+
+       return lttng_dynamic_array_add_element(&session->destroy_notifiers,
+                       &element);
+}
+
+int session_add_clear_notifier(struct ltt_session *session,
+               ltt_session_clear_notifier notifier, void *user_data)
+{
+       const struct ltt_session_clear_notifier_element element = {
+               .notifier = notifier,
+               .user_data = user_data
+       };
+
+       return lttng_dynamic_array_add_element(&session->clear_notifiers,
+                       &element);
+}
+
+/*
+ * Return a pointer to the ltt_session structure matching `name`. If no session
+ * is found, NULL is returned. This must be called with the session list lock
+ * held, using session_lock_list and session_unlock_list.
+ * A reference to the session is implicitly acquired by this function.
+ */
+struct ltt_session *session_find_by_name(const char *name)
+{
+       struct ltt_session *iter;
+
+       LTTNG_ASSERT(name);
+       ASSERT_LOCKED(ltt_session_list.lock);
+
+       DBG2("Trying to find session by name %s", name);
+
+       cds_list_for_each_entry(iter, &ltt_session_list.head, list) {
+               if (!strncmp(iter->name, name, NAME_MAX) &&
+                               !iter->destroyed) {
+                       goto found;
+               }
+       }
+
+       return NULL;
+found:
+       return session_get(iter) ? iter : NULL;
+}
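+
+/*
+ * Illustrative lookup sketch (not part of the original change); "my-session"
+ * is a made-up name and error handling is elided.
+ *
+ *   session_lock_list();
+ *   session = session_find_by_name("my-session");
+ *   if (session) {
+ *           ... use the session ...
+ *           session_put(session);
+ *   }
+ *   session_unlock_list();
+ */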
+
+/*
+ * Return an ltt_session that matches the id. If no session is found,
+ * NULL is returned. This must be called with rcu_read_lock and
+ * session list lock held (to guarantee the lifetime of the session).
+ */
+struct ltt_session *session_find_by_id(uint64_t id)
+{
+       struct lttng_ht_node_u64 *node;
+       struct lttng_ht_iter iter;
+       struct ltt_session *ls;
+
+       ASSERT_LOCKED(ltt_session_list.lock);
+
+       if (!ltt_sessions_ht_by_id) {
+               goto end;
+       }
+
+       lttng_ht_lookup(ltt_sessions_ht_by_id, &id, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (node == NULL) {
+               goto end;
+       }
+       ls = caa_container_of(node, struct ltt_session, node);
+
+       DBG3("Session %" PRIu64 " found by id.", id);
+       return session_get(ls) ? ls : NULL;
+
+end:
+       DBG3("Session %" PRIu64 " NOT found by id", id);
+       return NULL;
+}
+
+/*
+ * Create a new session and add it to the session list.
+ * Session list lock must be held by the caller.
+ */
+enum lttng_error_code session_create(const char *name, uid_t uid, gid_t gid,
+               struct ltt_session **out_session)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct ltt_session *new_session = NULL;
+
+       ASSERT_LOCKED(ltt_session_list.lock);
+       if (name) {
+               struct ltt_session *clashing_session;
+
+               clashing_session = session_find_by_name(name);
+               if (clashing_session) {
+                       session_put(clashing_session);
+                       ret_code = LTTNG_ERR_EXIST_SESS;
+                       goto error;
+               }
+       }
+       new_session = (ltt_session *) zmalloc(sizeof(struct ltt_session));
+       if (!new_session) {
+               PERROR("Failed to allocate an ltt_session structure");
+               ret_code = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       lttng_dynamic_array_init(&new_session->destroy_notifiers,
+                       sizeof(struct ltt_session_destroy_notifier_element),
+                       NULL);
+       lttng_dynamic_array_init(&new_session->clear_notifiers,
+                       sizeof(struct ltt_session_clear_notifier_element),
+                       NULL);
+       urcu_ref_init(&new_session->ref);
+       pthread_mutex_init(&new_session->lock, NULL);
+
+       new_session->creation_time = time(NULL);
+       if (new_session->creation_time == (time_t) -1) {
+               PERROR("Failed to sample session creation time");
+               ret_code = LTTNG_ERR_SESSION_FAIL;
+               goto error;
+       }
+
+       /* Create default consumer output. */
+       new_session->consumer = consumer_create_output(CONSUMER_DST_LOCAL);
+       if (new_session->consumer == NULL) {
+               ret_code = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       if (name) {
+               ret = lttng_strncpy(new_session->name, name, sizeof(new_session->name));
+               if (ret) {
+                       ret_code = LTTNG_ERR_SESSION_INVALID_CHAR;
+                       goto error;
+               }
+               ret = validate_name(name);
+               if (ret < 0) {
+                       ret_code = LTTNG_ERR_SESSION_INVALID_CHAR;
+                       goto error;
+               }
+       } else {
+               int i = 0;
+               bool found_name = false;
+               char datetime[16];
+               struct tm *timeinfo;
+
+               timeinfo = localtime(&new_session->creation_time);
+               if (!timeinfo) {
+                       ret_code = LTTNG_ERR_SESSION_FAIL;
+                       goto error;
+               }
+               strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
+               for (i = 0; i < INT_MAX; i++) {
+                       struct ltt_session *clashing_session;
+
+                       if (i == 0) {
+                               ret = snprintf(new_session->name,
+                                               sizeof(new_session->name),
+                                               "%s-%s",
+                                               DEFAULT_SESSION_NAME,
+                                               datetime);
+                       } else {
+                               ret = snprintf(new_session->name,
+                                               sizeof(new_session->name),
+                                               "%s%d-%s",
+                                               DEFAULT_SESSION_NAME, i,
+                                               datetime);
+                       }
+                       new_session->name_contains_creation_time = true;
+                       if (ret == -1 || ret >= sizeof(new_session->name)) {
+                               /*
+                                * Null-terminate in case the name is used
+                                * in logging statements.
+                                */
+                               new_session->name[sizeof(new_session->name) - 1] = '\0';
+                               ret_code = LTTNG_ERR_SESSION_FAIL;
+                               goto error;
+                       }
+
+                       clashing_session =
+                                       session_find_by_name(new_session->name);
+                       session_put(clashing_session);
+                       if (!clashing_session) {
+                               found_name = true;
+                               break;
+                       }
+               }
+               if (found_name) {
+                       DBG("Generated session name \"%s\"", new_session->name);
+                       new_session->has_auto_generated_name = true;
+               } else {
+                       ERR("Failed to auto-generate a session name");
+                       ret_code = LTTNG_ERR_SESSION_FAIL;
+                       goto error;
+               }
+       }
+
+       ret = gethostname(new_session->hostname, sizeof(new_session->hostname));
+       if (ret < 0) {
+               if (errno == ENAMETOOLONG) {
+                       new_session->hostname[sizeof(new_session->hostname) - 1] = '\0';
+                       ERR("Hostname exceeds the maximal permitted length and has been truncated to %s",
+                                       new_session->hostname);
+               } else {
+                       ret_code = LTTNG_ERR_SESSION_FAIL;
+                       goto error;
+               }
+       }
+
+       new_session->uid = uid;
+       new_session->gid = gid;
+
+       ret = snapshot_init(&new_session->snapshot);
+       if (ret < 0) {
+               ret_code = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       new_session->rotation_state = LTTNG_ROTATION_STATE_NO_ROTATION;
+
+       /* Add new session to the session list. */
+       new_session->id = add_session_list(new_session);
+
+       /*
+        * Add the new session to the ltt_sessions_ht_by_id.
+        * No ownership is taken by the hash table; it is merely
+        * a wrapper around the session list used for faster access
+        * by session id.
+        */
+       add_session_ht(new_session);
+       new_session->published = true;
+
+       /*
+        * Consumer is left to NULL since the create_session_uri command will
+        * set it up and, if valid, assign it to the session.
+        */
+       DBG("Tracing session %s created with ID %" PRIu64 " by uid = %d, gid = %d",
+                       new_session->name, new_session->id, new_session->uid,
+                       new_session->gid);
+       ret_code = LTTNG_OK;
+end:
+       if (new_session) {
+               (void) session_get(new_session);
+               *out_session = new_session;
+       }
+       return ret_code;
+error:
+       session_put(new_session);
+       new_session = NULL;
+       goto end;
+}
+
+/*
+ * Check if the UID matches the session. Root user has access to all
+ * sessions.
+ */
+bool session_access_ok(struct ltt_session *session, uid_t uid)
+{
+       LTTNG_ASSERT(session);
+       return (uid == session->uid) || uid == 0;
+}
+
+/*
+ * Set a session's rotation state and reset all associated state.
+ *
+ * This function resets the rotation state (check timers, pending
+ * flags, etc.) and sets the result of the last rotation. The result
+ * can be queried by a liblttng-ctl client.
+ *
+ * Be careful of the result passed to this function. For instance,
+ * on failure to launch a rotation, a client will expect the rotation
+ * state to be set to "NO_ROTATION". If an error occurred while the
+ * rotation was "ONGOING", result should be set to "ERROR", which will
+ * allow a client to report it.
+ *
+ * Must be called with the session and session_list locks held.
+ */
+int session_reset_rotation_state(struct ltt_session *session,
+               enum lttng_rotation_state result)
+{
+       int ret = 0;
+
+       ASSERT_LOCKED(ltt_session_list.lock);
+       ASSERT_LOCKED(session->lock);
+
+       session->rotation_state = result;
+       if (session->rotation_pending_check_timer_enabled) {
+               ret = timer_session_rotation_pending_check_stop(session);
+       }
+       if (session->chunk_being_archived) {
+               uint64_t chunk_id;
+               enum lttng_trace_chunk_status chunk_status;
+
+               chunk_status = lttng_trace_chunk_get_id(
+                               session->chunk_being_archived,
+                               &chunk_id);
+               LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
+               LTTNG_OPTIONAL_SET(&session->last_archived_chunk_id,
+                               chunk_id);
+               lttng_trace_chunk_put(session->chunk_being_archived);
+               session->chunk_being_archived = NULL;
+               /*
+                * Fire the clear reply notifiers if we are completing a clear
+                * rotation.
+                */
+               session_notify_clear(session);
+       }
+       return ret;
+}
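+
+/*
+ * Illustrative sketch (not part of the original change): reporting a failed
+ * rotation so that a liblttng-ctl client can observe it.
+ *
+ *   session_lock_list();
+ *   session_lock(session);
+ *   if (session_reset_rotation_state(session, LTTNG_ROTATION_STATE_ERROR)) {
+ *           ERR("Failed to reset the rotation state of session \"%s\"",
+ *                           session->name);
+ *   }
+ *   session_unlock(session);
+ *   session_unlock_list();
+ */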
+
+/*
+ * Sample the id of a session looked up via its name.
+ * The term "sampling" hints to the caller that this returns the id at a given
+ * point in time, with no guarantee that the session for which the id was
+ * sampled still exists at that point.
+ *
+ * Return false when the session is not found.
+ * Return true when the session is found; `id` is then set.
+ */
+bool sample_session_id_by_name(const char *name, uint64_t *id)
+{
+       bool found = false;
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+       struct ltt_session *ls;
+
+       rcu_read_lock();
+
+       if (!ltt_sessions_ht_by_name) {
+               found = false;
+               goto end;
+       }
+
+       lttng_ht_lookup(ltt_sessions_ht_by_name, name, &iter);
+       node = lttng_ht_iter_get_node_str(&iter);
+       if (node == NULL) {
+               found = false;
+               goto end;
+       }
+
+       ls = caa_container_of(node, struct ltt_session, node_by_name);
+       *id = ls->id;
+       found = true;
+
+       DBG3("Session id `%" PRIu64 "` sampled for session `%s`", *id, name);
+end:
+       rcu_read_unlock();
+       return found;
+}
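+
+/*
+ * Illustrative sketch (not part of the original change); the sampled id may
+ * refer to a session that no longer exists by the time it is used.
+ *
+ *   uint64_t id;
+ *
+ *   if (sample_session_id_by_name("my-session", &id)) {
+ *           ... `id` was only valid at the time of the lookup ...
+ *   }
+ */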
diff --git a/src/bin/lttng-sessiond/sessiond-config.c b/src/bin/lttng-sessiond/sessiond-config.c
deleted file mode 100644 (file)
index 707e579..0000000
+++ /dev/null
@@ -1,540 +0,0 @@
-/*
- * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include "version.h"
-#include "sessiond-config.h"
-#include "lttng-ust-ctl.h"
-#include <common/defaults.h>
-#include <limits.h>
-#include <ctype.h>
-#include <common/error.h>
-#include <common/utils.h>
-#include <common/compat/errno.h>
-#include <common/compat/getenv.h>
-
-static
-struct sessiond_config sessiond_config_build_defaults = {
-       .quiet =                                false,
-       .verbose =                              0,
-       .verbose_consumer =                     0,
-
-       .agent_tcp_port =                       { .begin = DEFAULT_AGENT_TCP_PORT_RANGE_BEGIN, .end = DEFAULT_AGENT_TCP_PORT_RANGE_END },
-       .event_notifier_buffer_size_kernel =    DEFAULT_EVENT_NOTIFIER_ERROR_COUNT_MAP_SIZE,
-       .event_notifier_buffer_size_userspace = DEFAULT_EVENT_NOTIFIER_ERROR_COUNT_MAP_SIZE,
-       .app_socket_timeout =                   DEFAULT_APP_SOCKET_RW_TIMEOUT,
-
-       .no_kernel =                            false,
-       .background =                           false,
-       .daemonize =                            false,
-       .sig_parent =                           false,
-
-       .tracing_group_name.value =             (char *) DEFAULT_TRACING_GROUP,
-       .kmod_probes_list.value =               NULL,
-       .kmod_extra_probes_list.value =         NULL,
-
-       .rundir.value =                         NULL,
-
-       .apps_unix_sock_path.value =            NULL,
-       .client_unix_sock_path.value =          NULL,
-       .wait_shm_path.value =                  NULL,
-       .health_unix_sock_path.value =          NULL,
-       .lttng_ust_clock_plugin.value =         NULL,
-       .pid_file_path.value =                  NULL,
-       .lock_file_path.value =                 NULL,
-       .agent_port_file_path.value =           NULL,
-       .load_session_path.value =              NULL,
-
-       .consumerd32_path.value =               NULL,
-       .consumerd32_bin_path.value =           NULL,
-       .consumerd32_lib_dir.value =            NULL,
-       .consumerd32_err_unix_sock_path.value = NULL,
-       .consumerd32_cmd_unix_sock_path.value = NULL,
-
-       .consumerd64_path.value =               NULL,
-       .consumerd64_bin_path.value =           NULL,
-       .consumerd64_lib_dir.value =            NULL,
-       .consumerd64_err_unix_sock_path.value = NULL,
-       .consumerd64_cmd_unix_sock_path.value = NULL,
-
-       .kconsumerd_path.value =                NULL,
-       .kconsumerd_err_unix_sock_path.value =  NULL,
-       .kconsumerd_cmd_unix_sock_path.value =  NULL,
-};
-
-static
-void config_string_fini(struct config_string *str)
-{
-       config_string_set(str, NULL);
-}
-
-static
-void config_string_set_static(struct config_string *config_str,
-               const char *value)
-{
-       config_string_set(config_str, (char *) value);
-       config_str->should_free = false;
-}
-
-/* Only use for dynamically-allocated strings. */
-void config_string_set(struct config_string *config_str, char *value)
-{
-       LTTNG_ASSERT(config_str);
-       if (config_str->should_free) {
-               free(config_str->value);
-               config_str->should_free = false;
-       }
-
-       config_str->should_free = !!value;
-       config_str->value = value;
-}
-
-int sessiond_config_apply_env_config(struct sessiond_config *config)
-{
-       int ret = 0;
-       const char *env_value;
-
-       env_value = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
-       if (env_value) {
-               char *endptr;
-               long int_val;
-
-               errno = 0;
-               int_val = strtoul(env_value, &endptr, 0);
-               if (errno != 0 || int_val > INT_MAX ||
-                               (int_val < 0 && int_val != -1)) {
-                       ERR("Invalid value \"%s\" used for \"%s\" environment variable",
-                                       env_value, DEFAULT_APP_SOCKET_TIMEOUT_ENV);
-                       ret = -1;
-                       goto end;
-               }
-
-               config->app_socket_timeout = int_val;
-       }
-
-       env_value = lttng_secure_getenv("LTTNG_CONSUMERD32_BIN");
-       if (env_value) {
-               config_string_set_static(&config->consumerd32_bin_path,
-                               env_value);
-       }
-       env_value = lttng_secure_getenv("LTTNG_CONSUMERD64_BIN");
-       if (env_value) {
-               config_string_set_static(&config->consumerd64_bin_path,
-                               env_value);
-       }
-
-       env_value = lttng_secure_getenv("LTTNG_CONSUMERD32_LIBDIR");
-       if (env_value) {
-               config_string_set_static(&config->consumerd32_lib_dir,
-                               env_value);
-       }
-       env_value = lttng_secure_getenv("LTTNG_CONSUMERD64_LIBDIR");
-       if (env_value) {
-               config_string_set_static(&config->consumerd64_lib_dir,
-                               env_value);
-       }
-
-       env_value = lttng_secure_getenv("LTTNG_UST_CLOCK_PLUGIN");
-       if (env_value) {
-               config_string_set_static(&config->lttng_ust_clock_plugin,
-                               env_value);
-       }
-
-       env_value = lttng_secure_getenv(DEFAULT_LTTNG_KMOD_PROBES);
-       if (env_value) {
-               config_string_set_static(&config->kmod_probes_list,
-                               env_value);
-       }
-
-       env_value = lttng_secure_getenv(DEFAULT_LTTNG_EXTRA_KMOD_PROBES);
-       if (env_value) {
-               config_string_set_static(&config->kmod_extra_probes_list,
-                               env_value);
-       }
-end:
-       return ret;
-}
-
-static
-int config_set_paths_root(struct sessiond_config *config)
-{
-       int ret = 0;
-
-       config_string_set(&config->rundir, strdup(DEFAULT_LTTNG_RUNDIR));
-       if (!config->rundir.value) {
-               ERR("Failed to set rundir");
-               ret = -1;
-               goto end;
-       }
-
-       config_string_set_static(&config->apps_unix_sock_path,
-                       DEFAULT_GLOBAL_APPS_UNIX_SOCK);
-       config_string_set_static(&config->client_unix_sock_path,
-                       DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
-       config_string_set_static(&config->wait_shm_path,
-                       DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
-       config_string_set_static(&config->health_unix_sock_path,
-                       DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
-end:
-       return ret;
-}
-
-static
-int config_set_paths_non_root(struct sessiond_config *config)
-{
-       int ret = 0;
-       const char *home_path = utils_get_home_dir();
-       char *str;
-
-       if (home_path == NULL) {
-               ERR("Can't get HOME directory for sockets creation.");
-               ret = -1;
-               goto end;
-       }
-
-       /*
-        * Create rundir from home path. This will create something like
-        * $HOME/.lttng
-        */
-       ret = asprintf(&str, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
-       if (ret < 0) {
-               ERR("Failed to set rundir");
-               goto end;
-       }
-       config_string_set(&config->rundir, str);
-       str = NULL;
-
-       ret = asprintf(&str, DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
-       if (ret < 0) {
-               ERR("Failed to set default home apps unix socket path");
-               goto end;
-       }
-       config_string_set(&config->apps_unix_sock_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
-       if (ret < 0) {
-               ERR("Failed to set default home client unix socket path");
-               goto end;
-       }
-       config_string_set(&config->client_unix_sock_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
-       if (ret < 0) {
-               ERR("Failed to set default home apps wait shm path");
-               goto end;
-       }
-       config_string_set(&config->wait_shm_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
-       if (ret < 0) {
-               ERR("Failed to set default home health UNIX socket path");
-               goto end;
-       }
-       config_string_set(&config->health_unix_sock_path, str);
-       str = NULL;
-
-       ret = 0;
-end:
-       return ret;
-}
-
-int sessiond_config_init(struct sessiond_config *config)
-{
-       int ret;
-       bool is_root = (getuid() == 0);
-       char *str;
-
-       LTTNG_ASSERT(config);
-       memcpy(config, &sessiond_config_build_defaults, sizeof(*config));
-
-       if (is_root) {
-               ret = config_set_paths_root(config);
-       } else {
-               ret = config_set_paths_non_root(config);
-       }
-       if (ret < 0) {
-               goto error;
-       }
-
-       /* 32 bits consumerd path setup */
-       ret = asprintf(&str, DEFAULT_USTCONSUMERD32_PATH,
-                       config->rundir.value);
-       if (ret < 0) {
-               ERR("Failed to set 32-bit consumer path");
-               goto error;
-       }
-       config_string_set(&config->consumerd32_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
-                       config->rundir.value);
-       if (ret < 0) {
-               ERR("Failed to set 32-bit consumer error socket path");
-               goto error;
-       }
-       config_string_set(&config->consumerd32_err_unix_sock_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
-                       config->rundir.value);
-       if (ret < 0) {
-               ERR("Failed to set 32-bit consumer command socket path");
-               goto error;
-       }
-       config_string_set(&config->consumerd32_cmd_unix_sock_path, str);
-       str = NULL;
-
-       /* 64 bits consumerd path setup */
-       ret = asprintf(&str, DEFAULT_USTCONSUMERD64_PATH,
-                       config->rundir.value);
-       if (ret < 0) {
-               ERR("Failed to set 64-bit consumer path");
-               goto error;
-       }
-       config_string_set(&config->consumerd64_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
-                       config->rundir.value);
-       if (ret < 0) {
-               ERR("Failed to set 64-bit consumer error socket path");
-               goto error;
-       }
-       config_string_set(&config->consumerd64_err_unix_sock_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
-                       config->rundir.value);
-       if (ret < 0) {
-               ERR("Failed to set 64-bit consumer command socket path");
-               goto error;
-       }
-       config_string_set(&config->consumerd64_cmd_unix_sock_path, str);
-       str = NULL;
-
-       /* kconsumerd consumerd path setup */
-       ret = asprintf(&str, DEFAULT_KCONSUMERD_PATH,
-                       config->rundir.value);
-       if (ret < 0) {
-               ERR("Failed to set kernel consumer path");
-               goto error;
-       }
-       config_string_set(&config->kconsumerd_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
-                       config->rundir.value);
-       if (ret < 0) {
-               ERR("Failed to set kernel consumer error socket path");
-               goto error;
-       }
-       config_string_set(&config->kconsumerd_err_unix_sock_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
-                       config->rundir.value);
-       if (ret < 0) {
-               ERR("Failed to set kernel consumer command socket path");
-               goto error;
-       }
-       config_string_set(&config->kconsumerd_cmd_unix_sock_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, "%s/%s", config->rundir.value,
-                       DEFAULT_LTTNG_SESSIOND_PIDFILE);
-       if (ret < 0) {
-               ERR("Failed to set PID file path");
-               goto error;
-       }
-       config_string_set(&config->pid_file_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, "%s/%s", config->rundir.value,
-                       DEFAULT_LTTNG_SESSIOND_LOCKFILE);
-       if (ret < 0) {
-               ERR("Failed to set lock file path");
-               goto error;
-       }
-       config_string_set(&config->lock_file_path, str);
-       str = NULL;
-
-       ret = asprintf(&str, "%s/%s", config->rundir.value,
-                       DEFAULT_LTTNG_SESSIOND_AGENTPORT_FILE);
-       if (ret < 0) {
-               ERR("Failed to set agent port file path");
-               goto error;
-       }
-       config_string_set(&config->agent_port_file_path, str);
-       str = NULL;
-
-       /*
-        * Allow INSTALL_BIN_PATH to be used as a target path for the
-        * native architecture size consumer if CONFIG_CONSUMER*_PATH
-        * has not been defined.
-        */
-#if (CAA_BITS_PER_LONG == 32)
-       config_string_set_static(&config->consumerd32_bin_path,
-                       INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE);
-       config_string_set_static(&config->consumerd32_lib_dir,
-                       INSTALL_LIB_PATH);
-#elif (CAA_BITS_PER_LONG == 64)
-       config_string_set_static(&config->consumerd64_bin_path,
-                       INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE);
-       config_string_set_static(&config->consumerd64_lib_dir,
-                       INSTALL_LIB_PATH);
-#else
-#error "Unknown bitness"
-#endif
-       ret = 0;
-       return ret;
-error:
-       sessiond_config_fini(config);
-       return ret;
-}
-
-void sessiond_config_fini(struct sessiond_config *config)
-{
-       config_string_fini(&config->tracing_group_name);
-       config_string_fini(&config->kmod_probes_list);
-       config_string_fini(&config->kmod_extra_probes_list);
-       config_string_fini(&config->rundir);
-       config_string_fini(&config->apps_unix_sock_path);
-       config_string_fini(&config->client_unix_sock_path);
-       config_string_fini(&config->wait_shm_path);
-       config_string_fini(&config->health_unix_sock_path);
-       config_string_fini(&config->lttng_ust_clock_plugin);
-       config_string_fini(&config->pid_file_path);
-       config_string_fini(&config->lock_file_path);
-       config_string_fini(&config->load_session_path);
-       config_string_fini(&config->agent_port_file_path);
-       config_string_fini(&config->consumerd32_path);
-       config_string_fini(&config->consumerd32_bin_path);
-       config_string_fini(&config->consumerd32_lib_dir);
-       config_string_fini(&config->consumerd32_err_unix_sock_path);
-       config_string_fini(&config->consumerd32_cmd_unix_sock_path);
-       config_string_fini(&config->consumerd64_path);
-       config_string_fini(&config->consumerd64_bin_path);
-       config_string_fini(&config->consumerd64_lib_dir);
-       config_string_fini(&config->consumerd64_err_unix_sock_path);
-       config_string_fini(&config->consumerd64_cmd_unix_sock_path);
-       config_string_fini(&config->kconsumerd_path);
-       config_string_fini(&config->kconsumerd_err_unix_sock_path);
-       config_string_fini(&config->kconsumerd_cmd_unix_sock_path);
-}
-
-static
-int resolve_path(struct config_string *path)
-{
-       int ret = 0;
-       char *absolute_path;
-
-       if (!path->value || path->value[0] == '/') {
-               goto end;
-       }
-
-       absolute_path = utils_expand_path(path->value);
-       if (!absolute_path) {
-               ret = -1;
-               goto end;
-       }
-
-       config_string_set(path, absolute_path);
-end:
-       return ret;
-}
-
-#define RESOLVE_CHECK(path_config_str)         \
-       if (resolve_path(path_config_str))      \
-               return -1
-
-int sessiond_config_resolve_paths(struct sessiond_config *config)
-{
-       RESOLVE_CHECK(&config->apps_unix_sock_path);
-       RESOLVE_CHECK(&config->client_unix_sock_path);
-       RESOLVE_CHECK(&config->wait_shm_path);
-       RESOLVE_CHECK(&config->health_unix_sock_path);
-       RESOLVE_CHECK(&config->lttng_ust_clock_plugin);
-       RESOLVE_CHECK(&config->pid_file_path);
-       RESOLVE_CHECK(&config->lock_file_path);
-       RESOLVE_CHECK(&config->load_session_path);
-       RESOLVE_CHECK(&config->agent_port_file_path);
-       RESOLVE_CHECK(&config->consumerd32_path);
-       RESOLVE_CHECK(&config->consumerd32_bin_path);
-       RESOLVE_CHECK(&config->consumerd32_lib_dir);
-       RESOLVE_CHECK(&config->consumerd32_err_unix_sock_path);
-       RESOLVE_CHECK(&config->consumerd32_cmd_unix_sock_path);
-       RESOLVE_CHECK(&config->consumerd64_path);
-       RESOLVE_CHECK(&config->consumerd64_bin_path);
-       RESOLVE_CHECK(&config->consumerd64_lib_dir);
-       RESOLVE_CHECK(&config->consumerd64_err_unix_sock_path);
-       RESOLVE_CHECK(&config->consumerd64_cmd_unix_sock_path);
-       RESOLVE_CHECK(&config->kconsumerd_path);
-       RESOLVE_CHECK(&config->kconsumerd_err_unix_sock_path);
-       RESOLVE_CHECK(&config->kconsumerd_cmd_unix_sock_path);
-       return 0;
-}
-
-void sessiond_config_log(struct sessiond_config *config)
-{
-       DBG_NO_LOC("[sessiond configuration]");
-       DBG_NO_LOC("\tversion                        %s", VERSION);
-       if (GIT_VERSION[0] != '\0') {
-               DBG_NO_LOC("\tgit version                    %s", GIT_VERSION);
-       }
-       if (EXTRA_VERSION_NAME[0] != '\0') {
-               DBG_NO_LOC("\textra version name             %s", EXTRA_VERSION_NAME);
-       }
-       if (EXTRA_VERSION_DESCRIPTION[0] != '\0') {
-               DBG_NO_LOC("\textra version description:\n\t%s", EXTRA_VERSION_DESCRIPTION);
-       }
-       if (EXTRA_VERSION_PATCHES[0] != '\0') {
-               DBG_NO_LOC("\textra version patches:\n\t%s", EXTRA_VERSION_PATCHES);
-       }
-       DBG_NO_LOC("\tverbose:                       %i", config->verbose);
-       DBG_NO_LOC("\tverbose consumer:              %i", config->verbose_consumer);
-       DBG_NO_LOC("\tquiet mode:                    %s", config->quiet ? "True" : "False");
-       if (config->agent_tcp_port.begin == config->agent_tcp_port.end) {
-               DBG_NO_LOC("\tagent_tcp_port:                %i", config->agent_tcp_port.begin);
-       } else {
-               DBG_NO_LOC("\tagent_tcp_port:                [%i, %i]",
-                               config->agent_tcp_port.begin,
-                               config->agent_tcp_port.end);
-       }
-       DBG_NO_LOC("\tapplication socket timeout:    %i", config->app_socket_timeout);
-       DBG_NO_LOC("\tno-kernel:                     %s", config->no_kernel ? "True" : "False");
-       DBG_NO_LOC("\tbackground:                    %s", config->background ? "True" : "False");
-       DBG_NO_LOC("\tdaemonize:                     %s", config->daemonize ? "True" : "False");
-       DBG_NO_LOC("\tsignal parent on start:        %s", config->sig_parent ? "True" : "False");
-       DBG_NO_LOC("\ttracing group name:            %s", config->tracing_group_name.value ? : "Unknown");
-       DBG_NO_LOC("\tkmod_probe_list:               %s", config->kmod_probes_list.value ? : "None");
-       DBG_NO_LOC("\tkmod_extra_probe_list:         %s", config->kmod_extra_probes_list.value ? : "None");
-       DBG_NO_LOC("\trundir:                        %s", config->rundir.value ? : "Unknown");
-       DBG_NO_LOC("\tapplication socket path:       %s", config->apps_unix_sock_path.value ? : "Unknown");
-       DBG_NO_LOC("\tclient socket path:            %s", config->client_unix_sock_path.value ? : "Unknown");
-       DBG_NO_LOC("\twait shm path:                 %s", config->wait_shm_path.value ? : "Unknown");
-       DBG_NO_LOC("\thealth socket path:            %s", config->health_unix_sock_path.value ? : "Unknown");
-       DBG_NO_LOC("\tLTTNG_UST_CLOCK_PLUGIN:        %s", config->lttng_ust_clock_plugin.value ? : "None");
-       DBG_NO_LOC("\tpid file path:                 %s", config->pid_file_path.value ? : "Unknown");
-       DBG_NO_LOC("\tlock file path:                %s", config->lock_file_path.value ? : "Unknown");
-       DBG_NO_LOC("\tsession load path:             %s", config->load_session_path.value ? : "None");
-       DBG_NO_LOC("\tagent port file path:          %s", config->agent_port_file_path.value ? : "Unknown");
-       DBG_NO_LOC("\tconsumerd32 path:              %s", config->consumerd32_path.value ? : "Unknown");
-       DBG_NO_LOC("\tconsumerd32 bin path:          %s", config->consumerd32_bin_path.value ? : "Unknown");
-       DBG_NO_LOC("\tconsumerd32 lib dir:           %s", config->consumerd32_lib_dir.value ? : "Unknown");
-       DBG_NO_LOC("\tconsumerd32 err unix sock path:%s", config->consumerd32_err_unix_sock_path.value ? : "Unknown");
-       DBG_NO_LOC("\tconsumerd32 cmd unix sock path:%s", config->consumerd32_cmd_unix_sock_path.value ? : "Unknown");
-       DBG_NO_LOC("\tconsumerd64 path:              %s", config->consumerd64_path.value ? : "Unknown");
-       DBG_NO_LOC("\tconsumerd64 bin path:          %s", config->consumerd64_bin_path.value ? : "Unknown");
-       DBG_NO_LOC("\tconsumerd64 lib dir:           %s", config->consumerd64_lib_dir.value ? : "Unknown");
-       DBG_NO_LOC("\tconsumerd64 err unix sock path:%s", config->consumerd64_err_unix_sock_path.value ? : "Unknown");
-       DBG_NO_LOC("\tconsumerd64 cmd unix sock path:%s", config->consumerd64_cmd_unix_sock_path.value ? : "Unknown");
-       DBG_NO_LOC("\tkconsumerd path:               %s", config->kconsumerd_path.value ? : "Unknown");
-       DBG_NO_LOC("\tkconsumerd err unix sock path: %s", config->kconsumerd_err_unix_sock_path.value ? : "Unknown");
-       DBG_NO_LOC("\tkconsumerd cmd unix sock path: %s", config->kconsumerd_cmd_unix_sock_path.value ? : "Unknown");
-}
diff --git a/src/bin/lttng-sessiond/sessiond-config.cpp b/src/bin/lttng-sessiond/sessiond-config.cpp
new file mode 100644 (file)
index 0000000..05b77e6
--- /dev/null
@@ -0,0 +1,542 @@
+/*
+ * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include "version.h"
+#include "sessiond-config.h"
+#include "lttng-ust-ctl.h"
+#include <common/defaults.h>
+#include <limits.h>
+#include <ctype.h>
+#include <common/error.h>
+#include <common/utils.h>
+#include <common/compat/errno.h>
+#include <common/compat/getenv.h>
+
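+/*
+ * Note: the members below are initialized in declaration order, presumably
+ * because C++ requires designated initializers to follow the order of
+ * declaration (unlike C).
+ */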
+static
+struct sessiond_config sessiond_config_build_defaults = {
+       .verbose =                              0,
+       .verbose_consumer =                     0,
+       .agent_tcp_port =                       { .begin = DEFAULT_AGENT_TCP_PORT_RANGE_BEGIN, .end = DEFAULT_AGENT_TCP_PORT_RANGE_END },
+
+       .event_notifier_buffer_size_kernel =    DEFAULT_EVENT_NOTIFIER_ERROR_COUNT_MAP_SIZE,
+       .event_notifier_buffer_size_userspace = DEFAULT_EVENT_NOTIFIER_ERROR_COUNT_MAP_SIZE,
+       .app_socket_timeout =                   DEFAULT_APP_SOCKET_RW_TIMEOUT,
+
+       .quiet =                                false,
+
+       .no_kernel =                            false,
+       .background =                           false,
+       .daemonize =                            false,
+       .sig_parent =                           false,
+
+       .tracing_group_name = { (char *) DEFAULT_TRACING_GROUP },
+       .kmod_probes_list = { nullptr },
+       .kmod_extra_probes_list = { nullptr },
+
+       .rundir = { nullptr },
+
+       .apps_unix_sock_path = { nullptr },
+       .client_unix_sock_path = { nullptr },
+       .wait_shm_path = { nullptr },
+       .health_unix_sock_path = { nullptr },
+       .lttng_ust_clock_plugin = { nullptr },
+       .pid_file_path = { nullptr },
+       .lock_file_path = { nullptr },
+       .load_session_path = { nullptr },
+       .agent_port_file_path = { nullptr },
+
+       .consumerd32_path = { nullptr },
+       .consumerd32_bin_path = { nullptr },
+       .consumerd32_lib_dir = { nullptr },
+       .consumerd32_err_unix_sock_path = { nullptr },
+       .consumerd32_cmd_unix_sock_path = { nullptr },
+
+       .consumerd64_path = { nullptr },
+       .consumerd64_bin_path = { nullptr },
+       .consumerd64_lib_dir = { nullptr },
+       .consumerd64_err_unix_sock_path = { nullptr },
+       .consumerd64_cmd_unix_sock_path = { nullptr },
+
+       .kconsumerd_path = { nullptr },
+       .kconsumerd_err_unix_sock_path = { nullptr },
+       .kconsumerd_cmd_unix_sock_path = { nullptr },
+};
+
+static
+void config_string_fini(struct config_string *str)
+{
+       config_string_set(str, NULL);
+}
+
+static
+void config_string_set_static(struct config_string *config_str,
+               const char *value)
+{
+       config_string_set(config_str, (char *) value);
+       config_str->should_free = false;
+}
+
+/* Only use for dynamically-allocated strings. */
+void config_string_set(struct config_string *config_str, char *value)
+{
+       LTTNG_ASSERT(config_str);
+       if (config_str->should_free) {
+               free(config_str->value);
+               config_str->should_free = false;
+       }
+
+       config_str->should_free = !!value;
+       config_str->value = value;
+}
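+
+/*
+ * Illustrative sketch of the intended split between the two setters (not part
+ * of the original change; "/custom/rundir" is a made-up path).
+ *
+ *   config_string_set(&config->rundir, strdup("/custom/rundir"));
+ *
+ *   config_string_set_static(&config->client_unix_sock_path,
+ *                   DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
+ */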
+
+int sessiond_config_apply_env_config(struct sessiond_config *config)
+{
+       int ret = 0;
+       const char *env_value;
+
+       env_value = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
+       if (env_value) {
+               char *endptr;
+               long int_val;
+
+               errno = 0;
+               int_val = strtoul(env_value, &endptr, 0);
+               if (errno != 0 || int_val > INT_MAX ||
+                               (int_val < 0 && int_val != -1)) {
+                       ERR("Invalid value \"%s\" used for \"%s\" environment variable",
+                                       env_value, DEFAULT_APP_SOCKET_TIMEOUT_ENV);
+                       ret = -1;
+                       goto end;
+               }
+
+               config->app_socket_timeout = int_val;
+       }
+
+       env_value = lttng_secure_getenv("LTTNG_CONSUMERD32_BIN");
+       if (env_value) {
+               config_string_set_static(&config->consumerd32_bin_path,
+                               env_value);
+       }
+       env_value = lttng_secure_getenv("LTTNG_CONSUMERD64_BIN");
+       if (env_value) {
+               config_string_set_static(&config->consumerd64_bin_path,
+                               env_value);
+       }
+
+       env_value = lttng_secure_getenv("LTTNG_CONSUMERD32_LIBDIR");
+       if (env_value) {
+               config_string_set_static(&config->consumerd32_lib_dir,
+                               env_value);
+       }
+       env_value = lttng_secure_getenv("LTTNG_CONSUMERD64_LIBDIR");
+       if (env_value) {
+               config_string_set_static(&config->consumerd64_lib_dir,
+                               env_value);
+       }
+
+       env_value = lttng_secure_getenv("LTTNG_UST_CLOCK_PLUGIN");
+       if (env_value) {
+               config_string_set_static(&config->lttng_ust_clock_plugin,
+                               env_value);
+       }
+
+       env_value = lttng_secure_getenv(DEFAULT_LTTNG_KMOD_PROBES);
+       if (env_value) {
+               config_string_set_static(&config->kmod_probes_list,
+                               env_value);
+       }
+
+       env_value = lttng_secure_getenv(DEFAULT_LTTNG_EXTRA_KMOD_PROBES);
+       if (env_value) {
+               config_string_set_static(&config->kmod_extra_probes_list,
+                               env_value);
+       }
+end:
+       return ret;
+}
+
+static
+int config_set_paths_root(struct sessiond_config *config)
+{
+       int ret = 0;
+
+       config_string_set(&config->rundir, strdup(DEFAULT_LTTNG_RUNDIR));
+       if (!config->rundir.value) {
+               ERR("Failed to set rundir");
+               ret = -1;
+               goto end;
+       }
+
+       config_string_set_static(&config->apps_unix_sock_path,
+                       DEFAULT_GLOBAL_APPS_UNIX_SOCK);
+       config_string_set_static(&config->client_unix_sock_path,
+                       DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
+       config_string_set_static(&config->wait_shm_path,
+                       DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
+       config_string_set_static(&config->health_unix_sock_path,
+                       DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
+end:
+       return ret;
+}
+
+static
+int config_set_paths_non_root(struct sessiond_config *config)
+{
+       int ret = 0;
+       const char *home_path = utils_get_home_dir();
+       char *str;
+
+       if (home_path == NULL) {
+               ERR("Can't get HOME directory for sockets creation.");
+               ret = -1;
+               goto end;
+       }
+
+       /*
+        * Create rundir from home path. This will create something like
+        * $HOME/.lttng
+        */
+       ret = asprintf(&str, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
+       if (ret < 0) {
+               ERR("Failed to set rundir");
+               goto end;
+       }
+       config_string_set(&config->rundir, str);
+       str = NULL;
+
+       ret = asprintf(&str, DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
+       if (ret < 0) {
+               ERR("Failed to set default home apps unix socket path");
+               goto end;
+       }
+       config_string_set(&config->apps_unix_sock_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
+       if (ret < 0) {
+               ERR("Failed to set default home client unix socket path");
+               goto end;
+       }
+       config_string_set(&config->client_unix_sock_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
+       if (ret < 0) {
+               ERR("Failed to set default home apps wait shm path");
+               goto end;
+       }
+       config_string_set(&config->wait_shm_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
+       if (ret < 0) {
+               ERR("Failed to set default home health UNIX socket path");
+               goto end;
+       }
+       config_string_set(&config->health_unix_sock_path, str);
+       str = NULL;
+
+       ret = 0;
+end:
+       return ret;
+}
+
+int sessiond_config_init(struct sessiond_config *config)
+{
+       int ret;
+       bool is_root = (getuid() == 0);
+       char *str;
+
+       LTTNG_ASSERT(config);
+       memcpy(config, &sessiond_config_build_defaults, sizeof(*config));
+
+       if (is_root) {
+               ret = config_set_paths_root(config);
+       } else {
+               ret = config_set_paths_non_root(config);
+       }
+       if (ret < 0) {
+               goto error;
+       }
+
+       /* 32-bit consumerd path setup */
+       ret = asprintf(&str, DEFAULT_USTCONSUMERD32_PATH,
+                       config->rundir.value);
+       if (ret < 0) {
+               ERR("Failed to set 32-bit consumer path");
+               goto error;
+       }
+       config_string_set(&config->consumerd32_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
+                       config->rundir.value);
+       if (ret < 0) {
+               ERR("Failed to set 32-bit consumer error socket path");
+               goto error;
+       }
+       config_string_set(&config->consumerd32_err_unix_sock_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
+                       config->rundir.value);
+       if (ret < 0) {
+               ERR("Failed to set 32-bit consumer command socket path");
+               goto error;
+       }
+       config_string_set(&config->consumerd32_cmd_unix_sock_path, str);
+       str = NULL;
+
+       /* 64-bit consumerd path setup */
+       ret = asprintf(&str, DEFAULT_USTCONSUMERD64_PATH,
+                       config->rundir.value);
+       if (ret < 0) {
+               ERR("Failed to set 64-bit consumer path");
+               goto error;
+       }
+       config_string_set(&config->consumerd64_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
+                       config->rundir.value);
+       if (ret < 0) {
+               ERR("Failed to set 64-bit consumer error socket path");
+               goto error;
+       }
+       config_string_set(&config->consumerd64_err_unix_sock_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
+                       config->rundir.value);
+       if (ret < 0) {
+               ERR("Failed to set 64-bit consumer command socket path");
+               goto error;
+       }
+       config_string_set(&config->consumerd64_cmd_unix_sock_path, str);
+       str = NULL;
+
+       /* Kernel consumerd (kconsumerd) path setup */
+       ret = asprintf(&str, DEFAULT_KCONSUMERD_PATH,
+                       config->rundir.value);
+       if (ret < 0) {
+               ERR("Failed to set kernel consumer path");
+               goto error;
+       }
+       config_string_set(&config->kconsumerd_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
+                       config->rundir.value);
+       if (ret < 0) {
+               ERR("Failed to set kernel consumer error socket path");
+               goto error;
+       }
+       config_string_set(&config->kconsumerd_err_unix_sock_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
+                       config->rundir.value);
+       if (ret < 0) {
+               ERR("Failed to set kernel consumer command socket path");
+               goto error;
+       }
+       config_string_set(&config->kconsumerd_cmd_unix_sock_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, "%s/%s", config->rundir.value,
+                       DEFAULT_LTTNG_SESSIOND_PIDFILE);
+       if (ret < 0) {
+               ERR("Failed to set PID file path");
+               goto error;
+       }
+       config_string_set(&config->pid_file_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, "%s/%s", config->rundir.value,
+                       DEFAULT_LTTNG_SESSIOND_LOCKFILE);
+       if (ret < 0) {
+               ERR("Failed to set lock file path");
+               goto error;
+       }
+       config_string_set(&config->lock_file_path, str);
+       str = NULL;
+
+       ret = asprintf(&str, "%s/%s", config->rundir.value,
+                       DEFAULT_LTTNG_SESSIOND_AGENTPORT_FILE);
+       if (ret < 0) {
+               ERR("Failed to set agent port file path");
+               goto error;
+       }
+       config_string_set(&config->agent_port_file_path, str);
+       str = NULL;
+
+       /*
+        * Allow INSTALL_BIN_PATH to be used as a target path for the
+        * native architecture size consumer if CONFIG_CONSUMER*_PATH
+        * has not been defined.
+        */
+#if (CAA_BITS_PER_LONG == 32)
+       config_string_set_static(&config->consumerd32_bin_path,
+                       INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE);
+       config_string_set_static(&config->consumerd32_lib_dir,
+                       INSTALL_LIB_PATH);
+#elif (CAA_BITS_PER_LONG == 64)
+       config_string_set_static(&config->consumerd64_bin_path,
+                       INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE);
+       config_string_set_static(&config->consumerd64_lib_dir,
+                       INSTALL_LIB_PATH);
+#else
+#error "Unknown bitness"
+#endif
+       ret = 0;
+       return ret;
+error:
+       sessiond_config_fini(config);
+       return ret;
+}
+
+void sessiond_config_fini(struct sessiond_config *config)
+{
+       config_string_fini(&config->tracing_group_name);
+       config_string_fini(&config->kmod_probes_list);
+       config_string_fini(&config->kmod_extra_probes_list);
+       config_string_fini(&config->rundir);
+       config_string_fini(&config->apps_unix_sock_path);
+       config_string_fini(&config->client_unix_sock_path);
+       config_string_fini(&config->wait_shm_path);
+       config_string_fini(&config->health_unix_sock_path);
+       config_string_fini(&config->lttng_ust_clock_plugin);
+       config_string_fini(&config->pid_file_path);
+       config_string_fini(&config->lock_file_path);
+       config_string_fini(&config->load_session_path);
+       config_string_fini(&config->agent_port_file_path);
+       config_string_fini(&config->consumerd32_path);
+       config_string_fini(&config->consumerd32_bin_path);
+       config_string_fini(&config->consumerd32_lib_dir);
+       config_string_fini(&config->consumerd32_err_unix_sock_path);
+       config_string_fini(&config->consumerd32_cmd_unix_sock_path);
+       config_string_fini(&config->consumerd64_path);
+       config_string_fini(&config->consumerd64_bin_path);
+       config_string_fini(&config->consumerd64_lib_dir);
+       config_string_fini(&config->consumerd64_err_unix_sock_path);
+       config_string_fini(&config->consumerd64_cmd_unix_sock_path);
+       config_string_fini(&config->kconsumerd_path);
+       config_string_fini(&config->kconsumerd_err_unix_sock_path);
+       config_string_fini(&config->kconsumerd_cmd_unix_sock_path);
+}
+
+static
+int resolve_path(struct config_string *path)
+{
+       int ret = 0;
+       char *absolute_path;
+
+       if (!path->value || path->value[0] == '/') {
+               goto end;
+       }
+
+       absolute_path = utils_expand_path(path->value);
+       if (!absolute_path) {
+               ret = -1;
+               goto end;
+       }
+
+       config_string_set(path, absolute_path);
+end:
+       return ret;
+}
+
+#define RESOLVE_CHECK(path_config_str)         \
+       if (resolve_path(path_config_str))      \
+               return -1
+
+int sessiond_config_resolve_paths(struct sessiond_config *config)
+{
+       RESOLVE_CHECK(&config->apps_unix_sock_path);
+       RESOLVE_CHECK(&config->client_unix_sock_path);
+       RESOLVE_CHECK(&config->wait_shm_path);
+       RESOLVE_CHECK(&config->health_unix_sock_path);
+       RESOLVE_CHECK(&config->lttng_ust_clock_plugin);
+       RESOLVE_CHECK(&config->pid_file_path);
+       RESOLVE_CHECK(&config->lock_file_path);
+       RESOLVE_CHECK(&config->load_session_path);
+       RESOLVE_CHECK(&config->agent_port_file_path);
+       RESOLVE_CHECK(&config->consumerd32_path);
+       RESOLVE_CHECK(&config->consumerd32_bin_path);
+       RESOLVE_CHECK(&config->consumerd32_lib_dir);
+       RESOLVE_CHECK(&config->consumerd32_err_unix_sock_path);
+       RESOLVE_CHECK(&config->consumerd32_cmd_unix_sock_path);
+       RESOLVE_CHECK(&config->consumerd64_path);
+       RESOLVE_CHECK(&config->consumerd64_bin_path);
+       RESOLVE_CHECK(&config->consumerd64_lib_dir);
+       RESOLVE_CHECK(&config->consumerd64_err_unix_sock_path);
+       RESOLVE_CHECK(&config->consumerd64_cmd_unix_sock_path);
+       RESOLVE_CHECK(&config->kconsumerd_path);
+       RESOLVE_CHECK(&config->kconsumerd_err_unix_sock_path);
+       RESOLVE_CHECK(&config->kconsumerd_cmd_unix_sock_path);
+       return 0;
+}
+
+void sessiond_config_log(struct sessiond_config *config)
+{
+       DBG_NO_LOC("[sessiond configuration]");
+       DBG_NO_LOC("\tversion                        %s", VERSION);
+       if (GIT_VERSION[0] != '\0') {
+               DBG_NO_LOC("\tgit version                    %s", GIT_VERSION);
+       }
+       if (EXTRA_VERSION_NAME[0] != '\0') {
+               DBG_NO_LOC("\textra version name             %s", EXTRA_VERSION_NAME);
+       }
+       if (EXTRA_VERSION_DESCRIPTION[0] != '\0') {
+               DBG_NO_LOC("\textra version description:\n\t%s", EXTRA_VERSION_DESCRIPTION);
+       }
+       if (EXTRA_VERSION_PATCHES[0] != '\0') {
+               DBG_NO_LOC("\textra version patches:\n\t%s", EXTRA_VERSION_PATCHES);
+       }
+       DBG_NO_LOC("\tverbose:                       %i", config->verbose);
+       DBG_NO_LOC("\tverbose consumer:              %i", config->verbose_consumer);
+       DBG_NO_LOC("\tquiet mode:                    %s", config->quiet ? "True" : "False");
+       if (config->agent_tcp_port.begin == config->agent_tcp_port.end) {
+               DBG_NO_LOC("\tagent_tcp_port:                %i", config->agent_tcp_port.begin);
+       } else {
+               DBG_NO_LOC("\tagent_tcp_port:                [%i, %i]",
+                               config->agent_tcp_port.begin,
+                               config->agent_tcp_port.end);
+       }
+       DBG_NO_LOC("\tapplication socket timeout:    %i", config->app_socket_timeout);
+       DBG_NO_LOC("\tno-kernel:                     %s", config->no_kernel ? "True" : "False");
+       DBG_NO_LOC("\tbackground:                    %s", config->background ? "True" : "False");
+       DBG_NO_LOC("\tdaemonize:                     %s", config->daemonize ? "True" : "False");
+       DBG_NO_LOC("\tsignal parent on start:        %s", config->sig_parent ? "True" : "False");
+       DBG_NO_LOC("\ttracing group name:            %s", config->tracing_group_name.value ? : "Unknown");
+       DBG_NO_LOC("\tkmod_probe_list:               %s", config->kmod_probes_list.value ? : "None");
+       DBG_NO_LOC("\tkmod_extra_probe_list:         %s", config->kmod_extra_probes_list.value ? : "None");
+       DBG_NO_LOC("\trundir:                        %s", config->rundir.value ? : "Unknown");
+       DBG_NO_LOC("\tapplication socket path:       %s", config->apps_unix_sock_path.value ? : "Unknown");
+       DBG_NO_LOC("\tclient socket path:            %s", config->client_unix_sock_path.value ? : "Unknown");
+       DBG_NO_LOC("\twait shm path:                 %s", config->wait_shm_path.value ? : "Unknown");
+       DBG_NO_LOC("\thealth socket path:            %s", config->health_unix_sock_path.value ? : "Unknown");
+       DBG_NO_LOC("\tLTTNG_UST_CLOCK_PLUGIN:        %s", config->lttng_ust_clock_plugin.value ? : "None");
+       DBG_NO_LOC("\tpid file path:                 %s", config->pid_file_path.value ? : "Unknown");
+       DBG_NO_LOC("\tlock file path:                %s", config->lock_file_path.value ? : "Unknown");
+       DBG_NO_LOC("\tsession load path:             %s", config->load_session_path.value ? : "None");
+       DBG_NO_LOC("\tagent port file path:          %s", config->agent_port_file_path.value ? : "Unknown");
+       DBG_NO_LOC("\tconsumerd32 path:              %s", config->consumerd32_path.value ? : "Unknown");
+       DBG_NO_LOC("\tconsumerd32 bin path:          %s", config->consumerd32_bin_path.value ? : "Unknown");
+       DBG_NO_LOC("\tconsumerd32 lib dir:           %s", config->consumerd32_lib_dir.value ? : "Unknown");
+       DBG_NO_LOC("\tconsumerd32 err unix sock path:%s", config->consumerd32_err_unix_sock_path.value ? : "Unknown");
+       DBG_NO_LOC("\tconsumerd32 cmd unix sock path:%s", config->consumerd32_cmd_unix_sock_path.value ? : "Unknown");
+       DBG_NO_LOC("\tconsumerd64 path:              %s", config->consumerd64_path.value ? : "Unknown");
+       DBG_NO_LOC("\tconsumerd64 bin path:          %s", config->consumerd64_bin_path.value ? : "Unknown");
+       DBG_NO_LOC("\tconsumerd64 lib dir:           %s", config->consumerd64_lib_dir.value ? : "Unknown");
+       DBG_NO_LOC("\tconsumerd64 err unix sock path:%s", config->consumerd64_err_unix_sock_path.value ? : "Unknown");
+       DBG_NO_LOC("\tconsumerd64 cmd unix sock path:%s", config->consumerd64_cmd_unix_sock_path.value ? : "Unknown");
+       DBG_NO_LOC("\tkconsumerd path:               %s", config->kconsumerd_path.value ? : "Unknown");
+       DBG_NO_LOC("\tkconsumerd err unix sock path: %s", config->kconsumerd_err_unix_sock_path.value ? : "Unknown");
+       DBG_NO_LOC("\tkconsumerd cmd unix sock path: %s", config->kconsumerd_cmd_unix_sock_path.value ? : "Unknown");
+}
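
For context on sessiond-config.cpp above: config_string_set() takes ownership
of a heap-allocated value (it is freed on the next assignment or in
config_string_fini()), while the file-local config_string_set_static() stores a
borrowed pointer that is never freed. A minimal sketch of the owning variant,
assuming it runs after sessiond_config_init(); the path literal is a
placeholder and error handling is omitted:

    struct sessiond_config config;

    if (sessiond_config_init(&config) == 0) {
            /* Ownership of the strdup()'d string moves to 'config'. */
            config_string_set(&config.load_session_path,
                            strdup("/path/to/session.lttng"));

            sessiond_config_log(&config);

            /* Frees every owned string, including load_session_path. */
            sessiond_config_fini(&config);
    }
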
diff --git a/src/bin/lttng-sessiond/snapshot.c b/src/bin/lttng-sessiond/snapshot.c
deleted file mode 100644 (file)
index 6e4a0c3..0000000
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <inttypes.h>
-#include <string.h>
-#include <urcu/uatomic.h>
-
-#include <common/defaults.h>
-
-#include "snapshot.h"
-#include "utils.h"
-
-/*
- * Return the atomically incremented value of next_output_id.
- */
-static inline unsigned long get_next_output_id(struct snapshot *snapshot)
-{
-       return uatomic_add_return(&snapshot->next_output_id, 1);
-}
-
-/*
- * Initialized snapshot output with the given values.
- *
- * Return 0 on success or else a negative value.
- */
-static int output_init(const struct ltt_session *session,
-               uint64_t max_size, const char *name,
-               struct lttng_uri *uris, size_t nb_uri,
-               struct consumer_output *consumer, struct snapshot_output *output,
-               struct snapshot *snapshot)
-{
-       int ret = 0, i;
-
-       memset(output, 0, sizeof(struct snapshot_output));
-
-       /*
-        * max_size of -1ULL means unset. Set to default (unlimited).
-        */
-       if (max_size == (uint64_t) -1ULL) {
-               max_size = 0;
-       }
-       output->max_size = max_size;
-
-       if (snapshot) {
-               output->id = get_next_output_id(snapshot);
-       }
-       lttng_ht_node_init_ulong(&output->node, (unsigned long) output->id);
-
-       if (name && name[0] != '\0') {
-               if (lttng_strncpy(output->name, name, sizeof(output->name))) {
-                       ret = -LTTNG_ERR_INVALID;
-                       goto error;
-               }
-       } else {
-               /* Set default name. */
-               ret = snprintf(output->name, sizeof(output->name), "%s-%" PRIu32,
-                               DEFAULT_SNAPSHOT_NAME, output->id);
-               if (ret < 0) {
-                       ret = -ENOMEM;
-                       goto error;
-               }
-       }
-
-       if (!consumer) {
-               goto end;
-       }
-
-       output->consumer = consumer_copy_output(consumer);
-       if (!output->consumer) {
-               ret = -ENOMEM;
-               goto error;
-       }
-       output->consumer->snapshot = 1;
-
-       /* No URL given. */
-       if (nb_uri == 0) {
-               ret = 0;
-               goto end;
-       }
-
-       if (uris[0].dtype == LTTNG_DST_PATH) {
-               memset(output->consumer->dst.session_root_path, 0,
-                               sizeof(output->consumer->dst.session_root_path));
-               if (lttng_strncpy(output->consumer->dst.session_root_path,
-                               uris[0].dst.path,
-                               sizeof(output->consumer->dst.session_root_path))) {
-                       ret = -LTTNG_ERR_INVALID;
-                       goto error;
-               }
-               output->consumer->type = CONSUMER_DST_LOCAL;
-               ret = 0;
-               goto end;
-       }
-
-       if (nb_uri != 2) {
-               /* Absolutely needs two URIs for network. */
-               ret = -LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       for (i = 0; i < nb_uri; i ++) {
-               /* Network URIs */
-               ret = consumer_set_network_uri(session, output->consumer,
-                               &uris[i]);
-               if (ret < 0) {
-                       goto error;
-               }
-       }
-
-error:
-end:
-       return ret;
-}
-
-/*
- * Initialize a snapshot output object using the given parameters and URI(s).
- * The name value and uris can be NULL.
- *
- * Return 0 on success or else a negative value.
- */
-int snapshot_output_init_with_uri(const struct ltt_session *session,
-               uint64_t max_size, const char *name,
-               struct lttng_uri *uris, size_t nb_uri,
-               struct consumer_output *consumer, struct snapshot_output *output,
-               struct snapshot *snapshot)
-{
-       return output_init(session, max_size, name, uris, nb_uri, consumer,
-                       output, snapshot);
-}
-
-/*
- * Initialize a snapshot output object using the given parameters. The name
- * value and url can be NULL.
- *
- * Return 0 on success or else a negative value.
- */
-int snapshot_output_init(const struct ltt_session *session,
-               uint64_t max_size, const char *name,
-               const char *ctrl_url, const char *data_url,
-               struct consumer_output *consumer, struct snapshot_output *output,
-               struct snapshot *snapshot)
-{
-       int ret = 0, nb_uri;
-       struct lttng_uri *uris = NULL;
-
-       /* Create an array of URIs from URLs. */
-       nb_uri = uri_parse_str_urls(ctrl_url, data_url, &uris);
-       if (nb_uri < 0) {
-               ret = nb_uri;
-               goto error;
-       }
-
-       ret = output_init(session, max_size, name, uris, nb_uri, consumer,
-                       output, snapshot);
-
-error:
-       free(uris);
-       return ret;
-}
-
-struct snapshot_output *snapshot_output_alloc(void)
-{
-       return zmalloc(sizeof(struct snapshot_output));
-}
-
-/*
- * Delete output from the snapshot object.
- */
-void snapshot_delete_output(struct snapshot *snapshot,
-               struct snapshot_output *output)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(snapshot);
-       LTTNG_ASSERT(snapshot->output_ht);
-       LTTNG_ASSERT(output);
-
-       iter.iter.node = &output->node.node;
-       rcu_read_lock();
-       ret = lttng_ht_del(snapshot->output_ht, &iter);
-       rcu_read_unlock();
-       LTTNG_ASSERT(!ret);
-       /*
-        * This is safe because the ownership of a snapshot object is in a session
-        * for which the session lock need to be acquired to read and modify it.
-        */
-       snapshot->nb_output--;
-}
-
-/*
- * Add output object to the snapshot.
- */
-void snapshot_add_output(struct snapshot *snapshot,
-               struct snapshot_output *output)
-{
-       LTTNG_ASSERT(snapshot);
-       LTTNG_ASSERT(snapshot->output_ht);
-       LTTNG_ASSERT(output);
-
-       rcu_read_lock();
-       lttng_ht_add_unique_ulong(snapshot->output_ht, &output->node);
-       rcu_read_unlock();
-       /*
-        * This is safe because the ownership of a snapshot object is in a session
-        * for which the session lock need to be acquired to read and modify it.
-        */
-       snapshot->nb_output++;
-}
-
-/*
- * Destroy and free a snapshot output object.
- */
-void snapshot_output_destroy(struct snapshot_output *obj)
-{
-       LTTNG_ASSERT(obj);
-
-       if (obj->consumer) {
-               consumer_output_send_destroy_relayd(obj->consumer);
-               consumer_output_put(obj->consumer);
-       }
-       free(obj);
-}
-
-/*
- * RCU read side lock MUST be acquired before calling this since the returned
- * pointer is in a RCU hash table.
- *
- * Return the reference on success or else NULL.
- */
-struct snapshot_output *snapshot_find_output_by_name(const char *name,
-               struct snapshot *snapshot)
-{
-       struct lttng_ht_iter iter;
-       struct snapshot_output *output = NULL;
-
-       LTTNG_ASSERT(snapshot);
-       LTTNG_ASSERT(name);
-
-       cds_lfht_for_each_entry(snapshot->output_ht->ht, &iter.iter, output,
-               node.node) {
-               if (!strncmp(output->name, name, strlen(name))) {
-                       return output;
-               }
-       }
-
-       /* Not found */
-       return NULL;
-}
-
-/*
- * RCU read side lock MUST be acquired before calling this since the returned
- * pointer is in a RCU hash table.
- *
- * Return the reference on success or else NULL.
- */
-struct snapshot_output *snapshot_find_output_by_id(uint32_t id,
-               struct snapshot *snapshot)
-{
-       struct lttng_ht_node_ulong *node;
-       struct lttng_ht_iter iter;
-       struct snapshot_output *output = NULL;
-
-       LTTNG_ASSERT(snapshot);
-
-       lttng_ht_lookup(snapshot->output_ht, (void *)((unsigned long) id), &iter);
-       node = lttng_ht_iter_get_node_ulong(&iter);
-       if (!node) {
-               DBG3("Snapshot output not found with id %" PRId32, id);
-               goto error;
-       }
-       output = caa_container_of(node, struct snapshot_output, node);
-
-error:
-       return output;
-}
-
-/*
- * Initialized a snapshot object that was already allocated.
- *
- * Return 0 on success or else a negative errno value.
- */
-int snapshot_init(struct snapshot *obj)
-{
-       int ret;
-
-       LTTNG_ASSERT(obj);
-
-       memset(obj, 0, sizeof(struct snapshot));
-
-       obj->output_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-       if (!obj->output_ht) {
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       ret = 0;
-
-error:
-       return ret;
-}
-
-/*
- * Destroy snapshot object but the pointer is not freed so it's safe to pass a
- * static reference.
- */
-void snapshot_destroy(struct snapshot *obj)
-{
-       struct lttng_ht_iter iter;
-       struct snapshot_output *output;
-
-       if (!obj->output_ht) {
-               return;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(obj->output_ht->ht, &iter.iter, output,
-                       node.node) {
-               snapshot_delete_output(obj, output);
-               snapshot_output_destroy(output);
-       }
-       rcu_read_unlock();
-       ht_cleanup_push(obj->output_ht);
-}
diff --git a/src/bin/lttng-sessiond/snapshot.cpp b/src/bin/lttng-sessiond/snapshot.cpp
new file mode 100644 (file)
index 0000000..0347220
--- /dev/null
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <inttypes.h>
+#include <string.h>
+#include <urcu/uatomic.h>
+
+#include <common/defaults.h>
+
+#include "snapshot.h"
+#include "utils.h"
+
+/*
+ * Return the atomically incremented value of next_output_id.
+ */
+static inline unsigned long get_next_output_id(struct snapshot *snapshot)
+{
+       return uatomic_add_return(&snapshot->next_output_id, 1);
+}
+
+/*
+ * Initialize a snapshot output with the given values.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int output_init(const struct ltt_session *session,
+               uint64_t max_size, const char *name,
+               struct lttng_uri *uris, size_t nb_uri,
+               struct consumer_output *consumer, struct snapshot_output *output,
+               struct snapshot *snapshot)
+{
+       int ret = 0, i;
+
+       memset(output, 0, sizeof(struct snapshot_output));
+
+       /*
+        * max_size of -1ULL means unset. Set to default (unlimited).
+        */
+       if (max_size == (uint64_t) -1ULL) {
+               max_size = 0;
+       }
+       output->max_size = max_size;
+
+       if (snapshot) {
+               output->id = get_next_output_id(snapshot);
+       }
+       lttng_ht_node_init_ulong(&output->node, (unsigned long) output->id);
+
+       if (name && name[0] != '\0') {
+               if (lttng_strncpy(output->name, name, sizeof(output->name))) {
+                       ret = -LTTNG_ERR_INVALID;
+                       goto error;
+               }
+       } else {
+               /* Set default name. */
+               ret = snprintf(output->name, sizeof(output->name), "%s-%" PRIu32,
+                               DEFAULT_SNAPSHOT_NAME, output->id);
+               if (ret < 0) {
+                       ret = -ENOMEM;
+                       goto error;
+               }
+       }
+
+       if (!consumer) {
+               goto end;
+       }
+
+       output->consumer = consumer_copy_output(consumer);
+       if (!output->consumer) {
+               ret = -ENOMEM;
+               goto error;
+       }
+       output->consumer->snapshot = 1;
+
+       /* No URL given. */
+       if (nb_uri == 0) {
+               ret = 0;
+               goto end;
+       }
+
+       if (uris[0].dtype == LTTNG_DST_PATH) {
+               memset(output->consumer->dst.session_root_path, 0,
+                               sizeof(output->consumer->dst.session_root_path));
+               if (lttng_strncpy(output->consumer->dst.session_root_path,
+                               uris[0].dst.path,
+                               sizeof(output->consumer->dst.session_root_path))) {
+                       ret = -LTTNG_ERR_INVALID;
+                       goto error;
+               }
+               output->consumer->type = CONSUMER_DST_LOCAL;
+               ret = 0;
+               goto end;
+       }
+
+       if (nb_uri != 2) {
+               /* Absolutely needs two URIs for network. */
+               ret = -LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       for (i = 0; i < nb_uri; i ++) {
+               /* Network URIs */
+               ret = consumer_set_network_uri(session, output->consumer,
+                               &uris[i]);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+error:
+end:
+       return ret;
+}
+
+/*
+ * Initialize a snapshot output object using the given parameters and URI(s).
+ * The name value and uris can be NULL.
+ *
+ * Return 0 on success or else a negative value.
+ */
+int snapshot_output_init_with_uri(const struct ltt_session *session,
+               uint64_t max_size, const char *name,
+               struct lttng_uri *uris, size_t nb_uri,
+               struct consumer_output *consumer, struct snapshot_output *output,
+               struct snapshot *snapshot)
+{
+       return output_init(session, max_size, name, uris, nb_uri, consumer,
+                       output, snapshot);
+}
+
+/*
+ * Initialize a snapshot output object using the given parameters. The name
+ * value and url can be NULL.
+ *
+ * Return 0 on success or else a negative value.
+ */
+int snapshot_output_init(const struct ltt_session *session,
+               uint64_t max_size, const char *name,
+               const char *ctrl_url, const char *data_url,
+               struct consumer_output *consumer, struct snapshot_output *output,
+               struct snapshot *snapshot)
+{
+       int ret = 0, nb_uri;
+       struct lttng_uri *uris = NULL;
+
+       /* Create an array of URIs from URLs. */
+       nb_uri = uri_parse_str_urls(ctrl_url, data_url, &uris);
+       if (nb_uri < 0) {
+               ret = nb_uri;
+               goto error;
+       }
+
+       ret = output_init(session, max_size, name, uris, nb_uri, consumer,
+                       output, snapshot);
+
+error:
+       free(uris);
+       return ret;
+}
+
+struct snapshot_output *snapshot_output_alloc(void)
+{
+       return (snapshot_output *) zmalloc(sizeof(struct snapshot_output));
+}
+
+/*
+ * Delete output from the snapshot object.
+ */
+void snapshot_delete_output(struct snapshot *snapshot,
+               struct snapshot_output *output)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(snapshot);
+       LTTNG_ASSERT(snapshot->output_ht);
+       LTTNG_ASSERT(output);
+
+       iter.iter.node = &output->node.node;
+       rcu_read_lock();
+       ret = lttng_ht_del(snapshot->output_ht, &iter);
+       rcu_read_unlock();
+       LTTNG_ASSERT(!ret);
+       /*
+        * This is safe because a snapshot object is owned by a session, and the
+        * session lock must be held to read or modify it.
+        */
+       snapshot->nb_output--;
+}
+
+/*
+ * Add output object to the snapshot.
+ */
+void snapshot_add_output(struct snapshot *snapshot,
+               struct snapshot_output *output)
+{
+       LTTNG_ASSERT(snapshot);
+       LTTNG_ASSERT(snapshot->output_ht);
+       LTTNG_ASSERT(output);
+
+       rcu_read_lock();
+       lttng_ht_add_unique_ulong(snapshot->output_ht, &output->node);
+       rcu_read_unlock();
+       /*
+        * This is safe because a snapshot object is owned by a session, and the
+        * session lock must be held to read or modify it.
+        */
+       snapshot->nb_output++;
+}
+
+/*
+ * Destroy and free a snapshot output object.
+ */
+void snapshot_output_destroy(struct snapshot_output *obj)
+{
+       LTTNG_ASSERT(obj);
+
+       if (obj->consumer) {
+               consumer_output_send_destroy_relayd(obj->consumer);
+               consumer_output_put(obj->consumer);
+       }
+       free(obj);
+}
+
+/*
+ * RCU read side lock MUST be acquired before calling this since the returned
+ * pointer is in a RCU hash table.
+ *
+ * Return the reference on success or else NULL.
+ */
+struct snapshot_output *snapshot_find_output_by_name(const char *name,
+               struct snapshot *snapshot)
+{
+       struct lttng_ht_iter iter;
+       struct snapshot_output *output = NULL;
+
+       LTTNG_ASSERT(snapshot);
+       LTTNG_ASSERT(name);
+
+       cds_lfht_for_each_entry(snapshot->output_ht->ht, &iter.iter, output,
+               node.node) {
+               if (!strncmp(output->name, name, strlen(name))) {
+                       return output;
+               }
+       }
+
+       /* Not found */
+       return NULL;
+}
+
+/*
+ * RCU read side lock MUST be acquired before calling this since the returned
+ * pointer is in a RCU hash table.
+ *
+ * Return the reference on success or else NULL.
+ */
+struct snapshot_output *snapshot_find_output_by_id(uint32_t id,
+               struct snapshot *snapshot)
+{
+       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_iter iter;
+       struct snapshot_output *output = NULL;
+
+       LTTNG_ASSERT(snapshot);
+
+       lttng_ht_lookup(snapshot->output_ht, (void *)((unsigned long) id), &iter);
+       node = lttng_ht_iter_get_node_ulong(&iter);
+       if (!node) {
+               DBG3("Snapshot output not found with id %" PRId32, id);
+               goto error;
+       }
+       output = caa_container_of(node, struct snapshot_output, node);
+
+error:
+       return output;
+}
+
+/*
+ * Initialize a snapshot object that was already allocated.
+ *
+ * Return 0 on success or else a negative errno value.
+ */
+int snapshot_init(struct snapshot *obj)
+{
+       int ret;
+
+       LTTNG_ASSERT(obj);
+
+       memset(obj, 0, sizeof(struct snapshot));
+
+       obj->output_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+       if (!obj->output_ht) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       ret = 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Destroy the snapshot object's contents. The pointer itself is not freed, so
+ * it is safe to pass a statically-allocated object.
+ */
+void snapshot_destroy(struct snapshot *obj)
+{
+       struct lttng_ht_iter iter;
+       struct snapshot_output *output;
+
+       if (!obj->output_ht) {
+               return;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(obj->output_ht->ht, &iter.iter, output,
+                       node.node) {
+               snapshot_delete_output(obj, output);
+               snapshot_output_destroy(output);
+       }
+       rcu_read_unlock();
+       ht_cleanup_push(obj->output_ht);
+}
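
The only functional change from the deleted snapshot.c is in
snapshot_output_alloc(): C implicitly converts the void * returned by zmalloc()
to the target pointer type, while C++ rejects that conversion, so the
allocation gains an explicit cast (thread.cpp below follows the same pattern).
A standalone sketch of the rule; the example type is hypothetical and calloc()
stands in for the project's zmalloc() helper:

    #include <stdlib.h>

    struct output_example {
            int id;
    };

    static struct output_example *alloc_output_example(void)
    {
            /* Accepted as C without the cast; C++ requires it. */
            return (struct output_example *) calloc(
                            1, sizeof(struct output_example));
    }
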
diff --git a/src/bin/lttng-sessiond/thread-utils.c b/src/bin/lttng-sessiond/thread-utils.c
deleted file mode 100644 (file)
index 1eb25ea..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include "lttng-sessiond.h"
-#include "utils.h"
-#include <common/utils.h>
-#include <pthread.h>
-
-/*
- * Quit pipe for all threads. This permits a single cancellation point
- * for all threads when receiving an event on the pipe.
- */
-static int thread_quit_pipe[2] = { -1, -1 };
-
-/*
- * Init thread quit pipe.
- *
- * Return -1 on error or 0 if all pipes are created.
- */
-static int __init_thread_quit_pipe(int *a_pipe)
-{
-       int ret, i;
-
-       ret = pipe(a_pipe);
-       if (ret < 0) {
-               PERROR("thread quit pipe");
-               goto error;
-       }
-
-       for (i = 0; i < 2; i++) {
-               ret = fcntl(a_pipe[i], F_SETFD, FD_CLOEXEC);
-               if (ret < 0) {
-                       PERROR("fcntl");
-                       goto error;
-               }
-       }
-
-error:
-       return ret;
-}
-
-int sessiond_init_thread_quit_pipe(void)
-{
-       return __init_thread_quit_pipe(thread_quit_pipe);
-}
-
-int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
-{
-       return (fd == thread_quit_pipe[0] && (events & LPOLLIN));
-}
-
-/*
- * Wait for a notification on the quit pipe (with a timeout).
- *
- * A timeout value of -1U means no timeout.
- *
- * Returns 1 if the caller should quit, 0 if the timeout was reached, and
- * -1 if an error was encountered.
- */
-int sessiond_wait_for_quit_pipe(int timeout_ms)
-{
-       int ret;
-       struct lttng_poll_event events;
-
-       ret = lttng_poll_create(&events, 1, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               PERROR("Failed to initialize poll/epoll set");
-               ret = -1;
-               goto end;
-       }
-       ret = lttng_poll_add(&events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               PERROR("Failed to add file descriptor to poll/epoll set");
-               ret = -1;
-               goto end_clean_poll;
-       }
-       ret = lttng_poll_wait(&events, timeout_ms);
-       if (ret > 0) {
-               /* Should quit. */
-               ret = 1;
-       } else if (ret < 0 && errno != EINTR) {
-               /* Unknown error. */
-               PERROR("Failed to epoll()/poll() thread quit pipe");
-               ret = -1;
-       } else {
-               /* Timeout reached. */
-               ret = 0;
-       }
-end_clean_poll:
-       lttng_poll_clean(&events);
-end:
-       return ret;
-}
-
-int sessiond_notify_quit_pipe(void)
-{
-       return notify_thread_pipe(thread_quit_pipe[1]);
-}
-
-void sessiond_close_quit_pipe(void)
-{
-       utils_close_pipe(thread_quit_pipe);
-}
-
-static
-int __sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size,
-               int *a_pipe)
-{
-       int ret;
-
-       LTTNG_ASSERT(events);
-
-       ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
-       if (ret < 0) {
-               goto error;
-       }
-
-       /* Add quit pipe */
-       ret = lttng_poll_add(events, a_pipe[0], LPOLLIN | LPOLLERR);
-       if (ret < 0) {
-               goto error;
-       }
-
-       return 0;
-
-error:
-       return ret;
-}
-
-/*
- * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
- */
-int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
-{
-       return __sessiond_set_thread_pollset(events, size, thread_quit_pipe);
-}
diff --git a/src/bin/lttng-sessiond/thread-utils.cpp b/src/bin/lttng-sessiond/thread-utils.cpp
new file mode 100644 (file)
index 0000000..1eb25ea
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include "lttng-sessiond.h"
+#include "utils.h"
+#include <common/utils.h>
+#include <pthread.h>
+
+/*
+ * Quit pipe for all threads. This permits a single cancellation point
+ * for all threads when receiving an event on the pipe.
+ */
+static int thread_quit_pipe[2] = { -1, -1 };
+
+/*
+ * Init thread quit pipe.
+ *
+ * Return -1 on error or 0 if all pipes are created.
+ */
+static int __init_thread_quit_pipe(int *a_pipe)
+{
+       int ret, i;
+
+       ret = pipe(a_pipe);
+       if (ret < 0) {
+               PERROR("thread quit pipe");
+               goto error;
+       }
+
+       for (i = 0; i < 2; i++) {
+               ret = fcntl(a_pipe[i], F_SETFD, FD_CLOEXEC);
+               if (ret < 0) {
+                       PERROR("fcntl");
+                       goto error;
+               }
+       }
+
+error:
+       return ret;
+}
+
+int sessiond_init_thread_quit_pipe(void)
+{
+       return __init_thread_quit_pipe(thread_quit_pipe);
+}
+
+int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
+{
+       return (fd == thread_quit_pipe[0] && (events & LPOLLIN));
+}
+
+/*
+ * Wait for a notification on the quit pipe (with a timeout).
+ *
+ * A timeout value of -1U means no timeout.
+ *
+ * Returns 1 if the caller should quit, 0 if the timeout was reached, and
+ * -1 if an error was encountered.
+ */
+int sessiond_wait_for_quit_pipe(int timeout_ms)
+{
+       int ret;
+       struct lttng_poll_event events;
+
+       ret = lttng_poll_create(&events, 1, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               PERROR("Failed to initialize poll/epoll set");
+               ret = -1;
+               goto end;
+       }
+       ret = lttng_poll_add(&events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               PERROR("Failed to add file descriptor to poll/epoll set");
+               ret = -1;
+               goto end_clean_poll;
+       }
+       ret = lttng_poll_wait(&events, timeout_ms);
+       if (ret > 0) {
+               /* Should quit. */
+               ret = 1;
+       } else if (ret < 0 && errno != EINTR) {
+               /* Unknown error. */
+               PERROR("Failed to epoll()/poll() thread quit pipe");
+               ret = -1;
+       } else {
+               /* Timeout reached. */
+               ret = 0;
+       }
+end_clean_poll:
+       lttng_poll_clean(&events);
+end:
+       return ret;
+}
+
+int sessiond_notify_quit_pipe(void)
+{
+       return notify_thread_pipe(thread_quit_pipe[1]);
+}
+
+void sessiond_close_quit_pipe(void)
+{
+       utils_close_pipe(thread_quit_pipe);
+}
+
+static
+int __sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size,
+               int *a_pipe)
+{
+       int ret;
+
+       LTTNG_ASSERT(events);
+
+       ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /* Add quit pipe */
+       ret = lttng_poll_add(events, a_pipe[0], LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               goto error;
+       }
+
+       return 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
+ */
+int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
+{
+       return __sessiond_set_thread_pollset(events, size, thread_quit_pipe);
+}
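
thread-utils.cpp is carried over unchanged (same blob 1eb25ea as the deleted
thread-utils.c). As a reminder of how its quit-pipe API is consumed, here is a
hedged sketch of a typical sessiond thread loop; the poll-set size and the
"dispatch own file descriptors" step are placeholders, error handling is
trimmed, and the LTTNG_POLL_GETFD()/LTTNG_POLL_GETEV() accessors are assumed to
come from the common poll compatibility layer:

    static void *example_thread(void *data __attribute__((unused)))
    {
            int ret, i;
            struct lttng_poll_event events;

            /* Poll set holding the quit pipe plus one thread-specific fd. */
            ret = sessiond_set_thread_pollset(&events, 2);
            if (ret < 0) {
                    goto end;
            }

            for (;;) {
                    ret = lttng_poll_wait(&events, -1);
                    if (ret < 0) {
                            break;
                    }

                    for (i = 0; i < ret; i++) {
                            const uint32_t revents = LTTNG_POLL_GETEV(&events, i);
                            const int fd = LTTNG_POLL_GETFD(&events, i);

                            if (sessiond_check_thread_quit_pipe(fd, revents)) {
                                    goto end_poll;
                            }
                            /* ... dispatch the thread's own file descriptors ... */
                    }
            }
    end_poll:
            lttng_poll_clean(&events);
    end:
            return NULL;
    }

On the shutdown side, another thread calls sessiond_notify_quit_pipe(), which
makes every loop of this shape exit through sessiond_check_thread_quit_pipe().
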
diff --git a/src/bin/lttng-sessiond/thread.c b/src/bin/lttng-sessiond/thread.c
deleted file mode 100644 (file)
index a4e3cb8..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include "thread.h"
-#include <urcu/list.h>
-#include <urcu/ref.h>
-#include <pthread.h>
-#include <common/macros.h>
-#include <common/error.h>
-#include <common/defaults.h>
-
-static struct thread_list {
-       struct cds_list_head head;
-       pthread_mutex_t lock;
-} thread_list = {
-       .head = CDS_LIST_HEAD_INIT(thread_list.head),
-       .lock = PTHREAD_MUTEX_INITIALIZER,
-};
-
-struct lttng_thread {
-       struct urcu_ref ref;
-       struct cds_list_head node;
-       pthread_t thread;
-       const char *name;
-       /* Main thread function */
-       lttng_thread_entry_point entry;
-       /*
-        * Thread-specific shutdown method. Allows threads to implement their
-        * own shutdown mechanism as some of them use a structured message
-        * passed through a command queue and some rely on a dedicated "quit"
-        * pipe.
-        */
-       lttng_thread_shutdown_cb shutdown;
-       lttng_thread_cleanup_cb cleanup;
-       /* Thread implementation-specific data. */
-       void *data;
-};
-
-static
-void lttng_thread_destroy(struct lttng_thread *thread)
-{
-       if (thread->cleanup) {
-               thread->cleanup(thread->data);
-       }
-       free(thread);
-}
-
-static
-void lttng_thread_release(struct urcu_ref *ref)
-{
-       lttng_thread_destroy(container_of(ref, struct lttng_thread, ref));
-}
-
-static
-void *launch_thread(void *data)
-{
-       void *ret;
-       struct lttng_thread *thread = (struct lttng_thread *) data;
-
-       logger_set_thread_name(thread->name, true);
-       DBG("Entering thread entry point");
-       ret = thread->entry(thread->data);
-       DBG("Thread entry point has returned");
-       return ret;
-}
-
-struct lttng_thread *lttng_thread_create(const char *name,
-               lttng_thread_entry_point entry,
-               lttng_thread_shutdown_cb shutdown,
-               lttng_thread_cleanup_cb cleanup,
-               void *thread_data)
-{
-       int ret;
-       struct lttng_thread *thread;
-
-       thread = zmalloc(sizeof(*thread));
-       if (!thread) {
-               goto error_alloc;
-       }
-
-       urcu_ref_init(&thread->ref);
-       CDS_INIT_LIST_HEAD(&thread->node);
-       /*
-        * Thread names are assumed to be statically allocated strings.
-        * It is unnecessary to copy this attribute.
-        */
-       thread->name = name;
-       thread->entry = entry;
-       thread->shutdown = shutdown;
-       thread->cleanup = cleanup;
-       thread->data = thread_data;
-
-       pthread_mutex_lock(&thread_list.lock);
-       /*
-        * Add the thread at the head of the list to shutdown threads in the
-        * opposite order of their creation. A reference is taken for the
-        * thread list which will be released on shutdown of the thread.
-        */
-       cds_list_add(&thread->node, &thread_list.head);
-       (void) lttng_thread_get(thread);
-
-       ret = pthread_create(&thread->thread, default_pthread_attr(),
-                       launch_thread, thread);
-       if (ret) {
-               PERROR("Failed to create \"%s\" thread", thread->name);
-               goto error_pthread_create;
-       }
-
-       pthread_mutex_unlock(&thread_list.lock);
-       return thread;
-
-error_pthread_create:
-       cds_list_del(&thread->node);
-       /* Release list reference. */
-       lttng_thread_put(thread);
-       pthread_mutex_unlock(&thread_list.lock);
-       /* Release initial reference. */
-       lttng_thread_put(thread);
-error_alloc:
-       return NULL;
-}
-
-bool lttng_thread_get(struct lttng_thread *thread)
-{
-       return urcu_ref_get_unless_zero(&thread->ref);
-}
-
-void lttng_thread_put(struct lttng_thread *thread)
-{
-       if (!thread) {
-               return;
-       }
-       LTTNG_ASSERT(thread->ref.refcount);
-       urcu_ref_put(&thread->ref, lttng_thread_release);
-}
-
-const char *lttng_thread_get_name(const struct lttng_thread *thread)
-{
-       return thread->name;
-}
-
-static
-bool _lttng_thread_shutdown(struct lttng_thread *thread)
-{
-       int ret;
-       void *status;
-       bool result = true;
-
-       DBG("Shutting down \"%s\" thread", thread->name);
-       if (thread->shutdown) {
-               result = thread->shutdown(thread->data);
-               if (!result) {
-                       result = false;
-                       goto end;
-               }
-       }
-
-       ret = pthread_join(thread->thread, &status);
-       if (ret) {
-               PERROR("Failed to join \"%s\" thread", thread->name);
-               result = false;
-               goto end;
-       }
-       DBG("Joined thread \"%s\"", thread->name);
-end:
-       return result;
-}
-
-bool lttng_thread_shutdown(struct lttng_thread *thread)
-{
-       const bool result = _lttng_thread_shutdown(thread);
-
-       if (result) {
-               /* Release the list's reference to the thread. */
-               pthread_mutex_lock(&thread_list.lock);
-               cds_list_del(&thread->node);
-               lttng_thread_put(thread);
-               pthread_mutex_unlock(&thread_list.lock);
-       }
-       return result;
-}
-
-void lttng_thread_list_shutdown_orphans(void)
-{
-       struct lttng_thread *thread, *tmp;
-
-       pthread_mutex_lock(&thread_list.lock);
-       cds_list_for_each_entry_safe(thread, tmp, &thread_list.head, node) {
-               bool result;
-               const long ref = uatomic_read(&thread->ref.refcount);
-
-               if (ref != 1) {
-                       /*
-                        * Other external references to the thread exist, skip.
-                        */
-                       continue;
-               }
-
-               result = _lttng_thread_shutdown(thread);
-               if (!result) {
-                       ERR("Failed to shutdown thread \"%s\"", thread->name);
-               }
-       }
-       pthread_mutex_unlock(&thread_list.lock);
-}
diff --git a/src/bin/lttng-sessiond/thread.cpp b/src/bin/lttng-sessiond/thread.cpp
new file mode 100644 (file)
index 0000000..446a8c9
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include "thread.h"
+#include <urcu/list.h>
+#include <urcu/ref.h>
+#include <pthread.h>
+#include <common/macros.h>
+#include <common/error.h>
+#include <common/defaults.h>
+
+static struct thread_list {
+       struct cds_list_head head;
+       pthread_mutex_t lock;
+} thread_list = {
+       .head = CDS_LIST_HEAD_INIT(thread_list.head),
+       .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+struct lttng_thread {
+       struct urcu_ref ref;
+       struct cds_list_head node;
+       pthread_t thread;
+       const char *name;
+       /* Main thread function */
+       lttng_thread_entry_point entry;
+       /*
+        * Thread-specific shutdown method. Allows threads to implement their
+        * own shutdown mechanism as some of them use a structured message
+        * passed through a command queue and some rely on a dedicated "quit"
+        * pipe.
+        */
+       lttng_thread_shutdown_cb shutdown;
+       lttng_thread_cleanup_cb cleanup;
+       /* Thread implementation-specific data. */
+       void *data;
+};
+
+static
+void lttng_thread_destroy(struct lttng_thread *thread)
+{
+       if (thread->cleanup) {
+               thread->cleanup(thread->data);
+       }
+       free(thread);
+}
+
+static
+void lttng_thread_release(struct urcu_ref *ref)
+{
+       lttng_thread_destroy(container_of(ref, struct lttng_thread, ref));
+}
+
+static
+void *launch_thread(void *data)
+{
+       void *ret;
+       struct lttng_thread *thread = (struct lttng_thread *) data;
+
+       logger_set_thread_name(thread->name, true);
+       DBG("Entering thread entry point");
+       ret = thread->entry(thread->data);
+       DBG("Thread entry point has returned");
+       return ret;
+}
+
+struct lttng_thread *lttng_thread_create(const char *name,
+               lttng_thread_entry_point entry,
+               lttng_thread_shutdown_cb shutdown,
+               lttng_thread_cleanup_cb cleanup,
+               void *thread_data)
+{
+       int ret;
+       struct lttng_thread *thread;
+
+       thread = (lttng_thread *) zmalloc(sizeof(*thread));
+       if (!thread) {
+               goto error_alloc;
+       }
+
+       urcu_ref_init(&thread->ref);
+       CDS_INIT_LIST_HEAD(&thread->node);
+       /*
+        * Thread names are assumed to be statically allocated strings.
+        * It is unnecessary to copy this attribute.
+        */
+       thread->name = name;
+       thread->entry = entry;
+       thread->shutdown = shutdown;
+       thread->cleanup = cleanup;
+       thread->data = thread_data;
+
+       pthread_mutex_lock(&thread_list.lock);
+       /*
+        * Add the thread at the head of the list so that threads are shut down
+        * in the reverse order of their creation. A reference is taken on
+        * behalf of the thread list; it is released when the thread shuts down.
+        */
+       cds_list_add(&thread->node, &thread_list.head);
+       (void) lttng_thread_get(thread);
+
+       ret = pthread_create(&thread->thread, default_pthread_attr(),
+                       launch_thread, thread);
+       if (ret) {
+               PERROR("Failed to create \"%s\" thread", thread->name);
+               goto error_pthread_create;
+       }
+
+       pthread_mutex_unlock(&thread_list.lock);
+       return thread;
+
+error_pthread_create:
+       cds_list_del(&thread->node);
+       /* Release list reference. */
+       lttng_thread_put(thread);
+       pthread_mutex_unlock(&thread_list.lock);
+       /* Release initial reference. */
+       lttng_thread_put(thread);
+error_alloc:
+       return NULL;
+}
+
+bool lttng_thread_get(struct lttng_thread *thread)
+{
+       return urcu_ref_get_unless_zero(&thread->ref);
+}
+
+void lttng_thread_put(struct lttng_thread *thread)
+{
+       if (!thread) {
+               return;
+       }
+       LTTNG_ASSERT(thread->ref.refcount);
+       urcu_ref_put(&thread->ref, lttng_thread_release);
+}
+
+const char *lttng_thread_get_name(const struct lttng_thread *thread)
+{
+       return thread->name;
+}
+
+static
+bool _lttng_thread_shutdown(struct lttng_thread *thread)
+{
+       int ret;
+       void *status;
+       bool result = true;
+
+       DBG("Shutting down \"%s\" thread", thread->name);
+       if (thread->shutdown) {
+               result = thread->shutdown(thread->data);
+               if (!result) {
+                       result = false;
+                       goto end;
+               }
+       }
+
+       ret = pthread_join(thread->thread, &status);
+       if (ret) {
+               PERROR("Failed to join \"%s\" thread", thread->name);
+               result = false;
+               goto end;
+       }
+       DBG("Joined thread \"%s\"", thread->name);
+end:
+       return result;
+}
+
+bool lttng_thread_shutdown(struct lttng_thread *thread)
+{
+       const bool result = _lttng_thread_shutdown(thread);
+
+       if (result) {
+               /* Release the list's reference to the thread. */
+               pthread_mutex_lock(&thread_list.lock);
+               cds_list_del(&thread->node);
+               lttng_thread_put(thread);
+               pthread_mutex_unlock(&thread_list.lock);
+       }
+       return result;
+}
+
+void lttng_thread_list_shutdown_orphans(void)
+{
+       struct lttng_thread *thread, *tmp;
+
+       pthread_mutex_lock(&thread_list.lock);
+       cds_list_for_each_entry_safe(thread, tmp, &thread_list.head, node) {
+               bool result;
+               const long ref = uatomic_read(&thread->ref.refcount);
+
+               if (ref != 1) {
+                       /*
+                        * Other external references to the thread exist, skip.
+                        */
+                       continue;
+               }
+
+               result = _lttng_thread_shutdown(thread);
+               if (!result) {
+                       ERR("Failed to shutdown thread \"%s\"", thread->name);
+               }
+       }
+       pthread_mutex_unlock(&thread_list.lock);
+}
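
As in snapshot.cpp, the only functional change in thread.cpp is the explicit
cast on zmalloc() in lttng_thread_create(). For reference, a hedged sketch of
the lifecycle this file implements; the "worker" name and the callback bodies
are placeholders, and the return-value convention of sessiond_notify_quit_pipe()
(one byte written on success) is assumed:

    static void *worker_entry(void *data __attribute__((unused)))
    {
            /* ... thread main loop, typically built on the quit pipe above ... */
            return NULL;
    }

    static bool worker_shutdown(void *data __attribute__((unused)))
    {
            /* Ask the main loop to exit through the shared quit pipe. */
            return sessiond_notify_quit_pipe() == 1;
    }

    static void worker_launch_and_stop(void)
    {
            struct lttng_thread *thread;

            thread = lttng_thread_create("worker", worker_entry,
                            worker_shutdown, NULL, NULL);
            if (!thread) {
                    return;
            }

            /* ... later, during teardown ... */
            if (!lttng_thread_shutdown(thread)) {
                    ERR("Failed to shut down \"worker\" thread");
            }

            /* Drop the reference returned by lttng_thread_create(). */
            lttng_thread_put(thread);
    }
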
diff --git a/src/bin/lttng-sessiond/timer.c b/src/bin/lttng-sessiond/timer.c
deleted file mode 100644 (file)
index 4e522a7..0000000
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * Copyright (C) 2017 Julien Desfossez <jdesfossez@efficios.com>
- * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <inttypes.h>
-#include <signal.h>
-
-#include "timer.h"
-#include "health-sessiond.h"
-#include "rotation-thread.h"
-#include "thread.h"
-
-#define LTTNG_SESSIOND_SIG_QS                          SIGRTMIN + 10
-#define LTTNG_SESSIOND_SIG_EXIT                                SIGRTMIN + 11
-#define LTTNG_SESSIOND_SIG_PENDING_ROTATION_CHECK      SIGRTMIN + 12
-#define LTTNG_SESSIOND_SIG_SCHEDULED_ROTATION          SIGRTMIN + 13
-
-#define UINT_TO_PTR(value)                             \
-       ({                                              \
-               LTTNG_ASSERT(value <= UINTPTR_MAX);             \
-               (void *) (uintptr_t) value;             \
-       })
-#define PTR_TO_UINT(ptr) ((uintptr_t) ptr)
-
-/*
- * Handle timer teardown race wrt memory free of private data by sessiond
- * signals are handled by a single thread, which permits a synchronization
- * point between handling of each signal. Internal lock ensures mutual
- * exclusion.
- */
-static
-struct timer_signal_data {
-       /* Thread managing signals. */
-       pthread_t tid;
-       int qs_done;
-       pthread_mutex_t lock;
-} timer_signal = {
-       .tid = 0,
-       .qs_done = 0,
-       .lock = PTHREAD_MUTEX_INITIALIZER,
-};
-
-/*
- * Set custom signal mask to current thread.
- */
-static
-void setmask(sigset_t *mask)
-{
-       int ret;
-
-       ret = sigemptyset(mask);
-       if (ret) {
-               PERROR("sigemptyset");
-       }
-       ret = sigaddset(mask, LTTNG_SESSIOND_SIG_QS);
-       if (ret) {
-               PERROR("sigaddset teardown");
-       }
-       ret = sigaddset(mask, LTTNG_SESSIOND_SIG_EXIT);
-       if (ret) {
-               PERROR("sigaddset exit");
-       }
-       ret = sigaddset(mask, LTTNG_SESSIOND_SIG_PENDING_ROTATION_CHECK);
-       if (ret) {
-               PERROR("sigaddset pending rotation check");
-       }
-       ret = sigaddset(mask, LTTNG_SESSIOND_SIG_SCHEDULED_ROTATION);
-       if (ret) {
-               PERROR("sigaddset scheduled rotation");
-       }
-}
-
-/*
- * This is the same function as timer_signal_thread_qs, when it
- * returns, it means that no timer signr is currently pending or being handled
- * by the timer thread. This cannot be called from the timer thread.
- */
-static
-void timer_signal_thread_qs(unsigned int signr)
-{
-       sigset_t pending_set;
-       int ret;
-
-       /*
-        * We need to be the only thread interacting with the thread
-        * that manages signals for teardown synchronization.
-        */
-       pthread_mutex_lock(&timer_signal.lock);
-
-       /* Ensure we don't have any signal queued for this session. */
-       for (;;) {
-               ret = sigemptyset(&pending_set);
-               if (ret == -1) {
-                       PERROR("sigemptyset");
-               }
-               ret = sigpending(&pending_set);
-               if (ret == -1) {
-                       PERROR("sigpending");
-               }
-               if (!sigismember(&pending_set, signr)) {
-                       break;
-               }
-               caa_cpu_relax();
-       }
-
-       /*
-        * From this point, no new signal handler will be fired that would try to
-        * access "session". However, we still need to wait for any currently
-        * executing handler to complete.
-        */
-       cmm_smp_mb();
-       CMM_STORE_SHARED(timer_signal.qs_done, 0);
-       cmm_smp_mb();
-
-       /*
-        * Kill with LTTNG_SESSIOND_SIG_QS, so signal management thread
-        * wakes up.
-        */
-       kill(getpid(), LTTNG_SESSIOND_SIG_QS);
-
-       while (!CMM_LOAD_SHARED(timer_signal.qs_done)) {
-               caa_cpu_relax();
-       }
-       cmm_smp_mb();
-
-       pthread_mutex_unlock(&timer_signal.lock);
-}
-
-/*
- * Start a timer on a session that will fire at a given interval
- * (timer_interval_us) and fire a given signal (signal).
- *
- * Returns a negative value on error, 0 if a timer was created, and
- * a positive value if no timer was created (not an error).
- */
-static
-int timer_start(timer_t *timer_id, struct ltt_session *session,
-               unsigned int timer_interval_us, int signal, bool one_shot)
-{
-       int ret = 0, delete_ret;
-       struct sigevent sev = {};
-       struct itimerspec its;
-
-       sev.sigev_notify = SIGEV_SIGNAL;
-       sev.sigev_signo = signal;
-       sev.sigev_value.sival_ptr = session;
-       ret = timer_create(CLOCK_MONOTONIC, &sev, timer_id);
-       if (ret == -1) {
-               PERROR("timer_create");
-               goto end;
-       }
-
-       its.it_value.tv_sec = timer_interval_us / 1000000;
-       its.it_value.tv_nsec = (timer_interval_us % 1000000) * 1000;
-       if (one_shot) {
-               its.it_interval.tv_sec = 0;
-               its.it_interval.tv_nsec = 0;
-       } else {
-               its.it_interval.tv_sec = its.it_value.tv_sec;
-               its.it_interval.tv_nsec = its.it_value.tv_nsec;
-       }
-
-       ret = timer_settime(*timer_id, 0, &its, NULL);
-       if (ret == -1) {
-               PERROR("timer_settime");
-               goto error_destroy_timer;
-       }
-       goto end;
-
-error_destroy_timer:
-       delete_ret = timer_delete(*timer_id);
-       if (delete_ret == -1) {
-               PERROR("timer_delete");
-       }
-
-end:
-       return ret;
-}
-
-static
-int timer_stop(timer_t *timer_id, int signal)
-{
-       int ret = 0;
-
-       ret = timer_delete(*timer_id);
-       if (ret == -1) {
-               PERROR("timer_delete");
-               goto end;
-       }
-
-       timer_signal_thread_qs(signal);
-       *timer_id = 0;
-end:
-       return ret;
-}
-
-int timer_session_rotation_pending_check_start(struct ltt_session *session,
-               unsigned int interval_us)
-{
-       int ret;
-
-       if (!session_get(session)) {
-               ret = -1;
-               goto end;
-       }
-       DBG("Enabling session rotation pending check timer on session %" PRIu64,
-                       session->id);
-       /*
-        * We arm this timer in a one-shot mode so we don't have to disable it
-        * explicitly (which could deadlock if the timer thread is blocked
-        * writing in the rotation_timer_pipe).
-        *
-        * Instead, we re-arm it if needed after the rotation_pending check as
-        * returned. Also, this timer is usually only needed once, so there is
-        * no need to go through the whole signal teardown scheme everytime.
-        */
-       ret = timer_start(&session->rotation_pending_check_timer,
-                       session, interval_us,
-                       LTTNG_SESSIOND_SIG_PENDING_ROTATION_CHECK,
-                       /* one-shot */ true);
-       if (ret == 0) {
-               session->rotation_pending_check_timer_enabled = true;
-       }
-end:
-       return ret;
-}
-
-/*
- * Call with session and session_list locks held.
- */
-int timer_session_rotation_pending_check_stop(struct ltt_session *session)
-{
-       int ret;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(session->rotation_pending_check_timer_enabled);
-
-       DBG("Disabling session rotation pending check timer on session %" PRIu64,
-                       session->id);
-       ret = timer_stop(&session->rotation_pending_check_timer,
-                       LTTNG_SESSIOND_SIG_PENDING_ROTATION_CHECK);
-       if (ret == -1) {
-               ERR("Failed to stop rotate_pending_check timer");
-       } else {
-               session->rotation_pending_check_timer_enabled = false;
-               /*
-                * The timer's reference to the session can be released safely.
-                */
-               session_put(session);
-       }
-       return ret;
-}
-
-/*
- * Call with session and session_list locks held.
- */
-int timer_session_rotation_schedule_timer_start(struct ltt_session *session,
-               unsigned int interval_us)
-{
-       int ret;
-
-       if (!session_get(session)) {
-               ret = -1;
-               goto end;
-       }
-       DBG("Enabling scheduled rotation timer on session \"%s\" (%ui %s)", session->name,
-                       interval_us, USEC_UNIT);
-       ret = timer_start(&session->rotation_schedule_timer, session,
-                       interval_us, LTTNG_SESSIOND_SIG_SCHEDULED_ROTATION,
-                       /* one-shot */ false);
-       if (ret < 0) {
-               goto end;
-       }
-       session->rotation_schedule_timer_enabled = true;
-end:
-       return ret;
-}
-
-/*
- * Call with session and session_list locks held.
- */
-int timer_session_rotation_schedule_timer_stop(struct ltt_session *session)
-{
-       int ret = 0;
-
-       LTTNG_ASSERT(session);
-
-       if (!session->rotation_schedule_timer_enabled) {
-               goto end;
-       }
-
-       DBG("Disabling scheduled rotation timer on session %s", session->name);
-       ret = timer_stop(&session->rotation_schedule_timer,
-                       LTTNG_SESSIOND_SIG_SCHEDULED_ROTATION);
-       if (ret < 0) {
-               ERR("Failed to stop scheduled rotation timer of session \"%s\"",
-                               session->name);
-               goto end;
-       }
-
-       session->rotation_schedule_timer_enabled = false;
-       /* The timer's reference to the session can be released safely. */
-       session_put(session);
-       ret = 0;
-end:
-       return ret;
-}
-
-/*
- * Block the RT signals for the entire process. It must be called from the
- * sessiond main before creating the threads
- */
-int timer_signal_init(void)
-{
-       int ret;
-       sigset_t mask;
-
-       /* Block signal for entire process, so only our thread processes it. */
-       setmask(&mask);
-       ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
-       if (ret) {
-               errno = ret;
-               PERROR("pthread_sigmask");
-               return -1;
-       }
-       return 0;
-}
-
-/*
- * This thread is the sighandler for the timer signals.
- */
-static
-void *thread_timer(void *data)
-{
-       int signr;
-       sigset_t mask;
-       siginfo_t info;
-       struct timer_thread_parameters *ctx = data;
-
-       rcu_register_thread();
-       rcu_thread_online();
-
-       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_TIMER);
-       health_code_update();
-
-       /* Only self thread will receive signal mask. */
-       setmask(&mask);
-       CMM_STORE_SHARED(timer_signal.tid, pthread_self());
-
-       while (1) {
-               health_code_update();
-
-               health_poll_entry();
-               signr = sigwaitinfo(&mask, &info);
-               health_poll_exit();
-
-               /*
-                * NOTE: cascading conditions are used instead of a switch case
-                * since the use of SIGRTMIN in the definition of the signals'
-                * values prevents the reduction to an integer constant.
-                */
-               if (signr == -1) {
-                       if (errno != EINTR) {
-                               PERROR("sigwaitinfo");
-                       }
-                       continue;
-               } else if (signr == LTTNG_SESSIOND_SIG_QS) {
-                       cmm_smp_mb();
-                       CMM_STORE_SHARED(timer_signal.qs_done, 1);
-                       cmm_smp_mb();
-               } else if (signr == LTTNG_SESSIOND_SIG_EXIT) {
-                       goto end;
-               } else if (signr == LTTNG_SESSIOND_SIG_PENDING_ROTATION_CHECK) {
-                       struct ltt_session *session =
-                                       (struct ltt_session *) info.si_value.sival_ptr;
-
-                       rotation_thread_enqueue_job(ctx->rotation_thread_job_queue,
-                                       ROTATION_THREAD_JOB_TYPE_CHECK_PENDING_ROTATION,
-                                       session);
-               } else if (signr == LTTNG_SESSIOND_SIG_SCHEDULED_ROTATION) {
-                       rotation_thread_enqueue_job(ctx->rotation_thread_job_queue,
-                                       ROTATION_THREAD_JOB_TYPE_SCHEDULED_ROTATION,
-                                       (struct ltt_session *) info.si_value.sival_ptr);
-                       /*
-                        * The scheduled periodic rotation timer is not in
-                        * "one-shot" mode. The reference to the session is not
-                        * released since the timer is still enabled and can
-                        * still fire.
-                        */
-               } else {
-                       ERR("Unexpected signal %d", info.si_signo);
-               }
-       }
-
-end:
-       DBG("Thread exit");
-       health_unregister(the_health_sessiond);
-       rcu_thread_offline();
-       rcu_unregister_thread();
-       return NULL;
-}
-
-static
-bool shutdown_timer_thread(void *data)
-{
-       return kill(getpid(), LTTNG_SESSIOND_SIG_EXIT) == 0;
-}
-
-bool launch_timer_thread(
-               struct timer_thread_parameters *timer_thread_parameters)
-{
-       struct lttng_thread *thread;
-
-       thread = lttng_thread_create("Timer",
-                       thread_timer,
-                       shutdown_timer_thread,
-                       NULL,
-                       timer_thread_parameters);
-       if (!thread) {
-               goto error;
-       }
-       lttng_thread_put(thread);
-       return true;
-error:
-       return false;
-}
diff --git a/src/bin/lttng-sessiond/timer.cpp b/src/bin/lttng-sessiond/timer.cpp
new file mode 100644 (file)
index 0000000..5a1f35d
--- /dev/null
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2017 Julien Desfossez <jdesfossez@efficios.com>
+ * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <inttypes.h>
+#include <signal.h>
+
+#include "timer.h"
+#include "health-sessiond.h"
+#include "rotation-thread.h"
+#include "thread.h"
+
+#define LTTNG_SESSIOND_SIG_QS                          SIGRTMIN + 10
+#define LTTNG_SESSIOND_SIG_EXIT                                SIGRTMIN + 11
+#define LTTNG_SESSIOND_SIG_PENDING_ROTATION_CHECK      SIGRTMIN + 12
+#define LTTNG_SESSIOND_SIG_SCHEDULED_ROTATION          SIGRTMIN + 13
+
+#define UINT_TO_PTR(value)                             \
+       ({                                              \
+               LTTNG_ASSERT(value <= UINTPTR_MAX);             \
+               (void *) (uintptr_t) value;             \
+       })
+#define PTR_TO_UINT(ptr) ((uintptr_t) ptr)
+
+/*
+ * Handle the timer teardown race w.r.t. the freeing of private data by the
+ * sessiond. Signals are handled by a single thread, which permits a
+ * synchronization point between the handling of each signal. An internal
+ * lock ensures mutual exclusion.
+ */
+static
+struct timer_signal_data {
+       /* Thread managing signals. */
+       pthread_t tid;
+       int qs_done;
+       pthread_mutex_t lock;
+} timer_signal = {
+       .tid = 0,
+       .qs_done = 0,
+       .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+/*
+ * Set custom signal mask to current thread.
+ */
+static
+void setmask(sigset_t *mask)
+{
+       int ret;
+
+       ret = sigemptyset(mask);
+       if (ret) {
+               PERROR("sigemptyset");
+       }
+       ret = sigaddset(mask, LTTNG_SESSIOND_SIG_QS);
+       if (ret) {
+               PERROR("sigaddset teardown");
+       }
+       ret = sigaddset(mask, LTTNG_SESSIOND_SIG_EXIT);
+       if (ret) {
+               PERROR("sigaddset exit");
+       }
+       ret = sigaddset(mask, LTTNG_SESSIOND_SIG_PENDING_ROTATION_CHECK);
+       if (ret) {
+               PERROR("sigaddset pending rotation check");
+       }
+       ret = sigaddset(mask, LTTNG_SESSIOND_SIG_SCHEDULED_ROTATION);
+       if (ret) {
+               PERROR("sigaddset scheduled rotation");
+       }
+}
+
+/*
+ * When this function returns, it means that no timer signal 'signr' is
+ * currently pending or being handled by the timer thread. This must not be
+ * called from the timer thread.
+ */
+static
+void timer_signal_thread_qs(unsigned int signr)
+{
+       sigset_t pending_set;
+       int ret;
+
+       /*
+        * We need to be the only thread interacting with the thread
+        * that manages signals for teardown synchronization.
+        */
+       pthread_mutex_lock(&timer_signal.lock);
+
+       /* Ensure we don't have any signal queued for this session. */
+       for (;;) {
+               ret = sigemptyset(&pending_set);
+               if (ret == -1) {
+                       PERROR("sigemptyset");
+               }
+               ret = sigpending(&pending_set);
+               if (ret == -1) {
+                       PERROR("sigpending");
+               }
+               if (!sigismember(&pending_set, signr)) {
+                       break;
+               }
+               caa_cpu_relax();
+       }
+
+       /*
+        * From this point, no new signal handler will be fired that would try to
+        * access "session". However, we still need to wait for any currently
+        * executing handler to complete.
+        */
+       cmm_smp_mb();
+       CMM_STORE_SHARED(timer_signal.qs_done, 0);
+       cmm_smp_mb();
+
+       /*
+        * Kill with LTTNG_SESSIOND_SIG_QS, so signal management thread
+        * wakes up.
+        */
+       kill(getpid(), LTTNG_SESSIOND_SIG_QS);
+
+       while (!CMM_LOAD_SHARED(timer_signal.qs_done)) {
+               caa_cpu_relax();
+       }
+       cmm_smp_mb();
+
+       pthread_mutex_unlock(&timer_signal.lock);
+}
+
+/*
+ * Start a timer on a session that will fire at a given interval
+ * (timer_interval_us) and fire a given signal (signal).
+ *
+ * Returns a negative value on error, 0 if a timer was created, and
+ * a positive value if no timer was created (not an error).
+ */
+static
+int timer_start(timer_t *timer_id, struct ltt_session *session,
+               unsigned int timer_interval_us, int signal, bool one_shot)
+{
+       int ret = 0, delete_ret;
+       struct sigevent sev = {};
+       struct itimerspec its;
+
+       sev.sigev_notify = SIGEV_SIGNAL;
+       sev.sigev_signo = signal;
+       sev.sigev_value.sival_ptr = session;
+       ret = timer_create(CLOCK_MONOTONIC, &sev, timer_id);
+       if (ret == -1) {
+               PERROR("timer_create");
+               goto end;
+       }
+
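+       /*
+        * Convert the interval, expressed in microseconds, to the
+        * seconds/nanoseconds layout expected by timer_settime(). A zero
+        * it_interval disarms the timer after its first expiration (one-shot).
+        */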
+       its.it_value.tv_sec = timer_interval_us / 1000000;
+       its.it_value.tv_nsec = (timer_interval_us % 1000000) * 1000;
+       if (one_shot) {
+               its.it_interval.tv_sec = 0;
+               its.it_interval.tv_nsec = 0;
+       } else {
+               its.it_interval.tv_sec = its.it_value.tv_sec;
+               its.it_interval.tv_nsec = its.it_value.tv_nsec;
+       }
+
+       ret = timer_settime(*timer_id, 0, &its, NULL);
+       if (ret == -1) {
+               PERROR("timer_settime");
+               goto error_destroy_timer;
+       }
+       goto end;
+
+error_destroy_timer:
+       delete_ret = timer_delete(*timer_id);
+       if (delete_ret == -1) {
+               PERROR("timer_delete");
+       }
+
+end:
+       return ret;
+}
+
+static
+int timer_stop(timer_t *timer_id, int signal)
+{
+       int ret = 0;
+
+       ret = timer_delete(*timer_id);
+       if (ret == -1) {
+               PERROR("timer_delete");
+               goto end;
+       }
+
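+       /*
+        * Wait until no occurrence of 'signal' is pending or being handled by
+        * the timer thread; only then can the caller safely release the
+        * resources referenced by the timer.
+        */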
+       timer_signal_thread_qs(signal);
+       *timer_id = 0;
+end:
+       return ret;
+}
+
+int timer_session_rotation_pending_check_start(struct ltt_session *session,
+               unsigned int interval_us)
+{
+       int ret;
+
+       if (!session_get(session)) {
+               ret = -1;
+               goto end;
+       }
+       DBG("Enabling session rotation pending check timer on session %" PRIu64,
+                       session->id);
+       /*
+        * We arm this timer in a one-shot mode so we don't have to disable it
+        * explicitly (which could deadlock if the timer thread is blocked
+        * writing in the rotation_timer_pipe).
+        *
+        * Instead, we re-arm it if needed after the rotation_pending check has
+        * returned. Also, this timer is usually only needed once, so there is
+        * no need to go through the whole signal teardown scheme every time.
+        */
+       ret = timer_start(&session->rotation_pending_check_timer,
+                       session, interval_us,
+                       LTTNG_SESSIOND_SIG_PENDING_ROTATION_CHECK,
+                       /* one-shot */ true);
+       if (ret == 0) {
+               session->rotation_pending_check_timer_enabled = true;
+       }
+end:
+       return ret;
+}
+
+/*
+ * Call with session and session_list locks held.
+ */
+int timer_session_rotation_pending_check_stop(struct ltt_session *session)
+{
+       int ret;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(session->rotation_pending_check_timer_enabled);
+
+       DBG("Disabling session rotation pending check timer on session %" PRIu64,
+                       session->id);
+       ret = timer_stop(&session->rotation_pending_check_timer,
+                       LTTNG_SESSIOND_SIG_PENDING_ROTATION_CHECK);
+       if (ret == -1) {
+               ERR("Failed to stop rotate_pending_check timer");
+       } else {
+               session->rotation_pending_check_timer_enabled = false;
+               /*
+                * The timer's reference to the session can be released safely.
+                */
+               session_put(session);
+       }
+       return ret;
+}
+
+/*
+ * Call with session and session_list locks held.
+ */
+int timer_session_rotation_schedule_timer_start(struct ltt_session *session,
+               unsigned int interval_us)
+{
+       int ret;
+
+       if (!session_get(session)) {
+               ret = -1;
+               goto end;
+       }
+       DBG("Enabling scheduled rotation timer on session \"%s\" (%ui %s)", session->name,
+                       interval_us, USEC_UNIT);
+       ret = timer_start(&session->rotation_schedule_timer, session,
+                       interval_us, LTTNG_SESSIOND_SIG_SCHEDULED_ROTATION,
+                       /* one-shot */ false);
+       if (ret < 0) {
+               goto end;
+       }
+       session->rotation_schedule_timer_enabled = true;
+end:
+       return ret;
+}
+
+/*
+ * Call with session and session_list locks held.
+ */
+int timer_session_rotation_schedule_timer_stop(struct ltt_session *session)
+{
+       int ret = 0;
+
+       LTTNG_ASSERT(session);
+
+       if (!session->rotation_schedule_timer_enabled) {
+               goto end;
+       }
+
+       DBG("Disabling scheduled rotation timer on session %s", session->name);
+       ret = timer_stop(&session->rotation_schedule_timer,
+                       LTTNG_SESSIOND_SIG_SCHEDULED_ROTATION);
+       if (ret < 0) {
+               ERR("Failed to stop scheduled rotation timer of session \"%s\"",
+                               session->name);
+               goto end;
+       }
+
+       session->rotation_schedule_timer_enabled = false;
+       /* The timer's reference to the session can be released safely. */
+       session_put(session);
+       ret = 0;
+end:
+       return ret;
+}
+
+/*
+ * Block the RT signals for the entire process. This must be called from the
+ * sessiond's main before the other threads are created.
+ */
+int timer_signal_init(void)
+{
+       int ret;
+       sigset_t mask;
+
+       /* Block signal for entire process, so only our thread processes it. */
+       setmask(&mask);
+       ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
+       if (ret) {
+               errno = ret;
+               PERROR("pthread_sigmask");
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * This thread is the sighandler for the timer signals.
+ */
+static
+void *thread_timer(void *data)
+{
+       int signr;
+       sigset_t mask;
+       siginfo_t info;
+       struct timer_thread_parameters *ctx = (timer_thread_parameters *) data;
+
+       rcu_register_thread();
+       rcu_thread_online();
+
+       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_TIMER);
+       health_code_update();
+
+       /* Only this thread will receive the signals in this mask. */
+       setmask(&mask);
+       CMM_STORE_SHARED(timer_signal.tid, pthread_self());
+
+       while (1) {
+               health_code_update();
+
+               health_poll_entry();
+               signr = sigwaitinfo(&mask, &info);
+               health_poll_exit();
+
+               /*
+                * NOTE: cascading conditions are used instead of a switch case
+                * since the use of SIGRTMIN in the definition of the signals'
+                * values prevents the reduction to an integer constant.
+                */
+               if (signr == -1) {
+                       if (errno != EINTR) {
+                               PERROR("sigwaitinfo");
+                       }
+                       continue;
+               } else if (signr == LTTNG_SESSIOND_SIG_QS) {
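+                       /*
+                        * Quiescent-state handshake: timer_signal_thread_qs()
+                        * is busy-waiting on qs_done; publish it so the
+                        * waiting thread can proceed.
+                        */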
+                       cmm_smp_mb();
+                       CMM_STORE_SHARED(timer_signal.qs_done, 1);
+                       cmm_smp_mb();
+               } else if (signr == LTTNG_SESSIOND_SIG_EXIT) {
+                       goto end;
+               } else if (signr == LTTNG_SESSIOND_SIG_PENDING_ROTATION_CHECK) {
+                       struct ltt_session *session =
+                                       (struct ltt_session *) info.si_value.sival_ptr;
+
+                       rotation_thread_enqueue_job(ctx->rotation_thread_job_queue,
+                                       ROTATION_THREAD_JOB_TYPE_CHECK_PENDING_ROTATION,
+                                       session);
+               } else if (signr == LTTNG_SESSIOND_SIG_SCHEDULED_ROTATION) {
+                       rotation_thread_enqueue_job(ctx->rotation_thread_job_queue,
+                                       ROTATION_THREAD_JOB_TYPE_SCHEDULED_ROTATION,
+                                       (struct ltt_session *) info.si_value.sival_ptr);
+                       /*
+                        * The scheduled periodic rotation timer is not in
+                        * "one-shot" mode. The reference to the session is not
+                        * released since the timer is still enabled and can
+                        * still fire.
+                        */
+               } else {
+                       ERR("Unexpected signal %d", info.si_signo);
+               }
+       }
+
+end:
+       DBG("Thread exit");
+       health_unregister(the_health_sessiond);
+       rcu_thread_offline();
+       rcu_unregister_thread();
+       return NULL;
+}
+
+static
+bool shutdown_timer_thread(void *data)
+{
+       return kill(getpid(), LTTNG_SESSIOND_SIG_EXIT) == 0;
+}
+
+bool launch_timer_thread(
+               struct timer_thread_parameters *timer_thread_parameters)
+{
+       struct lttng_thread *thread;
+
+       thread = lttng_thread_create("Timer",
+                       thread_timer,
+                       shutdown_timer_thread,
+                       NULL,
+                       timer_thread_parameters);
+       if (!thread) {
+               goto error;
+       }
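+       /*
+        * Release the launcher's reference; the thread list keeps its own
+        * reference, keeping the timer thread alive until it is shut down.
+        */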
+       lttng_thread_put(thread);
+       return true;
+error:
+       return false;
+}
diff --git a/src/bin/lttng-sessiond/trace-kernel.c b/src/bin/lttng-sessiond/trace-kernel.c
deleted file mode 100644 (file)
index cbafd12..0000000
+++ /dev/null
@@ -1,1052 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <lttng/event.h>
-#include <lttng/lttng-error.h>
-#include <lttng/kernel-probe.h>
-#include <lttng/userspace-probe.h>
-#include <lttng/userspace-probe-internal.h>
-#include <lttng/event-rule/event-rule.h>
-#include <lttng/event-rule/event-rule-internal.h>
-#include <lttng/event-rule/kernel-kprobe.h>
-#include <lttng/event-rule/kernel-kprobe-internal.h>
-#include <lttng/event-rule/kernel-syscall.h>
-#include <lttng/event-rule/kernel-syscall-internal.h>
-#include <lttng/event-rule/kernel-tracepoint.h>
-#include <lttng/event-rule/kernel-tracepoint-internal.h>
-#include <lttng/event-rule/kernel-uprobe.h>
-#include <lttng/event-rule/kernel-uprobe-internal.h>
-#include <common/common.h>
-#include <common/defaults.h>
-#include <common/trace-chunk.h>
-#include <common/macros.h>
-
-#include "consumer.h"
-#include "trace-kernel.h"
-#include "lttng-sessiond.h"
-#include "notification-thread-commands.h"
-
-/*
- * Find the channel name for the given kernel session.
- */
-struct ltt_kernel_channel *trace_kernel_get_channel_by_name(
-               const char *name, struct ltt_kernel_session *session)
-{
-       struct ltt_kernel_channel *chan;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(name);
-
-       /*
-        * If we receive an empty string for channel name, it means the
-        * default channel name is requested.
-        */
-       if (name[0] == '\0')
-               name = DEFAULT_CHANNEL_NAME;
-
-       DBG("Trying to find channel %s", name);
-
-       cds_list_for_each_entry(chan, &session->channel_list.head, list) {
-               if (strcmp(name, chan->channel->name) == 0) {
-                       DBG("Found channel by name %s", name);
-                       return chan;
-               }
-       }
-
-       return NULL;
-}
-
-/*
- * Find the event for the given channel.
- */
-struct ltt_kernel_event *trace_kernel_find_event(
-               char *name, struct ltt_kernel_channel *channel,
-               enum lttng_event_type type,
-               struct lttng_bytecode *filter)
-{
-       struct ltt_kernel_event *ev;
-       int found = 0;
-
-       LTTNG_ASSERT(name);
-       LTTNG_ASSERT(channel);
-
-       cds_list_for_each_entry(ev, &channel->events_list.head, list) {
-               if (type != LTTNG_EVENT_ALL && ev->type != type) {
-                       continue;
-               }
-               if (strcmp(name, ev->event->name)) {
-                       continue;
-               }
-               if ((ev->filter && !filter) || (!ev->filter && filter)) {
-                       continue;
-               }
-               if (ev->filter && filter) {
-                       if (ev->filter->len != filter->len ||
-                                       memcmp(ev->filter->data, filter->data,
-                                               filter->len) != 0) {
-                               continue;
-                       }
-               }
-               found = 1;
-               break;
-       }
-       if (found) {
-               DBG("Found event %s for channel %s", name,
-                       channel->channel->name);
-               return ev;
-       } else {
-               return NULL;
-       }
-}
-
-/*
- * Find the event name for the given channel.
- */
-struct ltt_kernel_event *trace_kernel_get_event_by_name(
-               char *name, struct ltt_kernel_channel *channel,
-               enum lttng_event_type type)
-{
-       struct ltt_kernel_event *ev;
-       int found = 0;
-
-       LTTNG_ASSERT(name);
-       LTTNG_ASSERT(channel);
-
-       cds_list_for_each_entry(ev, &channel->events_list.head, list) {
-               if (type != LTTNG_EVENT_ALL && ev->type != type) {
-                       continue;
-               }
-               if (strcmp(name, ev->event->name)) {
-                       continue;
-               }
-               found = 1;
-               break;
-       }
-       if (found) {
-               DBG("Found event %s for channel %s", name,
-                       channel->channel->name);
-               return ev;
-       } else {
-               return NULL;
-       }
-}
-
-/*
- * Allocate and initialize a kernel session data structure.
- *
- * Return pointer to structure or NULL.
- */
-struct ltt_kernel_session *trace_kernel_create_session(void)
-{
-       struct ltt_kernel_session *lks = NULL;
-
-       /* Allocate a new ltt kernel session */
-       lks = zmalloc(sizeof(struct ltt_kernel_session));
-       if (lks == NULL) {
-               PERROR("create kernel session zmalloc");
-               goto alloc_error;
-       }
-
-       /* Init data structure */
-       lks->fd = -1;
-       lks->metadata_stream_fd = -1;
-       lks->channel_count = 0;
-       lks->stream_count_global = 0;
-       lks->metadata = NULL;
-       CDS_INIT_LIST_HEAD(&lks->channel_list.head);
-
-       lks->tracker_pid = process_attr_tracker_create();
-       if (!lks->tracker_pid) {
-               goto error;
-       }
-       lks->tracker_vpid = process_attr_tracker_create();
-       if (!lks->tracker_vpid) {
-               goto error;
-       }
-       lks->tracker_uid = process_attr_tracker_create();
-       if (!lks->tracker_uid) {
-               goto error;
-       }
-       lks->tracker_vuid = process_attr_tracker_create();
-       if (!lks->tracker_vuid) {
-               goto error;
-       }
-       lks->tracker_gid = process_attr_tracker_create();
-       if (!lks->tracker_gid) {
-               goto error;
-       }
-       lks->tracker_vgid = process_attr_tracker_create();
-       if (!lks->tracker_vgid) {
-               goto error;
-       }
-       lks->consumer = consumer_create_output(CONSUMER_DST_LOCAL);
-       if (lks->consumer == NULL) {
-               goto error;
-       }
-
-       return lks;
-
-error:
-       process_attr_tracker_destroy(lks->tracker_pid);
-       process_attr_tracker_destroy(lks->tracker_vpid);
-       process_attr_tracker_destroy(lks->tracker_uid);
-       process_attr_tracker_destroy(lks->tracker_vuid);
-       process_attr_tracker_destroy(lks->tracker_gid);
-       process_attr_tracker_destroy(lks->tracker_vgid);
-       free(lks);
-
-alloc_error:
-       return NULL;
-}
-
-/*
- * Allocate and initialize a kernel channel data structure.
- *
- * Return pointer to structure or NULL.
- */
-struct ltt_kernel_channel *trace_kernel_create_channel(
-               struct lttng_channel *chan)
-{
-       struct ltt_kernel_channel *lkc;
-       struct lttng_channel_extended *extended = NULL;
-
-       LTTNG_ASSERT(chan);
-
-       lkc = zmalloc(sizeof(struct ltt_kernel_channel));
-       if (lkc == NULL) {
-               PERROR("ltt_kernel_channel zmalloc");
-               goto error;
-       }
-
-       lkc->channel = zmalloc(sizeof(struct lttng_channel));
-       if (lkc->channel == NULL) {
-               PERROR("lttng_channel zmalloc");
-               goto error;
-       }
-
-       extended = zmalloc(sizeof(struct lttng_channel_extended));
-       if (!extended) {
-               PERROR("lttng_channel_channel zmalloc");
-               goto error;
-       }
-       memcpy(lkc->channel, chan, sizeof(struct lttng_channel));
-       memcpy(extended, chan->attr.extended.ptr, sizeof(struct lttng_channel_extended));
-       lkc->channel->attr.extended.ptr = extended;
-       extended = NULL;
-
-       /*
-        * If we receive an empty string for channel name, it means the
-        * default channel name is requested.
-        */
-       if (chan->name[0] == '\0') {
-               strncpy(lkc->channel->name, DEFAULT_CHANNEL_NAME,
-                       sizeof(lkc->channel->name));
-       }
-       lkc->channel->name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
-
-       lkc->fd = -1;
-       lkc->stream_count = 0;
-       lkc->event_count = 0;
-       lkc->enabled = 1;
-       lkc->published_to_notification_thread = false;
-       /* Init linked list */
-       CDS_INIT_LIST_HEAD(&lkc->events_list.head);
-       CDS_INIT_LIST_HEAD(&lkc->stream_list.head);
-       CDS_INIT_LIST_HEAD(&lkc->ctx_list);
-
-       return lkc;
-
-error:
-       if (lkc) {
-               free(lkc->channel);
-       }
-       free(extended);
-       free(lkc);
-       return NULL;
-}
-
-/*
- * Allocate and init a kernel context object.
- *
- * Return the allocated object or NULL on error.
- */
-struct ltt_kernel_context *trace_kernel_create_context(
-               struct lttng_kernel_abi_context *ctx)
-{
-       struct ltt_kernel_context *kctx;
-
-       kctx = zmalloc(sizeof(*kctx));
-       if (!kctx) {
-               PERROR("zmalloc kernel context");
-               goto error;
-       }
-
-       if (ctx) {
-               memcpy(&kctx->ctx, ctx, sizeof(kctx->ctx));
-       }
-error:
-       return kctx;
-}
-
-/*
- * Allocate and init a kernel context object from an existing kernel context
- * object.
- *
- * Return the allocated object or NULL on error.
- */
-struct ltt_kernel_context *trace_kernel_copy_context(
-               struct ltt_kernel_context *kctx)
-{
-       struct ltt_kernel_context *kctx_copy;
-
-       LTTNG_ASSERT(kctx);
-       kctx_copy = zmalloc(sizeof(*kctx_copy));
-       if (!kctx_copy) {
-               PERROR("zmalloc ltt_kernel_context");
-               goto error;
-       }
-
-       memcpy(kctx_copy, kctx, sizeof(*kctx_copy));
-       memset(&kctx_copy->list, 0, sizeof(kctx_copy->list));
-
-error:
-       return kctx_copy;
-}
-
-/*
- * Allocate and initialize a kernel event. Set name and event type.
- * We own filter_expression, and filter.
- *
- * Return pointer to structure or NULL.
- */
-enum lttng_error_code trace_kernel_create_event(
-               struct lttng_event *ev, char *filter_expression,
-               struct lttng_bytecode *filter,
-               struct ltt_kernel_event **kernel_event)
-{
-       enum lttng_error_code ret;
-       struct lttng_kernel_abi_event *attr;
-       struct ltt_kernel_event *local_kernel_event;
-       struct lttng_userspace_probe_location *userspace_probe_location = NULL;
-
-       LTTNG_ASSERT(ev);
-
-       local_kernel_event = zmalloc(sizeof(struct ltt_kernel_event));
-       attr = zmalloc(sizeof(struct lttng_kernel_abi_event));
-       if (local_kernel_event == NULL || attr == NULL) {
-               PERROR("kernel event zmalloc");
-               ret = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       switch (ev->type) {
-       case LTTNG_EVENT_PROBE:
-               attr->instrumentation = LTTNG_KERNEL_ABI_KPROBE;
-               attr->u.kprobe.addr = ev->attr.probe.addr;
-               attr->u.kprobe.offset = ev->attr.probe.offset;
-               strncpy(attr->u.kprobe.symbol_name,
-                               ev->attr.probe.symbol_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN);
-               attr->u.kprobe.symbol_name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
-               break;
-       case LTTNG_EVENT_USERSPACE_PROBE:
-       {
-               const struct lttng_userspace_probe_location* location = NULL;
-               const struct lttng_userspace_probe_location_lookup_method *lookup = NULL;
-
-               location = lttng_event_get_userspace_probe_location(ev);
-               if (!location) {
-                       ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
-                       goto error;
-               }
-
-               /*
-                * From this point on, the specific term 'uprobe' is used
-                * instead of the generic 'userspace probe' because it's the
-                * technology used at the moment for this instrumentation.
-                * LTTng currently implements userspace probes using uprobes.
-                * In the interactions with the kernel tracer, we use the
-                * uprobe term.
-                */
-               attr->instrumentation = LTTNG_KERNEL_ABI_UPROBE;
-
-               lookup = lttng_userspace_probe_location_get_lookup_method(
-                               location);
-               if (!lookup) {
-                       ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
-                       goto error;
-               }
-
-               /*
-                * From the kernel tracer's perspective, all userspace probe
-                * event types are all the same: a file and an offset.
-                */
-               switch (lttng_userspace_probe_location_lookup_method_get_type(lookup)) {
-               case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF:
-                       /* Get the file descriptor on the target binary. */
-                       attr->u.uprobe.fd =
-                                       lttng_userspace_probe_location_function_get_binary_fd(location);
-
-                       /*
-                        * Save a reference to the probe location used during
-                        * the listing of events.
-                        */
-                       userspace_probe_location =
-                                       lttng_userspace_probe_location_copy(location);
-                       break;
-               case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT:
-                       /* Get the file descriptor on the target binary. */
-                       attr->u.uprobe.fd =
-                                       lttng_userspace_probe_location_tracepoint_get_binary_fd(location);
-
-                       /*
-                        * Save a reference to the probe location used during the listing of
-                        * events.
-                        */
-                       userspace_probe_location =
-                                       lttng_userspace_probe_location_copy(location);
-                       break;
-               default:
-                       DBG("Unsupported lookup method type");
-                       ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
-                       goto error;
-               }
-               break;
-       }
-       case LTTNG_EVENT_FUNCTION:
-               attr->instrumentation = LTTNG_KERNEL_ABI_KRETPROBE;
-               attr->u.kretprobe.addr = ev->attr.probe.addr;
-               attr->u.kretprobe.offset = ev->attr.probe.offset;
-               strncpy(attr->u.kretprobe.symbol_name,
-                               ev->attr.probe.symbol_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN);
-               attr->u.kretprobe.symbol_name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
-               break;
-       case LTTNG_EVENT_FUNCTION_ENTRY:
-               attr->instrumentation = LTTNG_KERNEL_ABI_FUNCTION;
-               strncpy(attr->u.ftrace.symbol_name,
-                               ev->attr.ftrace.symbol_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN);
-               attr->u.ftrace.symbol_name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
-               break;
-       case LTTNG_EVENT_TRACEPOINT:
-               attr->instrumentation = LTTNG_KERNEL_ABI_TRACEPOINT;
-               break;
-       case LTTNG_EVENT_SYSCALL:
-               attr->instrumentation = LTTNG_KERNEL_ABI_SYSCALL;
-               attr->u.syscall.abi = LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL;
-               attr->u.syscall.entryexit = LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT;
-               attr->u.syscall.match = LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME;
-               break;
-       case LTTNG_EVENT_ALL:
-               attr->instrumentation = LTTNG_KERNEL_ABI_ALL;
-               break;
-       default:
-               ERR("Unknown kernel instrumentation type (%d)", ev->type);
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       /* Copy event name */
-       strncpy(attr->name, ev->name, LTTNG_KERNEL_ABI_SYM_NAME_LEN);
-       attr->name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
-
-       /* Setting up a kernel event */
-       local_kernel_event->fd = -1;
-       local_kernel_event->event = attr;
-       local_kernel_event->enabled = 1;
-       local_kernel_event->filter_expression = filter_expression;
-       local_kernel_event->filter = filter;
-       local_kernel_event->userspace_probe_location = userspace_probe_location;
-
-       *kernel_event = local_kernel_event;
-
-       return LTTNG_OK;
-
-error:
-       free(filter_expression);
-       free(filter);
-       free(local_kernel_event);
-       free(attr);
-       return ret;
-}
-
-/*
- * Allocate and initialize a kernel token event rule.
- *
- * Return pointer to structure or NULL.
- */
-enum lttng_error_code trace_kernel_create_event_notifier_rule(
-               struct lttng_trigger *trigger,
-               uint64_t token,
-               uint64_t error_counter_index,
-               struct ltt_kernel_event_notifier_rule **event_notifier_rule)
-{
-       enum lttng_error_code ret = LTTNG_OK;
-       enum lttng_condition_type condition_type;
-       enum lttng_event_rule_type event_rule_type;
-       enum lttng_condition_status condition_status;
-       struct ltt_kernel_event_notifier_rule *local_kernel_token_event_rule;
-       const struct lttng_condition *condition = NULL;
-       const struct lttng_event_rule *event_rule = NULL;
-
-       LTTNG_ASSERT(event_notifier_rule);
-
-       condition = lttng_trigger_get_const_condition(trigger);
-       LTTNG_ASSERT(condition);
-
-       condition_type = lttng_condition_get_type(condition);
-       LTTNG_ASSERT(condition_type == LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
-
-       condition_status = lttng_condition_event_rule_matches_get_rule(
-                       condition, &event_rule);
-       LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
-       LTTNG_ASSERT(event_rule);
-
-       event_rule_type = lttng_event_rule_get_type(event_rule);
-       LTTNG_ASSERT(event_rule_type != LTTNG_EVENT_RULE_TYPE_UNKNOWN);
-
-       local_kernel_token_event_rule =
-                       zmalloc(sizeof(struct ltt_kernel_event_notifier_rule));
-       if (local_kernel_token_event_rule == NULL) {
-               PERROR("Failed to allocate ltt_kernel_token_event_rule structure");
-               ret = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       local_kernel_token_event_rule->fd = -1;
-       local_kernel_token_event_rule->enabled = 1;
-       local_kernel_token_event_rule->token = token;
-       local_kernel_token_event_rule->error_counter_index = error_counter_index;
-
-       /* Get the reference of the event rule. */
-       lttng_trigger_get(trigger);
-
-       local_kernel_token_event_rule->trigger = trigger;
-       /* The event rule still owns the filter and bytecode. */
-       local_kernel_token_event_rule->filter =
-                       lttng_event_rule_get_filter_bytecode(event_rule);
-
-       DBG3("Created kernel event notifier rule: token =  %" PRIu64,
-                       local_kernel_token_event_rule->token);
-error:
-       *event_notifier_rule = local_kernel_token_event_rule;
-       return ret;
-}
-
-/*
- * Initialize a kernel trigger from an event rule.
- */
-enum lttng_error_code trace_kernel_init_event_notifier_from_event_rule(
-               const struct lttng_event_rule *rule,
-               struct lttng_kernel_abi_event_notifier *kernel_event_notifier)
-{
-       enum lttng_error_code ret_code;
-       const char *name;
-       int strncpy_ret;
-
-       switch (lttng_event_rule_get_type(rule)) {
-       case LTTNG_EVENT_RULE_TYPE_KERNEL_KPROBE:
-       {
-               uint64_t address = 0, offset = 0;
-               const char *symbol_name = NULL;
-               const struct lttng_kernel_probe_location *location = NULL;
-               enum lttng_kernel_probe_location_status k_status;
-               enum lttng_event_rule_status status;
-
-               status = lttng_event_rule_kernel_kprobe_get_location(rule, &location);
-               if (status != LTTNG_EVENT_RULE_STATUS_OK) {
-                       ret_code = LTTNG_ERR_PROBE_LOCATION_INVAL;
-                       goto error;
-               }
-
-               switch (lttng_kernel_probe_location_get_type(location)) {
-               case LTTNG_KERNEL_PROBE_LOCATION_TYPE_ADDRESS:
-               {
-                       k_status = lttng_kernel_probe_location_address_get_address(
-                                       location, &address);
-                       LTTNG_ASSERT(k_status == LTTNG_KERNEL_PROBE_LOCATION_STATUS_OK);
-                       break;
-               }
-               case LTTNG_KERNEL_PROBE_LOCATION_TYPE_SYMBOL_OFFSET:
-               {
-                       k_status = lttng_kernel_probe_location_symbol_get_offset(
-                                       location, &offset);
-                       LTTNG_ASSERT(k_status == LTTNG_KERNEL_PROBE_LOCATION_STATUS_OK);
-                       symbol_name = lttng_kernel_probe_location_symbol_get_name(
-                                       location);
-                       break;
-               }
-               default:
-                       abort();
-               }
-
-               kernel_event_notifier->event.instrumentation = LTTNG_KERNEL_ABI_KPROBE;
-               kernel_event_notifier->event.u.kprobe.addr = address;
-               kernel_event_notifier->event.u.kprobe.offset = offset;
-               if (symbol_name) {
-                       strncpy_ret = lttng_strncpy(
-                                       kernel_event_notifier->event.u.kprobe.symbol_name,
-                                       symbol_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN);
-
-                       if (strncpy_ret) {
-                               ret_code = LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-               }
-
-               kernel_event_notifier->event.u.kprobe.symbol_name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
-
-               status = lttng_event_rule_kernel_kprobe_get_event_name(rule, &name);
-               LTTNG_ASSERT(status == LTTNG_EVENT_RULE_STATUS_OK);
-               ret_code = LTTNG_OK;
-               break;
-       }
-       case LTTNG_EVENT_RULE_TYPE_KERNEL_UPROBE:
-       {
-               const struct lttng_userspace_probe_location* location = NULL;
-               const struct lttng_userspace_probe_location_lookup_method *lookup = NULL;
-               enum lttng_event_rule_status status;
-
-               status = lttng_event_rule_kernel_uprobe_get_location(rule, &location);
-               if (status != LTTNG_EVENT_RULE_STATUS_OK) {
-                       ret_code = LTTNG_ERR_PROBE_LOCATION_INVAL;
-                       goto error;
-               }
-
-               kernel_event_notifier->event.instrumentation = LTTNG_KERNEL_ABI_UPROBE;
-
-               lookup = lttng_userspace_probe_location_get_lookup_method(
-                               location);
-               if (!lookup) {
-                       ret_code = LTTNG_ERR_PROBE_LOCATION_INVAL;
-                       goto error;
-               }
-
-               /*
-                * From the kernel tracer's perspective, all userspace probe
-                * event types are all the same: a file and an offset.
-                */
-               switch (lttng_userspace_probe_location_lookup_method_get_type(lookup)) {
-               case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF:
-                       /* Get the file descriptor on the target binary. */
-                       kernel_event_notifier->event.u.uprobe.fd =
-                                       lttng_userspace_probe_location_function_get_binary_fd(location);
-
-                       break;
-               case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT:
-                       /* Get the file descriptor on the target binary. */
-                       kernel_event_notifier->event.u.uprobe.fd =
-                                       lttng_userspace_probe_location_tracepoint_get_binary_fd(location);
-                       break;
-               default:
-                       abort();
-               }
-
-               status = lttng_event_rule_kernel_uprobe_get_event_name(
-                               rule, &name);
-               LTTNG_ASSERT(status == LTTNG_EVENT_RULE_STATUS_OK);
-               ret_code = LTTNG_OK;
-               break;
-       }
-       case LTTNG_EVENT_RULE_TYPE_KERNEL_TRACEPOINT:
-       {
-               const enum lttng_event_rule_status status =
-                               lttng_event_rule_kernel_tracepoint_get_name_pattern(
-                                               rule, &name);
-
-               LTTNG_ASSERT(status == LTTNG_EVENT_RULE_STATUS_OK);
-               kernel_event_notifier->event.instrumentation =
-                               LTTNG_KERNEL_ABI_TRACEPOINT;
-
-               ret_code = LTTNG_OK;
-               break;
-       }
-       case LTTNG_EVENT_RULE_TYPE_KERNEL_SYSCALL:
-       {
-               const enum lttng_event_rule_status status =
-                               lttng_event_rule_kernel_syscall_get_name_pattern(
-                                               rule, &name);
-               const enum lttng_event_rule_kernel_syscall_emission_site
-                       emission_site =
-                       lttng_event_rule_kernel_syscall_get_emission_site(rule);
-               enum lttng_kernel_abi_syscall_entryexit entryexit;
-
-               LTTNG_ASSERT(status == LTTNG_EVENT_RULE_STATUS_OK);
-               LTTNG_ASSERT(emission_site != LTTNG_EVENT_RULE_KERNEL_SYSCALL_EMISSION_SITE_UNKNOWN);
-
-               switch(emission_site) {
-               case LTTNG_EVENT_RULE_KERNEL_SYSCALL_EMISSION_SITE_ENTRY:
-                       entryexit = LTTNG_KERNEL_ABI_SYSCALL_ENTRY;
-                       break;
-               case LTTNG_EVENT_RULE_KERNEL_SYSCALL_EMISSION_SITE_EXIT:
-                       entryexit = LTTNG_KERNEL_ABI_SYSCALL_EXIT;
-                       break;
-               case LTTNG_EVENT_RULE_KERNEL_SYSCALL_EMISSION_SITE_ENTRY_EXIT:
-                       entryexit = LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT;
-                       break;
-               default:
-                       abort();
-                       break;
-               }
-
-               kernel_event_notifier->event.instrumentation =
-                               LTTNG_KERNEL_ABI_SYSCALL;
-               kernel_event_notifier->event.u.syscall.abi =
-                               LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL;
-               kernel_event_notifier->event.u.syscall.entryexit =
-                               entryexit;
-               kernel_event_notifier->event.u.syscall.match =
-                               LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME;
-               ret_code = LTTNG_OK;
-               break;
-       }
-       default:
-               abort();
-               break;
-       }
-
-       strncpy_ret = lttng_strncpy(kernel_event_notifier->event.name, name,
-                       LTTNG_KERNEL_ABI_SYM_NAME_LEN);
-       if (strncpy_ret) {
-               ret_code = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-error:
-       return ret_code;
-}
-/*
- * Allocate and initialize a kernel metadata.
- *
- * Return pointer to structure or NULL.
- */
-struct ltt_kernel_metadata *trace_kernel_create_metadata(void)
-{
-       int ret;
-       struct ltt_kernel_metadata *lkm;
-       struct lttng_channel *chan;
-
-       lkm = zmalloc(sizeof(struct ltt_kernel_metadata));
-       chan = zmalloc(sizeof(struct lttng_channel));
-       if (lkm == NULL || chan == NULL) {
-               PERROR("kernel metadata zmalloc");
-               goto error;
-       }
-
-       ret = lttng_strncpy(
-                       chan->name, DEFAULT_METADATA_NAME, sizeof(chan->name));
-       if (ret) {
-               ERR("Failed to initialize metadata channel name to `%s`",
-                               DEFAULT_METADATA_NAME);
-               goto error;
-       }
-
-       /* Set default attributes */
-       chan->attr.overwrite = DEFAULT_METADATA_OVERWRITE;
-       chan->attr.subbuf_size = default_get_metadata_subbuf_size();
-       chan->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
-       chan->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
-       chan->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;;
-
-
-       /*
-        * The metadata channel of kernel sessions must use the "mmap"
-        * back-end since the consumer daemon accumulates complete
-        * metadata units before sending them to the relay daemon in
-        * live mode. The consumer daemon also needs to extract the contents
-        * of the metadata cache when computing a rotation position.
-        *
-        * In both cases, it is not possible to rely on the splice
-        * back-end as the consumer daemon may need to accumulate more
-        * content than can be backed by the ring buffer's underlying
-        * pages.
-        */
-       chan->attr.output = LTTNG_EVENT_MMAP;
-       chan->attr.tracefile_size = 0;
-       chan->attr.tracefile_count = 0;
-       chan->attr.live_timer_interval = 0;
-
-       /* Init metadata */
-       lkm->fd = -1;
-       lkm->conf = chan;
-
-       return lkm;
-
-error:
-       free(lkm);
-       free(chan);
-       return NULL;
-}
-
-/*
- * Allocate and initialize a kernel stream. The stream is set to ACTIVE_FD by
- * default.
- *
- * Return pointer to structure or NULL.
- */
-struct ltt_kernel_stream *trace_kernel_create_stream(const char *name,
-               unsigned int count)
-{
-       int ret;
-       struct ltt_kernel_stream *lks;
-
-       LTTNG_ASSERT(name);
-
-       lks = zmalloc(sizeof(struct ltt_kernel_stream));
-       if (lks == NULL) {
-               PERROR("kernel stream zmalloc");
-               goto error;
-       }
-
-       /* Set name */
-       ret = snprintf(lks->name, sizeof(lks->name), "%s_%u", name, count);
-       if (ret < 0) {
-               PERROR("snprintf stream name");
-               goto error;
-       }
-       lks->name[sizeof(lks->name) - 1] = '\0';
-
-       /* Init stream */
-       lks->fd = -1;
-       lks->state = 0;
-       lks->cpu = count;
-
-       return lks;
-
-error:
-       return NULL;
-}
-
-/*
- * Cleanup kernel stream structure.
- */
-void trace_kernel_destroy_stream(struct ltt_kernel_stream *stream)
-{
-       LTTNG_ASSERT(stream);
-
-       DBG("[trace] Closing stream fd %d", stream->fd);
-       /* Close kernel fd */
-       if (stream->fd >= 0) {
-               int ret;
-
-               ret = close(stream->fd);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-       /* Remove from stream list */
-       cds_list_del(&stream->list);
-
-       free(stream);
-}
-
-/*
- * Cleanup kernel event structure.
- */
-void trace_kernel_destroy_event(struct ltt_kernel_event *event)
-{
-       LTTNG_ASSERT(event);
-
-       if (event->fd >= 0) {
-               int ret;
-
-               DBG("[trace] Closing event fd %d", event->fd);
-               /* Close kernel fd */
-               ret = close(event->fd);
-               if (ret) {
-                       PERROR("close");
-               }
-       } else {
-               DBG("[trace] Tearing down event (no associated file descriptor)");
-       }
-
-       /* Remove from event list */
-       cds_list_del(&event->list);
-
-       free(event->filter_expression);
-       free(event->filter);
-
-       free(event->event);
-       free(event);
-}
-
-/*
- * Cleanup kernel event structure.
- */
-static void free_token_event_rule_rcu(struct rcu_head *rcu_node)
-{
-       struct ltt_kernel_event_notifier_rule *rule = caa_container_of(rcu_node,
-                       struct ltt_kernel_event_notifier_rule, rcu_node);
-
-       free(rule);
-}
-
-void trace_kernel_destroy_event_notifier_rule(
-               struct ltt_kernel_event_notifier_rule *event)
-{
-       LTTNG_ASSERT(event);
-
-       if (event->fd >= 0) {
-               const int ret = close(event->fd);
-
-               DBG("Closing kernel event notifier rule file descriptor: fd = %d",
-                               event->fd);
-               if (ret) {
-                       PERROR("Failed to close kernel event notifier file descriptor: fd = %d",
-                                       event->fd);
-               }
-       } else {
-               DBG("Destroying kernel event notifier rule (no associated file descriptor)");
-       }
-
-       lttng_trigger_put(event->trigger);
-       call_rcu(&event->rcu_node, free_token_event_rule_rcu);
-}
-/*
- * Cleanup kernel context structure.
- */
-void trace_kernel_destroy_context(struct ltt_kernel_context *ctx)
-{
-       LTTNG_ASSERT(ctx);
-
-       if (ctx->in_list) {
-               cds_list_del(&ctx->list);
-       }
-       free(ctx);
-}
-
-/*
- * Cleanup kernel channel structure.
- */
-void trace_kernel_destroy_channel(struct ltt_kernel_channel *channel)
-{
-       struct ltt_kernel_stream *stream, *stmp;
-       struct ltt_kernel_event *event, *etmp;
-       struct ltt_kernel_context *ctx, *ctmp;
-       int ret;
-       enum lttng_error_code status;
-
-       LTTNG_ASSERT(channel);
-
-       DBG("[trace] Closing channel fd %d", channel->fd);
-       /* Close kernel fd */
-       if (channel->fd >= 0) {
-               ret = close(channel->fd);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-
-       /* For each stream in the channel list */
-       cds_list_for_each_entry_safe(stream, stmp, &channel->stream_list.head, list) {
-               trace_kernel_destroy_stream(stream);
-       }
-
-       /* For each event in the channel list */
-       cds_list_for_each_entry_safe(event, etmp, &channel->events_list.head, list) {
-               trace_kernel_destroy_event(event);
-       }
-
-       /* For each context in the channel list */
-       cds_list_for_each_entry_safe(ctx, ctmp, &channel->ctx_list, list) {
-               trace_kernel_destroy_context(ctx);
-       }
-
-       /* Remove from channel list */
-       cds_list_del(&channel->list);
-
-       if (the_notification_thread_handle &&
-                       channel->published_to_notification_thread) {
-               status = notification_thread_command_remove_channel(
-                               the_notification_thread_handle, channel->key,
-                               LTTNG_DOMAIN_KERNEL);
-               LTTNG_ASSERT(status == LTTNG_OK);
-       }
-       free(channel->channel->attr.extended.ptr);
-       free(channel->channel);
-       free(channel);
-}
-
-/*
- * Cleanup kernel metadata structure.
- */
-void trace_kernel_destroy_metadata(struct ltt_kernel_metadata *metadata)
-{
-       LTTNG_ASSERT(metadata);
-
-       DBG("[trace] Closing metadata fd %d", metadata->fd);
-       /* Close kernel fd */
-       if (metadata->fd >= 0) {
-               int ret;
-
-               ret = close(metadata->fd);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-
-       free(metadata->conf);
-       free(metadata);
-}
-
-/*
- * Cleanup kernel session structure
- *
- * Should *NOT* be called with RCU read-side lock held.
- */
-void trace_kernel_destroy_session(struct ltt_kernel_session *session)
-{
-       struct ltt_kernel_channel *channel, *ctmp;
-       int ret;
-
-       LTTNG_ASSERT(session);
-
-       DBG("[trace] Closing session fd %d", session->fd);
-       /* Close kernel fds */
-       if (session->fd >= 0) {
-               ret = close(session->fd);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-
-       if (session->metadata_stream_fd >= 0) {
-               DBG("[trace] Closing metadata stream fd %d", session->metadata_stream_fd);
-               ret = close(session->metadata_stream_fd);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-
-       if (session->metadata != NULL) {
-               trace_kernel_destroy_metadata(session->metadata);
-       }
-
-       cds_list_for_each_entry_safe(channel, ctmp, &session->channel_list.head, list) {
-               trace_kernel_destroy_channel(channel);
-       }
-}
-
-/* Free elements needed by destroy notifiers. */
-void trace_kernel_free_session(struct ltt_kernel_session *session)
-{
-       /* Wipe consumer output object */
-       consumer_output_put(session->consumer);
-
-       process_attr_tracker_destroy(session->tracker_pid);
-       process_attr_tracker_destroy(session->tracker_vpid);
-       process_attr_tracker_destroy(session->tracker_uid);
-       process_attr_tracker_destroy(session->tracker_vuid);
-       process_attr_tracker_destroy(session->tracker_gid);
-       process_attr_tracker_destroy(session->tracker_vgid);
-
-       free(session);
-}
diff --git a/src/bin/lttng-sessiond/trace-kernel.cpp b/src/bin/lttng-sessiond/trace-kernel.cpp
new file mode 100644 (file)
index 0000000..cf1455e
--- /dev/null
@@ -0,0 +1,1052 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <lttng/event.h>
+#include <lttng/lttng-error.h>
+#include <lttng/kernel-probe.h>
+#include <lttng/userspace-probe.h>
+#include <lttng/userspace-probe-internal.h>
+#include <lttng/event-rule/event-rule.h>
+#include <lttng/event-rule/event-rule-internal.h>
+#include <lttng/event-rule/kernel-kprobe.h>
+#include <lttng/event-rule/kernel-kprobe-internal.h>
+#include <lttng/event-rule/kernel-syscall.h>
+#include <lttng/event-rule/kernel-syscall-internal.h>
+#include <lttng/event-rule/kernel-tracepoint.h>
+#include <lttng/event-rule/kernel-tracepoint-internal.h>
+#include <lttng/event-rule/kernel-uprobe.h>
+#include <lttng/event-rule/kernel-uprobe-internal.h>
+#include <common/common.h>
+#include <common/defaults.h>
+#include <common/trace-chunk.h>
+#include <common/macros.h>
+
+#include "consumer.h"
+#include "trace-kernel.h"
+#include "lttng-sessiond.h"
+#include "notification-thread-commands.h"
+
+/*
+ * Find the channel name for the given kernel session.
+ */
+struct ltt_kernel_channel *trace_kernel_get_channel_by_name(
+               const char *name, struct ltt_kernel_session *session)
+{
+       struct ltt_kernel_channel *chan;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(name);
+
+       /*
+        * If we receive an empty string for channel name, it means the
+        * default channel name is requested.
+        */
+       if (name[0] == '\0')
+               name = DEFAULT_CHANNEL_NAME;
+
+       DBG("Trying to find channel %s", name);
+
+       cds_list_for_each_entry(chan, &session->channel_list.head, list) {
+               if (strcmp(name, chan->channel->name) == 0) {
+                       DBG("Found channel by name %s", name);
+                       return chan;
+               }
+       }
+
+       return NULL;
+}
+
+/*
+ * Find the event for the given channel.
+ */
+struct ltt_kernel_event *trace_kernel_find_event(
+               char *name, struct ltt_kernel_channel *channel,
+               enum lttng_event_type type,
+               struct lttng_bytecode *filter)
+{
+       struct ltt_kernel_event *ev;
+       int found = 0;
+
+       LTTNG_ASSERT(name);
+       LTTNG_ASSERT(channel);
+
+       cds_list_for_each_entry(ev, &channel->events_list.head, list) {
+               if (type != LTTNG_EVENT_ALL && ev->type != type) {
+                       continue;
+               }
+               if (strcmp(name, ev->event->name)) {
+                       continue;
+               }
+               if ((ev->filter && !filter) || (!ev->filter && filter)) {
+                       continue;
+               }
+               if (ev->filter && filter) {
+                       if (ev->filter->len != filter->len ||
+                                       memcmp(ev->filter->data, filter->data,
+                                               filter->len) != 0) {
+                               continue;
+                       }
+               }
+               found = 1;
+               break;
+       }
+       if (found) {
+               DBG("Found event %s for channel %s", name,
+                       channel->channel->name);
+               return ev;
+       } else {
+               return NULL;
+       }
+}
+
+/*
+ * Find the event name for the given channel.
+ */
+struct ltt_kernel_event *trace_kernel_get_event_by_name(
+               char *name, struct ltt_kernel_channel *channel,
+               enum lttng_event_type type)
+{
+       struct ltt_kernel_event *ev;
+       int found = 0;
+
+       LTTNG_ASSERT(name);
+       LTTNG_ASSERT(channel);
+
+       cds_list_for_each_entry(ev, &channel->events_list.head, list) {
+               if (type != LTTNG_EVENT_ALL && ev->type != type) {
+                       continue;
+               }
+               if (strcmp(name, ev->event->name)) {
+                       continue;
+               }
+               found = 1;
+               break;
+       }
+       if (found) {
+               DBG("Found event %s for channel %s", name,
+                       channel->channel->name);
+               return ev;
+       } else {
+               return NULL;
+       }
+}
+
+/*
+ * Allocate and initialize a kernel session data structure.
+ *
+ * Return pointer to structure or NULL.
+ */
+struct ltt_kernel_session *trace_kernel_create_session(void)
+{
+       struct ltt_kernel_session *lks = NULL;
+
+       /* Allocate a new ltt kernel session */
+       lks = (ltt_kernel_session *) zmalloc(sizeof(struct ltt_kernel_session));
+       if (lks == NULL) {
+               PERROR("create kernel session zmalloc");
+               goto alloc_error;
+       }
+
+       /* Init data structure */
+       lks->fd = -1;
+       lks->metadata_stream_fd = -1;
+       lks->channel_count = 0;
+       lks->stream_count_global = 0;
+       lks->metadata = NULL;
+       CDS_INIT_LIST_HEAD(&lks->channel_list.head);
+
+       lks->tracker_pid = process_attr_tracker_create();
+       if (!lks->tracker_pid) {
+               goto error;
+       }
+       lks->tracker_vpid = process_attr_tracker_create();
+       if (!lks->tracker_vpid) {
+               goto error;
+       }
+       lks->tracker_uid = process_attr_tracker_create();
+       if (!lks->tracker_uid) {
+               goto error;
+       }
+       lks->tracker_vuid = process_attr_tracker_create();
+       if (!lks->tracker_vuid) {
+               goto error;
+       }
+       lks->tracker_gid = process_attr_tracker_create();
+       if (!lks->tracker_gid) {
+               goto error;
+       }
+       lks->tracker_vgid = process_attr_tracker_create();
+       if (!lks->tracker_vgid) {
+               goto error;
+       }
+       lks->consumer = consumer_create_output(CONSUMER_DST_LOCAL);
+       if (lks->consumer == NULL) {
+               goto error;
+       }
+
+       return lks;
+
+error:
+       process_attr_tracker_destroy(lks->tracker_pid);
+       process_attr_tracker_destroy(lks->tracker_vpid);
+       process_attr_tracker_destroy(lks->tracker_uid);
+       process_attr_tracker_destroy(lks->tracker_vuid);
+       process_attr_tracker_destroy(lks->tracker_gid);
+       process_attr_tracker_destroy(lks->tracker_vgid);
+       free(lks);
+
+alloc_error:
+       return NULL;
+}
+
+/*
+ * Allocate and initialize a kernel channel data structure.
+ *
+ * Return pointer to structure or NULL.
+ */
+struct ltt_kernel_channel *trace_kernel_create_channel(
+               struct lttng_channel *chan)
+{
+       struct ltt_kernel_channel *lkc;
+       struct lttng_channel_extended *extended = NULL;
+
+       LTTNG_ASSERT(chan);
+
+       lkc = (ltt_kernel_channel *) zmalloc(sizeof(struct ltt_kernel_channel));
+       if (lkc == NULL) {
+               PERROR("ltt_kernel_channel zmalloc");
+               goto error;
+       }
+
+       lkc->channel = (lttng_channel *) zmalloc(sizeof(struct lttng_channel));
+       if (lkc->channel == NULL) {
+               PERROR("lttng_channel zmalloc");
+               goto error;
+       }
+
+       extended = (lttng_channel_extended *) zmalloc(sizeof(struct lttng_channel_extended));
+       if (!extended) {
+               PERROR("lttng_channel_extended zmalloc");
+               goto error;
+       }
+       memcpy(lkc->channel, chan, sizeof(struct lttng_channel));
+       memcpy(extended, chan->attr.extended.ptr, sizeof(struct lttng_channel_extended));
+       lkc->channel->attr.extended.ptr = extended;
+       extended = NULL;
+
+       /*
+        * If we receive an empty string for channel name, it means the
+        * default channel name is requested.
+        */
+       if (chan->name[0] == '\0') {
+               strncpy(lkc->channel->name, DEFAULT_CHANNEL_NAME,
+                       sizeof(lkc->channel->name));
+       }
+       lkc->channel->name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
+
+       lkc->fd = -1;
+       lkc->stream_count = 0;
+       lkc->event_count = 0;
+       lkc->enabled = 1;
+       lkc->published_to_notification_thread = false;
+       /* Init linked list */
+       CDS_INIT_LIST_HEAD(&lkc->events_list.head);
+       CDS_INIT_LIST_HEAD(&lkc->stream_list.head);
+       CDS_INIT_LIST_HEAD(&lkc->ctx_list);
+
+       return lkc;
+
+error:
+       if (lkc) {
+               free(lkc->channel);
+       }
+       free(extended);
+       free(lkc);
+       return NULL;
+}
+
+/*
+ * Allocate and init a kernel context object.
+ *
+ * Return the allocated object or NULL on error.
+ */
+struct ltt_kernel_context *trace_kernel_create_context(
+               struct lttng_kernel_abi_context *ctx)
+{
+       struct ltt_kernel_context *kctx;
+
+       kctx = (ltt_kernel_context *) zmalloc(sizeof(*kctx));
+       if (!kctx) {
+               PERROR("zmalloc kernel context");
+               goto error;
+       }
+
+       if (ctx) {
+               memcpy(&kctx->ctx, ctx, sizeof(kctx->ctx));
+       }
+error:
+       return kctx;
+}
+
+/*
+ * Allocate and init a kernel context object from an existing kernel context
+ * object.
+ *
+ * Return the allocated object or NULL on error.
+ */
+struct ltt_kernel_context *trace_kernel_copy_context(
+               struct ltt_kernel_context *kctx)
+{
+       struct ltt_kernel_context *kctx_copy;
+
+       LTTNG_ASSERT(kctx);
+       kctx_copy = (ltt_kernel_context *) zmalloc(sizeof(*kctx_copy));
+       if (!kctx_copy) {
+               PERROR("zmalloc ltt_kernel_context");
+               goto error;
+       }
+
+       memcpy(kctx_copy, kctx, sizeof(*kctx_copy));
+       memset(&kctx_copy->list, 0, sizeof(kctx_copy->list));
+
+error:
+       return kctx_copy;
+}
+
+/*
+ * Allocate and initialize a kernel event. Set name and event type.
+ * We own filter_expression, and filter.
+ *
+ * Return LTTNG_OK on success or an lttng_error_code on error; the new event is returned through kernel_event.
+ */
+enum lttng_error_code trace_kernel_create_event(
+               struct lttng_event *ev, char *filter_expression,
+               struct lttng_bytecode *filter,
+               struct ltt_kernel_event **kernel_event)
+{
+       enum lttng_error_code ret;
+       struct lttng_kernel_abi_event *attr;
+       struct ltt_kernel_event *local_kernel_event;
+       struct lttng_userspace_probe_location *userspace_probe_location = NULL;
+
+       LTTNG_ASSERT(ev);
+
+       local_kernel_event = (ltt_kernel_event *) zmalloc(sizeof(struct ltt_kernel_event));
+       attr = (lttng_kernel_abi_event *) zmalloc(sizeof(struct lttng_kernel_abi_event));
+       if (local_kernel_event == NULL || attr == NULL) {
+               PERROR("kernel event zmalloc");
+               ret = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       switch (ev->type) {
+       case LTTNG_EVENT_PROBE:
+               attr->instrumentation = LTTNG_KERNEL_ABI_KPROBE;
+               attr->u.kprobe.addr = ev->attr.probe.addr;
+               attr->u.kprobe.offset = ev->attr.probe.offset;
+               strncpy(attr->u.kprobe.symbol_name,
+                               ev->attr.probe.symbol_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN);
+               attr->u.kprobe.symbol_name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
+               break;
+       case LTTNG_EVENT_USERSPACE_PROBE:
+       {
+               const struct lttng_userspace_probe_location* location = NULL;
+               const struct lttng_userspace_probe_location_lookup_method *lookup = NULL;
+
+               location = lttng_event_get_userspace_probe_location(ev);
+               if (!location) {
+                       ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
+                       goto error;
+               }
+
+               /*
+                * From this point on, the specific term 'uprobe' is used
+                * instead of the generic 'userspace probe' because it's the
+                * technology used at the moment for this instrumentation.
+                * LTTng currently implements userspace probes using uprobes.
+                * In the interactions with the kernel tracer, we use the
+                * uprobe term.
+                */
+               attr->instrumentation = LTTNG_KERNEL_ABI_UPROBE;
+
+               lookup = lttng_userspace_probe_location_get_lookup_method(
+                               location);
+               if (!lookup) {
+                       ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
+                       goto error;
+               }
+
+               /*
+                * From the kernel tracer's perspective, all userspace probe
+                * event types are all the same: a file and an offset.
+                */
+               switch (lttng_userspace_probe_location_lookup_method_get_type(lookup)) {
+               case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF:
+                       /* Get the file descriptor on the target binary. */
+                       attr->u.uprobe.fd =
+                                       lttng_userspace_probe_location_function_get_binary_fd(location);
+
+                       /*
+                        * Save a reference to the probe location used during
+                        * the listing of events.
+                        */
+                       userspace_probe_location =
+                                       lttng_userspace_probe_location_copy(location);
+                       break;
+               case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT:
+                       /* Get the file descriptor on the target binary. */
+                       attr->u.uprobe.fd =
+                                       lttng_userspace_probe_location_tracepoint_get_binary_fd(location);
+
+                       /*
+                        * Save a reference to the probe location used during the listing of
+                        * events.
+                        */
+                       userspace_probe_location =
+                                       lttng_userspace_probe_location_copy(location);
+                       break;
+               default:
+                       DBG("Unsupported lookup method type");
+                       ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
+                       goto error;
+               }
+               break;
+       }
+       case LTTNG_EVENT_FUNCTION:
+               attr->instrumentation = LTTNG_KERNEL_ABI_KRETPROBE;
+               attr->u.kretprobe.addr = ev->attr.probe.addr;
+               attr->u.kretprobe.offset = ev->attr.probe.offset;
+               strncpy(attr->u.kretprobe.symbol_name,
+                               ev->attr.probe.symbol_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN);
+               attr->u.kretprobe.symbol_name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
+               break;
+       case LTTNG_EVENT_FUNCTION_ENTRY:
+               attr->instrumentation = LTTNG_KERNEL_ABI_FUNCTION;
+               strncpy(attr->u.ftrace.symbol_name,
+                               ev->attr.ftrace.symbol_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN);
+               attr->u.ftrace.symbol_name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
+               break;
+       case LTTNG_EVENT_TRACEPOINT:
+               attr->instrumentation = LTTNG_KERNEL_ABI_TRACEPOINT;
+               break;
+       case LTTNG_EVENT_SYSCALL:
+               attr->instrumentation = LTTNG_KERNEL_ABI_SYSCALL;
+               attr->u.syscall.abi = LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL;
+               attr->u.syscall.entryexit = LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT;
+               attr->u.syscall.match = LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME;
+               break;
+       case LTTNG_EVENT_ALL:
+               attr->instrumentation = LTTNG_KERNEL_ABI_ALL;
+               break;
+       default:
+               ERR("Unknown kernel instrumentation type (%d)", ev->type);
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       /* Copy event name */
+       strncpy(attr->name, ev->name, LTTNG_KERNEL_ABI_SYM_NAME_LEN);
+       attr->name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
+
+       /* Setting up a kernel event */
+       local_kernel_event->fd = -1;
+       local_kernel_event->event = attr;
+       local_kernel_event->enabled = 1;
+       local_kernel_event->filter_expression = filter_expression;
+       local_kernel_event->filter = filter;
+       local_kernel_event->userspace_probe_location = userspace_probe_location;
+
+       *kernel_event = local_kernel_event;
+
+       return LTTNG_OK;
+
+error:
+       free(filter_expression);
+       free(filter);
+       free(local_kernel_event);
+       free(attr);
+       return ret;
+}
+
+/*
+ * Allocate and initialize a kernel token event rule.
+ *
+ * Return LTTNG_OK on success or an lttng_error_code on error; the new rule is returned through event_notifier_rule.
+ */
+enum lttng_error_code trace_kernel_create_event_notifier_rule(
+               struct lttng_trigger *trigger,
+               uint64_t token,
+               uint64_t error_counter_index,
+               struct ltt_kernel_event_notifier_rule **event_notifier_rule)
+{
+       enum lttng_error_code ret = LTTNG_OK;
+       enum lttng_condition_type condition_type;
+       enum lttng_event_rule_type event_rule_type;
+       enum lttng_condition_status condition_status;
+       struct ltt_kernel_event_notifier_rule *local_kernel_token_event_rule;
+       const struct lttng_condition *condition = NULL;
+       const struct lttng_event_rule *event_rule = NULL;
+
+       LTTNG_ASSERT(event_notifier_rule);
+
+       condition = lttng_trigger_get_const_condition(trigger);
+       LTTNG_ASSERT(condition);
+
+       condition_type = lttng_condition_get_type(condition);
+       LTTNG_ASSERT(condition_type == LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
+
+       condition_status = lttng_condition_event_rule_matches_get_rule(
+                       condition, &event_rule);
+       LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
+       LTTNG_ASSERT(event_rule);
+
+       event_rule_type = lttng_event_rule_get_type(event_rule);
+       LTTNG_ASSERT(event_rule_type != LTTNG_EVENT_RULE_TYPE_UNKNOWN);
+
+       local_kernel_token_event_rule =
+                       (ltt_kernel_event_notifier_rule *) zmalloc(sizeof(struct ltt_kernel_event_notifier_rule));
+       if (local_kernel_token_event_rule == NULL) {
+               PERROR("Failed to allocate ltt_kernel_token_event_rule structure");
+               ret = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       local_kernel_token_event_rule->fd = -1;
+       local_kernel_token_event_rule->enabled = 1;
+       local_kernel_token_event_rule->token = token;
+       local_kernel_token_event_rule->error_counter_index = error_counter_index;
+
+       /* Get the reference of the event rule. */
+       lttng_trigger_get(trigger);
+
+       local_kernel_token_event_rule->trigger = trigger;
+       /* The event rule still owns the filter and bytecode. */
+       local_kernel_token_event_rule->filter =
+                       lttng_event_rule_get_filter_bytecode(event_rule);
+
+       DBG3("Created kernel event notifier rule: token = %" PRIu64,
+                       local_kernel_token_event_rule->token);
+error:
+       *event_notifier_rule = local_kernel_token_event_rule;
+       return ret;
+}
+
+/*
+ * Initialize a kernel event notifier from an event rule.
+ */
+enum lttng_error_code trace_kernel_init_event_notifier_from_event_rule(
+               const struct lttng_event_rule *rule,
+               struct lttng_kernel_abi_event_notifier *kernel_event_notifier)
+{
+       enum lttng_error_code ret_code;
+       const char *name;
+       int strncpy_ret;
+
+       switch (lttng_event_rule_get_type(rule)) {
+       case LTTNG_EVENT_RULE_TYPE_KERNEL_KPROBE:
+       {
+               uint64_t address = 0, offset = 0;
+               const char *symbol_name = NULL;
+               const struct lttng_kernel_probe_location *location = NULL;
+               enum lttng_kernel_probe_location_status k_status;
+               enum lttng_event_rule_status status;
+
+               status = lttng_event_rule_kernel_kprobe_get_location(rule, &location);
+               if (status != LTTNG_EVENT_RULE_STATUS_OK) {
+                       ret_code = LTTNG_ERR_PROBE_LOCATION_INVAL;
+                       goto error;
+               }
+
+               switch (lttng_kernel_probe_location_get_type(location)) {
+               case LTTNG_KERNEL_PROBE_LOCATION_TYPE_ADDRESS:
+               {
+                       k_status = lttng_kernel_probe_location_address_get_address(
+                                       location, &address);
+                       LTTNG_ASSERT(k_status == LTTNG_KERNEL_PROBE_LOCATION_STATUS_OK);
+                       break;
+               }
+               case LTTNG_KERNEL_PROBE_LOCATION_TYPE_SYMBOL_OFFSET:
+               {
+                       k_status = lttng_kernel_probe_location_symbol_get_offset(
+                                       location, &offset);
+                       LTTNG_ASSERT(k_status == LTTNG_KERNEL_PROBE_LOCATION_STATUS_OK);
+                       symbol_name = lttng_kernel_probe_location_symbol_get_name(
+                                       location);
+                       break;
+               }
+               default:
+                       abort();
+               }
+
+               kernel_event_notifier->event.instrumentation = LTTNG_KERNEL_ABI_KPROBE;
+               kernel_event_notifier->event.u.kprobe.addr = address;
+               kernel_event_notifier->event.u.kprobe.offset = offset;
+               if (symbol_name) {
+                       strncpy_ret = lttng_strncpy(
+                                       kernel_event_notifier->event.u.kprobe.symbol_name,
+                                       symbol_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN);
+
+                       if (strncpy_ret) {
+                               ret_code = LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+               }
+
+               kernel_event_notifier->event.u.kprobe.symbol_name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
+
+               status = lttng_event_rule_kernel_kprobe_get_event_name(rule, &name);
+               LTTNG_ASSERT(status == LTTNG_EVENT_RULE_STATUS_OK);
+               ret_code = LTTNG_OK;
+               break;
+       }
+       case LTTNG_EVENT_RULE_TYPE_KERNEL_UPROBE:
+       {
+               const struct lttng_userspace_probe_location* location = NULL;
+               const struct lttng_userspace_probe_location_lookup_method *lookup = NULL;
+               enum lttng_event_rule_status status;
+
+               status = lttng_event_rule_kernel_uprobe_get_location(rule, &location);
+               if (status != LTTNG_EVENT_RULE_STATUS_OK) {
+                       ret_code = LTTNG_ERR_PROBE_LOCATION_INVAL;
+                       goto error;
+               }
+
+               kernel_event_notifier->event.instrumentation = LTTNG_KERNEL_ABI_UPROBE;
+
+               lookup = lttng_userspace_probe_location_get_lookup_method(
+                               location);
+               if (!lookup) {
+                       ret_code = LTTNG_ERR_PROBE_LOCATION_INVAL;
+                       goto error;
+               }
+
+               /*
+                * From the kernel tracer's perspective, all userspace probe
+                * event types are all the same: a file and an offset.
+                */
+               switch (lttng_userspace_probe_location_lookup_method_get_type(lookup)) {
+               case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF:
+                       /* Get the file descriptor on the target binary. */
+                       kernel_event_notifier->event.u.uprobe.fd =
+                                       lttng_userspace_probe_location_function_get_binary_fd(location);
+
+                       break;
+               case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT:
+                       /* Get the file descriptor on the target binary. */
+                       kernel_event_notifier->event.u.uprobe.fd =
+                                       lttng_userspace_probe_location_tracepoint_get_binary_fd(location);
+                       break;
+               default:
+                       abort();
+               }
+
+               status = lttng_event_rule_kernel_uprobe_get_event_name(
+                               rule, &name);
+               LTTNG_ASSERT(status == LTTNG_EVENT_RULE_STATUS_OK);
+               ret_code = LTTNG_OK;
+               break;
+       }
+       case LTTNG_EVENT_RULE_TYPE_KERNEL_TRACEPOINT:
+       {
+               const enum lttng_event_rule_status status =
+                               lttng_event_rule_kernel_tracepoint_get_name_pattern(
+                                               rule, &name);
+
+               LTTNG_ASSERT(status == LTTNG_EVENT_RULE_STATUS_OK);
+               kernel_event_notifier->event.instrumentation =
+                               LTTNG_KERNEL_ABI_TRACEPOINT;
+
+               ret_code = LTTNG_OK;
+               break;
+       }
+       case LTTNG_EVENT_RULE_TYPE_KERNEL_SYSCALL:
+       {
+               const enum lttng_event_rule_status status =
+                               lttng_event_rule_kernel_syscall_get_name_pattern(
+                                               rule, &name);
+               const enum lttng_event_rule_kernel_syscall_emission_site
+                       emission_site =
+                       lttng_event_rule_kernel_syscall_get_emission_site(rule);
+               enum lttng_kernel_abi_syscall_entryexit entryexit;
+
+               LTTNG_ASSERT(status == LTTNG_EVENT_RULE_STATUS_OK);
+               LTTNG_ASSERT(emission_site != LTTNG_EVENT_RULE_KERNEL_SYSCALL_EMISSION_SITE_UNKNOWN);
+
+               switch(emission_site) {
+               case LTTNG_EVENT_RULE_KERNEL_SYSCALL_EMISSION_SITE_ENTRY:
+                       entryexit = LTTNG_KERNEL_ABI_SYSCALL_ENTRY;
+                       break;
+               case LTTNG_EVENT_RULE_KERNEL_SYSCALL_EMISSION_SITE_EXIT:
+                       entryexit = LTTNG_KERNEL_ABI_SYSCALL_EXIT;
+                       break;
+               case LTTNG_EVENT_RULE_KERNEL_SYSCALL_EMISSION_SITE_ENTRY_EXIT:
+                       entryexit = LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT;
+                       break;
+               default:
+                       abort();
+                       break;
+               }
+
+               kernel_event_notifier->event.instrumentation =
+                               LTTNG_KERNEL_ABI_SYSCALL;
+               kernel_event_notifier->event.u.syscall.abi =
+                               LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL;
+               kernel_event_notifier->event.u.syscall.entryexit =
+                               entryexit;
+               kernel_event_notifier->event.u.syscall.match =
+                               LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME;
+               ret_code = LTTNG_OK;
+               break;
+       }
+       default:
+               abort();
+               break;
+       }
+
+       strncpy_ret = lttng_strncpy(kernel_event_notifier->event.name, name,
+                       LTTNG_KERNEL_ABI_SYM_NAME_LEN);
+       if (strncpy_ret) {
+               ret_code = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+error:
+       return ret_code;
+}
+/*
+ * Allocate and initialize a kernel metadata.
+ *
+ * Return pointer to structure or NULL.
+ */
+struct ltt_kernel_metadata *trace_kernel_create_metadata(void)
+{
+       int ret;
+       struct ltt_kernel_metadata *lkm;
+       struct lttng_channel *chan;
+
+       lkm = (ltt_kernel_metadata *) zmalloc(sizeof(struct ltt_kernel_metadata));
+       chan = (lttng_channel *) zmalloc(sizeof(struct lttng_channel));
+       if (lkm == NULL || chan == NULL) {
+               PERROR("kernel metadata zmalloc");
+               goto error;
+       }
+
+       ret = lttng_strncpy(
+                       chan->name, DEFAULT_METADATA_NAME, sizeof(chan->name));
+       if (ret) {
+               ERR("Failed to initialize metadata channel name to `%s`",
+                               DEFAULT_METADATA_NAME);
+               goto error;
+       }
+
+       /* Set default attributes */
+       chan->attr.overwrite = DEFAULT_METADATA_OVERWRITE;
+       chan->attr.subbuf_size = default_get_metadata_subbuf_size();
+       chan->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
+       chan->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
+       chan->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
+
+
+       /*
+        * The metadata channel of kernel sessions must use the "mmap"
+        * back-end since the consumer daemon accumulates complete
+        * metadata units before sending them to the relay daemon in
+        * live mode. The consumer daemon also needs to extract the contents
+        * of the metadata cache when computing a rotation position.
+        *
+        * In both cases, it is not possible to rely on the splice
+        * back-end as the consumer daemon may need to accumulate more
+        * content than can be backed by the ring buffer's underlying
+        * pages.
+        */
+       chan->attr.output = LTTNG_EVENT_MMAP;
+       chan->attr.tracefile_size = 0;
+       chan->attr.tracefile_count = 0;
+       chan->attr.live_timer_interval = 0;
+
+       /* Init metadata */
+       lkm->fd = -1;
+       lkm->conf = chan;
+
+       return lkm;
+
+error:
+       free(lkm);
+       free(chan);
+       return NULL;
+}
+
+/*
+ * Allocate and initialize a kernel stream. The stream is set to ACTIVE_FD by
+ * default.
+ *
+ * Return pointer to structure or NULL.
+ */
+struct ltt_kernel_stream *trace_kernel_create_stream(const char *name,
+               unsigned int count)
+{
+       int ret;
+       struct ltt_kernel_stream *lks;
+
+       LTTNG_ASSERT(name);
+
+       lks = (ltt_kernel_stream *) zmalloc(sizeof(struct ltt_kernel_stream));
+       if (lks == NULL) {
+               PERROR("kernel stream zmalloc");
+               goto error;
+       }
+
+       /* Set name */
+       ret = snprintf(lks->name, sizeof(lks->name), "%s_%u", name, count);
+       if (ret < 0) {
+               PERROR("snprintf stream name");
+               goto error;
+       }
+       lks->name[sizeof(lks->name) - 1] = '\0';
+
+       /* Init stream */
+       lks->fd = -1;
+       lks->state = 0;
+       lks->cpu = count;
+
+       return lks;
+
+error:
+       return NULL;
+}
+
+/*
+ * Cleanup kernel stream structure.
+ */
+void trace_kernel_destroy_stream(struct ltt_kernel_stream *stream)
+{
+       LTTNG_ASSERT(stream);
+
+       DBG("[trace] Closing stream fd %d", stream->fd);
+       /* Close kernel fd */
+       if (stream->fd >= 0) {
+               int ret;
+
+               ret = close(stream->fd);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+       /* Remove from stream list */
+       cds_list_del(&stream->list);
+
+       free(stream);
+}
+
+/*
+ * Cleanup kernel event structure.
+ */
+void trace_kernel_destroy_event(struct ltt_kernel_event *event)
+{
+       LTTNG_ASSERT(event);
+
+       if (event->fd >= 0) {
+               int ret;
+
+               DBG("[trace] Closing event fd %d", event->fd);
+               /* Close kernel fd */
+               ret = close(event->fd);
+               if (ret) {
+                       PERROR("close");
+               }
+       } else {
+               DBG("[trace] Tearing down event (no associated file descriptor)");
+       }
+
+       /* Remove from event list */
+       cds_list_del(&event->list);
+
+       free(event->filter_expression);
+       free(event->filter);
+
+       free(event->event);
+       free(event);
+}
+
+/*
+ * RCU callback used to free a kernel event notifier rule.
+ */
+static void free_token_event_rule_rcu(struct rcu_head *rcu_node)
+{
+       struct ltt_kernel_event_notifier_rule *rule = caa_container_of(rcu_node,
+                       struct ltt_kernel_event_notifier_rule, rcu_node);
+
+       free(rule);
+}
+
+void trace_kernel_destroy_event_notifier_rule(
+               struct ltt_kernel_event_notifier_rule *event)
+{
+       LTTNG_ASSERT(event);
+
+       if (event->fd >= 0) {
+               const int ret = close(event->fd);
+
+               DBG("Closing kernel event notifier rule file descriptor: fd = %d",
+                               event->fd);
+               if (ret) {
+                       PERROR("Failed to close kernel event notifier file descriptor: fd = %d",
+                                       event->fd);
+               }
+       } else {
+               DBG("Destroying kernel event notifier rule (no associated file descriptor)");
+       }
+
+       lttng_trigger_put(event->trigger);
+       call_rcu(&event->rcu_node, free_token_event_rule_rcu);
+}
+/*
+ * Cleanup kernel context structure.
+ */
+void trace_kernel_destroy_context(struct ltt_kernel_context *ctx)
+{
+       LTTNG_ASSERT(ctx);
+
+       if (ctx->in_list) {
+               cds_list_del(&ctx->list);
+       }
+       free(ctx);
+}
+
+/*
+ * Cleanup kernel channel structure.
+ */
+void trace_kernel_destroy_channel(struct ltt_kernel_channel *channel)
+{
+       struct ltt_kernel_stream *stream, *stmp;
+       struct ltt_kernel_event *event, *etmp;
+       struct ltt_kernel_context *ctx, *ctmp;
+       int ret;
+       enum lttng_error_code status;
+
+       LTTNG_ASSERT(channel);
+
+       DBG("[trace] Closing channel fd %d", channel->fd);
+       /* Close kernel fd */
+       if (channel->fd >= 0) {
+               ret = close(channel->fd);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+
+       /* For each stream in the channel list */
+       cds_list_for_each_entry_safe(stream, stmp, &channel->stream_list.head, list) {
+               trace_kernel_destroy_stream(stream);
+       }
+
+       /* For each event in the channel list */
+       cds_list_for_each_entry_safe(event, etmp, &channel->events_list.head, list) {
+               trace_kernel_destroy_event(event);
+       }
+
+       /* For each context in the channel list */
+       cds_list_for_each_entry_safe(ctx, ctmp, &channel->ctx_list, list) {
+               trace_kernel_destroy_context(ctx);
+       }
+
+       /* Remove from channel list */
+       cds_list_del(&channel->list);
+
+       if (the_notification_thread_handle &&
+                       channel->published_to_notification_thread) {
+               status = notification_thread_command_remove_channel(
+                               the_notification_thread_handle, channel->key,
+                               LTTNG_DOMAIN_KERNEL);
+               LTTNG_ASSERT(status == LTTNG_OK);
+       }
+       free(channel->channel->attr.extended.ptr);
+       free(channel->channel);
+       free(channel);
+}
+
+/*
+ * Cleanup kernel metadata structure.
+ */
+void trace_kernel_destroy_metadata(struct ltt_kernel_metadata *metadata)
+{
+       LTTNG_ASSERT(metadata);
+
+       DBG("[trace] Closing metadata fd %d", metadata->fd);
+       /* Close kernel fd */
+       if (metadata->fd >= 0) {
+               int ret;
+
+               ret = close(metadata->fd);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+
+       free(metadata->conf);
+       free(metadata);
+}
+
+/*
+ * Cleanup kernel session structure
+ *
+ * Should *NOT* be called with RCU read-side lock held.
+ */
+void trace_kernel_destroy_session(struct ltt_kernel_session *session)
+{
+       struct ltt_kernel_channel *channel, *ctmp;
+       int ret;
+
+       LTTNG_ASSERT(session);
+
+       DBG("[trace] Closing session fd %d", session->fd);
+       /* Close kernel fds */
+       if (session->fd >= 0) {
+               ret = close(session->fd);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+
+       if (session->metadata_stream_fd >= 0) {
+               DBG("[trace] Closing metadata stream fd %d", session->metadata_stream_fd);
+               ret = close(session->metadata_stream_fd);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+
+       if (session->metadata != NULL) {
+               trace_kernel_destroy_metadata(session->metadata);
+       }
+
+       cds_list_for_each_entry_safe(channel, ctmp, &session->channel_list.head, list) {
+               trace_kernel_destroy_channel(channel);
+       }
+}
+
+/* Free elements needed by destroy notifiers. */
+void trace_kernel_free_session(struct ltt_kernel_session *session)
+{
+       /* Wipe consumer output object */
+       consumer_output_put(session->consumer);
+
+       process_attr_tracker_destroy(session->tracker_pid);
+       process_attr_tracker_destroy(session->tracker_vpid);
+       process_attr_tracker_destroy(session->tracker_uid);
+       process_attr_tracker_destroy(session->tracker_vuid);
+       process_attr_tracker_destroy(session->tracker_gid);
+       process_attr_tracker_destroy(session->tracker_vgid);
+
+       free(session);
+}
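The C++-specific changes visible in the new trace-kernel.cpp are mostly explicit casts on zmalloc() calls (for example in trace_kernel_create_session(), trace_kernel_create_channel() and trace_kernel_create_stream()): C implicitly converts the void * returned by the allocator to the destination pointer type, while C++ rejects that conversion. A minimal, self-contained sketch of the pattern is shown below; xzalloc() and kernel_stream_sketch are hypothetical stand-ins used only for illustration and are not identifiers from this patch.

    #include <cstdlib>

    /* Hypothetical stand-in for lttng's zmalloc(): zero-initialized allocation. */
    static void *xzalloc(std::size_t len)
    {
            return std::calloc(1, len);
    }

    /* Hypothetical trimmed-down stand-in for struct ltt_kernel_stream. */
    struct kernel_stream_sketch {
            int fd;
            unsigned int cpu;
    };

    int main()
    {
            /* C accepts 'xzalloc(sizeof(*stream))' as-is; C++ requires the cast. */
            kernel_stream_sketch *stream =
                            (kernel_stream_sketch *) xzalloc(sizeof(*stream));
            if (!stream) {
                    return 1;
            }

            stream->fd = -1;
            std::free(stream);
            return 0;
    }

The cast-on-allocation change does not alter behaviour; it only satisfies C++'s stricter pointer-conversion rules at each allocation site.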
diff --git a/src/bin/lttng-sessiond/trace-ust.c b/src/bin/lttng-sessiond/trace-ust.c
deleted file mode 100644 (file)
index 47e98cc..0000000
+++ /dev/null
@@ -1,1440 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <inttypes.h>
-
-#include <common/common.h>
-#include <common/defaults.h>
-#include <common/trace-chunk.h>
-#include <common/utils.h>
-
-#include "buffer-registry.h"
-#include "trace-ust.h"
-#include "utils.h"
-#include "ust-app.h"
-#include "agent.h"
-
-/*
- * Match function for the events hash table lookup.
- *
- * Matches by name only. Used by the disable command.
- */
-int trace_ust_ht_match_event_by_name(struct cds_lfht_node *node,
-               const void *_key)
-{
-       struct ltt_ust_event *event;
-       const char *name;
-
-       LTTNG_ASSERT(node);
-       LTTNG_ASSERT(_key);
-
-       event = caa_container_of(node, struct ltt_ust_event, node.node);
-       name = _key;
-
-       /* Event name */
-       if (strncmp(event->attr.name, name, sizeof(event->attr.name)) != 0) {
-               goto no_match;
-       }
-
-       /* Match */
-       return 1;
-
-no_match:
-       return 0;
-}
-
-/*
- * Match function for the hash table lookup.
- *
- * It matches an ust event based on three attributes which are the event name,
- * the filter bytecode and the loglevel.
- */
-int trace_ust_ht_match_event(struct cds_lfht_node *node, const void *_key)
-{
-       struct ltt_ust_event *event;
-       const struct ltt_ust_ht_key *key;
-       int ev_loglevel_value;
-       int ll_match;
-
-       LTTNG_ASSERT(node);
-       LTTNG_ASSERT(_key);
-
-       event = caa_container_of(node, struct ltt_ust_event, node.node);
-       key = _key;
-       ev_loglevel_value = event->attr.loglevel;
-
-       /* Match the 4 elements of the key: name, filter, loglevel, exclusions. */
-
-       /* Event name */
-       if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
-               goto no_match;
-       }
-
-       /* Event loglevel value and type. */
-       ll_match = loglevels_match(event->attr.loglevel_type,
-               ev_loglevel_value, key->loglevel_type,
-               key->loglevel_value, LTTNG_UST_ABI_LOGLEVEL_ALL);
-
-       if (!ll_match) {
-               goto no_match;
-       }
-
-       /* Only one of the filters is NULL, fail. */
-       if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
-               goto no_match;
-       }
-
-       if (key->filter && event->filter) {
-               /* Both filters exists, check length followed by the bytecode. */
-               if (event->filter->len != key->filter->len ||
-                               memcmp(event->filter->data, key->filter->data,
-                                       event->filter->len) != 0) {
-                       goto no_match;
-               }
-       }
-
-       /* If only one of the exclusions is NULL, fail. */
-       if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
-               goto no_match;
-       }
-
-       if (key->exclusion && event->exclusion) {
-               size_t i;
-
-               /* Check exclusion counts first. */
-               if (event->exclusion->count != key->exclusion->count) {
-                       goto no_match;
-               }
-
-               /* Compare names individually. */
-               for (i = 0; i < event->exclusion->count; ++i) {
-                       size_t j;
-                       bool found = false;
-                       const char *name_ev =
-                               LTTNG_EVENT_EXCLUSION_NAME_AT(
-                                       event->exclusion, i);
-
-                       /*
-                        * Compare this exclusion name to all the key's
-                        * exclusion names.
-                        */
-                       for (j = 0; j < key->exclusion->count; ++j) {
-                               const char *name_key =
-                                       LTTNG_EVENT_EXCLUSION_NAME_AT(
-                                               key->exclusion, j);
-
-                               if (!strncmp(name_ev, name_key,
-                                               LTTNG_SYMBOL_NAME_LEN)) {
-                                       /* Names match! */
-                                       found = true;
-                                       break;
-                               }
-                       }
-
-                       /*
-                        * If the current exclusion name was not found amongst
-                        * the key's exclusion names, then there's no match.
-                        */
-                       if (!found) {
-                               goto no_match;
-                       }
-               }
-       }
-       /* Match. */
-       return 1;
-
-no_match:
-       return 0;
-}
-
-/*
- * Find the channel in the hashtable and return channel pointer. RCU read side
- * lock MUST be acquired before calling this.
- */
-struct ltt_ust_channel *trace_ust_find_channel_by_name(struct lttng_ht *ht,
-               const char *name)
-{
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-
-       /*
-        * If we receive an empty string for channel name, it means the
-        * default channel name is requested.
-        */
-       if (name[0] == '\0')
-               name = DEFAULT_CHANNEL_NAME;
-
-       lttng_ht_lookup(ht, (void *)name, &iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-       if (node == NULL) {
-               goto error;
-       }
-
-       DBG2("Trace UST channel %s found by name", name);
-
-       return caa_container_of(node, struct ltt_ust_channel, node);
-
-error:
-       DBG2("Trace UST channel %s not found by name", name);
-       return NULL;
-}
-
-/*
- * Find the event in the hashtable and return event pointer. RCU read side lock
- * MUST be acquired before calling this.
- */
-struct ltt_ust_event *trace_ust_find_event(struct lttng_ht *ht,
-               char *name, struct lttng_bytecode *filter,
-               enum lttng_ust_abi_loglevel_type loglevel_type, int loglevel_value,
-               struct lttng_event_exclusion *exclusion)
-{
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-       struct ltt_ust_ht_key key;
-
-       LTTNG_ASSERT(name);
-       LTTNG_ASSERT(ht);
-
-       key.name = name;
-       key.filter = filter;
-       key.loglevel_type = loglevel_type;
-       key.loglevel_value = loglevel_value;
-       key.exclusion = exclusion;
-
-       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
-                       trace_ust_ht_match_event, &key, &iter.iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-       if (node == NULL) {
-               goto error;
-       }
-
-       DBG2("Trace UST event %s found", key.name);
-
-       return caa_container_of(node, struct ltt_ust_event, node);
-
-error:
-       DBG2("Trace UST event %s NOT found", key.name);
-       return NULL;
-}
-
-/*
- * Lookup an agent in the session agents hash table by domain type and return
- * the object if found else NULL.
- *
- * RCU read side lock must be acquired before calling and only released
- * once the agent is no longer in scope or being used.
- */
-struct agent *trace_ust_find_agent(struct ltt_ust_session *session,
-               enum lttng_domain_type domain_type)
-{
-       struct agent *agt = NULL;
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-       uint64_t key;
-
-       LTTNG_ASSERT(session);
-
-       DBG3("Trace ust agent lookup for domain %d", domain_type);
-
-       key = domain_type;
-
-       lttng_ht_lookup(session->agents, &key, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (!node) {
-               goto end;
-       }
-       agt = caa_container_of(node, struct agent, node);
-
-end:
-       return agt;
-}
-
-/*
- * Allocate and initialize a ust session data structure.
- *
- * Return pointer to structure or NULL.
- */
-struct ltt_ust_session *trace_ust_create_session(uint64_t session_id)
-{
-       struct ltt_ust_session *lus;
-
-       /* Allocate a new ltt ust session */
-       lus = zmalloc(sizeof(struct ltt_ust_session));
-       if (lus == NULL) {
-               PERROR("create ust session zmalloc");
-               goto error_alloc;
-       }
-
-       /* Init data structure */
-       lus->id = session_id;
-       lus->active = 0;
-
-       /* Set default metadata channel attribute. */
-       lus->metadata_attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
-       lus->metadata_attr.subbuf_size = default_get_metadata_subbuf_size();
-       lus->metadata_attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
-       lus->metadata_attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
-       lus->metadata_attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
-       lus->metadata_attr.output = LTTNG_UST_ABI_MMAP;
-
-       /*
-        * Default buffer type. This can be changed through an enable channel
-        * requesting a different type. Note that this can only be changed once
-        * during the session lifetime which is at the first enable channel and
-        * only before start. The flag buffer_type_changed indicates the status.
-        */
-       lus->buffer_type = LTTNG_BUFFER_PER_UID;
-       /* Once set to 1, the buffer_type is immutable for the session. */
-       lus->buffer_type_changed = 0;
-       /* Init it in case it get used after allocation. */
-       CDS_INIT_LIST_HEAD(&lus->buffer_reg_uid_list);
-
-       /* Alloc UST global domain channels' HT */
-       lus->domain_global.channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
-       /* Alloc agent hash table. */
-       lus->agents = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-
-       lus->tracker_vpid = process_attr_tracker_create();
-       if (!lus->tracker_vpid) {
-               goto error;
-       }
-       lus->tracker_vuid = process_attr_tracker_create();
-       if (!lus->tracker_vuid) {
-               goto error;
-       }
-       lus->tracker_vgid = process_attr_tracker_create();
-       if (!lus->tracker_vgid) {
-               goto error;
-       }
-       lus->consumer = consumer_create_output(CONSUMER_DST_LOCAL);
-       if (lus->consumer == NULL) {
-               goto error;
-       }
-
-       DBG2("UST trace session create successful");
-
-       return lus;
-
-error:
-       process_attr_tracker_destroy(lus->tracker_vpid);
-       process_attr_tracker_destroy(lus->tracker_vuid);
-       process_attr_tracker_destroy(lus->tracker_vgid);
-       ht_cleanup_push(lus->domain_global.channels);
-       ht_cleanup_push(lus->agents);
-       free(lus);
-error_alloc:
-       return NULL;
-}
-
-/*
- * Allocate and initialize a ust channel data structure.
- *
- * Return pointer to structure or NULL.
- */
-struct ltt_ust_channel *trace_ust_create_channel(struct lttng_channel *chan,
-               enum lttng_domain_type domain)
-{
-       struct ltt_ust_channel *luc;
-
-       LTTNG_ASSERT(chan);
-
-       luc = zmalloc(sizeof(struct ltt_ust_channel));
-       if (luc == NULL) {
-               PERROR("ltt_ust_channel zmalloc");
-               goto error;
-       }
-
-       luc->domain = domain;
-
-       /* Copy UST channel attributes */
-       luc->attr.overwrite = chan->attr.overwrite;
-       luc->attr.subbuf_size = chan->attr.subbuf_size;
-       luc->attr.num_subbuf = chan->attr.num_subbuf;
-       luc->attr.switch_timer_interval = chan->attr.switch_timer_interval;
-       luc->attr.read_timer_interval = chan->attr.read_timer_interval;
-       luc->attr.output = (enum lttng_ust_abi_output) chan->attr.output;
-       luc->monitor_timer_interval = ((struct lttng_channel_extended *)
-                       chan->attr.extended.ptr)->monitor_timer_interval;
-       luc->attr.u.s.blocking_timeout = ((struct lttng_channel_extended *)
-                       chan->attr.extended.ptr)->blocking_timeout;
-
-       /* Translate to UST output enum */
-       switch (luc->attr.output) {
-       default:
-               luc->attr.output = LTTNG_UST_ABI_MMAP;
-               break;
-       }
-
-       /*
-        * If we receive an empty string for channel name, it means the
-        * default channel name is requested.
-        */
-       if (chan->name[0] == '\0') {
-               strncpy(luc->name, DEFAULT_CHANNEL_NAME, sizeof(luc->name));
-       } else {
-               /* Copy channel name */
-               strncpy(luc->name, chan->name, sizeof(luc->name));
-       }
-       luc->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
-
-       /* Init node */
-       lttng_ht_node_init_str(&luc->node, luc->name);
-       CDS_INIT_LIST_HEAD(&luc->ctx_list);
-
-       /* Alloc hash tables */
-       luc->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
-       luc->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-
-       /* On-disk circular buffer parameters */
-       luc->tracefile_size = chan->attr.tracefile_size;
-       luc->tracefile_count = chan->attr.tracefile_count;
-
-       DBG2("Trace UST channel %s created", luc->name);
-
-error:
-       return luc;
-}
-
-/*
- * Validates an exclusion list.
- *
- * Returns 0 if valid, negative value if invalid.
- */
-static int validate_exclusion(struct lttng_event_exclusion *exclusion)
-{
-       size_t i;
-       int ret = 0;
-
-       LTTNG_ASSERT(exclusion);
-
-       for (i = 0; i < exclusion->count; ++i) {
-               size_t j;
-               const char *name_a =
-                       LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, i);
-
-               for (j = 0; j < i; ++j) {
-                       const char *name_b =
-                               LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, j);
-
-                       if (!strncmp(name_a, name_b, LTTNG_SYMBOL_NAME_LEN)) {
-                               /* Match! */
-                               ret = -1;
-                               goto end;
-                       }
-               }
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Allocate and initialize a ust event. Set name and event type.
- * We own filter_expression, filter, and exclusion.
- *
- * Return an lttng_error_code
- */
-enum lttng_error_code trace_ust_create_event(struct lttng_event *ev,
-               char *filter_expression,
-               struct lttng_bytecode *filter,
-               struct lttng_event_exclusion *exclusion,
-               bool internal_event,
-               struct ltt_ust_event **ust_event)
-{
-       struct ltt_ust_event *local_ust_event;
-       enum lttng_error_code ret = LTTNG_OK;
-
-       LTTNG_ASSERT(ev);
-
-       if (exclusion && validate_exclusion(exclusion)) {
-               ret = LTTNG_ERR_INVALID;
-               goto error;
-       }
-
-       local_ust_event = zmalloc(sizeof(struct ltt_ust_event));
-       if (local_ust_event == NULL) {
-               PERROR("ust event zmalloc");
-               ret = LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       local_ust_event->internal = internal_event;
-
-       switch (ev->type) {
-       case LTTNG_EVENT_PROBE:
-               local_ust_event->attr.instrumentation = LTTNG_UST_ABI_PROBE;
-               break;
-       case LTTNG_EVENT_FUNCTION:
-               local_ust_event->attr.instrumentation = LTTNG_UST_ABI_FUNCTION;
-               break;
-       case LTTNG_EVENT_FUNCTION_ENTRY:
-               local_ust_event->attr.instrumentation = LTTNG_UST_ABI_FUNCTION;
-               break;
-       case LTTNG_EVENT_TRACEPOINT:
-               local_ust_event->attr.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
-               break;
-       default:
-               ERR("Unknown ust instrumentation type (%d)", ev->type);
-               ret = LTTNG_ERR_INVALID;
-               goto error_free_event;
-       }
-
-       /* Copy event name */
-       strncpy(local_ust_event->attr.name, ev->name, LTTNG_UST_ABI_SYM_NAME_LEN);
-       local_ust_event->attr.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
-
-       switch (ev->loglevel_type) {
-       case LTTNG_EVENT_LOGLEVEL_ALL:
-               local_ust_event->attr.loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
-               local_ust_event->attr.loglevel = -1;    /* Force to -1 */
-               break;
-       case LTTNG_EVENT_LOGLEVEL_RANGE:
-               local_ust_event->attr.loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
-               local_ust_event->attr.loglevel = ev->loglevel;
-               break;
-       case LTTNG_EVENT_LOGLEVEL_SINGLE:
-               local_ust_event->attr.loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
-               local_ust_event->attr.loglevel = ev->loglevel;
-               break;
-       default:
-               ERR("Unknown ust loglevel type (%d)", ev->loglevel_type);
-               ret = LTTNG_ERR_INVALID;
-               goto error_free_event;
-       }
-
-       /* Same layout. */
-       local_ust_event->filter_expression = filter_expression;
-       local_ust_event->filter = filter;
-       local_ust_event->exclusion = exclusion;
-
-       /* Init node */
-       lttng_ht_node_init_str(&local_ust_event->node, local_ust_event->attr.name);
-
-       DBG2("Trace UST event %s, loglevel (%d,%d) created",
-               local_ust_event->attr.name, local_ust_event->attr.loglevel_type,
-               local_ust_event->attr.loglevel);
-
-       *ust_event = local_ust_event;
-
-       return ret;
-
-error_free_event:
-       free(local_ust_event);
-error:
-       free(filter_expression);
-       free(filter);
-       free(exclusion);
-       return ret;
-}
-
-static
-int trace_ust_context_type_event_to_ust(
-               enum lttng_event_context_type type)
-{
-       int utype;
-
-       switch (type) {
-       case LTTNG_EVENT_CONTEXT_VTID:
-               utype = LTTNG_UST_ABI_CONTEXT_VTID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VPID:
-               utype = LTTNG_UST_ABI_CONTEXT_VPID;
-               break;
-       case LTTNG_EVENT_CONTEXT_PTHREAD_ID:
-               utype = LTTNG_UST_ABI_CONTEXT_PTHREAD_ID;
-               break;
-       case LTTNG_EVENT_CONTEXT_PROCNAME:
-               utype = LTTNG_UST_ABI_CONTEXT_PROCNAME;
-               break;
-       case LTTNG_EVENT_CONTEXT_IP:
-               utype = LTTNG_UST_ABI_CONTEXT_IP;
-               break;
-       case LTTNG_EVENT_CONTEXT_PERF_THREAD_COUNTER:
-               if (!lttng_ust_ctl_has_perf_counters()) {
-                       utype = -1;
-                       WARN("Perf counters not implemented in UST");
-               } else {
-                       utype = LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER;
-               }
-               break;
-       case LTTNG_EVENT_CONTEXT_APP_CONTEXT:
-               utype = LTTNG_UST_ABI_CONTEXT_APP_CONTEXT;
-               break;
-       case LTTNG_EVENT_CONTEXT_CGROUP_NS:
-               utype = LTTNG_UST_ABI_CONTEXT_CGROUP_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_IPC_NS:
-               utype = LTTNG_UST_ABI_CONTEXT_IPC_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_MNT_NS:
-               utype = LTTNG_UST_ABI_CONTEXT_MNT_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_NET_NS:
-               utype = LTTNG_UST_ABI_CONTEXT_NET_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_PID_NS:
-               utype = LTTNG_UST_ABI_CONTEXT_PID_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_TIME_NS:
-               utype = LTTNG_UST_ABI_CONTEXT_TIME_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_USER_NS:
-               utype = LTTNG_UST_ABI_CONTEXT_USER_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_UTS_NS:
-               utype = LTTNG_UST_ABI_CONTEXT_UTS_NS;
-               break;
-       case LTTNG_EVENT_CONTEXT_VUID:
-               utype = LTTNG_UST_ABI_CONTEXT_VUID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VEUID:
-               utype = LTTNG_UST_ABI_CONTEXT_VEUID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VSUID:
-               utype = LTTNG_UST_ABI_CONTEXT_VSUID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VGID:
-               utype = LTTNG_UST_ABI_CONTEXT_VGID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VEGID:
-               utype = LTTNG_UST_ABI_CONTEXT_VEGID;
-               break;
-       case LTTNG_EVENT_CONTEXT_VSGID:
-               utype = LTTNG_UST_ABI_CONTEXT_VSGID;
-               break;
-       default:
-               utype = -1;
-               break;
-       }
-       return utype;
-}
-
-/*
- * Return 1 if contexts match, 0 otherwise.
- */
-int trace_ust_match_context(const struct ltt_ust_context *uctx,
-               const struct lttng_event_context *ctx)
-{
-       int utype;
-
-       utype = trace_ust_context_type_event_to_ust(ctx->ctx);
-       if (utype < 0) {
-               return 0;
-       }
-       if (uctx->ctx.ctx != utype) {
-               return 0;
-       }
-       switch (utype) {
-       case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
-               if (uctx->ctx.u.perf_counter.type
-                               != ctx->u.perf_counter.type) {
-                       return 0;
-               }
-               if (uctx->ctx.u.perf_counter.config
-                               != ctx->u.perf_counter.config) {
-                       return 0;
-               }
-               if (strncmp(uctx->ctx.u.perf_counter.name,
-                               ctx->u.perf_counter.name,
-                               LTTNG_UST_ABI_SYM_NAME_LEN)) {
-                       return 0;
-               }
-               break;
-       case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
-               LTTNG_ASSERT(uctx->ctx.u.app_ctx.provider_name);
-               LTTNG_ASSERT(uctx->ctx.u.app_ctx.ctx_name);
-               if (strcmp(uctx->ctx.u.app_ctx.provider_name,
-                               ctx->u.app_ctx.provider_name) ||
-                               strcmp(uctx->ctx.u.app_ctx.ctx_name,
-                               ctx->u.app_ctx.ctx_name)) {
-                       return 0;
-               }
-       default:
-               break;
-
-       }
-       return 1;
-}
-
-/*
- * Allocate and initialize an UST context.
- *
- * Return pointer to structure or NULL.
- */
-struct ltt_ust_context *trace_ust_create_context(
-               const struct lttng_event_context *ctx)
-{
-       struct ltt_ust_context *uctx = NULL;
-       int utype;
-
-       LTTNG_ASSERT(ctx);
-
-       utype = trace_ust_context_type_event_to_ust(ctx->ctx);
-       if (utype < 0) {
-               ERR("Invalid UST context");
-               goto end;
-       }
-
-       uctx = zmalloc(sizeof(struct ltt_ust_context));
-       if (!uctx) {
-               PERROR("zmalloc ltt_ust_context");
-               goto end;
-       }
-
-       uctx->ctx.ctx = (enum lttng_ust_abi_context_type) utype;
-       switch (utype) {
-       case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
-               uctx->ctx.u.perf_counter.type = ctx->u.perf_counter.type;
-               uctx->ctx.u.perf_counter.config = ctx->u.perf_counter.config;
-               strncpy(uctx->ctx.u.perf_counter.name, ctx->u.perf_counter.name,
-                               LTTNG_UST_ABI_SYM_NAME_LEN);
-               uctx->ctx.u.perf_counter.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
-               break;
-       case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
-       {
-               char *provider_name = NULL, *ctx_name = NULL;
-
-               provider_name = strdup(ctx->u.app_ctx.provider_name);
-               if (!provider_name) {
-                       goto error;
-               }
-               uctx->ctx.u.app_ctx.provider_name = provider_name;
-
-               ctx_name = strdup(ctx->u.app_ctx.ctx_name);
-               if (!ctx_name) {
-                       goto error;
-               }
-               uctx->ctx.u.app_ctx.ctx_name = ctx_name;
-               break;
-       }
-       default:
-               break;
-       }
-       lttng_ht_node_init_ulong(&uctx->node, (unsigned long) uctx->ctx.ctx);
-end:
-       return uctx;
-error:
-       trace_ust_destroy_context(uctx);
-       return NULL;
-}
-
-static void destroy_id_tracker_node_rcu(struct rcu_head *head)
-{
-       struct ust_id_tracker_node *tracker_node = caa_container_of(
-                       head, struct ust_id_tracker_node, node.head);
-       free(tracker_node);
-}
-
-static void destroy_id_tracker_node(struct ust_id_tracker_node *tracker_node)
-{
-       call_rcu(&tracker_node->node.head, destroy_id_tracker_node_rcu);
-}
-
-static int init_id_tracker(struct ust_id_tracker *id_tracker)
-{
-       int ret = LTTNG_OK;
-
-       id_tracker->ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-       if (!id_tracker->ht) {
-               ret = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Teardown id tracker content, but don't free id_tracker object.
- */
-static void fini_id_tracker(struct ust_id_tracker *id_tracker)
-{
-       struct ust_id_tracker_node *tracker_node;
-       struct lttng_ht_iter iter;
-
-       if (!id_tracker->ht) {
-               return;
-       }
-       rcu_read_lock();
-       cds_lfht_for_each_entry (id_tracker->ht->ht, &iter.iter, tracker_node,
-                       node.node) {
-               int ret = lttng_ht_del(id_tracker->ht, &iter);
-
-               LTTNG_ASSERT(!ret);
-               destroy_id_tracker_node(tracker_node);
-       }
-       rcu_read_unlock();
-       ht_cleanup_push(id_tracker->ht);
-       id_tracker->ht = NULL;
-}
-
-static struct ust_id_tracker_node *id_tracker_lookup(
-               struct ust_id_tracker *id_tracker,
-               int id,
-               struct lttng_ht_iter *iter)
-{
-       unsigned long _id = (unsigned long) id;
-       struct lttng_ht_node_ulong *node;
-
-       lttng_ht_lookup(id_tracker->ht, (void *) _id, iter);
-       node = lttng_ht_iter_get_node_ulong(iter);
-       if (node) {
-               return caa_container_of(node, struct ust_id_tracker_node, node);
-       } else {
-               return NULL;
-       }
-}
-
-static int id_tracker_add_id(struct ust_id_tracker *id_tracker, int id)
-{
-       int retval = LTTNG_OK;
-       struct ust_id_tracker_node *tracker_node;
-       struct lttng_ht_iter iter;
-
-       if (id < 0) {
-               retval = LTTNG_ERR_INVALID;
-               goto end;
-       }
-       tracker_node = id_tracker_lookup(id_tracker, id, &iter);
-       if (tracker_node) {
-               /* Already exists. */
-               retval = LTTNG_ERR_PROCESS_ATTR_EXISTS;
-               goto end;
-       }
-       tracker_node = zmalloc(sizeof(*tracker_node));
-       if (!tracker_node) {
-               retval = LTTNG_ERR_NOMEM;
-               goto end;
-       }
-       lttng_ht_node_init_ulong(&tracker_node->node, (unsigned long) id);
-       lttng_ht_add_unique_ulong(id_tracker->ht, &tracker_node->node);
-end:
-       return retval;
-}
-
-static int id_tracker_del_id(struct ust_id_tracker *id_tracker, int id)
-{
-       int retval = LTTNG_OK, ret;
-       struct ust_id_tracker_node *tracker_node;
-       struct lttng_ht_iter iter;
-
-       if (id < 0) {
-               retval = LTTNG_ERR_INVALID;
-               goto end;
-       }
-       tracker_node = id_tracker_lookup(id_tracker, id, &iter);
-       if (!tracker_node) {
-               /* Not found */
-               retval = LTTNG_ERR_PROCESS_ATTR_MISSING;
-               goto end;
-       }
-       ret = lttng_ht_del(id_tracker->ht, &iter);
-       LTTNG_ASSERT(!ret);
-
-       destroy_id_tracker_node(tracker_node);
-end:
-       return retval;
-}
-
-static struct ust_id_tracker *get_id_tracker(struct ltt_ust_session *session,
-               enum lttng_process_attr process_attr)
-{
-       switch (process_attr) {
-       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
-               return &session->vpid_tracker;
-       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
-               return &session->vuid_tracker;
-       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
-               return &session->vgid_tracker;
-       default:
-               return NULL;
-       }
-}
-
-static struct process_attr_tracker *_trace_ust_get_process_attr_tracker(
-               struct ltt_ust_session *session,
-               enum lttng_process_attr process_attr)
-{
-       switch (process_attr) {
-       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
-               return session->tracker_vpid;
-       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
-               return session->tracker_vuid;
-       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
-               return session->tracker_vgid;
-       default:
-               return NULL;
-       }
-}
-
-const struct process_attr_tracker *trace_ust_get_process_attr_tracker(
-               struct ltt_ust_session *session,
-               enum lttng_process_attr process_attr)
-{
-       return (const struct process_attr_tracker *)
-                       _trace_ust_get_process_attr_tracker(
-                                       session, process_attr);
-}
-
-/*
- * The session lock is held when calling this function.
- */
-int trace_ust_id_tracker_lookup(enum lttng_process_attr process_attr,
-               struct ltt_ust_session *session,
-               int id)
-{
-       struct lttng_ht_iter iter;
-       struct ust_id_tracker *id_tracker;
-
-       id_tracker = get_id_tracker(session, process_attr);
-       if (!id_tracker) {
-               abort();
-       }
-       if (!id_tracker->ht) {
-               return 1;
-       }
-       if (id_tracker_lookup(id_tracker, id, &iter)) {
-               return 1;
-       }
-       return 0;
-}
-
-/*
- * Called with the session lock held.
- */
-enum lttng_error_code trace_ust_process_attr_tracker_set_tracking_policy(
-               struct ltt_ust_session *session,
-               enum lttng_process_attr process_attr,
-               enum lttng_tracking_policy policy)
-{
-       int ret;
-       enum lttng_error_code ret_code = LTTNG_OK;
-       struct ust_id_tracker *id_tracker =
-                       get_id_tracker(session, process_attr);
-       struct process_attr_tracker *tracker =
-                       _trace_ust_get_process_attr_tracker(
-                                       session, process_attr);
-       bool should_update_apps = false;
-       enum lttng_tracking_policy previous_policy;
-
-       if (!tracker) {
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       previous_policy = process_attr_tracker_get_tracking_policy(tracker);
-       ret = process_attr_tracker_set_tracking_policy(tracker, policy);
-       if (ret) {
-               ret_code = LTTNG_ERR_UNK;
-               goto end;
-       }
-
-       if (previous_policy == policy) {
-               goto end;
-       }
-
-       switch (policy) {
-       case LTTNG_TRACKING_POLICY_INCLUDE_ALL:
-               /* Track all values: destroy tracker if exists. */
-               if (id_tracker->ht) {
-                       fini_id_tracker(id_tracker);
-                       /* Ensure all apps have session. */
-                       should_update_apps = true;
-               }
-               break;
-       case LTTNG_TRACKING_POLICY_EXCLUDE_ALL:
-       case LTTNG_TRACKING_POLICY_INCLUDE_SET:
-               /* fall-through. */
-               fini_id_tracker(id_tracker);
-               ret_code = init_id_tracker(id_tracker);
-               if (ret_code != LTTNG_OK) {
-                       ERR("Error initializing ID tracker");
-                       goto end;
-               }
-               /* Remove all apps from session. */
-               should_update_apps = true;
-               break;
-       default:
-               abort();
-       }
-       if (should_update_apps && session->active) {
-               ust_app_global_update_all(session);
-       }
-end:
-       return ret_code;
-}
-
-/* Called with the session lock held. */
-enum lttng_error_code trace_ust_process_attr_tracker_inclusion_set_add_value(
-               struct ltt_ust_session *session,
-               enum lttng_process_attr process_attr,
-               const struct process_attr_value *value)
-{
-       enum lttng_error_code ret_code = LTTNG_OK;
-       bool should_update_apps = false;
-       struct ust_id_tracker *id_tracker =
-                       get_id_tracker(session, process_attr);
-       struct process_attr_tracker *tracker;
-       int integral_value;
-       enum process_attr_tracker_status status;
-       struct ust_app *app;
-
-       /*
-        * Convert process attribute tracker value to the integral
-        * representation required by the kern-ctl API.
-        */
-       switch (process_attr) {
-       case LTTNG_PROCESS_ATTR_PROCESS_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
-               integral_value = (int) value->value.pid;
-               break;
-       case LTTNG_PROCESS_ATTR_USER_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
-               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_USER_NAME) {
-                       uid_t uid;
-
-                       ret_code = utils_user_id_from_name(
-                                       value->value.user_name, &uid);
-                       if (ret_code != LTTNG_OK) {
-                               goto end;
-                       }
-                       integral_value = (int) uid;
-               } else {
-                       integral_value = (int) value->value.uid;
-               }
-               break;
-       case LTTNG_PROCESS_ATTR_GROUP_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
-               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_GROUP_NAME) {
-                       gid_t gid;
-
-                       ret_code = utils_group_id_from_name(
-                                       value->value.group_name, &gid);
-                       if (ret_code != LTTNG_OK) {
-                               goto end;
-                       }
-                       integral_value = (int) gid;
-               } else {
-                       integral_value = (int) value->value.gid;
-               }
-               break;
-       default:
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       tracker = _trace_ust_get_process_attr_tracker(session, process_attr);
-       if (!tracker) {
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       status = process_attr_tracker_inclusion_set_add_value(tracker, value);
-       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
-               switch (status) {
-               case PROCESS_ATTR_TRACKER_STATUS_EXISTS:
-                       ret_code = LTTNG_ERR_PROCESS_ATTR_EXISTS;
-                       break;
-               case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
-                       ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
-                       break;
-               case PROCESS_ATTR_TRACKER_STATUS_ERROR:
-               default:
-                       ret_code = LTTNG_ERR_UNK;
-                       break;
-               }
-               goto end;
-       }
-
-       DBG("User space track %s %d for session id %" PRIu64,
-                       lttng_process_attr_to_string(process_attr),
-                       integral_value, session->id);
-
-       ret_code = id_tracker_add_id(id_tracker, integral_value);
-       if (ret_code != LTTNG_OK) {
-               goto end;
-       }
-       /* Add session to application */
-       switch (process_attr) {
-       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
-               app = ust_app_find_by_pid(integral_value);
-               if (app) {
-                       should_update_apps = true;
-               }
-               break;
-       default:
-               should_update_apps = true;
-               break;
-       }
-       if (should_update_apps && session->active) {
-               ust_app_global_update_all(session);
-       }
-end:
-       return ret_code;
-}
-
-/* Called with the session lock held. */
-enum lttng_error_code trace_ust_process_attr_tracker_inclusion_set_remove_value(
-               struct ltt_ust_session *session,
-               enum lttng_process_attr process_attr,
-               const struct process_attr_value *value)
-{
-       enum lttng_error_code ret_code = LTTNG_OK;
-       bool should_update_apps = false;
-       struct ust_id_tracker *id_tracker =
-                       get_id_tracker(session, process_attr);
-       struct process_attr_tracker *tracker;
-       int integral_value;
-       enum process_attr_tracker_status status;
-       struct ust_app *app;
-
-       /*
-        * Convert process attribute tracker value to the integral
-        * representation required by the kern-ctl API.
-        */
-       switch (process_attr) {
-       case LTTNG_PROCESS_ATTR_PROCESS_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
-               integral_value = (int) value->value.pid;
-               break;
-       case LTTNG_PROCESS_ATTR_USER_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
-               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_USER_NAME) {
-                       uid_t uid;
-
-                       ret_code = utils_user_id_from_name(
-                                       value->value.user_name, &uid);
-                       if (ret_code != LTTNG_OK) {
-                               goto end;
-                       }
-                       integral_value = (int) uid;
-               } else {
-                       integral_value = (int) value->value.uid;
-               }
-               break;
-       case LTTNG_PROCESS_ATTR_GROUP_ID:
-       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
-               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_GROUP_NAME) {
-                       gid_t gid;
-
-                       ret_code = utils_group_id_from_name(
-                                       value->value.group_name, &gid);
-                       if (ret_code != LTTNG_OK) {
-                               goto end;
-                       }
-                       integral_value = (int) gid;
-               } else {
-                       integral_value = (int) value->value.gid;
-               }
-               break;
-       default:
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       tracker = _trace_ust_get_process_attr_tracker(session, process_attr);
-       if (!tracker) {
-               ret_code = LTTNG_ERR_INVALID;
-               goto end;
-       }
-
-       status = process_attr_tracker_inclusion_set_remove_value(
-                       tracker, value);
-       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
-               switch (status) {
-               case PROCESS_ATTR_TRACKER_STATUS_MISSING:
-                       ret_code = LTTNG_ERR_PROCESS_ATTR_MISSING;
-                       break;
-               case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
-                       ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
-                       break;
-               case PROCESS_ATTR_TRACKER_STATUS_ERROR:
-               default:
-                       ret_code = LTTNG_ERR_UNK;
-                       break;
-               }
-               goto end;
-       }
-
-       DBG("User space untrack %s %d for session id %" PRIu64,
-                       lttng_process_attr_to_string(process_attr),
-                       integral_value, session->id);
-
-       ret_code = id_tracker_del_id(id_tracker, integral_value);
-       if (ret_code != LTTNG_OK) {
-               goto end;
-       }
-       /* Add session to application */
-       switch (process_attr) {
-       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
-               app = ust_app_find_by_pid(integral_value);
-               if (app) {
-                       should_update_apps = true;
-               }
-               break;
-       default:
-               should_update_apps = true;
-               break;
-       }
-       if (should_update_apps && session->active) {
-               ust_app_global_update_all(session);
-       }
-end:
-       return ret_code;
-}
-
-/*
- * RCU safe free context structure.
- */
-static void destroy_context_rcu(struct rcu_head *head)
-{
-       struct lttng_ht_node_ulong *node =
-               caa_container_of(head, struct lttng_ht_node_ulong, head);
-       struct ltt_ust_context *ctx =
-               caa_container_of(node, struct ltt_ust_context, node);
-
-       trace_ust_destroy_context(ctx);
-}
-
-/*
- * Cleanup UST context hash table.
- */
-static void destroy_contexts(struct lttng_ht *ht)
-{
-       int ret;
-       struct lttng_ht_node_ulong *node;
-       struct lttng_ht_iter iter;
-       struct ltt_ust_context *ctx;
-
-       LTTNG_ASSERT(ht);
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(ht->ht, &iter.iter, node, node) {
-               /* Remove from ordered list. */
-               ctx = caa_container_of(node, struct ltt_ust_context, node);
-               cds_list_del(&ctx->list);
-               /* Remove from channel's hash table. */
-               ret = lttng_ht_del(ht, &iter);
-               if (!ret) {
-                       call_rcu(&node->head, destroy_context_rcu);
-               }
-       }
-       rcu_read_unlock();
-
-       ht_cleanup_push(ht);
-}
-
-/*
- * Cleanup ust event structure.
- */
-void trace_ust_destroy_event(struct ltt_ust_event *event)
-{
-       LTTNG_ASSERT(event);
-
-       DBG2("Trace destroy UST event %s", event->attr.name);
-       free(event->filter_expression);
-       free(event->filter);
-       free(event->exclusion);
-       free(event);
-}
-
-/*
- * Cleanup ust context structure.
- */
-void trace_ust_destroy_context(struct ltt_ust_context *ctx)
-{
-       LTTNG_ASSERT(ctx);
-
-       if (ctx->ctx.ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
-               free(ctx->ctx.u.app_ctx.provider_name);
-               free(ctx->ctx.u.app_ctx.ctx_name);
-       }
-       free(ctx);
-}
-
-/*
- * URCU intermediate call to complete destroy event.
- */
-static void destroy_event_rcu(struct rcu_head *head)
-{
-       struct lttng_ht_node_str *node =
-               caa_container_of(head, struct lttng_ht_node_str, head);
-       struct ltt_ust_event *event =
-               caa_container_of(node, struct ltt_ust_event, node);
-
-       trace_ust_destroy_event(event);
-}
-
-/*
- * Cleanup UST events hashtable.
- */
-static void destroy_events(struct lttng_ht *events)
-{
-       int ret;
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(events);
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(events->ht, &iter.iter, node, node) {
-               ret = lttng_ht_del(events, &iter);
-               LTTNG_ASSERT(!ret);
-               call_rcu(&node->head, destroy_event_rcu);
-       }
-       rcu_read_unlock();
-
-       ht_cleanup_push(events);
-}
-
-/*
- * Cleanup ust channel structure.
- *
- * Should _NOT_ be called with RCU read lock held.
- */
-static void _trace_ust_destroy_channel(struct ltt_ust_channel *channel)
-{
-       LTTNG_ASSERT(channel);
-
-       DBG2("Trace destroy UST channel %s", channel->name);
-
-       free(channel);
-}
-
-/*
- * URCU intermediate call to complete destroy channel.
- */
-static void destroy_channel_rcu(struct rcu_head *head)
-{
-       struct lttng_ht_node_str *node =
-               caa_container_of(head, struct lttng_ht_node_str, head);
-       struct ltt_ust_channel *channel =
-               caa_container_of(node, struct ltt_ust_channel, node);
-
-       _trace_ust_destroy_channel(channel);
-}
-
-void trace_ust_destroy_channel(struct ltt_ust_channel *channel)
-{
-       /* Destroying all events of the channel */
-       destroy_events(channel->events);
-       /* Destroying all context of the channel */
-       destroy_contexts(channel->ctx);
-
-       call_rcu(&channel->node.head, destroy_channel_rcu);
-}
-
-/*
- * Remove an UST channel from a channel HT.
- */
-void trace_ust_delete_channel(struct lttng_ht *ht,
-               struct ltt_ust_channel *channel)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(ht);
-       LTTNG_ASSERT(channel);
-
-       iter.iter.node = &channel->node.node;
-       ret = lttng_ht_del(ht, &iter);
-       LTTNG_ASSERT(!ret);
-}
-
-/*
- * Iterate over a hash table containing channels and cleanup safely.
- */
-static void destroy_channels(struct lttng_ht *channels)
-{
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(channels);
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(channels->ht, &iter.iter, node, node) {
-               struct ltt_ust_channel *chan =
-                       caa_container_of(node, struct ltt_ust_channel, node);
-
-               trace_ust_delete_channel(channels, chan);
-               trace_ust_destroy_channel(chan);
-       }
-       rcu_read_unlock();
-
-       ht_cleanup_push(channels);
-}
-
-/*
- * Cleanup UST global domain.
- */
-static void destroy_domain_global(struct ltt_ust_domain_global *dom)
-{
-       LTTNG_ASSERT(dom);
-
-       destroy_channels(dom->channels);
-}
-
-/*
- * Cleanup ust session structure, keeping data required by
- * destroy notifier.
- *
- * Should *NOT* be called with RCU read-side lock held.
- */
-void trace_ust_destroy_session(struct ltt_ust_session *session)
-{
-       struct agent *agt;
-       struct buffer_reg_uid *reg, *sreg;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(session);
-
-       DBG2("Trace UST destroy session %" PRIu64, session->id);
-
-       /* Cleaning up UST domain */
-       destroy_domain_global(&session->domain_global);
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(session->agents->ht, &iter.iter, agt, node.node) {
-               int ret = lttng_ht_del(session->agents, &iter);
-
-               LTTNG_ASSERT(!ret);
-               agent_destroy(agt);
-       }
-       rcu_read_unlock();
-
-       ht_cleanup_push(session->agents);
-
-       /* Cleanup UID buffer registry object(s). */
-       cds_list_for_each_entry_safe(reg, sreg, &session->buffer_reg_uid_list,
-                       lnode) {
-               cds_list_del(&reg->lnode);
-               buffer_reg_uid_remove(reg);
-               buffer_reg_uid_destroy(reg, session->consumer);
-       }
-
-       process_attr_tracker_destroy(session->tracker_vpid);
-       process_attr_tracker_destroy(session->tracker_vuid);
-       process_attr_tracker_destroy(session->tracker_vgid);
-
-       fini_id_tracker(&session->vpid_tracker);
-       fini_id_tracker(&session->vuid_tracker);
-       fini_id_tracker(&session->vgid_tracker);
-       lttng_trace_chunk_put(session->current_trace_chunk);
-}
-
-/* Free elements needed by destroy notifiers. */
-void trace_ust_free_session(struct ltt_ust_session *session)
-{
-       consumer_output_put(session->consumer);
-       free(session);
-}
diff --git a/src/bin/lttng-sessiond/trace-ust.cpp b/src/bin/lttng-sessiond/trace-ust.cpp
new file mode 100644
index 0000000..0f303bf
--- /dev/null
+++ b/src/bin/lttng-sessiond/trace-ust.cpp
@@ -0,0 +1,1440 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <common/common.h>
+#include <common/defaults.h>
+#include <common/trace-chunk.h>
+#include <common/utils.h>
+
+#include "buffer-registry.h"
+#include "trace-ust.h"
+#include "utils.h"
+#include "ust-app.h"
+#include "agent.h"
+
+/*
+ * Match function for the events hash table lookup.
+ *
+ * Matches by name only. Used by the disable command.
+ */
+int trace_ust_ht_match_event_by_name(struct cds_lfht_node *node,
+               const void *_key)
+{
+       struct ltt_ust_event *event;
+       const char *name;
+
+       LTTNG_ASSERT(node);
+       LTTNG_ASSERT(_key);
+
+       event = caa_container_of(node, struct ltt_ust_event, node.node);
+       name = (const char *) _key;
+
+       /* Event name */
+       if (strncmp(event->attr.name, name, sizeof(event->attr.name)) != 0) {
+               goto no_match;
+       }
+
+       /* Match */
+       return 1;
+
+no_match:
+       return 0;
+}
+
+/*
+ * Match function for the hash table lookup.
+ *
+ * It matches an ust event based on three attributes which are the event name,
+ * the filter bytecode and the loglevel.
+ */
+int trace_ust_ht_match_event(struct cds_lfht_node *node, const void *_key)
+{
+       struct ltt_ust_event *event;
+       const struct ltt_ust_ht_key *key;
+       int ev_loglevel_value;
+       int ll_match;
+
+       LTTNG_ASSERT(node);
+       LTTNG_ASSERT(_key);
+
+       event = caa_container_of(node, struct ltt_ust_event, node.node);
+       key = (ltt_ust_ht_key *) _key;
+       ev_loglevel_value = event->attr.loglevel;
+
+       /* Match the 4 elements of the key: name, filter, loglevel, exclusions. */
+
+       /* Event name */
+       if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
+               goto no_match;
+       }
+
+       /* Event loglevel value and type. */
+       ll_match = loglevels_match(event->attr.loglevel_type,
+               ev_loglevel_value, key->loglevel_type,
+               key->loglevel_value, LTTNG_UST_ABI_LOGLEVEL_ALL);
+
+       if (!ll_match) {
+               goto no_match;
+       }
+
+       /* Only one of the filters is NULL, fail. */
+       if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
+               goto no_match;
+       }
+
+       if (key->filter && event->filter) {
+               /* Both filters exists, check length followed by the bytecode. */
+               if (event->filter->len != key->filter->len ||
+                               memcmp(event->filter->data, key->filter->data,
+                                       event->filter->len) != 0) {
+                       goto no_match;
+               }
+       }
+
+       /* If only one of the exclusions is NULL, fail. */
+       if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
+               goto no_match;
+       }
+
+       if (key->exclusion && event->exclusion) {
+               size_t i;
+
+               /* Check exclusion counts first. */
+               if (event->exclusion->count != key->exclusion->count) {
+                       goto no_match;
+               }
+
+               /* Compare names individually. */
+               for (i = 0; i < event->exclusion->count; ++i) {
+                       size_t j;
+                       bool found = false;
+                       const char *name_ev =
+                               LTTNG_EVENT_EXCLUSION_NAME_AT(
+                                       event->exclusion, i);
+
+                       /*
+                        * Compare this exclusion name to all the key's
+                        * exclusion names.
+                        */
+                       for (j = 0; j < key->exclusion->count; ++j) {
+                               const char *name_key =
+                                       LTTNG_EVENT_EXCLUSION_NAME_AT(
+                                               key->exclusion, j);
+
+                               if (!strncmp(name_ev, name_key,
+                                               LTTNG_SYMBOL_NAME_LEN)) {
+                                       /* Names match! */
+                                       found = true;
+                                       break;
+                               }
+                       }
+
+                       /*
+                        * If the current exclusion name was not found amongst
+                        * the key's exclusion names, then there's no match.
+                        */
+                       if (!found) {
+                               goto no_match;
+                       }
+               }
+       }
+       /* Match. */
+       return 1;
+
+no_match:
+       return 0;
+}
+
+/*
+ * Find the channel in the hashtable and return channel pointer. RCU read side
+ * lock MUST be acquired before calling this.
+ */
+struct ltt_ust_channel *trace_ust_find_channel_by_name(struct lttng_ht *ht,
+               const char *name)
+{
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+
+       /*
+        * If we receive an empty string for channel name, it means the
+        * default channel name is requested.
+        */
+       if (name[0] == '\0')
+               name = DEFAULT_CHANNEL_NAME;
+
+       lttng_ht_lookup(ht, (void *)name, &iter);
+       node = lttng_ht_iter_get_node_str(&iter);
+       if (node == NULL) {
+               goto error;
+       }
+
+       DBG2("Trace UST channel %s found by name", name);
+
+       return caa_container_of(node, struct ltt_ust_channel, node);
+
+error:
+       DBG2("Trace UST channel %s not found by name", name);
+       return NULL;
+}
+
+/*
+ * Find the event in the hashtable and return event pointer. RCU read side lock
+ * MUST be acquired before calling this.
+ */
+struct ltt_ust_event *trace_ust_find_event(struct lttng_ht *ht,
+               char *name, struct lttng_bytecode *filter,
+               enum lttng_ust_abi_loglevel_type loglevel_type, int loglevel_value,
+               struct lttng_event_exclusion *exclusion)
+{
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+       struct ltt_ust_ht_key key;
+
+       LTTNG_ASSERT(name);
+       LTTNG_ASSERT(ht);
+
+       key.name = name;
+       key.filter = filter;
+       key.loglevel_type = loglevel_type;
+       key.loglevel_value = loglevel_value;
+       key.exclusion = exclusion;
+
+       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
+                       trace_ust_ht_match_event, &key, &iter.iter);
+       node = lttng_ht_iter_get_node_str(&iter);
+       if (node == NULL) {
+               goto error;
+       }
+
+       DBG2("Trace UST event %s found", key.name);
+
+       return caa_container_of(node, struct ltt_ust_event, node);
+
+error:
+       DBG2("Trace UST event %s NOT found", key.name);
+       return NULL;
+}
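
The lookup helpers above must run under the RCU read-side lock. A minimal caller sketch, illustration only and not from the commit itself: `ust_event_exists_sketch` is an invented name, and the snippet assumes the same includes as trace-ust.cpp and a valid `struct ltt_ust_channel` as built by trace_ust_create_channel() below.

/*
 * Sketch only: check whether a plain event (no filter, no exclusion) with
 * this name exists in a channel, holding the RCU read-side lock as the
 * comments above require.
 */
static bool ust_event_exists_sketch(struct ltt_ust_channel *chan, char *event_name)
{
	bool exists;

	rcu_read_lock();
	/* Loglevel "all" with value -1, as set by trace_ust_create_event(). */
	exists = trace_ust_find_event(chan->events, event_name, NULL,
			LTTNG_UST_ABI_LOGLEVEL_ALL, -1, NULL) != NULL;
	rcu_read_unlock();

	return exists;
}
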
+
+/*
+ * Lookup an agent in the session agents hash table by domain type and return
+ * the object if found else NULL.
+ *
+ * RCU read side lock must be acquired before calling and only released
+ * once the agent is no longer in scope or being used.
+ */
+struct agent *trace_ust_find_agent(struct ltt_ust_session *session,
+               enum lttng_domain_type domain_type)
+{
+       struct agent *agt = NULL;
+       struct lttng_ht_node_u64 *node;
+       struct lttng_ht_iter iter;
+       uint64_t key;
+
+       LTTNG_ASSERT(session);
+
+       DBG3("Trace ust agent lookup for domain %d", domain_type);
+
+       key = domain_type;
+
+       lttng_ht_lookup(session->agents, &key, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (!node) {
+               goto end;
+       }
+       agt = caa_container_of(node, struct agent, node);
+
+end:
+       return agt;
+}
+
+/*
+ * Allocate and initialize a ust session data structure.
+ *
+ * Return pointer to structure or NULL.
+ */
+struct ltt_ust_session *trace_ust_create_session(uint64_t session_id)
+{
+       struct ltt_ust_session *lus;
+
+       /* Allocate a new ltt ust session */
+       lus = (ltt_ust_session *) zmalloc(sizeof(struct ltt_ust_session));
+       if (lus == NULL) {
+               PERROR("create ust session zmalloc");
+               goto error_alloc;
+       }
+
+       /* Init data structure */
+       lus->id = session_id;
+       lus->active = 0;
+
+       /* Set default metadata channel attribute. */
+       lus->metadata_attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
+       lus->metadata_attr.subbuf_size = default_get_metadata_subbuf_size();
+       lus->metadata_attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
+       lus->metadata_attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
+       lus->metadata_attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
+       lus->metadata_attr.output = LTTNG_UST_ABI_MMAP;
+
+       /*
+        * Default buffer type. This can be changed through an enable channel
+        * requesting a different type. Note that this can only be changed once
+        * during the session lifetime which is at the first enable channel and
+        * only before start. The flag buffer_type_changed indicates the status.
+        */
+       lus->buffer_type = LTTNG_BUFFER_PER_UID;
+       /* Once set to 1, the buffer_type is immutable for the session. */
+       lus->buffer_type_changed = 0;
+       /* Init it in case it get used after allocation. */
+       CDS_INIT_LIST_HEAD(&lus->buffer_reg_uid_list);
+
+       /* Alloc UST global domain channels' HT */
+       lus->domain_global.channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+       /* Alloc agent hash table. */
+       lus->agents = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+
+       lus->tracker_vpid = process_attr_tracker_create();
+       if (!lus->tracker_vpid) {
+               goto error;
+       }
+       lus->tracker_vuid = process_attr_tracker_create();
+       if (!lus->tracker_vuid) {
+               goto error;
+       }
+       lus->tracker_vgid = process_attr_tracker_create();
+       if (!lus->tracker_vgid) {
+               goto error;
+       }
+       lus->consumer = consumer_create_output(CONSUMER_DST_LOCAL);
+       if (lus->consumer == NULL) {
+               goto error;
+       }
+
+       DBG2("UST trace session create successful");
+
+       return lus;
+
+error:
+       process_attr_tracker_destroy(lus->tracker_vpid);
+       process_attr_tracker_destroy(lus->tracker_vuid);
+       process_attr_tracker_destroy(lus->tracker_vgid);
+       ht_cleanup_push(lus->domain_global.channels);
+       ht_cleanup_push(lus->agents);
+       free(lus);
+error_alloc:
+       return NULL;
+}
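
A hypothetical lifecycle sketch for the allocator above, pairing it with trace_ust_destroy_session() and trace_ust_free_session() from the removed trace-ust.c shown earlier (the C++ conversion is assumed to keep the same interface); `ust_session_lifecycle_sketch` is an invented name and error handling is reduced to the minimum.

/*
 * Sketch only: create a UST session object and tear it down the way the
 * session destroy path does. trace_ust_destroy_session() must not be
 * called with the RCU read-side lock held.
 */
static int ust_session_lifecycle_sketch(uint64_t session_id)
{
	struct ltt_ust_session *lus = trace_ust_create_session(session_id);

	if (!lus) {
		return -1;
	}

	/* ... enable channels, configure trackers, start tracing ... */

	trace_ust_destroy_session(lus);
	trace_ust_free_session(lus);
	return 0;
}
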
+
+/*
+ * Allocate and initialize a ust channel data structure.
+ *
+ * Return pointer to structure or NULL.
+ */
+struct ltt_ust_channel *trace_ust_create_channel(struct lttng_channel *chan,
+               enum lttng_domain_type domain)
+{
+       struct ltt_ust_channel *luc;
+
+       LTTNG_ASSERT(chan);
+
+       luc = (ltt_ust_channel *) zmalloc(sizeof(struct ltt_ust_channel));
+       if (luc == NULL) {
+               PERROR("ltt_ust_channel zmalloc");
+               goto error;
+       }
+
+       luc->domain = domain;
+
+       /* Copy UST channel attributes */
+       luc->attr.overwrite = chan->attr.overwrite;
+       luc->attr.subbuf_size = chan->attr.subbuf_size;
+       luc->attr.num_subbuf = chan->attr.num_subbuf;
+       luc->attr.switch_timer_interval = chan->attr.switch_timer_interval;
+       luc->attr.read_timer_interval = chan->attr.read_timer_interval;
+       luc->attr.output = (enum lttng_ust_abi_output) chan->attr.output;
+       luc->monitor_timer_interval = ((struct lttng_channel_extended *)
+                       chan->attr.extended.ptr)->monitor_timer_interval;
+       luc->attr.u.s.blocking_timeout = ((struct lttng_channel_extended *)
+                       chan->attr.extended.ptr)->blocking_timeout;
+
+       /* Translate to UST output enum */
+       switch (luc->attr.output) {
+       default:
+               luc->attr.output = LTTNG_UST_ABI_MMAP;
+               break;
+       }
+
+       /*
+        * If we receive an empty string for channel name, it means the
+        * default channel name is requested.
+        */
+       if (chan->name[0] == '\0') {
+               strncpy(luc->name, DEFAULT_CHANNEL_NAME, sizeof(luc->name));
+       } else {
+               /* Copy channel name */
+               strncpy(luc->name, chan->name, sizeof(luc->name));
+       }
+       luc->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+
+       /* Init node */
+       lttng_ht_node_init_str(&luc->node, luc->name);
+       CDS_INIT_LIST_HEAD(&luc->ctx_list);
+
+       /* Alloc hash tables */
+       luc->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+       luc->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+
+       /* On-disk circular buffer parameters */
+       luc->tracefile_size = chan->attr.tracefile_size;
+       luc->tracefile_count = chan->attr.tracefile_count;
+
+       DBG2("Trace UST channel %s created", luc->name);
+
+error:
+       return luc;
+}
+
+/*
+ * Validates an exclusion list.
+ *
+ * Returns 0 if valid, negative value if invalid.
+ */
+static int validate_exclusion(struct lttng_event_exclusion *exclusion)
+{
+       size_t i;
+       int ret = 0;
+
+       LTTNG_ASSERT(exclusion);
+
+       for (i = 0; i < exclusion->count; ++i) {
+               size_t j;
+               const char *name_a =
+                       LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, i);
+
+               for (j = 0; j < i; ++j) {
+                       const char *name_b =
+                               LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, j);
+
+                       if (!strncmp(name_a, name_b, LTTNG_SYMBOL_NAME_LEN)) {
+                               /* Match! */
+                               ret = -1;
+                               goto end;
+                       }
+               }
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Allocate and initialize a ust event. Set name and event type.
+ * We own filter_expression, filter, and exclusion.
+ *
+ * Return an lttng_error_code
+ */
+enum lttng_error_code trace_ust_create_event(struct lttng_event *ev,
+               char *filter_expression,
+               struct lttng_bytecode *filter,
+               struct lttng_event_exclusion *exclusion,
+               bool internal_event,
+               struct ltt_ust_event **ust_event)
+{
+       struct ltt_ust_event *local_ust_event;
+       enum lttng_error_code ret = LTTNG_OK;
+
+       LTTNG_ASSERT(ev);
+
+       if (exclusion && validate_exclusion(exclusion)) {
+               ret = LTTNG_ERR_INVALID;
+               goto error;
+       }
+
+       local_ust_event = (ltt_ust_event *) zmalloc(sizeof(struct ltt_ust_event));
+       if (local_ust_event == NULL) {
+               PERROR("ust event zmalloc");
+               ret = LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       local_ust_event->internal = internal_event;
+
+       switch (ev->type) {
+       case LTTNG_EVENT_PROBE:
+               local_ust_event->attr.instrumentation = LTTNG_UST_ABI_PROBE;
+               break;
+       case LTTNG_EVENT_FUNCTION:
+               local_ust_event->attr.instrumentation = LTTNG_UST_ABI_FUNCTION;
+               break;
+       case LTTNG_EVENT_FUNCTION_ENTRY:
+               local_ust_event->attr.instrumentation = LTTNG_UST_ABI_FUNCTION;
+               break;
+       case LTTNG_EVENT_TRACEPOINT:
+               local_ust_event->attr.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
+               break;
+       default:
+               ERR("Unknown ust instrumentation type (%d)", ev->type);
+               ret = LTTNG_ERR_INVALID;
+               goto error_free_event;
+       }
+
+       /* Copy event name */
+       strncpy(local_ust_event->attr.name, ev->name, LTTNG_UST_ABI_SYM_NAME_LEN);
+       local_ust_event->attr.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+
+       switch (ev->loglevel_type) {
+       case LTTNG_EVENT_LOGLEVEL_ALL:
+               local_ust_event->attr.loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
+               local_ust_event->attr.loglevel = -1;    /* Force to -1 */
+               break;
+       case LTTNG_EVENT_LOGLEVEL_RANGE:
+               local_ust_event->attr.loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
+               local_ust_event->attr.loglevel = ev->loglevel;
+               break;
+       case LTTNG_EVENT_LOGLEVEL_SINGLE:
+               local_ust_event->attr.loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
+               local_ust_event->attr.loglevel = ev->loglevel;
+               break;
+       default:
+               ERR("Unknown ust loglevel type (%d)", ev->loglevel_type);
+               ret = LTTNG_ERR_INVALID;
+               goto error_free_event;
+       }
+
+       /* Same layout. */
+       local_ust_event->filter_expression = filter_expression;
+       local_ust_event->filter = filter;
+       local_ust_event->exclusion = exclusion;
+
+       /* Init node */
+       lttng_ht_node_init_str(&local_ust_event->node, local_ust_event->attr.name);
+
+       DBG2("Trace UST event %s, loglevel (%d,%d) created",
+               local_ust_event->attr.name, local_ust_event->attr.loglevel_type,
+               local_ust_event->attr.loglevel);
+
+       *ust_event = local_ust_event;
+
+       return ret;
+
+error_free_event:
+       free(local_ust_event);
+error:
+       free(filter_expression);
+       free(filter);
+       free(exclusion);
+       return ret;
+}
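
A hedged caller sketch for the ownership rule stated above: `filter_expression`, `filter` and `exclusion` always end up owned by the callee, whether creation succeeds (the event keeps them) or fails (the error path frees them), so the caller must not free them afterwards. `add_tracepoint_event_sketch` is an invented helper, not part of the commit.

/* Sketch only: ownership of the three heap-allocated arguments moves into the call. */
static enum lttng_error_code add_tracepoint_event_sketch(struct lttng_event *ev,
		char *filter_expression, struct lttng_bytecode *filter,
		struct lttng_event_exclusion *exclusion)
{
	struct ltt_ust_event *ust_event = NULL;
	enum lttng_error_code ret;

	ret = trace_ust_create_event(ev, filter_expression, filter, exclusion,
			false, &ust_event);
	if (ret != LTTNG_OK) {
		/* The arguments were already freed by trace_ust_create_event(). */
		return ret;
	}

	/* The event now owns them; trace_ust_destroy_event() releases everything. */
	trace_ust_destroy_event(ust_event);
	return LTTNG_OK;
}
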
+
+static
+int trace_ust_context_type_event_to_ust(
+               enum lttng_event_context_type type)
+{
+       int utype;
+
+       switch (type) {
+       case LTTNG_EVENT_CONTEXT_VTID:
+               utype = LTTNG_UST_ABI_CONTEXT_VTID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VPID:
+               utype = LTTNG_UST_ABI_CONTEXT_VPID;
+               break;
+       case LTTNG_EVENT_CONTEXT_PTHREAD_ID:
+               utype = LTTNG_UST_ABI_CONTEXT_PTHREAD_ID;
+               break;
+       case LTTNG_EVENT_CONTEXT_PROCNAME:
+               utype = LTTNG_UST_ABI_CONTEXT_PROCNAME;
+               break;
+       case LTTNG_EVENT_CONTEXT_IP:
+               utype = LTTNG_UST_ABI_CONTEXT_IP;
+               break;
+       case LTTNG_EVENT_CONTEXT_PERF_THREAD_COUNTER:
+               if (!lttng_ust_ctl_has_perf_counters()) {
+                       utype = -1;
+                       WARN("Perf counters not implemented in UST");
+               } else {
+                       utype = LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER;
+               }
+               break;
+       case LTTNG_EVENT_CONTEXT_APP_CONTEXT:
+               utype = LTTNG_UST_ABI_CONTEXT_APP_CONTEXT;
+               break;
+       case LTTNG_EVENT_CONTEXT_CGROUP_NS:
+               utype = LTTNG_UST_ABI_CONTEXT_CGROUP_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_IPC_NS:
+               utype = LTTNG_UST_ABI_CONTEXT_IPC_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_MNT_NS:
+               utype = LTTNG_UST_ABI_CONTEXT_MNT_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_NET_NS:
+               utype = LTTNG_UST_ABI_CONTEXT_NET_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_PID_NS:
+               utype = LTTNG_UST_ABI_CONTEXT_PID_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_TIME_NS:
+               utype = LTTNG_UST_ABI_CONTEXT_TIME_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_USER_NS:
+               utype = LTTNG_UST_ABI_CONTEXT_USER_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_UTS_NS:
+               utype = LTTNG_UST_ABI_CONTEXT_UTS_NS;
+               break;
+       case LTTNG_EVENT_CONTEXT_VUID:
+               utype = LTTNG_UST_ABI_CONTEXT_VUID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VEUID:
+               utype = LTTNG_UST_ABI_CONTEXT_VEUID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VSUID:
+               utype = LTTNG_UST_ABI_CONTEXT_VSUID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VGID:
+               utype = LTTNG_UST_ABI_CONTEXT_VGID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VEGID:
+               utype = LTTNG_UST_ABI_CONTEXT_VEGID;
+               break;
+       case LTTNG_EVENT_CONTEXT_VSGID:
+               utype = LTTNG_UST_ABI_CONTEXT_VSGID;
+               break;
+       default:
+               utype = -1;
+               break;
+       }
+       return utype;
+}
+
+/*
+ * Return 1 if contexts match, 0 otherwise.
+ */
+int trace_ust_match_context(const struct ltt_ust_context *uctx,
+               const struct lttng_event_context *ctx)
+{
+       int utype;
+
+       utype = trace_ust_context_type_event_to_ust(ctx->ctx);
+       if (utype < 0) {
+               return 0;
+       }
+       if (uctx->ctx.ctx != utype) {
+               return 0;
+       }
+       switch (utype) {
+       case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
+               if (uctx->ctx.u.perf_counter.type
+                               != ctx->u.perf_counter.type) {
+                       return 0;
+               }
+               if (uctx->ctx.u.perf_counter.config
+                               != ctx->u.perf_counter.config) {
+                       return 0;
+               }
+               if (strncmp(uctx->ctx.u.perf_counter.name,
+                               ctx->u.perf_counter.name,
+                               LTTNG_UST_ABI_SYM_NAME_LEN)) {
+                       return 0;
+               }
+               break;
+       case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
+               LTTNG_ASSERT(uctx->ctx.u.app_ctx.provider_name);
+               LTTNG_ASSERT(uctx->ctx.u.app_ctx.ctx_name);
+               if (strcmp(uctx->ctx.u.app_ctx.provider_name,
+                               ctx->u.app_ctx.provider_name) ||
+                               strcmp(uctx->ctx.u.app_ctx.ctx_name,
+                               ctx->u.app_ctx.ctx_name)) {
+                       return 0;
+               }
+       default:
+               break;
+
+       }
+       return 1;
+}
+
+/*
+ * Allocate and initialize an UST context.
+ *
+ * Return pointer to structure or NULL.
+ */
+struct ltt_ust_context *trace_ust_create_context(
+               const struct lttng_event_context *ctx)
+{
+       struct ltt_ust_context *uctx = NULL;
+       int utype;
+
+       LTTNG_ASSERT(ctx);
+
+       utype = trace_ust_context_type_event_to_ust(ctx->ctx);
+       if (utype < 0) {
+               ERR("Invalid UST context");
+               goto end;
+       }
+
+       uctx = (ltt_ust_context *) zmalloc(sizeof(struct ltt_ust_context));
+       if (!uctx) {
+               PERROR("zmalloc ltt_ust_context");
+               goto end;
+       }
+
+       uctx->ctx.ctx = (enum lttng_ust_abi_context_type) utype;
+       switch (utype) {
+       case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
+               uctx->ctx.u.perf_counter.type = ctx->u.perf_counter.type;
+               uctx->ctx.u.perf_counter.config = ctx->u.perf_counter.config;
+               strncpy(uctx->ctx.u.perf_counter.name, ctx->u.perf_counter.name,
+                               LTTNG_UST_ABI_SYM_NAME_LEN);
+               uctx->ctx.u.perf_counter.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+               break;
+       case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
+       {
+               char *provider_name = NULL, *ctx_name = NULL;
+
+               provider_name = strdup(ctx->u.app_ctx.provider_name);
+               if (!provider_name) {
+                       goto error;
+               }
+               uctx->ctx.u.app_ctx.provider_name = provider_name;
+
+               ctx_name = strdup(ctx->u.app_ctx.ctx_name);
+               if (!ctx_name) {
+                       goto error;
+               }
+               uctx->ctx.u.app_ctx.ctx_name = ctx_name;
+               break;
+       }
+       default:
+               break;
+       }
+       lttng_ht_node_init_ulong(&uctx->node, (unsigned long) uctx->ctx.ctx);
+end:
+       return uctx;
+error:
+       trace_ust_destroy_context(uctx);
+       return NULL;
+}
+
+static void destroy_id_tracker_node_rcu(struct rcu_head *head)
+{
+       struct ust_id_tracker_node *tracker_node = caa_container_of(
+                       head, struct ust_id_tracker_node, node.head);
+       free(tracker_node);
+}
+
+static void destroy_id_tracker_node(struct ust_id_tracker_node *tracker_node)
+{
+       call_rcu(&tracker_node->node.head, destroy_id_tracker_node_rcu);
+}
+
+static int init_id_tracker(struct ust_id_tracker *id_tracker)
+{
+       int ret = LTTNG_OK;
+
+       id_tracker->ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+       if (!id_tracker->ht) {
+               ret = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Teardown id tracker content, but don't free id_tracker object.
+ */
+static void fini_id_tracker(struct ust_id_tracker *id_tracker)
+{
+       struct ust_id_tracker_node *tracker_node;
+       struct lttng_ht_iter iter;
+
+       if (!id_tracker->ht) {
+               return;
+       }
+       rcu_read_lock();
+       cds_lfht_for_each_entry (id_tracker->ht->ht, &iter.iter, tracker_node,
+                       node.node) {
+               int ret = lttng_ht_del(id_tracker->ht, &iter);
+
+               LTTNG_ASSERT(!ret);
+               destroy_id_tracker_node(tracker_node);
+       }
+       rcu_read_unlock();
+       ht_cleanup_push(id_tracker->ht);
+       id_tracker->ht = NULL;
+}
+
+static struct ust_id_tracker_node *id_tracker_lookup(
+               struct ust_id_tracker *id_tracker,
+               int id,
+               struct lttng_ht_iter *iter)
+{
+       unsigned long _id = (unsigned long) id;
+       struct lttng_ht_node_ulong *node;
+
+       lttng_ht_lookup(id_tracker->ht, (void *) _id, iter);
+       node = lttng_ht_iter_get_node_ulong(iter);
+       if (node) {
+               return caa_container_of(node, struct ust_id_tracker_node, node);
+       } else {
+               return NULL;
+       }
+}
+
+static int id_tracker_add_id(struct ust_id_tracker *id_tracker, int id)
+{
+       int retval = LTTNG_OK;
+       struct ust_id_tracker_node *tracker_node;
+       struct lttng_ht_iter iter;
+
+       if (id < 0) {
+               retval = LTTNG_ERR_INVALID;
+               goto end;
+       }
+       tracker_node = id_tracker_lookup(id_tracker, id, &iter);
+       if (tracker_node) {
+               /* Already exists. */
+               retval = LTTNG_ERR_PROCESS_ATTR_EXISTS;
+               goto end;
+       }
+       tracker_node = (ust_id_tracker_node *) zmalloc(sizeof(*tracker_node));
+       if (!tracker_node) {
+               retval = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+       lttng_ht_node_init_ulong(&tracker_node->node, (unsigned long) id);
+       lttng_ht_add_unique_ulong(id_tracker->ht, &tracker_node->node);
+end:
+       return retval;
+}
+
+static int id_tracker_del_id(struct ust_id_tracker *id_tracker, int id)
+{
+       int retval = LTTNG_OK, ret;
+       struct ust_id_tracker_node *tracker_node;
+       struct lttng_ht_iter iter;
+
+       if (id < 0) {
+               retval = LTTNG_ERR_INVALID;
+               goto end;
+       }
+       tracker_node = id_tracker_lookup(id_tracker, id, &iter);
+       if (!tracker_node) {
+               /* Not found */
+               retval = LTTNG_ERR_PROCESS_ATTR_MISSING;
+               goto end;
+       }
+       ret = lttng_ht_del(id_tracker->ht, &iter);
+       LTTNG_ASSERT(!ret);
+
+       destroy_id_tracker_node(tracker_node);
+end:
+       return retval;
+}
+
+static struct ust_id_tracker *get_id_tracker(struct ltt_ust_session *session,
+               enum lttng_process_attr process_attr)
+{
+       switch (process_attr) {
+       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
+               return &session->vpid_tracker;
+       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
+               return &session->vuid_tracker;
+       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
+               return &session->vgid_tracker;
+       default:
+               return NULL;
+       }
+}
+
+static struct process_attr_tracker *_trace_ust_get_process_attr_tracker(
+               struct ltt_ust_session *session,
+               enum lttng_process_attr process_attr)
+{
+       switch (process_attr) {
+       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
+               return session->tracker_vpid;
+       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
+               return session->tracker_vuid;
+       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
+               return session->tracker_vgid;
+       default:
+               return NULL;
+       }
+}
+
+const struct process_attr_tracker *trace_ust_get_process_attr_tracker(
+               struct ltt_ust_session *session,
+               enum lttng_process_attr process_attr)
+{
+       return (const struct process_attr_tracker *)
+                       _trace_ust_get_process_attr_tracker(
+                                       session, process_attr);
+}
+
+/*
+ * The session lock is held when calling this function.
+ */
+int trace_ust_id_tracker_lookup(enum lttng_process_attr process_attr,
+               struct ltt_ust_session *session,
+               int id)
+{
+       struct lttng_ht_iter iter;
+       struct ust_id_tracker *id_tracker;
+
+       id_tracker = get_id_tracker(session, process_attr);
+       if (!id_tracker) {
+               abort();
+       }
+       if (!id_tracker->ht) {
+               return 1;
+       }
+       if (id_tracker_lookup(id_tracker, id, &iter)) {
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * Called with the session lock held.
+ */
+enum lttng_error_code trace_ust_process_attr_tracker_set_tracking_policy(
+               struct ltt_ust_session *session,
+               enum lttng_process_attr process_attr,
+               enum lttng_tracking_policy policy)
+{
+       int ret;
+       enum lttng_error_code ret_code = LTTNG_OK;
+       struct ust_id_tracker *id_tracker =
+                       get_id_tracker(session, process_attr);
+       struct process_attr_tracker *tracker =
+                       _trace_ust_get_process_attr_tracker(
+                                       session, process_attr);
+       bool should_update_apps = false;
+       enum lttng_tracking_policy previous_policy;
+
+       if (!tracker) {
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       previous_policy = process_attr_tracker_get_tracking_policy(tracker);
+       ret = process_attr_tracker_set_tracking_policy(tracker, policy);
+       if (ret) {
+               ret_code = LTTNG_ERR_UNK;
+               goto end;
+       }
+
+       if (previous_policy == policy) {
+               goto end;
+       }
+
+       switch (policy) {
+       case LTTNG_TRACKING_POLICY_INCLUDE_ALL:
+               /* Track all values: destroy tracker if exists. */
+               if (id_tracker->ht) {
+                       fini_id_tracker(id_tracker);
+                       /* Ensure all apps have session. */
+                       should_update_apps = true;
+               }
+               break;
+       case LTTNG_TRACKING_POLICY_EXCLUDE_ALL:
+       case LTTNG_TRACKING_POLICY_INCLUDE_SET:
+               /* fall-through. */
+               fini_id_tracker(id_tracker);
+               ret_code = (lttng_error_code) init_id_tracker(id_tracker);
+               if (ret_code != LTTNG_OK) {
+                       ERR("Error initializing ID tracker");
+                       goto end;
+               }
+               /* Remove all apps from session. */
+               should_update_apps = true;
+               break;
+       default:
+               abort();
+       }
+       if (should_update_apps && session->active) {
+               ust_app_global_update_all(session);
+       }
+end:
+       return ret_code;
+}
+
+/* Called with the session lock held. */
+enum lttng_error_code trace_ust_process_attr_tracker_inclusion_set_add_value(
+               struct ltt_ust_session *session,
+               enum lttng_process_attr process_attr,
+               const struct process_attr_value *value)
+{
+       enum lttng_error_code ret_code = LTTNG_OK;
+       bool should_update_apps = false;
+       struct ust_id_tracker *id_tracker =
+                       get_id_tracker(session, process_attr);
+       struct process_attr_tracker *tracker;
+       int integral_value;
+       enum process_attr_tracker_status status;
+       struct ust_app *app;
+
+       /*
+        * Convert process attribute tracker value to the integral
+        * representation required by the kern-ctl API.
+        */
+       switch (process_attr) {
+       case LTTNG_PROCESS_ATTR_PROCESS_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
+               integral_value = (int) value->value.pid;
+               break;
+       case LTTNG_PROCESS_ATTR_USER_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
+               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_USER_NAME) {
+                       uid_t uid;
+
+                       ret_code = utils_user_id_from_name(
+                                       value->value.user_name, &uid);
+                       if (ret_code != LTTNG_OK) {
+                               goto end;
+                       }
+                       integral_value = (int) uid;
+               } else {
+                       integral_value = (int) value->value.uid;
+               }
+               break;
+       case LTTNG_PROCESS_ATTR_GROUP_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
+               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_GROUP_NAME) {
+                       gid_t gid;
+
+                       ret_code = utils_group_id_from_name(
+                                       value->value.group_name, &gid);
+                       if (ret_code != LTTNG_OK) {
+                               goto end;
+                       }
+                       integral_value = (int) gid;
+               } else {
+                       integral_value = (int) value->value.gid;
+               }
+               break;
+       default:
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       tracker = _trace_ust_get_process_attr_tracker(session, process_attr);
+       if (!tracker) {
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       status = process_attr_tracker_inclusion_set_add_value(tracker, value);
+       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
+               switch (status) {
+               case PROCESS_ATTR_TRACKER_STATUS_EXISTS:
+                       ret_code = LTTNG_ERR_PROCESS_ATTR_EXISTS;
+                       break;
+               case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
+                       ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
+                       break;
+               case PROCESS_ATTR_TRACKER_STATUS_ERROR:
+               default:
+                       ret_code = LTTNG_ERR_UNK;
+                       break;
+               }
+               goto end;
+       }
+
+       DBG("User space track %s %d for session id %" PRIu64,
+                       lttng_process_attr_to_string(process_attr),
+                       integral_value, session->id);
+
+       ret_code = (lttng_error_code) id_tracker_add_id(id_tracker, integral_value);
+       if (ret_code != LTTNG_OK) {
+               goto end;
+       }
+       /* Add session to application */
+       switch (process_attr) {
+       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
+               app = ust_app_find_by_pid(integral_value);
+               if (app) {
+                       should_update_apps = true;
+               }
+               break;
+       default:
+               should_update_apps = true;
+               break;
+       }
+       if (should_update_apps && session->active) {
+               ust_app_global_update_all(session);
+       }
+end:
+       return ret_code;
+}
+
+/* Called with the session lock held. */
+enum lttng_error_code trace_ust_process_attr_tracker_inclusion_set_remove_value(
+               struct ltt_ust_session *session,
+               enum lttng_process_attr process_attr,
+               const struct process_attr_value *value)
+{
+       enum lttng_error_code ret_code = LTTNG_OK;
+       bool should_update_apps = false;
+       struct ust_id_tracker *id_tracker =
+                       get_id_tracker(session, process_attr);
+       struct process_attr_tracker *tracker;
+       int integral_value;
+       enum process_attr_tracker_status status;
+       struct ust_app *app;
+
+       /*
+        * Convert process attribute tracker value to the integral
+        * representation required by the kern-ctl API.
+        */
+       switch (process_attr) {
+       case LTTNG_PROCESS_ATTR_PROCESS_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
+               integral_value = (int) value->value.pid;
+               break;
+       case LTTNG_PROCESS_ATTR_USER_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID:
+               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_USER_NAME) {
+                       uid_t uid;
+
+                       ret_code = utils_user_id_from_name(
+                                       value->value.user_name, &uid);
+                       if (ret_code != LTTNG_OK) {
+                               goto end;
+                       }
+                       integral_value = (int) uid;
+               } else {
+                       integral_value = (int) value->value.uid;
+               }
+               break;
+       case LTTNG_PROCESS_ATTR_GROUP_ID:
+       case LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID:
+               if (value->type == LTTNG_PROCESS_ATTR_VALUE_TYPE_GROUP_NAME) {
+                       gid_t gid;
+
+                       ret_code = utils_group_id_from_name(
+                                       value->value.group_name, &gid);
+                       if (ret_code != LTTNG_OK) {
+                               goto end;
+                       }
+                       integral_value = (int) gid;
+               } else {
+                       integral_value = (int) value->value.gid;
+               }
+               break;
+       default:
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       tracker = _trace_ust_get_process_attr_tracker(session, process_attr);
+       if (!tracker) {
+               ret_code = LTTNG_ERR_INVALID;
+               goto end;
+       }
+
+       status = process_attr_tracker_inclusion_set_remove_value(
+                       tracker, value);
+       if (status != PROCESS_ATTR_TRACKER_STATUS_OK) {
+               switch (status) {
+               case PROCESS_ATTR_TRACKER_STATUS_MISSING:
+                       ret_code = LTTNG_ERR_PROCESS_ATTR_MISSING;
+                       break;
+               case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
+                       ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
+                       break;
+               case PROCESS_ATTR_TRACKER_STATUS_ERROR:
+               default:
+                       ret_code = LTTNG_ERR_UNK;
+                       break;
+               }
+               goto end;
+       }
+
+       DBG("User space untrack %s %d for session id %" PRIu64,
+                       lttng_process_attr_to_string(process_attr),
+                       integral_value, session->id);
+
+       ret_code = (lttng_error_code) id_tracker_del_id(id_tracker, integral_value);
+       if (ret_code != LTTNG_OK) {
+               goto end;
+       }
+       /* Add session to application */
+       switch (process_attr) {
+       case LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID:
+               app = ust_app_find_by_pid(integral_value);
+               if (app) {
+                       should_update_apps = true;
+               }
+               break;
+       default:
+               should_update_apps = true;
+               break;
+       }
+       if (should_update_apps && session->active) {
+               ust_app_global_update_all(session);
+       }
+end:
+       return ret_code;
+}
+
+/*
+ * RCU safe free context structure.
+ */
+static void destroy_context_rcu(struct rcu_head *head)
+{
+       struct lttng_ht_node_ulong *node =
+               caa_container_of(head, struct lttng_ht_node_ulong, head);
+       struct ltt_ust_context *ctx =
+               caa_container_of(node, struct ltt_ust_context, node);
+
+       trace_ust_destroy_context(ctx);
+}
+
+/*
+ * Cleanup UST context hash table.
+ */
+static void destroy_contexts(struct lttng_ht *ht)
+{
+       int ret;
+       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_iter iter;
+       struct ltt_ust_context *ctx;
+
+       LTTNG_ASSERT(ht);
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(ht->ht, &iter.iter, node, node) {
+               /* Remove from ordered list. */
+               ctx = caa_container_of(node, struct ltt_ust_context, node);
+               cds_list_del(&ctx->list);
+               /* Remove from channel's hash table. */
+               ret = lttng_ht_del(ht, &iter);
+               if (!ret) {
+                       call_rcu(&node->head, destroy_context_rcu);
+               }
+       }
+       rcu_read_unlock();
+
+       ht_cleanup_push(ht);
+}
+
+/*
+ * Cleanup ust event structure.
+ */
+void trace_ust_destroy_event(struct ltt_ust_event *event)
+{
+       LTTNG_ASSERT(event);
+
+       DBG2("Trace destroy UST event %s", event->attr.name);
+       free(event->filter_expression);
+       free(event->filter);
+       free(event->exclusion);
+       free(event);
+}
+
+/*
+ * Cleanup ust context structure.
+ */
+void trace_ust_destroy_context(struct ltt_ust_context *ctx)
+{
+       LTTNG_ASSERT(ctx);
+
+       if (ctx->ctx.ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
+               free(ctx->ctx.u.app_ctx.provider_name);
+               free(ctx->ctx.u.app_ctx.ctx_name);
+       }
+       free(ctx);
+}
+
+/*
+ * URCU intermediate call to complete destroy event.
+ */
+static void destroy_event_rcu(struct rcu_head *head)
+{
+       struct lttng_ht_node_str *node =
+               caa_container_of(head, struct lttng_ht_node_str, head);
+       struct ltt_ust_event *event =
+               caa_container_of(node, struct ltt_ust_event, node);
+
+       trace_ust_destroy_event(event);
+}
+
+/*
+ * Cleanup UST events hashtable.
+ */
+static void destroy_events(struct lttng_ht *events)
+{
+       int ret;
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(events);
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(events->ht, &iter.iter, node, node) {
+               ret = lttng_ht_del(events, &iter);
+               LTTNG_ASSERT(!ret);
+               call_rcu(&node->head, destroy_event_rcu);
+       }
+       rcu_read_unlock();
+
+       ht_cleanup_push(events);
+}
+
+/*
+ * Cleanup ust channel structure.
+ *
+ * Should _NOT_ be called with RCU read lock held.
+ */
+static void _trace_ust_destroy_channel(struct ltt_ust_channel *channel)
+{
+       LTTNG_ASSERT(channel);
+
+       DBG2("Trace destroy UST channel %s", channel->name);
+
+       free(channel);
+}
+
+/*
+ * URCU intermediate call to complete destroy channel.
+ */
+static void destroy_channel_rcu(struct rcu_head *head)
+{
+       struct lttng_ht_node_str *node =
+               caa_container_of(head, struct lttng_ht_node_str, head);
+       struct ltt_ust_channel *channel =
+               caa_container_of(node, struct ltt_ust_channel, node);
+
+       _trace_ust_destroy_channel(channel);
+}
+
+void trace_ust_destroy_channel(struct ltt_ust_channel *channel)
+{
+       /* Destroying all events of the channel */
+       destroy_events(channel->events);
+       /* Destroying all context of the channel */
+       destroy_contexts(channel->ctx);
+
+       call_rcu(&channel->node.head, destroy_channel_rcu);
+}
+
+/*
+ * Remove an UST channel from a channel HT.
+ */
+void trace_ust_delete_channel(struct lttng_ht *ht,
+               struct ltt_ust_channel *channel)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(ht);
+       LTTNG_ASSERT(channel);
+
+       iter.iter.node = &channel->node.node;
+       ret = lttng_ht_del(ht, &iter);
+       LTTNG_ASSERT(!ret);
+}
+
+/*
+ * Iterate over a hash table containing channels and cleanup safely.
+ */
+static void destroy_channels(struct lttng_ht *channels)
+{
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(channels);
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(channels->ht, &iter.iter, node, node) {
+               struct ltt_ust_channel *chan =
+                       caa_container_of(node, struct ltt_ust_channel, node);
+
+               trace_ust_delete_channel(channels, chan);
+               trace_ust_destroy_channel(chan);
+       }
+       rcu_read_unlock();
+
+       ht_cleanup_push(channels);
+}
+
+/*
+ * Cleanup UST global domain.
+ */
+static void destroy_domain_global(struct ltt_ust_domain_global *dom)
+{
+       LTTNG_ASSERT(dom);
+
+       destroy_channels(dom->channels);
+}
+
+/*
+ * Cleanup ust session structure, keeping data required by
+ * destroy notifier.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
+ */
+void trace_ust_destroy_session(struct ltt_ust_session *session)
+{
+       struct agent *agt;
+       struct buffer_reg_uid *reg, *sreg;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(session);
+
+       DBG2("Trace UST destroy session %" PRIu64, session->id);
+
+       /* Cleaning up UST domain */
+       destroy_domain_global(&session->domain_global);
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(session->agents->ht, &iter.iter, agt, node.node) {
+               int ret = lttng_ht_del(session->agents, &iter);
+
+               LTTNG_ASSERT(!ret);
+               agent_destroy(agt);
+       }
+       rcu_read_unlock();
+
+       ht_cleanup_push(session->agents);
+
+       /* Cleanup UID buffer registry object(s). */
+       cds_list_for_each_entry_safe(reg, sreg, &session->buffer_reg_uid_list,
+                       lnode) {
+               cds_list_del(&reg->lnode);
+               buffer_reg_uid_remove(reg);
+               buffer_reg_uid_destroy(reg, session->consumer);
+       }
+
+       process_attr_tracker_destroy(session->tracker_vpid);
+       process_attr_tracker_destroy(session->tracker_vuid);
+       process_attr_tracker_destroy(session->tracker_vgid);
+
+       fini_id_tracker(&session->vpid_tracker);
+       fini_id_tracker(&session->vuid_tracker);
+       fini_id_tracker(&session->vgid_tracker);
+       lttng_trace_chunk_put(session->current_trace_chunk);
+}
+
+/* Free elements needed by destroy notifiers. */
+void trace_ust_free_session(struct ltt_ust_session *session)
+{
+       consumer_output_put(session->consumer);
+       free(session);
+}
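
The casts added throughout trace-ust.cpp above, such as "(ltt_ust_context *) zmalloc(...)", "(enum lttng_ust_abi_context_type) utype" and "(lttng_error_code) id_tracker_add_id(...)", come from two implicit conversions that C accepts but C++ rejects: void pointer to object pointer, and plain int to enum. A minimal standalone sketch of both, with hypothetical names rather than lttng-tools code:

    /*
     * Hypothetical example, not lttng-tools code: shows the two implicit
     * conversions C++ rejects and the explicit casts that bridge them.
     */
    #include <cstdlib>

    enum example_state { EXAMPLE_OFF = 0, EXAMPLE_ON = 1 };

    struct example_ctx {
            enum example_state state;
    };

    /* Helper returning a plain int, as the id/context helpers above do. */
    static int compute_state(void)
    {
            return EXAMPLE_ON;
    }

    int main(void)
    {
            /* In C, 'ctx = malloc(...)' is accepted; C++ requires the cast. */
            struct example_ctx *ctx =
                            (struct example_ctx *) std::malloc(sizeof(*ctx));

            if (!ctx) {
                    return 1;
            }

            /* In C, the plain int assigns directly; C++ needs the enum cast. */
            ctx->state = (enum example_state) compute_state();

            std::free(ctx);
            return 0;
    }

The same zmalloc() cast pattern shows up again in tracker.cpp below.
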
diff --git a/src/bin/lttng-sessiond/tracker.c b/src/bin/lttng-sessiond/tracker.c
deleted file mode 100644
index 8293904..0000000
--- a/src/bin/lttng-sessiond/tracker.c
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Copyright (C) 2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2020 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include "lttng/tracker.h"
-#include "common/dynamic-array.h"
-#include "common/macros.h"
-#define _LGPL_SOURCE
-#include <grp.h>
-#include <pwd.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <urcu.h>
-#include <urcu/list.h>
-#include <urcu/rculfhash.h>
-
-#include "tracker.h"
-#include <common/defaults.h>
-#include <common/error.h>
-#include <common/hashtable/hashtable.h>
-#include <common/hashtable/utils.h>
-#include <common/tracker.h>
-#include <lttng/lttng-error.h>
-
-struct process_attr_tracker_value_node {
-       struct process_attr_value *value;
-       struct cds_lfht_node inclusion_set_ht_node;
-       struct rcu_head rcu_head;
-};
-
-struct process_attr_tracker {
-       enum lttng_tracking_policy policy;
-       struct cds_lfht *inclusion_set_ht;
-};
-
-static void process_attr_tracker_value_node_rcu_free(struct rcu_head *rcu_head)
-{
-       struct process_attr_tracker_value_node *node =
-                       container_of(rcu_head, typeof(*node), rcu_head);
-
-       free(node);
-}
-
-struct process_attr_tracker *process_attr_tracker_create(void)
-{
-       struct process_attr_tracker *tracker;
-
-       tracker = zmalloc(sizeof(*tracker));
-       if (!tracker) {
-               return NULL;
-       }
-
-       (void) process_attr_tracker_set_tracking_policy(
-                       tracker, LTTNG_TRACKING_POLICY_INCLUDE_ALL);
-
-       tracker->inclusion_set_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
-                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       if (!tracker->inclusion_set_ht) {
-               goto error;
-       }
-
-       return tracker;
-error:
-       process_attr_tracker_destroy(tracker);
-       return NULL;
-}
-
-static void process_attr_tracker_remove_value_node(
-               struct process_attr_tracker *tracker,
-               struct process_attr_tracker_value_node *value_node)
-{
-       cds_lfht_del(tracker->inclusion_set_ht,
-                       &value_node->inclusion_set_ht_node);
-       process_attr_value_destroy(value_node->value);
-       call_rcu(&value_node->rcu_head,
-                       process_attr_tracker_value_node_rcu_free);
-}
-
-static void process_attr_tracker_clear_inclusion_set(
-               struct process_attr_tracker *tracker)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-       struct process_attr_tracker_value_node *value_node;
-
-       if (!tracker->inclusion_set_ht) {
-               return;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry (tracker->inclusion_set_ht, &iter.iter,
-                       value_node, inclusion_set_ht_node) {
-               process_attr_tracker_remove_value_node(tracker, value_node);
-       }
-       rcu_read_unlock();
-       ret = cds_lfht_destroy(tracker->inclusion_set_ht, NULL);
-       LTTNG_ASSERT(ret == 0);
-       tracker->inclusion_set_ht = NULL;
-}
-
-static int process_attr_tracker_create_inclusion_set(
-               struct process_attr_tracker *tracker)
-{
-       LTTNG_ASSERT(!tracker->inclusion_set_ht);
-       tracker->inclusion_set_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
-                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
-       return tracker->inclusion_set_ht ? 0 : -1;
-}
-
-void process_attr_tracker_destroy(struct process_attr_tracker *tracker)
-{
-       if (!tracker) {
-               return;
-       }
-
-       process_attr_tracker_clear_inclusion_set(tracker);
-       free(tracker);
-}
-
-enum lttng_tracking_policy process_attr_tracker_get_tracking_policy(
-               const struct process_attr_tracker *tracker)
-{
-       return tracker->policy;
-}
-
-int process_attr_tracker_set_tracking_policy(
-               struct process_attr_tracker *tracker,
-               enum lttng_tracking_policy tracking_policy)
-{
-       int ret = 0;
-
-       if (tracker->policy == tracking_policy) {
-               goto end;
-       }
-
-       process_attr_tracker_clear_inclusion_set(tracker);
-       ret = process_attr_tracker_create_inclusion_set(tracker);
-       if (ret) {
-               goto end;
-       }
-       tracker->policy = tracking_policy;
-end:
-       return ret;
-}
-
-static int match_inclusion_set_value(
-               struct cds_lfht_node *node, const void *key)
-{
-       const struct process_attr_value *value_key = key;
-       const struct process_attr_tracker_value_node *value_node =
-                       caa_container_of(node,
-                                       struct process_attr_tracker_value_node,
-                                       inclusion_set_ht_node);
-
-       return process_attr_tracker_value_equal(value_node->value, value_key);
-}
-
-static struct process_attr_tracker_value_node *process_attr_tracker_lookup(
-               const struct process_attr_tracker *tracker,
-               const struct process_attr_value *value)
-{
-       struct cds_lfht_iter iter;
-       struct cds_lfht_node *node;
-
-       LTTNG_ASSERT(tracker->policy == LTTNG_TRACKING_POLICY_INCLUDE_SET);
-
-       rcu_read_lock();
-       cds_lfht_lookup(tracker->inclusion_set_ht,
-                       process_attr_value_hash(value),
-                       match_inclusion_set_value, value, &iter);
-       node = cds_lfht_iter_get_node(&iter);
-       rcu_read_unlock();
-
-       return node ? container_of(node, struct process_attr_tracker_value_node,
-                                     inclusion_set_ht_node) :
-                     NULL;
-}
-
-/* Protected by session mutex held by caller. */
-enum process_attr_tracker_status process_attr_tracker_inclusion_set_add_value(
-               struct process_attr_tracker *tracker,
-               const struct process_attr_value *value)
-{
-       enum process_attr_tracker_status status =
-                       PROCESS_ATTR_TRACKER_STATUS_OK;
-       struct process_attr_value *value_copy = NULL;
-       struct process_attr_tracker_value_node *value_node = NULL;
-
-       rcu_read_lock();
-       if (tracker->policy != LTTNG_TRACKING_POLICY_INCLUDE_SET) {
-               status = PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY;
-               goto end;
-       }
-
-       if (process_attr_tracker_lookup(tracker, value)) {
-               status = PROCESS_ATTR_TRACKER_STATUS_EXISTS;
-               goto end;
-       }
-
-       value_node = zmalloc(sizeof(*value_node));
-       if (!value_node) {
-               status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
-               goto end;
-       }
-
-       value_copy = process_attr_value_copy(value);
-       if (!value_copy) {
-               status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
-               goto end;
-       }
-
-       value_node->value = value_copy;
-       cds_lfht_add(tracker->inclusion_set_ht,
-                       process_attr_value_hash(value_copy),
-                       &value_node->inclusion_set_ht_node);
-       value_copy = NULL;
-       value_node = NULL;
-end:
-       if (value_copy) {
-               process_attr_value_destroy(value_copy);
-       }
-       if (value_node) {
-               free(value_node);
-       }
-       rcu_read_unlock();
-       return status;
-}
-
-/* Protected by session mutex held by caller. */
-enum process_attr_tracker_status
-process_attr_tracker_inclusion_set_remove_value(
-               struct process_attr_tracker *tracker,
-               const struct process_attr_value *value)
-{
-       struct process_attr_tracker_value_node *value_node;
-       enum process_attr_tracker_status status =
-                       PROCESS_ATTR_TRACKER_STATUS_OK;
-
-       rcu_read_lock();
-       if (tracker->policy != LTTNG_TRACKING_POLICY_INCLUDE_SET) {
-               status = PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY;
-               goto end;
-       }
-
-       value_node = process_attr_tracker_lookup(tracker, value);
-       if (!value_node) {
-               status = PROCESS_ATTR_TRACKER_STATUS_MISSING;
-               goto end;
-       }
-
-       process_attr_tracker_remove_value_node(tracker, value_node);
-end:
-       rcu_read_unlock();
-       return status;
-}
-
-enum process_attr_tracker_status process_attr_tracker_get_inclusion_set(
-               const struct process_attr_tracker *tracker,
-               struct lttng_process_attr_values **_values)
-{
-       struct lttng_ht_iter iter;
-       struct process_attr_tracker_value_node *value_node;
-       enum process_attr_tracker_status status =
-                       PROCESS_ATTR_TRACKER_STATUS_OK;
-       struct lttng_process_attr_values *values;
-       struct process_attr_value *new_value = NULL;
-
-       values = lttng_process_attr_values_create();
-       if (!values) {
-               status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
-               goto error;
-       }
-
-       if (tracker->policy != LTTNG_TRACKING_POLICY_INCLUDE_SET) {
-               status = PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY;
-               goto error;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry (tracker->inclusion_set_ht, &iter.iter,
-                       value_node, inclusion_set_ht_node) {
-               int ret;
-
-               new_value = process_attr_value_copy(value_node->value);
-               if (!new_value) {
-                       status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
-                       goto error_unlock;
-               }
-
-               ret = lttng_dynamic_pointer_array_add_pointer(
-                               &values->array, new_value);
-               if (ret) {
-                       status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
-                       goto error_unlock;
-               }
-
-               new_value = NULL;
-       }
-       rcu_read_unlock();
-       *_values = values;
-       return status;
-error_unlock:
-       rcu_read_unlock();
-error:
-       lttng_process_attr_values_destroy(values);
-       process_attr_value_destroy(new_value);
-       return status;
-}
diff --git a/src/bin/lttng-sessiond/tracker.cpp b/src/bin/lttng-sessiond/tracker.cpp
new file mode 100644
index 0000000..1cae403
--- /dev/null
+++ b/src/bin/lttng-sessiond/tracker.cpp
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2020 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include "lttng/tracker.h"
+#include "common/dynamic-array.h"
+#include "common/macros.h"
+#define _LGPL_SOURCE
+#include <grp.h>
+#include <pwd.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <urcu.h>
+#include <urcu/list.h>
+#include <urcu/rculfhash.h>
+
+#include "tracker.h"
+#include <common/defaults.h>
+#include <common/error.h>
+#include <common/hashtable/hashtable.h>
+#include <common/hashtable/utils.h>
+#include <common/tracker.h>
+#include <lttng/lttng-error.h>
+
+struct process_attr_tracker_value_node {
+       struct process_attr_value *value;
+       struct cds_lfht_node inclusion_set_ht_node;
+       struct rcu_head rcu_head;
+};
+
+struct process_attr_tracker {
+       enum lttng_tracking_policy policy;
+       struct cds_lfht *inclusion_set_ht;
+};
+
+static void process_attr_tracker_value_node_rcu_free(struct rcu_head *rcu_head)
+{
+       struct process_attr_tracker_value_node *node =
+                       container_of(rcu_head, typeof(*node), rcu_head);
+
+       free(node);
+}
+
+struct process_attr_tracker *process_attr_tracker_create(void)
+{
+       struct process_attr_tracker *tracker;
+
+       tracker = (process_attr_tracker *) zmalloc(sizeof(*tracker));
+       if (!tracker) {
+               return NULL;
+       }
+
+       (void) process_attr_tracker_set_tracking_policy(
+                       tracker, LTTNG_TRACKING_POLICY_INCLUDE_ALL);
+
+       tracker->inclusion_set_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
+                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!tracker->inclusion_set_ht) {
+               goto error;
+       }
+
+       return tracker;
+error:
+       process_attr_tracker_destroy(tracker);
+       return NULL;
+}
+
+static void process_attr_tracker_remove_value_node(
+               struct process_attr_tracker *tracker,
+               struct process_attr_tracker_value_node *value_node)
+{
+       cds_lfht_del(tracker->inclusion_set_ht,
+                       &value_node->inclusion_set_ht_node);
+       process_attr_value_destroy(value_node->value);
+       call_rcu(&value_node->rcu_head,
+                       process_attr_tracker_value_node_rcu_free);
+}
+
+static void process_attr_tracker_clear_inclusion_set(
+               struct process_attr_tracker *tracker)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+       struct process_attr_tracker_value_node *value_node;
+
+       if (!tracker->inclusion_set_ht) {
+               return;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry (tracker->inclusion_set_ht, &iter.iter,
+                       value_node, inclusion_set_ht_node) {
+               process_attr_tracker_remove_value_node(tracker, value_node);
+       }
+       rcu_read_unlock();
+       ret = cds_lfht_destroy(tracker->inclusion_set_ht, NULL);
+       LTTNG_ASSERT(ret == 0);
+       tracker->inclusion_set_ht = NULL;
+}
+
+static int process_attr_tracker_create_inclusion_set(
+               struct process_attr_tracker *tracker)
+{
+       LTTNG_ASSERT(!tracker->inclusion_set_ht);
+       tracker->inclusion_set_ht = cds_lfht_new(DEFAULT_HT_SIZE, 1, 0,
+                       CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       return tracker->inclusion_set_ht ? 0 : -1;
+}
+
+void process_attr_tracker_destroy(struct process_attr_tracker *tracker)
+{
+       if (!tracker) {
+               return;
+       }
+
+       process_attr_tracker_clear_inclusion_set(tracker);
+       free(tracker);
+}
+
+enum lttng_tracking_policy process_attr_tracker_get_tracking_policy(
+               const struct process_attr_tracker *tracker)
+{
+       return tracker->policy;
+}
+
+int process_attr_tracker_set_tracking_policy(
+               struct process_attr_tracker *tracker,
+               enum lttng_tracking_policy tracking_policy)
+{
+       int ret = 0;
+
+       if (tracker->policy == tracking_policy) {
+               goto end;
+       }
+
+       process_attr_tracker_clear_inclusion_set(tracker);
+       ret = process_attr_tracker_create_inclusion_set(tracker);
+       if (ret) {
+               goto end;
+       }
+       tracker->policy = tracking_policy;
+end:
+       return ret;
+}
+
+static int match_inclusion_set_value(
+               struct cds_lfht_node *node, const void *key)
+{
+       const struct process_attr_value *value_key = (process_attr_value *) key;
+       const struct process_attr_tracker_value_node *value_node =
+                       caa_container_of(node,
+                                       struct process_attr_tracker_value_node,
+                                       inclusion_set_ht_node);
+
+       return process_attr_tracker_value_equal(value_node->value, value_key);
+}
+
+static struct process_attr_tracker_value_node *process_attr_tracker_lookup(
+               const struct process_attr_tracker *tracker,
+               const struct process_attr_value *value)
+{
+       struct cds_lfht_iter iter;
+       struct cds_lfht_node *node;
+
+       LTTNG_ASSERT(tracker->policy == LTTNG_TRACKING_POLICY_INCLUDE_SET);
+
+       rcu_read_lock();
+       cds_lfht_lookup(tracker->inclusion_set_ht,
+                       process_attr_value_hash(value),
+                       match_inclusion_set_value, value, &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       rcu_read_unlock();
+
+       return node ? container_of(node, struct process_attr_tracker_value_node,
+                                     inclusion_set_ht_node) :
+                     NULL;
+}
+
+/* Protected by session mutex held by caller. */
+enum process_attr_tracker_status process_attr_tracker_inclusion_set_add_value(
+               struct process_attr_tracker *tracker,
+               const struct process_attr_value *value)
+{
+       enum process_attr_tracker_status status =
+                       PROCESS_ATTR_TRACKER_STATUS_OK;
+       struct process_attr_value *value_copy = NULL;
+       struct process_attr_tracker_value_node *value_node = NULL;
+
+       rcu_read_lock();
+       if (tracker->policy != LTTNG_TRACKING_POLICY_INCLUDE_SET) {
+               status = PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY;
+               goto end;
+       }
+
+       if (process_attr_tracker_lookup(tracker, value)) {
+               status = PROCESS_ATTR_TRACKER_STATUS_EXISTS;
+               goto end;
+       }
+
+       value_node = (process_attr_tracker_value_node *) zmalloc(sizeof(*value_node));
+       if (!value_node) {
+               status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
+               goto end;
+       }
+
+       value_copy = process_attr_value_copy(value);
+       if (!value_copy) {
+               status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
+               goto end;
+       }
+
+       value_node->value = value_copy;
+       cds_lfht_add(tracker->inclusion_set_ht,
+                       process_attr_value_hash(value_copy),
+                       &value_node->inclusion_set_ht_node);
+       value_copy = NULL;
+       value_node = NULL;
+end:
+       if (value_copy) {
+               process_attr_value_destroy(value_copy);
+       }
+       if (value_node) {
+               free(value_node);
+       }
+       rcu_read_unlock();
+       return status;
+}
+
+/* Protected by session mutex held by caller. */
+enum process_attr_tracker_status
+process_attr_tracker_inclusion_set_remove_value(
+               struct process_attr_tracker *tracker,
+               const struct process_attr_value *value)
+{
+       struct process_attr_tracker_value_node *value_node;
+       enum process_attr_tracker_status status =
+                       PROCESS_ATTR_TRACKER_STATUS_OK;
+
+       rcu_read_lock();
+       if (tracker->policy != LTTNG_TRACKING_POLICY_INCLUDE_SET) {
+               status = PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY;
+               goto end;
+       }
+
+       value_node = process_attr_tracker_lookup(tracker, value);
+       if (!value_node) {
+               status = PROCESS_ATTR_TRACKER_STATUS_MISSING;
+               goto end;
+       }
+
+       process_attr_tracker_remove_value_node(tracker, value_node);
+end:
+       rcu_read_unlock();
+       return status;
+}
+
+enum process_attr_tracker_status process_attr_tracker_get_inclusion_set(
+               const struct process_attr_tracker *tracker,
+               struct lttng_process_attr_values **_values)
+{
+       struct lttng_ht_iter iter;
+       struct process_attr_tracker_value_node *value_node;
+       enum process_attr_tracker_status status =
+                       PROCESS_ATTR_TRACKER_STATUS_OK;
+       struct lttng_process_attr_values *values;
+       struct process_attr_value *new_value = NULL;
+
+       values = lttng_process_attr_values_create();
+       if (!values) {
+               status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
+               goto error;
+       }
+
+       if (tracker->policy != LTTNG_TRACKING_POLICY_INCLUDE_SET) {
+               status = PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY;
+               goto error;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry (tracker->inclusion_set_ht, &iter.iter,
+                       value_node, inclusion_set_ht_node) {
+               int ret;
+
+               new_value = process_attr_value_copy(value_node->value);
+               if (!new_value) {
+                       status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
+                       goto error_unlock;
+               }
+
+               ret = lttng_dynamic_pointer_array_add_pointer(
+                               &values->array, new_value);
+               if (ret) {
+                       status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
+                       goto error_unlock;
+               }
+
+               new_value = NULL;
+       }
+       rcu_read_unlock();
+       *_values = values;
+       return status;
+error_unlock:
+       rcu_read_unlock();
+error:
+       lttng_process_attr_values_destroy(values);
+       process_attr_value_destroy(new_value);
+       return status;
+}
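
tracker.cpp above hits the same class of conversion from the other direction: the hash table match callback receives its key as const void *, which C assigns straight to a typed pointer but C++ does not, hence the new cast in match_inclusion_set_value(). A minimal sketch of that shape, with hypothetical names; static_cast is used here for illustration while the patch itself keeps a C-style cast:

    /* Hypothetical example, not lttng-tools code. */
    struct example_value {
            int id;
    };

    /* Loosely mirrors a hash-table match callback: payload plus opaque key. */
    static int example_match(const void *node_payload, const void *key)
    {
            /* In C, 'const struct example_value *k = key;' compiles as-is. */
            const struct example_value *k =
                            static_cast<const struct example_value *>(key);
            const struct example_value *v =
                            static_cast<const struct example_value *>(node_payload);

            return v->id == k->id;
    }

    int main(void)
    {
            const struct example_value a = { 42 };
            const struct example_value b = { 42 };

            return example_match(&a, &b) ? 0 : 1;
    }
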
diff --git a/src/bin/lttng-sessiond/trigger-error-query.c b/src/bin/lttng-sessiond/trigger-error-query.c
deleted file mode 100644
index 8c39e1b..0000000
--- a/src/bin/lttng-sessiond/trigger-error-query.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2021 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include "event-notifier-error-accounting.h"
-#include <lttng/error-query-internal.h>
-#include <lttng/trigger/trigger-internal.h>
-#include <lttng/action/action-internal.h>
-
-enum lttng_trigger_status lttng_trigger_add_error_results(
-               const struct lttng_trigger *trigger,
-               struct lttng_error_query_results *results)
-{
-       return LTTNG_TRIGGER_STATUS_OK;
-}
-
-enum lttng_trigger_status lttng_trigger_condition_add_error_results(
-               const struct lttng_trigger *trigger,
-               struct lttng_error_query_results *results)
-{
-       enum lttng_trigger_status status;
-       uint64_t discarded_tracer_messages_count;
-       enum event_notifier_error_accounting_status error_accounting_status;
-       struct lttng_error_query_result *discarded_tracer_messages_counter = NULL;
-       const char *trigger_name;
-       uid_t trigger_owner;
-
-       status = lttng_trigger_get_name(trigger, &trigger_name);
-       trigger_name = status == LTTNG_TRIGGER_STATUS_OK ?
-                       trigger_name : "(anonymous)";
-       status = lttng_trigger_get_owner_uid(trigger,
-                       &trigger_owner);
-       LTTNG_ASSERT(status == LTTNG_TRIGGER_STATUS_OK);
-
-       /*
-        * Only add discarded tracer messages count for applicable conditions.
-        * As of 2.13, only "event rule matches" conditions can generate
-        * reportable errors hence why this function is very specific to this
-        * condition type.
-        */
-       if (!lttng_trigger_needs_tracer_notifier(trigger)) {
-               status = LTTNG_TRIGGER_STATUS_OK;
-               goto end;
-       }
-
-       error_accounting_status = event_notifier_error_accounting_get_count(
-                       trigger, &discarded_tracer_messages_count);
-       if (error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-               ERR("Failed to retrieve tracer discarded messages count for trigger: trigger name = '%s', trigger owner uid = %d",
-                               trigger_name, (int) trigger_owner);
-               status = LTTNG_TRIGGER_STATUS_ERROR;
-               goto end;
-       }
-
-       discarded_tracer_messages_counter = lttng_error_query_result_counter_create(
-                       "discarded tracer messages",
-                       "Count of messages discarded by the tracer due to a communication error with the session daemon",
-                       discarded_tracer_messages_count);
-       if (!discarded_tracer_messages_counter) {
-               status = LTTNG_TRIGGER_STATUS_ERROR;
-               goto end;
-       }
-
-       if (lttng_error_query_results_add_result(
-                           results, discarded_tracer_messages_counter)) {
-               status = LTTNG_TRIGGER_STATUS_ERROR;
-               goto end;
-       }
-
-       /* Ownership transferred to the results. */
-       discarded_tracer_messages_counter = NULL;
-
-       status = LTTNG_TRIGGER_STATUS_OK;
-end:
-       lttng_error_query_result_destroy(discarded_tracer_messages_counter);
-       return status;
-}
-
-enum lttng_trigger_status lttng_trigger_add_action_error_query_results(
-               struct lttng_trigger *trigger,
-               struct lttng_error_query_results *results)
-{
-       enum lttng_trigger_status status;
-       const char *trigger_name;
-       uid_t trigger_owner;
-       enum lttng_action_status action_status;
-
-       status = lttng_trigger_get_name(trigger, &trigger_name);
-       trigger_name = status == LTTNG_TRIGGER_STATUS_OK ?
-                       trigger_name : "(anonymous)";
-       status = lttng_trigger_get_owner_uid(trigger,
-                       &trigger_owner);
-       LTTNG_ASSERT(status == LTTNG_TRIGGER_STATUS_OK);
-
-       action_status = lttng_action_add_error_query_results(
-                       lttng_trigger_get_action(trigger), results);
-       switch (action_status) {
-       case LTTNG_ACTION_STATUS_OK:
-               break;
-       default:
-               status = LTTNG_TRIGGER_STATUS_ERROR;
-               goto end;
-       }
-
-       status = LTTNG_TRIGGER_STATUS_OK;
-end:
-       return status;
-}
diff --git a/src/bin/lttng-sessiond/trigger-error-query.cpp b/src/bin/lttng-sessiond/trigger-error-query.cpp
new file mode 100644
index 0000000..8c39e1b
--- /dev/null
+++ b/src/bin/lttng-sessiond/trigger-error-query.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2021 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include "event-notifier-error-accounting.h"
+#include <lttng/error-query-internal.h>
+#include <lttng/trigger/trigger-internal.h>
+#include <lttng/action/action-internal.h>
+
+enum lttng_trigger_status lttng_trigger_add_error_results(
+               const struct lttng_trigger *trigger,
+               struct lttng_error_query_results *results)
+{
+       return LTTNG_TRIGGER_STATUS_OK;
+}
+
+enum lttng_trigger_status lttng_trigger_condition_add_error_results(
+               const struct lttng_trigger *trigger,
+               struct lttng_error_query_results *results)
+{
+       enum lttng_trigger_status status;
+       uint64_t discarded_tracer_messages_count;
+       enum event_notifier_error_accounting_status error_accounting_status;
+       struct lttng_error_query_result *discarded_tracer_messages_counter = NULL;
+       const char *trigger_name;
+       uid_t trigger_owner;
+
+       status = lttng_trigger_get_name(trigger, &trigger_name);
+       trigger_name = status == LTTNG_TRIGGER_STATUS_OK ?
+                       trigger_name : "(anonymous)";
+       status = lttng_trigger_get_owner_uid(trigger,
+                       &trigger_owner);
+       LTTNG_ASSERT(status == LTTNG_TRIGGER_STATUS_OK);
+
+       /*
+        * Only add discarded tracer messages count for applicable conditions.
+        * As of 2.13, only "event rule matches" conditions can generate
+        * reportable errors hence why this function is very specific to this
+        * condition type.
+        */
+       if (!lttng_trigger_needs_tracer_notifier(trigger)) {
+               status = LTTNG_TRIGGER_STATUS_OK;
+               goto end;
+       }
+
+       error_accounting_status = event_notifier_error_accounting_get_count(
+                       trigger, &discarded_tracer_messages_count);
+       if (error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+               ERR("Failed to retrieve tracer discarded messages count for trigger: trigger name = '%s', trigger owner uid = %d",
+                               trigger_name, (int) trigger_owner);
+               status = LTTNG_TRIGGER_STATUS_ERROR;
+               goto end;
+       }
+
+       discarded_tracer_messages_counter = lttng_error_query_result_counter_create(
+                       "discarded tracer messages",
+                       "Count of messages discarded by the tracer due to a communication error with the session daemon",
+                       discarded_tracer_messages_count);
+       if (!discarded_tracer_messages_counter) {
+               status = LTTNG_TRIGGER_STATUS_ERROR;
+               goto end;
+       }
+
+       if (lttng_error_query_results_add_result(
+                           results, discarded_tracer_messages_counter)) {
+               status = LTTNG_TRIGGER_STATUS_ERROR;
+               goto end;
+       }
+
+       /* Ownership transferred to the results. */
+       discarded_tracer_messages_counter = NULL;
+
+       status = LTTNG_TRIGGER_STATUS_OK;
+end:
+       lttng_error_query_result_destroy(discarded_tracer_messages_counter);
+       return status;
+}
+
+enum lttng_trigger_status lttng_trigger_add_action_error_query_results(
+               struct lttng_trigger *trigger,
+               struct lttng_error_query_results *results)
+{
+       enum lttng_trigger_status status;
+       const char *trigger_name;
+       uid_t trigger_owner;
+       enum lttng_action_status action_status;
+
+       status = lttng_trigger_get_name(trigger, &trigger_name);
+       trigger_name = status == LTTNG_TRIGGER_STATUS_OK ?
+                       trigger_name : "(anonymous)";
+       status = lttng_trigger_get_owner_uid(trigger,
+                       &trigger_owner);
+       LTTNG_ASSERT(status == LTTNG_TRIGGER_STATUS_OK);
+
+       action_status = lttng_action_add_error_query_results(
+                       lttng_trigger_get_action(trigger), results);
+       switch (action_status) {
+       case LTTNG_ACTION_STATUS_OK:
+               break;
+       default:
+               status = LTTNG_TRIGGER_STATUS_ERROR;
+               goto end;
+       }
+
+       status = LTTNG_TRIGGER_STATUS_OK;
+end:
+       return status;
+}
diff --git a/src/bin/lttng-sessiond/ust-app.c b/src/bin/lttng-sessiond/ust-app.c
deleted file mode 100644
index d303e8b..0000000
--- a/src/bin/lttng-sessiond/ust-app.c
+++ /dev/null
@@ -1,7818 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <errno.h>
-#include <fcntl.h>
-#include <inttypes.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <urcu/compiler.h>
-#include <signal.h>
-
-#include <common/bytecode/bytecode.h>
-#include <common/compat/errno.h>
-#include <common/common.h>
-#include <common/hashtable/utils.h>
-#include <lttng/event-rule/event-rule.h>
-#include <lttng/event-rule/event-rule-internal.h>
-#include <lttng/event-rule/user-tracepoint.h>
-#include <lttng/condition/condition.h>
-#include <lttng/condition/event-rule-matches-internal.h>
-#include <lttng/condition/event-rule-matches.h>
-#include <lttng/trigger/trigger-internal.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-
-#include "buffer-registry.h"
-#include "condition-internal.h"
-#include "fd-limit.h"
-#include "health-sessiond.h"
-#include "ust-app.h"
-#include "ust-consumer.h"
-#include "lttng-ust-ctl.h"
-#include "lttng-ust-error.h"
-#include "utils.h"
-#include "session.h"
-#include "lttng-sessiond.h"
-#include "notification-thread-commands.h"
-#include "rotate.h"
-#include "event.h"
-#include "event-notifier-error-accounting.h"
-
-
-struct lttng_ht *ust_app_ht;
-struct lttng_ht *ust_app_ht_by_sock;
-struct lttng_ht *ust_app_ht_by_notify_sock;
-
-static
-int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
-
-/* Next available channel key. Access under next_channel_key_lock. */
-static uint64_t _next_channel_key;
-static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
-
-/* Next available session ID. Access under next_session_id_lock. */
-static uint64_t _next_session_id;
-static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * Return the incremented value of next_channel_key.
- */
-static uint64_t get_next_channel_key(void)
-{
-       uint64_t ret;
-
-       pthread_mutex_lock(&next_channel_key_lock);
-       ret = ++_next_channel_key;
-       pthread_mutex_unlock(&next_channel_key_lock);
-       return ret;
-}
-
-/*
- * Return the atomically incremented value of next_session_id.
- */
-static uint64_t get_next_session_id(void)
-{
-       uint64_t ret;
-
-       pthread_mutex_lock(&next_session_id_lock);
-       ret = ++_next_session_id;
-       pthread_mutex_unlock(&next_session_id_lock);
-       return ret;
-}
-
-static void copy_channel_attr_to_ustctl(
-               struct lttng_ust_ctl_consumer_channel_attr *attr,
-               struct lttng_ust_abi_channel_attr *uattr)
-{
-       /* Copy channel attributes since the layout is different. */
-       attr->subbuf_size = uattr->subbuf_size;
-       attr->num_subbuf = uattr->num_subbuf;
-       attr->overwrite = uattr->overwrite;
-       attr->switch_timer_interval = uattr->switch_timer_interval;
-       attr->read_timer_interval = uattr->read_timer_interval;
-       attr->output = uattr->output;
-       attr->blocking_timeout = uattr->u.s.blocking_timeout;
-}
-
-/*
- * Match function for the hash table lookup.
- *
- * It matches an ust app event based on four attributes: the event name, the
- * filter bytecode, the loglevel and the exclusions.
- */
-static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
-{
-       struct ust_app_event *event;
-       const struct ust_app_ht_key *key;
-       int ev_loglevel_value;
-
-       LTTNG_ASSERT(node);
-       LTTNG_ASSERT(_key);
-
-       event = caa_container_of(node, struct ust_app_event, node.node);
-       key = _key;
-       ev_loglevel_value = event->attr.loglevel;
-
-       /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
-
-       /* Event name */
-       if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
-               goto no_match;
-       }
-
-       /* Event loglevel. */
-       if (ev_loglevel_value != key->loglevel_type) {
-               if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL
-                               && key->loglevel_type == 0 &&
-                               ev_loglevel_value == -1) {
-                       /*
-                        * Match is accepted: on event creation, the loglevel is set to -1
-                        * when the event loglevel type is ALL, so both 0 and -1 are
-                        * accepted for this loglevel type, 0 being the value set by the
-                        * API when receiving an enable event.
-                        */
-               } else {
-                       goto no_match;
-               }
-       }
-
-       /* One of the filters is NULL, fail. */
-       if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
-               goto no_match;
-       }
-
-       if (key->filter && event->filter) {
-               /* Both filters exist, check length followed by the bytecode. */
-               if (event->filter->len != key->filter->len ||
-                               memcmp(event->filter->data, key->filter->data,
-                                       event->filter->len) != 0) {
-                       goto no_match;
-               }
-       }
-
-       /* One of the exclusions is NULL, fail. */
-       if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
-               goto no_match;
-       }
-
-       if (key->exclusion && event->exclusion) {
-               /* Both exclusions exist, check count followed by the names. */
-               if (event->exclusion->count != key->exclusion->count ||
-                               memcmp(event->exclusion->names, key->exclusion->names,
-                                       event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
-                       goto no_match;
-               }
-       }
-
-
-       /* Match. */
-       return 1;
-
-no_match:
-       return 0;
-}
-
-/*
- * Unique add of an ust app event in the given ht. This uses the custom
- * ht_match_ust_app_event match function and the event name as hash.
- */
-static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
-               struct ust_app_event *event)
-{
-       struct cds_lfht_node *node_ptr;
-       struct ust_app_ht_key key;
-       struct lttng_ht *ht;
-
-       LTTNG_ASSERT(ua_chan);
-       LTTNG_ASSERT(ua_chan->events);
-       LTTNG_ASSERT(event);
-
-       ht = ua_chan->events;
-       key.name = event->attr.name;
-       key.filter = event->filter;
-       key.loglevel_type = event->attr.loglevel;
-       key.exclusion = event->exclusion;
-
-       node_ptr = cds_lfht_add_unique(ht->ht,
-                       ht->hash_fct(event->node.key, lttng_ht_seed),
-                       ht_match_ust_app_event, &key, &event->node.node);
-       LTTNG_ASSERT(node_ptr == &event->node.node);
-}
-
-/*
- * Close the notify socket from the given RCU head object. This MUST be called
- * through a call_rcu().
- */
-static void close_notify_sock_rcu(struct rcu_head *head)
-{
-       int ret;
-       struct ust_app_notify_sock_obj *obj =
-               caa_container_of(head, struct ust_app_notify_sock_obj, head);
-
-       /* Must have a valid fd here. */
-       LTTNG_ASSERT(obj->fd >= 0);
-
-       ret = close(obj->fd);
-       if (ret) {
-               ERR("close notify sock %d RCU", obj->fd);
-       }
-       lttng_fd_put(LTTNG_FD_APPS, 1);
-
-       free(obj);
-}
-
-/*
- * Return the session registry according to the buffer type of the given
- * session.
- *
- * A registry per UID object MUST exist before calling this function or else
- * it LTTNG_ASSERT()s if not found. RCU read side lock must be acquired.
- */
-static struct ust_registry_session *get_session_registry(
-               struct ust_app_session *ua_sess)
-{
-       struct ust_registry_session *registry = NULL;
-
-       LTTNG_ASSERT(ua_sess);
-
-       switch (ua_sess->buffer_type) {
-       case LTTNG_BUFFER_PER_PID:
-       {
-               struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
-               if (!reg_pid) {
-                       goto error;
-               }
-               registry = reg_pid->registry->reg.ust;
-               break;
-       }
-       case LTTNG_BUFFER_PER_UID:
-       {
-               struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
-                               ua_sess->tracing_id, ua_sess->bits_per_long,
-                               lttng_credentials_get_uid(&ua_sess->real_credentials));
-               if (!reg_uid) {
-                       goto error;
-               }
-               registry = reg_uid->registry->reg.ust;
-               break;
-       }
-       default:
-               abort();
-       };
-
-error:
-       return registry;
-}
-
-/*
- * Delete ust context safely. RCU read lock must be held before calling
- * this function.
- */
-static
-void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
-               struct ust_app *app)
-{
-       int ret;
-
-       LTTNG_ASSERT(ua_ctx);
-
-       if (ua_ctx->obj) {
-               pthread_mutex_lock(&app->sock_lock);
-               ret = lttng_ust_ctl_release_object(sock, ua_ctx->obj);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG3("UST app release ctx failed. Application is dead: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else if (ret == -EAGAIN) {
-                               WARN("UST app release ctx failed. Communication time out: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else {
-                               ERR("UST app release ctx obj handle %d failed with ret %d: pid = %d, sock = %d",
-                                               ua_ctx->obj->handle, ret,
-                                               app->pid, app->sock);
-                       }
-               }
-               free(ua_ctx->obj);
-       }
-       free(ua_ctx);
-}
-
-/*
- * Delete ust app event safely. RCU read lock must be held before calling
- * this function.
- */
-static
-void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
-               struct ust_app *app)
-{
-       int ret;
-
-       LTTNG_ASSERT(ua_event);
-
-       free(ua_event->filter);
-       if (ua_event->exclusion != NULL)
-               free(ua_event->exclusion);
-       if (ua_event->obj != NULL) {
-               pthread_mutex_lock(&app->sock_lock);
-               ret = lttng_ust_ctl_release_object(sock, ua_event->obj);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG3("UST app release event failed. Application is dead: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else if (ret == -EAGAIN) {
-                               WARN("UST app release event failed. Communication time out: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else {
-                               ERR("UST app release event obj failed with ret %d: pid = %d, sock = %d",
-                                               ret, app->pid, app->sock);
-                       }
-               }
-               free(ua_event->obj);
-       }
-       free(ua_event);
-}
-
-/*
- * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
- * through a call_rcu().
- */
-static
-void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
-{
-       struct ust_app_event_notifier_rule *obj = caa_container_of(
-                       head, struct ust_app_event_notifier_rule, rcu_head);
-
-       free(obj);
-}
-
-/*
- * Delete ust app event notifier rule safely.
- */
-static void delete_ust_app_event_notifier_rule(int sock,
-               struct ust_app_event_notifier_rule *ua_event_notifier_rule,
-               struct ust_app *app)
-{
-       int ret;
-
-       LTTNG_ASSERT(ua_event_notifier_rule);
-
-       if (ua_event_notifier_rule->exclusion != NULL) {
-               free(ua_event_notifier_rule->exclusion);
-       }
-
-       if (ua_event_notifier_rule->obj != NULL) {
-               pthread_mutex_lock(&app->sock_lock);
-               ret = lttng_ust_ctl_release_object(sock, ua_event_notifier_rule->obj);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG3("UST app release event notifier failed. Application is dead: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else if (ret == -EAGAIN) {
-                               WARN("UST app release event notifier failed. Communication time out: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else {
-                               ERR("UST app release event notifier failed with ret %d: pid = %d, sock = %d",
-                                               ret, app->pid, app->sock);
-                       }
-               }
-
-               free(ua_event_notifier_rule->obj);
-       }
-
-       lttng_trigger_put(ua_event_notifier_rule->trigger);
-       call_rcu(&ua_event_notifier_rule->rcu_head,
-                       free_ust_app_event_notifier_rule_rcu);
-}
-
-/*
- * Release ust data object of the given stream.
- *
- * Return 0 on success or else a negative value.
- */
-static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
-               struct ust_app *app)
-{
-       int ret = 0;
-
-       LTTNG_ASSERT(stream);
-
-       if (stream->obj) {
-               pthread_mutex_lock(&app->sock_lock);
-               ret = lttng_ust_ctl_release_object(sock, stream->obj);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG3("UST app release stream failed. Application is dead: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else if (ret == -EAGAIN) {
-                               WARN("UST app release stream failed. Communication time out: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else {
-                               ERR("UST app release stream obj failed with ret %d: pid = %d, sock = %d",
-                                               ret, app->pid, app->sock);
-                       }
-               }
-               lttng_fd_put(LTTNG_FD_APPS, 2);
-               free(stream->obj);
-       }
-
-       return ret;
-}
-
-/*
- * Delete ust app stream safely. RCU read lock must be held before calling
- * this function.
- */
-static
-void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
-               struct ust_app *app)
-{
-       LTTNG_ASSERT(stream);
-
-       (void) release_ust_app_stream(sock, stream, app);
-       free(stream);
-}
-
-/*
- * We need to execute ht_destroy outside of RCU read-side critical
- * section and outside of call_rcu thread, so we postpone its execution
- * using ht_cleanup_push. It is simpler than changing the semantics of
- * the many callers of delete_ust_app_session().
- */
-static
-void delete_ust_app_channel_rcu(struct rcu_head *head)
-{
-       struct ust_app_channel *ua_chan =
-               caa_container_of(head, struct ust_app_channel, rcu_head);
-
-       ht_cleanup_push(ua_chan->ctx);
-       ht_cleanup_push(ua_chan->events);
-       free(ua_chan);
-}
-
-/*
- * Extract the lost packet or discarded events counter when the channel is
- * being deleted and store the value in the parent channel so we can
- * access it from lttng list and at stop/destroy.
- *
- * The session list lock must be held by the caller.
- */
-static
-void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
-{
-       uint64_t discarded = 0, lost = 0;
-       struct ltt_session *session;
-       struct ltt_ust_channel *uchan;
-
-       if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
-               return;
-       }
-
-       rcu_read_lock();
-       session = session_find_by_id(ua_chan->session->tracing_id);
-       if (!session || !session->ust_session) {
-               /*
-                * Not finding the session is not an error because there are
-                * multiple ways the channels can be torn down.
-                *
-                * 1) The session daemon can initiate the destruction of the
-                *    ust app session after receiving a destroy command or
-                *    during its shutdown/teardown.
-                * 2) The application, since we are in per-pid tracing, is
-                *    unregistering and tearing down its ust app session.
-                *
-                * Both paths are protected by the session list lock which
-                * ensures that the accounting of lost packets and discarded
-                * events is done exactly once. The session is then unpublished
-                * from the session list, resulting in this condition.
-                */
-               goto end;
-       }
-
-       if (ua_chan->attr.overwrite) {
-               consumer_get_lost_packets(ua_chan->session->tracing_id,
-                               ua_chan->key, session->ust_session->consumer,
-                               &lost);
-       } else {
-               consumer_get_discarded_events(ua_chan->session->tracing_id,
-                               ua_chan->key, session->ust_session->consumer,
-                               &discarded);
-       }
-       uchan = trace_ust_find_channel_by_name(
-                       session->ust_session->domain_global.channels,
-                       ua_chan->name);
-       if (!uchan) {
-               ERR("Missing UST channel to store discarded counters");
-               goto end;
-       }
-
-       uchan->per_pid_closed_app_discarded += discarded;
-       uchan->per_pid_closed_app_lost += lost;
-
-end:
-       rcu_read_unlock();
-       if (session) {
-               session_put(session);
-       }
-}
-
-/*
- * Delete ust app channel safely. RCU read lock must be held before calling
- * this function.
- *
- * The session list lock must be held by the caller.
- */
-static
-void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
-               struct ust_app *app)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-       struct ust_app_event *ua_event;
-       struct ust_app_ctx *ua_ctx;
-       struct ust_app_stream *stream, *stmp;
-       struct ust_registry_session *registry;
-
-       LTTNG_ASSERT(ua_chan);
-
-       DBG3("UST app deleting channel %s", ua_chan->name);
-
-       /* Wipe stream */
-       cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
-               cds_list_del(&stream->list);
-               delete_ust_app_stream(sock, stream, app);
-       }
-
-       /* Wipe context */
-       cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
-               cds_list_del(&ua_ctx->list);
-               ret = lttng_ht_del(ua_chan->ctx, &iter);
-               LTTNG_ASSERT(!ret);
-               delete_ust_app_ctx(sock, ua_ctx, app);
-       }
-
-       /* Wipe events */
-       cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
-                       node.node) {
-               ret = lttng_ht_del(ua_chan->events, &iter);
-               LTTNG_ASSERT(!ret);
-               delete_ust_app_event(sock, ua_event, app);
-       }
-
-       if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
-               /* Wipe and free registry from session registry. */
-               registry = get_session_registry(ua_chan->session);
-               if (registry) {
-                       ust_registry_channel_del_free(registry, ua_chan->key,
-                               sock >= 0);
-               }
-               /*
-                * A negative socket can be used by the caller when
-                * cleaning up a ua_chan in an error path. Skip the
-                * accounting in this case.
-                */
-               if (sock >= 0) {
-                       save_per_pid_lost_discarded_counters(ua_chan);
-               }
-       }
-
-       if (ua_chan->obj != NULL) {
-               /* Remove channel from application UST object descriptor. */
-               iter.iter.node = &ua_chan->ust_objd_node.node;
-               ret = lttng_ht_del(app->ust_objd, &iter);
-               LTTNG_ASSERT(!ret);
-               pthread_mutex_lock(&app->sock_lock);
-               ret = lttng_ust_ctl_release_object(sock, ua_chan->obj);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG3("UST app channel %s release failed. Application is dead: pid = %d, sock = %d",
-                                               ua_chan->name, app->pid,
-                                               app->sock);
-                       } else if (ret == -EAGAIN) {
-                               WARN("UST app channel %s release failed. Communication time out: pid = %d, sock = %d",
-                                               ua_chan->name, app->pid,
-                                               app->sock);
-                       } else {
-                               ERR("UST app channel %s release failed with ret %d: pid = %d, sock = %d",
-                                               ua_chan->name, ret, app->pid,
-                                               app->sock);
-                       }
-               }
-               lttng_fd_put(LTTNG_FD_APPS, 1);
-               free(ua_chan->obj);
-       }
-       call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
-}
-
-int ust_app_register_done(struct ust_app *app)
-{
-       int ret;
-
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_register_done(app->sock);
-       pthread_mutex_unlock(&app->sock_lock);
-       return ret;
-}
-
-int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
-{
-       int ret, sock;
-
-       if (app) {
-               pthread_mutex_lock(&app->sock_lock);
-               sock = app->sock;
-       } else {
-               sock = -1;
-       }
-       ret = lttng_ust_ctl_release_object(sock, data);
-       if (app) {
-               pthread_mutex_unlock(&app->sock_lock);
-       }
-       return ret;
-}
-
-/*
- * Push metadata to consumer socket.
- *
- * RCU read-side lock must be held to guarantee existence of socket.
- * Must be called with the ust app session lock held.
- * Must be called with the registry lock held.
- *
- * On success, return the len of metadata pushed or else a negative value.
- * Returning a -EPIPE return value means we could not send the metadata,
- * but it can be caused by recoverable errors (e.g. the application has
- * terminated concurrently).
- */
-ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
-               struct consumer_socket *socket, int send_zero_data)
-{
-       int ret;
-       char *metadata_str = NULL;
-       size_t len, offset, new_metadata_len_sent;
-       ssize_t ret_val;
-       uint64_t metadata_key, metadata_version;
-
-       LTTNG_ASSERT(registry);
-       LTTNG_ASSERT(socket);
-
-       metadata_key = registry->metadata_key;
-
-       /*
-        * Means that no metadata was assigned to the session. This can
-        * happen if no start has been done previously.
-        */
-       if (!metadata_key) {
-               return 0;
-       }
-
-       offset = registry->metadata_len_sent;
-       len = registry->metadata_len - registry->metadata_len_sent;
-       new_metadata_len_sent = registry->metadata_len;
-       metadata_version = registry->metadata_version;
-       if (len == 0) {
-               DBG3("No metadata to push for metadata key %" PRIu64,
-                               registry->metadata_key);
-               ret_val = len;
-               if (send_zero_data) {
-                       DBG("No metadata to push");
-                       goto push_data;
-               }
-               goto end;
-       }
-
-       /* Allocate only what we have to send. */
-       metadata_str = zmalloc(len);
-       if (!metadata_str) {
-               PERROR("zmalloc ust app metadata string");
-               ret_val = -ENOMEM;
-               goto error;
-       }
-       /* Copy what we haven't sent out. */
-       memcpy(metadata_str, registry->metadata + offset, len);
-
-push_data:
-       pthread_mutex_unlock(&registry->lock);
-       /*
-        * We need to unlock the registry while we push metadata to
-        * break a circular dependency between the consumerd metadata
-        * lock and the sessiond registry lock. Indeed, pushing metadata
-        * to the consumerd awaits that it gets pushed all the way to
-        * relayd, but doing so requires grabbing the metadata lock. If
-        * a concurrent metadata request is being performed by
-        * consumerd, this can try to grab the registry lock on the
-        * sessiond while holding the metadata lock on the consumer
-        * daemon. Those push and pull schemes are performed on two
-        * different bidirectional communication sockets.
-        */
-       ret = consumer_push_metadata(socket, metadata_key,
-                       metadata_str, len, offset, metadata_version);
-       pthread_mutex_lock(&registry->lock);
-       if (ret < 0) {
-               /*
-                * There is an acceptable race here between the registry
-                * metadata key assignment and the creation on the
-                * consumer. The session daemon can concurrently push
-                * metadata for this registry while it is being created on
-                * the consumer, since the metadata key of the registry is
-                * assigned *before* it is set up; this avoids having the
-                * consumer ask for metadata that might not be found in
-                * the session daemon.
-                *
-                * The metadata will get pushed either by the session
-                * being stopped or the consumer requesting metadata if
-                * that race is triggered.
-                */
-               if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
-                       ret = 0;
-               } else {
-                       ERR("Error pushing metadata to consumer");
-               }
-               ret_val = ret;
-               goto error_push;
-       } else {
-               /*
-                * Metadata may have been concurrently pushed, since
-                * we're not holding the registry lock while pushing to
-                * consumer.  This is handled by the fact that we send
-                * the metadata content, size, and the offset at which
-                * that metadata belongs. This may arrive out of order
-                * on the consumer side, and the consumer can deal with
-                * overlapping fragments, as long as they are contiguous
-                * starting from offset 0. We keep the largest
-                * metadata_len_sent value of the concurrent sends.
-                */
-               registry->metadata_len_sent =
-                       max_t(size_t, registry->metadata_len_sent,
-                               new_metadata_len_sent);
-       }
-       free(metadata_str);
-       return len;
-
-end:
-error:
-       if (ret_val) {
-               /*
-                * On error, flag the registry that the metadata is
-                * closed. We were unable to push anything and this
-                * means that either the consumer is not responding or
-                * the metadata cache has been destroyed on the
-                * consumer.
-                */
-               registry->metadata_closed = 1;
-       }
-error_push:
-       free(metadata_str);
-       return ret_val;
-}
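
The unlock/relock around consumer_push_metadata() above is the usual way to break this kind of cross-daemon lock inversion: release the local lock before a blocking call whose peer may need that same lock, then reacquire it and re-validate shared state. A generic, self-contained sketch of the pattern, using plain pthread and hypothetical names (this is not lttng-tools code):

    #include <pthread.h>

    /* 'blocking_push' stands in for a call such as consumer_push_metadata(). */
    static int push_with_lock_dropped(pthread_mutex_t *registry_lock,
                    int (*blocking_push)(void *payload), void *payload)
    {
            int ret;

            /* Caller holds *registry_lock on entry. */
            pthread_mutex_unlock(registry_lock);
            /*
             * The peer can now take the lock (e.g. to service a metadata
             * request) without deadlocking against this thread.
             */
            ret = blocking_push(payload);
            pthread_mutex_lock(registry_lock);
            /* The lock is held again on exit; re-validate shared state. */
            return ret;
    }

As in ust_app_push_metadata() above, anything read from the registry before the unlock (here, whatever 'payload' points to) must be a private copy, since the registry may change while the lock is dropped.
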
-
-/*
- * For a given application and session, push metadata to consumer.
- * Either sock or consumer is required: if sock is NULL, the default
- * socket to send the metadata is retrieved from consumer; if sock
- * is not NULL, we use it to send the metadata.
- * RCU read-side lock must be held while calling this function,
- * therefore ensuring existence of registry. It also ensures existence
- * of socket throughout this function.
- *
- * Return 0 on success else a negative error.
- * Returning a -EPIPE return value means we could not send the metadata,
- * but it can be caused by recoverable errors (e.g. the application has
- * terminated concurrently).
- */
-static int push_metadata(struct ust_registry_session *registry,
-               struct consumer_output *consumer)
-{
-       int ret_val;
-       ssize_t ret;
-       struct consumer_socket *socket;
-
-       LTTNG_ASSERT(registry);
-       LTTNG_ASSERT(consumer);
-
-       pthread_mutex_lock(&registry->lock);
-       if (registry->metadata_closed) {
-               ret_val = -EPIPE;
-               goto error;
-       }
-
-       /* Get consumer socket to use to push the metadata. */
-       socket = consumer_find_socket_by_bitness(registry->bits_per_long,
-                       consumer);
-       if (!socket) {
-               ret_val = -1;
-               goto error;
-       }
-
-       ret = ust_app_push_metadata(registry, socket, 0);
-       if (ret < 0) {
-               ret_val = ret;
-               goto error;
-       }
-       pthread_mutex_unlock(&registry->lock);
-       return 0;
-
-error:
-       pthread_mutex_unlock(&registry->lock);
-       return ret_val;
-}
-
-/*
- * Send to the consumer a close metadata command for the given session. Once
- * done, the metadata channel is deleted and the session metadata pointer is
- * nullified. The session lock MUST be held unless the application is
- * in the destroy path.
- *
- * Do not hold the registry lock while communicating with the consumerd, because
- * doing so causes inter-process deadlocks between consumerd and sessiond with
- * the metadata request notification.
- *
- * Return 0 on success else a negative value.
- */
-static int close_metadata(struct ust_registry_session *registry,
-               struct consumer_output *consumer)
-{
-       int ret;
-       struct consumer_socket *socket;
-       uint64_t metadata_key;
-       bool registry_was_already_closed;
-
-       LTTNG_ASSERT(registry);
-       LTTNG_ASSERT(consumer);
-
-       rcu_read_lock();
-
-       pthread_mutex_lock(&registry->lock);
-       metadata_key = registry->metadata_key;
-       registry_was_already_closed = registry->metadata_closed;
-       if (metadata_key != 0) {
-               /*
-                * Metadata closed. Even on error this means that the consumer
-                * is not responding or not found, so either way a second close
-                * should NOT be emitted for this registry.
-                */
-               registry->metadata_closed = 1;
-       }
-       pthread_mutex_unlock(&registry->lock);
-
-       if (metadata_key == 0 || registry_was_already_closed) {
-               ret = 0;
-               goto end;
-       }
-
-       /* Get consumer socket to use to push the metadata. */
-       socket = consumer_find_socket_by_bitness(registry->bits_per_long,
-                       consumer);
-       if (!socket) {
-               ret = -1;
-               goto end;
-       }
-
-       ret = consumer_close_metadata(socket, metadata_key);
-       if (ret < 0) {
-               goto end;
-       }
-
-end:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * We need to execute ht_destroy outside of RCU read-side critical
- * section and outside of call_rcu thread, so we postpone its execution
- * using ht_cleanup_push. It is simpler than changing the semantics of
- * the many callers of delete_ust_app_session().
- */
-static
-void delete_ust_app_session_rcu(struct rcu_head *head)
-{
-       struct ust_app_session *ua_sess =
-               caa_container_of(head, struct ust_app_session, rcu_head);
-
-       ht_cleanup_push(ua_sess->channels);
-       free(ua_sess);
-}
-
-/*
- * Delete ust app session safely. RCU read lock must be held before calling
- * this function.
- *
- * The session list lock must be held by the caller.
- */
-static
-void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
-               struct ust_app *app)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-       struct ust_app_channel *ua_chan;
-       struct ust_registry_session *registry;
-
-       LTTNG_ASSERT(ua_sess);
-
-       pthread_mutex_lock(&ua_sess->lock);
-
-       LTTNG_ASSERT(!ua_sess->deleted);
-       ua_sess->deleted = true;
-
-       registry = get_session_registry(ua_sess);
-       /* Registry can be null on error path during initialization. */
-       if (registry) {
-               /* Push metadata for application before freeing the application. */
-               (void) push_metadata(registry, ua_sess->consumer);
-
-               /*
-                * Don't ask to close metadata for global per UID buffers. Close
-                * metadata only when the trace session is destroyed in this case.
-                * Also, the previous push metadata could have flagged the metadata
-                * registry as closed, so don't send a close command if it already is.
-                */
-               if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
-                       /* And ask to close it for this session registry. */
-                       (void) close_metadata(registry, ua_sess->consumer);
-               }
-       }
-
-       cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
-                       node.node) {
-               ret = lttng_ht_del(ua_sess->channels, &iter);
-               LTTNG_ASSERT(!ret);
-               delete_ust_app_channel(sock, ua_chan, app);
-       }
-
-       /* In case of per PID, the registry is kept in the session. */
-       if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
-               struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
-               if (reg_pid) {
-                       /*
-                        * Registry can be null on error path during
-                        * initialization.
-                        */
-                       buffer_reg_pid_remove(reg_pid);
-                       buffer_reg_pid_destroy(reg_pid);
-               }
-       }
-
-       if (ua_sess->handle != -1) {
-               pthread_mutex_lock(&app->sock_lock);
-               ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG3("UST app release session handle failed. Application is dead: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else if (ret == -EAGAIN) {
-                               WARN("UST app release session handle failed. Communication time out: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else {
-                               ERR("UST app release session handle failed with ret %d: pid = %d, sock = %d",
-                                               ret, app->pid, app->sock);
-                       }
-               }
-
-               /* Remove session from application UST object descriptor. */
-               iter.iter.node = &ua_sess->ust_objd_node.node;
-               ret = lttng_ht_del(app->ust_sessions_objd, &iter);
-               LTTNG_ASSERT(!ret);
-       }
-
-       pthread_mutex_unlock(&ua_sess->lock);
-
-       consumer_output_put(ua_sess->consumer);
-
-       call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
-}
-
-/*
- * Delete a traceable application structure from the global list. Never call
- * this function outside of a call_rcu call.
- *
- * RCU read side lock should _NOT_ be held when calling this function.
- */
-static
-void delete_ust_app(struct ust_app *app)
-{
-       int ret, sock;
-       struct ust_app_session *ua_sess, *tmp_ua_sess;
-       struct lttng_ht_iter iter;
-       struct ust_app_event_notifier_rule *event_notifier_rule;
-       bool event_notifier_write_fd_is_open;
-
-       /*
-        * The session list lock must be held during this function to guarantee
-        * the existence of ua_sess.
-        */
-       session_lock_list();
-       /* Delete ust app sessions info */
-       sock = app->sock;
-       app->sock = -1;
-
-       /* Wipe sessions */
-       cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
-                       teardown_node) {
-               /* Free every object in the session and the session. */
-               rcu_read_lock();
-               delete_ust_app_session(sock, ua_sess, app);
-               rcu_read_unlock();
-       }
-
-       /* Remove the event notifier rules associated with this app. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
-                       &iter.iter, event_notifier_rule, node.node) {
-               ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
-               LTTNG_ASSERT(!ret);
-
-               delete_ust_app_event_notifier_rule(
-                               app->sock, event_notifier_rule, app);
-       }
-
-       rcu_read_unlock();
-
-       ht_cleanup_push(app->sessions);
-       ht_cleanup_push(app->ust_sessions_objd);
-       ht_cleanup_push(app->ust_objd);
-       ht_cleanup_push(app->token_to_event_notifier_rule_ht);
-
-       /*
-        * This could be NULL if the event notifier setup failed (e.g. the app
-        * was killed or the tracer does not support this feature).
-        */
-       if (app->event_notifier_group.object) {
-               enum lttng_error_code ret_code;
-               enum event_notifier_error_accounting_status status;
-
-               const int event_notifier_read_fd = lttng_pipe_get_readfd(
-                               app->event_notifier_group.event_pipe);
-
-               ret_code = notification_thread_command_remove_tracer_event_source(
-                               the_notification_thread_handle,
-                               event_notifier_read_fd);
-               if (ret_code != LTTNG_OK) {
-                       ERR("Failed to remove application tracer event source from notification thread");
-               }
-
-               status = event_notifier_error_accounting_unregister_app(app);
-               if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
-                       ERR("Error unregistering app from event notifier error accounting");
-               }
-
-               lttng_ust_ctl_release_object(sock, app->event_notifier_group.object);
-               free(app->event_notifier_group.object);
-       }
-
-       event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
-                       app->event_notifier_group.event_pipe);
-       lttng_pipe_destroy(app->event_notifier_group.event_pipe);
-       /*
-        * Release the file descriptors reserved for the event notifier pipe.
-        * The app could be destroyed before the write end of the pipe could be
-        * passed to the application (and closed). In that case, both file
-        * descriptors must be released.
-        */
-       lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);
-
-       /*
-        * Wait until we have deleted the application from the sock hash table
-        * before closing this socket, otherwise an application could re-use the
-        * socket ID and race with the teardown, using the same hash table entry.
-        *
-        * It's OK to leave the close in call_rcu. We want it to stay unique for
-        * all RCU readers that could run concurrently with unregister app,
-        * therefore we _need_ to only close that socket after a grace period. So
-        * it should stay in this RCU callback.
-        *
-        * This close() is a very important step of the synchronization model so
-        * every modification to this function must be carefully reviewed.
-        */
-       ret = close(sock);
-       if (ret) {
-               PERROR("close");
-       }
-       lttng_fd_put(LTTNG_FD_APPS, 1);
-
-       DBG2("UST app pid %d deleted", app->pid);
-       free(app);
-       session_unlock_list();
-}
-
-/*
- * URCU intermediate call to delete an UST app.
- */
-static
-void delete_ust_app_rcu(struct rcu_head *head)
-{
-       struct lttng_ht_node_ulong *node =
-               caa_container_of(head, struct lttng_ht_node_ulong, head);
-       struct ust_app *app =
-               caa_container_of(node, struct ust_app, pid_n);
-
-       DBG3("Call RCU deleting app PID %d", app->pid);
-       delete_ust_app(app);
-}
-
-/*
- * Delete the session from the application ht and delete the data structure by
- * freeing every object inside and releasing them.
- *
- * The session list lock must be held by the caller.
- */
-static void destroy_app_session(struct ust_app *app,
-               struct ust_app_session *ua_sess)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(ua_sess);
-
-       iter.iter.node = &ua_sess->node.node;
-       ret = lttng_ht_del(app->sessions, &iter);
-       if (ret) {
-               /* Already scheduled for teardown. */
-               goto end;
-       }
-
-       /* Once deleted, free the data structure. */
-       delete_ust_app_session(app->sock, ua_sess, app);
-
-end:
-       return;
-}
-
-/*
- * Alloc new UST app session.
- */
-static
-struct ust_app_session *alloc_ust_app_session(void)
-{
-       struct ust_app_session *ua_sess;
-
-       /* Init most of the default values by allocating and zeroing */
-       ua_sess = zmalloc(sizeof(struct ust_app_session));
-       if (ua_sess == NULL) {
-               PERROR("malloc");
-               goto error_free;
-       }
-
-       ua_sess->handle = -1;
-       ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
-       ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
-       pthread_mutex_init(&ua_sess->lock, NULL);
-
-       return ua_sess;
-
-error_free:
-       return NULL;
-}
-
-/*
- * Alloc new UST app channel.
- */
-static
-struct ust_app_channel *alloc_ust_app_channel(const char *name,
-               struct ust_app_session *ua_sess,
-               struct lttng_ust_abi_channel_attr *attr)
-{
-       struct ust_app_channel *ua_chan;
-
-       /* Init most of the default values by allocating and zeroing */
-       ua_chan = zmalloc(sizeof(struct ust_app_channel));
-       if (ua_chan == NULL) {
-               PERROR("malloc");
-               goto error;
-       }
-
-       /* Setup channel name */
-       strncpy(ua_chan->name, name, sizeof(ua_chan->name));
-       ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
-
-       ua_chan->enabled = 1;
-       ua_chan->handle = -1;
-       ua_chan->session = ua_sess;
-       ua_chan->key = get_next_channel_key();
-       ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-       ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
-       lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
-
-       CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
-       CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
-
-       /* Copy attributes */
-       if (attr) {
-               /* Translate from lttng_ust_channel to lttng_ust_ctl_consumer_channel_attr. */
-               ua_chan->attr.subbuf_size = attr->subbuf_size;
-               ua_chan->attr.num_subbuf = attr->num_subbuf;
-               ua_chan->attr.overwrite = attr->overwrite;
-               ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
-               ua_chan->attr.read_timer_interval = attr->read_timer_interval;
-               ua_chan->attr.output = attr->output;
-               ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
-       }
-       /* By default, the channel is a per cpu channel. */
-       ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
-
-       DBG3("UST app channel %s allocated", ua_chan->name);
-
-       return ua_chan;
-
-error:
-       return NULL;
-}
-
-/*
- * Allocate and initialize a UST app stream.
- *
- * Return newly allocated stream pointer or NULL on error.
- */
-struct ust_app_stream *ust_app_alloc_stream(void)
-{
-       struct ust_app_stream *stream = NULL;
-
-       stream = zmalloc(sizeof(*stream));
-       if (stream == NULL) {
-               PERROR("zmalloc ust app stream");
-               goto error;
-       }
-
-       /* Zero could be a valid value for a handle so flag it to -1. */
-       stream->handle = -1;
-
-error:
-       return stream;
-}
-
-/*
- * Alloc new UST app event.
- */
-static
-struct ust_app_event *alloc_ust_app_event(char *name,
-               struct lttng_ust_abi_event *attr)
-{
-       struct ust_app_event *ua_event;
-
-       /* Init most of the default values by allocating and zeroing */
-       ua_event = zmalloc(sizeof(struct ust_app_event));
-       if (ua_event == NULL) {
-               PERROR("Failed to allocate ust_app_event structure");
-               goto error;
-       }
-
-       ua_event->enabled = 1;
-       strncpy(ua_event->name, name, sizeof(ua_event->name));
-       ua_event->name[sizeof(ua_event->name) - 1] = '\0';
-       lttng_ht_node_init_str(&ua_event->node, ua_event->name);
-
-       /* Copy attributes */
-       if (attr) {
-               memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
-       }
-
-       DBG3("UST app event %s allocated", ua_event->name);
-
-       return ua_event;
-
-error:
-       return NULL;
-}
-
-/*
- * Allocate a new UST app event notifier rule.
- */
-static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
-               struct lttng_trigger *trigger)
-{
-       enum lttng_event_rule_generate_exclusions_status
-                       generate_exclusion_status;
-       enum lttng_condition_status cond_status;
-       struct ust_app_event_notifier_rule *ua_event_notifier_rule;
-       struct lttng_condition *condition = NULL;
-       const struct lttng_event_rule *event_rule = NULL;
-
-       ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
-       if (ua_event_notifier_rule == NULL) {
-               PERROR("Failed to allocate ust_app_event_notifier_rule structure");
-               goto error;
-       }
-
-       ua_event_notifier_rule->enabled = 1;
-       ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
-       lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
-                       ua_event_notifier_rule->token);
-
-       condition = lttng_trigger_get_condition(trigger);
-       LTTNG_ASSERT(condition);
-       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
-                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
-
-       cond_status = lttng_condition_event_rule_matches_get_rule(
-                       condition, &event_rule);
-       LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
-       LTTNG_ASSERT(event_rule);
-
-       ua_event_notifier_rule->error_counter_index =
-                       lttng_condition_event_rule_matches_get_error_counter_index(condition);
-       /* Acquire the event notifier's reference to the trigger. */
-       lttng_trigger_get(trigger);
-
-       ua_event_notifier_rule->trigger = trigger;
-       ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
-       generate_exclusion_status = lttng_event_rule_generate_exclusions(
-                       event_rule, &ua_event_notifier_rule->exclusion);
-       switch (generate_exclusion_status) {
-       case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
-       case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
-               break;
-       default:
-               /* Error occurred. */
-               ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
-               goto error_put_trigger;
-       }
-
-       DBG3("UST app event notifier rule allocated: token = %" PRIu64,
-                       ua_event_notifier_rule->token);
-
-       return ua_event_notifier_rule;
-
-error_put_trigger:
-       lttng_trigger_put(trigger);
-error:
-       free(ua_event_notifier_rule);
-       return NULL;
-}
-
-/*
- * Alloc new UST app context.
- */
-static
-struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
-{
-       struct ust_app_ctx *ua_ctx;
-
-       ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
-       if (ua_ctx == NULL) {
-               goto error;
-       }
-
-       CDS_INIT_LIST_HEAD(&ua_ctx->list);
-
-       if (uctx) {
-               memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
-               if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
-                       char *provider_name = NULL, *ctx_name = NULL;
-
-                       provider_name = strdup(uctx->u.app_ctx.provider_name);
-                       ctx_name = strdup(uctx->u.app_ctx.ctx_name);
-                       if (!provider_name || !ctx_name) {
-                               free(provider_name);
-                               free(ctx_name);
-                               goto error;
-                       }
-
-                       ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
-                       ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
-               }
-       }
-
-       DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
-       return ua_ctx;
-error:
-       free(ua_ctx);
-       return NULL;
-}
-
-/*
- * Create a liblttng-ust filter bytecode from given bytecode.
- *
- * Return allocated filter or NULL on error.
- */
-static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
-               const struct lttng_bytecode *orig_f)
-{
-       struct lttng_ust_abi_filter_bytecode *filter = NULL;
-
-       /* Copy filter bytecode. */
-       filter = zmalloc(sizeof(*filter) + orig_f->len);
-       if (!filter) {
-               PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
-               goto error;
-       }
-
-       LTTNG_ASSERT(sizeof(struct lttng_bytecode) ==
-                       sizeof(struct lttng_ust_abi_filter_bytecode));
-       memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
-error:
-       return filter;
-}
-
-/*
- * Create a liblttng-ust capture bytecode from given bytecode.
- *
- * Return allocated capture bytecode or NULL on error.
- */
-static struct lttng_ust_abi_capture_bytecode *
-create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
-{
-       struct lttng_ust_abi_capture_bytecode *capture = NULL;
-
-       /* Copy capture bytecode. */
-       capture = zmalloc(sizeof(*capture) + orig_f->len);
-       if (!capture) {
-               PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
-               goto error;
-       }
-
-       LTTNG_ASSERT(sizeof(struct lttng_bytecode) ==
-                       sizeof(struct lttng_ust_abi_capture_bytecode));
-       memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
-error:
-       return capture;
-}
-
-/*
- * Find an ust_app using the sock and return it. RCU read side lock must be
- * held before calling this helper function.
- */
-struct ust_app *ust_app_find_by_sock(int sock)
-{
-       struct lttng_ht_node_ulong *node;
-       struct lttng_ht_iter iter;
-
-       lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
-       node = lttng_ht_iter_get_node_ulong(&iter);
-       if (node == NULL) {
-               DBG2("UST app find by sock %d not found", sock);
-               goto error;
-       }
-
-       return caa_container_of(node, struct ust_app, sock_n);
-
-error:
-       return NULL;
-}
-
-/*
- * Find an ust_app using the notify sock and return it. RCU read side lock must
- * be held before calling this helper function.
- */
-static struct ust_app *find_app_by_notify_sock(int sock)
-{
-       struct lttng_ht_node_ulong *node;
-       struct lttng_ht_iter iter;
-
-       lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
-                       &iter);
-       node = lttng_ht_iter_get_node_ulong(&iter);
-       if (node == NULL) {
-               DBG2("UST app find by notify sock %d not found", sock);
-               goto error;
-       }
-
-       return caa_container_of(node, struct ust_app, notify_sock_n);
-
-error:
-       return NULL;
-}
-
-/*
- * Lookup for an ust app event based on event name, filter bytecode and the
- * event loglevel.
- *
- * Return an ust_app_event object or NULL on error.
- */
-static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
-               const char *name, const struct lttng_bytecode *filter,
-               int loglevel_value,
-               const struct lttng_event_exclusion *exclusion)
-{
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_str *node;
-       struct ust_app_event *event = NULL;
-       struct ust_app_ht_key key;
-
-       LTTNG_ASSERT(name);
-       LTTNG_ASSERT(ht);
-
-       /* Setup key for event lookup. */
-       key.name = name;
-       key.filter = filter;
-       key.loglevel_type = loglevel_value;
-       /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
-       key.exclusion = exclusion;
-
-       /* Lookup using the event name as hash and a custom match fct. */
-       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
-                       ht_match_ust_app_event, &key, &iter.iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-       if (node == NULL) {
-               goto end;
-       }
-
-       event = caa_container_of(node, struct ust_app_event, node);
-
-end:
-       return event;
-}
-
-/*
- * Look-up an event notifier rule based on its token id.
- *
- * Must be called with the RCU read lock held.
- * Return an ust_app_event_notifier_rule object or NULL on error.
- */
-static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
-               struct lttng_ht *ht, uint64_t token)
-{
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_u64 *node;
-       struct ust_app_event_notifier_rule *event_notifier_rule = NULL;
-
-       LTTNG_ASSERT(ht);
-
-       lttng_ht_lookup(ht, &token, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (node == NULL) {
-               DBG2("UST app event notifier rule token not found: token = %" PRIu64,
-                               token);
-               goto end;
-       }
-
-       event_notifier_rule = caa_container_of(
-                       node, struct ust_app_event_notifier_rule, node);
-end:
-       return event_notifier_rule;
-}
-
-/*
- * Create the channel context on the tracer.
- *
- * Called with UST app session lock held.
- */
-static
-int create_ust_channel_context(struct ust_app_channel *ua_chan,
-               struct ust_app_ctx *ua_ctx, struct ust_app *app)
-{
-       int ret;
-
-       health_code_update();
-
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx,
-                       ua_chan->obj, &ua_ctx->obj);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = 0;
-                       DBG3("UST app create channel context failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       ret = 0;
-                       WARN("UST app create channel context failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app create channel context failed with ret %d: pid = %d, sock = %d",
-                                       ret, app->pid, app->sock);
-               }
-               goto error;
-       }
-
-       ua_ctx->handle = ua_ctx->obj->handle;
-
-       DBG2("UST app context handle %d created successfully for channel %s",
-                       ua_ctx->handle, ua_chan->name);
-
-error:
-       health_code_update();
-       return ret;
-}
-
-/*
- * Set the filter on the tracer.
- */
-static int set_ust_object_filter(struct ust_app *app,
-               const struct lttng_bytecode *bytecode,
-               struct lttng_ust_abi_object_data *ust_object)
-{
-       int ret;
-       struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;
-
-       health_code_update();
-
-       ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
-       if (!ust_bytecode) {
-               ret = -LTTNG_ERR_NOMEM;
-               goto error;
-       }
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode,
-                       ust_object);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = 0;
-                       DBG3("UST app set filter failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       ret = 0;
-                       WARN("UST app set filter failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app set filter failed with ret %d: pid = %d, sock = %d, object = %p",
-                                       ret, app->pid, app->sock, ust_object);
-               }
-               goto error;
-       }
-
-       DBG2("UST filter successfully set: object = %p", ust_object);
-
-error:
-       health_code_update();
-       free(ust_bytecode);
-       return ret;
-}
-
-/*
- * Set a capture bytecode for the passed object.
- * The sequence number enforces the ordering at runtime and on reception of
- * the captured payloads.
- */
-static int set_ust_capture(struct ust_app *app,
-               const struct lttng_bytecode *bytecode,
-               unsigned int capture_seqnum,
-               struct lttng_ust_abi_object_data *ust_object)
-{
-       int ret;
-       struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;
-
-       health_code_update();
-
-       ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
-       if (!ust_bytecode) {
-               ret = -LTTNG_ERR_NOMEM;
-               goto error;
-       }
-
-       /*
-        * Set the sequence number to ensure the capture of fields is ordered.
-        */
-       ust_bytecode->seqnum = capture_seqnum;
-
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode,
-                       ust_object);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = 0;
-                       DBG3("UST app set capture failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       ret = 0;
-                       DBG3("UST app set capture failed. Communication timeout: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app event set capture failed with ret %d: pid = %d, sock = %d",
-                                       ret, app->pid,
-                                       app->sock);
-               }
-
-               goto error;
-       }
-
-       DBG2("UST capture successfully set: object = %p", ust_object);
-
-error:
-       health_code_update();
-       free(ust_bytecode);
-       return ret;
-}
-
-static
-struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
-               const struct lttng_event_exclusion *exclusion)
-{
-       struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
-       size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
-               LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
-
-       ust_exclusion = zmalloc(exclusion_alloc_size);
-       if (!ust_exclusion) {
-               PERROR("malloc");
-               goto end;
-       }
-
-       LTTNG_ASSERT(sizeof(struct lttng_event_exclusion) ==
-                       sizeof(struct lttng_ust_abi_event_exclusion));
-       memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
-end:
-       return ust_exclusion;
-}
-
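
create_ust_exclusion_from_exclusion() sizes its allocation as the fixed header plus LTTNG_UST_ABI_SYM_NAME_LEN bytes per excluded name, the usual header-plus-payload layout. A reduced sketch of that allocation pattern with hypothetical name_list/NAME_LEN stand-ins (flexible array members are a C99 feature that GCC and Clang commonly accept in C++ as an extension):

    #include <stdlib.h>

    #define NAME_LEN 256    /* stand-in for the fixed per-name length */

    struct name_list {
            unsigned int count;
            char names[];   /* count * NAME_LEN bytes follow the header */
    };

    static struct name_list *name_list_create(unsigned int count)
    {
            size_t alloc_size = sizeof(struct name_list)
                            + (size_t) NAME_LEN * count;
            /* calloc() returns the same zero-initialized block as zmalloc(). */
            struct name_list *list = (struct name_list *) calloc(1, alloc_size);

            if (list) {
                    list->count = count;
            }
            return list;
    }
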
-/*
- * Set event exclusions on the tracer.
- */
-static int set_ust_object_exclusions(struct ust_app *app,
-               const struct lttng_event_exclusion *exclusions,
-               struct lttng_ust_abi_object_data *ust_object)
-{
-       int ret;
-       struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;
-
-       LTTNG_ASSERT(exclusions && exclusions->count > 0);
-
-       health_code_update();
-
-       ust_exclusions = create_ust_exclusion_from_exclusion(
-                       exclusions);
-       if (!ust_exclusions) {
-               ret = -LTTNG_ERR_NOMEM;
-               goto error;
-       }
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_set_exclusion(app->sock, ust_exclusions, ust_object);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = 0;
-                       DBG3("UST app event exclusion failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       ret = 0;
-                       WARN("UST app event exclusion failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app event exclusions failed with ret %d: pid = %d, sock = %d, object = %p",
-                                       ret, app->pid, app->sock, ust_object);
-               }
-               goto error;
-       }
-
-       DBG2("UST exclusions set successfully for object %p", ust_object);
-
-error:
-       health_code_update();
-       free(ust_exclusions);
-       return ret;
-}
-
-/*
- * Disable the specified event on the UST tracer for the UST session.
- */
-static int disable_ust_object(struct ust_app *app,
-               struct lttng_ust_abi_object_data *object)
-{
-       int ret;
-
-       health_code_update();
-
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_disable(app->sock, object);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = 0;
-                       DBG3("UST app disable object failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       ret = 0;
-                       WARN("UST app disable object failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app disable object failed with ret %d: pid = %d, sock = %d, object = %p",
-                                       ret, app->pid, app->sock, object);
-               }
-               goto error;
-       }
-
-       DBG2("UST app object %p disabled successfully for app: pid = %d",
-                       object, app->pid);
-
-error:
-       health_code_update();
-       return ret;
-}
-
-/*
- * Disable the specified channel on the UST tracer for the UST session.
- */
-static int disable_ust_channel(struct ust_app *app,
-               struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
-{
-       int ret;
-
-       health_code_update();
-
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_disable(app->sock, ua_chan->obj);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = 0;
-                       DBG3("UST app disable channel failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       ret = 0;
-                       WARN("UST app disable channel failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app channel %s disable failed, session handle %d, with ret %d: pid = %d, sock = %d",
-                                       ua_chan->name, ua_sess->handle, ret,
-                                       app->pid, app->sock);
-               }
-               goto error;
-       }
-
-       DBG2("UST app channel %s disabled successfully for app: pid = %d",
-                       ua_chan->name, app->pid);
-
-error:
-       health_code_update();
-       return ret;
-}
-
-/*
- * Enable the specified channel on the UST tracer for the UST session.
- */
-static int enable_ust_channel(struct ust_app *app,
-               struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
-{
-       int ret;
-
-       health_code_update();
-
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_enable(app->sock, ua_chan->obj);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = 0;
-                       DBG3("UST app channel %s enable failed. Application is dead: pid = %d, sock = %d",
-                                       ua_chan->name, app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       ret = 0;
-                       WARN("UST app channel %s enable failed. Communication time out: pid = %d, sock = %d",
-                                       ua_chan->name, app->pid, app->sock);
-               } else {
-                       ERR("UST app channel %s enable failed, session handle %d, with ret %d: pid = %d, sock = %d",
-                                       ua_chan->name, ua_sess->handle, ret,
-                                       app->pid, app->sock);
-               }
-               goto error;
-       }
-
-       ua_chan->enabled = 1;
-
-       DBG2("UST app channel %s enabled successfully for app: pid = %d",
-                       ua_chan->name, app->pid);
-
-error:
-       health_code_update();
-       return ret;
-}
-
-/*
- * Enable the specified event on the UST tracer for the UST session.
- */
-static int enable_ust_object(
-               struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
-{
-       int ret;
-
-       health_code_update();
-
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_enable(app->sock, ust_object);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = 0;
-                       DBG3("UST app enable object failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       ret = 0;
-                       WARN("UST app enable object failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app enable object failed with ret %d: pid = %d, sock = %d, object = %p",
-                                       ret, app->pid, app->sock, ust_object);
-               }
-               goto error;
-       }
-
-       DBG2("UST app object %p enabled successfully for app: pid = %d",
-                       ust_object, app->pid);
-
-error:
-       health_code_update();
-       return ret;
-}
-
-/*
- * Send channel and stream buffer to application.
- *
- * Return 0 on success. On error, a negative value is returned.
- */
-static int send_channel_pid_to_ust(struct ust_app *app,
-               struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
-{
-       int ret;
-       struct ust_app_stream *stream, *stmp;
-
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(ua_chan);
-
-       health_code_update();
-
-       DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
-                       app->sock);
-
-       /* Send channel to the application. */
-       ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
-       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-               ret = -ENOTCONN;        /* Caused by app exiting. */
-               goto error;
-       } else if (ret == -EAGAIN) {
-               /* Caused by timeout. */
-               WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
-                               app->pid, ua_chan->name, ua_sess->tracing_id);
-               /* Treat this the same way as an application that is exiting. */
-               ret = -ENOTCONN;
-               goto error;
-       } else if (ret < 0) {
-               goto error;
-       }
-
-       health_code_update();
-
-       /* Send all streams to application. */
-       cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
-               ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = -ENOTCONN; /* Caused by app exiting. */
-                       goto error;
-               } else if (ret == -EAGAIN) {
-                       /* Caused by timeout. */
-                       WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
-                                       app->pid, stream->name, ua_chan->name,
-                                       ua_sess->tracing_id);
-                       /*
-                        * Treat this the same way as an application that is
-                        * exiting.
-                        */
-                       ret = -ENOTCONN;
-               } else if (ret < 0) {
-                       goto error;
-               }
-               /* We don't need the stream anymore once sent to the tracer. */
-               cds_list_del(&stream->list);
-               delete_ust_app_stream(-1, stream, app);
-       }
-       /* Flag the channel that it is sent to the application. */
-       ua_chan->is_sent = 1;
-
-error:
-       health_code_update();
-       return ret;
-}
-
-/*
- * Create the specified event on the UST tracer for a UST session.
- *
- * Should be called with session mutex held.
- */
-static
-int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
-               struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
-{
-       int ret = 0;
-
-       health_code_update();
-
-       /* Create UST event on tracer */
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
-                       &ua_event->obj);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = 0;
-                       DBG3("UST app create event failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       ret = 0;
-                       WARN("UST app create event failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app create event '%s' failed with ret %d: pid = %d, sock = %d",
-                                       ua_event->attr.name, ret, app->pid,
-                                       app->sock);
-               }
-               goto error;
-       }
-
-       ua_event->handle = ua_event->obj->handle;
-
-       DBG2("UST app event %s created successfully for pid:%d object = %p",
-                       ua_event->attr.name, app->pid, ua_event->obj);
-
-       health_code_update();
-
-       /* Set filter if one is present. */
-       if (ua_event->filter) {
-               ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
-               if (ret < 0) {
-                       goto error;
-               }
-       }
-
-       /* Set exclusions for the event */
-       if (ua_event->exclusion) {
-               ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
-               if (ret < 0) {
-                       goto error;
-               }
-       }
-
-       /* Enable the event on the tracer if it is meant to be enabled. */
-       if (ua_event->enabled) {
-               /*
-                * We now need to explicitly enable the event, since it
-                * is now disabled at creation.
-                */
-               ret = enable_ust_object(app, ua_event->obj);
-               if (ret < 0) {
-                       /*
-                        * If we hit an EPERM, something is wrong with our enable call. If
-                        * we get an EEXIST, there is a problem on the tracer side since we
-                        * just created it.
-                        */
-                       switch (ret) {
-                       case -LTTNG_UST_ERR_PERM:
-                               /* Code flow problem */
-                               abort();
-                       case -LTTNG_UST_ERR_EXIST:
-                               /* It's OK for our use case. */
-                               ret = 0;
-                               break;
-                       default:
-                               break;
-                       }
-                       goto error;
-               }
-       }
-
-error:
-       health_code_update();
-       return ret;
-}
-
-static int init_ust_event_notifier_from_event_rule(
-               const struct lttng_event_rule *rule,
-               struct lttng_ust_abi_event_notifier *event_notifier)
-{
-       enum lttng_event_rule_status status;
-       enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
-       int loglevel = -1, ret = 0;
-       const char *pattern;
-
-
-       memset(event_notifier, 0, sizeof(*event_notifier));
-
-       if (lttng_event_rule_targets_agent_domain(rule)) {
-               /*
-                * Special event for agents
-                * The actual meat of the event is in the filter that will be
-                * attached later on.
-                * Set the default values for the agent event.
-                */
-               pattern = event_get_default_agent_ust_name(
-                               lttng_event_rule_get_domain_type(rule));
-               loglevel = 0;
-               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
-       } else {
-               const struct lttng_log_level_rule *log_level_rule;
-
-               LTTNG_ASSERT(lttng_event_rule_get_type(rule) ==
-                               LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT);
-
-               status = lttng_event_rule_user_tracepoint_get_name_pattern(rule, &pattern);
-               if (status != LTTNG_EVENT_RULE_STATUS_OK) {
-                       /* At this point, this is a fatal error. */
-                       abort();
-               }
-
-               status = lttng_event_rule_user_tracepoint_get_log_level_rule(
-                               rule, &log_level_rule);
-               if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
-                       ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
-               } else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
-                       enum lttng_log_level_rule_status llr_status;
-
-                       switch (lttng_log_level_rule_get_type(log_level_rule)) {
-                       case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
-                               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
-                               llr_status = lttng_log_level_rule_exactly_get_level(
-                                               log_level_rule, &loglevel);
-                               break;
-                       case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
-                               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
-                               llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
-                                               log_level_rule, &loglevel);
-                               break;
-                       default:
-                               abort();
-                       }
-
-                       LTTNG_ASSERT(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
-               } else {
-                       /* At this point this is a fatal error. */
-                       abort();
-               }
-       }
-
-       event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
-       ret = lttng_strncpy(event_notifier->event.name, pattern,
-                       LTTNG_UST_ABI_SYM_NAME_LEN - 1);
-       if (ret) {
-               ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
-                               pattern);
-               goto end;
-       }
-
-       event_notifier->event.loglevel_type = ust_loglevel_type;
-       event_notifier->event.loglevel = loglevel;
-end:
-       return ret;
-}
-
-/*
- * Create the specified event notifier against the user space tracer of a
- * given application.
- */
-static int create_ust_event_notifier(struct ust_app *app,
-               struct ust_app_event_notifier_rule *ua_event_notifier_rule)
-{
-       int ret = 0;
-       enum lttng_condition_status condition_status;
-       const struct lttng_condition *condition = NULL;
-       struct lttng_ust_abi_event_notifier event_notifier;
-       const struct lttng_event_rule *event_rule = NULL;
-       unsigned int capture_bytecode_count = 0, i;
-       enum lttng_condition_status cond_status;
-       enum lttng_event_rule_type event_rule_type;
-
-       health_code_update();
-       LTTNG_ASSERT(app->event_notifier_group.object);
-
-       condition = lttng_trigger_get_const_condition(
-                       ua_event_notifier_rule->trigger);
-       LTTNG_ASSERT(condition);
-       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
-                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
-
-       condition_status = lttng_condition_event_rule_matches_get_rule(
-                       condition, &event_rule);
-       LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
-
-       LTTNG_ASSERT(event_rule);
-
-       event_rule_type = lttng_event_rule_get_type(event_rule);
-       LTTNG_ASSERT(event_rule_type == LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT ||
-                       event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING ||
-                       event_rule_type ==
-                                       LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING ||
-                       event_rule_type ==
-                                       LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING);
-
-       init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
-       event_notifier.event.token = ua_event_notifier_rule->token;
-       event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;
-
-       /* Create UST event notifier against the tracer. */
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_create_event_notifier(app->sock, &event_notifier,
-                       app->event_notifier_group.object,
-                       &ua_event_notifier_rule->obj);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = 0;
-                       DBG3("UST app create event notifier failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       ret = 0;
-                       WARN("UST app create event notifier failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app create event notifier '%s' failed with ret %d: pid = %d, sock = %d",
-                                       event_notifier.event.name, ret, app->pid,
-                                       app->sock);
-               }
-               goto error;
-       }
-
-       ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
-
-       DBG2("UST app event notifier %s created successfully: app = '%s', pid = %d, object = %p",
-                       event_notifier.event.name, app->name, app->pid,
-                       ua_event_notifier_rule->obj);
-
-       health_code_update();
-
-       /* Set filter if one is present. */
-       if (ua_event_notifier_rule->filter) {
-               ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
-                               ua_event_notifier_rule->obj);
-               if (ret < 0) {
-                       goto error;
-               }
-       }
-
-       /* Set exclusions for the event. */
-       if (ua_event_notifier_rule->exclusion) {
-               ret = set_ust_object_exclusions(app,
-                               ua_event_notifier_rule->exclusion,
-                               ua_event_notifier_rule->obj);
-               if (ret < 0) {
-                       goto error;
-               }
-       }
-
-       /* Set the capture bytecodes. */
-       cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
-                       condition, &capture_bytecode_count);
-       LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
-
-       for (i = 0; i < capture_bytecode_count; i++) {
-               const struct lttng_bytecode *capture_bytecode =
-                               lttng_condition_event_rule_matches_get_capture_bytecode_at_index(
-                                               condition, i);
-
-               ret = set_ust_capture(app, capture_bytecode, i,
-                               ua_event_notifier_rule->obj);
-               if (ret < 0) {
-                       goto error;
-               }
-       }
-
-       /*
-        * We now need to explicitly enable the event, since it
-        * is disabled at creation.
-        */
-       ret = enable_ust_object(app, ua_event_notifier_rule->obj);
-       if (ret < 0) {
-               /*
-                * If we hit an EPERM, something is wrong with our enable call.
-                * If we get an EEXIST, there is a problem on the tracer side
-                * since we just created it.
-                */
-               switch (ret) {
-               case -LTTNG_UST_ERR_PERM:
-                       /* Code flow problem. */
-                       abort();
-               case -LTTNG_UST_ERR_EXIST:
-                       /* It's OK for our use case. */
-                       ret = 0;
-                       break;
-               default:
-                       break;
-               }
-
-               goto error;
-       }
-
-       ua_event_notifier_rule->enabled = true;
-
-error:
-       health_code_update();
-       return ret;
-}
-
-/*
- * Copy data between a UST app event and an LTT event.
- */
-static void shadow_copy_event(struct ust_app_event *ua_event,
-               struct ltt_ust_event *uevent)
-{
-       size_t exclusion_alloc_size;
-
-       strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
-       ua_event->name[sizeof(ua_event->name) - 1] = '\0';
-
-       ua_event->enabled = uevent->enabled;
-
-       /* Copy event attributes */
-       memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
-
-       /* Copy filter bytecode */
-       if (uevent->filter) {
-               ua_event->filter = lttng_bytecode_copy(uevent->filter);
-               /* Filter might be NULL here in case of ENOMEM. */
-       }
-
-       /* Copy exclusion data */
-       if (uevent->exclusion) {
-               exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
-                               LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
-               ua_event->exclusion = zmalloc(exclusion_alloc_size);
-               if (ua_event->exclusion == NULL) {
-                       PERROR("malloc");
-               } else {
-                       memcpy(ua_event->exclusion, uevent->exclusion,
-                                       exclusion_alloc_size);
-               }
-       }
-}
-
-/*
- * Copy data between a UST app channel and an LTT channel.
- */
-static void shadow_copy_channel(struct ust_app_channel *ua_chan,
-               struct ltt_ust_channel *uchan)
-{
-       DBG2("UST app shadow copy of channel %s started", ua_chan->name);
-
-       strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
-       ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
-
-       ua_chan->tracefile_size = uchan->tracefile_size;
-       ua_chan->tracefile_count = uchan->tracefile_count;
-
-       /* Copy event attributes since the layout is different. */
-       ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
-       ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
-       ua_chan->attr.overwrite = uchan->attr.overwrite;
-       ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
-       ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
-       ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
-       ua_chan->attr.output = uchan->attr.output;
-       ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
-
-       /*
-        * Note that the attribute channel type is not set since the channel on the
-        * tracing registry side does not have this information.
-        */
-
-       ua_chan->enabled = uchan->enabled;
-       ua_chan->tracing_channel_id = uchan->id;
-
-       DBG3("UST app shadow copy of channel %s done", ua_chan->name);
-}
-
-/*
- * Copy data between a UST app session and a regular LTT session.
- */
-static void shadow_copy_session(struct ust_app_session *ua_sess,
-               struct ltt_ust_session *usess, struct ust_app *app)
-{
-       struct tm *timeinfo;
-       char datetime[16];
-       int ret;
-       char tmp_shm_path[PATH_MAX];
-
-       timeinfo = localtime(&app->registration_time);
-       strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
-
-       DBG2("Shadow copy of session handle %d", ua_sess->handle);
-
-       ua_sess->tracing_id = usess->id;
-       ua_sess->id = get_next_session_id();
-       LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
-       LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
-       LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
-       LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
-       ua_sess->buffer_type = usess->buffer_type;
-       ua_sess->bits_per_long = app->bits_per_long;
-
-       /* There is only one consumer object per session possible. */
-       consumer_output_get(usess->consumer);
-       ua_sess->consumer = usess->consumer;
-
-       ua_sess->output_traces = usess->output_traces;
-       ua_sess->live_timer_interval = usess->live_timer_interval;
-       copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
-                       &usess->metadata_attr);
-
-       switch (ua_sess->buffer_type) {
-       case LTTNG_BUFFER_PER_PID:
-               ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
-                               DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
-                               datetime);
-               break;
-       case LTTNG_BUFFER_PER_UID:
-               ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
-                               DEFAULT_UST_TRACE_UID_PATH,
-                               lttng_credentials_get_uid(&ua_sess->real_credentials),
-                               app->bits_per_long);
-               break;
-       default:
-               abort();
-               goto error;
-       }
-       if (ret < 0) {
-               PERROR("snprintf UST shadow copy session");
-               abort();
-               goto error;
-       }
-
-       strncpy(ua_sess->root_shm_path, usess->root_shm_path,
-               sizeof(ua_sess->root_shm_path));
-       ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
-       strncpy(ua_sess->shm_path, usess->shm_path,
-               sizeof(ua_sess->shm_path));
-       ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
-       if (ua_sess->shm_path[0]) {
-               switch (ua_sess->buffer_type) {
-               case LTTNG_BUFFER_PER_PID:
-                       ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
-                                       "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
-                                       app->name, app->pid, datetime);
-                       break;
-               case LTTNG_BUFFER_PER_UID:
-                       ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
-                                       "/" DEFAULT_UST_TRACE_UID_PATH,
-                                       app->uid, app->bits_per_long);
-                       break;
-               default:
-                       abort();
-                       goto error;
-               }
-               if (ret < 0) {
-                       PERROR("snprintf UST shadow copy session");
-                       abort();
-                       goto error;
-               }
-               strncat(ua_sess->shm_path, tmp_shm_path,
-                       sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
-               ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
-       }
-       return;
-
-error:
-       consumer_output_put(ua_sess->consumer);
-}
-
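
shadow_copy_session() builds the per-PID and per-UID trace paths with snprintf() and only treats a negative return as an error; snprintf() also reports truncation by returning the length it would have needed. A standalone illustration of the fuller check, using a hypothetical build_trace_path() rather than a function from the tree:

    #include <stdio.h>

    static int build_trace_path(char *buf, size_t size,
                    const char *name, int pid, const char *datetime)
    {
            int ret = snprintf(buf, size, "%s-%d-%s", name, pid, datetime);

            /* ret < 0: encoding error; ret >= size: output was truncated. */
            if (ret < 0 || (size_t) ret >= size) {
                    return -1;
            }
            return 0;
    }
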
-/*
- * Lookup session wrapper.
- */
-static
-void __lookup_session_by_app(const struct ltt_ust_session *usess,
-                       struct ust_app *app, struct lttng_ht_iter *iter)
-{
-       /* Get right UST app session from app */
-       lttng_ht_lookup(app->sessions, &usess->id, iter);
-}
-
-/*
- * Return ust app session from the app session hashtable using the UST session
- * id.
- */
-static struct ust_app_session *lookup_session_by_app(
-               const struct ltt_ust_session *usess, struct ust_app *app)
-{
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_u64 *node;
-
-       __lookup_session_by_app(usess, app, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (node == NULL) {
-               goto error;
-       }
-
-       return caa_container_of(node, struct ust_app_session, node);
-
-error:
-       return NULL;
-}
-
-/*
- * Setup buffer registry per PID for the given session and application. If none
- * is found, a new one is created, added to the global registry and
- * initialized. If regp is valid, it's set with the newly created object.
- *
- * Return 0 on success or else a negative value.
- */
-static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
-               struct ust_app *app, struct buffer_reg_pid **regp)
-{
-       int ret = 0;
-       struct buffer_reg_pid *reg_pid;
-
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(app);
-
-       rcu_read_lock();
-
-       reg_pid = buffer_reg_pid_find(ua_sess->id);
-       if (!reg_pid) {
-               /*
-                * This is the create channel path meaning that if there is NO
-                * registry available, we have to create one for this session.
-                */
-               ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
-                       ua_sess->root_shm_path, ua_sess->shm_path);
-               if (ret < 0) {
-                       goto error;
-               }
-       } else {
-               goto end;
-       }
-
-       /* Initialize registry. */
-       ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
-                       app->bits_per_long, app->uint8_t_alignment,
-                       app->uint16_t_alignment, app->uint32_t_alignment,
-                       app->uint64_t_alignment, app->long_alignment,
-                       app->byte_order, app->version.major, app->version.minor,
-                       reg_pid->root_shm_path, reg_pid->shm_path,
-                       lttng_credentials_get_uid(&ua_sess->effective_credentials),
-                       lttng_credentials_get_gid(&ua_sess->effective_credentials),
-                       ua_sess->tracing_id,
-                       app->uid);
-       if (ret < 0) {
-               /*
-                * reg_pid->registry->reg.ust is NULL upon error, so we need to
-                * destroy the buffer registry, because it is always expected
-                * that if the buffer registry can be found, its ust registry is
-                * non-NULL.
-                */
-               buffer_reg_pid_destroy(reg_pid);
-               goto error;
-       }
-
-       buffer_reg_pid_add(reg_pid);
-
-       DBG3("UST app buffer registry per PID created successfully");
-
-end:
-       if (regp) {
-               *regp = reg_pid;
-       }
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Setup buffer registry per UID for the given session and application. If none
- * is found, a new one is created, added to the global registry and
- * initialized. If regp is valid, it's set with the newly created object.
- *
- * Return 0 on success or else a negative value.
- */
-static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
-               struct ust_app_session *ua_sess,
-               struct ust_app *app, struct buffer_reg_uid **regp)
-{
-       int ret = 0;
-       struct buffer_reg_uid *reg_uid;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(app);
-
-       rcu_read_lock();
-
-       reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
-       if (!reg_uid) {
-               /*
-                * This is the create channel path meaning that if there is NO
-                * registry available, we have to create one for this session.
-                */
-               ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
-                               LTTNG_DOMAIN_UST, &reg_uid,
-                               ua_sess->root_shm_path, ua_sess->shm_path);
-               if (ret < 0) {
-                       goto error;
-               }
-       } else {
-               goto end;
-       }
-
-       /* Initialize registry. */
-       ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
-                       app->bits_per_long, app->uint8_t_alignment,
-                       app->uint16_t_alignment, app->uint32_t_alignment,
-                       app->uint64_t_alignment, app->long_alignment,
-                       app->byte_order, app->version.major,
-                       app->version.minor, reg_uid->root_shm_path,
-                       reg_uid->shm_path, usess->uid, usess->gid,
-                       ua_sess->tracing_id, app->uid);
-       if (ret < 0) {
-               /*
-                * reg_uid->registry->reg.ust is NULL upon error, so we need to
-                * destroy the buffer registry, because it is always expected
-                * that if the buffer registry can be found, its ust registry is
-                * non-NULL.
-                */
-               buffer_reg_uid_destroy(reg_uid, NULL);
-               goto error;
-       }
-       /* Add node to teardown list of the session. */
-       cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
-
-       buffer_reg_uid_add(reg_uid);
-
-       DBG3("UST app buffer registry per UID created successfully");
-end:
-       if (regp) {
-               *regp = reg_uid;
-       }
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Create a session on the tracer side for the given app.
- *
- * On success, ua_sess_ptr is populated with the session pointer or else left
- * untouched. If the session was created, is_created is set to 1. On error,
- * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
- * be NULL.
- *
- * Returns 0 on success or else a negative code, either -ENOMEM or -ENOTCONN,
- * the latter being the default code when lttng_ust_ctl_create_session fails.
- */
-static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
-               struct ust_app *app, struct ust_app_session **ua_sess_ptr,
-               int *is_created)
-{
-       int ret, created = 0;
-       struct ust_app_session *ua_sess;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(ua_sess_ptr);
-
-       health_code_update();
-
-       ua_sess = lookup_session_by_app(usess, app);
-       if (ua_sess == NULL) {
-               DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
-                               app->pid, usess->id);
-               ua_sess = alloc_ust_app_session();
-               if (ua_sess == NULL) {
-                       /* Only malloc can fail, so something is really wrong. */
-                       ret = -ENOMEM;
-                       goto error;
-               }
-               shadow_copy_session(ua_sess, usess, app);
-               created = 1;
-       }
-
-       switch (usess->buffer_type) {
-       case LTTNG_BUFFER_PER_PID:
-               /* Init local registry. */
-               ret = setup_buffer_reg_pid(ua_sess, app, NULL);
-               if (ret < 0) {
-                       delete_ust_app_session(-1, ua_sess, app);
-                       goto error;
-               }
-               break;
-       case LTTNG_BUFFER_PER_UID:
-               /* Look for a global registry. If none exists, create one. */
-               ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
-               if (ret < 0) {
-                       delete_ust_app_session(-1, ua_sess, app);
-                       goto error;
-               }
-               break;
-       default:
-               abort();
-               ret = -EINVAL;
-               goto error;
-       }
-
-       health_code_update();
-
-       if (ua_sess->handle == -1) {
-               pthread_mutex_lock(&app->sock_lock);
-               ret = lttng_ust_ctl_create_session(app->sock);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG("UST app creating session failed. Application is dead: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                               ret = 0;
-                       } else if (ret == -EAGAIN) {
-                               DBG("UST app creating session failed. Communication time out: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                               ret = 0;
-                       } else {
-                               ERR("UST app creating session failed with ret %d: pid = %d, sock = %d",
-                                               ret, app->pid, app->sock);
-                       }
-                       delete_ust_app_session(-1, ua_sess, app);
-                       if (ret != -ENOMEM) {
-                               /*
-                                * The tracer is probably gone or hit an internal error, so behave
-                                * as if the application will soon unregister or is unusable.
-                                */
-                               ret = -ENOTCONN;
-                       }
-                       goto error;
-               }
-
-               ua_sess->handle = ret;
-
-               /* Add ust app session to app's HT */
-               lttng_ht_node_init_u64(&ua_sess->node,
-                               ua_sess->tracing_id);
-               lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
-               lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
-               lttng_ht_add_unique_ulong(app->ust_sessions_objd,
-                               &ua_sess->ust_objd_node);
-
-               DBG2("UST app session created successfully with handle %d", ret);
-       }
-
-       *ua_sess_ptr = ua_sess;
-       if (is_created) {
-               *is_created = created;
-       }
-
-       /* Everything went well. */
-       ret = 0;
-
-error:
-       health_code_update();
-       return ret;
-}
-
-/*
- * Match function for a hash table lookup of ust_app_ctx.
- *
- * It matches a ust app context based on the context type and, in the case
- * of perf counters, their name.
- */
-static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
-{
-       struct ust_app_ctx *ctx;
-       const struct lttng_ust_context_attr *key;
-
-       LTTNG_ASSERT(node);
-       LTTNG_ASSERT(_key);
-
-       ctx = caa_container_of(node, struct ust_app_ctx, node.node);
-       key = _key;
-
-       /* Context type */
-       if (ctx->ctx.ctx != key->ctx) {
-               goto no_match;
-       }
-
-       switch(key->ctx) {
-       case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
-               if (strncmp(key->u.perf_counter.name,
-                               ctx->ctx.u.perf_counter.name,
-                               sizeof(key->u.perf_counter.name))) {
-                       goto no_match;
-               }
-               break;
-       case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
-               if (strcmp(key->u.app_ctx.provider_name,
-                               ctx->ctx.u.app_ctx.provider_name) ||
-                               strcmp(key->u.app_ctx.ctx_name,
-                               ctx->ctx.u.app_ctx.ctx_name)) {
-                       goto no_match;
-               }
-               break;
-       default:
-               break;
-       }
-
-       /* Match. */
-       return 1;
-
-no_match:
-       return 0;
-}
-
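
ht_match_ust_app_ctx() follows the urcu match-callback convention: return non-zero when the node matches the key, zero otherwise, comparing every field that is part of the logical key. A reduced sketch of such a comparison against hypothetical my_ctx/my_ctx_key types; the real callback additionally receives a cds_lfht_node pointer and goes through caa_container_of():

    #include <string.h>

    struct my_ctx_key {
            int type;
            const char *name;
    };

    struct my_ctx {
            int type;
            char name[64];
    };

    /* 1 = match, 0 = no match. */
    static int my_ctx_match(const struct my_ctx *ctx,
                    const struct my_ctx_key *key)
    {
            if (ctx->type != key->type) {
                    return 0;
            }
            return strcmp(ctx->name, key->name) == 0;
    }
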
-/*
- * Look up a ust app context from an lttng_ust_context.
- *
- * Must be called while holding the RCU read side lock.
- * Return an ust_app_ctx object or NULL if not found.
- */
-static
-struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
-               struct lttng_ust_context_attr *uctx)
-{
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_ulong *node;
-       struct ust_app_ctx *app_ctx = NULL;
-
-       LTTNG_ASSERT(uctx);
-       LTTNG_ASSERT(ht);
-
-       /* Lookup using the lttng_ust_context_type and a custom match fct. */
-       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
-                       ht_match_ust_app_ctx, uctx, &iter.iter);
-       node = lttng_ht_iter_get_node_ulong(&iter);
-       if (!node) {
-               goto end;
-       }
-
-       app_ctx = caa_container_of(node, struct ust_app_ctx, node);
-
-end:
-       return app_ctx;
-}
-
-/*
- * Create a context for the channel on the tracer.
- *
- * Called with UST app session lock held and a RCU read side lock.
- */
-static
-int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
-               struct lttng_ust_context_attr *uctx,
-               struct ust_app *app)
-{
-       int ret = 0;
-       struct ust_app_ctx *ua_ctx;
-
-       DBG2("UST app adding context to channel %s", ua_chan->name);
-
-       ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
-       if (ua_ctx) {
-               ret = -EEXIST;
-               goto error;
-       }
-
-       ua_ctx = alloc_ust_app_ctx(uctx);
-       if (ua_ctx == NULL) {
-               /* malloc failed */
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
-       lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
-       cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
-
-       ret = create_ust_channel_context(ua_chan, ua_ctx, app);
-       if (ret < 0) {
-               goto error;
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Enable on the tracer side a ust app event for the session and channel.
- *
- * Called with UST app session lock held.
- */
-static
-int enable_ust_app_event(struct ust_app_session *ua_sess,
-               struct ust_app_event *ua_event, struct ust_app *app)
-{
-       int ret;
-
-       ret = enable_ust_object(app, ua_event->obj);
-       if (ret < 0) {
-               goto error;
-       }
-
-       ua_event->enabled = 1;
-
-error:
-       return ret;
-}
-
-/*
- * Disable on the tracer side a ust app event for the session and channel.
- */
-static int disable_ust_app_event(struct ust_app_session *ua_sess,
-               struct ust_app_event *ua_event, struct ust_app *app)
-{
-       int ret;
-
-       ret = disable_ust_object(app, ua_event->obj);
-       if (ret < 0) {
-               goto error;
-       }
-
-       ua_event->enabled = 0;
-
-error:
-       return ret;
-}
-
-/*
- * Lookup ust app channel for session and disable it on the tracer side.
- */
-static
-int disable_ust_app_channel(struct ust_app_session *ua_sess,
-               struct ust_app_channel *ua_chan, struct ust_app *app)
-{
-       int ret;
-
-       ret = disable_ust_channel(app, ua_sess, ua_chan);
-       if (ret < 0) {
-               goto error;
-       }
-
-       ua_chan->enabled = 0;
-
-error:
-       return ret;
-}
-
-/*
- * Lookup ust app channel for session and enable it on the tracer side. This
- * MUST be called with a RCU read side lock acquired.
- */
-static int enable_ust_app_channel(struct ust_app_session *ua_sess,
-               struct ltt_ust_channel *uchan, struct ust_app *app)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_str *ua_chan_node;
-       struct ust_app_channel *ua_chan;
-
-       lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
-       ua_chan_node = lttng_ht_iter_get_node_str(&iter);
-       if (ua_chan_node == NULL) {
-               DBG2("Unable to find channel %s in ust session id %" PRIu64,
-                               uchan->name, ua_sess->tracing_id);
-               goto error;
-       }
-
-       ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
-
-       ret = enable_ust_channel(app, ua_sess, ua_chan);
-       if (ret < 0) {
-               goto error;
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Ask the consumer to create a channel and get it if successful.
- *
- * Called with UST app session lock held.
- *
- * Return 0 on success or else a negative value.
- */
-static int do_consumer_create_channel(struct ltt_ust_session *usess,
-               struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
-               int bitness, struct ust_registry_session *registry,
-               uint64_t trace_archive_id)
-{
-       int ret;
-       unsigned int nb_fd = 0;
-       struct consumer_socket *socket;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(ua_chan);
-       LTTNG_ASSERT(registry);
-
-       rcu_read_lock();
-       health_code_update();
-
-       /* Get the right consumer socket for the application. */
-       socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
-       if (!socket) {
-               ret = -EINVAL;
-               goto error;
-       }
-
-       health_code_update();
-
-       /* Need one fd for the channel. */
-       ret = lttng_fd_get(LTTNG_FD_APPS, 1);
-       if (ret < 0) {
-               ERR("Exhausted number of available FD upon create channel");
-               goto error;
-       }
-
-       /*
-        * Ask the consumer to create the channel. The consumer will return the
-        * number of streams we have to expect.
-        */
-       ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
-                       registry, usess->current_trace_chunk);
-       if (ret < 0) {
-               goto error_ask;
-       }
-
-       /*
-        * Compute the number of fds needed before receiving them. It must be 2 per
-        * stream (2 being the default value here).
-        */
-       nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
-
-       /* Reserve the amount of file descriptor we need. */
-       ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
-       if (ret < 0) {
-               ERR("Exhausted number of available FD upon create channel");
-               goto error_fd_get_stream;
-       }
-
-       health_code_update();
-
-       /*
-        * Now get the channel from the consumer. This call will populate the stream
-        * list of that channel and set the ust objects.
-        */
-       if (usess->consumer->enabled) {
-               ret = ust_consumer_get_channel(socket, ua_chan);
-               if (ret < 0) {
-                       goto error_destroy;
-               }
-       }
-
-       rcu_read_unlock();
-       return 0;
-
-error_destroy:
-       lttng_fd_put(LTTNG_FD_APPS, nb_fd);
-error_fd_get_stream:
-       /*
-        * Ask the consumer to destroy the channel since we had an error while
-        * handling it on our side. The return value does not matter since ret
-        * is already set by the previous error, which is what we need to
-        * return.
-        */
-       (void) ust_consumer_destroy_channel(socket, ua_chan);
-error_ask:
-       lttng_fd_put(LTTNG_FD_APPS, 1);
-error:
-       health_code_update();
-       rcu_read_unlock();
-       return ret;
-}
-
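
do_consumer_create_channel() reserves one descriptor for the channel, then DEFAULT_UST_STREAM_FD_NUM per expected stream, and each error path gives back exactly what was reserved before it. A toy version of that reserve/release accounting with hypothetical fd_budget_* helpers, not the lttng_fd_get()/lttng_fd_put() API:

    #include <stdbool.h>

    static unsigned int fd_budget_used;
    static const unsigned int fd_budget_max = 1024;

    /* Reserve nr descriptors; refuse rather than overshoot the budget. */
    static bool fd_budget_get(unsigned int nr)
    {
            if (fd_budget_used + nr > fd_budget_max) {
                    return false;
            }
            fd_budget_used += nr;
            return true;
    }

    /* Return a reservation, e.g. on an error path. */
    static void fd_budget_put(unsigned int nr)
    {
            fd_budget_used -= nr;
    }
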
-/*
- * Duplicate the ust data object of the ust app stream and save it in the
- * buffer registry stream.
- *
- * Return 0 on success or else a negative value.
- */
-static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
-               struct ust_app_stream *stream)
-{
-       int ret;
-
-       LTTNG_ASSERT(reg_stream);
-       LTTNG_ASSERT(stream);
-
-       /* Duplicating a stream requires 2 new fds. Reserve them. */
-       ret = lttng_fd_get(LTTNG_FD_APPS, 2);
-       if (ret < 0) {
-               ERR("Exhausted number of available FD upon duplicate stream");
-               goto error;
-       }
-
-       /* Duplicate object for stream once the original is in the registry. */
-       ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj,
-                       reg_stream->obj.ust);
-       if (ret < 0) {
-               ERR("Duplicate stream obj from %p to %p failed with ret %d",
-                               reg_stream->obj.ust, stream->obj, ret);
-               lttng_fd_put(LTTNG_FD_APPS, 2);
-               goto error;
-       }
-       stream->handle = stream->obj->handle;
-
-error:
-       return ret;
-}
-
-/*
- * Duplicate the ust data object of the ust app channel and save it in the
- * buffer registry channel.
- *
- * Return 0 on success or else a negative value.
- */
-static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
-               struct ust_app_channel *ua_chan)
-{
-       int ret;
-
-       LTTNG_ASSERT(buf_reg_chan);
-       LTTNG_ASSERT(ua_chan);
-
-       /* Duplicating a channel requires 1 new fd. Reserve it. */
-       ret = lttng_fd_get(LTTNG_FD_APPS, 1);
-       if (ret < 0) {
-               ERR("Exhausted number of available FD upon duplicate channel");
-               goto error_fd_get;
-       }
-
-       /* Duplicate object for stream once the original is in the registry. */
-       ret = lttng_ust_ctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
-       if (ret < 0) {
-               ERR("Duplicate channel obj from %p to %p failed with ret: %d",
-                               buf_reg_chan->obj.ust, ua_chan->obj, ret);
-               goto error;
-       }
-       ua_chan->handle = ua_chan->obj->handle;
-
-       return 0;
-
-error:
-       lttng_fd_put(LTTNG_FD_APPS, 1);
-error_fd_get:
-       return ret;
-}
-
-/*
- * For a given channel buffer registry, setup all streams of the given ust
- * application channel.
- *
- * Return 0 on success or else a negative value.
- */
-static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
-               struct ust_app_channel *ua_chan,
-               struct ust_app *app)
-{
-       int ret = 0;
-       struct ust_app_stream *stream, *stmp;
-
-       LTTNG_ASSERT(buf_reg_chan);
-       LTTNG_ASSERT(ua_chan);
-
-       DBG2("UST app setup buffer registry stream");
-
-       /* Send all streams to application. */
-       cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
-               struct buffer_reg_stream *reg_stream;
-
-               ret = buffer_reg_stream_create(&reg_stream);
-               if (ret < 0) {
-                       goto error;
-               }
-
-               /*
-                * Keep original pointer and nullify it in the stream so the delete
-                * stream call does not release the object.
-                */
-               reg_stream->obj.ust = stream->obj;
-               stream->obj = NULL;
-               buffer_reg_stream_add(reg_stream, buf_reg_chan);
-
-               /* We don't need the streams anymore. */
-               cds_list_del(&stream->list);
-               delete_ust_app_stream(-1, stream, app);
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Create a buffer registry channel for the given session registry and
- * application channel object. If the regp pointer is valid, it is set to the
- * created object. Important: the created object is NOT added to the session
- * registry hash table.
- *
- * Return 0 on success else a negative value.
- */
-static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
-               struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
-{
-       int ret;
-       struct buffer_reg_channel *buf_reg_chan = NULL;
-
-       LTTNG_ASSERT(reg_sess);
-       LTTNG_ASSERT(ua_chan);
-
-       DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
-
-       /* Create buffer registry channel. */
-       ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
-       if (ret < 0) {
-               goto error_create;
-       }
-       LTTNG_ASSERT(buf_reg_chan);
-       buf_reg_chan->consumer_key = ua_chan->key;
-       buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
-       buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
-
-       /* Create and add a channel registry to session. */
-       ret = ust_registry_channel_add(reg_sess->reg.ust,
-                       ua_chan->tracing_channel_id);
-       if (ret < 0) {
-               goto error;
-       }
-       buffer_reg_channel_add(reg_sess, buf_reg_chan);
-
-       if (regp) {
-               *regp = buf_reg_chan;
-       }
-
-       return 0;
-
-error:
-       /* Safe because the registry channel object was not added to any HT. */
-       buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
-error_create:
-       return ret;
-}
-
-/*
- * Set up the buffer registry channel for the given session registry and
- * application channel object.
- *
- * Return 0 on success else a negative value.
- */
-static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
-               struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
-               struct ust_app *app)
-{
-       int ret;
-
-       LTTNG_ASSERT(reg_sess);
-       LTTNG_ASSERT(buf_reg_chan);
-       LTTNG_ASSERT(ua_chan);
-       LTTNG_ASSERT(ua_chan->obj);
-
-       DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
-
-       /* Set up all streams for the registry. */
-       ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
-       if (ret < 0) {
-               goto error;
-       }
-
-       buf_reg_chan->obj.ust = ua_chan->obj;
-       ua_chan->obj = NULL;
-
-       return 0;
-
-error:
-       buffer_reg_channel_remove(reg_sess, buf_reg_chan);
-       buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
-       return ret;
-}
-
-/*
- * Send buffer registry channel to the application.
- *
- * Return 0 on success else a negative value.
- */
-static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
-               struct ust_app *app, struct ust_app_session *ua_sess,
-               struct ust_app_channel *ua_chan)
-{
-       int ret;
-       struct buffer_reg_stream *reg_stream;
-
-       LTTNG_ASSERT(buf_reg_chan);
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(ua_chan);
-
-       DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
-
-       ret = duplicate_channel_object(buf_reg_chan, ua_chan);
-       if (ret < 0) {
-               goto error;
-       }
-
-       /* Send channel to the application. */
-       ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
-       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-               ret = -ENOTCONN;        /* Caused by app exiting. */
-               goto error;
-       } else if (ret == -EAGAIN) {
-               /* Caused by timeout. */
-               WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
-                               app->pid, ua_chan->name, ua_sess->tracing_id);
-               /* Treat this the same way as an application that is exiting. */
-               ret = -ENOTCONN;
-               goto error;
-       } else if (ret < 0) {
-               goto error;
-       }
-
-       health_code_update();
-
-       /* Send all streams to application. */
-       pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
-       cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
-               struct ust_app_stream stream;
-
-               ret = duplicate_stream_object(reg_stream, &stream);
-               if (ret < 0) {
-                       goto error_stream_unlock;
-               }
-
-               ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               ret = -ENOTCONN; /* Caused by app exiting. */
-                       } else if (ret == -EAGAIN) {
-                               /*
-                                * Caused by timeout.
-                                * Treat this the same way as an application
-                                * that is exiting.
-                                */
-                               WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
-                                               app->pid, stream.name,
-                                               ua_chan->name,
-                                               ua_sess->tracing_id);
-                               ret = -ENOTCONN;
-                       }
-                       (void) release_ust_app_stream(-1, &stream, app);
-                       goto error_stream_unlock;
-               }
-
-               /*
-                * The return value is not important here. This function will output an
-                * error if needed.
-                */
-               (void) release_ust_app_stream(-1, &stream, app);
-       }
-       ua_chan->is_sent = 1;
-
-error_stream_unlock:
-       pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
-error:
-       return ret;
-}
-
-/*
- * Create per-UID channel buffers and send them to the application.
- *
- * This MUST be called with a RCU read side lock acquired.
- * The session list lock and the session's lock must be acquired.
- *
- * Return 0 on success else a negative value.
- */
-static int create_channel_per_uid(struct ust_app *app,
-               struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
-               struct ust_app_channel *ua_chan)
-{
-       int ret;
-       struct buffer_reg_uid *reg_uid;
-       struct buffer_reg_channel *buf_reg_chan;
-       struct ltt_session *session = NULL;
-       enum lttng_error_code notification_ret;
-       struct ust_registry_channel *ust_reg_chan;
-
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(ua_chan);
-
-       DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
-
-       reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
-       /*
-        * The session creation handles the creation of this global registry
-        * object. If none can be found, there is a code flow problem or a
-        * teardown race.
-        */
-       LTTNG_ASSERT(reg_uid);
-
-       buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
-                       reg_uid);
-       if (buf_reg_chan) {
-               goto send_channel;
-       }
-
-       /* Create the buffer registry channel object. */
-       ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
-       if (ret < 0) {
-               ERR("Error creating the UST channel \"%s\" registry instance",
-                               ua_chan->name);
-               goto error;
-       }
-
-       session = session_find_by_id(ua_sess->tracing_id);
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
-       LTTNG_ASSERT(session_trylock_list());
-
-       /*
-        * Create the buffers on the consumer side. This call populates the
-        * ust app channel object with all streams and data objects.
-        */
-       ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
-                       app->bits_per_long, reg_uid->registry->reg.ust,
-                       session->most_recent_chunk_id.value);
-       if (ret < 0) {
-               ERR("Error creating UST channel \"%s\" on the consumer daemon",
-                               ua_chan->name);
-
-               /*
-                * Let's remove the previously created buffer registry channel so
-                * it's not visible anymore in the session registry.
-                */
-               ust_registry_channel_del_free(reg_uid->registry->reg.ust,
-                               ua_chan->tracing_channel_id, false);
-               buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
-               buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
-               goto error;
-       }
-
-       /*
-        * Set up the streams and add them to the session registry.
-        */
-       ret = setup_buffer_reg_channel(reg_uid->registry,
-                       ua_chan, buf_reg_chan, app);
-       if (ret < 0) {
-               ERR("Error setting up UST channel \"%s\"", ua_chan->name);
-               goto error;
-       }
-
-       /* Notify the notification subsystem of the channel's creation. */
-       pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
-       ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
-                       ua_chan->tracing_channel_id);
-       LTTNG_ASSERT(ust_reg_chan);
-       ust_reg_chan->consumer_key = ua_chan->key;
-       ust_reg_chan = NULL;
-       pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
-
-       notification_ret = notification_thread_command_add_channel(
-                       the_notification_thread_handle, session->name,
-                       lttng_credentials_get_uid(
-                                       &ua_sess->effective_credentials),
-                       lttng_credentials_get_gid(
-                                       &ua_sess->effective_credentials),
-                       ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
-                       ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
-       if (notification_ret != LTTNG_OK) {
-               ret = - (int) notification_ret;
-               ERR("Failed to add channel to notification thread");
-               goto error;
-       }
-
-send_channel:
-       /* Send buffers to the application. */
-       ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
-       if (ret < 0) {
-               if (ret != -ENOTCONN) {
-                       ERR("Error sending channel to application");
-               }
-               goto error;
-       }
-
-error:
-       if (session) {
-               session_put(session);
-       }
-       return ret;
-}
-
-/*
- * Create per-PID channel buffers and send them to the application.
- *
- * Called with UST app session lock held.
- * The session list lock and the session's lock must be acquired.
- *
- * Return 0 on success else a negative value.
- */
-static int create_channel_per_pid(struct ust_app *app,
-               struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
-               struct ust_app_channel *ua_chan)
-{
-       int ret;
-       struct ust_registry_session *registry;
-       enum lttng_error_code cmd_ret;
-       struct ltt_session *session = NULL;
-       uint64_t chan_reg_key;
-       struct ust_registry_channel *ust_reg_chan;
-
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(ua_chan);
-
-       DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
-
-       rcu_read_lock();
-
-       registry = get_session_registry(ua_sess);
-       /* The UST app session lock is held, registry shall not be null. */
-       LTTNG_ASSERT(registry);
-
-       /* Create and add a new channel registry to session. */
-       ret = ust_registry_channel_add(registry, ua_chan->key);
-       if (ret < 0) {
-               ERR("Error creating the UST channel \"%s\" registry instance",
-                       ua_chan->name);
-               goto error;
-       }
-
-       session = session_find_by_id(ua_sess->tracing_id);
-       LTTNG_ASSERT(session);
-
-       LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
-       LTTNG_ASSERT(session_trylock_list());
-
-       /* Create and get channel on the consumer side. */
-       ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
-                       app->bits_per_long, registry,
-                       session->most_recent_chunk_id.value);
-       if (ret < 0) {
-               ERR("Error creating UST channel \"%s\" on the consumer daemon",
-                       ua_chan->name);
-               goto error_remove_from_registry;
-       }
-
-       ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
-       if (ret < 0) {
-               if (ret != -ENOTCONN) {
-                       ERR("Error sending channel to application");
-               }
-               goto error_remove_from_registry;
-       }
-
-       chan_reg_key = ua_chan->key;
-       pthread_mutex_lock(&registry->lock);
-       ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
-       LTTNG_ASSERT(ust_reg_chan);
-       ust_reg_chan->consumer_key = ua_chan->key;
-       pthread_mutex_unlock(&registry->lock);
-
-       cmd_ret = notification_thread_command_add_channel(
-                       the_notification_thread_handle, session->name,
-                       lttng_credentials_get_uid(
-                                       &ua_sess->effective_credentials),
-                       lttng_credentials_get_gid(
-                                       &ua_sess->effective_credentials),
-                       ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
-                       ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
-       if (cmd_ret != LTTNG_OK) {
-               ret = - (int) cmd_ret;
-               ERR("Failed to add channel to notification thread");
-               goto error_remove_from_registry;
-       }
-
-error_remove_from_registry:
-       if (ret) {
-               ust_registry_channel_del_free(registry, ua_chan->key, false);
-       }
-error:
-       rcu_read_unlock();
-       if (session) {
-               session_put(session);
-       }
-       return ret;
-}
-
-/*
- * From an already allocated ust app channel, create the channel buffers if
- * needed and send them to the application. This MUST be called with a RCU read
- * side lock acquired.
- *
- * Called with UST app session lock held.
- *
- * Return 0 on success or else a negative value. Returns -ENOTCONN if
- * the application exited concurrently.
- */
-static int ust_app_channel_send(struct ust_app *app,
-               struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
-               struct ust_app_channel *ua_chan)
-{
-       int ret;
-
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(usess->active);
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(ua_chan);
-
-       /* Handle buffer type before sending the channel to the application. */
-       switch (usess->buffer_type) {
-       case LTTNG_BUFFER_PER_UID:
-       {
-               ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
-               if (ret < 0) {
-                       goto error;
-               }
-               break;
-       }
-       case LTTNG_BUFFER_PER_PID:
-       {
-               ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
-               if (ret < 0) {
-                       goto error;
-               }
-               break;
-       }
-       default:
-               abort();
-               ret = -EINVAL;
-               goto error;
-       }
-
-       /* Initialize ust objd object using the received handle and add it. */
-       lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
-       lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
-
-       /* If channel is not enabled, disable it on the tracer */
-       if (!ua_chan->enabled) {
-               ret = disable_ust_channel(app, ua_sess, ua_chan);
-               if (ret < 0) {
-                       goto error;
-               }
-       }
-
-error:
-       return ret;
-}
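
For illustration only (this note is not part of the diff): a minimal caller sketch
for ust_app_channel_send() that satisfies the locking contract stated in its header
comment. The app, usess, ua_sess and ua_chan variables are assumed to already exist
in the caller; the session list lock and the ltt_session lock are also expected to
be held, as asserted by create_channel_per_uid/pid above.

    int ret;

    rcu_read_lock();
    pthread_mutex_lock(&ua_sess->lock);

    ret = ust_app_channel_send(app, usess, ua_sess, ua_chan);
    if (ret == -ENOTCONN) {
            /* The application exited concurrently; not a hard error. */
    } else if (ret < 0) {
            /* Genuine error; propagate ret to the caller. */
    }

    pthread_mutex_unlock(&ua_sess->lock);
    rcu_read_unlock();
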
-
-/*
- * Create UST app channel and return it through ua_chanp if not NULL.
- *
- * Called with UST app session lock and RCU read-side lock held.
- *
- * Return 0 on success or else a negative value.
- */
-static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
-               struct ltt_ust_channel *uchan,
-               enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
-               struct ust_app_channel **ua_chanp)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_str *ua_chan_node;
-       struct ust_app_channel *ua_chan;
-
-       /* Lookup channel in the ust app session */
-       lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
-       ua_chan_node = lttng_ht_iter_get_node_str(&iter);
-       if (ua_chan_node != NULL) {
-               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
-               goto end;
-       }
-
-       ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
-       if (ua_chan == NULL) {
-               /* Only malloc can fail here */
-               ret = -ENOMEM;
-               goto error;
-       }
-       shadow_copy_channel(ua_chan, uchan);
-
-       /* Set channel type. */
-       ua_chan->attr.type = type;
-
-       /* Add the channel to the application session's channel hash table. */
-       lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
-end:
-       if (ua_chanp) {
-               *ua_chanp = ua_chan;
-       }
-
-       /* Everything went well. */
-       return 0;
-
-error:
-       return ret;
-}
-
-/*
- * Create UST app event and create it on the tracer side.
- *
- * Must be called with the RCU read side lock held.
- * Called with ust app session mutex held.
- */
-static
-int create_ust_app_event(struct ust_app_session *ua_sess,
-               struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
-               struct ust_app *app)
-{
-       int ret = 0;
-       struct ust_app_event *ua_event;
-
-       ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
-       if (ua_event == NULL) {
-               /* Only failure mode of alloc_ust_app_event(). */
-               ret = -ENOMEM;
-               goto end;
-       }
-       shadow_copy_event(ua_event, uevent);
-
-       /* Create it on the tracer side */
-       ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
-       if (ret < 0) {
-               /*
-                * The event was not found previously, meaning it does not exist
-                * on the tracer side. If the application reports that it already
-                * existed, it means there is a bug in the sessiond or lttng-ust
-                * (or corruption, etc.).
-                */
-               if (ret == -LTTNG_UST_ERR_EXIST) {
-                       ERR("Tracer for application reported that an event being created already existed: "
-                                       "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
-                                       uevent->attr.name,
-                                       app->pid, app->ppid, app->uid,
-                                       app->gid);
-               }
-               goto error;
-       }
-
-       add_unique_ust_app_event(ua_chan, ua_event);
-
-       DBG2("UST app create event completed: app = '%s' pid = %d",
-                       app->name, app->pid);
-
-end:
-       return ret;
-
-error:
-       /* Valid. The RCU read side lock is already held by the caller. */
-       delete_ust_app_event(-1, ua_event, app);
-       return ret;
-}
-
-/*
- * Create UST app event notifier rule and create it on the tracer side.
- *
- * Must be called with the RCU read side lock held.
- * Called with ust app session mutex held.
- */
-static
-int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
-               struct ust_app *app)
-{
-       int ret = 0;
-       struct ust_app_event_notifier_rule *ua_event_notifier_rule;
-
-       ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
-       if (ua_event_notifier_rule == NULL) {
-               ret = -ENOMEM;
-               goto end;
-       }
-
-       /* Create it on the tracer side. */
-       ret = create_ust_event_notifier(app, ua_event_notifier_rule);
-       if (ret < 0) {
-               /*
-                * The event notifier was not found previously, meaning it does
-                * not exist on the tracer side. If the application reports that
-                * it already existed, it means there is a bug in the sessiond or
-                * lttng-ust (or corruption, etc.).
-                */
-               if (ret == -LTTNG_UST_ERR_EXIST) {
-                       ERR("Tracer for application reported that an event notifier being created already exists: "
-                                       "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
-                                       lttng_trigger_get_tracer_token(trigger),
-                                       app->pid, app->ppid, app->uid,
-                                       app->gid);
-               }
-               goto error;
-       }
-
-       lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
-                       &ua_event_notifier_rule->node);
-
-       DBG2("UST app create token event rule completed: app = '%s', pid = %d), token = %" PRIu64,
-                       app->name, app->pid, lttng_trigger_get_tracer_token(trigger));
-
-       goto end;
-
-error:
-       /* The RCU read side lock is already being held by the caller. */
-       delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
-end:
-       return ret;
-}
-
-/*
- * Create UST metadata and open it on the tracer side.
- *
- * Called with UST app session lock held and RCU read side lock.
- */
-static int create_ust_app_metadata(struct ust_app_session *ua_sess,
-               struct ust_app *app, struct consumer_output *consumer)
-{
-       int ret = 0;
-       struct ust_app_channel *metadata;
-       struct consumer_socket *socket;
-       struct ust_registry_session *registry;
-       struct ltt_session *session = NULL;
-
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(consumer);
-
-       registry = get_session_registry(ua_sess);
-       /* The UST app session lock is held; the registry shall not be null. */
-       LTTNG_ASSERT(registry);
-
-       pthread_mutex_lock(&registry->lock);
-
-       /* Metadata already exists for this registry or it was closed previously */
-       if (registry->metadata_key || registry->metadata_closed) {
-               ret = 0;
-               goto error;
-       }
-
-       /* Allocate UST metadata */
-       metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
-       if (!metadata) {
-               /* malloc() failed */
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
-
-       /* Need one fd for the channel. */
-       ret = lttng_fd_get(LTTNG_FD_APPS, 1);
-       if (ret < 0) {
-               ERR("Exhausted number of available FD upon create metadata");
-               goto error;
-       }
-
-       /* Get the right consumer socket for the application. */
-       socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
-       if (!socket) {
-               ret = -EINVAL;
-               goto error_consumer;
-       }
-
-       /*
-        * Keep the metadata key so we can identify it on the consumer side. Assign
-        * it to the registry *before* we ask the consumer so we avoid the race
-        * where the consumer requests the metadata before the ask_channel call on
-        * our side has returned.
-        */
-       registry->metadata_key = metadata->key;
-
-       session = session_find_by_id(ua_sess->tracing_id);
-       LTTNG_ASSERT(session);
-
-       LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
-       LTTNG_ASSERT(session_trylock_list());
-
-       /*
-        * Ask the consumer to create the metadata channel. The metadata object
-        * will be created by the consumer and kept there. However, the stream is
-        * never added or monitored until we do a first metadata push to the
-        * consumer.
-        */
-       ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
-                       registry, session->current_trace_chunk);
-       if (ret < 0) {
-               /* Nullify the metadata key so we don't try to close it later on. */
-               registry->metadata_key = 0;
-               goto error_consumer;
-       }
-
-       /*
-        * The setup command sends the metadata stream to the relayd, if
-        * applicable, and to the thread managing metadata. This is important
-        * because, after this point, if an error occurs, the only way the stream
-        * can be deleted is through the consumer monitoring it.
-        */
-       ret = consumer_setup_metadata(socket, metadata->key);
-       if (ret < 0) {
-               /* Nullify the metadata key so we don't try to close it later on. */
-               registry->metadata_key = 0;
-               goto error_consumer;
-       }
-
-       DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
-                       metadata->key, app->pid);
-
-error_consumer:
-       lttng_fd_put(LTTNG_FD_APPS, 1);
-       delete_ust_app_channel(-1, metadata, app);
-error:
-       pthread_mutex_unlock(&registry->lock);
-       if (session) {
-               session_put(session);
-       }
-       return ret;
-}
-
-/*
- * Return ust app pointer or NULL if not found. RCU read side lock MUST be
- * acquired before calling this function.
- */
-struct ust_app *ust_app_find_by_pid(pid_t pid)
-{
-       struct ust_app *app = NULL;
-       struct lttng_ht_node_ulong *node;
-       struct lttng_ht_iter iter;
-
-       lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
-       node = lttng_ht_iter_get_node_ulong(&iter);
-       if (node == NULL) {
-               DBG2("UST app no found with pid %d", pid);
-               goto error;
-       }
-
-       DBG2("Found UST app by pid %d", pid);
-
-       app = caa_container_of(node, struct ust_app, pid_n);
-
-error:
-       return app;
-}
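
Purely as a usage sketch (the pid value and the DBG statement are illustrative),
the returned pointer must only be dereferenced while the RCU read-side critical
section that covered the lookup is still open:

    struct ust_app *app;

    rcu_read_lock();
    app = ust_app_find_by_pid(pid);
    if (app) {
            /* 'app' is only guaranteed valid inside the read-side critical section. */
            DBG("Application '%s' is registered with pid %d", app->name, app->pid);
    }
    rcu_read_unlock();
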
-
-/*
- * Allocate and init an UST app object using the registration information and
- * the command socket. This is called when the command socket connects to the
- * session daemon.
- *
- * The object is returned on success or else NULL.
- */
-struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
-{
-       int ret;
-       struct ust_app *lta = NULL;
-       struct lttng_pipe *event_notifier_event_source_pipe = NULL;
-
-       LTTNG_ASSERT(msg);
-       LTTNG_ASSERT(sock >= 0);
-
-       DBG3("UST app creating application for socket %d", sock);
-
-       if ((msg->bits_per_long == 64 &&
-                           (uatomic_read(&the_ust_consumerd64_fd) ==
-                                           -EINVAL)) ||
-                       (msg->bits_per_long == 32 &&
-                                       (uatomic_read(&the_ust_consumerd32_fd) ==
-                                                       -EINVAL))) {
-               ERR("Registration failed: application \"%s\" (pid: %d) has "
-                               "%d-bit long, but no consumerd for this size is available.\n",
-                               msg->name, msg->pid, msg->bits_per_long);
-               goto error;
-       }
-
-       /*
-        * Reserve the two file descriptors of the event source pipe. The write
-        * end will be closed once it is passed to the application, at which
-        * point a single 'put' will be performed.
-        */
-       ret = lttng_fd_get(LTTNG_FD_APPS, 2);
-       if (ret) {
-               ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s', pid = %d",
-                               msg->name, (int) msg->pid);
-               goto error;
-       }
-
-       event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
-       if (!event_notifier_event_source_pipe) {
-               PERROR("Failed to open application event source pipe: '%s' (pid = %d)",
-                               msg->name, msg->pid);
-               goto error;
-       }
-
-       lta = zmalloc(sizeof(struct ust_app));
-       if (lta == NULL) {
-               PERROR("malloc");
-               goto error_free_pipe;
-       }
-
-       lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
-
-       lta->ppid = msg->ppid;
-       lta->uid = msg->uid;
-       lta->gid = msg->gid;
-
-       lta->bits_per_long = msg->bits_per_long;
-       lta->uint8_t_alignment = msg->uint8_t_alignment;
-       lta->uint16_t_alignment = msg->uint16_t_alignment;
-       lta->uint32_t_alignment = msg->uint32_t_alignment;
-       lta->uint64_t_alignment = msg->uint64_t_alignment;
-       lta->long_alignment = msg->long_alignment;
-       lta->byte_order = msg->byte_order;
-
-       lta->v_major = msg->major;
-       lta->v_minor = msg->minor;
-       lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-       lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-       lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-       lta->notify_sock = -1;
-       lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-
-       /* Copy name and make sure it's NULL terminated. */
-       strncpy(lta->name, msg->name, sizeof(lta->name));
-       lta->name[UST_APP_PROCNAME_LEN] = '\0';
-
-       /*
-        * The application's compatibility is checked when the registration
-        * information is received, before this function is called. So, at this
-        * point, the application is known to work with this session daemon.
-        */
-       lta->compatible = 1;
-
-       lta->pid = msg->pid;
-       lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
-       lta->sock = sock;
-       pthread_mutex_init(&lta->sock_lock, NULL);
-       lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
-
-       CDS_INIT_LIST_HEAD(&lta->teardown_head);
-       return lta;
-
-error_free_pipe:
-       lttng_pipe_destroy(event_notifier_event_source_pipe);
-       lttng_fd_put(LTTNG_FD_APPS, 2);
-error:
-       return NULL;
-}
-
-/*
- * For a given application object, add it to every hash table.
- */
-void ust_app_add(struct ust_app *app)
-{
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(app->notify_sock >= 0);
-
-       app->registration_time = time(NULL);
-
-       rcu_read_lock();
-
-       /*
-        * On a re-registration, we want to kick out the previous registration of
-        * that PID.
-        */
-       lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
-
-       /*
-        * The socket _should_ be unique until _we_ call close. So, an add_unique
-        * is used for ust_app_ht_by_sock, which fails an assertion if the entry is
-        * already in the table.
-        */
-       lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
-
-       /* Add application to the notify socket hash table. */
-       lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
-       lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
-
-       DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock =%d name:%s "
-                       "notify_sock =%d (version %d.%d)", app->pid, app->ppid, app->uid,
-                       app->gid, app->sock, app->name, app->notify_sock, app->v_major,
-                       app->v_minor);
-
-       rcu_read_unlock();
-}
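
A hedged sketch of how ust_app_create(), ust_app_add() and ust_app_version() fit
together. The msg, sock and notify_sock variables are assumed to be provided by
the registration path, and the exact ordering in the real dispatch code may
differ; the only hard requirement visible here is that the notify socket must be
set before ust_app_add() (it is asserted above).

    struct ust_app *app;

    app = ust_app_create(&msg, sock);
    if (!app) {
            /* Registration refused or allocation failure. */
            goto error;
    }

    /* ust_app_add() asserts that the notify socket is already set. */
    app->notify_sock = notify_sock;
    ust_app_add(app);

    if (ust_app_version(app) < 0) {
            /* The application is dead, timed out or incompatible. */
    }
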
-
-/*
- * Set the application version into the object.
- *
- * Return 0 on success, else a negative value: either an errno code or an
- * LTTng-UST error code.
- */
-int ust_app_version(struct ust_app *app)
-{
-       int ret;
-
-       LTTNG_ASSERT(app);
-
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_tracer_version(app->sock, &app->version);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -LTTNG_UST_ERR_EXITING || ret == -EPIPE) {
-                       DBG3("UST app version failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app version failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app version failed with ret %d: pid = %d, sock = %d",
-                                       ret, app->pid, app->sock);
-               }
-       }
-
-       return ret;
-}
-
-bool ust_app_supports_notifiers(const struct ust_app *app)
-{
-       return app->v_major >= 9;
-}
-
-bool ust_app_supports_counters(const struct ust_app *app)
-{
-       return app->v_major >= 9;
-}
-
-/*
- * Set up the base event notifier group.
- *
- * Return 0 on success, else a negative value: either an errno code or an
- * LTTng-UST error code.
- */
-int ust_app_setup_event_notifier_group(struct ust_app *app)
-{
-       int ret;
-       int event_pipe_write_fd;
-       struct lttng_ust_abi_object_data *event_notifier_group = NULL;
-       enum lttng_error_code lttng_ret;
-       enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
-
-       LTTNG_ASSERT(app);
-
-       if (!ust_app_supports_notifiers(app)) {
-               ret = -ENOSYS;
-               goto error;
-       }
-
-       /* Get the write side of the pipe. */
-       event_pipe_write_fd = lttng_pipe_get_writefd(
-                       app->event_notifier_group.event_pipe);
-
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_create_event_notifier_group(app->sock,
-                       event_pipe_write_fd, &event_notifier_group);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       ret = 0;
-                       DBG3("UST app create event notifier group failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       ret = 0;
-                       WARN("UST app create event notifier group failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app create event notifier group failed with ret %d: pid = %d, sock = %d, event_pipe_write_fd: %d",
-                                       ret, app->pid, app->sock, event_pipe_write_fd);
-               }
-               goto error;
-       }
-
-       ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
-       if (ret) {
-               ERR("Failed to close write end of the application's event source pipe: app = '%s' (pid = %d)",
-                               app->name, app->pid);
-               goto error;
-       }
-
-       /*
-        * Release the file descriptor that was reserved for the write-end of
-        * the pipe.
-        */
-       lttng_fd_put(LTTNG_FD_APPS, 1);
-
-       lttng_ret = notification_thread_command_add_tracer_event_source(
-                       the_notification_thread_handle,
-                       lttng_pipe_get_readfd(
-                                       app->event_notifier_group.event_pipe),
-                       LTTNG_DOMAIN_UST);
-       if (lttng_ret != LTTNG_OK) {
-               ERR("Failed to add tracer event source to notification thread");
-               ret = -1;
-               goto error;
-       }
-
-       /* Assign handle only when the complete setup is valid. */
-       app->event_notifier_group.object = event_notifier_group;
-
-       event_notifier_error_accounting_status =
-                       event_notifier_error_accounting_register_app(app);
-       switch (event_notifier_error_accounting_status) {
-       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
-               break;
-       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED:
-               DBG3("Failed to setup event notifier error accounting (application does not support notifier error accounting): app socket fd = %d, app name = '%s', app pid = %d",
-                               app->sock, app->name, (int) app->pid);
-               ret = 0;
-               goto error_accounting;
-       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD:
-               DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d, app name = '%s', app pid = %d",
-                               app->sock, app->name, (int) app->pid);
-               ret = 0;
-               goto error_accounting;
-       default:
-               ERR("Failed to setup event notifier error accounting for app");
-               ret = -1;
-               goto error_accounting;
-       }
-
-       return ret;
-
-error_accounting:
-       lttng_ret = notification_thread_command_remove_tracer_event_source(
-                       the_notification_thread_handle,
-                       lttng_pipe_get_readfd(
-                                       app->event_notifier_group.event_pipe));
-       if (lttng_ret != LTTNG_OK) {
-               ERR("Failed to remove application tracer event source from notification thread");
-       }
-
-error:
-       lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.object);
-       free(app->event_notifier_group.object);
-       app->event_notifier_group.object = NULL;
-       return ret;
-}
-
-/*
- * Unregister app by removing it from the global traceable app list and freeing
- * the data struct.
- *
- * The socket is already closed at this point, so there is no need to close it here.
- */
-void ust_app_unregister(int sock)
-{
-       struct ust_app *lta;
-       struct lttng_ht_node_ulong *node;
-       struct lttng_ht_iter ust_app_sock_iter;
-       struct lttng_ht_iter iter;
-       struct ust_app_session *ua_sess;
-       int ret;
-
-       rcu_read_lock();
-
-       /* Get the node reference for a call_rcu */
-       lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
-       node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
-       LTTNG_ASSERT(node);
-
-       lta = caa_container_of(node, struct ust_app, sock_n);
-       DBG("PID %d unregistering with sock %d", lta->pid, sock);
-
-       /*
-        * For per-PID buffers, perform "push metadata" and flush all
-        * application streams before removing app from hash tables,
-        * ensuring proper behavior of data_pending check.
-        * Remove sessions so they are not visible during deletion.
-        */
-       cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
-                       node.node) {
-               struct ust_registry_session *registry;
-
-               ret = lttng_ht_del(lta->sessions, &iter);
-               if (ret) {
-                       /* The session was already removed and is scheduled for teardown. */
-                       continue;
-               }
-
-               if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
-                       (void) ust_app_flush_app_session(lta, ua_sess);
-               }
-
-               /*
-                * Add session to list for teardown. This is safe since at this point we
-                * are the only one using this list.
-                */
-               pthread_mutex_lock(&ua_sess->lock);
-
-               if (ua_sess->deleted) {
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       continue;
-               }
-
-               /*
-                * Normally, this is done in the delete session process which is
-                * executed in the call_rcu below. However, upon unregistration we
-                * can't afford to wait for the grace period before pushing data, or
-                * else the data pending feature can race between the unregistration
-                * and the stop command, where the data pending command is sent
-                * *before* the grace period ends.
-                *
-                * The close metadata below nullifies the metadata pointer in the
-                * session so the delete session will NOT push/close a second time.
-                */
-               registry = get_session_registry(ua_sess);
-               if (registry) {
-                       /* Push metadata for application before freeing the application. */
-                       (void) push_metadata(registry, ua_sess->consumer);
-
-                       /*
-                        * Don't ask to close metadata for global per-UID buffers. In
-                        * that case, close metadata only when the trace session is
-                        * destroyed. Also, the previous metadata push could have
-                        * flagged the metadata registry for closure, so don't send a
-                        * close command if it is already closed.
-                        */
-                       if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
-                               /* And ask to close it for this session registry. */
-                               (void) close_metadata(registry, ua_sess->consumer);
-                       }
-               }
-               cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
-
-               pthread_mutex_unlock(&ua_sess->lock);
-       }
-
-       /* Remove application from PID hash table */
-       ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
-       LTTNG_ASSERT(!ret);
-
-       /*
-        * Remove application from notify hash table. The thread handling the
-        * notify socket could have deleted the node already, so ignore any error:
-        * either way it's valid. The close of that socket is handled by the
-        * apps_notify_thread.
-        */
-       iter.iter.node = &lta->notify_sock_n.node;
-       (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
-
-       /*
-        * Ignore the return value since the node might already have been removed
-        * by an add-replace during app registration, because the OS can reassign
-        * the PID.
-        */
-       iter.iter.node = &lta->pid_n.node;
-       ret = lttng_ht_del(ust_app_ht, &iter);
-       if (ret) {
-               DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
-                               lta->pid);
-       }
-
-       /* Free memory */
-       call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
-
-       rcu_read_unlock();
-       return;
-}
-
-/*
- * Fill the events array with all events of all registered apps.
- */
-int ust_app_list_events(struct lttng_event **events)
-{
-       int ret, handle;
-       size_t nbmem, count = 0;
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-       struct lttng_event *tmp_event;
-
-       nbmem = UST_APP_EVENT_LIST_SIZE;
-       tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
-       if (tmp_event == NULL) {
-               PERROR("zmalloc ust app events");
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       rcu_read_lock();
-
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               struct lttng_ust_abi_tracepoint_iter uiter;
-
-               health_code_update();
-
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notify the caller of this error by
-                        * telling them that this is a version error.
-                        */
-                       continue;
-               }
-               pthread_mutex_lock(&app->sock_lock);
-               handle = lttng_ust_ctl_tracepoint_list(app->sock);
-               if (handle < 0) {
-                       if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
-                               ERR("UST app list events getting handle failed for app pid %d",
-                                               app->pid);
-                       }
-                       pthread_mutex_unlock(&app->sock_lock);
-                       continue;
-               }
-
-               while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle,
-                                       &uiter)) != -LTTNG_UST_ERR_NOENT) {
-                       /* Handle ustctl error. */
-                       if (ret < 0) {
-                               int release_ret;
-
-                               if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
-                                       ERR("UST app tp list get failed for app %d with ret %d",
-                                                       app->sock, ret);
-                               } else {
-                                       DBG3("UST app tp list get failed. Application is dead");
-                                       break;
-                               }
-                               free(tmp_event);
-                               release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
-                               if (release_ret < 0 &&
-                                               release_ret != -LTTNG_UST_ERR_EXITING &&
-                                               release_ret != -EPIPE) {
-                                       ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
-                               }
-                               pthread_mutex_unlock(&app->sock_lock);
-                               goto rcu_error;
-                       }
-
-                       health_code_update();
-                       if (count >= nbmem) {
-                               /* In case the realloc fails, we free the memory */
-                               struct lttng_event *new_tmp_event;
-                               size_t new_nbmem;
-
-                               new_nbmem = nbmem << 1;
-                               DBG2("Reallocating event list from %zu to %zu entries",
-                                               nbmem, new_nbmem);
-                               new_tmp_event = realloc(tmp_event,
-                                       new_nbmem * sizeof(struct lttng_event));
-                               if (new_tmp_event == NULL) {
-                                       int release_ret;
-
-                                       PERROR("realloc ust app events");
-                                       free(tmp_event);
-                                       ret = -ENOMEM;
-                                       release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
-                                       if (release_ret < 0 &&
-                                                       release_ret != -LTTNG_UST_ERR_EXITING &&
-                                                       release_ret != -EPIPE) {
-                                               ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
-                                       }
-                                       pthread_mutex_unlock(&app->sock_lock);
-                                       goto rcu_error;
-                               }
-                               /* Zero the new memory */
-                               memset(new_tmp_event + nbmem, 0,
-                                       (new_nbmem - nbmem) * sizeof(struct lttng_event));
-                               nbmem = new_nbmem;
-                               tmp_event = new_tmp_event;
-                       }
-                       memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
-                       tmp_event[count].loglevel = uiter.loglevel;
-                       tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
-                       tmp_event[count].pid = app->pid;
-                       tmp_event[count].enabled = -1;
-                       count++;
-               }
-               ret = lttng_ust_ctl_release_handle(app->sock, handle);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else if (ret == -EAGAIN) {
-                               WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
-                                               app->pid, app->sock);
-                       } else {
-                               ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
-                                               ret, app->pid, app->sock);
-                       }
-               }
-       }
-
-       ret = count;
-       *events = tmp_event;
-
-       DBG2("UST app list events done (%zu events)", count);
-
-rcu_error:
-       rcu_read_unlock();
-error:
-       health_code_update();
-       return ret;
-}
-
-/*
- * Fill the fields array with all event fields of all registered apps.
- */
-int ust_app_list_event_fields(struct lttng_event_field **fields)
-{
-       int ret, handle;
-       size_t nbmem, count = 0;
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-       struct lttng_event_field *tmp_event;
-
-       nbmem = UST_APP_EVENT_LIST_SIZE;
-       tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
-       if (tmp_event == NULL) {
-               PERROR("zmalloc ust app event fields");
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       rcu_read_lock();
-
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               struct lttng_ust_abi_field_iter uiter;
-
-               health_code_update();
-
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notify the caller of this error by
-                        * telling them that this is a version error.
-                        */
-                       continue;
-               }
-               pthread_mutex_lock(&app->sock_lock);
-               handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
-               if (handle < 0) {
-                       if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
-                               ERR("UST app list field getting handle failed for app pid %d",
-                                               app->pid);
-                       }
-                       pthread_mutex_unlock(&app->sock_lock);
-                       continue;
-               }
-
-               while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle,
-                                       &uiter)) != -LTTNG_UST_ERR_NOENT) {
-                       /* Handle ustctl error. */
-                       if (ret < 0) {
-                               int release_ret;
-
-                               if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
-                                       ERR("UST app tp list field failed for app %d with ret %d",
-                                                       app->sock, ret);
-                               } else {
-                                       DBG3("UST app tp list field failed. Application is dead");
-                                       break;
-                               }
-                               free(tmp_event);
-                               release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
-                               pthread_mutex_unlock(&app->sock_lock);
-                               if (release_ret < 0 &&
-                                               release_ret != -LTTNG_UST_ERR_EXITING &&
-                                               release_ret != -EPIPE) {
-                                       ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
-                               }
-                               goto rcu_error;
-                       }
-
-                       health_code_update();
-                       if (count >= nbmem) {
-                               /* In case the realloc fails, we free the memory */
-                               struct lttng_event_field *new_tmp_event;
-                               size_t new_nbmem;
-
-                               new_nbmem = nbmem << 1;
-                               DBG2("Reallocating event field list from %zu to %zu entries",
-                                               nbmem, new_nbmem);
-                               new_tmp_event = realloc(tmp_event,
-                                       new_nbmem * sizeof(struct lttng_event_field));
-                               if (new_tmp_event == NULL) {
-                                       int release_ret;
-
-                                       PERROR("realloc ust app event fields");
-                                       free(tmp_event);
-                                       ret = -ENOMEM;
-                                       release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
-                                       pthread_mutex_unlock(&app->sock_lock);
-                                       if (release_ret &&
-                                                       release_ret != -LTTNG_UST_ERR_EXITING &&
-                                                       release_ret != -EPIPE) {
-                                               ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
-                                       }
-                                       goto rcu_error;
-                               }
-                               /* Zero the new memory */
-                               memset(new_tmp_event + nbmem, 0,
-                                       (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
-                               nbmem = new_nbmem;
-                               tmp_event = new_tmp_event;
-                       }
-
-                       memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
-                       /* Mapping between these enums matches 1 to 1. */
-                       tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
-                       tmp_event[count].nowrite = uiter.nowrite;
-
-                       memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
-                       tmp_event[count].event.loglevel = uiter.loglevel;
-                       tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
-                       tmp_event[count].event.pid = app->pid;
-                       tmp_event[count].event.enabled = -1;
-                       count++;
-               }
-               ret = lttng_ust_ctl_release_handle(app->sock, handle);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0 &&
-                               ret != -LTTNG_UST_ERR_EXITING &&
-                               ret != -EPIPE) {
-                       ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
-               }
-       }
-
-       ret = count;
-       *fields = tmp_event;
-
-       DBG2("UST app list event fields done (%zu events)", count);
-
-rcu_error:
-       rcu_read_unlock();
-error:
-       health_code_update();
-       return ret;
-}
-
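The buffer management above follows a grow-by-doubling pattern: when the array is full, double the capacity, zero the newly added tail, and only adopt the new pointer once realloc() succeeds. A minimal, self-contained sketch of that pattern, using a hypothetical helper name and generic element sizes rather than struct lttng_event_field:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/*
 * Grow *array so it can hold at least 'needed' elements of 'elem_size'
 * bytes, doubling *capacity as required. Newly added elements are zeroed.
 * On failure, the original buffer is left untouched and -ENOMEM is
 * returned; the caller decides whether to free it.
 */
static int grow_zeroed_array(void **array, size_t *capacity,
		size_t elem_size, size_t needed)
{
	size_t new_capacity;
	void *new_array;

	if (needed <= *capacity) {
		return 0;
	}

	new_capacity = *capacity ? *capacity : 1;
	while (new_capacity < needed) {
		new_capacity <<= 1;
	}

	new_array = realloc(*array, new_capacity * elem_size);
	if (!new_array) {
		return -ENOMEM;
	}

	/* Zero only the newly added entries. */
	memset((char *) new_array + (*capacity * elem_size), 0,
			(new_capacity - *capacity) * elem_size);
	*array = new_array;
	*capacity = new_capacity;
	return 0;
}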
-/*
- * Free and clean all traceable apps of the global list.
- *
- * Should _NOT_ be called with RCU read-side lock held.
- */
-void ust_app_clean_list(void)
-{
-       int ret;
-       struct ust_app *app;
-       struct lttng_ht_iter iter;
-
-       DBG2("UST app cleaning registered apps hash table");
-
-       rcu_read_lock();
-
-       /* Cleanup notify socket hash table */
-       if (ust_app_ht_by_notify_sock) {
-               cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
-                               notify_sock_n.node) {
-                       /*
-                        * Assert that all notifiers are gone as all triggers
-                        * are unregistered prior to this clean-up.
-                        */
-                       LTTNG_ASSERT(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
-
-                       ust_app_notify_sock_unregister(app->notify_sock);
-               }
-       }
-
-       if (ust_app_ht) {
-               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-                       ret = lttng_ht_del(ust_app_ht, &iter);
-                       LTTNG_ASSERT(!ret);
-                       call_rcu(&app->pid_n.head, delete_ust_app_rcu);
-               }
-       }
-
-       /* Cleanup socket hash table */
-       if (ust_app_ht_by_sock) {
-               cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
-                               sock_n.node) {
-                       ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
-                       LTTNG_ASSERT(!ret);
-               }
-       }
-
-       rcu_read_unlock();
-
-       /* Destroy is done only when the ht is empty */
-       if (ust_app_ht) {
-               ht_cleanup_push(ust_app_ht);
-       }
-       if (ust_app_ht_by_sock) {
-               ht_cleanup_push(ust_app_ht_by_sock);
-       }
-       if (ust_app_ht_by_notify_sock) {
-               ht_cleanup_push(ust_app_ht_by_notify_sock);
-       }
-}
-
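ust_app_clean_list() above relies on the standard userspace-RCU teardown sequence: unlink the node from the hash table while holding the RCU read-side lock, then hand it to call_rcu() so that concurrent readers never observe freed memory. A minimal sketch of that deferred-free idiom with a hypothetical node type (header names may vary slightly between liburcu versions):

#include <stdlib.h>
#include <urcu.h>	/* rcu_read_lock(), call_rcu(), caa_container_of() */

struct example_node {
	int value;
	struct rcu_head rcu_head;
};

/* Invoked once all pre-existing RCU readers have completed. */
static void free_example_node_rcu(struct rcu_head *head)
{
	free(caa_container_of(head, struct example_node, rcu_head));
}

static void dispose_example_node(struct example_node *node)
{
	/*
	 * The node must already be unlinked from its RCU-protected
	 * structure; call_rcu() only defers the actual reclamation.
	 */
	call_rcu(&node->rcu_head, free_example_node_rcu);
}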
-/*
- * Init UST app hash table.
- */
-int ust_app_ht_alloc(void)
-{
-       ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-       if (!ust_app_ht) {
-               return -1;
-       }
-       ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-       if (!ust_app_ht_by_sock) {
-               return -1;
-       }
-       ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-       if (!ust_app_ht_by_notify_sock) {
-               return -1;
-       }
-       return 0;
-}
-
-/*
- * For a specific UST session, disable the channel for all registered apps.
- */
-int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_str *ua_chan_node;
-       struct ust_app *app;
-       struct ust_app_session *ua_sess;
-       struct ust_app_channel *ua_chan;
-
-       LTTNG_ASSERT(usess->active);
-       DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
-                       uchan->name, usess->id);
-
-       rcu_read_lock();
-
-       /* For every registered application */
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               struct lttng_ht_iter uiter;
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notify the caller of this error by
-                        * telling them that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (ua_sess == NULL) {
-                       continue;
-               }
-
-               /* Get channel */
-               lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               /* If the session is found for the app, the channel must be there */
-               LTTNG_ASSERT(ua_chan_node);
-
-               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
-               /* The channel must not be already disabled */
-               LTTNG_ASSERT(ua_chan->enabled == 1);
-
-               /* Disable channel onto application */
-               ret = disable_ust_app_channel(ua_sess, ua_chan, app);
-               if (ret < 0) {
-                       /* XXX: We might want to report this error at some point... */
-                       continue;
-               }
-       }
-
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * For a specific UST session, enable the channel for all registered apps.
- */
-int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-       struct ust_app_session *ua_sess;
-
-       LTTNG_ASSERT(usess->active);
-       DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
-                       uchan->name, usess->id);
-
-       rcu_read_lock();
-
-       /* For every registered application */
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notify the caller of this error by
-                        * telling them that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (ua_sess == NULL) {
-                       continue;
-               }
-
-               /* Enable channel onto application */
-               ret = enable_ust_app_channel(ua_sess, uchan, app);
-               if (ret < 0) {
-                       /* XXX: We might want to report this error at some point... */
-                       continue;
-               }
-       }
-
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Disable an event in a channel and for a specific session.
- */
-int ust_app_disable_event_glb(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter, uiter;
-       struct lttng_ht_node_str *ua_chan_node;
-       struct ust_app *app;
-       struct ust_app_session *ua_sess;
-       struct ust_app_channel *ua_chan;
-       struct ust_app_event *ua_event;
-
-       LTTNG_ASSERT(usess->active);
-       DBG("UST app disabling event %s for all apps in channel "
-                       "%s for session id %" PRIu64,
-                       uevent->attr.name, uchan->name, usess->id);
-
-       rcu_read_lock();
-
-       /* For all registered applications */
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notify the caller of this error by
-                        * telling them that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (ua_sess == NULL) {
-                       /* Next app */
-                       continue;
-               }
-
-               /* Lookup channel in the ust app session */
-               lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               if (ua_chan_node == NULL) {
-                       DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d. "
-                                       "Skipping", uchan->name, usess->id, app->pid);
-                       continue;
-               }
-               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
-
-               ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
-                               uevent->filter, uevent->attr.loglevel,
-                               uevent->exclusion);
-               if (ua_event == NULL) {
-                       DBG2("Event %s not found in channel %s for app pid %d. "
-                                       "Skipping", uevent->attr.name, uchan->name, app->pid);
-                       continue;
-               }
-
-               ret = disable_ust_app_event(ua_sess, ua_event, app);
-               if (ret < 0) {
-                       /* XXX: Report error someday... */
-                       continue;
-               }
-       }
-
-       rcu_read_unlock();
-       return ret;
-}
-
-/* The ua_sess lock must be held by the caller.  */
-static
-int ust_app_channel_create(struct ltt_ust_session *usess,
-               struct ust_app_session *ua_sess,
-               struct ltt_ust_channel *uchan, struct ust_app *app,
-               struct ust_app_channel **_ua_chan)
-{
-       int ret = 0;
-       struct ust_app_channel *ua_chan = NULL;
-
-       LTTNG_ASSERT(ua_sess);
-       ASSERT_LOCKED(ua_sess->lock);
-
-       if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
-                    sizeof(uchan->name))) {
-               copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
-                       &uchan->attr);
-               ret = 0;
-       } else {
-               struct ltt_ust_context *uctx = NULL;
-
-               /*
-                * Create channel onto application and synchronize its
-                * configuration.
-                */
-               ret = ust_app_channel_allocate(ua_sess, uchan,
-                       LTTNG_UST_ABI_CHAN_PER_CPU, usess,
-                       &ua_chan);
-               if (ret < 0) {
-                       goto error;
-               }
-
-               ret = ust_app_channel_send(app, usess,
-                       ua_sess, ua_chan);
-               if (ret) {
-                       goto error;
-               }
-
-               /* Add contexts. */
-               cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
-                       ret = create_ust_app_channel_context(ua_chan,
-                               &uctx->ctx, app);
-                       if (ret) {
-                               goto error;
-                       }
-               }
-       }
-
-error:
-       if (ret < 0) {
-               switch (ret) {
-               case -ENOTCONN:
-                       /*
-                        * The application's socket is not valid. Either a bad socket
-                        * or a timeout on it. We can't inform the caller that for a
-                        * specific app, the session failed, so let's continue here.
-                        */
-                       ret = 0;        /* Not an error. */
-                       break;
-               case -ENOMEM:
-               default:
-                       break;
-               }
-       }
-
-       if (ret == 0 && _ua_chan) {
-               /*
-                * Only return the application's channel on success. Note
-                * that the channel can still be part of the application's
-                * channel hashtable on error.
-                */
-               *_ua_chan = ua_chan;
-       }
-       return ret;
-}
-
-/*
- * Enable event for a specific session and channel on the tracer.
- */
-int ust_app_enable_event_glb(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter, uiter;
-       struct lttng_ht_node_str *ua_chan_node;
-       struct ust_app *app;
-       struct ust_app_session *ua_sess;
-       struct ust_app_channel *ua_chan;
-       struct ust_app_event *ua_event;
-
-       LTTNG_ASSERT(usess->active);
-       DBG("UST app enabling event %s for all apps for session id %" PRIu64,
-                       uevent->attr.name, usess->id);
-
-       /*
-        * NOTE: At this point, this function is called only if the session and
-        * channel passed have already been created for all apps and enabled on
-        * the tracer.
-        */
-
-       rcu_read_lock();
-
-       /* For all registered applications */
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notify the caller of this error by
-                        * telling them that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (!ua_sess) {
-                       /* The application has a problem or is probably dead. */
-                       continue;
-               }
-
-               pthread_mutex_lock(&ua_sess->lock);
-
-               if (ua_sess->deleted) {
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       continue;
-               }
-
-               /* Lookup channel in the ust app session */
-               lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               /*
-                * It is possible that the channel cannot be found if
-                * the channel/event creation occurs concurrently with
-                * an application exit.
-                */
-               if (!ua_chan_node) {
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       continue;
-               }
-
-               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
-
-               /* Get event node */
-               ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
-                               uevent->filter, uevent->attr.loglevel, uevent->exclusion);
-               if (ua_event == NULL) {
-                       DBG3("UST app enable event %s not found for app PID %d. "
-                                       "Skipping app", uevent->attr.name, app->pid);
-                       goto next_app;
-               }
-
-               ret = enable_ust_app_event(ua_sess, ua_event, app);
-               if (ret < 0) {
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       goto error;
-               }
-       next_app:
-               pthread_mutex_unlock(&ua_sess->lock);
-       }
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * For a specific existing UST session and UST channel, creates the event for
- * all registered apps.
- */
-int ust_app_create_event_glb(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter, uiter;
-       struct lttng_ht_node_str *ua_chan_node;
-       struct ust_app *app;
-       struct ust_app_session *ua_sess;
-       struct ust_app_channel *ua_chan;
-
-       LTTNG_ASSERT(usess->active);
-       DBG("UST app creating event %s for all apps for session id %" PRIu64,
-                       uevent->attr.name, usess->id);
-
-       rcu_read_lock();
-
-       /* For all registered applications */
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notify the caller of this error by
-                        * telling them that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (!ua_sess) {
-                       /* The application has a problem or is probably dead. */
-                       continue;
-               }
-
-               pthread_mutex_lock(&ua_sess->lock);
-
-               if (ua_sess->deleted) {
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       continue;
-               }
-
-               /* Lookup channel in the ust app session */
-               lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               /* If the channel is not found, there is a code flow error */
-               LTTNG_ASSERT(ua_chan_node);
-
-               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
-
-               ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
-               pthread_mutex_unlock(&ua_sess->lock);
-               if (ret < 0) {
-                       if (ret != -LTTNG_UST_ERR_EXIST) {
-                               /* Possible value at this point: -ENOMEM. If so, we stop! */
-                               break;
-                       }
-                       DBG2("UST app event %s already exist on app PID %d",
-                                       uevent->attr.name, app->pid);
-                       continue;
-               }
-       }
-
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Start tracing for a specific UST session and app.
- *
- * Called with UST app session lock held.
- *
- */
-static
-int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
-{
-       int ret = 0;
-       struct ust_app_session *ua_sess;
-
-       DBG("Starting tracing for ust app pid %d", app->pid);
-
-       rcu_read_lock();
-
-       if (!app->compatible) {
-               goto end;
-       }
-
-       ua_sess = lookup_session_by_app(usess, app);
-       if (ua_sess == NULL) {
-               /* The session is in the teardown process. Ignore and continue. */
-               goto end;
-       }
-
-       pthread_mutex_lock(&ua_sess->lock);
-
-       if (ua_sess->deleted) {
-               pthread_mutex_unlock(&ua_sess->lock);
-               goto end;
-       }
-
-       if (ua_sess->enabled) {
-               pthread_mutex_unlock(&ua_sess->lock);
-               goto end;
-       }
-
-       /* Upon restart, we skip the setup, already done */
-       if (ua_sess->started) {
-               goto skip_setup;
-       }
-
-       health_code_update();
-
-skip_setup:
-       /* This starts the UST tracing */
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_start_session(app->sock, ua_sess->handle);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app start session failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       goto end;
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app start session failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       goto end;
-
-               } else {
-                       ERR("UST app start session failed with ret %d: pid = %d, sock = %d",
-                                       ret, app->pid, app->sock);
-               }
-               goto error_unlock;
-       }
-
-       /* Indicate that the session has been started once */
-       ua_sess->started = 1;
-       ua_sess->enabled = 1;
-
-       pthread_mutex_unlock(&ua_sess->lock);
-
-       health_code_update();
-
-       /* Quiescent wait after starting trace */
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_wait_quiescent(app->sock);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
-                                       ret, app->pid, app->sock);
-               }
-       }
-
-end:
-       rcu_read_unlock();
-       health_code_update();
-       return 0;
-
-error_unlock:
-       pthread_mutex_unlock(&ua_sess->lock);
-       rcu_read_unlock();
-       health_code_update();
-       return -1;
-}
-
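ust_app_start_trace() and the stop/flush paths below share a fixed lock ordering: the RCU read-side lock first, then the per-session ua_sess->lock, and finally the short-lived app->sock_lock held only around each lttng_ust_ctl call, with the locks released in the reverse order on every exit path. A condensed sketch of that ordering, using hypothetical stand-in types:

#include <pthread.h>
#include <urcu.h>

/* Hypothetical stand-ins for the session and application structures. */
struct example_session { pthread_mutex_t lock; int handle; };
struct example_app { pthread_mutex_t sock_lock; int sock; };

static int do_traced_operation(struct example_app *app,
		struct example_session *sess,
		int (*op)(int sock, int handle))
{
	int ret;

	rcu_read_lock();			/* 1. RCU read-side section. */
	pthread_mutex_lock(&sess->lock);	/* 2. Per-session lock. */

	pthread_mutex_lock(&app->sock_lock);	/* 3. Socket lock, only around the call. */
	ret = op(app->sock, sess->handle);
	pthread_mutex_unlock(&app->sock_lock);

	pthread_mutex_unlock(&sess->lock);	/* Release in reverse order. */
	rcu_read_unlock();

	return ret;
}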
-/*
- * Stop tracing for a specific UST session and app.
- */
-static
-int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
-{
-       int ret = 0;
-       struct ust_app_session *ua_sess;
-       struct ust_registry_session *registry;
-
-       DBG("Stopping tracing for ust app pid %d", app->pid);
-
-       rcu_read_lock();
-
-       if (!app->compatible) {
-               goto end_no_session;
-       }
-
-       ua_sess = lookup_session_by_app(usess, app);
-       if (ua_sess == NULL) {
-               goto end_no_session;
-       }
-
-       pthread_mutex_lock(&ua_sess->lock);
-
-       if (ua_sess->deleted) {
-               pthread_mutex_unlock(&ua_sess->lock);
-               goto end_no_session;
-       }
-
-       /*
-        * If started = 0, it means that stop trace has been called for a session
-        * that was never started. It's possible since we can have a failed start
-        * from either the application manager thread or the command thread. Simply
-        * indicate that this is a stop error.
-        */
-       if (!ua_sess->started) {
-               goto error_rcu_unlock;
-       }
-
-       health_code_update();
-
-       /* This inhibits UST tracing */
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_stop_session(app->sock, ua_sess->handle);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app stop session failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-                       goto end_unlock;
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app stop session failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-                       goto end_unlock;
-
-               } else {
-                       ERR("UST app stop session failed with ret %d: pid = %d, sock = %d",
-                                       ret, app->pid, app->sock);
-               }
-               goto error_rcu_unlock;
-       }
-
-       health_code_update();
-       ua_sess->enabled = 0;
-
-       /* Quiescent wait after stopping trace */
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_wait_quiescent(app->sock);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
-                                       ret, app->pid, app->sock);
-               }
-       }
-
-       health_code_update();
-
-       registry = get_session_registry(ua_sess);
-
-       /* The UST app session is held; the registry shall not be null. */
-       LTTNG_ASSERT(registry);
-
-       /* Push metadata for application before freeing the application. */
-       (void) push_metadata(registry, ua_sess->consumer);
-
-end_unlock:
-       pthread_mutex_unlock(&ua_sess->lock);
-end_no_session:
-       rcu_read_unlock();
-       health_code_update();
-       return 0;
-
-error_rcu_unlock:
-       pthread_mutex_unlock(&ua_sess->lock);
-       rcu_read_unlock();
-       health_code_update();
-       return -1;
-}
-
-static
-int ust_app_flush_app_session(struct ust_app *app,
-               struct ust_app_session *ua_sess)
-{
-       int ret, retval = 0;
-       struct lttng_ht_iter iter;
-       struct ust_app_channel *ua_chan;
-       struct consumer_socket *socket;
-
-       DBG("Flushing app session buffers for ust app pid %d", app->pid);
-
-       rcu_read_lock();
-
-       if (!app->compatible) {
-               goto end_not_compatible;
-       }
-
-       pthread_mutex_lock(&ua_sess->lock);
-
-       if (ua_sess->deleted) {
-               goto end_deleted;
-       }
-
-       health_code_update();
-
-       /* Flushing buffers */
-       socket = consumer_find_socket_by_bitness(app->bits_per_long,
-                       ua_sess->consumer);
-
-       /* Flush buffers and push metadata. */
-       switch (ua_sess->buffer_type) {
-       case LTTNG_BUFFER_PER_PID:
-               cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
-                               node.node) {
-                       health_code_update();
-                       ret = consumer_flush_channel(socket, ua_chan->key);
-                       if (ret) {
-                               ERR("Error flushing consumer channel");
-                               retval = -1;
-                               continue;
-                       }
-               }
-               break;
-       case LTTNG_BUFFER_PER_UID:
-       default:
-               abort();
-               break;
-       }
-
-       health_code_update();
-
-end_deleted:
-       pthread_mutex_unlock(&ua_sess->lock);
-
-end_not_compatible:
-       rcu_read_unlock();
-       health_code_update();
-       return retval;
-}
-
-/*
- * Flush buffers for all applications for a specific UST session.
- * Called with UST session lock held.
- */
-static
-int ust_app_flush_session(struct ltt_ust_session *usess)
-{
-       int ret = 0;
-
-       DBG("Flushing session buffers for all ust apps");
-
-       rcu_read_lock();
-
-       /* Flush buffers and push metadata. */
-       switch (usess->buffer_type) {
-       case LTTNG_BUFFER_PER_UID:
-       {
-               struct buffer_reg_uid *reg;
-               struct lttng_ht_iter iter;
-
-               /* Flush all per UID buffers associated to that session. */
-               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
-                       struct ust_registry_session *ust_session_reg;
-                       struct buffer_reg_channel *buf_reg_chan;
-                       struct consumer_socket *socket;
-
-                       /* Get consumer socket to use to push the metadata. */
-                       socket = consumer_find_socket_by_bitness(reg->bits_per_long,
-                                       usess->consumer);
-                       if (!socket) {
-                               /* Ignore request if no consumer is found for the session. */
-                               continue;
-                       }
-
-                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
-                                       buf_reg_chan, node.node) {
-                               /*
-                                * The following call will print error values so the return
-                                * code is of little importance because whatever happens, we
-                                * have to try them all.
-                                */
-                               (void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
-                       }
-
-                       ust_session_reg = reg->registry->reg.ust;
-                       /* Push metadata. */
-                       (void) push_metadata(ust_session_reg, usess->consumer);
-               }
-               break;
-       }
-       case LTTNG_BUFFER_PER_PID:
-       {
-               struct ust_app_session *ua_sess;
-               struct lttng_ht_iter iter;
-               struct ust_app *app;
-
-               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-                       ua_sess = lookup_session_by_app(usess, app);
-                       if (ua_sess == NULL) {
-                               continue;
-                       }
-                       (void) ust_app_flush_app_session(app, ua_sess);
-               }
-               break;
-       }
-       default:
-               ret = -1;
-               abort();
-               break;
-       }
-
-       rcu_read_unlock();
-       health_code_update();
-       return ret;
-}
-
-static
-int ust_app_clear_quiescent_app_session(struct ust_app *app,
-               struct ust_app_session *ua_sess)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct ust_app_channel *ua_chan;
-       struct consumer_socket *socket;
-
-       DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
-
-       rcu_read_lock();
-
-       if (!app->compatible) {
-               goto end_not_compatible;
-       }
-
-       pthread_mutex_lock(&ua_sess->lock);
-
-       if (ua_sess->deleted) {
-               goto end_unlock;
-       }
-
-       health_code_update();
-
-       socket = consumer_find_socket_by_bitness(app->bits_per_long,
-                       ua_sess->consumer);
-       if (!socket) {
-               ERR("Failed to find consumer (%" PRIu32 ") socket",
-                               app->bits_per_long);
-               ret = -1;
-               goto end_unlock;
-       }
-
-       /* Clear quiescent state. */
-       switch (ua_sess->buffer_type) {
-       case LTTNG_BUFFER_PER_PID:
-               cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
-                               ua_chan, node.node) {
-                       health_code_update();
-                       ret = consumer_clear_quiescent_channel(socket,
-                                       ua_chan->key);
-                       if (ret) {
-                               ERR("Error clearing quiescent state for consumer channel");
-                               ret = -1;
-                               continue;
-                       }
-               }
-               break;
-       case LTTNG_BUFFER_PER_UID:
-       default:
-               abort();
-               ret = -1;
-               break;
-       }
-
-       health_code_update();
-
-end_unlock:
-       pthread_mutex_unlock(&ua_sess->lock);
-
-end_not_compatible:
-       rcu_read_unlock();
-       health_code_update();
-       return ret;
-}
-
-/*
- * Clear quiescent state in each stream for all applications for a
- * specific UST session.
- * Called with UST session lock held.
- */
-static
-int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
-{
-       int ret = 0;
-
-       DBG("Clearing stream quiescent state for all ust apps");
-
-       rcu_read_lock();
-
-       switch (usess->buffer_type) {
-       case LTTNG_BUFFER_PER_UID:
-       {
-               struct lttng_ht_iter iter;
-               struct buffer_reg_uid *reg;
-
-               /*
-                * Clear quiescent for all per UID buffers associated to
-                * that session.
-                */
-               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
-                       struct consumer_socket *socket;
-                       struct buffer_reg_channel *buf_reg_chan;
-
-                       /* Get associated consumer socket. */
-                       socket = consumer_find_socket_by_bitness(
-                                       reg->bits_per_long, usess->consumer);
-                       if (!socket) {
-                               /*
-                                * Ignore request if no consumer is found for
-                                * the session.
-                                */
-                               continue;
-                       }
-
-                       cds_lfht_for_each_entry(reg->registry->channels->ht,
-                                       &iter.iter, buf_reg_chan, node.node) {
-                               /*
-                                * The following call will print error values so
-                                * the return code is of little importance
-                                * because whatever happens, we have to try them
-                                * all.
-                                */
-                               (void) consumer_clear_quiescent_channel(socket,
-                                               buf_reg_chan->consumer_key);
-                       }
-               }
-               break;
-       }
-       case LTTNG_BUFFER_PER_PID:
-       {
-               struct ust_app_session *ua_sess;
-               struct lttng_ht_iter iter;
-               struct ust_app *app;
-
-               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
-                               pid_n.node) {
-                       ua_sess = lookup_session_by_app(usess, app);
-                       if (ua_sess == NULL) {
-                               continue;
-                       }
-                       (void) ust_app_clear_quiescent_app_session(app,
-                                       ua_sess);
-               }
-               break;
-       }
-       default:
-               ret = -1;
-               abort();
-               break;
-       }
-
-       rcu_read_unlock();
-       health_code_update();
-       return ret;
-}
-
-/*
- * Destroy a specific UST session in apps.
- */
-static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
-{
-       int ret;
-       struct ust_app_session *ua_sess;
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_u64 *node;
-
-       DBG("Destroy tracing for ust app pid %d", app->pid);
-
-       rcu_read_lock();
-
-       if (!app->compatible) {
-               goto end;
-       }
-
-       __lookup_session_by_app(usess, app, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (node == NULL) {
-               /* The session is being deleted or has already been deleted. */
-               goto end;
-       }
-       ua_sess = caa_container_of(node, struct ust_app_session, node);
-
-       health_code_update();
-       destroy_app_session(app, ua_sess);
-
-       health_code_update();
-
-       /* Quiescent wait after stopping trace */
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_wait_quiescent(app->sock);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
-                                       ret, app->pid, app->sock);
-               }
-       }
-end:
-       rcu_read_unlock();
-       health_code_update();
-       return 0;
-}
-
-/*
- * Start tracing for the UST session.
- */
-int ust_app_start_trace_all(struct ltt_ust_session *usess)
-{
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-
-       DBG("Starting all UST traces");
-
-       /*
-        * Even though the start trace might fail, flag this session active so
-        * other applications coming in are started by default.
-        */
-       usess->active = 1;
-
-       rcu_read_lock();
-
-       /*
-        * In a start-stop-start use-case, we need to clear the quiescent state
-        * of each channel set by the prior stop command, thus ensuring that a
-        * following stop or destroy is sure to grab a timestamp_end near those
-        * operations, even if the packet is empty.
-        */
-       (void) ust_app_clear_quiescent_session(usess);
-
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               ust_app_global_update(usess, app);
-       }
-
-       rcu_read_unlock();
-
-       return 0;
-}
-
-/*
- * Stop tracing for the UST session.
- * Called with UST session lock held.
- */
-int ust_app_stop_trace_all(struct ltt_ust_session *usess)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-
-       DBG("Stopping all UST traces");
-
-       /*
-        * Even though the stop trace might fail, flag this session inactive so
-        * other applications coming in are not started by default.
-        */
-       usess->active = 0;
-
-       rcu_read_lock();
-
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               ret = ust_app_stop_trace(usess, app);
-               if (ret < 0) {
-                       /* Continue to next apps even on error */
-                       continue;
-               }
-       }
-
-       (void) ust_app_flush_session(usess);
-
-       rcu_read_unlock();
-
-       return 0;
-}
-
-/*
- * Destroy app UST session.
- */
-int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-
-       DBG("Destroy all UST traces");
-
-       rcu_read_lock();
-
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               ret = destroy_trace(usess, app);
-               if (ret < 0) {
-                       /* Continue to next apps even on error */
-                       continue;
-               }
-       }
-
-       rcu_read_unlock();
-
-       return 0;
-}
-
-/* The ua_sess lock must be held by the caller. */
-static
-int find_or_create_ust_app_channel(
-               struct ltt_ust_session *usess,
-               struct ust_app_session *ua_sess,
-               struct ust_app *app,
-               struct ltt_ust_channel *uchan,
-               struct ust_app_channel **ua_chan)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_str *ua_chan_node;
-
-       lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
-       ua_chan_node = lttng_ht_iter_get_node_str(&iter);
-       if (ua_chan_node) {
-               *ua_chan = caa_container_of(ua_chan_node,
-                       struct ust_app_channel, node);
-               goto end;
-       }
-
-       ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
-       if (ret) {
-               goto end;
-       }
-end:
-       return ret;
-}
-
-static
-int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
-               struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
-               struct ust_app *app)
-{
-       int ret = 0;
-       struct ust_app_event *ua_event = NULL;
-
-       ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
-               uevent->filter, uevent->attr.loglevel, uevent->exclusion);
-       if (!ua_event) {
-               ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
-               if (ret < 0) {
-                       goto end;
-               }
-       } else {
-               if (ua_event->enabled != uevent->enabled) {
-                       ret = uevent->enabled ?
-                               enable_ust_app_event(ua_sess, ua_event, app) :
-                               disable_ust_app_event(ua_sess, ua_event, app);
-               }
-       }
-
-end:
-       return ret;
-}
-
-/* Called with RCU read-side lock held. */
-static
-void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
-{
-       int ret = 0;
-       enum lttng_error_code ret_code;
-       enum lttng_trigger_status t_status;
-       struct lttng_ht_iter app_trigger_iter;
-       struct lttng_triggers *triggers = NULL;
-       struct ust_app_event_notifier_rule *event_notifier_rule;
-       unsigned int count, i;
-
-       if (!ust_app_supports_notifiers(app)) {
-               goto end;
-       }
-
-       /*
-        * Currently, registering or unregistering a trigger with an
-        * event rule condition causes a full synchronization of the event
-        * notifiers.
-        *
-        * The first step attempts to add an event notifier for all registered
-        * triggers that apply to the user space tracers. Then, the
-        * application's event notifier rules are all checked against the list
-        * of registered triggers. Any event notifier that doesn't have a
-        * matching trigger can be assumed to have been disabled.
-        *
-        * All of this is inefficient, but is put in place to get the feature
-        * rolling as it is simpler at this moment. It will be optimized Soon™
-        * to allow the state of enabled event notifiers to be synchronized in
-        * a piece-wise way.
-        */
-
-       /* Get all triggers using uid 0 (root) */
-       ret_code = notification_thread_command_list_triggers(
-                       the_notification_thread_handle, 0, &triggers);
-       if (ret_code != LTTNG_OK) {
-               goto end;
-       }
-
-       LTTNG_ASSERT(triggers);
-
-       t_status = lttng_triggers_get_count(triggers, &count);
-       if (t_status != LTTNG_TRIGGER_STATUS_OK) {
-               goto end;
-       }
-
-       for (i = 0; i < count; i++) {
-               struct lttng_condition *condition;
-               struct lttng_event_rule *event_rule;
-               struct lttng_trigger *trigger;
-               const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
-               enum lttng_condition_status condition_status;
-               uint64_t token;
-
-               trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
-               LTTNG_ASSERT(trigger);
-
-               token = lttng_trigger_get_tracer_token(trigger);
-               condition = lttng_trigger_get_condition(trigger);
-
-               if (lttng_condition_get_type(condition) !=
-                               LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
-                       /* Does not apply */
-                       continue;
-               }
-
-               condition_status =
-                               lttng_condition_event_rule_matches_borrow_rule_mutable(
-                                               condition, &event_rule);
-               LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
-
-               if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
-                       /* Skip kernel related triggers. */
-                       continue;
-               }
-
-               /*
-                * Find or create the associated token event rule. The caller
-                * holds the RCU read lock, so this is safe to call without
-                * explicitly acquiring it here.
-                */
-               looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
-                               app->token_to_event_notifier_rule_ht, token);
-               if (!looked_up_event_notifier_rule) {
-                       ret = create_ust_app_event_notifier_rule(trigger, app);
-                       if (ret < 0) {
-                               goto end;
-                       }
-               }
-       }
-
-       rcu_read_lock();
-       /* Remove all unknown event sources from the app. */
-       cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
-                       &app_trigger_iter.iter, event_notifier_rule,
-                       node.node) {
-               const uint64_t app_token = event_notifier_rule->token;
-               bool found = false;
-
-               /*
-                * Check if the app event trigger still exists on the
-                * notification side.
-                */
-               for (i = 0; i < count; i++) {
-                       uint64_t notification_thread_token;
-                       const struct lttng_trigger *trigger =
-                                       lttng_triggers_get_at_index(
-                                                       triggers, i);
-
-                       LTTNG_ASSERT(trigger);
-
-                       notification_thread_token =
-                                       lttng_trigger_get_tracer_token(trigger);
-
-                       if (notification_thread_token == app_token) {
-                               found = true;
-                               break;
-                       }
-               }
-
-               if (found) {
-                       /* Still valid. */
-                       continue;
-               }
-
-               /*
-                * This trigger was unregistered; disable it on the tracer's
-                * side.
-                */
-               ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
-                               &app_trigger_iter);
-               LTTNG_ASSERT(ret == 0);
-
-               /* Callee logs errors. */
-               (void) disable_ust_object(app, event_notifier_rule->obj);
-
-               delete_ust_app_event_notifier_rule(
-                               app->sock, event_notifier_rule, app);
-       }
-
-       rcu_read_unlock();
-
-end:
-       lttng_triggers_destroy(triggers);
-       return;
-}
-
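The comment in ust_app_synchronize_event_notifier_rules() above describes a plain two-pass reconciliation: first ensure every registered trigger token has a matching local rule, then drop every local rule whose token no longer exists. Stripped of the hash tables and liburcu plumbing, the shape of that algorithm is sketched below; every name here is hypothetical:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical callbacks standing in for the UST-specific operations. */
struct reconcile_ops {
	bool (*has_local_rule)(uint64_t token);
	int (*create_local_rule)(uint64_t token);
	void (*remove_local_rule)(uint64_t token);
};

static int reconcile_tokens(const uint64_t *desired, size_t desired_count,
		const uint64_t *local, size_t local_count,
		const struct reconcile_ops *ops)
{
	size_t i, j;

	/* Pass 1: create anything that is desired but missing locally. */
	for (i = 0; i < desired_count; i++) {
		if (!ops->has_local_rule(desired[i])) {
			int ret = ops->create_local_rule(desired[i]);

			if (ret) {
				return ret;
			}
		}
	}

	/* Pass 2: remove local rules whose token is no longer desired. */
	for (i = 0; i < local_count; i++) {
		bool found = false;

		for (j = 0; j < desired_count; j++) {
			if (local[i] == desired[j]) {
				found = true;
				break;
			}
		}

		if (!found) {
			ops->remove_local_rule(local[i]);
		}
	}

	return 0;
}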
-/*
- * RCU read lock must be held by the caller.
- */
-static
-void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
-               struct ust_app_session *ua_sess,
-               struct ust_app *app)
-{
-       int ret = 0;
-       struct cds_lfht_iter uchan_iter;
-       struct ltt_ust_channel *uchan;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(app);
-
-       cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
-                       uchan, node.node) {
-               struct ust_app_channel *ua_chan;
-               struct cds_lfht_iter uevent_iter;
-               struct ltt_ust_event *uevent;
-
-               /*
-                * Search for a matching ust_app_channel. If none is found,
-                * create it. Creating the channel will cause the ua_chan
-                * structure to be allocated, the channel buffers to be
-                * allocated (if necessary) and sent to the application, and
-                * all enabled contexts will be added to the channel.
-                */
-               ret = find_or_create_ust_app_channel(usess, ua_sess,
-                       app, uchan, &ua_chan);
-               if (ret) {
-                       /* Tracer is probably gone or ENOMEM. */
-                       goto end;
-               }
-
-               if (!ua_chan) {
-                       /* ua_chan will be NULL for the metadata channel */
-                       continue;
-               }
-
-               cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
-                               node.node) {
-                       ret = ust_app_channel_synchronize_event(ua_chan,
-                               uevent, ua_sess, app);
-                       if (ret) {
-                               goto end;
-                       }
-               }
-
-               if (ua_chan->enabled != uchan->enabled) {
-                       ret = uchan->enabled ?
-                               enable_ust_app_channel(ua_sess, uchan, app) :
-                               disable_ust_app_channel(ua_sess, ua_chan, app);
-                       if (ret) {
-                               goto end;
-                       }
-               }
-       }
-end:
-       return;
-}
-
-/*
- * The caller must ensure that the application is compatible and is tracked
- * by the process attribute trackers.
- */
-static
-void ust_app_synchronize(struct ltt_ust_session *usess,
-               struct ust_app *app)
-{
-       int ret = 0;
-       struct ust_app_session *ua_sess = NULL;
-
-       /*
-        * The application's configuration should only be synchronized for
-        * active sessions.
-        */
-       LTTNG_ASSERT(usess->active);
-
-       ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
-       if (ret < 0) {
-               /* Tracer is probably gone or ENOMEM. */
-               if (ua_sess) {
-                       destroy_app_session(app, ua_sess);
-               }
-               goto end;
-       }
-       LTTNG_ASSERT(ua_sess);
-
-       pthread_mutex_lock(&ua_sess->lock);
-       if (ua_sess->deleted) {
-               goto deleted_session;
-       }
-
-       rcu_read_lock();
-
-       ust_app_synchronize_all_channels(usess, ua_sess, app);
-
-       /*
-        * Create the metadata for the application. This returns gracefully if
-        * metadata was already set for the session.
-        *
-        * The metadata channel must be created after the data channels as the
-        * consumer daemon assumes this ordering. When interacting with a relay
-        * daemon, the consumer will use this assumption to send the
-        * "STREAMS_SENT" message to the relay daemon.
-        */
-       ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
-       if (ret < 0) {
-               ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
-                               app->sock, usess->id);
-       }
-
-       rcu_read_unlock();
-
-deleted_session:
-       pthread_mutex_unlock(&ua_sess->lock);
-end:
-       return;
-}
-
-static
-void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
-{
-       struct ust_app_session *ua_sess;
-
-       ua_sess = lookup_session_by_app(usess, app);
-       if (ua_sess == NULL) {
-               return;
-       }
-       destroy_app_session(app, ua_sess);
-}
-
-/*
- * Add channels/events from UST global domain to registered apps at sock.
- *
- * Called with session lock held.
- * Called with RCU read-side lock held.
- */
-void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
-{
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(usess->active);
-
-       DBG2("UST app global update for app sock %d for session id %" PRIu64,
-                       app->sock, usess->id);
-
-       if (!app->compatible) {
-               return;
-       }
-       if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
-                           usess, app->pid) &&
-                       trace_ust_id_tracker_lookup(
-                                       LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
-                                       usess, app->uid) &&
-                       trace_ust_id_tracker_lookup(
-                                       LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
-                                       usess, app->gid)) {
-               /*
-                * Synchronize the application's internal tracing configuration
-                * and start tracing.
-                */
-               ust_app_synchronize(usess, app);
-               ust_app_start_trace(usess, app);
-       } else {
-               ust_app_global_destroy(usess, app);
-       }
-}
-
-/*
- * Add all event notifiers to an application.
- *
- * Called with session lock held.
- * Called with RCU read-side lock held.
- */
-void ust_app_global_update_event_notifier_rules(struct ust_app *app)
-{
-       DBG2("UST application global event notifier rules update: app = '%s', pid = %d",
-                       app->name, app->pid);
-
-       if (!app->compatible || !ust_app_supports_notifiers(app)) {
-               return;
-       }
-
-       if (app->event_notifier_group.object == NULL) {
-               WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s', pid = %d",
-                               app->name, app->pid);
-               return;
-       }
-
-       ust_app_synchronize_event_notifier_rules(app);
-}
-
-/*
- * Called with session lock held.
- */
-void ust_app_global_update_all(struct ltt_ust_session *usess)
-{
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               ust_app_global_update(usess, app);
-       }
-       rcu_read_unlock();
-}
-
-void ust_app_global_update_all_event_notifier_rules(void)
-{
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               ust_app_global_update_event_notifier_rules(app);
-       }
-
-       rcu_read_unlock();
-}
-
-/*
- * Add context to a specific channel for global UST domain.
- */
-int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
-{
-       int ret = 0;
-       struct lttng_ht_node_str *ua_chan_node;
-       struct lttng_ht_iter iter, uiter;
-       struct ust_app_channel *ua_chan = NULL;
-       struct ust_app_session *ua_sess;
-       struct ust_app *app;
-
-       LTTNG_ASSERT(usess->active);
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notify the caller of this error by
-                        * telling them that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (ua_sess == NULL) {
-                       continue;
-               }
-
-               pthread_mutex_lock(&ua_sess->lock);
-
-               if (ua_sess->deleted) {
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       continue;
-               }
-
-               /* Lookup channel in the ust app session */
-               lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               if (ua_chan_node == NULL) {
-                       goto next_app;
-               }
-               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
-                               node);
-               ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
-               if (ret < 0) {
-                       goto next_app;
-               }
-       next_app:
-               pthread_mutex_unlock(&ua_sess->lock);
-       }
-
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Receive registration and populate the given msg structure.
- *
- * On success, return 0; otherwise, return the negative value from the ustctl call.
- */
-int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
-{
-       int ret;
-       uint32_t pid, ppid, uid, gid;
-
-       LTTNG_ASSERT(msg);
-
-       ret = lttng_ust_ctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
-                       &pid, &ppid, &uid, &gid,
-                       &msg->bits_per_long,
-                       &msg->uint8_t_alignment,
-                       &msg->uint16_t_alignment,
-                       &msg->uint32_t_alignment,
-                       &msg->uint64_t_alignment,
-                       &msg->long_alignment,
-                       &msg->byte_order,
-                       msg->name);
-       if (ret < 0) {
-               switch (-ret) {
-               case EPIPE:
-               case ECONNRESET:
-               case LTTNG_UST_ERR_EXITING:
-                       DBG3("UST app recv reg message failed. Application died");
-                       break;
-               case LTTNG_UST_ERR_UNSUP_MAJOR:
-                       ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
-                                       msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
-                                       LTTNG_UST_ABI_MINOR_VERSION);
-                       break;
-               default:
-                       ERR("UST app recv reg message failed with ret %d", ret);
-                       break;
-               }
-               goto error;
-       }
-       msg->pid = (pid_t) pid;
-       msg->ppid = (pid_t) ppid;
-       msg->uid = (uid_t) uid;
-       msg->gid = (gid_t) gid;
-
-error:
-       return ret;
-}
-
-/*
- * Return a ust app session object using the application object and the
- * session object descriptor as a key. If not found, NULL is returned.
- * An RCU read side lock MUST be acquired when calling this function.
- */
-static struct ust_app_session *find_session_by_objd(struct ust_app *app,
-               int objd)
-{
-       struct lttng_ht_node_ulong *node;
-       struct lttng_ht_iter iter;
-       struct ust_app_session *ua_sess = NULL;
-
-       LTTNG_ASSERT(app);
-
-       lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
-       node = lttng_ht_iter_get_node_ulong(&iter);
-       if (node == NULL) {
-               DBG2("UST app session find by objd %d not found", objd);
-               goto error;
-       }
-
-       ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
-
-error:
-       return ua_sess;
-}
-
-/*
- * Return a ust app channel object using the application object and the channel
- * object descriptor as a key. If not found, NULL is returned. An RCU read side
- * lock MUST be acquired before calling this function.
- */
-static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
-               int objd)
-{
-       struct lttng_ht_node_ulong *node;
-       struct lttng_ht_iter iter;
-       struct ust_app_channel *ua_chan = NULL;
-
-       LTTNG_ASSERT(app);
-
-       lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
-       node = lttng_ht_iter_get_node_ulong(&iter);
-       if (node == NULL) {
-               DBG2("UST app channel find by objd %d not found", objd);
-               goto error;
-       }
-
-       ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
-
-error:
-       return ua_chan;
-}
-
-/*
- * Reply to a register channel notification from an application on the notify
- * socket. The channel metadata is also created.
- *
- * The session UST registry lock is acquired in this function.
- *
- * On success 0 is returned else a negative value.
- */
-static int reply_ust_register_channel(int sock, int cobjd,
-               size_t nr_fields, struct lttng_ust_ctl_field *fields)
-{
-       int ret, ret_code = 0;
-       uint32_t chan_id;
-       uint64_t chan_reg_key;
-       enum lttng_ust_ctl_channel_header type;
-       struct ust_app *app;
-       struct ust_app_channel *ua_chan;
-       struct ust_app_session *ua_sess;
-       struct ust_registry_session *registry;
-       struct ust_registry_channel *ust_reg_chan;
-
-       rcu_read_lock();
-
-       /* Lookup application. If not found, there is a code flow error. */
-       app = find_app_by_notify_sock(sock);
-       if (!app) {
-               DBG("Application socket %d is being torn down. Abort event notify",
-                               sock);
-               ret = -1;
-               goto error_rcu_unlock;
-       }
-
-       /* Lookup channel by UST object descriptor. */
-       ua_chan = find_channel_by_objd(app, cobjd);
-       if (!ua_chan) {
-               DBG("Application channel is being torn down. Abort event notify");
-               ret = 0;
-               goto error_rcu_unlock;
-       }
-
-       LTTNG_ASSERT(ua_chan->session);
-       ua_sess = ua_chan->session;
-
-       /* Get the right session registry depending on the session buffer type. */
-       registry = get_session_registry(ua_sess);
-       if (!registry) {
-               DBG("Application session is being torn down. Abort event notify");
-               ret = 0;
-               goto error_rcu_unlock;
-       }
-
-       /* Depending on the buffer type, a different channel key is used. */
-       if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
-               chan_reg_key = ua_chan->tracing_channel_id;
-       } else {
-               chan_reg_key = ua_chan->key;
-       }
-
-       pthread_mutex_lock(&registry->lock);
-
-       ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
-       LTTNG_ASSERT(ust_reg_chan);
-
-       if (!ust_reg_chan->register_done) {
-               /*
-                * TODO: eventually use the registry event count for
-                * this channel to better guess header type for per-pid
-                * buffers.
-                */
-               type = LTTNG_UST_CTL_CHANNEL_HEADER_LARGE;
-               ust_reg_chan->nr_ctx_fields = nr_fields;
-               ust_reg_chan->ctx_fields = fields;
-               fields = NULL;
-               ust_reg_chan->header_type = type;
-       } else {
-               /* Get current already assigned values. */
-               type = ust_reg_chan->header_type;
-       }
-       /* Channel id is set during the object creation. */
-       chan_id = ust_reg_chan->chan_id;
-
-       /* Append to metadata */
-       if (!ust_reg_chan->metadata_dumped) {
-               ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
-               if (ret_code) {
-                       ERR("Error appending channel metadata (errno = %d)", ret_code);
-                       goto reply;
-               }
-       }
-
-reply:
-       DBG3("UST app replying to register channel key %" PRIu64
-                       " with id %u, type = %d, ret = %d", chan_reg_key, chan_id, type,
-                       ret_code);
-
-       ret = lttng_ust_ctl_reply_register_channel(sock, chan_id, type, ret_code);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app reply channel failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d",
-                                       ret, app->pid, app->sock);
-               }
-               goto error;
-       }
-
-       /* This channel registry registration is completed. */
-       ust_reg_chan->register_done = 1;
-
-error:
-       pthread_mutex_unlock(&registry->lock);
-error_rcu_unlock:
-       rcu_read_unlock();
-       free(fields);
-       return ret;
-}
-
-/*
- * Add event to the UST channel registry. When the event is added to the
- * registry, the metadata is also created. Once done, this replies to the
- * application with the appropriate error code.
- *
- * The session UST registry lock is acquired in the function.
- *
- * On success 0 is returned else a negative value.
- */
-static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
-               char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields,
-               int loglevel_value, char *model_emf_uri)
-{
-       int ret, ret_code;
-       uint32_t event_id = 0;
-       uint64_t chan_reg_key;
-       struct ust_app *app;
-       struct ust_app_channel *ua_chan;
-       struct ust_app_session *ua_sess;
-       struct ust_registry_session *registry;
-
-       rcu_read_lock();
-
-       /* Lookup application. If not found, there is a code flow error. */
-       app = find_app_by_notify_sock(sock);
-       if (!app) {
-               DBG("Application socket %d is being torn down. Abort event notify",
-                               sock);
-               ret = -1;
-               goto error_rcu_unlock;
-       }
-
-       /* Lookup channel by UST object descriptor. */
-       ua_chan = find_channel_by_objd(app, cobjd);
-       if (!ua_chan) {
-               DBG("Application channel is being torn down. Abort event notify");
-               ret = 0;
-               goto error_rcu_unlock;
-       }
-
-       LTTNG_ASSERT(ua_chan->session);
-       ua_sess = ua_chan->session;
-
-       registry = get_session_registry(ua_sess);
-       if (!registry) {
-               DBG("Application session is being torn down. Abort event notify");
-               ret = 0;
-               goto error_rcu_unlock;
-       }
-
-       if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
-               chan_reg_key = ua_chan->tracing_channel_id;
-       } else {
-               chan_reg_key = ua_chan->key;
-       }
-
-       pthread_mutex_lock(&registry->lock);
-
-       /*
-        * From this point on, this call acquires ownership of sig, fields and
-        * model_emf_uri, meaning that any needed frees are done inside it. These
-        * three variables MUST NOT be read or written after this point.
-        */
-       ret_code = ust_registry_create_event(registry, chan_reg_key,
-                       sobjd, cobjd, name, sig, nr_fields, fields,
-                       loglevel_value, model_emf_uri, ua_sess->buffer_type,
-                       &event_id, app);
-       sig = NULL;
-       fields = NULL;
-       model_emf_uri = NULL;
-
-       /*
-        * The return value is sent back to ustctl so that, in case of an error,
-        * the application can be notified. It is important not to return a
-        * negative error or else the application will get closed.
-        */
-       ret = lttng_ust_ctl_reply_register_event(sock, event_id, ret_code);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app reply event failed. Application died: pid = %d, sock = %d.",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app reply event failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app reply event failed with ret %d: pid = %d, sock = %d",
-                                       ret, app->pid, app->sock);
-               }
-               /*
-                * No need to wipe the created event since the application socket will
-                * get closed on error, hence cleaning up everything by itself.
-                */
-               goto error;
-       }
-
-       DBG3("UST registry event %s with id %" PRIu32 " added successfully",
-                       name, event_id);
-
-error:
-       pthread_mutex_unlock(&registry->lock);
-error_rcu_unlock:
-       rcu_read_unlock();
-       free(sig);
-       free(fields);
-       free(model_emf_uri);
-       return ret;
-}
-
-/*
- * Add enum to the UST session registry. Once done, this replies to the
- * application with the appropriate error code.
- *
- * The session UST registry lock is acquired within this function.
- *
- * On success 0 is returned else a negative value.
- */
-static int add_enum_ust_registry(int sock, int sobjd, char *name,
-               struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries)
-{
-       int ret = 0, ret_code;
-       struct ust_app *app;
-       struct ust_app_session *ua_sess;
-       struct ust_registry_session *registry;
-       uint64_t enum_id = -1ULL;
-
-       rcu_read_lock();
-
-       /* Lookup application. If not found, there is a code flow error. */
-       app = find_app_by_notify_sock(sock);
-       if (!app) {
-               /* Return an error even though this is not an error per se. */
-               DBG("Application socket %d is being torn down. Aborting enum registration",
-                               sock);
-               free(entries);
-               ret = -1;
-               goto error_rcu_unlock;
-       }
-
-       /* Lookup session by UST object descriptor. */
-       ua_sess = find_session_by_objd(app, sobjd);
-       if (!ua_sess) {
-               /* Do not return an error since this is not an error per se. */
-               DBG("Application session is being torn down (session not found). Aborting enum registration.");
-               free(entries);
-               goto error_rcu_unlock;
-       }
-
-       registry = get_session_registry(ua_sess);
-       if (!registry) {
-               DBG("Application session is being torn down (registry not found). Aborting enum registration.");
-               free(entries);
-               goto error_rcu_unlock;
-       }
-
-       pthread_mutex_lock(&registry->lock);
-
-       /*
-        * From this point on, the callee acquires the ownership of
-        * entries. The variable entries MUST NOT be read/written after
-        * call.
-        */
-       ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
-                       entries, nr_entries, &enum_id);
-       entries = NULL;
-
-       /*
-        * The return value is returned to ustctl so in case of an error, the
-        * application can be notified. In case of an error, it's important not to
-        * return a negative error or else the application will get closed.
-        */
-       ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, ret_code);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app reply enum failed. Communication time out: pid = %d, sock = %d",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app reply enum failed with ret %d: pid = %d, sock = %d",
-                                       ret, app->pid, app->sock);
-               }
-               /*
-                * No need to wipe the create enum since the application socket will
-                * get close on error hence cleaning up everything by itself.
-                */
-               goto error;
-       }
-
-       DBG3("UST registry enum %s added successfully or already found", name);
-
-error:
-       pthread_mutex_unlock(&registry->lock);
-error_rcu_unlock:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Handle application notification through the given notify socket.
- *
- * Return 0 on success or else a negative value.
- */
-int ust_app_recv_notify(int sock)
-{
-       int ret;
-       enum lttng_ust_ctl_notify_cmd cmd;
-
-       DBG3("UST app receiving notify from sock %d", sock);
-
-       ret = lttng_ust_ctl_recv_notify(sock, &cmd);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app recv notify failed. Application died: sock = %d",
-                                       sock);
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app recv notify failed. Communication time out: sock = %d",
-                                       sock);
-               } else {
-                       ERR("UST app recv notify failed with ret %d: sock = %d",
-                                       ret, sock);
-               }
-               goto error;
-       }
-
-       switch (cmd) {
-       case LTTNG_UST_CTL_NOTIFY_CMD_EVENT:
-       {
-               int sobjd, cobjd, loglevel_value;
-               char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
-               size_t nr_fields;
-               struct lttng_ust_ctl_field *fields;
-
-               DBG2("UST app ustctl register event received");
-
-               ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name,
-                               &loglevel_value, &sig, &nr_fields, &fields,
-                               &model_emf_uri);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG3("UST app recv event failed. Application died: sock = %d",
-                                               sock);
-                       } else if (ret == -EAGAIN) {
-                               WARN("UST app recv event failed. Communication time out: sock = %d",
-                                               sock);
-                       } else {
-                               ERR("UST app recv event failed with ret %d: sock = %d",
-                                               ret, sock);
-                       }
-                       goto error;
-               }
-
-               /*
-                * Add the event to the UST registry coming from the notify socket.
-                * This call will free sig, fields and model_emf_uri if needed. This
-                * code path loses ownership of these variables and transfers them
-                * to the callee.
-                */
-               ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
-                               fields, loglevel_value, model_emf_uri);
-               if (ret < 0) {
-                       goto error;
-               }
-
-               break;
-       }
-       case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL:
-       {
-               int sobjd, cobjd;
-               size_t nr_fields;
-               struct lttng_ust_ctl_field *fields;
-
-               DBG2("UST app ustctl register channel received");
-
-               ret = lttng_ust_ctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
-                               &fields);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG3("UST app recv channel failed. Application died: sock = %d",
-                                               sock);
-                       } else if (ret == -EAGAIN) {
-                               WARN("UST app recv channel failed. Communication time out: sock = %d",
-                                               sock);
-                       } else {
-                               ERR("UST app recv channel failed with ret %d: sock = %d",
-                                               ret, sock);
-                       }
-                       goto error;
-               }
-
-               /*
-                * Ownership of fields is transferred to this function call, meaning
-                * that they will be freed if needed. After this, it's invalid to
-                * access or free fields.
-                */
-               ret = reply_ust_register_channel(sock, cobjd, nr_fields,
-                               fields);
-               if (ret < 0) {
-                       goto error;
-               }
-
-               break;
-       }
-       case LTTNG_UST_CTL_NOTIFY_CMD_ENUM:
-       {
-               int sobjd;
-               char name[LTTNG_UST_ABI_SYM_NAME_LEN];
-               size_t nr_entries;
-               struct lttng_ust_ctl_enum_entry *entries;
-
-               DBG2("UST app ustctl register enum received");
-
-               ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name,
-                               &entries, &nr_entries);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG3("UST app recv enum failed. Application died: sock = %d",
-                                               sock);
-                       } else if (ret == -EAGAIN) {
-                               WARN("UST app recv enum failed. Communication time out: sock = %d",
-                                               sock);
-                       } else {
-                               ERR("UST app recv enum failed with ret %d: sock = %d",
-                                               ret, sock);
-                       }
-                       goto error;
-               }
-
-               /* Callee assumes ownership of entries */
-               ret = add_enum_ust_registry(sock, sobjd, name,
-                               entries, nr_entries);
-               if (ret < 0) {
-                       goto error;
-               }
-
-               break;
-       }
-       default:
-               /* Should NEVER happen. */
-               abort();
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Once the notify socket hangs up, this is called. First, it tries to find the
- * corresponding application. On failure, the call_rcu to close the socket is
- * executed. If an application is found, it tries to delete it from the notify
- * socket hash table. Whatever the result, it proceeds to the call_rcu.
- *
- * Note that an object needs to be allocated here, so on ENOMEM failure the
- * call_rcu is not done but the rest of the cleanup is.
- */
-void ust_app_notify_sock_unregister(int sock)
-{
-       int err_enomem = 0;
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-       struct ust_app_notify_sock_obj *obj;
-
-       LTTNG_ASSERT(sock >= 0);
-
-       rcu_read_lock();
-
-       obj = zmalloc(sizeof(*obj));
-       if (!obj) {
-               /*
-                * An ENOMEM here is unfortunate. If it strikes, we continue the
-                * procedure, but the call_rcu will not be called. In this case, we
-                * accept the fd leak rather than possibly creating an unsynchronized
-                * state between threads.
-                *
-                * TODO: The notify object should be created once the notify socket is
-                * registered and stored independently from the ust app object. The
-                * tricky part is to synchronize the teardown of the application and
-                * this notify object. Let's keep that in mind so we can avoid this
-                * kind of shenanigans with ENOMEM in the teardown path.
-                */
-               err_enomem = 1;
-       } else {
-               obj->fd = sock;
-       }
-
-       DBG("UST app notify socket unregister %d", sock);
-
-       /*
-        * Lookup application by notify socket. If this fails, this means that the
-        * hash table delete has already been done by the application
-        * unregistration process so we can safely close the notify socket in a
-        * call RCU.
-        */
-       app = find_app_by_notify_sock(sock);
-       if (!app) {
-               goto close_socket;
-       }
-
-       iter.iter.node = &app->notify_sock_n.node;
-
-       /*
-        * Whether the deletion below fails or succeeds, we have to close the
-        * socket after a grace period, so we continue to the call_rcu in both
-        * cases. If the deletion is successful, the application is no longer
-        * visible to other threads; if it fails, it means it was already deleted
-        * from the hash table. Either way, we just have to close the socket.
-        */
-       (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
-
-close_socket:
-       rcu_read_unlock();
-
-       /*
-        * Close the socket after a grace period to avoid it being reused before
-        * the application object is freed, which would create a potential race
-        * between threads trying to add a unique entry to the global hash table.
-        */
-       if (!err_enomem) {
-               call_rcu(&obj->head, close_notify_sock_rcu);
-       }
-}
-
-/*
- * Destroy a ust app data structure and free its memory.
- */
-void ust_app_destroy(struct ust_app *app)
-{
-       if (!app) {
-               return;
-       }
-
-       call_rcu(&app->pid_n.head, delete_ust_app_rcu);
-}
-
-/*
- * Take a snapshot for a given UST session. The snapshot is sent to the given
- * output.
- *
- * Returns LTTNG_OK on success or a LTTNG_ERR error code.
- */
-enum lttng_error_code ust_app_snapshot_record(
-               const struct ltt_ust_session *usess,
-               const struct consumer_output *output, int wait,
-               uint64_t nb_packets_per_stream)
-{
-       int ret = 0;
-       enum lttng_error_code status = LTTNG_OK;
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-       char *trace_path = NULL;
-
-       LTTNG_ASSERT(usess);
-       LTTNG_ASSERT(output);
-
-       rcu_read_lock();
-
-       switch (usess->buffer_type) {
-       case LTTNG_BUFFER_PER_UID:
-       {
-               struct buffer_reg_uid *reg;
-
-               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
-                       struct buffer_reg_channel *buf_reg_chan;
-                       struct consumer_socket *socket;
-                       char pathname[PATH_MAX];
-                       size_t consumer_path_offset = 0;
-
-                       if (!reg->registry->reg.ust->metadata_key) {
-                               /* Skip since no metadata is present */
-                               continue;
-                       }
-
-                       /* Get consumer socket to use to push the metadata. */
-                       socket = consumer_find_socket_by_bitness(reg->bits_per_long,
-                                       usess->consumer);
-                       if (!socket) {
-                               status = LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-
-                       memset(pathname, 0, sizeof(pathname));
-                       ret = snprintf(pathname, sizeof(pathname),
-                                       DEFAULT_UST_TRACE_UID_PATH,
-                                       reg->uid, reg->bits_per_long);
-                       if (ret < 0) {
-                               PERROR("snprintf snapshot path");
-                               status = LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-                       /* Free path allocated on previous iteration. */
-                       free(trace_path);
-                       trace_path = setup_channel_trace_path(usess->consumer, pathname,
-                                               &consumer_path_offset);
-                       if (!trace_path) {
-                               status = LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-                       /* Add the UST default trace dir to path. */
-                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
-                                       buf_reg_chan, node.node) {
-                               status = consumer_snapshot_channel(socket,
-                                               buf_reg_chan->consumer_key,
-                                               output, 0, usess->uid,
-                                               usess->gid, &trace_path[consumer_path_offset], wait,
-                                               nb_packets_per_stream);
-                               if (status != LTTNG_OK) {
-                                       goto error;
-                               }
-                       }
-                       status = consumer_snapshot_channel(socket,
-                                       reg->registry->reg.ust->metadata_key, output, 1,
-                                       usess->uid, usess->gid, &trace_path[consumer_path_offset],
-                                       wait, 0);
-                       if (status != LTTNG_OK) {
-                               goto error;
-                       }
-               }
-               break;
-       }
-       case LTTNG_BUFFER_PER_PID:
-       {
-               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-                       struct consumer_socket *socket;
-                       struct lttng_ht_iter chan_iter;
-                       struct ust_app_channel *ua_chan;
-                       struct ust_app_session *ua_sess;
-                       struct ust_registry_session *registry;
-                       char pathname[PATH_MAX];
-                       size_t consumer_path_offset = 0;
-
-                       ua_sess = lookup_session_by_app(usess, app);
-                       if (!ua_sess) {
-                               /* Session not associated with this app. */
-                               continue;
-                       }
-
-                       /* Get the right consumer socket for the application. */
-                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
-                                       output);
-                       if (!socket) {
-                               status = LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-
-                       /* Add the UST default trace dir to path. */
-                       memset(pathname, 0, sizeof(pathname));
-                       ret = snprintf(pathname, sizeof(pathname), "%s",
-                                       ua_sess->path);
-                       if (ret < 0) {
-                               status = LTTNG_ERR_INVALID;
-                               PERROR("snprintf snapshot path");
-                               goto error;
-                       }
-                       /* Free path allocated on previous iteration. */
-                       free(trace_path);
-                       trace_path = setup_channel_trace_path(usess->consumer, pathname,
-                                       &consumer_path_offset);
-                       if (!trace_path) {
-                               status = LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
-                                       ua_chan, node.node) {
-                               status = consumer_snapshot_channel(socket,
-                                               ua_chan->key, output, 0,
-                                               lttng_credentials_get_uid(&ua_sess->effective_credentials),
-                                               lttng_credentials_get_gid(&ua_sess->effective_credentials),
-                                               &trace_path[consumer_path_offset], wait,
-                                               nb_packets_per_stream);
-                               switch (status) {
-                               case LTTNG_OK:
-                                       break;
-                               case LTTNG_ERR_CHAN_NOT_FOUND:
-                                       continue;
-                               default:
-                                       goto error;
-                               }
-                       }
-
-                       registry = get_session_registry(ua_sess);
-                       if (!registry) {
-                               DBG("Application session is being torn down. Skip application.");
-                               continue;
-                       }
-                       status = consumer_snapshot_channel(socket,
-                                       registry->metadata_key, output, 1,
-                                       lttng_credentials_get_uid(&ua_sess->effective_credentials),
-                                       lttng_credentials_get_gid(&ua_sess->effective_credentials),
-                                       &trace_path[consumer_path_offset], wait, 0);
-                       switch (status) {
-                       case LTTNG_OK:
-                               break;
-                       case LTTNG_ERR_CHAN_NOT_FOUND:
-                               continue;
-                       default:
-                               goto error;
-                       }
-               }
-               break;
-       }
-       default:
-               abort();
-               break;
-       }
-
-error:
-       free(trace_path);
-       rcu_read_unlock();
-       return status;
-}
-
-/*
- * Return the size taken by one more packet per stream.
- */
-uint64_t ust_app_get_size_one_more_packet_per_stream(
-               const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
-{
-       uint64_t tot_size = 0;
-       struct ust_app *app;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(usess);
-
-       switch (usess->buffer_type) {
-       case LTTNG_BUFFER_PER_UID:
-       {
-               struct buffer_reg_uid *reg;
-
-               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
-                       struct buffer_reg_channel *buf_reg_chan;
-
-                       rcu_read_lock();
-                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
-                                       buf_reg_chan, node.node) {
-                               if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
-                                       /*
-                                        * Don't take the channel into account if we
-                                        * already grabbed all its packets.
-                                        */
-                                       continue;
-                               }
-                               tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
-                       }
-                       rcu_read_unlock();
-               }
-               break;
-       }
-       case LTTNG_BUFFER_PER_PID:
-       {
-               rcu_read_lock();
-               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-                       struct ust_app_channel *ua_chan;
-                       struct ust_app_session *ua_sess;
-                       struct lttng_ht_iter chan_iter;
-
-                       ua_sess = lookup_session_by_app(usess, app);
-                       if (!ua_sess) {
-                               /* Session not associated with this app. */
-                               continue;
-                       }
-
-                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
-                                       ua_chan, node.node) {
-                               if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
-                                       /*
-                                        * Don't take the channel into account if we
-                                        * already grabbed all its packets.
-                                        */
-                                       continue;
-                               }
-                               tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
-                       }
-               }
-               rcu_read_unlock();
-               break;
-       }
-       default:
-               abort();
-               break;
-       }
-
-       return tot_size;
-}
-
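-/*
- * Get the per-UID runtime statistics of a channel: the number of lost packets
- * (overwrite mode) or of discarded events (discard mode).
- *
- * The consumer channel key is looked up in the per-UID buffer registry list;
- * if it is not found, the counters are left at zero and 0 is returned.
- * Otherwise, the consumer daemon is queried and its return code is reported.
- */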
-int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
-               struct cds_list_head *buffer_reg_uid_list,
-               struct consumer_output *consumer, uint64_t uchan_id,
-               int overwrite, uint64_t *discarded, uint64_t *lost)
-{
-       int ret;
-       uint64_t consumer_chan_key;
-
-       *discarded = 0;
-       *lost = 0;
-
-       ret = buffer_reg_uid_consumer_channel_key(
-                       buffer_reg_uid_list, uchan_id, &consumer_chan_key);
-       if (ret < 0) {
-               /* Not found */
-               ret = 0;
-               goto end;
-       }
-
-       if (overwrite) {
-               ret = consumer_get_lost_packets(ust_session_id,
-                               consumer_chan_key, consumer, lost);
-       } else {
-               ret = consumer_get_discarded_events(ust_session_id,
-                               consumer_chan_key, consumer, discarded);
-       }
-
-end:
-       return ret;
-}
-
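-/*
- * Get the per-PID runtime statistics of a channel by iterating over every
- * registered application tracing the given session and summing the number of
- * lost packets (overwrite mode) or of discarded events (discard mode)
- * reported by the consumer for each of them.
- */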
-int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan,
-               struct consumer_output *consumer, int overwrite,
-               uint64_t *discarded, uint64_t *lost)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_str *ua_chan_node;
-       struct ust_app *app;
-       struct ust_app_session *ua_sess;
-       struct ust_app_channel *ua_chan;
-
-       *discarded = 0;
-       *lost = 0;
-
-       rcu_read_lock();
-       /*
-        * Iterate over every registered application. Sum counters for
-        * all applications containing the requested session and channel.
-        */
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               struct lttng_ht_iter uiter;
-
-               ua_sess = lookup_session_by_app(usess, app);
-               if (ua_sess == NULL) {
-                       continue;
-               }
-
-               /* Get channel */
-               lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               /* If the session is found for the app, the channel must be there */
-               LTTNG_ASSERT(ua_chan_node);
-
-               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
-
-               if (overwrite) {
-                       uint64_t _lost;
-
-                       ret = consumer_get_lost_packets(usess->id, ua_chan->key,
-                                       consumer, &_lost);
-                       if (ret < 0) {
-                               break;
-                       }
-                       (*lost) += _lost;
-               } else {
-                       uint64_t _discarded;
-
-                       ret = consumer_get_discarded_events(usess->id,
-                                       ua_chan->key, consumer, &_discarded);
-                       if (ret < 0) {
-                               break;
-                       }
-                       (*discarded) += _discarded;
-               }
-       }
-
-       rcu_read_unlock();
-       return ret;
-}
-
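-/*
- * Ask a single application to regenerate the statedump of the given session.
- *
- * On success 0 is returned else a negative value.
- */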
-static
-int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
-               struct ust_app *app)
-{
-       int ret = 0;
-       struct ust_app_session *ua_sess;
-
-       DBG("Regenerating the metadata for ust app pid %d", app->pid);
-
-       rcu_read_lock();
-
-       ua_sess = lookup_session_by_app(usess, app);
-       if (ua_sess == NULL) {
-               /* The session is in teardown process. Ignore and continue. */
-               goto end;
-       }
-
-       pthread_mutex_lock(&ua_sess->lock);
-
-       if (ua_sess->deleted) {
-               goto end_unlock;
-       }
-
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_regenerate_statedump(app->sock, ua_sess->handle);
-       pthread_mutex_unlock(&app->sock_lock);
-
-end_unlock:
-       pthread_mutex_unlock(&ua_sess->lock);
-
-end:
-       rcu_read_unlock();
-       health_code_update();
-       return ret;
-}
-
-/*
- * Regenerate the statedump for each app in the session.
- */
-int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-
-       DBG("Regenerating the metadata for all UST apps");
-
-       rcu_read_lock();
-
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       continue;
-               }
-
-               ret = ust_app_regenerate_statedump(usess, app);
-               if (ret < 0) {
-                       /* Continue to the next app even on error */
-                       continue;
-               }
-       }
-
-       rcu_read_unlock();
-
-       return 0;
-}
-
-/*
- * Rotate all the channels of a session.
- *
- * Return LTTNG_OK on success or else an LTTng error code.
- */
-enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
-{
-       int ret;
-       enum lttng_error_code cmd_ret = LTTNG_OK;
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-       struct ltt_ust_session *usess = session->ust_session;
-
-       LTTNG_ASSERT(usess);
-
-       rcu_read_lock();
-
-       switch (usess->buffer_type) {
-       case LTTNG_BUFFER_PER_UID:
-       {
-               struct buffer_reg_uid *reg;
-
-               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
-                       struct buffer_reg_channel *buf_reg_chan;
-                       struct consumer_socket *socket;
-
-                       /* Get consumer socket to use to push the metadata. */
-                       socket = consumer_find_socket_by_bitness(reg->bits_per_long,
-                                       usess->consumer);
-                       if (!socket) {
-                               cmd_ret = LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-
-                       /* Rotate the data channels. */
-                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
-                                       buf_reg_chan, node.node) {
-                               ret = consumer_rotate_channel(socket,
-                                               buf_reg_chan->consumer_key,
-                                               usess->uid, usess->gid,
-                                               usess->consumer,
-                                               /* is_metadata_channel */ false);
-                               if (ret < 0) {
-                                       cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
-                                       goto error;
-                               }
-                       }
-
-                       /*
-                        * The metadata channel might not be present.
-                        *
-                        * Consumer stream allocation can be done
-                        * asynchronously and can fail on intermediary
-                        * operations (e.g. adding a context) and lead to data
-                        * channels created with no metadata channel.
-                        */
-                       if (!reg->registry->reg.ust->metadata_key) {
-                               /* Skip since no metadata is present. */
-                               continue;
-                       }
-
-                       (void) push_metadata(reg->registry->reg.ust, usess->consumer);
-
-                       ret = consumer_rotate_channel(socket,
-                                       reg->registry->reg.ust->metadata_key,
-                                       usess->uid, usess->gid,
-                                       usess->consumer,
-                                       /* is_metadata_channel */ true);
-                       if (ret < 0) {
-                               cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
-                               goto error;
-                       }
-               }
-               break;
-       }
-       case LTTNG_BUFFER_PER_PID:
-       {
-               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-                       struct consumer_socket *socket;
-                       struct lttng_ht_iter chan_iter;
-                       struct ust_app_channel *ua_chan;
-                       struct ust_app_session *ua_sess;
-                       struct ust_registry_session *registry;
-
-                       ua_sess = lookup_session_by_app(usess, app);
-                       if (!ua_sess) {
-                               /* Session not associated with this app. */
-                               continue;
-                       }
-
-                       /* Get the right consumer socket for the application. */
-                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
-                                       usess->consumer);
-                       if (!socket) {
-                               cmd_ret = LTTNG_ERR_INVALID;
-                               goto error;
-                       }
-
-                       registry = get_session_registry(ua_sess);
-                       if (!registry) {
-                               DBG("Application session is being torn down. Skip application.");
-                               continue;
-                       }
-
-                       /* Rotate the data channels. */
-                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
-                                       ua_chan, node.node) {
-                               ret = consumer_rotate_channel(socket,
-                                               ua_chan->key,
-                                               lttng_credentials_get_uid(&ua_sess->effective_credentials),
-                                               lttng_credentials_get_gid(&ua_sess->effective_credentials),
-                                               ua_sess->consumer,
-                                               /* is_metadata_channel */ false);
-                               if (ret < 0) {
-                                       /* Per-PID buffer and application going away. */
-                                       if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
-                                               continue;
-                                       cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
-                                       goto error;
-                               }
-                       }
-
-                       /* Rotate the metadata channel. */
-                       (void) push_metadata(registry, usess->consumer);
-                       ret = consumer_rotate_channel(socket,
-                                       registry->metadata_key,
-                                       lttng_credentials_get_uid(&ua_sess->effective_credentials),
-                                       lttng_credentials_get_gid(&ua_sess->effective_credentials),
-                                       ua_sess->consumer,
-                                       /* is_metadata_channel */ true);
-                       if (ret < 0) {
-                               /* Per-PID buffer and application going away. */
-                               if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
-                                       continue;
-                               cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
-                               goto error;
-                       }
-               }
-               break;
-       }
-       default:
-               abort();
-               break;
-       }
-
-       cmd_ret = LTTNG_OK;
-
-error:
-       rcu_read_unlock();
-       return cmd_ret;
-}
-
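-/*
- * Create the channel index subdirectories of a session inside its current
- * trace chunk: one per buffer registry in per-UID mode, or one per application
- * session in per-PID mode. Creating the index subdirectory implicitly creates
- * the channel's path.
- *
- * Returns LTTNG_OK on success or else an LTTng error code.
- */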
-enum lttng_error_code ust_app_create_channel_subdirectories(
-               const struct ltt_ust_session *usess)
-{
-       enum lttng_error_code ret = LTTNG_OK;
-       struct lttng_ht_iter iter;
-       enum lttng_trace_chunk_status chunk_status;
-       char *pathname_index;
-       int fmt_ret;
-
-       LTTNG_ASSERT(usess->current_trace_chunk);
-       rcu_read_lock();
-
-       switch (usess->buffer_type) {
-       case LTTNG_BUFFER_PER_UID:
-       {
-               struct buffer_reg_uid *reg;
-
-               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
-                       fmt_ret = asprintf(&pathname_index,
-                                      DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
-                                      reg->uid, reg->bits_per_long);
-                       if (fmt_ret < 0) {
-                               ERR("Failed to format channel index directory");
-                               ret = LTTNG_ERR_CREATE_DIR_FAIL;
-                               goto error;
-                       }
-
-                       /*
-                        * Create the index subdirectory which will take care
-                        * of implicitly creating the channel's path.
-                        */
-                       chunk_status = lttng_trace_chunk_create_subdirectory(
-                                       usess->current_trace_chunk,
-                                       pathname_index);
-                       free(pathname_index);
-                       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                               ret = LTTNG_ERR_CREATE_DIR_FAIL;
-                               goto error;
-                       }
-               }
-               break;
-       }
-       case LTTNG_BUFFER_PER_PID:
-       {
-               struct ust_app *app;
-
-               /*
-                * Create the toplevel ust/ directory in case no apps are running.
-                */
-               chunk_status = lttng_trace_chunk_create_subdirectory(
-                               usess->current_trace_chunk,
-                               DEFAULT_UST_TRACE_DIR);
-               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       ret = LTTNG_ERR_CREATE_DIR_FAIL;
-                       goto error;
-               }
-
-               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
-                               pid_n.node) {
-                       struct ust_app_session *ua_sess;
-                       struct ust_registry_session *registry;
-
-                       ua_sess = lookup_session_by_app(usess, app);
-                       if (!ua_sess) {
-                               /* Session not associated with this app. */
-                               continue;
-                       }
-
-                       registry = get_session_registry(ua_sess);
-                       if (!registry) {
-                               DBG("Application session is being torn down. Skip application.");
-                               continue;
-                       }
-
-                       fmt_ret = asprintf(&pathname_index,
-                                       DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
-                                       ua_sess->path);
-                       if (fmt_ret < 0) {
-                               ERR("Failed to format channel index directory");
-                               ret = LTTNG_ERR_CREATE_DIR_FAIL;
-                               goto error;
-                       }
-                       /*
-                        * Create the index subdirectory which will take care
-                        * of implicitly creating the channel's path.
-                        */
-                       chunk_status = lttng_trace_chunk_create_subdirectory(
-                                       usess->current_trace_chunk,
-                                       pathname_index);
-                       free(pathname_index);
-                       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                               ret = LTTNG_ERR_CREATE_DIR_FAIL;
-                               goto error;
-                       }
-               }
-               break;
-       }
-       default:
-               abort();
-       }
-
-       ret = LTTNG_OK;
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Clear all the channels of a session.
- *
- * Return LTTNG_OK on success or else an LTTng error code.
- */
-enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
-{
-       int ret;
-       enum lttng_error_code cmd_ret = LTTNG_OK;
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-       struct ltt_ust_session *usess = session->ust_session;
-
-       LTTNG_ASSERT(usess);
-
-       rcu_read_lock();
-
-       if (usess->active) {
-               ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
-               cmd_ret = LTTNG_ERR_FATAL;
-               goto end;
-       }
-
-       switch (usess->buffer_type) {
-       case LTTNG_BUFFER_PER_UID:
-       {
-               struct buffer_reg_uid *reg;
-
-               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
-                       struct buffer_reg_channel *buf_reg_chan;
-                       struct consumer_socket *socket;
-
-                       /* Get consumer socket to use to push the metadata. */
-                       socket = consumer_find_socket_by_bitness(reg->bits_per_long,
-                                       usess->consumer);
-                       if (!socket) {
-                               cmd_ret = LTTNG_ERR_INVALID;
-                               goto error_socket;
-                       }
-
-                       /* Clear the data channels. */
-                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
-                                       buf_reg_chan, node.node) {
-                               ret = consumer_clear_channel(socket,
-                                               buf_reg_chan->consumer_key);
-                               if (ret < 0) {
-                                       goto error;
-                               }
-                       }
-
-                       (void) push_metadata(reg->registry->reg.ust, usess->consumer);
-
-                       /*
-                        * Clear the metadata channel.
-                        * The metadata channel is not cleared per se, but we still need
-                        * to perform a rotation operation on it behind the scenes.
-                        */
-                       ret = consumer_clear_channel(socket,
-                                       reg->registry->reg.ust->metadata_key);
-                       if (ret < 0) {
-                               goto error;
-                       }
-               }
-               break;
-       }
-       case LTTNG_BUFFER_PER_PID:
-       {
-               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-                       struct consumer_socket *socket;
-                       struct lttng_ht_iter chan_iter;
-                       struct ust_app_channel *ua_chan;
-                       struct ust_app_session *ua_sess;
-                       struct ust_registry_session *registry;
-
-                       ua_sess = lookup_session_by_app(usess, app);
-                       if (!ua_sess) {
-                               /* Session not associated with this app. */
-                               continue;
-                       }
-
-                       /* Get the right consumer socket for the application. */
-                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
-                                       usess->consumer);
-                       if (!socket) {
-                               cmd_ret = LTTNG_ERR_INVALID;
-                               goto error_socket;
-                       }
-
-                       registry = get_session_registry(ua_sess);
-                       if (!registry) {
-                               DBG("Application session is being torn down. Skip application.");
-                               continue;
-                       }
-
-                       /* Clear the data channels. */
-                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
-                                       ua_chan, node.node) {
-                               ret = consumer_clear_channel(socket, ua_chan->key);
-                               if (ret < 0) {
-                                       /* Per-PID buffer and application going away. */
-                                       if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
-                                               continue;
-                                       }
-                                       goto error;
-                               }
-                       }
-
-                       (void) push_metadata(registry, usess->consumer);
-
-                       /*
-                        * Clear the metadata channel.
-                        * The metadata channel is not cleared per se, but we still need
-                        * to perform a rotation operation on it behind the scenes.
-                        */
-                       ret = consumer_clear_channel(socket, registry->metadata_key);
-                       if (ret < 0) {
-                               /* Per-PID buffer and application going away. */
-                               if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
-                                       continue;
-                               }
-                               goto error;
-                       }
-               }
-               break;
-       }
-       default:
-               abort();
-               break;
-       }
-
-       cmd_ret = LTTNG_OK;
-       goto end;
-
-error:
-       switch (-ret) {
-       case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
-               cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
-               break;
-       default:
-               cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
-       }
-
-error_socket:
-end:
-       rcu_read_unlock();
-       return cmd_ret;
-}
-
-/*
- * This function skips the metadata channel as the begin/end timestamps of a
- * metadata packet are useless.
- *
- * Moreover, opening a packet after a "clear" will cause problems for live
- * sessions as it will introduce padding that was not part of the first trace
- * chunk. The relay daemon expects the content of the metadata stream of
- * successive metadata trace chunks to be strict supersets of one another.
- *
- * For example, flushing a packet at the beginning of the metadata stream of
- * a trace chunk resulting from a "clear" session command will cause the
- * size of the metadata stream of the new trace chunk to not match the size of
- * the metadata stream of the original chunk. This will confuse the relay
- * daemon as the same "offset" in a metadata stream will no longer point
- * to the same content.
- */
-enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
-{
-       enum lttng_error_code ret = LTTNG_OK;
-       struct lttng_ht_iter iter;
-       struct ltt_ust_session *usess = session->ust_session;
-
-       LTTNG_ASSERT(usess);
-
-       rcu_read_lock();
-
-       switch (usess->buffer_type) {
-       case LTTNG_BUFFER_PER_UID:
-       {
-               struct buffer_reg_uid *reg;
-
-               cds_list_for_each_entry (
-                               reg, &usess->buffer_reg_uid_list, lnode) {
-                       struct buffer_reg_channel *buf_reg_chan;
-                       struct consumer_socket *socket;
-
-                       socket = consumer_find_socket_by_bitness(
-                                       reg->bits_per_long, usess->consumer);
-                       if (!socket) {
-                               ret = LTTNG_ERR_FATAL;
-                               goto error;
-                       }
-
-                       cds_lfht_for_each_entry(reg->registry->channels->ht,
-                                       &iter.iter, buf_reg_chan, node.node) {
-                               const int open_ret =
-                                               consumer_open_channel_packets(
-                                                       socket,
-                                                       buf_reg_chan->consumer_key);
-
-                               if (open_ret < 0) {
-                                       ret = LTTNG_ERR_UNK;
-                                       goto error;
-                               }
-                       }
-               }
-               break;
-       }
-       case LTTNG_BUFFER_PER_PID:
-       {
-               struct ust_app *app;
-
-               cds_lfht_for_each_entry (
-                               ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-                       struct consumer_socket *socket;
-                       struct lttng_ht_iter chan_iter;
-                       struct ust_app_channel *ua_chan;
-                       struct ust_app_session *ua_sess;
-                       struct ust_registry_session *registry;
-
-                       ua_sess = lookup_session_by_app(usess, app);
-                       if (!ua_sess) {
-                               /* Session not associated with this app. */
-                               continue;
-                       }
-
-                       /* Get the right consumer socket for the application. */
-                       socket = consumer_find_socket_by_bitness(
-                                       app->bits_per_long, usess->consumer);
-                       if (!socket) {
-                               ret = LTTNG_ERR_FATAL;
-                               goto error;
-                       }
-
-                       registry = get_session_registry(ua_sess);
-                       if (!registry) {
-                               DBG("Application session is being torn down. Skip application.");
-                               continue;
-                       }
-
-                       cds_lfht_for_each_entry(ua_sess->channels->ht,
-                                       &chan_iter.iter, ua_chan, node.node) {
-                               const int open_ret =
-                                               consumer_open_channel_packets(
-                                                       socket,
-                                                       ua_chan->key);
-
-                               if (open_ret < 0) {
-                                       /*
-                                        * Per-PID buffer and application going
-                                        * away.
-                                        */
-                                       if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
-                                               continue;
-                                       }
-
-                                       ret = LTTNG_ERR_UNK;
-                                       goto error;
-                               }
-                       }
-               }
-               break;
-       }
-       default:
-               abort();
-               break;
-       }
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/ust-app.cpp b/src/bin/lttng-sessiond/ust-app.cpp
new file mode 100644 (file)
index 0000000..a848d47
--- /dev/null
@@ -0,0 +1,7818 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <urcu/compiler.h>
+#include <signal.h>
+
+#include <common/bytecode/bytecode.h>
+#include <common/compat/errno.h>
+#include <common/common.h>
+#include <common/hashtable/utils.h>
+#include <lttng/event-rule/event-rule.h>
+#include <lttng/event-rule/event-rule-internal.h>
+#include <lttng/event-rule/user-tracepoint.h>
+#include <lttng/condition/condition.h>
+#include <lttng/condition/event-rule-matches-internal.h>
+#include <lttng/condition/event-rule-matches.h>
+#include <lttng/trigger/trigger-internal.h>
+#include <common/sessiond-comm/sessiond-comm.h>
+
+#include "buffer-registry.h"
+#include "condition-internal.h"
+#include "fd-limit.h"
+#include "health-sessiond.h"
+#include "ust-app.h"
+#include "ust-consumer.h"
+#include "lttng-ust-ctl.h"
+#include "lttng-ust-error.h"
+#include "utils.h"
+#include "session.h"
+#include "lttng-sessiond.h"
+#include "notification-thread-commands.h"
+#include "rotate.h"
+#include "event.h"
+#include "event-notifier-error-accounting.h"
+
+
+struct lttng_ht *ust_app_ht;
+struct lttng_ht *ust_app_ht_by_sock;
+struct lttng_ht *ust_app_ht_by_notify_sock;
+
+static
+int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
+
+/* Next available channel key. Access under next_channel_key_lock. */
+static uint64_t _next_channel_key;
+static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/* Next available session ID. Access under next_session_id_lock. */
+static uint64_t _next_session_id;
+static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Return the incremented value of next_channel_key.
+ */
+static uint64_t get_next_channel_key(void)
+{
+       uint64_t ret;
+
+       pthread_mutex_lock(&next_channel_key_lock);
+       ret = ++_next_channel_key;
+       pthread_mutex_unlock(&next_channel_key_lock);
+       return ret;
+}
+
+/*
+ * Return the atomically incremented value of next_session_id.
+ */
+static uint64_t get_next_session_id(void)
+{
+       uint64_t ret;
+
+       pthread_mutex_lock(&next_session_id_lock);
+       ret = ++_next_session_id;
+       pthread_mutex_unlock(&next_session_id_lock);
+       return ret;
+}
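
The two helpers above are just monotonic counters protected by a pthread mutex. Purely as an illustration of the same idea now that the file is C++ (this is not what the patch does, and next_key/get_next_key are hypothetical names), an equivalent generator could be written with std::atomic:

    #include <atomic>
    #include <cstdint>

    /* Hypothetical sketch only; the patch keeps the mutex-protected version. */
    static std::atomic<std::uint64_t> next_key{0};

    static std::uint64_t get_next_key(void)
    {
            /* fetch_add returns the previous value, hence the + 1. */
            return next_key.fetch_add(1, std::memory_order_relaxed) + 1;
    }
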
+
+static void copy_channel_attr_to_ustctl(
+               struct lttng_ust_ctl_consumer_channel_attr *attr,
+               struct lttng_ust_abi_channel_attr *uattr)
+{
+       /* Copy channel attributes since the layout is different. */
+       attr->subbuf_size = uattr->subbuf_size;
+       attr->num_subbuf = uattr->num_subbuf;
+       attr->overwrite = uattr->overwrite;
+       attr->switch_timer_interval = uattr->switch_timer_interval;
+       attr->read_timer_interval = uattr->read_timer_interval;
+       attr->output = (lttng_ust_abi_output) uattr->output;
+       attr->blocking_timeout = uattr->u.s.blocking_timeout;
+}
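
Note the explicit (lttng_ust_abi_output) cast on the output field above: C lets an int convert to an enum type implicitly, while C++ rejects it, which is one of the recurring adjustments in this conversion. A minimal sketch of the rule, using a made-up output_kind enum and set_output function:

    /* Hypothetical enum and function, for illustration only. */
    enum output_kind { OUTPUT_MMAP = 0, OUTPUT_SPLICE = 1 };

    static void set_output(int raw)
    {
            enum output_kind k;

            /* k = raw;  <- accepted by C, rejected by a C++ compiler. */
            k = (enum output_kind) raw;     /* explicit cast: valid in both */
            (void) k;
    }
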
+
+/*
+ * Match function for the hash table lookup.
+ *
+ * It matches a ust app event based on four attributes: the event name, the
+ * filter bytecode, the loglevel and the exclusions.
+ */
+static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
+{
+       struct ust_app_event *event;
+       const struct ust_app_ht_key *key;
+       int ev_loglevel_value;
+
+       LTTNG_ASSERT(node);
+       LTTNG_ASSERT(_key);
+
+       event = caa_container_of(node, struct ust_app_event, node.node);
+       key = (ust_app_ht_key *) _key;
+       ev_loglevel_value = event->attr.loglevel;
+
+       /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
+
+       /* Event name */
+       if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
+               goto no_match;
+       }
+
+       /* Event loglevel. */
+       if (ev_loglevel_value != key->loglevel_type) {
+               if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL
+                               && key->loglevel_type == 0 &&
+                               ev_loglevel_value == -1) {
+                       /*
+                        * Match is accepted. On event creation, the loglevel is set
+                        * to -1 if the event loglevel type is ALL, so both 0 and -1
+                        * are accepted for this loglevel type since 0 is the value
+                        * set by the API when receiving an enable event.
+                        */
+               } else {
+                       goto no_match;
+               }
+       }
+
+       /* One of the filters is NULL, fail. */
+       if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
+               goto no_match;
+       }
+
+       if (key->filter && event->filter) {
+               /* Both filters exist, check the length followed by the bytecode. */
+               if (event->filter->len != key->filter->len ||
+                               memcmp(event->filter->data, key->filter->data,
+                                       event->filter->len) != 0) {
+                       goto no_match;
+               }
+       }
+
+       /* One of the exclusions is NULL, fail. */
+       if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
+               goto no_match;
+       }
+
+       if (key->exclusion && event->exclusion) {
+               /* Both exclusions exist, check the count followed by the names. */
+               if (event->exclusion->count != key->exclusion->count ||
+                               memcmp(event->exclusion->names, key->exclusion->names,
+                                       event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
+                       goto no_match;
+               }
+       }
+
+
+       /* Match. */
+       return 1;
+
+no_match:
+       return 0;
+}
+
+/*
+ * Unique add of an ust app event in the given ht. This uses the custom
+ * ht_match_ust_app_event match function and the event name as hash.
+ */
+static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
+               struct ust_app_event *event)
+{
+       struct cds_lfht_node *node_ptr;
+       struct ust_app_ht_key key;
+       struct lttng_ht *ht;
+
+       LTTNG_ASSERT(ua_chan);
+       LTTNG_ASSERT(ua_chan->events);
+       LTTNG_ASSERT(event);
+
+       ht = ua_chan->events;
+       key.name = event->attr.name;
+       key.filter = event->filter;
+       key.loglevel_type = (lttng_ust_abi_loglevel_type) event->attr.loglevel;
+       key.exclusion = event->exclusion;
+
+       node_ptr = cds_lfht_add_unique(ht->ht,
+                       ht->hash_fct(event->node.key, lttng_ht_seed),
+                       ht_match_ust_app_event, &key, &event->node.node);
+       LTTNG_ASSERT(node_ptr == &event->node.node);
+}
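
For context, the same key/match pair also drives lookups: liburcu's cds_lfht_lookup() takes a hash value, a match callback and a key, and fills in an iterator. The hypothetical helper below sketches that shape; the file's own find_ust_app_event() (further down) does essentially this, so treat the snippet as an illustration rather than additional code in the patch:

    /* Sketch of a lookup mirroring the add above (hypothetical helper). */
    static struct ust_app_event *lookup_unique_ust_app_event(
                    struct lttng_ht *ht, struct ust_app_ht_key *key)
    {
            struct cds_lfht_iter iter;
            struct cds_lfht_node *node;

            cds_lfht_lookup(ht->ht, ht->hash_fct((void *) key->name, lttng_ht_seed),
                            ht_match_ust_app_event, key, &iter);
            node = cds_lfht_iter_get_node(&iter);
            if (!node) {
                    return NULL;
            }

            return caa_container_of(node, struct ust_app_event, node.node);
    }
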
+
+/*
+ * Close the notify socket from the given RCU head object. This MUST be called
+ * through a call_rcu().
+ */
+static void close_notify_sock_rcu(struct rcu_head *head)
+{
+       int ret;
+       struct ust_app_notify_sock_obj *obj =
+               caa_container_of(head, struct ust_app_notify_sock_obj, head);
+
+       /* Must have a valid fd here. */
+       LTTNG_ASSERT(obj->fd >= 0);
+
+       ret = close(obj->fd);
+       if (ret) {
+               ERR("close notify sock %d RCU", obj->fd);
+       }
+       lttng_fd_put(LTTNG_FD_APPS, 1);
+
+       free(obj);
+}
+
+/*
+ * Return the session registry according to the buffer type of the given
+ * session.
+ *
+ * A per-UID registry object MUST exist before calling this function, or else
+ * an LTTNG_ASSERT() is triggered if it is not found. The RCU read side lock
+ * must be acquired.
+ */
+static struct ust_registry_session *get_session_registry(
+               struct ust_app_session *ua_sess)
+{
+       struct ust_registry_session *registry = NULL;
+
+       LTTNG_ASSERT(ua_sess);
+
+       switch (ua_sess->buffer_type) {
+       case LTTNG_BUFFER_PER_PID:
+       {
+               struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
+               if (!reg_pid) {
+                       goto error;
+               }
+               registry = reg_pid->registry->reg.ust;
+               break;
+       }
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
+                               ua_sess->tracing_id, ua_sess->bits_per_long,
+                               lttng_credentials_get_uid(&ua_sess->real_credentials));
+               if (!reg_uid) {
+                       goto error;
+               }
+               registry = reg_uid->registry->reg.ust;
+               break;
+       }
+       default:
+               abort();
+       }
+
+error:
+       return registry;
+}
+
+/*
+ * Delete ust context safely. RCU read lock must be held before calling
+ * this function.
+ */
+static
+void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
+               struct ust_app *app)
+{
+       int ret;
+
+       LTTNG_ASSERT(ua_ctx);
+
+       if (ua_ctx->obj) {
+               pthread_mutex_lock(&app->sock_lock);
+               ret = lttng_ust_ctl_release_object(sock, ua_ctx->obj);
+               pthread_mutex_unlock(&app->sock_lock);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               DBG3("UST app release ctx failed. Application is dead: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else if (ret == -EAGAIN) {
+                               WARN("UST app release ctx failed. Communication time out: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else {
+                               ERR("UST app release ctx obj handle %d failed with ret %d: pid = %d, sock = %d",
+                                               ua_ctx->obj->handle, ret,
+                                               app->pid, app->sock);
+                       }
+               }
+               free(ua_ctx->obj);
+       }
+       free(ua_ctx);
+}
+
+/*
+ * Delete ust app event safely. RCU read lock must be held before calling
+ * this function.
+ */
+static
+void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
+               struct ust_app *app)
+{
+       int ret;
+
+       LTTNG_ASSERT(ua_event);
+
+       free(ua_event->filter);
+       if (ua_event->exclusion != NULL)
+               free(ua_event->exclusion);
+       if (ua_event->obj != NULL) {
+               pthread_mutex_lock(&app->sock_lock);
+               ret = lttng_ust_ctl_release_object(sock, ua_event->obj);
+               pthread_mutex_unlock(&app->sock_lock);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               DBG3("UST app release event failed. Application is dead: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else if (ret == -EAGAIN) {
+                               WARN("UST app release event failed. Communication time out: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else {
+                               ERR("UST app release event obj failed with ret %d: pid = %d, sock = %d",
+                                               ret, app->pid, app->sock);
+                       }
+               }
+               free(ua_event->obj);
+       }
+       free(ua_event);
+}
+
+/*
+ * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
+ * through a call_rcu().
+ */
+static
+void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
+{
+       struct ust_app_event_notifier_rule *obj = caa_container_of(
+                       head, struct ust_app_event_notifier_rule, rcu_head);
+
+       free(obj);
+}
+
+/*
+ * Delete ust app event notifier rule safely.
+ */
+static void delete_ust_app_event_notifier_rule(int sock,
+               struct ust_app_event_notifier_rule *ua_event_notifier_rule,
+               struct ust_app *app)
+{
+       int ret;
+
+       LTTNG_ASSERT(ua_event_notifier_rule);
+
+       if (ua_event_notifier_rule->exclusion != NULL) {
+               free(ua_event_notifier_rule->exclusion);
+       }
+
+       if (ua_event_notifier_rule->obj != NULL) {
+               pthread_mutex_lock(&app->sock_lock);
+               ret = lttng_ust_ctl_release_object(sock, ua_event_notifier_rule->obj);
+               pthread_mutex_unlock(&app->sock_lock);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               DBG3("UST app release event notifier failed. Application is dead: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else if (ret == -EAGAIN) {
+                               WARN("UST app release event notifier failed. Communication time out: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else {
+                               ERR("UST app release event notifier failed with ret %d: pid = %d, sock = %d",
+                                               ret, app->pid, app->sock);
+                       }
+               }
+
+               free(ua_event_notifier_rule->obj);
+       }
+
+       lttng_trigger_put(ua_event_notifier_rule->trigger);
+       call_rcu(&ua_event_notifier_rule->rcu_head,
+                       free_ust_app_event_notifier_rule_rcu);
+}
+
+/*
+ * Release ust data object of the given stream.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
+               struct ust_app *app)
+{
+       int ret = 0;
+
+       LTTNG_ASSERT(stream);
+
+       if (stream->obj) {
+               pthread_mutex_lock(&app->sock_lock);
+               ret = lttng_ust_ctl_release_object(sock, stream->obj);
+               pthread_mutex_unlock(&app->sock_lock);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               DBG3("UST app release stream failed. Application is dead: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else if (ret == -EAGAIN) {
+                               WARN("UST app release stream failed. Communication time out: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else {
+                               ERR("UST app release stream obj failed with ret %d: pid = %d, sock = %d",
+                                               ret, app->pid, app->sock);
+                       }
+               }
+               lttng_fd_put(LTTNG_FD_APPS, 2);
+               free(stream->obj);
+       }
+
+       return ret;
+}
+
+/*
+ * Delete ust app stream safely. RCU read lock must be held before calling
+ * this function.
+ */
+static
+void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
+               struct ust_app *app)
+{
+       LTTNG_ASSERT(stream);
+
+       (void) release_ust_app_stream(sock, stream, app);
+       free(stream);
+}
+
+/*
+ * We need to execute ht_destroy outside of the RCU read-side critical
+ * section and outside of the call_rcu thread, so we postpone its execution
+ * using ht_cleanup_push. It is simpler than changing the semantics of
+ * the many callers of delete_ust_app_session().
+ */
+static
+void delete_ust_app_channel_rcu(struct rcu_head *head)
+{
+       struct ust_app_channel *ua_chan =
+               caa_container_of(head, struct ust_app_channel, rcu_head);
+
+       ht_cleanup_push(ua_chan->ctx);
+       ht_cleanup_push(ua_chan->events);
+       free(ua_chan);
+}
+
+/*
+ * Extract the lost packets or discarded events counter when the channel is
+ * being deleted and store the value in the parent channel so we can
+ * access it from lttng list and at stop/destroy.
+ *
+ * The session list lock must be held by the caller.
+ */
+static
+void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
+{
+       uint64_t discarded = 0, lost = 0;
+       struct ltt_session *session;
+       struct ltt_ust_channel *uchan;
+
+       if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
+               return;
+       }
+
+       rcu_read_lock();
+       session = session_find_by_id(ua_chan->session->tracing_id);
+       if (!session || !session->ust_session) {
+               /*
+                * Not finding the session is not an error because there are
+                * multiple ways the channels can be torn down.
+                *
+                * 1) The session daemon can initiate the destruction of the
+                *    ust app session after receiving a destroy command or
+                *    during its shutdown/teardown.
+                * 2) The application, since we are in per-pid tracing, is
+                *    unregistering and tearing down its ust app session.
+                *
+                * Both paths are protected by the session list lock which
+                * ensures that the accounting of lost packets and discarded
+                * events is done exactly once. The session is then unpublished
+                * from the session list, resulting in this condition.
+                */
+               goto end;
+       }
+
+       if (ua_chan->attr.overwrite) {
+               consumer_get_lost_packets(ua_chan->session->tracing_id,
+                               ua_chan->key, session->ust_session->consumer,
+                               &lost);
+       } else {
+               consumer_get_discarded_events(ua_chan->session->tracing_id,
+                               ua_chan->key, session->ust_session->consumer,
+                               &discarded);
+       }
+       uchan = trace_ust_find_channel_by_name(
+                       session->ust_session->domain_global.channels,
+                       ua_chan->name);
+       if (!uchan) {
+               ERR("Missing UST channel to store discarded counters");
+               goto end;
+       }
+
+       uchan->per_pid_closed_app_discarded += discarded;
+       uchan->per_pid_closed_app_lost += lost;
+
+end:
+       rcu_read_unlock();
+       if (session) {
+               session_put(session);
+       }
+}
+
+/*
+ * Delete ust app channel safely. RCU read lock must be held before calling
+ * this function.
+ *
+ * The session list lock must be held by the caller.
+ */
+static
+void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
+               struct ust_app *app)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+       struct ust_app_event *ua_event;
+       struct ust_app_ctx *ua_ctx;
+       struct ust_app_stream *stream, *stmp;
+       struct ust_registry_session *registry;
+
+       LTTNG_ASSERT(ua_chan);
+
+       DBG3("UST app deleting channel %s", ua_chan->name);
+
+       /* Wipe stream */
+       cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
+               cds_list_del(&stream->list);
+               delete_ust_app_stream(sock, stream, app);
+       }
+
+       /* Wipe context */
+       cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
+               cds_list_del(&ua_ctx->list);
+               ret = lttng_ht_del(ua_chan->ctx, &iter);
+               LTTNG_ASSERT(!ret);
+               delete_ust_app_ctx(sock, ua_ctx, app);
+       }
+
+       /* Wipe events */
+       cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
+                       node.node) {
+               ret = lttng_ht_del(ua_chan->events, &iter);
+               LTTNG_ASSERT(!ret);
+               delete_ust_app_event(sock, ua_event, app);
+       }
+
+       if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
+               /* Wipe and free registry from session registry. */
+               registry = get_session_registry(ua_chan->session);
+               if (registry) {
+                       ust_registry_channel_del_free(registry, ua_chan->key,
+                               sock >= 0);
+               }
+               /*
+                * A negative socket can be used by the caller when
+                * cleaning-up a ua_chan in an error path. Skip the
+                * accounting in this case.
+                */
+               if (sock >= 0) {
+                       save_per_pid_lost_discarded_counters(ua_chan);
+               }
+       }
+
+       if (ua_chan->obj != NULL) {
+               /* Remove channel from application UST object descriptor. */
+               iter.iter.node = &ua_chan->ust_objd_node.node;
+               ret = lttng_ht_del(app->ust_objd, &iter);
+               LTTNG_ASSERT(!ret);
+               pthread_mutex_lock(&app->sock_lock);
+               ret = lttng_ust_ctl_release_object(sock, ua_chan->obj);
+               pthread_mutex_unlock(&app->sock_lock);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               DBG3("UST app channel %s release failed. Application is dead: pid = %d, sock = %d",
+                                               ua_chan->name, app->pid,
+                                               app->sock);
+                       } else if (ret == -EAGAIN) {
+                               WARN("UST app channel %s release failed. Communication time out: pid = %d, sock = %d",
+                                               ua_chan->name, app->pid,
+                                               app->sock);
+                       } else {
+                               ERR("UST app channel %s release failed with ret %d: pid = %d, sock = %d",
+                                               ua_chan->name, ret, app->pid,
+                                               app->sock);
+                       }
+               }
+               lttng_fd_put(LTTNG_FD_APPS, 1);
+               free(ua_chan->obj);
+       }
+       call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
+}
+
+int ust_app_register_done(struct ust_app *app)
+{
+       int ret;
+
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_register_done(app->sock);
+       pthread_mutex_unlock(&app->sock_lock);
+       return ret;
+}
+
+int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
+{
+       int ret, sock;
+
+       if (app) {
+               pthread_mutex_lock(&app->sock_lock);
+               sock = app->sock;
+       } else {
+               sock = -1;
+       }
+       ret = lttng_ust_ctl_release_object(sock, data);
+       if (app) {
+               pthread_mutex_unlock(&app->sock_lock);
+       }
+       return ret;
+}
+
+/*
+ * Push metadata to consumer socket.
+ *
+ * RCU read-side lock must be held to guarantee the existence of the socket.
+ * Must be called with the ust app session lock held.
+ * Must be called with the registry lock held.
+ *
+ * On success, return the length of the metadata pushed or else a negative value.
+ * A -EPIPE return value means we could not send the metadata,
+ * but it can be caused by recoverable errors (e.g. the application has
+ * terminated concurrently).
+ */
+ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
+               struct consumer_socket *socket, int send_zero_data)
+{
+       int ret;
+       char *metadata_str = NULL;
+       size_t len, offset, new_metadata_len_sent;
+       ssize_t ret_val;
+       uint64_t metadata_key, metadata_version;
+
+       LTTNG_ASSERT(registry);
+       LTTNG_ASSERT(socket);
+
+       metadata_key = registry->metadata_key;
+
+       /*
+        * This means that no metadata was assigned to the session. This can
+        * happen if no start command has been issued previously.
+        */
+       if (!metadata_key) {
+               return 0;
+       }
+
+       offset = registry->metadata_len_sent;
+       len = registry->metadata_len - registry->metadata_len_sent;
+       new_metadata_len_sent = registry->metadata_len;
+       metadata_version = registry->metadata_version;
+       if (len == 0) {
+               DBG3("No metadata to push for metadata key %" PRIu64,
+                               registry->metadata_key);
+               ret_val = len;
+               if (send_zero_data) {
+                       DBG("No metadata to push");
+                       goto push_data;
+               }
+               goto end;
+       }
+
+       /* Allocate only what we have to send. */
+       metadata_str = (char *) zmalloc(len);
+       if (!metadata_str) {
+               PERROR("zmalloc ust app metadata string");
+               ret_val = -ENOMEM;
+               goto error;
+       }
+       /* Copy what we haven't sent out. */
+       memcpy(metadata_str, registry->metadata + offset, len);
+
+push_data:
+       pthread_mutex_unlock(&registry->lock);
+       /*
+        * We need to unlock the registry while we push metadata to
+        * break a circular dependency between the consumerd metadata
+        * lock and the sessiond registry lock. Indeed, pushing metadata
+        * to the consumerd waits until it gets pushed all the way to
+        * the relayd, but doing so requires grabbing the metadata lock.
+        * If a concurrent metadata request is being handled by the
+        * consumerd, it can try to grab the registry lock on the
+        * sessiond while holding the metadata lock on the consumer
+        * daemon. Those push and pull schemes are performed on two
+        * different bidirectional communication sockets.
+        */
+       ret = consumer_push_metadata(socket, metadata_key,
+                       metadata_str, len, offset, metadata_version);
+       pthread_mutex_lock(&registry->lock);
+       if (ret < 0) {
+               /*
+                * There is an acceptable race here between the registry
+                * metadata key assignment and the creation on the
+                * consumer. The session daemon can concurrently push
+                * metadata for this registry while it is being created on
+                * the consumer since the metadata key of the registry is
+                * assigned *before* it is set up, to avoid the consumer
+                * asking for metadata that may not be found in the
+                * session daemon.
+                *
+                * The metadata will get pushed either by the session
+                * being stopped or the consumer requesting metadata if
+                * that race is triggered.
+                */
+               if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
+                       ret = 0;
+               } else {
+                       ERR("Error pushing metadata to consumer");
+               }
+               ret_val = ret;
+               goto error_push;
+       } else {
+               /*
+                * Metadata may have been concurrently pushed, since
+                * we're not holding the registry lock while pushing to
+                * consumer.  This is handled by the fact that we send
+                * that metadata belongs. This may arrive out of order
+                * on the consumer side; the consumer supports
+                * overlapping fragments, which must be contiguous
+                * starting from offset 0. We keep the largest
+                * metadata_len_sent value of the concurrent sends.
+                * send.
+                */
+               registry->metadata_len_sent =
+                       std::max(registry->metadata_len_sent,
+                               new_metadata_len_sent);
+       }
+       free(metadata_str);
+       return len;
+
+end:
+error:
+       if (ret_val) {
+               /*
+                * On error, flag the registry that the metadata is
+                * closed. We were unable to push anything and this
+                * means that either the consumer is not responding or
+                * the metadata cache has been destroyed on the
+                * consumer.
+                */
+               registry->metadata_closed = 1;
+       }
+error_push:
+       free(metadata_str);
+       return ret_val;
+}
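
The unlock/push/relock sequence above is a classic way to break a lock-ordering cycle: release the local lock before a blocking call whose completion may require another thread to take that same lock, then re-acquire it and reconcile with whatever happened concurrently (here, the std::max() update of metadata_len_sent). A generic, self-contained sketch of that shape with hypothetical names:

    #include <pthread.h>

    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for a call that blocks on a peer which may itself need registry_lock. */
    static int blocking_push(void)
    {
            return 0;
    }

    static int push_without_holding_lock(void)
    {
            int ret;

            pthread_mutex_lock(&registry_lock);
            /* ... snapshot the data to push while the lock is held ... */
            pthread_mutex_unlock(&registry_lock);

            ret = blocking_push();

            pthread_mutex_lock(&registry_lock);
            /* ... reconcile with concurrent updates, e.g. keep the largest offset ... */
            pthread_mutex_unlock(&registry_lock);

            return ret;
    }
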
+
+/*
+ * For a given application and session, push metadata to consumer.
+ * Either sock or consumer is required: if sock is NULL, the default
+ * socket to send the metadata is retrieved from consumer; if sock
+ * is not NULL, we use it to send the metadata.
+ * The RCU read-side lock must be held while calling this function,
+ * therefore ensuring the existence of the registry. It also ensures the
+ * existence of the socket throughout this function.
+ *
+ * Return 0 on success else a negative error.
+ * A -EPIPE return value means we could not send the metadata,
+ * but it can be caused by recoverable errors (e.g. the application has
+ * terminated concurrently).
+ */
+static int push_metadata(struct ust_registry_session *registry,
+               struct consumer_output *consumer)
+{
+       int ret_val;
+       ssize_t ret;
+       struct consumer_socket *socket;
+
+       LTTNG_ASSERT(registry);
+       LTTNG_ASSERT(consumer);
+
+       pthread_mutex_lock(&registry->lock);
+       if (registry->metadata_closed) {
+               ret_val = -EPIPE;
+               goto error;
+       }
+
+       /* Get consumer socket to use to push the metadata. */
+       socket = consumer_find_socket_by_bitness(registry->bits_per_long,
+                       consumer);
+       if (!socket) {
+               ret_val = -1;
+               goto error;
+       }
+
+       ret = ust_app_push_metadata(registry, socket, 0);
+       if (ret < 0) {
+               ret_val = ret;
+               goto error;
+       }
+       pthread_mutex_unlock(&registry->lock);
+       return 0;
+
+error:
+       pthread_mutex_unlock(&registry->lock);
+       return ret_val;
+}
+
+/*
+ * Send to the consumer a close metadata command for the given session. Once
+ * done, the metadata channel is deleted and the session metadata pointer is
+ * nullified. The session lock MUST be held unless the application is
+ * in the destroy path.
+ *
+ * Do not hold the registry lock while communicating with the consumerd, because
+ * doing so causes inter-process deadlocks between consumerd and sessiond with
+ * the metadata request notification.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int close_metadata(struct ust_registry_session *registry,
+               struct consumer_output *consumer)
+{
+       int ret;
+       struct consumer_socket *socket;
+       uint64_t metadata_key;
+       bool registry_was_already_closed;
+
+       LTTNG_ASSERT(registry);
+       LTTNG_ASSERT(consumer);
+
+       rcu_read_lock();
+
+       pthread_mutex_lock(&registry->lock);
+       metadata_key = registry->metadata_key;
+       registry_was_already_closed = registry->metadata_closed;
+       if (metadata_key != 0) {
+               /*
+                * Mark the metadata as closed. Even on error, this means that
+                * the consumer is not responding or not found, so either way a
+                * second close should NOT be emitted for this registry.
+                */
+               registry->metadata_closed = 1;
+       }
+       pthread_mutex_unlock(&registry->lock);
+
+       if (metadata_key == 0 || registry_was_already_closed) {
+               ret = 0;
+               goto end;
+       }
+
+       /* Get consumer socket to use to push the metadata. */
+       socket = consumer_find_socket_by_bitness(registry->bits_per_long,
+                       consumer);
+       if (!socket) {
+               ret = -1;
+               goto end;
+       }
+
+       ret = consumer_close_metadata(socket, metadata_key);
+       if (ret < 0) {
+               goto end;
+       }
+
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * We need to execute ht_destroy outside of the RCU read-side critical
+ * section and outside of the call_rcu thread, so we postpone its execution
+ * using ht_cleanup_push. It is simpler than changing the semantics of
+ * the many callers of delete_ust_app_session().
+ */
+static
+void delete_ust_app_session_rcu(struct rcu_head *head)
+{
+       struct ust_app_session *ua_sess =
+               caa_container_of(head, struct ust_app_session, rcu_head);
+
+       ht_cleanup_push(ua_sess->channels);
+       free(ua_sess);
+}
+
+/*
+ * Delete ust app session safely. RCU read lock must be held before calling
+ * this function.
+ *
+ * The session list lock must be held by the caller.
+ */
+static
+void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
+               struct ust_app *app)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+       struct ust_app_channel *ua_chan;
+       struct ust_registry_session *registry;
+
+       LTTNG_ASSERT(ua_sess);
+
+       pthread_mutex_lock(&ua_sess->lock);
+
+       LTTNG_ASSERT(!ua_sess->deleted);
+       ua_sess->deleted = true;
+
+       registry = get_session_registry(ua_sess);
+       /* Registry can be null on error path during initialization. */
+       if (registry) {
+               /* Push metadata for application before freeing the application. */
+               (void) push_metadata(registry, ua_sess->consumer);
+
+               /*
+                * Don't ask to close metadata for global per-UID buffers; in
+                * that case, close the metadata only when the trace session is
+                * destroyed. Also, the previous push metadata could have flagged
+                * the metadata registry as closed, so don't send a close command
+                * if it is already closed.
+                */
+               if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
+                       /* And ask to close it for this session registry. */
+                       (void) close_metadata(registry, ua_sess->consumer);
+               }
+       }
+
+       cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
+                       node.node) {
+               ret = lttng_ht_del(ua_sess->channels, &iter);
+               LTTNG_ASSERT(!ret);
+               delete_ust_app_channel(sock, ua_chan, app);
+       }
+
+       /* In case of per PID, the registry is kept in the session. */
+       if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
+               struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
+               if (reg_pid) {
+                       /*
+                        * Registry can be null on error path during
+                        * initialization.
+                        */
+                       buffer_reg_pid_remove(reg_pid);
+                       buffer_reg_pid_destroy(reg_pid);
+               }
+       }
+
+       if (ua_sess->handle != -1) {
+               pthread_mutex_lock(&app->sock_lock);
+               ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle);
+               pthread_mutex_unlock(&app->sock_lock);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               DBG3("UST app release session handle failed. Application is dead: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else if (ret == -EAGAIN) {
+                               WARN("UST app release session handle failed. Communication time out: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else {
+                               ERR("UST app release session handle failed with ret %d: pid = %d, sock = %d",
+                                               ret, app->pid, app->sock);
+                       }
+               }
+
+               /* Remove session from application UST object descriptor. */
+               iter.iter.node = &ua_sess->ust_objd_node.node;
+               ret = lttng_ht_del(app->ust_sessions_objd, &iter);
+               LTTNG_ASSERT(!ret);
+       }
+
+       pthread_mutex_unlock(&ua_sess->lock);
+
+       consumer_output_put(ua_sess->consumer);
+
+       call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
+}
+
+/*
+ * Delete a traceable application structure from the global list. Never call
+ * this function outside of a call_rcu call.
+ *
+ * RCU read side lock should _NOT_ be held when calling this function.
+ */
+static
+void delete_ust_app(struct ust_app *app)
+{
+       int ret, sock;
+       struct ust_app_session *ua_sess, *tmp_ua_sess;
+       struct lttng_ht_iter iter;
+       struct ust_app_event_notifier_rule *event_notifier_rule;
+       bool event_notifier_write_fd_is_open;
+
+       /*
+        * The session list lock must be held during this function to guarantee
+        * the existence of ua_sess.
+        */
+       session_lock_list();
+       /* Delete ust app sessions info */
+       sock = app->sock;
+       app->sock = -1;
+
+       /* Wipe sessions */
+       cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
+                       teardown_node) {
+               /* Free every object in the session and the session. */
+               rcu_read_lock();
+               delete_ust_app_session(sock, ua_sess, app);
+               rcu_read_unlock();
+       }
+
+       /* Remove the event notifier rules associated with this app. */
+       rcu_read_lock();
+       cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+                       &iter.iter, event_notifier_rule, node.node) {
+               ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
+               LTTNG_ASSERT(!ret);
+
+               delete_ust_app_event_notifier_rule(
+                               app->sock, event_notifier_rule, app);
+       }
+
+       rcu_read_unlock();
+
+       ht_cleanup_push(app->sessions);
+       ht_cleanup_push(app->ust_sessions_objd);
+       ht_cleanup_push(app->ust_objd);
+       ht_cleanup_push(app->token_to_event_notifier_rule_ht);
+
+       /*
+        * This could be NULL if the event notifier setup failed (e.g. the app
+        * was killed or the tracer does not support this feature).
+        */
+       if (app->event_notifier_group.object) {
+               enum lttng_error_code ret_code;
+               enum event_notifier_error_accounting_status status;
+
+               const int event_notifier_read_fd = lttng_pipe_get_readfd(
+                               app->event_notifier_group.event_pipe);
+
+               ret_code = notification_thread_command_remove_tracer_event_source(
+                               the_notification_thread_handle,
+                               event_notifier_read_fd);
+               if (ret_code != LTTNG_OK) {
+                       ERR("Failed to remove application tracer event source from notification thread");
+               }
+
+               status = event_notifier_error_accounting_unregister_app(app);
+               if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+                       ERR("Error unregistering app from event notifier error accounting");
+               }
+
+               lttng_ust_ctl_release_object(sock, app->event_notifier_group.object);
+               free(app->event_notifier_group.object);
+       }
+
+       event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
+                       app->event_notifier_group.event_pipe);
+       lttng_pipe_destroy(app->event_notifier_group.event_pipe);
+       /*
+        * Release the file descriptors reserved for the event notifier pipe.
+        * The app could be destroyed before the write end of the pipe could be
+        * passed to the application (and closed). In that case, both file
+        * descriptors must be released.
+        */
+       lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);
+
+       /*
+        * Wait until we have deleted the application from the sock hash table
+        * before closing this socket, otherwise an application could re-use the
+        * socket ID and race with the teardown, using the same hash table entry.
+        *
+        * It's OK to leave the close in call_rcu. We want it to stay unique for
+        * all RCU readers that could run concurrently with unregister app,
+        * therefore we _need_ to only close that socket after a grace period. So
+        * it should stay in this RCU callback.
+        *
+        * This close() is a very important step of the synchronization model so
+        * every modification to this function must be carefully reviewed.
+        */
+       ret = close(sock);
+       if (ret) {
+               PERROR("close");
+       }
+       lttng_fd_put(LTTNG_FD_APPS, 1);
+
+       DBG2("UST app pid %d deleted", app->pid);
+       free(app);
+       session_unlock_list();
+}
+
+/*
+ * URCU intermediate call to delete an UST app.
+ */
+static
+void delete_ust_app_rcu(struct rcu_head *head)
+{
+       struct lttng_ht_node_ulong *node =
+               caa_container_of(head, struct lttng_ht_node_ulong, head);
+       struct ust_app *app =
+               caa_container_of(node, struct ust_app, pid_n);
+
+       DBG3("Call RCU deleting app PID %d", app->pid);
+       delete_ust_app(app);
+}
+
+/*
+ * Delete the session from the application ht and delete the data structure by
+ * freeing every object inside and releasing them.
+ *
+ * The session list lock must be held by the caller.
+ */
+static void destroy_app_session(struct ust_app *app,
+               struct ust_app_session *ua_sess)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(ua_sess);
+
+       iter.iter.node = &ua_sess->node.node;
+       ret = lttng_ht_del(app->sessions, &iter);
+       if (ret) {
+               /* Already scheduled for teardown. */
+               goto end;
+       }
+
+       /* Once deleted, free the data structure. */
+       delete_ust_app_session(app->sock, ua_sess, app);
+
+end:
+       return;
+}
+
+/*
+ * Alloc new UST app session.
+ */
+static
+struct ust_app_session *alloc_ust_app_session(void)
+{
+       struct ust_app_session *ua_sess;
+
+       /* Init most of the default values by allocating and zeroing. */
+       ua_sess = (ust_app_session *) zmalloc(sizeof(struct ust_app_session));
+       if (ua_sess == NULL) {
+               PERROR("malloc");
+               goto error_free;
+       }
+
+       ua_sess->handle = -1;
+       ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+       ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
+       pthread_mutex_init(&ua_sess->lock, NULL);
+
+       return ua_sess;
+
+error_free:
+       return NULL;
+}
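
The (ust_app_session *) cast in front of zmalloc() is another by-product of compiling as C++: a void * no longer converts implicitly to other object pointer types, so every allocation gains an explicit cast. A tiny illustration, using plain calloc() in place of lttng-tools' zeroing zmalloc() helper and a made-up "thing" struct:

    #include <cstdlib>

    struct thing {
            int x;
    };

    static struct thing *alloc_thing(void)
    {
            /*
             * C accepts the implicit void * -> struct thing * conversion;
             * C++ requires the explicit cast.
             */
            return (struct thing *) calloc(1, sizeof(struct thing));
    }
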
+
+/*
+ * Alloc new UST app channel.
+ */
+static
+struct ust_app_channel *alloc_ust_app_channel(const char *name,
+               struct ust_app_session *ua_sess,
+               struct lttng_ust_abi_channel_attr *attr)
+{
+       struct ust_app_channel *ua_chan;
+
+       /* Init most of the default values by allocating and zeroing. */
+       ua_chan = (ust_app_channel *) zmalloc(sizeof(struct ust_app_channel));
+       if (ua_chan == NULL) {
+               PERROR("malloc");
+               goto error;
+       }
+
+       /* Setup channel name */
+       strncpy(ua_chan->name, name, sizeof(ua_chan->name));
+       ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
+
+       ua_chan->enabled = 1;
+       ua_chan->handle = -1;
+       ua_chan->session = ua_sess;
+       ua_chan->key = get_next_channel_key();
+       ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+       ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+       lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
+
+       CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
+       CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
+
+       /* Copy attributes */
+       if (attr) {
+               /* Translate from lttng_ust_channel to lttng_ust_ctl_consumer_channel_attr. */
+               ua_chan->attr.subbuf_size = attr->subbuf_size;
+               ua_chan->attr.num_subbuf = attr->num_subbuf;
+               ua_chan->attr.overwrite = attr->overwrite;
+               ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
+               ua_chan->attr.read_timer_interval = attr->read_timer_interval;
+               ua_chan->attr.output = (lttng_ust_abi_output) attr->output;
+               ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
+       }
+       /* By default, the channel is a per cpu channel. */
+       ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
+
+       DBG3("UST app channel %s allocated", ua_chan->name);
+
+       return ua_chan;
+
+error:
+       return NULL;
+}
+
+/*
+ * Allocate and initialize a UST app stream.
+ *
+ * Return newly allocated stream pointer or NULL on error.
+ */
+struct ust_app_stream *ust_app_alloc_stream(void)
+{
+       struct ust_app_stream *stream = NULL;
+
+       stream = (ust_app_stream *) zmalloc(sizeof(*stream));
+       if (stream == NULL) {
+               PERROR("zmalloc ust app stream");
+               goto error;
+       }
+
+       /* Zero could be a valid value for a handle so set it to -1. */
+       stream->handle = -1;
+
+error:
+       return stream;
+}
+
+/*
+ * Alloc new UST app event.
+ */
+static
+struct ust_app_event *alloc_ust_app_event(char *name,
+               struct lttng_ust_abi_event *attr)
+{
+       struct ust_app_event *ua_event;
+
+       /* Init most of the default values by allocating and zeroing. */
+       ua_event = (ust_app_event *) zmalloc(sizeof(struct ust_app_event));
+       if (ua_event == NULL) {
+               PERROR("Failed to allocate ust_app_event structure");
+               goto error;
+       }
+
+       ua_event->enabled = 1;
+       strncpy(ua_event->name, name, sizeof(ua_event->name));
+       ua_event->name[sizeof(ua_event->name) - 1] = '\0';
+       lttng_ht_node_init_str(&ua_event->node, ua_event->name);
+
+       /* Copy attributes */
+       if (attr) {
+               memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
+       }
+
+       DBG3("UST app event %s allocated", ua_event->name);
+
+       return ua_event;
+
+error:
+       return NULL;
+}
+
+/*
+ * Allocate a new UST app event notifier rule.
+ */
+static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
+               struct lttng_trigger *trigger)
+{
+       enum lttng_event_rule_generate_exclusions_status
+                       generate_exclusion_status;
+       enum lttng_condition_status cond_status;
+       struct ust_app_event_notifier_rule *ua_event_notifier_rule;
+       struct lttng_condition *condition = NULL;
+       const struct lttng_event_rule *event_rule = NULL;
+
+       ua_event_notifier_rule = (ust_app_event_notifier_rule *) zmalloc(sizeof(struct ust_app_event_notifier_rule));
+       if (ua_event_notifier_rule == NULL) {
+               PERROR("Failed to allocate ust_app_event_notifier_rule structure");
+               goto error;
+       }
+
+       ua_event_notifier_rule->enabled = 1;
+       ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
+       lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
+                       ua_event_notifier_rule->token);
+
+       condition = lttng_trigger_get_condition(trigger);
+       LTTNG_ASSERT(condition);
+       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
+                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
+
+       cond_status = lttng_condition_event_rule_matches_get_rule(
+                       condition, &event_rule);
+       LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
+       LTTNG_ASSERT(event_rule);
+
+       ua_event_notifier_rule->error_counter_index =
+                       lttng_condition_event_rule_matches_get_error_counter_index(condition);
+       /* Acquire the event notifier's reference to the trigger. */
+       lttng_trigger_get(trigger);
+
+       ua_event_notifier_rule->trigger = trigger;
+       ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
+       generate_exclusion_status = lttng_event_rule_generate_exclusions(
+                       event_rule, &ua_event_notifier_rule->exclusion);
+       switch (generate_exclusion_status) {
+       case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
+       case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
+               break;
+       default:
+               /* Error occurred. */
+               ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
+               goto error_put_trigger;
+       }
+
+       DBG3("UST app event notifier rule allocated: token = %" PRIu64,
+                       ua_event_notifier_rule->token);
+
+       return ua_event_notifier_rule;
+
+error_put_trigger:
+       lttng_trigger_put(trigger);
+error:
+       free(ua_event_notifier_rule);
+       return NULL;
+}
+
+/*
+ * Alloc new UST app context.
+ */
+static
+struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
+{
+       struct ust_app_ctx *ua_ctx;
+
+       ua_ctx = (ust_app_ctx *) zmalloc(sizeof(struct ust_app_ctx));
+       if (ua_ctx == NULL) {
+               goto error;
+       }
+
+       CDS_INIT_LIST_HEAD(&ua_ctx->list);
+
+       if (uctx) {
+               memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
+               if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
+                       char *provider_name = NULL, *ctx_name = NULL;
+
+                       provider_name = strdup(uctx->u.app_ctx.provider_name);
+                       ctx_name = strdup(uctx->u.app_ctx.ctx_name);
+                       if (!provider_name || !ctx_name) {
+                               free(provider_name);
+                               free(ctx_name);
+                               goto error;
+                       }
+
+                       ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
+                       ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
+               }
+       }
+
+       DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
+       return ua_ctx;
+error:
+       free(ua_ctx);
+       return NULL;
+}
+
+/*
+ * Create a liblttng-ust filter bytecode from given bytecode.
+ *
+ * Return allocated filter or NULL on error.
+ */
+static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
+               const struct lttng_bytecode *orig_f)
+{
+       struct lttng_ust_abi_filter_bytecode *filter = NULL;
+
+       /* Copy filter bytecode. */
+       filter = (lttng_ust_abi_filter_bytecode *) zmalloc(sizeof(*filter) + orig_f->len);
+       if (!filter) {
+               PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
+               goto error;
+       }
+
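+       /*
+        * Both bytecode structures are expected to share the same layout (see
+        * the size assertion below), so the header and the bytecode data can
+        * be copied in a single memcpy().
+        */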
+       LTTNG_ASSERT(sizeof(struct lttng_bytecode) ==
+                       sizeof(struct lttng_ust_abi_filter_bytecode));
+       memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
+error:
+       return filter;
+}
+
+/*
+ * Create a liblttng-ust capture bytecode from given bytecode.
+ *
+ * Return allocated capture bytecode or NULL on error.
+ */
+static struct lttng_ust_abi_capture_bytecode *
+create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
+{
+       struct lttng_ust_abi_capture_bytecode *capture = NULL;
+
+       /* Copy capture bytecode. */
+       capture = (lttng_ust_abi_capture_bytecode *) zmalloc(sizeof(*capture) + orig_f->len);
+       if (!capture) {
+               PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
+               goto error;
+       }
+
+       LTTNG_ASSERT(sizeof(struct lttng_bytecode) ==
+                       sizeof(struct lttng_ust_abi_capture_bytecode));
+       memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
+error:
+       return capture;
+}
+
+/*
+ * Find an ust_app using the sock and return it. RCU read side lock must be
+ * held before calling this helper function.
+ */
+struct ust_app *ust_app_find_by_sock(int sock)
+{
+       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_iter iter;
+
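+       /* The application's socket fd is used directly as the hash key. */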
+       lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
+       node = lttng_ht_iter_get_node_ulong(&iter);
+       if (node == NULL) {
+               DBG2("UST app find by sock %d not found", sock);
+               goto error;
+       }
+
+       return caa_container_of(node, struct ust_app, sock_n);
+
+error:
+       return NULL;
+}
+
+/*
+ * Find an ust_app using the notify sock and return it. RCU read side lock must
+ * be held before calling this helper function.
+ */
+static struct ust_app *find_app_by_notify_sock(int sock)
+{
+       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_iter iter;
+
+       lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
+                       &iter);
+       node = lttng_ht_iter_get_node_ulong(&iter);
+       if (node == NULL) {
+               DBG2("UST app find by notify sock %d not found", sock);
+               goto error;
+       }
+
+       return caa_container_of(node, struct ust_app, notify_sock_n);
+
+error:
+       return NULL;
+}
+
+/*
+ * Look up a ust app event based on event name, filter bytecode and the
+ * event loglevel.
+ *
+ * Return an ust_app_event object or NULL on error.
+ */
+static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
+               const char *name, const struct lttng_bytecode *filter,
+               int loglevel_value,
+               const struct lttng_event_exclusion *exclusion)
+{
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_str *node;
+       struct ust_app_event *event = NULL;
+       struct ust_app_ht_key key;
+
+       LTTNG_ASSERT(name);
+       LTTNG_ASSERT(ht);
+
+       /* Setup key for event lookup. */
+       key.name = name;
+       key.filter = filter;
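+       /* C++ does not allow implicit int to enum conversion; cast explicitly. */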
+       key.loglevel_type = (lttng_ust_abi_loglevel_type) loglevel_value;
+       /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
+       key.exclusion = exclusion;
+
+       /* Lookup using the event name as hash and a custom match fct. */
+       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
+                       ht_match_ust_app_event, &key, &iter.iter);
+       node = lttng_ht_iter_get_node_str(&iter);
+       if (node == NULL) {
+               goto end;
+       }
+
+       event = caa_container_of(node, struct ust_app_event, node);
+
+end:
+       return event;
+}
+
+/*
+ * Look-up an event notifier rule based on its token id.
+ *
+ * Must be called with the RCU read lock held.
+ * Return an ust_app_event_notifier_rule object or NULL on error.
+ */
+static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
+               struct lttng_ht *ht, uint64_t token)
+{
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_u64 *node;
+       struct ust_app_event_notifier_rule *event_notifier_rule = NULL;
+
+       LTTNG_ASSERT(ht);
+
+       lttng_ht_lookup(ht, &token, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (node == NULL) {
+               DBG2("UST app event notifier rule token not found: token = %" PRIu64,
+                               token);
+               goto end;
+       }
+
+       event_notifier_rule = caa_container_of(
+                       node, struct ust_app_event_notifier_rule, node);
+end:
+       return event_notifier_rule;
+}
+
+/*
+ * Create the channel context on the tracer.
+ *
+ * Called with UST app session lock held.
+ */
+static
+int create_ust_channel_context(struct ust_app_channel *ua_chan,
+               struct ust_app_ctx *ua_ctx, struct ust_app *app)
+{
+       int ret;
+
+       health_code_update();
+
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx,
+                       ua_chan->obj, &ua_ctx->obj);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = 0;
+                       DBG3("UST app create channel context failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       ret = 0;
+                       WARN("UST app create channel context failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app create channel context failed with ret %d: pid = %d, sock = %d",
+                                       ret, app->pid, app->sock);
+               }
+               goto error;
+       }
+
+       ua_ctx->handle = ua_ctx->obj->handle;
+
+       DBG2("UST app context handle %d created successfully for channel %s",
+                       ua_ctx->handle, ua_chan->name);
+
+error:
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Set the filter on the tracer.
+ */
+static int set_ust_object_filter(struct ust_app *app,
+               const struct lttng_bytecode *bytecode,
+               struct lttng_ust_abi_object_data *ust_object)
+{
+       int ret;
+       struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;
+
+       health_code_update();
+
+       ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
+       if (!ust_bytecode) {
+               ret = -LTTNG_ERR_NOMEM;
+               goto error;
+       }
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode,
+                       ust_object);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = 0;
+                       DBG3("UST app set filter failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       ret = 0;
+                       WARN("UST app set filter failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app set filter failed with ret %d: pid = %d, sock = %d, object = %p",
+                                       ret, app->pid, app->sock, ust_object);
+               }
+               goto error;
+       }
+
+       DBG2("UST filter successfully set: object = %p", ust_object);
+
+error:
+       health_code_update();
+       free(ust_bytecode);
+       return ret;
+}
+
+/*
+ * Set a capture bytecode for the passed object.
+ * The sequence number enforces the ordering at runtime and on reception of
+ * the captured payloads.
+ */
+static int set_ust_capture(struct ust_app *app,
+               const struct lttng_bytecode *bytecode,
+               unsigned int capture_seqnum,
+               struct lttng_ust_abi_object_data *ust_object)
+{
+       int ret;
+       struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;
+
+       health_code_update();
+
+       ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
+       if (!ust_bytecode) {
+               ret = -LTTNG_ERR_NOMEM;
+               goto error;
+       }
+
+       /*
+        * Set the sequence number to ensure the capture of fields is ordered.
+        */
+       ust_bytecode->seqnum = capture_seqnum;
+
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode,
+                       ust_object);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = 0;
+                       DBG3("UST app set capture failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       ret = 0;
+                       DBG3("UST app set capture failed. Communication timeout: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app event set capture failed with ret %d: pid = %d, sock = %d",
+                                       ret, app->pid,
+                                       app->sock);
+               }
+
+               goto error;
+       }
+
+       DBG2("UST capture successfully set: object = %p", ust_object);
+
+error:
+       health_code_update();
+       free(ust_bytecode);
+       return ret;
+}
+
+static
+struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
+               const struct lttng_event_exclusion *exclusion)
+{
+       struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
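+       /*
+        * Flexible-size structure: the header is followed by 'count' symbol
+        * names of LTTNG_UST_ABI_SYM_NAME_LEN bytes each.
+        */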
+       size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
+               LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
+
+       ust_exclusion = (lttng_ust_abi_event_exclusion *) zmalloc(exclusion_alloc_size);
+       if (!ust_exclusion) {
+               PERROR("malloc");
+               goto end;
+       }
+
+       LTTNG_ASSERT(sizeof(struct lttng_event_exclusion) ==
+                       sizeof(struct lttng_ust_abi_event_exclusion));
+       memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
+end:
+       return ust_exclusion;
+}
+
+/*
+ * Set event exclusions on the tracer.
+ */
+static int set_ust_object_exclusions(struct ust_app *app,
+               const struct lttng_event_exclusion *exclusions,
+               struct lttng_ust_abi_object_data *ust_object)
+{
+       int ret;
+       struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;
+
+       LTTNG_ASSERT(exclusions && exclusions->count > 0);
+
+       health_code_update();
+
+       ust_exclusions = create_ust_exclusion_from_exclusion(
+                       exclusions);
+       if (!ust_exclusions) {
+               ret = -LTTNG_ERR_NOMEM;
+               goto error;
+       }
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_set_exclusion(app->sock, ust_exclusions, ust_object);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = 0;
+                       DBG3("UST app event exclusion failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       ret = 0;
+                       WARN("UST app event exclusion failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app event exclusions failed with ret %d: pid = %d, sock = %d, object = %p",
+                                       ret, app->pid, app->sock, ust_object);
+               }
+               goto error;
+       }
+
+       DBG2("UST exclusions set successfully for object %p", ust_object);
+
+error:
+       health_code_update();
+       free(ust_exclusions);
+       return ret;
+}
+
+/*
+ * Disable the specified event on the UST tracer for the UST session.
+ */
+static int disable_ust_object(struct ust_app *app,
+               struct lttng_ust_abi_object_data *object)
+{
+       int ret;
+
+       health_code_update();
+
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_disable(app->sock, object);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = 0;
+                       DBG3("UST app disable object failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       ret = 0;
+                       WARN("UST app disable object failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app disable object failed with ret %d: pid = %d, sock = %d, object = %p",
+                                       ret, app->pid, app->sock, object);
+               }
+               goto error;
+       }
+
+       DBG2("UST app object %p disabled successfully for app: pid = %d",
+                       object, app->pid);
+
+error:
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Disable the specified channel on the UST tracer for the UST session.
+ */
+static int disable_ust_channel(struct ust_app *app,
+               struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
+{
+       int ret;
+
+       health_code_update();
+
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_disable(app->sock, ua_chan->obj);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = 0;
+                       DBG3("UST app disable channel failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       ret = 0;
+                       WARN("UST app disable channel failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app channel %s disable failed, session handle %d, with ret %d: pid = %d, sock = %d",
+                                       ua_chan->name, ua_sess->handle, ret,
+                                       app->pid, app->sock);
+               }
+               goto error;
+       }
+
+       DBG2("UST app channel %s disabled successfully for app: pid = %d",
+                       ua_chan->name, app->pid);
+
+error:
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Enable the specified channel on the UST tracer for the UST session.
+ */
+static int enable_ust_channel(struct ust_app *app,
+               struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
+{
+       int ret;
+
+       health_code_update();
+
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_enable(app->sock, ua_chan->obj);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = 0;
+                       DBG3("UST app channel %s enable failed. Application is dead: pid = %d, sock = %d",
+                                       ua_chan->name, app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       ret = 0;
+                       WARN("UST app channel %s enable failed. Communication time out: pid = %d, sock = %d",
+                                       ua_chan->name, app->pid, app->sock);
+               } else {
+                       ERR("UST app channel %s enable failed, session handle %d, with ret %d: pid = %d, sock = %d",
+                                       ua_chan->name, ua_sess->handle, ret,
+                                       app->pid, app->sock);
+               }
+               goto error;
+       }
+
+       ua_chan->enabled = 1;
+
+       DBG2("UST app channel %s enabled successfully for app: pid = %d",
+                       ua_chan->name, app->pid);
+
+error:
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Enable the specified event on the UST tracer for the UST session.
+ */
+static int enable_ust_object(
+               struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
+{
+       int ret;
+
+       health_code_update();
+
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_enable(app->sock, ust_object);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = 0;
+                       DBG3("UST app enable object failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       ret = 0;
+                       WARN("UST app enable object failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app enable object failed with ret %d: pid = %d, sock = %d, object = %p",
+                                       ret, app->pid, app->sock, ust_object);
+               }
+               goto error;
+       }
+
+       DBG2("UST app object %p enabled successfully for app: pid = %d",
+                       ust_object, app->pid);
+
+error:
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Send channel and stream buffer to application.
+ *
+ * Return 0 on success. On error, a negative value is returned.
+ */
+static int send_channel_pid_to_ust(struct ust_app *app,
+               struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
+{
+       int ret;
+       struct ust_app_stream *stream, *stmp;
+
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(ua_chan);
+
+       health_code_update();
+
+       DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
+                       app->sock);
+
+       /* Send channel to the application. */
+       ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
+       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+               ret = -ENOTCONN;        /* Caused by app exiting. */
+               goto error;
+       } else if (ret == -EAGAIN) {
+               /* Caused by timeout. */
+               WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
+                               app->pid, ua_chan->name, ua_sess->tracing_id);
+               /* Treat this the same way as an application that is exiting. */
+               ret = -ENOTCONN;
+               goto error;
+       } else if (ret < 0) {
+               goto error;
+       }
+
+       health_code_update();
+
+       /* Send all streams to application. */
+       cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
+               ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = -ENOTCONN; /* Caused by app exiting. */
+                       goto error;
+               } else if (ret == -EAGAIN) {
+                       /* Caused by timeout. */
+                       WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
+                                       app->pid, stream->name, ua_chan->name,
+                                       ua_sess->tracing_id);
+                       /*
+                        * Treat this the same way as an application that is
+                        * exiting.
+                        */
+                       ret = -ENOTCONN;
+               } else if (ret < 0) {
+                       goto error;
+               }
+               /* We don't need the stream anymore once sent to the tracer. */
+               cds_list_del(&stream->list);
+               delete_ust_app_stream(-1, stream, app);
+       }
+       /* Flag the channel that it is sent to the application. */
+       ua_chan->is_sent = 1;
+
+error:
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Create the specified event on the UST tracer for a UST session.
+ *
+ * Should be called with session mutex held.
+ */
+static
+int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
+               struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
+{
+       int ret = 0;
+
+       health_code_update();
+
+       /* Create UST event on tracer */
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
+                       &ua_event->obj);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = 0;
+                       DBG3("UST app create event failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       ret = 0;
+                       WARN("UST app create event failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app create event '%s' failed with ret %d: pid = %d, sock = %d",
+                                       ua_event->attr.name, ret, app->pid,
+                                       app->sock);
+               }
+               goto error;
+       }
+
+       ua_event->handle = ua_event->obj->handle;
+
+       DBG2("UST app event %s created successfully for pid = %d, object = %p",
+                       ua_event->attr.name, app->pid, ua_event->obj);
+
+       health_code_update();
+
+       /* Set filter if one is present. */
+       if (ua_event->filter) {
+               ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+       /* Set exclusions for the event */
+       if (ua_event->exclusion) {
+               ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+       /* Events are created disabled on the tracer; enable the event if requested. */
+       if (ua_event->enabled) {
+               /*
+                * We now need to explicitly enable the event, since it
+                * is now disabled at creation.
+                */
+               ret = enable_ust_object(app, ua_event->obj);
+               if (ret < 0) {
+                       /*
+                        * If we hit an EPERM, something is wrong with our enable call. If
+                        * we get an EEXIST, there is a problem on the tracer side since we
+                        * just created it.
+                        */
+                       switch (ret) {
+                       case -LTTNG_UST_ERR_PERM:
+                               /* Code flow problem */
+                               abort();
+                       case -LTTNG_UST_ERR_EXIST:
+                               /* It's OK for our use case. */
+                               ret = 0;
+                               break;
+                       default:
+                               break;
+                       }
+                       goto error;
+               }
+       }
+
+error:
+       health_code_update();
+       return ret;
+}
+
+static int init_ust_event_notifier_from_event_rule(
+               const struct lttng_event_rule *rule,
+               struct lttng_ust_abi_event_notifier *event_notifier)
+{
+       enum lttng_event_rule_status status;
+       enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
+       int loglevel = -1, ret = 0;
+       const char *pattern;
+
+       memset(event_notifier, 0, sizeof(*event_notifier));
+
+       if (lttng_event_rule_targets_agent_domain(rule)) {
+               /*
+                * Special event for agents
+                * The actual meat of the event is in the filter that will be
+                * attached later on.
+                * Set the default values for the agent event.
+                */
+               pattern = event_get_default_agent_ust_name(
+                               lttng_event_rule_get_domain_type(rule));
+               loglevel = 0;
+               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
+       } else {
+               const struct lttng_log_level_rule *log_level_rule;
+
+               LTTNG_ASSERT(lttng_event_rule_get_type(rule) ==
+                               LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT);
+
+               status = lttng_event_rule_user_tracepoint_get_name_pattern(rule, &pattern);
+               if (status != LTTNG_EVENT_RULE_STATUS_OK) {
+                       /* At this point, this is a fatal error. */
+                       abort();
+               }
+
+               status = lttng_event_rule_user_tracepoint_get_log_level_rule(
+                               rule, &log_level_rule);
+               if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
+                       ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
+               } else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
+                       enum lttng_log_level_rule_status llr_status;
+
+                       switch (lttng_log_level_rule_get_type(log_level_rule)) {
+                       case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
+                               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
+                               llr_status = lttng_log_level_rule_exactly_get_level(
+                                               log_level_rule, &loglevel);
+                               break;
+                       case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
+                               ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
+                               llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
+                                               log_level_rule, &loglevel);
+                               break;
+                       default:
+                               abort();
+                       }
+
+                       LTTNG_ASSERT(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
+               } else {
+                       /* At this point this is a fatal error. */
+                       abort();
+               }
+       }
+
+       event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
+       ret = lttng_strncpy(event_notifier->event.name, pattern,
+                       LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+       if (ret) {
+               ERR("Failed to copy event rule pattern to notifier: pattern = '%s'",
+                               pattern);
+               goto end;
+       }
+
+       event_notifier->event.loglevel_type = ust_loglevel_type;
+       event_notifier->event.loglevel = loglevel;
+end:
+       return ret;
+}
+
+/*
+ * Create the specified event notifier against the user space tracer of a
+ * given application.
+ */
+static int create_ust_event_notifier(struct ust_app *app,
+               struct ust_app_event_notifier_rule *ua_event_notifier_rule)
+{
+       int ret = 0;
+       enum lttng_condition_status condition_status;
+       const struct lttng_condition *condition = NULL;
+       struct lttng_ust_abi_event_notifier event_notifier;
+       const struct lttng_event_rule *event_rule = NULL;
+       unsigned int capture_bytecode_count = 0, i;
+       enum lttng_condition_status cond_status;
+       enum lttng_event_rule_type event_rule_type;
+
+       health_code_update();
+       LTTNG_ASSERT(app->event_notifier_group.object);
+
+       condition = lttng_trigger_get_const_condition(
+                       ua_event_notifier_rule->trigger);
+       LTTNG_ASSERT(condition);
+       LTTNG_ASSERT(lttng_condition_get_type(condition) ==
+                       LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
+
+       condition_status = lttng_condition_event_rule_matches_get_rule(
+                       condition, &event_rule);
+       LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
+
+       LTTNG_ASSERT(event_rule);
+
+       event_rule_type = lttng_event_rule_get_type(event_rule);
+       LTTNG_ASSERT(event_rule_type == LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT ||
+                       event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING ||
+                       event_rule_type ==
+                                       LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING ||
+                       event_rule_type ==
+                                       LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING);
+
+       init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
+       event_notifier.event.token = ua_event_notifier_rule->token;
+       event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;
+
+       /* Create UST event notifier against the tracer. */
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_create_event_notifier(app->sock, &event_notifier,
+                       app->event_notifier_group.object,
+                       &ua_event_notifier_rule->obj);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = 0;
+                       DBG3("UST app create event notifier failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       ret = 0;
+                       WARN("UST app create event notifier failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app create event notifier '%s' failed with ret %d: pid = %d, sock = %d",
+                                       event_notifier.event.name, ret, app->pid,
+                                       app->sock);
+               }
+               goto error;
+       }
+
+       ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
+
+       DBG2("UST app event notifier %s created successfully: app = '%s', pid = %d, object = %p",
+                       event_notifier.event.name, app->name, app->pid,
+                       ua_event_notifier_rule->obj);
+
+       health_code_update();
+
+       /* Set filter if one is present. */
+       if (ua_event_notifier_rule->filter) {
+               ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
+                               ua_event_notifier_rule->obj);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+       /* Set exclusions for the event. */
+       if (ua_event_notifier_rule->exclusion) {
+               ret = set_ust_object_exclusions(app,
+                               ua_event_notifier_rule->exclusion,
+                               ua_event_notifier_rule->obj);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+       /* Set the capture bytecodes. */
+       cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
+                       condition, &capture_bytecode_count);
+       LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
+
+       for (i = 0; i < capture_bytecode_count; i++) {
+               const struct lttng_bytecode *capture_bytecode =
+                               lttng_condition_event_rule_matches_get_capture_bytecode_at_index(
+                                               condition, i);
+
+               ret = set_ust_capture(app, capture_bytecode, i,
+                               ua_event_notifier_rule->obj);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+       /*
+        * We now need to explicitly enable the event, since it
+        * is disabled at creation.
+        */
+       ret = enable_ust_object(app, ua_event_notifier_rule->obj);
+       if (ret < 0) {
+               /*
+                * If we hit an EPERM, something is wrong with our enable call.
+                * If we get an EEXIST, there is a problem on the tracer side
+                * since we just created it.
+                */
+               switch (ret) {
+               case -LTTNG_UST_ERR_PERM:
+                       /* Code flow problem. */
+                       abort();
+               case -LTTNG_UST_ERR_EXIST:
+                       /* It's OK for our use case. */
+                       ret = 0;
+                       break;
+               default:
+                       break;
+               }
+
+               goto error;
+       }
+
+       ua_event_notifier_rule->enabled = true;
+
+error:
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Copy data between a UST app event and an LTT event.
+ */
+static void shadow_copy_event(struct ust_app_event *ua_event,
+               struct ltt_ust_event *uevent)
+{
+       size_t exclusion_alloc_size;
+
+       strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
+       ua_event->name[sizeof(ua_event->name) - 1] = '\0';
+
+       ua_event->enabled = uevent->enabled;
+
+       /* Copy event attributes */
+       memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
+
+       /* Copy filter bytecode */
+       if (uevent->filter) {
+               ua_event->filter = lttng_bytecode_copy(uevent->filter);
+               /* Filter might be NULL here in case of ENOMEM. */
+       }
+
+       /* Copy exclusion data */
+       if (uevent->exclusion) {
+               exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
+                               LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
+               ua_event->exclusion = (lttng_event_exclusion *) zmalloc(exclusion_alloc_size);
+               if (ua_event->exclusion == NULL) {
+                       PERROR("malloc");
+               } else {
+                       memcpy(ua_event->exclusion, uevent->exclusion,
+                                       exclusion_alloc_size);
+               }
+       }
+}
+
+/*
+ * Copy data between a UST app channel and an LTT channel.
+ */
+static void shadow_copy_channel(struct ust_app_channel *ua_chan,
+               struct ltt_ust_channel *uchan)
+{
+       DBG2("UST app shadow copy of channel %s started", ua_chan->name);
+
+       strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
+       ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
+
+       ua_chan->tracefile_size = uchan->tracefile_size;
+       ua_chan->tracefile_count = uchan->tracefile_count;
+
+       /* Copy event attributes since the layout is different. */
+       ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
+       ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
+       ua_chan->attr.overwrite = uchan->attr.overwrite;
+       ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
+       ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
+       ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
+       ua_chan->attr.output = (lttng_ust_abi_output) uchan->attr.output;
+       ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
+
+       /*
+        * Note that the attribute channel type is not set since the channel on the
+        * tracing registry side does not have this information.
+        */
+
+       ua_chan->enabled = uchan->enabled;
+       ua_chan->tracing_channel_id = uchan->id;
+
+       DBG3("UST app shadow copy of channel %s done", ua_chan->name);
+}
+
+/*
+ * Copy data between a UST app session and a regular LTT session.
+ */
+static void shadow_copy_session(struct ust_app_session *ua_sess,
+               struct ltt_ust_session *usess, struct ust_app *app)
+{
+       struct tm *timeinfo;
+       char datetime[16];
+       int ret;
+       char tmp_shm_path[PATH_MAX];
+
+       timeinfo = localtime(&app->registration_time);
+       strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
+
+       DBG2("Shadow copy of session handle %d", ua_sess->handle);
+
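+       /* Record the parent tracing session's id and get an id of our own for this app session. */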
+       ua_sess->tracing_id = usess->id;
+       ua_sess->id = get_next_session_id();
+       LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
+       LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
+       LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
+       LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
+       ua_sess->buffer_type = usess->buffer_type;
+       ua_sess->bits_per_long = app->bits_per_long;
+
+       /* Only one consumer object per session is possible. */
+       consumer_output_get(usess->consumer);
+       ua_sess->consumer = usess->consumer;
+
+       ua_sess->output_traces = usess->output_traces;
+       ua_sess->live_timer_interval = usess->live_timer_interval;
+       copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
+                       &usess->metadata_attr);
+
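+       /* Build the trace output path according to the buffer ownership model. */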
+       switch (ua_sess->buffer_type) {
+       case LTTNG_BUFFER_PER_PID:
+               ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
+                               DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
+                               datetime);
+               break;
+       case LTTNG_BUFFER_PER_UID:
+               ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
+                               DEFAULT_UST_TRACE_UID_PATH,
+                               lttng_credentials_get_uid(&ua_sess->real_credentials),
+                               app->bits_per_long);
+               break;
+       default:
+               abort();
+               goto error;
+       }
+       if (ret < 0) {
+               PERROR("snprintf UST shadow copy session");
+               abort();
+               goto error;
+       }
+
+       strncpy(ua_sess->root_shm_path, usess->root_shm_path,
+               sizeof(ua_sess->root_shm_path));
+       ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
+       strncpy(ua_sess->shm_path, usess->shm_path,
+               sizeof(ua_sess->shm_path));
+       ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
+       if (ua_sess->shm_path[0]) {
+               switch (ua_sess->buffer_type) {
+               case LTTNG_BUFFER_PER_PID:
+                       ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
+                                       "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
+                                       app->name, app->pid, datetime);
+                       break;
+               case LTTNG_BUFFER_PER_UID:
+                       ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
+                                       "/" DEFAULT_UST_TRACE_UID_PATH,
+                                       app->uid, app->bits_per_long);
+                       break;
+               default:
+                       abort();
+                       goto error;
+               }
+               if (ret < 0) {
+                       PERROR("snprintf UST shadow copy session");
+                       abort();
+                       goto error;
+               }
+               strncat(ua_sess->shm_path, tmp_shm_path,
+                       sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
+               ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
+       }
+       return;
+
+error:
+       consumer_output_put(ua_sess->consumer);
+}
+
+/*
+ * Session lookup wrapper.
+ */
+static
+void __lookup_session_by_app(const struct ltt_ust_session *usess,
+                       struct ust_app *app, struct lttng_ht_iter *iter)
+{
+       /* Get right UST app session from app */
+       lttng_ht_lookup(app->sessions, &usess->id, iter);
+}
+
+/*
+ * Return ust app session from the app session hashtable using the UST session
+ * id.
+ */
+static struct ust_app_session *lookup_session_by_app(
+               const struct ltt_ust_session *usess, struct ust_app *app)
+{
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_u64 *node;
+
+       __lookup_session_by_app(usess, app, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (node == NULL) {
+               goto error;
+       }
+
+       return caa_container_of(node, struct ust_app_session, node);
+
+error:
+       return NULL;
+}
+
+/*
+ * Setup buffer registry per PID for the given session and application. If none
+ * is found, a new one is created, added to the global registry and
+ * initialized. If regp is valid, it's set with the newly created object.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
+               struct ust_app *app, struct buffer_reg_pid **regp)
+{
+       int ret = 0;
+       struct buffer_reg_pid *reg_pid;
+
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(app);
+
+       rcu_read_lock();
+
+       reg_pid = buffer_reg_pid_find(ua_sess->id);
+       if (!reg_pid) {
+               /*
+                * This is the create channel path meaning that if there is NO
+                * registry available, we have to create one for this session.
+                */
+               ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
+                       ua_sess->root_shm_path, ua_sess->shm_path);
+               if (ret < 0) {
+                       goto error;
+               }
+       } else {
+               goto end;
+       }
+
+       /* Initialize registry. */
+       ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
+                       app->bits_per_long, app->uint8_t_alignment,
+                       app->uint16_t_alignment, app->uint32_t_alignment,
+                       app->uint64_t_alignment, app->long_alignment,
+                       app->byte_order, app->version.major, app->version.minor,
+                       reg_pid->root_shm_path, reg_pid->shm_path,
+                       lttng_credentials_get_uid(&ua_sess->effective_credentials),
+                       lttng_credentials_get_gid(&ua_sess->effective_credentials),
+                       ua_sess->tracing_id,
+                       app->uid);
+       if (ret < 0) {
+               /*
+                * reg_pid->registry->reg.ust is NULL upon error, so we need to
+                * destroy the buffer registry, because it is always expected
+                * that if the buffer registry can be found, its ust registry is
+                * non-NULL.
+                */
+               buffer_reg_pid_destroy(reg_pid);
+               goto error;
+       }
+
+       buffer_reg_pid_add(reg_pid);
+
+       DBG3("UST app buffer registry per PID created successfully");
+
+end:
+       if (regp) {
+               *regp = reg_pid;
+       }
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Setup buffer registry per UID for the given session and application. If none
+ * is found, a new one is created, added to the global registry and
+ * initialized. If regp is valid, it's set with the newly created object.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
+               struct ust_app_session *ua_sess,
+               struct ust_app *app, struct buffer_reg_uid **regp)
+{
+       int ret = 0;
+       struct buffer_reg_uid *reg_uid;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(app);
+
+       rcu_read_lock();
+
+       reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
+       if (!reg_uid) {
+               /*
+                * This is the create channel path meaning that if there is NO
+                * registry available, we have to create one for this session.
+                */
+               ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
+                               LTTNG_DOMAIN_UST, &reg_uid,
+                               ua_sess->root_shm_path, ua_sess->shm_path);
+               if (ret < 0) {
+                       goto error;
+               }
+       } else {
+               goto end;
+       }
+
+       /* Initialize registry. */
+       ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
+                       app->bits_per_long, app->uint8_t_alignment,
+                       app->uint16_t_alignment, app->uint32_t_alignment,
+                       app->uint64_t_alignment, app->long_alignment,
+                       app->byte_order, app->version.major,
+                       app->version.minor, reg_uid->root_shm_path,
+                       reg_uid->shm_path, usess->uid, usess->gid,
+                       ua_sess->tracing_id, app->uid);
+       if (ret < 0) {
+               /*
+                * reg_uid->registry->reg.ust is NULL upon error, so we need to
+                * destroy the buffer registry, because it is always expected
+                * that if the buffer registry can be found, its ust registry is
+                * non-NULL.
+                */
+               buffer_reg_uid_destroy(reg_uid, NULL);
+               goto error;
+       }
+       /* Add node to teardown list of the session. */
+       cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
+
+       buffer_reg_uid_add(reg_uid);
+
+       DBG3("UST app buffer registry per UID created successfully");
+end:
+       if (regp) {
+               *regp = reg_uid;
+       }
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Create a session on the tracer side for the given app.
+ *
+ * On success, ua_sess_ptr is populated with the session pointer or else left
+ * untouched. If the session was created, is_created is set to 1. On error,
+ * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
+ * be NULL.
+ *
+ * Returns 0 on success or else a negative code which is either -ENOMEM or
+ * -ENOTCONN, which is the default code if lttng_ust_ctl_create_session fails.
+ */
+static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
+               struct ust_app *app, struct ust_app_session **ua_sess_ptr,
+               int *is_created)
+{
+       int ret, created = 0;
+       struct ust_app_session *ua_sess;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(ua_sess_ptr);
+
+       health_code_update();
+
+       ua_sess = lookup_session_by_app(usess, app);
+       if (ua_sess == NULL) {
+               DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
+                               app->pid, usess->id);
+               ua_sess = alloc_ust_app_session();
+               if (ua_sess == NULL) {
+                       /* Only malloc can fail, so something is really wrong. */
+                       ret = -ENOMEM;
+                       goto error;
+               }
+               shadow_copy_session(ua_sess, usess, app);
+               created = 1;
+       }
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_PID:
+               /* Init local registry. */
+               ret = setup_buffer_reg_pid(ua_sess, app, NULL);
+               if (ret < 0) {
+                       delete_ust_app_session(-1, ua_sess, app);
+                       goto error;
+               }
+               break;
+       case LTTNG_BUFFER_PER_UID:
+               /* Look for a global registry. If none exists, create one. */
+               ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
+               if (ret < 0) {
+                       delete_ust_app_session(-1, ua_sess, app);
+                       goto error;
+               }
+               break;
+       default:
+               abort();
+               ret = -EINVAL;
+               goto error;
+       }
+
+       health_code_update();
+
+       if (ua_sess->handle == -1) {
+               pthread_mutex_lock(&app->sock_lock);
+               ret = lttng_ust_ctl_create_session(app->sock);
+               pthread_mutex_unlock(&app->sock_lock);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               DBG("UST app creating session failed. Application is dead: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                               ret = 0;
+                       } else if (ret == -EAGAIN) {
+                               DBG("UST app creating session failed. Communication time out: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                               ret = 0;
+                       } else {
+                               ERR("UST app creating session failed with ret %d: pid = %d, sock = %d",
+                                               ret, app->pid, app->sock);
+                       }
+                       delete_ust_app_session(-1, ua_sess, app);
+                       if (ret != -ENOMEM) {
+                               /*
+                                * The tracer is probably gone or hit an internal error, so
+                                * behave as if it will soon unregister or is no longer usable.
+                                */
+                               ret = -ENOTCONN;
+                       }
+                       goto error;
+               }
+
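+               /* On success, lttng_ust_ctl_create_session() returns the new session handle. */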
+               ua_sess->handle = ret;
+
+               /* Add ust app session to app's HT */
+               lttng_ht_node_init_u64(&ua_sess->node,
+                               ua_sess->tracing_id);
+               lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
+               lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
+               lttng_ht_add_unique_ulong(app->ust_sessions_objd,
+                               &ua_sess->ust_objd_node);
+
+               DBG2("UST app session created successfully with handle %d", ret);
+       }
+
+       *ua_sess_ptr = ua_sess;
+       if (is_created) {
+               *is_created = created;
+       }
+
+       /* Everything went well. */
+       ret = 0;
+
+error:
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Match function for a hash table lookup of ust_app_ctx.
+ *
+ * It matches an ust app context based on the context type and, in the case
+ * of perf counters, their name.
+ */
+static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
+{
+       struct ust_app_ctx *ctx;
+       const struct lttng_ust_context_attr *key;
+
+       LTTNG_ASSERT(node);
+       LTTNG_ASSERT(_key);
+
+       ctx = caa_container_of(node, struct ust_app_ctx, node.node);
+       key = (lttng_ust_context_attr *) _key;
+
+       /* Context type */
+       if (ctx->ctx.ctx != key->ctx) {
+               goto no_match;
+       }
+
+       switch(key->ctx) {
+       case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
+               if (strncmp(key->u.perf_counter.name,
+                               ctx->ctx.u.perf_counter.name,
+                               sizeof(key->u.perf_counter.name))) {
+                       goto no_match;
+               }
+               break;
+       case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
+               if (strcmp(key->u.app_ctx.provider_name,
+                               ctx->ctx.u.app_ctx.provider_name) ||
+                               strcmp(key->u.app_ctx.ctx_name,
+                               ctx->ctx.u.app_ctx.ctx_name)) {
+                       goto no_match;
+               }
+               break;
+       default:
+               break;
+       }
+
+       /* Match. */
+       return 1;
+
+no_match:
+       return 0;
+}
+
+/*
+ * Look up a ust app context from an lttng_ust_context.
+ *
+ * Must be called while holding RCU read side lock.
+ * Return an ust_app_ctx object or NULL on error.
+ */
+static
+struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
+               struct lttng_ust_context_attr *uctx)
+{
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_ulong *node;
+       struct ust_app_ctx *app_ctx = NULL;
+
+       LTTNG_ASSERT(uctx);
+       LTTNG_ASSERT(ht);
+
+       /* Lookup using the lttng_ust_context_type and a custom match fct. */
+       cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
+                       ht_match_ust_app_ctx, uctx, &iter.iter);
+       node = lttng_ht_iter_get_node_ulong(&iter);
+       if (!node) {
+               goto end;
+       }
+
+       app_ctx = caa_container_of(node, struct ust_app_ctx, node);
+
+end:
+       return app_ctx;
+}
+
+/*
+ * Create a context for the channel on the tracer.
+ *
+ * Called with UST app session lock held and an RCU read side lock.
+ */
+static
+int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
+               struct lttng_ust_context_attr *uctx,
+               struct ust_app *app)
+{
+       int ret = 0;
+       struct ust_app_ctx *ua_ctx;
+
+       DBG2("UST app adding context to channel %s", ua_chan->name);
+
+       ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
+       if (ua_ctx) {
+               ret = -EEXIST;
+               goto error;
+       }
+
+       ua_ctx = alloc_ust_app_ctx(uctx);
+       if (ua_ctx == NULL) {
+               /* malloc failed */
+               ret = -ENOMEM;
+               goto error;
+       }
+
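+       /* Register the new context in the channel's hash table and context list. */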
+       lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
+       lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
+       cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
+
+       ret = create_ust_channel_context(ua_chan, ua_ctx, app);
+       if (ret < 0) {
+               goto error;
+       }
+
+error:
+       return ret;
+}
+
+/*
+ * Enable on the tracer side a ust app event for the session and channel.
+ *
+ * Called with UST app session lock held.
+ */
+static
+int enable_ust_app_event(struct ust_app_session *ua_sess,
+               struct ust_app_event *ua_event, struct ust_app *app)
+{
+       int ret;
+
+       ret = enable_ust_object(app, ua_event->obj);
+       if (ret < 0) {
+               goto error;
+       }
+
+       ua_event->enabled = 1;
+
+error:
+       return ret;
+}
+
+/*
+ * Disable on the tracer side a ust app event for the session and channel.
+ */
+static int disable_ust_app_event(struct ust_app_session *ua_sess,
+               struct ust_app_event *ua_event, struct ust_app *app)
+{
+       int ret;
+
+       ret = disable_ust_object(app, ua_event->obj);
+       if (ret < 0) {
+               goto error;
+       }
+
+       ua_event->enabled = 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Look up the ust app channel for the session and disable it on the tracer side.
+ */
+static
+int disable_ust_app_channel(struct ust_app_session *ua_sess,
+               struct ust_app_channel *ua_chan, struct ust_app *app)
+{
+       int ret;
+
+       ret = disable_ust_channel(app, ua_sess, ua_chan);
+       if (ret < 0) {
+               goto error;
+       }
+
+       ua_chan->enabled = 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Lookup ust app channel for session and enable it on the tracer side. This
+ * MUST be called with a RCU read side lock acquired.
+ */
+static int enable_ust_app_channel(struct ust_app_session *ua_sess,
+               struct ltt_ust_channel *uchan, struct ust_app *app)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_str *ua_chan_node;
+       struct ust_app_channel *ua_chan;
+
+       lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
+       ua_chan_node = lttng_ht_iter_get_node_str(&iter);
+       if (ua_chan_node == NULL) {
+               DBG2("Unable to find channel %s in ust session id %" PRIu64,
+                               uchan->name, ua_sess->tracing_id);
+               goto error;
+       }
+
+       ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+
+       ret = enable_ust_channel(app, ua_sess, ua_chan);
+       if (ret < 0) {
+               goto error;
+       }
+
+error:
+       return ret;
+}
+
+/*
+ * Ask the consumer to create a channel and get it if successful.
+ *
+ * Called with UST app session lock held.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int do_consumer_create_channel(struct ltt_ust_session *usess,
+               struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
+               int bitness, struct ust_registry_session *registry,
+               uint64_t trace_archive_id)
+{
+       int ret;
+       unsigned int nb_fd = 0;
+       struct consumer_socket *socket;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(ua_chan);
+       LTTNG_ASSERT(registry);
+
+       rcu_read_lock();
+       health_code_update();
+
+       /* Get the right consumer socket for the application. */
+       socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
+       if (!socket) {
+               ret = -EINVAL;
+               goto error;
+       }
+
+       health_code_update();
+
+       /* Need one fd for the channel. */
+       ret = lttng_fd_get(LTTNG_FD_APPS, 1);
+       if (ret < 0) {
+               ERR("Exhausted number of available FD upon create channel");
+               goto error;
+       }
+
+       /*
+        * Ask the consumer to create the channel. The consumer will return the
+        * number of streams we have to expect.
+        */
+       ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
+                       registry, usess->current_trace_chunk);
+       if (ret < 0) {
+               goto error_ask;
+       }
+
+       /*
+        * Compute the number of fds needed before receiving them. It must be 2 per
+        * stream (2 being the default value here).
+        */
+       nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
+
+       /* Reserve the number of file descriptors we need. */
+       ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
+       if (ret < 0) {
+               ERR("Exhausted number of available FD upon create channel");
+               goto error_fd_get_stream;
+       }
+
+       health_code_update();
+
+       /*
+        * Now get the channel from the consumer. This call will populate the stream
+        * list of that channel and set the ust objects.
+        */
+       if (usess->consumer->enabled) {
+               ret = ust_consumer_get_channel(socket, ua_chan);
+               if (ret < 0) {
+                       goto error_destroy;
+               }
+       }
+
+       rcu_read_unlock();
+       return 0;
+
+error_destroy:
+       lttng_fd_put(LTTNG_FD_APPS, nb_fd);
+error_fd_get_stream:
+       /*
+        * Initiate a destroy channel on the consumer since we had an error
+        * handling it on our side. The return value is of no importance since we
+        * already have a ret value set by the previous error that we need to
+        * return.
+        */
+       (void) ust_consumer_destroy_channel(socket, ua_chan);
+error_ask:
+       lttng_fd_put(LTTNG_FD_APPS, 1);
+error:
+       health_code_update();
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Duplicate the ust data object of the ust app stream and save it in the
+ * buffer registry stream.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
+               struct ust_app_stream *stream)
+{
+       int ret;
+
+       LTTNG_ASSERT(reg_stream);
+       LTTNG_ASSERT(stream);
+
+       /* Duplicating a stream requires 2 new fds. Reserve them. */
+       ret = lttng_fd_get(LTTNG_FD_APPS, 2);
+       if (ret < 0) {
+               ERR("Exhausted number of available FD upon duplicate stream");
+               goto error;
+       }
+
+       /* Duplicate object for stream once the original is in the registry. */
+       ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj,
+                       reg_stream->obj.ust);
+       if (ret < 0) {
+               ERR("Duplicate stream obj from %p to %p failed with ret %d",
+                               reg_stream->obj.ust, stream->obj, ret);
+               lttng_fd_put(LTTNG_FD_APPS, 2);
+               goto error;
+       }
+       stream->handle = stream->obj->handle;
+
+error:
+       return ret;
+}
+
+/*
+ * Duplicate the ust data object of the ust app channel and save it in the
+ * buffer registry channel.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
+               struct ust_app_channel *ua_chan)
+{
+       int ret;
+
+       LTTNG_ASSERT(buf_reg_chan);
+       LTTNG_ASSERT(ua_chan);
+
+       /* Duplicating a channel requires 1 new fd. Reserve it. */
+       ret = lttng_fd_get(LTTNG_FD_APPS, 1);
+       if (ret < 0) {
+               ERR("Exhausted number of available FD upon duplicate channel");
+               goto error_fd_get;
+       }
+
+       /* Duplicate object for the channel once the original is in the registry. */
+       ret = lttng_ust_ctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
+       if (ret < 0) {
+               ERR("Duplicate channel obj from %p to %p failed with ret: %d",
+                               buf_reg_chan->obj.ust, ua_chan->obj, ret);
+               goto error;
+       }
+       ua_chan->handle = ua_chan->obj->handle;
+
+       return 0;
+
+error:
+       lttng_fd_put(LTTNG_FD_APPS, 1);
+error_fd_get:
+       return ret;
+}
+
+/*
+ * For a given channel buffer registry, setup all streams of the given ust
+ * application channel.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
+               struct ust_app_channel *ua_chan,
+               struct ust_app *app)
+{
+       int ret = 0;
+       struct ust_app_stream *stream, *stmp;
+
+       LTTNG_ASSERT(buf_reg_chan);
+       LTTNG_ASSERT(ua_chan);
+
+       DBG2("UST app setup buffer registry stream");
+
+       /* Send all streams to application. */
+       cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
+               struct buffer_reg_stream *reg_stream;
+
+               ret = buffer_reg_stream_create(&reg_stream);
+               if (ret < 0) {
+                       goto error;
+               }
+
+               /*
+                * Keep original pointer and nullify it in the stream so the delete
+                * stream call does not release the object.
+                */
+               reg_stream->obj.ust = stream->obj;
+               stream->obj = NULL;
+               buffer_reg_stream_add(reg_stream, buf_reg_chan);
+
+               /* We don't need the streams anymore. */
+               cds_list_del(&stream->list);
+               delete_ust_app_stream(-1, stream, app);
+       }
+
+error:
+       return ret;
+}
+
+/*
+ * Create a buffer registry channel for the given session registry and
+ * application channel object. If the regp pointer is valid, it is set to the
+ * created object. Important: the created object is NOT added to the session
+ * registry hash table.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
+               struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
+{
+       int ret;
+       struct buffer_reg_channel *buf_reg_chan = NULL;
+
+       LTTNG_ASSERT(reg_sess);
+       LTTNG_ASSERT(ua_chan);
+
+       DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
+
+       /* Create buffer registry channel. */
+       ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
+       if (ret < 0) {
+               goto error_create;
+       }
+       LTTNG_ASSERT(buf_reg_chan);
+       buf_reg_chan->consumer_key = ua_chan->key;
+       buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
+       buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
+
+       /* Create and add a channel registry to session. */
+       ret = ust_registry_channel_add(reg_sess->reg.ust,
+                       ua_chan->tracing_channel_id);
+       if (ret < 0) {
+               goto error;
+       }
+       buffer_reg_channel_add(reg_sess, buf_reg_chan);
+
+       if (regp) {
+               *regp = buf_reg_chan;
+       }
+
+       return 0;
+
+error:
+       /* Safe because the registry channel object was not added to any HT. */
+       buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
+error_create:
+       return ret;
+}
+
+/*
+ * Set up the buffer registry channel for the given session registry and
+ * application channel object.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
+               struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
+               struct ust_app *app)
+{
+       int ret;
+
+       LTTNG_ASSERT(reg_sess);
+       LTTNG_ASSERT(buf_reg_chan);
+       LTTNG_ASSERT(ua_chan);
+       LTTNG_ASSERT(ua_chan->obj);
+
+       DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
+
+       /* Setup all streams for the registry. */
+       ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
+       if (ret < 0) {
+               goto error;
+       }
+
+       buf_reg_chan->obj.ust = ua_chan->obj;
+       ua_chan->obj = NULL;
+
+       return 0;
+
+error:
+       buffer_reg_channel_remove(reg_sess, buf_reg_chan);
+       buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
+       return ret;
+}
+
+/*
+ * Send buffer registry channel to the application.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
+               struct ust_app *app, struct ust_app_session *ua_sess,
+               struct ust_app_channel *ua_chan)
+{
+       int ret;
+       struct buffer_reg_stream *reg_stream;
+
+       LTTNG_ASSERT(buf_reg_chan);
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(ua_chan);
+
+       DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
+
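+       /* Give the application its own copy of the registry channel object. */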
+       ret = duplicate_channel_object(buf_reg_chan, ua_chan);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /* Send channel to the application. */
+       ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
+       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+               ret = -ENOTCONN;        /* Caused by app exiting. */
+               goto error;
+       } else if (ret == -EAGAIN) {
+               /* Caused by timeout. */
+               WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
+                               app->pid, ua_chan->name, ua_sess->tracing_id);
+               /* Treat this the same way as an application that is exiting. */
+               ret = -ENOTCONN;
+               goto error;
+       } else if (ret < 0) {
+               goto error;
+       }
+
+       health_code_update();
+
+       /* Send all streams to application. */
+       pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
+       cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
+               struct ust_app_stream stream;
+
+               ret = duplicate_stream_object(reg_stream, &stream);
+               if (ret < 0) {
+                       goto error_stream_unlock;
+               }
+
+               ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               ret = -ENOTCONN; /* Caused by app exiting. */
+                       } else if (ret == -EAGAIN) {
+                               /*
+                                * Caused by timeout.
+                                * Treat this the same way as an application
+                                * that is exiting.
+                                */
+                               WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
+                                               app->pid, stream.name,
+                                               ua_chan->name,
+                                               ua_sess->tracing_id);
+                               ret = -ENOTCONN;
+                       }
+                       (void) release_ust_app_stream(-1, &stream, app);
+                       goto error_stream_unlock;
+               }
+
+               /*
+                * The return value is not important here. This function will output an
+                * error if needed.
+                */
+               (void) release_ust_app_stream(-1, &stream, app);
+       }
+       ua_chan->is_sent = 1;
+
+error_stream_unlock:
+       pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
+error:
+       return ret;
+}
+
+/*
+ * Create the per-UID channel buffers, if needed, and send them to the application.
+ *
+ * This MUST be called with a RCU read side lock acquired.
+ * The session list lock and the session's lock must be acquired.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int create_channel_per_uid(struct ust_app *app,
+               struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
+               struct ust_app_channel *ua_chan)
+{
+       int ret;
+       struct buffer_reg_uid *reg_uid;
+       struct buffer_reg_channel *buf_reg_chan;
+       struct ltt_session *session = NULL;
+       enum lttng_error_code notification_ret;
+       struct ust_registry_channel *ust_reg_chan;
+
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(ua_chan);
+
+       DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
+
+       reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
+       /*
+        * The session creation handles the creation of this global registry
+        * object. If none can be found, there is a code flow problem or a
+        * teardown race.
+        */
+       LTTNG_ASSERT(reg_uid);
+
+       buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
+                       reg_uid);
+       if (buf_reg_chan) {
+               goto send_channel;
+       }
+
+       /* Create the buffer registry channel object. */
+       ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
+       if (ret < 0) {
+               ERR("Error creating the UST channel \"%s\" registry instance",
+                               ua_chan->name);
+               goto error;
+       }
+
+       session = session_find_by_id(ua_sess->tracing_id);
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
+       LTTNG_ASSERT(session_trylock_list());
+
+       /*
+        * Create the buffers on the consumer side. This call populates the
+        * ust app channel object with all streams and data objects.
+        */
+       ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
+                       app->bits_per_long, reg_uid->registry->reg.ust,
+                       session->most_recent_chunk_id.value);
+       if (ret < 0) {
+               ERR("Error creating UST channel \"%s\" on the consumer daemon",
+                               ua_chan->name);
+
+               /*
+                * Let's remove the previously created buffer registry channel so
+                * it's not visible anymore in the session registry.
+                */
+               ust_registry_channel_del_free(reg_uid->registry->reg.ust,
+                               ua_chan->tracing_channel_id, false);
+               buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
+               buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
+               goto error;
+       }
+
+       /*
+        * Set up the streams and add them to the session registry.
+        */
+       ret = setup_buffer_reg_channel(reg_uid->registry,
+                       ua_chan, buf_reg_chan, app);
+       if (ret < 0) {
+               ERR("Error setting up UST channel \"%s\"", ua_chan->name);
+               goto error;
+       }
+
+       /* Notify the notification subsystem of the channel's creation. */
+       pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
+       ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
+                       ua_chan->tracing_channel_id);
+       LTTNG_ASSERT(ust_reg_chan);
+       ust_reg_chan->consumer_key = ua_chan->key;
+       ust_reg_chan = NULL;
+       pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
+
+       notification_ret = notification_thread_command_add_channel(
+                       the_notification_thread_handle, session->name,
+                       lttng_credentials_get_uid(
+                                       &ua_sess->effective_credentials),
+                       lttng_credentials_get_gid(
+                                       &ua_sess->effective_credentials),
+                       ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
+                       ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
+       if (notification_ret != LTTNG_OK) {
+               ret = - (int) notification_ret;
+               ERR("Failed to add channel to notification thread");
+               goto error;
+       }
+
+send_channel:
+       /* Send buffers to the application. */
+       ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
+       if (ret < 0) {
+               if (ret != -ENOTCONN) {
+                       ERR("Error sending channel to application");
+               }
+               goto error;
+       }
+
+error:
+       if (session) {
+               session_put(session);
+       }
+       return ret;
+}
+
+/*
+ * Create the per-PID channel buffers and send them to the application.
+ *
+ * Called with UST app session lock held.
+ * The session list lock and the session's lock must be acquired.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int create_channel_per_pid(struct ust_app *app,
+               struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
+               struct ust_app_channel *ua_chan)
+{
+       int ret;
+       struct ust_registry_session *registry;
+       enum lttng_error_code cmd_ret;
+       struct ltt_session *session = NULL;
+       uint64_t chan_reg_key;
+       struct ust_registry_channel *ust_reg_chan;
+
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(ua_chan);
+
+       DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
+
+       rcu_read_lock();
+
+       registry = get_session_registry(ua_sess);
+       /* The UST app session lock is held, registry shall not be null. */
+       LTTNG_ASSERT(registry);
+
+       /* Create and add a new channel registry to session. */
+       ret = ust_registry_channel_add(registry, ua_chan->key);
+       if (ret < 0) {
+               ERR("Error creating the UST channel \"%s\" registry instance",
+                       ua_chan->name);
+               goto error;
+       }
+
+       session = session_find_by_id(ua_sess->tracing_id);
+       LTTNG_ASSERT(session);
+
+       LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
+       LTTNG_ASSERT(session_trylock_list());
+
+       /* Create and get channel on the consumer side. */
+       ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
+                       app->bits_per_long, registry,
+                       session->most_recent_chunk_id.value);
+       if (ret < 0) {
+               ERR("Error creating UST channel \"%s\" on the consumer daemon",
+                       ua_chan->name);
+               goto error_remove_from_registry;
+       }
+
+       ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
+       if (ret < 0) {
+               if (ret != -ENOTCONN) {
+                       ERR("Error sending channel to application");
+               }
+               goto error_remove_from_registry;
+       }
+
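+       /* Record the consumer key of the channel in the session registry. */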
+       chan_reg_key = ua_chan->key;
+       pthread_mutex_lock(&registry->lock);
+       ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
+       LTTNG_ASSERT(ust_reg_chan);
+       ust_reg_chan->consumer_key = ua_chan->key;
+       pthread_mutex_unlock(&registry->lock);
+
+       cmd_ret = notification_thread_command_add_channel(
+                       the_notification_thread_handle, session->name,
+                       lttng_credentials_get_uid(
+                                       &ua_sess->effective_credentials),
+                       lttng_credentials_get_gid(
+                                       &ua_sess->effective_credentials),
+                       ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
+                       ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
+       if (cmd_ret != LTTNG_OK) {
+               ret = - (int) cmd_ret;
+               ERR("Failed to add channel to notification thread");
+               goto error_remove_from_registry;
+       }
+
+error_remove_from_registry:
+       if (ret) {
+               ust_registry_channel_del_free(registry, ua_chan->key, false);
+       }
+error:
+       rcu_read_unlock();
+       if (session) {
+               session_put(session);
+       }
+       return ret;
+}
+
+/*
+ * From an already allocated ust app channel, create the channel buffers if
+ * needed and send them to the application. This MUST be called with a RCU read
+ * side lock acquired.
+ *
+ * Called with UST app session lock held.
+ *
+ * Return 0 on success or else a negative value. Returns -ENOTCONN if
+ * the application exited concurrently.
+ */
+static int ust_app_channel_send(struct ust_app *app,
+               struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
+               struct ust_app_channel *ua_chan)
+{
+       int ret;
+
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(usess->active);
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(ua_chan);
+
+       /* Handle buffer type before sending the channel to the application. */
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
+               if (ret < 0) {
+                       goto error;
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
+               if (ret < 0) {
+                       goto error;
+               }
+               break;
+       }
+       default:
+               abort();
+               ret = -EINVAL;
+               goto error;
+       }
+
+       /* Initialize ust objd object using the received handle and add it. */
+       lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
+       lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
+
+       /* If channel is not enabled, disable it on the tracer */
+       if (!ua_chan->enabled) {
+               ret = disable_ust_channel(app, ua_sess, ua_chan);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+error:
+       return ret;
+}
+
+/*
+ * Create UST app channel and return it through ua_chanp if not NULL.
+ *
+ * Called with UST app session lock and RCU read-side lock held.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
+               struct ltt_ust_channel *uchan,
+               enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
+               struct ust_app_channel **ua_chanp)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_str *ua_chan_node;
+       struct ust_app_channel *ua_chan;
+
+       /* Lookup channel in the ust app session */
+       lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
+       ua_chan_node = lttng_ht_iter_get_node_str(&iter);
+       if (ua_chan_node != NULL) {
+               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+               goto end;
+       }
+
+       ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
+       if (ua_chan == NULL) {
+               /* Only malloc can fail here */
+               ret = -ENOMEM;
+               goto error;
+       }
+       shadow_copy_channel(ua_chan, uchan);
+
+       /* Set channel type. */
+       ua_chan->attr.type = type;
+
+       /* Add the allocated channel to the session's channel hash table. */
+       lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
+end:
+       if (ua_chanp) {
+               *ua_chanp = ua_chan;
+       }
+
+       /* Everything went well. */
+       return 0;
+
+error:
+       return ret;
+}
+
+/*
+ * Create UST app event and create it on the tracer side.
+ *
+ * Must be called with the RCU read side lock held.
+ * Called with ust app session mutex held.
+ */
+static
+int create_ust_app_event(struct ust_app_session *ua_sess,
+               struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
+               struct ust_app *app)
+{
+       int ret = 0;
+       struct ust_app_event *ua_event;
+
+       ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
+       if (ua_event == NULL) {
+               /* Only failure mode of alloc_ust_app_event(). */
+               ret = -ENOMEM;
+               goto end;
+       }
+       shadow_copy_event(ua_event, uevent);
+
+       /* Create it on the tracer side */
+       ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
+       if (ret < 0) {
+               /*
+                * The event was not found previously, so it should not exist on
+                * the tracer. If the application reports that the event already
+                * existed, it means there is a bug in the sessiond or lttng-ust
+                * (or corruption, etc.)
+                */
+               if (ret == -LTTNG_UST_ERR_EXIST) {
+                       ERR("Tracer for application reported that an event being created already existed: "
+                                       "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
+                                       uevent->attr.name,
+                                       app->pid, app->ppid, app->uid,
+                                       app->gid);
+               }
+               goto error;
+       }
+
+       add_unique_ust_app_event(ua_chan, ua_event);
+
+       DBG2("UST app create event completed: app = '%s' pid = %d",
+                       app->name, app->pid);
+
+end:
+       return ret;
+
+error:
+       /* Safe to call here: the RCU read side lock is already held. */
+       delete_ust_app_event(-1, ua_event, app);
+       return ret;
+}
+
+/*
+ * Create UST app event notifier rule and create it on the tracer side.
+ *
+ * Must be called with the RCU read side lock held.
+ * Called with ust app session mutex held.
+ */
+static
+int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
+               struct ust_app *app)
+{
+       int ret = 0;
+       struct ust_app_event_notifier_rule *ua_event_notifier_rule;
+
+       ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
+       if (ua_event_notifier_rule == NULL) {
+               ret = -ENOMEM;
+               goto end;
+       }
+
+       /* Create it on the tracer side. */
+       ret = create_ust_event_notifier(app, ua_event_notifier_rule);
+       if (ret < 0) {
+               /*
+                * The event notifier was not found previously, so it should not
+                * exist on the tracer. If the application reports that it already
+                * existed, it means there is a bug in the sessiond or lttng-ust
+                * (or corruption, etc.)
+                */
+               if (ret == -LTTNG_UST_ERR_EXIST) {
+                       ERR("Tracer for application reported that an event notifier being created already exists: "
+                                       "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
+                                       lttng_trigger_get_tracer_token(trigger),
+                                       app->pid, app->ppid, app->uid,
+                                       app->gid);
+               }
+               goto error;
+       }
+
+       lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
+                       &ua_event_notifier_rule->node);
+
+       DBG2("UST app create token event rule completed: app = '%s', pid = %d, token = %" PRIu64,
+                       app->name, app->pid, lttng_trigger_get_tracer_token(trigger));
+
+       goto end;
+
+error:
+       /* The RCU read side lock is already being held by the caller. */
+       delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
+end:
+       return ret;
+}
+
+/*
+ * Create UST metadata and open it on the tracer side.
+ *
+ * Called with UST app session lock held and RCU read side lock.
+ */
+static int create_ust_app_metadata(struct ust_app_session *ua_sess,
+               struct ust_app *app, struct consumer_output *consumer)
+{
+       int ret = 0;
+       struct ust_app_channel *metadata;
+       struct consumer_socket *socket;
+       struct ust_registry_session *registry;
+       struct ltt_session *session = NULL;
+
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(consumer);
+
+       registry = get_session_registry(ua_sess);
+       /* The UST app session lock is held; the registry shall not be null. */
+       LTTNG_ASSERT(registry);
+
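+       /* Hold the registry lock while checking and updating the metadata key. */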
+       pthread_mutex_lock(&registry->lock);
+
+       /* Metadata already exists for this registry or it was closed previously */
+       if (registry->metadata_key || registry->metadata_closed) {
+               ret = 0;
+               goto error;
+       }
+
+       /* Allocate UST metadata */
+       metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
+       if (!metadata) {
+               /* malloc() failed */
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
+
+       /* Need one fd for the channel. */
+       ret = lttng_fd_get(LTTNG_FD_APPS, 1);
+       if (ret < 0) {
+               ERR("Exhausted number of available FD upon create metadata");
+               goto error;
+       }
+
+       /* Get the right consumer socket for the application. */
+       socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
+       if (!socket) {
+               ret = -EINVAL;
+               goto error_consumer;
+       }
+
+       /*
+        * Keep the metadata key so we can identify it on the consumer side. Assign it
+        * to the registry *before* we ask the consumer so we avoid the race where the
+        * consumer requests the metadata before the ask_channel call on our side
+        * has returned.
+        */
+       registry->metadata_key = metadata->key;
+
+       session = session_find_by_id(ua_sess->tracing_id);
+       LTTNG_ASSERT(session);
+
+       LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
+       LTTNG_ASSERT(session_trylock_list());
+
+       /*
+        * Ask the consumer to create the metadata channel. The metadata object
+        * will be created by the consumer and kept there. However, the stream is
+        * never added or monitored until we do a first push of metadata to the
+        * consumer.
+        */
+       ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
+                       registry, session->current_trace_chunk);
+       if (ret < 0) {
+               /* Nullify the metadata key so we don't try to close it later on. */
+               registry->metadata_key = 0;
+               goto error_consumer;
+       }
+
+       /*
+        * The setup command will make the metadata stream be sent to the relayd,
+        * if applicable, and to the thread managing the metadata. This is important
+        * because after this point, if an error occurs, the only way the stream
+        * can be deleted is for it to be monitored in the consumer.
+        */
+       ret = consumer_setup_metadata(socket, metadata->key);
+       if (ret < 0) {
+               /* Nullify the metadata key so we don't try to close it later on. */
+               registry->metadata_key = 0;
+               goto error_consumer;
+       }
+
+       DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
+                       metadata->key, app->pid);
+
+error_consumer:
+       lttng_fd_put(LTTNG_FD_APPS, 1);
+       delete_ust_app_channel(-1, metadata, app);
+error:
+       pthread_mutex_unlock(&registry->lock);
+       if (session) {
+               session_put(session);
+       }
+       return ret;
+}
+
+/*
+ * Return ust app pointer or NULL if not found. RCU read side lock MUST be
+ * acquired before calling this function.
+ */
+struct ust_app *ust_app_find_by_pid(pid_t pid)
+{
+       struct ust_app *app = NULL;
+       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_iter iter;
+
+       lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
+       node = lttng_ht_iter_get_node_ulong(&iter);
+       if (node == NULL) {
+               DBG2("UST app not found with pid %d", pid);
+               goto error;
+       }
+
+       DBG2("Found UST app by pid %d", pid);
+
+       app = caa_container_of(node, struct ust_app, pid_n);
+
+error:
+       return app;
+}
+
+/*
+ * Allocate and initialize a UST app object using the registration information and
+ * the command socket. This is called when the command socket connects to the
+ * session daemon.
+ *
+ * The object is returned on success or else NULL.
+ */
+struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
+{
+       int ret;
+       struct ust_app *lta = NULL;
+       struct lttng_pipe *event_notifier_event_source_pipe = NULL;
+
+       LTTNG_ASSERT(msg);
+       LTTNG_ASSERT(sock >= 0);
+
+       DBG3("UST app creating application for socket %d", sock);
+
+       if ((msg->bits_per_long == 64 &&
+                           (uatomic_read(&the_ust_consumerd64_fd) ==
+                                           -EINVAL)) ||
+                       (msg->bits_per_long == 32 &&
+                                       (uatomic_read(&the_ust_consumerd32_fd) ==
+                                                       -EINVAL))) {
+               ERR("Registration failed: application \"%s\" (pid: %d) uses "
+                               "%d-bit longs, but no consumerd for this size is available.\n",
+                               msg->name, msg->pid, msg->bits_per_long);
+               goto error;
+       }
+
+       /*
+        * Reserve the two file descriptors of the event source pipe. The write
+        * end will be closed once it is passed to the application, at which
+        * point a single 'put' will be performed.
+        */
+       ret = lttng_fd_get(LTTNG_FD_APPS, 2);
+       if (ret) {
+               ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s', pid = %d",
+                               msg->name, (int) msg->pid);
+               goto error;
+       }
+
+       event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!event_notifier_event_source_pipe) {
+               PERROR("Failed to open application event source pipe: '%s' (pid = %d)",
+                               msg->name, msg->pid);
+               goto error;
+       }
+
+       lta = (ust_app *) zmalloc(sizeof(struct ust_app));
+       if (lta == NULL) {
+               PERROR("malloc");
+               goto error_free_pipe;
+       }
+
+       lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
+
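+       /* Copy the registration information provided by the application. */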
+       lta->ppid = msg->ppid;
+       lta->uid = msg->uid;
+       lta->gid = msg->gid;
+
+       lta->bits_per_long = msg->bits_per_long;
+       lta->uint8_t_alignment = msg->uint8_t_alignment;
+       lta->uint16_t_alignment = msg->uint16_t_alignment;
+       lta->uint32_t_alignment = msg->uint32_t_alignment;
+       lta->uint64_t_alignment = msg->uint64_t_alignment;
+       lta->long_alignment = msg->long_alignment;
+       lta->byte_order = msg->byte_order;
+
+       lta->v_major = msg->major;
+       lta->v_minor = msg->minor;
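+
+       /* Allocate the hash tables used to track the application's sessions and objects. */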
+       lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+       lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+       lta->notify_sock = -1;
+       lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+
+       /* Copy name and make sure it's NULL terminated. */
+       strncpy(lta->name, msg->name, sizeof(lta->name));
+       lta->name[UST_APP_PROCNAME_LEN] = '\0';
+
+       /*
+        * The application's compatibility is checked when the registration
+        * information is received, before this function is called. So, at this
+        * point, the application can work with this session daemon.
+        */
+       lta->compatible = 1;
+
+       lta->pid = msg->pid;
+       lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
+       lta->sock = sock;
+       pthread_mutex_init(&lta->sock_lock, NULL);
+       lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
+
+       CDS_INIT_LIST_HEAD(&lta->teardown_head);
+       return lta;
+
+error_free_pipe:
+       lttng_pipe_destroy(event_notifier_event_source_pipe);
+       lttng_fd_put(LTTNG_FD_APPS, 2);
+error:
+       return NULL;
+}
+
+/*
+ * For a given application object, add it to every hash table.
+ */
+void ust_app_add(struct ust_app *app)
+{
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(app->notify_sock >= 0);
+
+       app->registration_time = time(NULL);
+
+       rcu_read_lock();
+
+       /*
+        * On a re-registration, we want to kick out the previous registration of
+        * that pid
+        */
+       lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
+
+       /*
+        * The socket _should_ be unique until _we_ call close. So, an add_unique
+        * is used for ust_app_ht_by_sock, which fails an assertion if the entry
+        * is already in the table.
+        */
+       lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
+
+       /* Add application to the notify socket hash table. */
+       lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
+       lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
+
+       DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock =%d name:%s "
+                       "notify_sock =%d (version %d.%d)", app->pid, app->ppid, app->uid,
+                       app->gid, app->sock, app->name, app->notify_sock, app->v_major,
+                       app->v_minor);
+
+       rcu_read_unlock();
+}
+
+/*
+ * Set the application version into the object.
+ *
+ * Return 0 on success, else a negative value: either an errno code or an
+ * LTTng-UST error code.
+ */
+int ust_app_version(struct ust_app *app)
+{
+       int ret;
+
+       LTTNG_ASSERT(app);
+
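+       /* Query the tracer version over the application's command socket. */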
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_tracer_version(app->sock, &app->version);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -LTTNG_UST_ERR_EXITING || ret == -EPIPE) {
+                       DBG3("UST app version failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app version failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app version failed with ret %d: pid = %d, sock = %d",
+                                       ret, app->pid, app->sock);
+               }
+       }
+
+       return ret;
+}
+
+bool ust_app_supports_notifiers(const struct ust_app *app)
+{
+       return app->v_major >= 9;
+}
+
+bool ust_app_supports_counters(const struct ust_app *app)
+{
+       return app->v_major >= 9;
+}
+
+/*
+ * Set up the base event notifier group.
+ *
+ * Return 0 on success, else a negative value: either an errno code or an
+ * LTTng-UST error code.
+ */
+int ust_app_setup_event_notifier_group(struct ust_app *app)
+{
+       int ret;
+       int event_pipe_write_fd;
+       struct lttng_ust_abi_object_data *event_notifier_group = NULL;
+       enum lttng_error_code lttng_ret;
+       enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
+
+       LTTNG_ASSERT(app);
+
+       if (!ust_app_supports_notifiers(app)) {
+               ret = -ENOSYS;
+               goto error;
+       }
+
+       /* Get the write side of the pipe. */
+       event_pipe_write_fd = lttng_pipe_get_writefd(
+                       app->event_notifier_group.event_pipe);
+
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_create_event_notifier_group(app->sock,
+                       event_pipe_write_fd, &event_notifier_group);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       ret = 0;
+                       DBG3("UST app create event notifier group failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       ret = 0;
+                       WARN("UST app create event notifier group failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app create event notifier group failed with ret %d: pid = %d, sock = %d, event_pipe_write_fd: %d",
+                                       ret, app->pid, app->sock, event_pipe_write_fd);
+               }
+               goto error;
+       }
+
+       ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
+       if (ret) {
+               ERR("Failed to close write end of the application's event source pipe: app = '%s' (pid = %d)",
+                               app->name, app->pid);
+               goto error;
+       }
+
+       /*
+        * Release the file descriptor that was reserved for the write-end of
+        * the pipe.
+        */
+       lttng_fd_put(LTTNG_FD_APPS, 1);
+
+       lttng_ret = notification_thread_command_add_tracer_event_source(
+                       the_notification_thread_handle,
+                       lttng_pipe_get_readfd(
+                                       app->event_notifier_group.event_pipe),
+                       LTTNG_DOMAIN_UST);
+       if (lttng_ret != LTTNG_OK) {
+               ERR("Failed to add tracer event source to notification thread");
+               ret = -1;
+               goto error;
+       }
+
+       /* Assign handle only when the complete setup is valid. */
+       app->event_notifier_group.object = event_notifier_group;
+
+       event_notifier_error_accounting_status =
+                       event_notifier_error_accounting_register_app(app);
+       switch (event_notifier_error_accounting_status) {
+       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
+               break;
+       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED:
+               DBG3("Failed to setup event notifier error accounting (application does not support notifier error accounting): app socket fd = %d, app name = '%s', app pid = %d",
+                               app->sock, app->name, (int) app->pid);
+               ret = 0;
+               goto error_accounting;
+       case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD:
+               DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d, app name = '%s', app pid = %d",
+                               app->sock, app->name, (int) app->pid);
+               ret = 0;
+               goto error_accounting;
+       default:
+               ERR("Failed to setup event notifier error accounting for app");
+               ret = -1;
+               goto error_accounting;
+       }
+
+       return ret;
+
+error_accounting:
+       lttng_ret = notification_thread_command_remove_tracer_event_source(
+                       the_notification_thread_handle,
+                       lttng_pipe_get_readfd(
+                                       app->event_notifier_group.event_pipe));
+       if (lttng_ret != LTTNG_OK) {
+               ERR("Failed to remove application tracer event source from notification thread");
+       }
+
+error:
+       lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.object);
+       free(app->event_notifier_group.object);
+       app->event_notifier_group.object = NULL;
+       return ret;
+}
+
+/*
+ * Unregister app by removing it from the global traceable app list and freeing
+ * the data struct.
+ *
+ * The socket is already closed at this point, so there is no need to close it here.
+ */
+void ust_app_unregister(int sock)
+{
+       struct ust_app *lta;
+       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_iter ust_app_sock_iter;
+       struct lttng_ht_iter iter;
+       struct ust_app_session *ua_sess;
+       int ret;
+
+       rcu_read_lock();
+
+       /* Get the node reference for a call_rcu */
+       lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
+       node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
+       LTTNG_ASSERT(node);
+
+       lta = caa_container_of(node, struct ust_app, sock_n);
+       DBG("PID %d unregistering with sock %d", lta->pid, sock);
+
+       /*
+        * For per-PID buffers, perform "push metadata" and flush all
+        * application streams before removing app from hash tables,
+        * ensuring proper behavior of data_pending check.
+        * Remove sessions so they are not visible during deletion.
+        */
+       cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
+                       node.node) {
+               struct ust_registry_session *registry;
+
+               ret = lttng_ht_del(lta->sessions, &iter);
+               if (ret) {
+                       /* The session was already removed and is scheduled for teardown. */
+                       continue;
+               }
+
+               if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
+                       (void) ust_app_flush_app_session(lta, ua_sess);
+               }
+
+               /*
+                * Add session to list for teardown. This is safe since at this point we
+                * are the only one using this list.
+                */
+               pthread_mutex_lock(&ua_sess->lock);
+
+               if (ua_sess->deleted) {
+                       pthread_mutex_unlock(&ua_sess->lock);
+                       continue;
+               }
+
+               /*
+                * Normally, this is done in the delete session process which is
+                * executed in the call_rcu below. However, upon unregistration we can't
+                * afford to wait for the grace period before pushing data or else the
+                * data pending feature can race between the unregistration and stop
+                * command, where the data pending command is sent *before* the grace
+                * period has ended.
+                *
+                * The close metadata below nullifies the metadata pointer in the
+                * session so the delete session will NOT push/close a second time.
+                */
+               registry = get_session_registry(ua_sess);
+               if (registry) {
+                       /* Push metadata for application before freeing the application. */
+                       (void) push_metadata(registry, ua_sess->consumer);
+
+                       /*
+                        * Don't ask to close metadata for global per-UID buffers; in that
+                        * case, metadata is only closed when the trace session is destroyed.
+                        * Also, the previous push metadata could have flagged the metadata
+                        * registry to close, so don't send a close command if it is closed.
+                        */
+                       if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
+                               /* And ask to close it for this session registry. */
+                               (void) close_metadata(registry, ua_sess->consumer);
+                       }
+               }
+               cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
+
+               pthread_mutex_unlock(&ua_sess->lock);
+       }
+
+       /* Remove application from the socket hash table. */
+       ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
+       LTTNG_ASSERT(!ret);
+
+       /*
+        * Remove application from notify hash table. The thread handling the
+        * notify socket could have deleted the node, so ignore errors: either
+        * way it's valid. The close of that socket is handled by the
+        * apps_notify_thread.
+        */
+       iter.iter.node = &lta->notify_sock_n.node;
+       (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+
+       /*
+        * Ignore return value since the node might have been removed before by an
+        * add replace during app registration because the PID can be reassigned by
+        * the OS.
+        */
+       iter.iter.node = &lta->pid_n.node;
+       ret = lttng_ht_del(ust_app_ht, &iter);
+       if (ret) {
+               DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
+                               lta->pid);
+       }
+
+       /* Free memory */
+       call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
+
+       rcu_read_unlock();
+       return;
+}
+
+/*
+ * Fill the events array with the names of all events of all registered apps.
+ */
+int ust_app_list_events(struct lttng_event **events)
+{
+       int ret, handle;
+       size_t nbmem, count = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+       struct lttng_event *tmp_event;
+
+       nbmem = UST_APP_EVENT_LIST_SIZE;
+       tmp_event = (lttng_event *) zmalloc(nbmem * sizeof(struct lttng_event));
+       if (tmp_event == NULL) {
+               PERROR("zmalloc ust app events");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       rcu_read_lock();
+
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               struct lttng_ust_abi_tracepoint_iter uiter;
+
+               health_code_update();
+
+               if (!app->compatible) {
+                       /*
+                        * TODO: In time, we should notify the caller of this error by
+                        * telling them that this is a version error.
+                        */
+                       continue;
+               }
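+               /* Open a tracepoint list handle on the application's command socket. */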
+               pthread_mutex_lock(&app->sock_lock);
+               handle = lttng_ust_ctl_tracepoint_list(app->sock);
+               if (handle < 0) {
+                       if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+                               ERR("UST app list events getting handle failed for app pid %d",
+                                               app->pid);
+                       }
+                       pthread_mutex_unlock(&app->sock_lock);
+                       continue;
+               }
+
+               while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle,
+                                       &uiter)) != -LTTNG_UST_ERR_NOENT) {
+                       /* Handle ustctl error. */
+                       if (ret < 0) {
+                               int release_ret;
+
+                               if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+                                       ERR("UST app tp list get failed for app %d with ret %d",
+                                                       app->sock, ret);
+                               } else {
+                                       DBG3("UST app tp list get failed. Application is dead");
+                                       break;
+                               }
+                               free(tmp_event);
+                               release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
+                               if (release_ret < 0 &&
+                                               release_ret != -LTTNG_UST_ERR_EXITING &&
+                                               release_ret != -EPIPE) {
+                                       ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+                               }
+                               pthread_mutex_unlock(&app->sock_lock);
+                               goto rcu_error;
+                       }
+
+                       health_code_update();
+                       if (count >= nbmem) {
+                               /* In case the realloc fails, we free the memory */
+                               struct lttng_event *new_tmp_event;
+                               size_t new_nbmem;
+
+                               new_nbmem = nbmem << 1;
+                               DBG2("Reallocating event list from %zu to %zu entries",
+                                               nbmem, new_nbmem);
+                               new_tmp_event = (lttng_event *) realloc(tmp_event,
+                                       new_nbmem * sizeof(struct lttng_event));
+                               if (new_tmp_event == NULL) {
+                                       int release_ret;
+
+                                       PERROR("realloc ust app events");
+                                       free(tmp_event);
+                                       ret = -ENOMEM;
+                                       release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
+                                       if (release_ret < 0 &&
+                                                       release_ret != -LTTNG_UST_ERR_EXITING &&
+                                                       release_ret != -EPIPE) {
+                                               ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+                                       }
+                                       pthread_mutex_unlock(&app->sock_lock);
+                                       goto rcu_error;
+                               }
+                               /* Zero the new memory */
+                               memset(new_tmp_event + nbmem, 0,
+                                       (new_nbmem - nbmem) * sizeof(struct lttng_event));
+                               nbmem = new_nbmem;
+                               tmp_event = new_tmp_event;
+                       }
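+                       /* Copy the tracepoint information into the event list entry. */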
+                       memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
+                       tmp_event[count].loglevel = uiter.loglevel;
+                       tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
+                       tmp_event[count].pid = app->pid;
+                       tmp_event[count].enabled = -1;
+                       count++;
+               }
+               ret = lttng_ust_ctl_release_handle(app->sock, handle);
+               pthread_mutex_unlock(&app->sock_lock);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else if (ret == -EAGAIN) {
+                               WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
+                                               app->pid, app->sock);
+                       } else {
+                               ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
+                                               ret, app->pid, app->sock);
+                       }
+               }
+       }
+
+       ret = count;
+       *events = tmp_event;
+
+       DBG2("UST app list events done (%zu events)", count);
+
+rcu_error:
+       rcu_read_unlock();
+error:
+       health_code_update();
+       return ret;
+}
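
Both listing loops above grow their result array with the same pattern: double the capacity with realloc(), leave the old pointer untouched if the allocation fails, zero only the newly appended tail, then adopt the new pointer and capacity. A minimal standalone sketch of that growth pattern (plain C++, hypothetical names, not lttng-tools code):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main()
    {
            std::size_t capacity = 4, count = 0;
            int *values = static_cast<int *>(std::calloc(capacity, sizeof(*values)));

            if (!values) {
                    return 1;
            }

            for (int i = 0; i < 100; i++) {
                    if (count >= capacity) {
                            /* Double the capacity; 'values' stays valid if realloc fails. */
                            const std::size_t new_capacity = capacity << 1;
                            int *tmp = static_cast<int *>(std::realloc(
                                    values, new_capacity * sizeof(*values)));

                            if (!tmp) {
                                    std::perror("realloc");
                                    std::free(values);
                                    return 1;
                            }

                            /* Zero only the freshly appended tail. */
                            std::memset(tmp + capacity, 0,
                                        (new_capacity - capacity) * sizeof(*values));
                            values = tmp;
                            capacity = new_capacity;
                    }

                    values[count++] = i;
            }

            std::printf("%zu entries, capacity %zu\n", count, capacity);
            std::free(values);
            return 0;
    }

Keeping the old pointer valid on allocation failure is what lets the error paths above free tmp_event and bail out cleanly.
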
+
+/*
+ * Fill the fields array with all the event fields of all registered apps.
+ */
+int ust_app_list_event_fields(struct lttng_event_field **fields)
+{
+       int ret, handle;
+       size_t nbmem, count = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+       struct lttng_event_field *tmp_event;
+
+       nbmem = UST_APP_EVENT_LIST_SIZE;
+       tmp_event = (lttng_event_field *) zmalloc(nbmem * sizeof(struct lttng_event_field));
+       if (tmp_event == NULL) {
+               PERROR("zmalloc ust app event fields");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       rcu_read_lock();
+
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               struct lttng_ust_abi_field_iter uiter;
+
+               health_code_update();
+
+               if (!app->compatible) {
+                       /*
+                        * TODO: In time, we should notify the caller of this error by
+                        * indicating that this is a version error.
+                        */
+                       continue;
+               }
+               pthread_mutex_lock(&app->sock_lock);
+               handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
+               if (handle < 0) {
+                       if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+                               ERR("UST app list field getting handle failed for app pid %d",
+                                               app->pid);
+                       }
+                       pthread_mutex_unlock(&app->sock_lock);
+                       continue;
+               }
+
+               while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle,
+                                       &uiter)) != -LTTNG_UST_ERR_NOENT) {
+                       /* Handle ustctl error. */
+                       if (ret < 0) {
+                               int release_ret;
+
+                               if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+                                       ERR("UST app tp list field failed for app %d with ret %d",
+                                                       app->sock, ret);
+                               } else {
+                                       DBG3("UST app tp list field failed. Application is dead");
+                                       break;
+                               }
+                               free(tmp_event);
+                               release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
+                               pthread_mutex_unlock(&app->sock_lock);
+                               if (release_ret < 0 &&
+                                               release_ret != -LTTNG_UST_ERR_EXITING &&
+                                               release_ret != -EPIPE) {
+                                       ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+                               }
+                               goto rcu_error;
+                       }
+
+                       health_code_update();
+                       if (count >= nbmem) {
+                               /* In case the realloc fails, we free the memory */
+                               struct lttng_event_field *new_tmp_event;
+                               size_t new_nbmem;
+
+                               new_nbmem = nbmem << 1;
+                               DBG2("Reallocating event field list from %zu to %zu entries",
+                                               nbmem, new_nbmem);
+                               new_tmp_event = (lttng_event_field *) realloc(tmp_event,
+                                       new_nbmem * sizeof(struct lttng_event_field));
+                               if (new_tmp_event == NULL) {
+                                       int release_ret;
+
+                                       PERROR("realloc ust app event fields");
+                                       free(tmp_event);
+                                       ret = -ENOMEM;
+                                       release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
+                                       pthread_mutex_unlock(&app->sock_lock);
+                                       if (release_ret < 0 &&
+                                                       release_ret != -LTTNG_UST_ERR_EXITING &&
+                                                       release_ret != -EPIPE) {
+                                               ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+                                       }
+                                       goto rcu_error;
+                               }
+                               /* Zero the new memory */
+                               memset(new_tmp_event + nbmem, 0,
+                                       (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
+                               nbmem = new_nbmem;
+                               tmp_event = new_tmp_event;
+                       }
+
+                       memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+                       /* Mapping between these enums matches 1 to 1. */
+                       tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
+                       tmp_event[count].nowrite = uiter.nowrite;
+
+                       memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+                       tmp_event[count].event.loglevel = uiter.loglevel;
+                       tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
+                       tmp_event[count].event.pid = app->pid;
+                       tmp_event[count].event.enabled = -1;
+                       count++;
+               }
+               ret = lttng_ust_ctl_release_handle(app->sock, handle);
+               pthread_mutex_unlock(&app->sock_lock);
+               if (ret < 0 &&
+                               ret != -LTTNG_UST_ERR_EXITING &&
+                               ret != -EPIPE) {
+                       ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
+               }
+       }
+
+       ret = count;
+       *fields = tmp_event;
+
+       DBG2("UST app list event fields done (%zu events)", count);
+
+rcu_error:
+       rcu_read_unlock();
+error:
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Free and clean all traceable apps of the global list.
+ *
+ * Should _NOT_ be called with RCU read-side lock held.
+ */
+void ust_app_clean_list(void)
+{
+       int ret;
+       struct ust_app *app;
+       struct lttng_ht_iter iter;
+
+       DBG2("UST app cleaning registered apps hash table");
+
+       rcu_read_lock();
+
+       /* Cleanup notify socket hash table */
+       if (ust_app_ht_by_notify_sock) {
+               cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
+                               notify_sock_n.node) {
+                       /*
+                        * Assert that all notifiers are gone as all triggers
+                        * are unregistered prior to this clean-up.
+                        */
+                       LTTNG_ASSERT(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
+
+                       ust_app_notify_sock_unregister(app->notify_sock);
+               }
+       }
+
+       if (ust_app_ht) {
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       ret = lttng_ht_del(ust_app_ht, &iter);
+                       LTTNG_ASSERT(!ret);
+                       call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+               }
+       }
+
+       /* Cleanup socket hash table */
+       if (ust_app_ht_by_sock) {
+               cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
+                               sock_n.node) {
+                       ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
+                       LTTNG_ASSERT(!ret);
+               }
+       }
+
+       rcu_read_unlock();
+
+       /* Destroy is done only when the ht is empty */
+       if (ust_app_ht) {
+               ht_cleanup_push(ust_app_ht);
+       }
+       if (ust_app_ht_by_sock) {
+               ht_cleanup_push(ust_app_ht_by_sock);
+       }
+       if (ust_app_ht_by_notify_sock) {
+               ht_cleanup_push(ust_app_ht_by_notify_sock);
+       }
+}
+
+/*
+ * Allocate the UST app hash tables.
+ */
+int ust_app_ht_alloc(void)
+{
+       ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+       if (!ust_app_ht) {
+               return -1;
+       }
+       ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+       if (!ust_app_ht_by_sock) {
+               return -1;
+       }
+       ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+       if (!ust_app_ht_by_notify_sock) {
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * For a specific UST session, disable the channel for all registered apps.
+ */
+int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_str *ua_chan_node;
+       struct ust_app *app;
+       struct ust_app_session *ua_sess;
+       struct ust_app_channel *ua_chan;
+
+       LTTNG_ASSERT(usess->active);
+       DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
+                       uchan->name, usess->id);
+
+       rcu_read_lock();
+
+       /* For every registered application */
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               struct lttng_ht_iter uiter;
+               if (!app->compatible) {
+                       /*
+                        * TODO: In time, we should notify the caller of this error by
+                        * indicating that this is a version error.
+                        */
+                       continue;
+               }
+               ua_sess = lookup_session_by_app(usess, app);
+               if (ua_sess == NULL) {
+                       continue;
+               }
+
+               /* Get channel */
+               lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
+               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+               /* If the session is found for the app, the channel must be there */
+               LTTNG_ASSERT(ua_chan_node);
+
+               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+               /* The channel must not already be disabled */
+               LTTNG_ASSERT(ua_chan->enabled == 1);
+
+               /* Disable channel onto application */
+               ret = disable_ust_app_channel(ua_sess, ua_chan, app);
+               if (ret < 0) {
+                       /* XXX: We might want to report this error at some point... */
+                       continue;
+               }
+       }
+
+       rcu_read_unlock();
+       return ret;
+}
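
The lookup above returns a pointer to the node member embedded in struct ust_app_channel, and caa_container_of() (liburcu) maps that member pointer back to the enclosing structure. A minimal standalone illustration of the same container-of idea, using hypothetical types and a hand-rolled macro rather than liburcu's:

    #include <cstddef>
    #include <cstdio>

    /* Simplified container_of: recover the enclosing object from a pointer
     * to one of its members, using the member's offset within the type. */
    #define container_of_sketch(ptr, type, member) \
            reinterpret_cast<type *>( \
                    reinterpret_cast<char *>(ptr) - offsetof(type, member))

    struct node {
            const char *key;
    };

    struct channel {
            int enabled;
            struct node name_node; /* Embedded in a hash table in the real code. */
    };

    int main()
    {
            struct channel chan = { 1, { "my_channel" } };

            /* A hash-table lookup would hand back the embedded node... */
            struct node *looked_up = &chan.name_node;

            /* ...and container-of recovers the enclosing channel. */
            struct channel *parent =
                    container_of_sketch(looked_up, struct channel, name_node);

            std::printf("channel '%s', enabled = %d\n",
                        parent->name_node.key, parent->enabled);
            return 0;
    }

The offset arithmetic is why the hash tables can store lightweight nodes while callers still recover the full channel object.
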
+
+/*
+ * For a specific UST session, enable the channel for all registered apps.
+ */
+int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+       struct ust_app_session *ua_sess;
+
+       LTTNG_ASSERT(usess->active);
+       DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
+                       uchan->name, usess->id);
+
+       rcu_read_lock();
+
+       /* For every registered application */
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               if (!app->compatible) {
+                       /*
+                        * TODO: In time, we should notify the caller of this error by
+                        * indicating that this is a version error.
+                        */
+                       continue;
+               }
+               ua_sess = lookup_session_by_app(usess, app);
+               if (ua_sess == NULL) {
+                       continue;
+               }
+
+               /* Enable channel onto application */
+               ret = enable_ust_app_channel(ua_sess, uchan, app);
+               if (ret < 0) {
+                       /* XXX: We might want to report this error at some point... */
+                       continue;
+               }
+       }
+
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Disable an event in a channel and for a specific session.
+ */
+int ust_app_disable_event_glb(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter, uiter;
+       struct lttng_ht_node_str *ua_chan_node;
+       struct ust_app *app;
+       struct ust_app_session *ua_sess;
+       struct ust_app_channel *ua_chan;
+       struct ust_app_event *ua_event;
+
+       LTTNG_ASSERT(usess->active);
+       DBG("UST app disabling event %s for all apps in channel "
+                       "%s for session id %" PRIu64,
+                       uevent->attr.name, uchan->name, usess->id);
+
+       rcu_read_lock();
+
+       /* For all registered applications */
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               if (!app->compatible) {
+                       /*
+                        * TODO: In time, we should notify the caller of this error by
+                        * indicating that this is a version error.
+                        */
+                       continue;
+               }
+               ua_sess = lookup_session_by_app(usess, app);
+               if (ua_sess == NULL) {
+                       /* Next app */
+                       continue;
+               }
+
+               /* Lookup channel in the ust app session */
+               lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
+               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+               if (ua_chan_node == NULL) {
+                       DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d. "
+                                       "Skipping", uchan->name, usess->id, app->pid);
+                       continue;
+               }
+               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+
+               ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
+                               uevent->filter, uevent->attr.loglevel,
+                               uevent->exclusion);
+               if (ua_event == NULL) {
+                       DBG2("Event %s not found in channel %s for app pid %d. "
+                                       "Skipping", uevent->attr.name, uchan->name, app->pid);
+                       continue;
+               }
+
+               ret = disable_ust_app_event(ua_sess, ua_event, app);
+               if (ret < 0) {
+                       /* XXX: Report error someday... */
+                       continue;
+               }
+       }
+
+       rcu_read_unlock();
+       return ret;
+}
+
+/* The ua_sess lock must be held by the caller. */
+static
+int ust_app_channel_create(struct ltt_ust_session *usess,
+               struct ust_app_session *ua_sess,
+               struct ltt_ust_channel *uchan, struct ust_app *app,
+               struct ust_app_channel **_ua_chan)
+{
+       int ret = 0;
+       struct ust_app_channel *ua_chan = NULL;
+
+       LTTNG_ASSERT(ua_sess);
+       ASSERT_LOCKED(ua_sess->lock);
+
+       if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
+                    sizeof(uchan->name))) {
+               copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
+                       &uchan->attr);
+               ret = 0;
+       } else {
+               struct ltt_ust_context *uctx = NULL;
+
+               /*
+                * Create channel onto application and synchronize its
+                * configuration.
+                */
+               ret = ust_app_channel_allocate(ua_sess, uchan,
+                       LTTNG_UST_ABI_CHAN_PER_CPU, usess,
+                       &ua_chan);
+               if (ret < 0) {
+                       goto error;
+               }
+
+               ret = ust_app_channel_send(app, usess,
+                       ua_sess, ua_chan);
+               if (ret) {
+                       goto error;
+               }
+
+               /* Add contexts. */
+               cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
+                       ret = create_ust_app_channel_context(ua_chan,
+                               &uctx->ctx, app);
+                       if (ret) {
+                               goto error;
+                       }
+               }
+       }
+
+error:
+       if (ret < 0) {
+               switch (ret) {
+               case -ENOTCONN:
+                       /*
+                        * The application's socket is not valid. Either a bad socket
+                        * or a timeout on it. We can't inform the caller that the
+                        * session failed for this specific app, so let's continue here.
+                        */
+                       ret = 0;        /* Not an error. */
+                       break;
+               case -ENOMEM:
+               default:
+                       break;
+               }
+       }
+
+       if (ret == 0 && _ua_chan) {
+               /*
+                * Only return the application's channel on success. Note
+                * that the channel can still be part of the application's
+                * channel hashtable on error.
+                */
+               *_ua_chan = ua_chan;
+       }
+       return ret;
+}
+
+/*
+ * Enable event for a specific session and channel on the tracer.
+ */
+int ust_app_enable_event_glb(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter, uiter;
+       struct lttng_ht_node_str *ua_chan_node;
+       struct ust_app *app;
+       struct ust_app_session *ua_sess;
+       struct ust_app_channel *ua_chan;
+       struct ust_app_event *ua_event;
+
+       LTTNG_ASSERT(usess->active);
+       DBG("UST app enabling event %s for all apps for session id %" PRIu64,
+                       uevent->attr.name, usess->id);
+
+       /*
+        * NOTE: At this point, this function is called only if the session and
+        * channel passed are already created for all apps and enabled on the
+        * tracer.
+        */
+
+       rcu_read_lock();
+
+       /* For all registered applications */
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               if (!app->compatible) {
+                       /*
+                        * TODO: In time, we should notify the caller of this error by
+                        * indicating that this is a version error.
+                        */
+                       continue;
+               }
+               ua_sess = lookup_session_by_app(usess, app);
+               if (!ua_sess) {
+                       /* The application has a problem or is probably dead. */
+                       continue;
+               }
+
+               pthread_mutex_lock(&ua_sess->lock);
+
+               if (ua_sess->deleted) {
+                       pthread_mutex_unlock(&ua_sess->lock);
+                       continue;
+               }
+
+               /* Lookup channel in the ust app session */
+               lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
+               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+               /*
+                * It is possible that the channel cannot be found if
+                * the channel/event creation occurs concurrently with
+                * an application exit.
+                */
+               if (!ua_chan_node) {
+                       pthread_mutex_unlock(&ua_sess->lock);
+                       continue;
+               }
+
+               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+
+               /* Get event node */
+               ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
+                               uevent->filter, uevent->attr.loglevel, uevent->exclusion);
+               if (ua_event == NULL) {
+                       DBG3("UST app enable event %s not found for app PID %d. "
+                                       "Skipping app", uevent->attr.name, app->pid);
+                       goto next_app;
+               }
+
+               ret = enable_ust_app_event(ua_sess, ua_event, app);
+               if (ret < 0) {
+                       pthread_mutex_unlock(&ua_sess->lock);
+                       goto error;
+               }
+       next_app:
+               pthread_mutex_unlock(&ua_sess->lock);
+       }
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * For a specific existing UST session and UST channel, creates the event for
+ * all registered apps.
+ */
+int ust_app_create_event_glb(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter, uiter;
+       struct lttng_ht_node_str *ua_chan_node;
+       struct ust_app *app;
+       struct ust_app_session *ua_sess;
+       struct ust_app_channel *ua_chan;
+
+       LTTNG_ASSERT(usess->active);
+       DBG("UST app creating event %s for all apps for session id %" PRIu64,
+                       uevent->attr.name, usess->id);
+
+       rcu_read_lock();
+
+       /* For all registered applications */
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               if (!app->compatible) {
+                       /*
+                        * TODO: In time, we should notify the caller of this error by
+                        * indicating that this is a version error.
+                        */
+                       continue;
+               }
+               ua_sess = lookup_session_by_app(usess, app);
+               if (!ua_sess) {
+                       /* The application has a problem or is probably dead. */
+                       continue;
+               }
+
+               pthread_mutex_lock(&ua_sess->lock);
+
+               if (ua_sess->deleted) {
+                       pthread_mutex_unlock(&ua_sess->lock);
+                       continue;
+               }
+
+               /* Lookup channel in the ust app session */
+               lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
+               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+               /* If the channel is not found, there is a code flow error */
+               LTTNG_ASSERT(ua_chan_node);
+
+               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+
+               ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
+               pthread_mutex_unlock(&ua_sess->lock);
+               if (ret < 0) {
+                       if (ret != -LTTNG_UST_ERR_EXIST) {
+                               /* Possible value at this point: -ENOMEM. If so, we stop! */
+                               break;
+                       }
+                       DBG2("UST app event %s already exists on app PID %d",
+                                       uevent->attr.name, app->pid);
+                       continue;
+               }
+       }
+
+       rcu_read_unlock();
+       return ret;
+}
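
Like the other *_glb() helpers above, ust_app_create_event_glb() re-checks ua_sess->deleted only after taking ua_sess->lock: the session can be torn down between the RCU lookup and the lock acquisition. A minimal sketch of that check-under-lock idiom, assuming a plain pthread mutex and hypothetical types (not lttng-tools code):

    #include <pthread.h>
    #include <cstdio>

    struct session {
            pthread_mutex_t lock;
            bool deleted;
            int event_count;
    };

    /* Returns 0 on success, -1 if the session was deleted concurrently. */
    static int add_event(struct session *sess)
    {
            int ret = 0;

            pthread_mutex_lock(&sess->lock);

            /*
             * The session may have been marked deleted between the lookup that
             * produced 'sess' and the moment its lock was acquired, so re-check
             * the flag before touching the session.
             */
            if (sess->deleted) {
                    ret = -1;
                    goto unlock;
            }

            sess->event_count++;
    unlock:
            pthread_mutex_unlock(&sess->lock);
            return ret;
    }

    int main()
    {
            struct session sess = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

            if (add_event(&sess) == 0) {
                    std::printf("event added, count = %d\n", sess.event_count);
            }

            sess.deleted = true;
            std::printf("second add %s\n",
                        add_event(&sess) == 0 ? "succeeded" : "skipped");
            return 0;
    }
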
+
+/*
+ * Start tracing for a specific UST session and app.
+ *
+ * Called with UST app session lock held.
+ *
+ */
+static
+int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
+{
+       int ret = 0;
+       struct ust_app_session *ua_sess;
+
+       DBG("Starting tracing for ust app pid %d", app->pid);
+
+       rcu_read_lock();
+
+       if (!app->compatible) {
+               goto end;
+       }
+
+       ua_sess = lookup_session_by_app(usess, app);
+       if (ua_sess == NULL) {
+               /* The session is in the teardown process. Ignore and continue. */
+               goto end;
+       }
+
+       pthread_mutex_lock(&ua_sess->lock);
+
+       if (ua_sess->deleted) {
+               pthread_mutex_unlock(&ua_sess->lock);
+               goto end;
+       }
+
+       if (ua_sess->enabled) {
+               pthread_mutex_unlock(&ua_sess->lock);
+               goto end;
+       }
+
+       /* Upon restart, we skip the setup, already done */
+       if (ua_sess->started) {
+               goto skip_setup;
+       }
+
+       health_code_update();
+
+skip_setup:
+       /* This starts the UST tracing */
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_start_session(app->sock, ua_sess->handle);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       DBG3("UST app start session failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+                       pthread_mutex_unlock(&ua_sess->lock);
+                       goto end;
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app start session failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+                       pthread_mutex_unlock(&ua_sess->lock);
+                       goto end;
+
+               } else {
+                       ERR("UST app start session failed with ret %d: pid = %d, sock = %d",
+                                       ret, app->pid, app->sock);
+               }
+               goto error_unlock;
+       }
+
+       /* Indicate that the session has been started once */
+       ua_sess->started = 1;
+       ua_sess->enabled = 1;
+
+       pthread_mutex_unlock(&ua_sess->lock);
+
+       health_code_update();
+
+       /* Quiescent wait after starting trace */
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_wait_quiescent(app->sock);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
+                                       ret, app->pid, app->sock);
+               }
+       }
+
+end:
+       rcu_read_unlock();
+       health_code_update();
+       return 0;
+
+error_unlock:
+       pthread_mutex_unlock(&ua_sess->lock);
+       rcu_read_unlock();
+       health_code_update();
+       return -1;
+}
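
The start, stop and quiescent-wait paths above all triage the lttng_ust_ctl_*() return code the same way: -EPIPE or -LTTNG_UST_ERR_EXITING means the application died and only deserves a debug message, -EAGAIN is a communication timeout worth a warning, and everything else is a real error. A small standalone sketch of that triage, using errno values and a placeholder constant in place of the lttng-ust "exiting" code (hypothetical helper, not lttng-tools code):

    #include <cerrno>
    #include <cstdio>

    /* Stand-in for -LTTNG_UST_ERR_EXITING; the real value lives in lttng-ust. */
    #define APP_EXITING_ERR 1024

    enum severity { SEV_DEBUG, SEV_WARN, SEV_ERROR };

    /* Map a negative ustctl-style return code to a log severity. */
    static enum severity classify(int ret)
    {
            if (ret == -EPIPE || ret == -APP_EXITING_ERR) {
                    return SEV_DEBUG; /* Application is dead: expected, not an error. */
            }
            if (ret == -EAGAIN) {
                    return SEV_WARN;  /* Communication timed out. */
            }
            return SEV_ERROR;         /* Genuine failure. */
    }

    int main()
    {
            static const char *const names[] = { "DBG3", "WARN", "ERR" };
            const int codes[] = { -EPIPE, -EAGAIN, -ENOMEM, -APP_EXITING_ERR };

            for (int ret : codes) {
                    std::fprintf(stderr, "ret = %d -> %s\n", ret, names[classify(ret)]);
            }
            return 0;
    }
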
+
+/*
+ * Stop tracing for a specific UST session and app.
+ */
+static
+int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
+{
+       int ret = 0;
+       struct ust_app_session *ua_sess;
+       struct ust_registry_session *registry;
+
+       DBG("Stopping tracing for ust app pid %d", app->pid);
+
+       rcu_read_lock();
+
+       if (!app->compatible) {
+               goto end_no_session;
+       }
+
+       ua_sess = lookup_session_by_app(usess, app);
+       if (ua_sess == NULL) {
+               goto end_no_session;
+       }
+
+       pthread_mutex_lock(&ua_sess->lock);
+
+       if (ua_sess->deleted) {
+               pthread_mutex_unlock(&ua_sess->lock);
+               goto end_no_session;
+       }
+
+       /*
+        * If started = 0, it means that stop trace has been called for a session
+        * that was never started. It's possible since we can have a failed start
+        * from either the application manager thread or the command thread. Simply
+        * indicate that this is a stop error.
+        */
+       if (!ua_sess->started) {
+               goto error_rcu_unlock;
+       }
+
+       health_code_update();
+
+       /* This inhibits UST tracing */
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_stop_session(app->sock, ua_sess->handle);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       DBG3("UST app stop session failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+                       goto end_unlock;
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app stop session failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+                       goto end_unlock;
+
+               } else {
+                       ERR("UST app stop session failed with ret %d: pid = %d, sock = %d",
+                                       ret, app->pid, app->sock);
+               }
+               goto error_rcu_unlock;
+       }
+
+       health_code_update();
+       ua_sess->enabled = 0;
+
+       /* Quiescent wait after stopping trace */
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_wait_quiescent(app->sock);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
+                                       ret, app->pid, app->sock);
+               }
+       }
+
+       health_code_update();
+
+       registry = get_session_registry(ua_sess);
+
+       /* The UST app session is held; the registry shall not be null. */
+       LTTNG_ASSERT(registry);
+
+       /* Push metadata for application before freeing the application. */
+       (void) push_metadata(registry, ua_sess->consumer);
+
+end_unlock:
+       pthread_mutex_unlock(&ua_sess->lock);
+end_no_session:
+       rcu_read_unlock();
+       health_code_update();
+       return 0;
+
+error_rcu_unlock:
+       pthread_mutex_unlock(&ua_sess->lock);
+       rcu_read_unlock();
+       health_code_update();
+       return -1;
+}
+
+static
+int ust_app_flush_app_session(struct ust_app *app,
+               struct ust_app_session *ua_sess)
+{
+       int ret, retval = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app_channel *ua_chan;
+       struct consumer_socket *socket;
+
+       DBG("Flushing app session buffers for ust app pid %d", app->pid);
+
+       rcu_read_lock();
+
+       if (!app->compatible) {
+               goto end_not_compatible;
+       }
+
+       pthread_mutex_lock(&ua_sess->lock);
+
+       if (ua_sess->deleted) {
+               goto end_deleted;
+       }
+
+       health_code_update();
+
+       /* Flushing buffers */
+       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                       ua_sess->consumer);
+
+       /* Flush buffers and push metadata. */
+       switch (ua_sess->buffer_type) {
+       case LTTNG_BUFFER_PER_PID:
+               cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
+                               node.node) {
+                       health_code_update();
+                       ret = consumer_flush_channel(socket, ua_chan->key);
+                       if (ret) {
+                               ERR("Error flushing consumer channel");
+                               retval = -1;
+                               continue;
+                       }
+               }
+               break;
+       case LTTNG_BUFFER_PER_UID:
+       default:
+               abort();
+               break;
+       }
+
+       health_code_update();
+
+end_deleted:
+       pthread_mutex_unlock(&ua_sess->lock);
+
+end_not_compatible:
+       rcu_read_unlock();
+       health_code_update();
+       return retval;
+}
+
+/*
+ * Flush buffers for all applications for a specific UST session.
+ * Called with UST session lock held.
+ */
+static
+int ust_app_flush_session(struct ltt_ust_session *usess)
+{
+       int ret = 0;
+
+       DBG("Flushing session buffers for all ust apps");
+
+       rcu_read_lock();
+
+       /* Flush buffers and push metadata. */
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg;
+               struct lttng_ht_iter iter;
+
+               /* Flush all per UID buffers associated to that session. */
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct ust_registry_session *ust_session_reg;
+                       struct buffer_reg_channel *buf_reg_chan;
+                       struct consumer_socket *socket;
+
+                       /* Get consumer socket to use to push the metadata. */
+                       socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+                                       usess->consumer);
+                       if (!socket) {
+                               /* Ignore request if no consumer is found for the session. */
+                               continue;
+                       }
+
+                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+                                       buf_reg_chan, node.node) {
+                               /*
+                                * The following call will print error values so the return
+                                * code is of little importance because whatever happens, we
+                                * have to try them all.
+                                */
+                               (void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
+                       }
+
+                       ust_session_reg = reg->registry->reg.ust;
+                       /* Push metadata. */
+                       (void) push_metadata(ust_session_reg, usess->consumer);
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               struct ust_app_session *ua_sess;
+               struct lttng_ht_iter iter;
+               struct ust_app *app;
+
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (ua_sess == NULL) {
+                               continue;
+                       }
+                       (void) ust_app_flush_app_session(app, ua_sess);
+               }
+               break;
+       }
+       default:
+               ret = -1;
+               abort();
+               break;
+       }
+
+       rcu_read_unlock();
+       health_code_update();
+       return ret;
+}
+
+static
+int ust_app_clear_quiescent_app_session(struct ust_app *app,
+               struct ust_app_session *ua_sess)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app_channel *ua_chan;
+       struct consumer_socket *socket;
+
+       DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
+
+       rcu_read_lock();
+
+       if (!app->compatible) {
+               goto end_not_compatible;
+       }
+
+       pthread_mutex_lock(&ua_sess->lock);
+
+       if (ua_sess->deleted) {
+               goto end_unlock;
+       }
+
+       health_code_update();
+
+       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                       ua_sess->consumer);
+       if (!socket) {
+               ERR("Failed to find consumer (%" PRIu32 ") socket",
+                               app->bits_per_long);
+               ret = -1;
+               goto end_unlock;
+       }
+
+       /* Clear quiescent state. */
+       switch (ua_sess->buffer_type) {
+       case LTTNG_BUFFER_PER_PID:
+               cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
+                               ua_chan, node.node) {
+                       health_code_update();
+                       ret = consumer_clear_quiescent_channel(socket,
+                                       ua_chan->key);
+                       if (ret) {
+                               ERR("Error clearing quiescent state for consumer channel");
+                               ret = -1;
+                               continue;
+                       }
+               }
+               break;
+       case LTTNG_BUFFER_PER_UID:
+       default:
+               abort();
+               ret = -1;
+               break;
+       }
+
+       health_code_update();
+
+end_unlock:
+       pthread_mutex_unlock(&ua_sess->lock);
+
+end_not_compatible:
+       rcu_read_unlock();
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Clear quiescent state in each stream for all applications for a
+ * specific UST session.
+ * Called with UST session lock held.
+ */
+static
+int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
+{
+       int ret = 0;
+
+       DBG("Clearing stream quiescent state for all ust apps");
+
+       rcu_read_lock();
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct lttng_ht_iter iter;
+               struct buffer_reg_uid *reg;
+
+               /*
+                * Clear quiescent for all per UID buffers associated to
+                * that session.
+                */
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct consumer_socket *socket;
+                       struct buffer_reg_channel *buf_reg_chan;
+
+                       /* Get associated consumer socket. */
+                       socket = consumer_find_socket_by_bitness(
+                                       reg->bits_per_long, usess->consumer);
+                       if (!socket) {
+                               /*
+                                * Ignore request if no consumer is found for
+                                * the session.
+                                */
+                               continue;
+                       }
+
+                       cds_lfht_for_each_entry(reg->registry->channels->ht,
+                                       &iter.iter, buf_reg_chan, node.node) {
+                               /*
+                                * The following call will print error values so
+                                * the return code is of little importance
+                                * because whatever happens, we have to try them
+                                * all.
+                                */
+                               (void) consumer_clear_quiescent_channel(socket,
+                                               buf_reg_chan->consumer_key);
+                       }
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               struct ust_app_session *ua_sess;
+               struct lttng_ht_iter iter;
+               struct ust_app *app;
+
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
+                               pid_n.node) {
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (ua_sess == NULL) {
+                               continue;
+                       }
+                       (void) ust_app_clear_quiescent_app_session(app,
+                                       ua_sess);
+               }
+               break;
+       }
+       default:
+               ret = -1;
+               abort();
+               break;
+       }
+
+       rcu_read_unlock();
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Destroy a specific UST session in apps.
+ */
+static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
+{
+       int ret;
+       struct ust_app_session *ua_sess;
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_u64 *node;
+
+       DBG("Destroy tracing for ust app pid %d", app->pid);
+
+       rcu_read_lock();
+
+       if (!app->compatible) {
+               goto end;
+       }
+
+       __lookup_session_by_app(usess, app, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (node == NULL) {
+               /* Session is being or is deleted. */
+               goto end;
+       }
+       ua_sess = caa_container_of(node, struct ust_app_session, node);
+
+       health_code_update();
+       destroy_app_session(app, ua_sess);
+
+       health_code_update();
+
+       /* Quiescent wait after stopping trace */
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_wait_quiescent(app->sock);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
+                                       ret, app->pid, app->sock);
+               }
+       }
+end:
+       rcu_read_unlock();
+       health_code_update();
+       return 0;
+}
+
+/*
+ * Start tracing for the UST session.
+ */
+int ust_app_start_trace_all(struct ltt_ust_session *usess)
+{
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+
+       DBG("Starting all UST traces");
+
+       /*
+        * Even though the start trace might fail, flag this session active so
+        * other applications coming in are started by default.
+        */
+       usess->active = 1;
+
+       rcu_read_lock();
+
+       /*
+        * In a start-stop-start use-case, we need to clear the quiescent state
+        * of each channel set by the prior stop command, thus ensuring that a
+        * following stop or destroy is sure to grab a timestamp_end near those
+        * operations, even if the packet is empty.
+        */
+       (void) ust_app_clear_quiescent_session(usess);
+
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               ust_app_global_update(usess, app);
+       }
+
+       rcu_read_unlock();
+
+       return 0;
+}
+
+/*
+ * Stop tracing for the UST session.
+ * Called with UST session lock held.
+ */
+int ust_app_stop_trace_all(struct ltt_ust_session *usess)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+
+       DBG("Stopping all UST traces");
+
+       /*
+        * Even though the stop trace might fail, flag this session inactive so
+        * other applications coming in are not started by default.
+        */
+       usess->active = 0;
+
+       rcu_read_lock();
+
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               ret = ust_app_stop_trace(usess, app);
+               if (ret < 0) {
+                       /* Continue to next apps even on error */
+                       continue;
+               }
+       }
+
+       (void) ust_app_flush_session(usess);
+
+       rcu_read_unlock();
+
+       return 0;
+}
+
+/*
+ * Destroy the UST session of all registered apps.
+ */
+int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+
+       DBG("Destroy all UST traces");
+
+       rcu_read_lock();
+
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               ret = destroy_trace(usess, app);
+               if (ret < 0) {
+                       /* Continue to next apps even on error */
+                       continue;
+               }
+       }
+
+       rcu_read_unlock();
+
+       return 0;
+}
+
+/* The ua_sess lock must be held by the caller. */
+static
+int find_or_create_ust_app_channel(
+               struct ltt_ust_session *usess,
+               struct ust_app_session *ua_sess,
+               struct ust_app *app,
+               struct ltt_ust_channel *uchan,
+               struct ust_app_channel **ua_chan)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_str *ua_chan_node;
+
+       lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
+       ua_chan_node = lttng_ht_iter_get_node_str(&iter);
+       if (ua_chan_node) {
+               *ua_chan = caa_container_of(ua_chan_node,
+                       struct ust_app_channel, node);
+               goto end;
+       }
+
+       ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
+       if (ret) {
+               goto end;
+       }
+end:
+       return ret;
+}
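
find_or_create_ust_app_channel() is the usual find-or-create idiom: look the channel up by name first and only fall back to ust_app_channel_create() on a miss. The same shape in a self-contained C++ sketch over std::unordered_map (hypothetical types; the sessiond uses its liburcu-backed lttng_ht instead):

    #include <cstdio>
    #include <memory>
    #include <string>
    #include <unordered_map>

    struct app_channel {
            std::string name;
            bool enabled = true;
    };

    using channel_table =
            std::unordered_map<std::string, std::unique_ptr<app_channel>>;

    /* Return the channel named 'name', creating it on a lookup miss. */
    static app_channel *find_or_create_channel(channel_table &channels,
                                               const std::string &name)
    {
            const auto it = channels.find(name);

            if (it != channels.end()) {
                    /* Found: reuse the existing channel. */
                    return it->second.get();
            }

            /* Miss: fall back to the creation path. */
            auto chan = std::make_unique<app_channel>();
            chan->name = name;

            app_channel *raw = chan.get();
            channels.emplace(name, std::move(chan));
            return raw;
    }

    int main()
    {
            channel_table channels;

            app_channel *first = find_or_create_channel(channels, "my_channel");
            app_channel *second = find_or_create_channel(channels, "my_channel");

            std::printf("same object: %s, table size: %zu\n",
                        first == second ? "yes" : "no", channels.size());
            return 0;
    }
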
+
+static
+int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
+               struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
+               struct ust_app *app)
+{
+       int ret = 0;
+       struct ust_app_event *ua_event = NULL;
+
+       ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
+               uevent->filter, uevent->attr.loglevel, uevent->exclusion);
+       if (!ua_event) {
+               ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
+               if (ret < 0) {
+                       goto end;
+               }
+       } else {
+               if (ua_event->enabled != uevent->enabled) {
+                       ret = uevent->enabled ?
+                               enable_ust_app_event(ua_sess, ua_event, app) :
+                               disable_ust_app_event(ua_sess, ua_event, app);
+               }
+       }
+
+end:
+       return ret;
+}
+
+/* Called with RCU read-side lock held. */
+static
+void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
+{
+       int ret = 0;
+       enum lttng_error_code ret_code;
+       enum lttng_trigger_status t_status;
+       struct lttng_ht_iter app_trigger_iter;
+       struct lttng_triggers *triggers = NULL;
+       struct ust_app_event_notifier_rule *event_notifier_rule;
+       unsigned int count, i;
+
+       if (!ust_app_supports_notifiers(app)) {
+               goto end;
+       }
+
+       /*
+        * Currently, registering or unregistering a trigger with an
+        * event rule condition causes a full synchronization of the event
+        * notifiers.
+        *
+        * The first step attempts to add an event notifier for all registered
+        * triggers that apply to the user space tracers. Then, the
+        * application's event notifiers rules are all checked against the list
+        * of registered triggers. Any event notifier that doesn't have a
+        * matching trigger can be assumed to have been disabled.
+        *
+        * All of this is inefficient, but is put in place to get the feature
+        * rolling as it is simpler at this moment. It will be optimized Soon™
+        * to allow the state of enabled event notifiers to be synchronized
+        * in a piece-wise way.
+        */
+
+       /* Get all triggers using uid 0 (root) */
+       ret_code = notification_thread_command_list_triggers(
+                       the_notification_thread_handle, 0, &triggers);
+       if (ret_code != LTTNG_OK) {
+               goto end;
+       }
+
+       LTTNG_ASSERT(triggers);
+
+       t_status = lttng_triggers_get_count(triggers, &count);
+       if (t_status != LTTNG_TRIGGER_STATUS_OK) {
+               goto end;
+       }
+
+       for (i = 0; i < count; i++) {
+               struct lttng_condition *condition;
+               struct lttng_event_rule *event_rule;
+               struct lttng_trigger *trigger;
+               const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
+               enum lttng_condition_status condition_status;
+               uint64_t token;
+
+               trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
+               LTTNG_ASSERT(trigger);
+
+               token = lttng_trigger_get_tracer_token(trigger);
+               condition = lttng_trigger_get_condition(trigger);
+
+               if (lttng_condition_get_type(condition) !=
+                               LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
+                       /* Does not apply */
+                       continue;
+               }
+
+               condition_status =
+                               lttng_condition_event_rule_matches_borrow_rule_mutable(
+                                               condition, &event_rule);
+               LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
+
+               if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
+                       /* Skip kernel related triggers. */
+                       continue;
+               }
+
+               /*
+                * Find or create the associated token event rule. The caller
+                * holds the RCU read lock, so this is safe to call without
+                * explicitly acquiring it here.
+                */
+               looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
+                               app->token_to_event_notifier_rule_ht, token);
+               if (!looked_up_event_notifier_rule) {
+                       ret = create_ust_app_event_notifier_rule(trigger, app);
+                       if (ret < 0) {
+                               goto end;
+                       }
+               }
+       }
+
+       rcu_read_lock();
+       /* Remove all unknown event sources from the app. */
+       cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+                       &app_trigger_iter.iter, event_notifier_rule,
+                       node.node) {
+               const uint64_t app_token = event_notifier_rule->token;
+               bool found = false;
+
+               /*
+                * Check if the app event trigger still exists on the
+                * notification side.
+                */
+               for (i = 0; i < count; i++) {
+                       uint64_t notification_thread_token;
+                       const struct lttng_trigger *trigger =
+                                       lttng_triggers_get_at_index(
+                                                       triggers, i);
+
+                       LTTNG_ASSERT(trigger);
+
+                       notification_thread_token =
+                                       lttng_trigger_get_tracer_token(trigger);
+
+                       if (notification_thread_token == app_token) {
+                               found = true;
+                               break;
+                       }
+               }
+
+               if (found) {
+                       /* Still valid. */
+                       continue;
+               }
+
+               /*
+                * This trigger was unregistered, disable it on the tracer's
+                * side.
+                */
+               ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
+                               &app_trigger_iter);
+               LTTNG_ASSERT(ret == 0);
+
+               /* Callee logs errors. */
+               (void) disable_ust_object(app, event_notifier_rule->obj);
+
+               delete_ust_app_event_notifier_rule(
+                               app->sock, event_notifier_rule, app);
+       }
+
+       rcu_read_unlock();
+
+end:
+       lttng_triggers_destroy(triggers);
+       return;
+}
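
As the comment in ust_app_synchronize_event_notifier_rules() explains, the synchronization is a full two-pass reconciliation: first create an event notifier for every registered trigger the application does not know about yet, then delete every notifier whose trigger has disappeared. The same two-pass pattern in a self-contained sketch over plain token sets (hypothetical names; the real code walks triggers and a per-app hash table):

    #include <cstdint>
    #include <cstdio>
    #include <set>

    /* Reconcile 'current' (tokens the app already has notifiers for) against
     * 'desired' (tokens of the currently registered triggers). */
    static void reconcile(std::set<std::uint64_t> &current,
                          const std::set<std::uint64_t> &desired)
    {
            /* Pass 1: create whatever is desired but missing. */
            for (const std::uint64_t token : desired) {
                    if (current.insert(token).second) {
                            std::printf("create notifier for token %llu\n",
                                        (unsigned long long) token);
                    }
            }

            /* Pass 2: tear down whatever exists but is no longer desired. */
            for (auto it = current.begin(); it != current.end();) {
                    if (desired.count(*it) == 0) {
                            std::printf("remove stale notifier for token %llu\n",
                                        (unsigned long long) *it);
                            it = current.erase(it);
                    } else {
                            ++it;
                    }
            }
    }

    int main()
    {
            std::set<std::uint64_t> current = { 1, 2, 3 };
            const std::set<std::uint64_t> desired = { 2, 3, 4 };

            reconcile(current, desired);
            std::printf("now tracking %zu notifiers\n", current.size());
            return 0;
    }
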
+
+/*
+ * RCU read lock must be held by the caller.
+ */
+static
+void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
+               struct ust_app_session *ua_sess,
+               struct ust_app *app)
+{
+       int ret = 0;
+       struct cds_lfht_iter uchan_iter;
+       struct ltt_ust_channel *uchan;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(app);
+
+       cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
+                       uchan, node.node) {
+               struct ust_app_channel *ua_chan;
+               struct cds_lfht_iter uevent_iter;
+               struct ltt_ust_event *uevent;
+
+               /*
+                * Search for a matching ust_app_channel. If none is found,
+                * create it. Creating the channel will cause the ua_chan
+                * structure to be allocated, the channel buffers to be
+                * allocated (if necessary) and sent to the application, and
+                * all enabled contexts will be added to the channel.
+                */
+               ret = find_or_create_ust_app_channel(usess, ua_sess,
+                       app, uchan, &ua_chan);
+               if (ret) {
+                       /* Tracer is probably gone or ENOMEM. */
+                       goto end;
+               }
+
+               if (!ua_chan) {
+                       /* ua_chan will be NULL for the metadata channel */
+                       continue;
+               }
+
+               cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
+                               node.node) {
+                       ret = ust_app_channel_synchronize_event(ua_chan,
+                               uevent, ua_sess, app);
+                       if (ret) {
+                               goto end;
+                       }
+               }
+
+               if (ua_chan->enabled != uchan->enabled) {
+                       ret = uchan->enabled ?
+                               enable_ust_app_channel(ua_sess, uchan, app) :
+                               disable_ust_app_channel(ua_sess, ua_chan, app);
+                       if (ret) {
+                               goto end;
+                       }
+               }
+       }
+end:
+       return;
+}
+
+/*
+ * The caller must ensure that the application is compatible and is tracked
+ * by the process attribute trackers.
+ */
+static
+void ust_app_synchronize(struct ltt_ust_session *usess,
+               struct ust_app *app)
+{
+       int ret = 0;
+       struct ust_app_session *ua_sess = NULL;
+
+       /*
+        * The application's configuration should only be synchronized for
+        * active sessions.
+        */
+       LTTNG_ASSERT(usess->active);
+
+       ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
+       if (ret < 0) {
+               /* Tracer is probably gone or ENOMEM. */
+               if (ua_sess) {
+                       destroy_app_session(app, ua_sess);
+               }
+               goto end;
+       }
+       LTTNG_ASSERT(ua_sess);
+
+       pthread_mutex_lock(&ua_sess->lock);
+       if (ua_sess->deleted) {
+               goto deleted_session;
+       }
+
+       rcu_read_lock();
+
+       ust_app_synchronize_all_channels(usess, ua_sess, app);
+
+       /*
+        * Create the metadata for the application. This returns gracefully if
+        * the metadata was already created for the session.
+        *
+        * The metadata channel must be created after the data channels as the
+        * consumer daemon assumes this ordering. When interacting with a relay
+        * daemon, the consumer will use this assumption to send the
+        * "STREAMS_SENT" message to the relay daemon.
+        */
+       ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
+       if (ret < 0) {
+               ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
+                               app->sock, usess->id);
+       }
+
+       rcu_read_unlock();
+
+deleted_session:
+       pthread_mutex_unlock(&ua_sess->lock);
+end:
+       return;
+}
+
+static
+void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
+{
+       struct ust_app_session *ua_sess;
+
+       ua_sess = lookup_session_by_app(usess, app);
+       if (ua_sess == NULL) {
+               return;
+       }
+       destroy_app_session(app, ua_sess);
+}
+
+/*
+ * Add channels/events from UST global domain to registered apps at sock.
+ *
+ * Called with session lock held.
+ * Called with RCU read-side lock held.
+ */
+void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
+{
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(usess->active);
+
+       DBG2("UST app global update for app sock %d for session id %" PRIu64,
+                       app->sock, usess->id);
+
+       if (!app->compatible) {
+               return;
+       }
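+       /*
+        * The application is traced only if it passes all three process
+        * attribute trackers (vpid, vuid and vgid); otherwise, any tracing
+        * state it has for this session is destroyed.
+        */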
+       if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
+                           usess, app->pid) &&
+                       trace_ust_id_tracker_lookup(
+                                       LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
+                                       usess, app->uid) &&
+                       trace_ust_id_tracker_lookup(
+                                       LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
+                                       usess, app->gid)) {
+               /*
+                * Synchronize the application's internal tracing configuration
+                * and start tracing.
+                */
+               ust_app_synchronize(usess, app);
+               ust_app_start_trace(usess, app);
+       } else {
+               ust_app_global_destroy(usess, app);
+       }
+}
+
+/*
+ * Add all event notifiers to an application.
+ *
+ * Called with session lock held.
+ * Called with RCU read-side lock held.
+ */
+void ust_app_global_update_event_notifier_rules(struct ust_app *app)
+{
+       DBG2("UST application global event notifier rules update: app = '%s', pid = %d",
+                       app->name, app->pid);
+
+       if (!app->compatible || !ust_app_supports_notifiers(app)) {
+               return;
+       }
+
+       if (app->event_notifier_group.object == NULL) {
+               WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s', pid = %d",
+                               app->name, app->pid);
+               return;
+       }
+
+       ust_app_synchronize_event_notifier_rules(app);
+}
+
+/*
+ * Called with session lock held.
+ */
+void ust_app_global_update_all(struct ltt_ust_session *usess)
+{
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               ust_app_global_update(usess, app);
+       }
+       rcu_read_unlock();
+}
+
+void ust_app_global_update_all_event_notifier_rules(void)
+{
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               ust_app_global_update_event_notifier_rules(app);
+       }
+
+       rcu_read_unlock();
+}
+
+/*
+ * Add context to a specific channel for global UST domain.
+ */
+int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
+{
+       int ret = 0;
+       struct lttng_ht_node_str *ua_chan_node;
+       struct lttng_ht_iter iter, uiter;
+       struct ust_app_channel *ua_chan = NULL;
+       struct ust_app_session *ua_sess;
+       struct ust_app *app;
+
+       LTTNG_ASSERT(usess->active);
+
+       rcu_read_lock();
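+       /*
+        * Propagate the context to every registered, compatible application
+        * that has this channel in the target session.
+        */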
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               if (!app->compatible) {
+                       /*
+                        * TODO: In time, we should notify the caller of this
+                        * error by indicating that this is a version error.
+                        */
+                       continue;
+               }
+               ua_sess = lookup_session_by_app(usess, app);
+               if (ua_sess == NULL) {
+                       continue;
+               }
+
+               pthread_mutex_lock(&ua_sess->lock);
+
+               if (ua_sess->deleted) {
+                       pthread_mutex_unlock(&ua_sess->lock);
+                       continue;
+               }
+
+               /* Lookup channel in the ust app session */
+               lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
+               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+               if (ua_chan_node == NULL) {
+                       goto next_app;
+               }
+               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
+                               node);
+               ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
+               if (ret < 0) {
+                       goto next_app;
+               }
+       next_app:
+               pthread_mutex_unlock(&ua_sess->lock);
+       }
+
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Receive registration and populate the given msg structure.
+ *
+ * On success, return 0; on error, return the negative value from the ustctl
+ * call.
+ */
+int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
+{
+       int ret;
+       uint32_t pid, ppid, uid, gid;
+
+       LTTNG_ASSERT(msg);
+
+       ret = lttng_ust_ctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
+                       &pid, &ppid, &uid, &gid,
+                       &msg->bits_per_long,
+                       &msg->uint8_t_alignment,
+                       &msg->uint16_t_alignment,
+                       &msg->uint32_t_alignment,
+                       &msg->uint64_t_alignment,
+                       &msg->long_alignment,
+                       &msg->byte_order,
+                       msg->name);
+       if (ret < 0) {
+               switch (-ret) {
+               case EPIPE:
+               case ECONNRESET:
+               case LTTNG_UST_ERR_EXITING:
+                       DBG3("UST app recv reg message failed. Application died");
+                       break;
+               case LTTNG_UST_ERR_UNSUP_MAJOR:
+                       ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
+                                       msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
+                                       LTTNG_UST_ABI_MINOR_VERSION);
+                       break;
+               default:
+                       ERR("UST app recv reg message failed with ret %d", ret);
+                       break;
+               }
+               goto error;
+       }
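+       /*
+        * The registration message carries these ids as 32-bit integers;
+        * convert them to their native types.
+        */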
+       msg->pid = (pid_t) pid;
+       msg->ppid = (pid_t) ppid;
+       msg->uid = (uid_t) uid;
+       msg->gid = (gid_t) gid;
+
+error:
+       return ret;
+}
+
+/*
+ * Return a ust app session object using the application object and the
+ * session object descriptor as a key. If not found, NULL is returned.
+ * An RCU read side lock MUST be acquired when calling this function.
+ */
+static struct ust_app_session *find_session_by_objd(struct ust_app *app,
+               int objd)
+{
+       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_iter iter;
+       struct ust_app_session *ua_sess = NULL;
+
+       LTTNG_ASSERT(app);
+
+       lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
+       node = lttng_ht_iter_get_node_ulong(&iter);
+       if (node == NULL) {
+               DBG2("UST app session find by objd %d not found", objd);
+               goto error;
+       }
+
+       ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
+
+error:
+       return ua_sess;
+}
+
+/*
+ * Return a ust app channel object using the application object and the channel
+ * object descriptor as a key. If not found, NULL is returned. An RCU read side
+ * lock MUST be acquired before calling this function.
+ */
+static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
+               int objd)
+{
+       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_iter iter;
+       struct ust_app_channel *ua_chan = NULL;
+
+       LTTNG_ASSERT(app);
+
+       lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
+       node = lttng_ht_iter_get_node_ulong(&iter);
+       if (node == NULL) {
+               DBG2("UST app channel find by objd %d not found", objd);
+               goto error;
+       }
+
+       ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
+
+error:
+       return ua_chan;
+}
+
+/*
+ * Reply to a register channel notification from an application on the notify
+ * socket. The channel metadata is also created.
+ *
+ * The session UST registry lock is acquired in this function.
+ *
+ * On success 0 is returned else a negative value.
+ */
+static int reply_ust_register_channel(int sock, int cobjd,
+               size_t nr_fields, struct lttng_ust_ctl_field *fields)
+{
+       int ret, ret_code = 0;
+       uint32_t chan_id;
+       uint64_t chan_reg_key;
+       enum lttng_ust_ctl_channel_header type;
+       struct ust_app *app;
+       struct ust_app_channel *ua_chan;
+       struct ust_app_session *ua_sess;
+       struct ust_registry_session *registry;
+       struct ust_registry_channel *ust_reg_chan;
+
+       rcu_read_lock();
+
+       /* Lookup application. If not found, there is a code flow error. */
+       app = find_app_by_notify_sock(sock);
+       if (!app) {
+               DBG("Application socket %d is being torn down. Abort event notify",
+                               sock);
+               ret = -1;
+               goto error_rcu_unlock;
+       }
+
+       /* Lookup channel by UST object descriptor. */
+       ua_chan = find_channel_by_objd(app, cobjd);
+       if (!ua_chan) {
+               DBG("Application channel is being torn down. Abort event notify");
+               ret = 0;
+               goto error_rcu_unlock;
+       }
+
+       LTTNG_ASSERT(ua_chan->session);
+       ua_sess = ua_chan->session;
+
+       /* Get right session registry depending on the session buffer type. */
+       registry = get_session_registry(ua_sess);
+       if (!registry) {
+               DBG("Application session is being torn down. Abort event notify");
+               ret = 0;
+               goto error_rcu_unlock;
+       }
+
+       /* Depending on the buffer type, a different channel key is used. */
+       if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
+               chan_reg_key = ua_chan->tracing_channel_id;
+       } else {
+               chan_reg_key = ua_chan->key;
+       }
+
+       pthread_mutex_lock(&registry->lock);
+
+       ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
+       LTTNG_ASSERT(ust_reg_chan);
+
+       if (!ust_reg_chan->register_done) {
+               /*
+                * TODO: eventually use the registry event count for
+                * this channel to better guess header type for per-pid
+                * buffers.
+                */
+               type = LTTNG_UST_CTL_CHANNEL_HEADER_LARGE;
+               ust_reg_chan->nr_ctx_fields = nr_fields;
+               ust_reg_chan->ctx_fields = fields;
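+               /*
+                * The registry channel now owns the fields array; clear the
+                * local pointer so that the free() at the end of this
+                * function does not release it.
+                */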
+               fields = NULL;
+               ust_reg_chan->header_type = type;
+       } else {
+               /* Get current already assigned values. */
+               type = ust_reg_chan->header_type;
+       }
+       /* Channel id is set during the object creation. */
+       chan_id = ust_reg_chan->chan_id;
+
+       /* Append to metadata */
+       if (!ust_reg_chan->metadata_dumped) {
+               ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
+               if (ret_code) {
+                       ERR("Error appending channel metadata (errno = %d)", ret_code);
+                       goto reply;
+               }
+       }
+
+reply:
+       DBG3("UST app replying to register channel key %" PRIu64
+                       " with id %u, type = %d, ret = %d", chan_reg_key, chan_id, type,
+                       ret_code);
+
+       ret = lttng_ust_ctl_reply_register_channel(sock, chan_id, type, ret_code);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app reply channel failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d",
+                                       ret, app->pid, app->sock);
+               }
+               goto error;
+       }
+
+       /* This channel registry registration is completed. */
+       ust_reg_chan->register_done = 1;
+
+error:
+       pthread_mutex_unlock(&registry->lock);
+error_rcu_unlock:
+       rcu_read_unlock();
+       free(fields);
+       return ret;
+}
+
+/*
+ * Add event to the UST channel registry. When the event is added to the
+ * registry, the metadata is also created. Once done, this replies to the
+ * application with the appropriate error code.
+ *
+ * The session UST registry lock is acquired in the function.
+ *
+ * On success 0 is returned else a negative value.
+ */
+static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
+               char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields,
+               int loglevel_value, char *model_emf_uri)
+{
+       int ret, ret_code;
+       uint32_t event_id = 0;
+       uint64_t chan_reg_key;
+       struct ust_app *app;
+       struct ust_app_channel *ua_chan;
+       struct ust_app_session *ua_sess;
+       struct ust_registry_session *registry;
+
+       rcu_read_lock();
+
+       /* Lookup application. If not found, there is a code flow error. */
+       app = find_app_by_notify_sock(sock);
+       if (!app) {
+               DBG("Application socket %d is being torn down. Abort event notify",
+                               sock);
+               ret = -1;
+               goto error_rcu_unlock;
+       }
+
+       /* Lookup channel by UST object descriptor. */
+       ua_chan = find_channel_by_objd(app, cobjd);
+       if (!ua_chan) {
+               DBG("Application channel is being torn down. Abort event notify");
+               ret = 0;
+               goto error_rcu_unlock;
+       }
+
+       LTTNG_ASSERT(ua_chan->session);
+       ua_sess = ua_chan->session;
+
+       registry = get_session_registry(ua_sess);
+       if (!registry) {
+               DBG("Application session is being torn down. Abort event notify");
+               ret = 0;
+               goto error_rcu_unlock;
+       }
+
+       if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
+               chan_reg_key = ua_chan->tracing_channel_id;
+       } else {
+               chan_reg_key = ua_chan->key;
+       }
+
+       pthread_mutex_lock(&registry->lock);
+
+       /*
+        * From this point on, this call acquires ownership of sig, fields
+        * and model_emf_uri, meaning that any required frees are done inside
+        * it. These three variables MUST NOT be read or written after this.
+        */
+       ret_code = ust_registry_create_event(registry, chan_reg_key,
+                       sobjd, cobjd, name, sig, nr_fields, fields,
+                       loglevel_value, model_emf_uri, ua_sess->buffer_type,
+                       &event_id, app);
+       sig = NULL;
+       fields = NULL;
+       model_emf_uri = NULL;
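+       /*
+        * Clearing the local pointers makes the free() calls at the end of
+        * this function no-ops for the values now owned by the registry.
+        */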
+
+       /*
+        * The return value is sent back to ustctl so that, in case of an
+        * error, the application can be notified. It is important not to
+        * return a negative error here, or else the application will get
+        * closed.
+        */
+       ret = lttng_ust_ctl_reply_register_event(sock, event_id, ret_code);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       DBG3("UST app reply event failed. Application died: pid = %d, sock = %d.",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app reply event failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app reply event failed with ret %d: pid = %d, sock = %d",
+                                       ret, app->pid, app->sock);
+               }
+               /*
+                * No need to wipe the created event since the application
+                * socket will get closed on error, hence cleaning up
+                * everything by itself.
+                */
+               goto error;
+       }
+
+       DBG3("UST registry event %s with id %" PRId32 " added successfully",
+                       name, event_id);
+
+error:
+       pthread_mutex_unlock(&registry->lock);
+error_rcu_unlock:
+       rcu_read_unlock();
+       free(sig);
+       free(fields);
+       free(model_emf_uri);
+       return ret;
+}
+
+/*
+ * Add enum to the UST session registry. Once done, this replies to the
+ * application with the appropriate error code.
+ *
+ * The session UST registry lock is acquired within this function.
+ *
+ * On success 0 is returned else a negative value.
+ */
+static int add_enum_ust_registry(int sock, int sobjd, char *name,
+               struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries)
+{
+       int ret = 0, ret_code;
+       struct ust_app *app;
+       struct ust_app_session *ua_sess;
+       struct ust_registry_session *registry;
+       uint64_t enum_id = -1ULL;
+
+       rcu_read_lock();
+
+       /* Lookup application. If not found, there is a code flow error. */
+       app = find_app_by_notify_sock(sock);
+       if (!app) {
+               /* Return an error since the application could not be found. */
+               DBG("Application socket %d is being torn down. Aborting enum registration",
+                               sock);
+               free(entries);
+               ret = -1;
+               goto error_rcu_unlock;
+       }
+
+       /* Lookup session by UST object descriptor. */
+       ua_sess = find_session_by_objd(app, sobjd);
+       if (!ua_sess) {
+               /* Not a fatal error; return without reporting an error. */
+               DBG("Application session is being torn down (session not found). Aborting enum registration.");
+               free(entries);
+               goto error_rcu_unlock;
+       }
+
+       registry = get_session_registry(ua_sess);
+       if (!registry) {
+               DBG("Application session is being torn down (registry not found). Aborting enum registration.");
+               free(entries);
+               goto error_rcu_unlock;
+       }
+
+       pthread_mutex_lock(&registry->lock);
+
+       /*
+        * From this point on, the callee acquires the ownership of
+        * entries. The variable entries MUST NOT be read or written after
+        * this call.
+        */
+       ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
+                       entries, nr_entries, &enum_id);
+       entries = NULL;
+
+       /*
+        * The return value is sent back to ustctl so that, in case of an
+        * error, the application can be notified. It is important not to
+        * return a negative error here, or else the application will get
+        * closed.
+        */
+       ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, ret_code);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app reply enum failed. Communication time out: pid = %d, sock = %d",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app reply enum failed with ret %d: pid = %d, sock = %d",
+                                       ret, app->pid, app->sock);
+               }
+               /*
+                * No need to wipe the created enum since the application
+                * socket will get closed on error, hence cleaning up
+                * everything by itself.
+                */
+               goto error;
+       }
+
+       DBG3("UST registry enum %s added successfully or already found", name);
+
+error:
+       pthread_mutex_unlock(&registry->lock);
+error_rcu_unlock:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Handle application notification through the given notify socket.
+ *
+ * Return 0 on success or else a negative value.
+ */
+int ust_app_recv_notify(int sock)
+{
+       int ret;
+       enum lttng_ust_ctl_notify_cmd cmd;
+
+       DBG3("UST app receiving notify from sock %d", sock);
+
+       ret = lttng_ust_ctl_recv_notify(sock, &cmd);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       DBG3("UST app recv notify failed. Application died: sock = %d",
+                                       sock);
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app recv notify failed. Communication time out: sock = %d",
+                                       sock);
+               } else {
+                       ERR("UST app recv notify failed with ret %d: sock = %d",
+                                       ret, sock);
+               }
+               goto error;
+       }
+
+       switch (cmd) {
+       case LTTNG_UST_CTL_NOTIFY_CMD_EVENT:
+       {
+               int sobjd, cobjd, loglevel_value;
+               char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
+               size_t nr_fields;
+               struct lttng_ust_ctl_field *fields;
+
+               DBG2("UST app ustctl register event received");
+
+               ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name,
+                               &loglevel_value, &sig, &nr_fields, &fields,
+                               &model_emf_uri);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               DBG3("UST app recv event failed. Application died: sock = %d",
+                                               sock);
+                       } else if (ret == -EAGAIN) {
+                               WARN("UST app recv event failed. Communication time out: sock = %d",
+                                               sock);
+                       } else {
+                               ERR("UST app recv event failed with ret %d: sock = %d",
+                                               ret, sock);
+                       }
+                       goto error;
+               }
+
+               /*
+                * Add the event coming from the notify socket to the UST
+                * registry. The call frees sig, fields and model_emf_uri if
+                * needed; this code path loses ownership of these variables
+                * and transfers it to that function.
+                */
+               ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
+                               fields, loglevel_value, model_emf_uri);
+               if (ret < 0) {
+                       goto error;
+               }
+
+               break;
+       }
+       case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL:
+       {
+               int sobjd, cobjd;
+               size_t nr_fields;
+               struct lttng_ust_ctl_field *fields;
+
+               DBG2("UST app ustctl register channel received");
+
+               ret = lttng_ust_ctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
+                               &fields);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               DBG3("UST app recv channel failed. Application died: sock = %d",
+                                               sock);
+                       } else if (ret == -EAGAIN) {
+                               WARN("UST app recv channel failed. Communication time out: sock = %d",
+                                               sock);
+                       } else {
+                               ERR("UST app recv channel failed with ret %d: sock = %d",
+                                               ret, sock);
+                       }
+                       goto error;
+               }
+
+               /*
+                * Ownership of fields is transferred to this function call,
+                * meaning that it is freed if needed. After this, it is
+                * invalid to access or free fields.
+                */
+               ret = reply_ust_register_channel(sock, cobjd, nr_fields,
+                               fields);
+               if (ret < 0) {
+                       goto error;
+               }
+
+               break;
+       }
+       case LTTNG_UST_CTL_NOTIFY_CMD_ENUM:
+       {
+               int sobjd;
+               char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+               size_t nr_entries;
+               struct lttng_ust_ctl_enum_entry *entries;
+
+               DBG2("UST app ustctl register enum received");
+
+               ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name,
+                               &entries, &nr_entries);
+               if (ret < 0) {
+                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                               DBG3("UST app recv enum failed. Application died: sock = %d",
+                                               sock);
+                       } else if (ret == -EAGAIN) {
+                               WARN("UST app recv enum failed. Communication time out: sock = %d",
+                                               sock);
+                       } else {
+                               ERR("UST app recv enum failed with ret %d: sock = %d",
+                                               ret, sock);
+                       }
+                       goto error;
+               }
+
+               /* Callee assumes ownership of entries */
+               ret = add_enum_ust_registry(sock, sobjd, name,
+                               entries, nr_entries);
+               if (ret < 0) {
+                       goto error;
+               }
+
+               break;
+       }
+       default:
+               /* Should NEVER happen. */
+               abort();
+       }
+
+error:
+       return ret;
+}
+
+/*
+ * Once the notify socket hangs up, this is called. First, it tries to find the
+ * corresponding application. On failure, the call_rcu to close the socket is
+ * executed. If an application is found, it tries to delete it from the notify
+ * socket hash table. Whatever the result, it proceeds to the call_rcu.
+ *
+ * Note that an object needs to be allocated here, so on an ENOMEM failure the
+ * call_rcu is not performed but the rest of the cleanup is.
+ */
+void ust_app_notify_sock_unregister(int sock)
+{
+       int err_enomem = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+       struct ust_app_notify_sock_obj *obj;
+
+       LTTNG_ASSERT(sock >= 0);
+
+       rcu_read_lock();
+
+       obj = (ust_app_notify_sock_obj *) zmalloc(sizeof(*obj));
+       if (!obj) {
+               /*
+                * An ENOMEM here is unfortunate. If it happens, we continue
+                * the procedure but the call_rcu will not be invoked. In this
+                * case, we accept the fd leak rather than possibly creating an
+                * unsynchronized state between threads.
+                *
+                * TODO: The notify object should be created once the notify
+                * socket is registered and stored independently from the ust
+                * app object. The tricky part is to synchronize the teardown
+                * of the application and of this notify object. Let's keep
+                * that in mind so we can avoid this kind of special case with
+                * ENOMEM in the teardown path.
+                */
+               err_enomem = 1;
+       } else {
+               obj->fd = sock;
+       }
+
+       DBG("UST app notify socket unregister %d", sock);
+
+       /*
+        * Lookup the application by notify socket. If this fails, it means
+        * that the hash table delete has already been done by the application
+        * unregistration process, so we can safely close the notify socket in
+        * a call_rcu.
+        */
+       app = find_app_by_notify_sock(sock);
+       if (!app) {
+               goto close_socket;
+       }
+
+       iter.iter.node = &app->notify_sock_n.node;
+
+       /*
+        * Whether the deletion succeeds or fails, the socket has to be closed
+        * after a grace period, so we continue to the call_rcu below. If the
+        * deletion is successful, the application is no longer visible to
+        * other threads; if it fails, it means that it was already deleted
+        * from the hash table. Either way, we just have to close the socket.
+        */
+       (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+
+close_socket:
+       rcu_read_unlock();
+
+       /*
+        * Close the socket after a grace period to prevent it from being
+        * reused before the application object is freed, which could create a
+        * race between threads trying to add a unique entry to the global
+        * hash table.
+        */
+       if (!err_enomem) {
+               call_rcu(&obj->head, close_notify_sock_rcu);
+       }
+}
+
+/*
+ * Destroy a ust app data structure and free its memory.
+ */
+void ust_app_destroy(struct ust_app *app)
+{
+       if (!app) {
+               return;
+       }
+
+       call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+}
+
+/*
+ * Take a snapshot for a given UST session. The snapshot is sent to the given
+ * output.
+ *
+ * Returns LTTNG_OK on success or a LTTNG_ERR error code.
+ */
+enum lttng_error_code ust_app_snapshot_record(
+               const struct ltt_ust_session *usess,
+               const struct consumer_output *output, int wait,
+               uint64_t nb_packets_per_stream)
+{
+       int ret = 0;
+       enum lttng_error_code status = LTTNG_OK;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+       char *trace_path = NULL;
+
+       LTTNG_ASSERT(usess);
+       LTTNG_ASSERT(output);
+
+       rcu_read_lock();
+
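+       /*
+        * The channels to snapshot are located differently depending on the
+        * buffering scheme: per-UID channels are found in the buffer
+        * registries while per-PID channels are looked up in each
+        * application's session.
+        */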
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg;
+
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct buffer_reg_channel *buf_reg_chan;
+                       struct consumer_socket *socket;
+                       char pathname[PATH_MAX];
+                       size_t consumer_path_offset = 0;
+
+                       if (!reg->registry->reg.ust->metadata_key) {
+                               /* Skip since no metadata is present */
+                               continue;
+                       }
+
+                       /* Get consumer socket to use to push the metadata. */
+                       socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+                                       usess->consumer);
+                       if (!socket) {
+                               status = LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+
+                       memset(pathname, 0, sizeof(pathname));
+                       ret = snprintf(pathname, sizeof(pathname),
+                                       DEFAULT_UST_TRACE_UID_PATH,
+                                       reg->uid, reg->bits_per_long);
+                       if (ret < 0) {
+                               PERROR("snprintf snapshot path");
+                               status = LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+                       /* Free the path allocated on the previous iteration. */
+                       free(trace_path);
+                       trace_path = setup_channel_trace_path(usess->consumer, pathname,
+                                               &consumer_path_offset);
+                       if (!trace_path) {
+                               status = LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+                       /* Add the UST default trace dir to path. */
+                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+                                       buf_reg_chan, node.node) {
+                               status = consumer_snapshot_channel(socket,
+                                               buf_reg_chan->consumer_key,
+                                               output, 0, usess->uid,
+                                               usess->gid, &trace_path[consumer_path_offset], wait,
+                                               nb_packets_per_stream);
+                               if (status != LTTNG_OK) {
+                                       goto error;
+                               }
+                       }
+                       status = consumer_snapshot_channel(socket,
+                                       reg->registry->reg.ust->metadata_key, output, 1,
+                                       usess->uid, usess->gid, &trace_path[consumer_path_offset],
+                                       wait, 0);
+                       if (status != LTTNG_OK) {
+                               goto error;
+                       }
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct consumer_socket *socket;
+                       struct lttng_ht_iter chan_iter;
+                       struct ust_app_channel *ua_chan;
+                       struct ust_app_session *ua_sess;
+                       struct ust_registry_session *registry;
+                       char pathname[PATH_MAX];
+                       size_t consumer_path_offset = 0;
+
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* Session not associated with this app. */
+                               continue;
+                       }
+
+                       /* Get the right consumer socket for the application. */
+                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                                       output);
+                       if (!socket) {
+                               status = LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+
+                       /* Add the UST default trace dir to path. */
+                       memset(pathname, 0, sizeof(pathname));
+                       ret = snprintf(pathname, sizeof(pathname), "%s",
+                                       ua_sess->path);
+                       if (ret < 0) {
+                               status = LTTNG_ERR_INVALID;
+                               PERROR("snprintf snapshot path");
+                               goto error;
+                       }
+                       /* Free the path allocated on the previous iteration. */
+                       free(trace_path);
+                       trace_path = setup_channel_trace_path(usess->consumer, pathname,
+                                       &consumer_path_offset);
+                       if (!trace_path) {
+                               status = LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+                                       ua_chan, node.node) {
+                               status = consumer_snapshot_channel(socket,
+                                               ua_chan->key, output, 0,
+                                               lttng_credentials_get_uid(&ua_sess->effective_credentials),
+                                               lttng_credentials_get_gid(&ua_sess->effective_credentials),
+                                               &trace_path[consumer_path_offset], wait,
+                                               nb_packets_per_stream);
+                               switch (status) {
+                               case LTTNG_OK:
+                                       break;
+                               case LTTNG_ERR_CHAN_NOT_FOUND:
+                                       continue;
+                               default:
+                                       goto error;
+                               }
+                       }
+
+                       registry = get_session_registry(ua_sess);
+                       if (!registry) {
+                               DBG("Application session is being torn down. Skip application.");
+                               continue;
+                       }
+                       status = consumer_snapshot_channel(socket,
+                                       registry->metadata_key, output, 1,
+                                       lttng_credentials_get_uid(&ua_sess->effective_credentials),
+                                       lttng_credentials_get_gid(&ua_sess->effective_credentials),
+                                       &trace_path[consumer_path_offset], wait, 0);
+                       switch (status) {
+                       case LTTNG_OK:
+                               break;
+                       case LTTNG_ERR_CHAN_NOT_FOUND:
+                               continue;
+                       default:
+                               goto error;
+                       }
+               }
+               break;
+       }
+       default:
+               abort();
+               break;
+       }
+
+error:
+       free(trace_path);
+       rcu_read_unlock();
+       return status;
+}
+
+/*
+ * Return the size taken by one more packet per stream.
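+ *
+ * For each channel, unless the current per-stream packet count already covers
+ * all of its sub-buffers, one more packet per stream adds
+ * subbuf_size * stream_count bytes to the total.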
+ */
+uint64_t ust_app_get_size_one_more_packet_per_stream(
+               const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
+{
+       uint64_t tot_size = 0;
+       struct ust_app *app;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(usess);
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg;
+
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct buffer_reg_channel *buf_reg_chan;
+
+                       rcu_read_lock();
+                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+                                       buf_reg_chan, node.node) {
+                               if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
+                                       /*
+                                        * Don't take the channel into account
+                                        * if we already grabbed all its packets.
+                                        */
+                                       continue;
+                               }
+                               tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
+                       }
+                       rcu_read_unlock();
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               rcu_read_lock();
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct ust_app_channel *ua_chan;
+                       struct ust_app_session *ua_sess;
+                       struct lttng_ht_iter chan_iter;
+
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* Session not associated with this app. */
+                               continue;
+                       }
+
+                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+                                       ua_chan, node.node) {
+                               if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
+                                       /*
+                                        * Don't take the channel into account
+                                        * if we already grabbed all its packets.
+                                        */
+                                       continue;
+                               }
+                               tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
+                       }
+               }
+               rcu_read_unlock();
+               break;
+       }
+       default:
+               abort();
+               break;
+       }
+
+       return tot_size;
+}
+
+int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
+               struct cds_list_head *buffer_reg_uid_list,
+               struct consumer_output *consumer, uint64_t uchan_id,
+               int overwrite, uint64_t *discarded, uint64_t *lost)
+{
+       int ret;
+       uint64_t consumer_chan_key;
+
+       *discarded = 0;
+       *lost = 0;
+
+       ret = buffer_reg_uid_consumer_channel_key(
+                       buffer_reg_uid_list, uchan_id, &consumer_chan_key);
+       if (ret < 0) {
+               /* Not found */
+               ret = 0;
+               goto end;
+       }
+
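+       /*
+        * In overwrite mode, the relevant statistic is the number of lost
+        * packets; in discard mode, it is the number of discarded events.
+        */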
+       if (overwrite) {
+               ret = consumer_get_lost_packets(ust_session_id,
+                               consumer_chan_key, consumer, lost);
+       } else {
+               ret = consumer_get_discarded_events(ust_session_id,
+                               consumer_chan_key, consumer, discarded);
+       }
+
+end:
+       return ret;
+}
+
+int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
+               struct ltt_ust_channel *uchan,
+               struct consumer_output *consumer, int overwrite,
+               uint64_t *discarded, uint64_t *lost)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_str *ua_chan_node;
+       struct ust_app *app;
+       struct ust_app_session *ua_sess;
+       struct ust_app_channel *ua_chan;
+
+       *discarded = 0;
+       *lost = 0;
+
+       rcu_read_lock();
+       /*
+        * Iterate over every registered application. Sum the counters of all
+        * applications that contain the requested session and channel.
+        */
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               struct lttng_ht_iter uiter;
+
+               ua_sess = lookup_session_by_app(usess, app);
+               if (ua_sess == NULL) {
+                       continue;
+               }
+
+               /* Get channel */
+               lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+               /* If the session is found for the app, the channel must be there */
+               LTTNG_ASSERT(ua_chan_node);
+
+               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+
+               if (overwrite) {
+                       uint64_t _lost;
+
+                       ret = consumer_get_lost_packets(usess->id, ua_chan->key,
+                                       consumer, &_lost);
+                       if (ret < 0) {
+                               break;
+                       }
+                       (*lost) += _lost;
+               } else {
+                       uint64_t _discarded;
+
+                       ret = consumer_get_discarded_events(usess->id,
+                                       ua_chan->key, consumer, &_discarded);
+                       if (ret < 0) {
+                               break;
+                       }
+                       (*discarded) += _discarded;
+               }
+       }
+
+       rcu_read_unlock();
+       return ret;
+}
+
+static
+int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
+               struct ust_app *app)
+{
+       int ret = 0;
+       struct ust_app_session *ua_sess;
+
+       DBG("Regenerating the metadata for ust app pid %d", app->pid);
+
+       rcu_read_lock();
+
+       ua_sess = lookup_session_by_app(usess, app);
+       if (ua_sess == NULL) {
+               /* The session is being torn down. Ignore and continue. */
+               goto end;
+       }
+
+       pthread_mutex_lock(&ua_sess->lock);
+
+       if (ua_sess->deleted) {
+               goto end_unlock;
+       }
+
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_regenerate_statedump(app->sock, ua_sess->handle);
+       pthread_mutex_unlock(&app->sock_lock);
+
+end_unlock:
+       pthread_mutex_unlock(&ua_sess->lock);
+
+end:
+       rcu_read_unlock();
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Regenerate the statedump for each app in the session.
+ */
+int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+
+       DBG("Regenerating the metadata for all UST apps");
+
+       rcu_read_lock();
+
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               if (!app->compatible) {
+                       continue;
+               }
+
+               ret = ust_app_regenerate_statedump(usess, app);
+               if (ret < 0) {
+                       /* Continue to the next app even on error */
+                       continue;
+               }
+       }
+
+       rcu_read_unlock();
+
+       return 0;
+}
+
+/*
+ * Rotate all the channels of a session.
+ *
+ * Return LTTNG_OK on success or else an LTTng error code.
+ */
+enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
+{
+       int ret;
+       enum lttng_error_code cmd_ret = LTTNG_OK;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+       struct ltt_ust_session *usess = session->ust_session;
+
+       LTTNG_ASSERT(usess);
+
+       rcu_read_lock();
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg;
+
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct buffer_reg_channel *buf_reg_chan;
+                       struct consumer_socket *socket;
+
+                       /* Get consumer socket to use to push the metadata. */
+                       socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+                                       usess->consumer);
+                       if (!socket) {
+                               cmd_ret = LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+
+                       /* Rotate the data channels. */
+                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+                                       buf_reg_chan, node.node) {
+                               ret = consumer_rotate_channel(socket,
+                                               buf_reg_chan->consumer_key,
+                                               usess->uid, usess->gid,
+                                               usess->consumer,
+                                               /* is_metadata_channel */ false);
+                               if (ret < 0) {
+                                       cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+                                       goto error;
+                               }
+                       }
+
+                       /*
+                        * The metadata channel might not be present.
+                        *
+                        * Consumer stream allocation can be done
+                        * asynchronously and can fail on intermediary
+                        * operations (e.g. adding a context) and lead to data
+                        * channels created with no metadata channel.
+                        */
+                       if (!reg->registry->reg.ust->metadata_key) {
+                               /* Skip since no metadata is present. */
+                               continue;
+                       }
+
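+                       /*
+                        * Push any buffered metadata to the consumer before
+                        * rotating the metadata channel.
+                        */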
+                       (void) push_metadata(reg->registry->reg.ust, usess->consumer);
+
+                       ret = consumer_rotate_channel(socket,
+                                       reg->registry->reg.ust->metadata_key,
+                                       usess->uid, usess->gid,
+                                       usess->consumer,
+                                       /* is_metadata_channel */ true);
+                       if (ret < 0) {
+                               cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+                               goto error;
+                       }
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct consumer_socket *socket;
+                       struct lttng_ht_iter chan_iter;
+                       struct ust_app_channel *ua_chan;
+                       struct ust_app_session *ua_sess;
+                       struct ust_registry_session *registry;
+
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* Session not associated with this app. */
+                               continue;
+                       }
+
+                       /* Get the right consumer socket for the application. */
+                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                                       usess->consumer);
+                       if (!socket) {
+                               cmd_ret = LTTNG_ERR_INVALID;
+                               goto error;
+                       }
+
+                       registry = get_session_registry(ua_sess);
+                       if (!registry) {
+                               DBG("Application session is being torn down. Skip application.");
+                               continue;
+                       }
+
+                       /* Rotate the data channels. */
+                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+                                       ua_chan, node.node) {
+                               ret = consumer_rotate_channel(socket,
+                                               ua_chan->key,
+                                               lttng_credentials_get_uid(&ua_sess->effective_credentials),
+                                               lttng_credentials_get_gid(&ua_sess->effective_credentials),
+                                               ua_sess->consumer,
+                                               /* is_metadata_channel */ false);
+                               if (ret < 0) {
+                                       /* Per-PID buffer and application going away. */
+                                       if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+                                               continue;
+                                       }
+                                       cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+                                       goto error;
+                               }
+                       }
+
+                       /* Rotate the metadata channel. */
+                       (void) push_metadata(registry, usess->consumer);
+                       ret = consumer_rotate_channel(socket,
+                                       registry->metadata_key,
+                                       lttng_credentials_get_uid(&ua_sess->effective_credentials),
+                                       lttng_credentials_get_gid(&ua_sess->effective_credentials),
+                                       ua_sess->consumer,
+                                       /* is_metadata_channel */ true);
+                       if (ret < 0) {
+                               /* Per-PID buffer and application going away. */
+                               if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+                                       continue;
+                               }
+                               cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+                               goto error;
+                       }
+               }
+               break;
+       }
+       default:
+               abort();
+               break;
+       }
+
+       cmd_ret = LTTNG_OK;
+
+error:
+       rcu_read_unlock();
+       return cmd_ret;
+}
+
+enum lttng_error_code ust_app_create_channel_subdirectories(
+               const struct ltt_ust_session *usess)
+{
+       enum lttng_error_code ret = LTTNG_OK;
+       struct lttng_ht_iter iter;
+       enum lttng_trace_chunk_status chunk_status;
+       char *pathname_index;
+       int fmt_ret;
+
+       LTTNG_ASSERT(usess->current_trace_chunk);
+       rcu_read_lock();
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg;
+
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       fmt_ret = asprintf(&pathname_index,
+                                      DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
+                                      reg->uid, reg->bits_per_long);
+                       if (fmt_ret < 0) {
+                               ERR("Failed to format channel index directory");
+                               ret = LTTNG_ERR_CREATE_DIR_FAIL;
+                               goto error;
+                       }
+
+                       /*
+                        * Create the index subdirectory which will take care
+                        * of implicitly creating the channel's path.
+                        */
+                       chunk_status = lttng_trace_chunk_create_subdirectory(
+                                       usess->current_trace_chunk,
+                                       pathname_index);
+                       free(pathname_index);
+                       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                               ret = LTTNG_ERR_CREATE_DIR_FAIL;
+                               goto error;
+                       }
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               struct ust_app *app;
+
+               /*
+                * Create the toplevel ust/ directory in case no apps are running.
+                */
+               chunk_status = lttng_trace_chunk_create_subdirectory(
+                               usess->current_trace_chunk,
+                               DEFAULT_UST_TRACE_DIR);
+               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       ret = LTTNG_ERR_CREATE_DIR_FAIL;
+                       goto error;
+               }
+
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
+                               pid_n.node) {
+                       struct ust_app_session *ua_sess;
+                       struct ust_registry_session *registry;
+
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* Session not associated with this app. */
+                               continue;
+                       }
+
+                       registry = get_session_registry(ua_sess);
+                       if (!registry) {
+                               DBG("Application session is being torn down. Skip application.");
+                               continue;
+                       }
+
+                       fmt_ret = asprintf(&pathname_index,
+                                       DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
+                                       ua_sess->path);
+                       if (fmt_ret < 0) {
+                               ERR("Failed to format channel index directory");
+                               ret = LTTNG_ERR_CREATE_DIR_FAIL;
+                               goto error;
+                       }
+                       /*
+                        * Create the index subdirectory which will take care
+                        * of implicitly creating the channel's path.
+                        */
+                       chunk_status = lttng_trace_chunk_create_subdirectory(
+                                       usess->current_trace_chunk,
+                                       pathname_index);
+                       free(pathname_index);
+                       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                               ret = LTTNG_ERR_CREATE_DIR_FAIL;
+                               goto error;
+                       }
+               }
+               break;
+       }
+       default:
+               abort();
+       }
+
+       ret = LTTNG_OK;
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Clear all the channels of a session.
+ *
+ * Return LTTNG_OK on success or else an LTTng error code.
+ */
+enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
+{
+       int ret;
+       enum lttng_error_code cmd_ret = LTTNG_OK;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+       struct ltt_ust_session *usess = session->ust_session;
+
+       LTTNG_ASSERT(usess);
+
+       rcu_read_lock();
+
+       if (usess->active) {
+               ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
+               cmd_ret = LTTNG_ERR_FATAL;
+               goto end;
+       }
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg;
+
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct buffer_reg_channel *buf_reg_chan;
+                       struct consumer_socket *socket;
+
+                       /* Get consumer socket to use to push the metadata. */
+                       socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+                                       usess->consumer);
+                       if (!socket) {
+                               cmd_ret = LTTNG_ERR_INVALID;
+                               goto error_socket;
+                       }
+
+                       /* Clear the data channels. */
+                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+                                       buf_reg_chan, node.node) {
+                               ret = consumer_clear_channel(socket,
+                                               buf_reg_chan->consumer_key);
+                               if (ret < 0) {
+                                       goto error;
+                               }
+                       }
+
+                       (void) push_metadata(reg->registry->reg.ust, usess->consumer);
+
+                       /*
+                        * Clear the metadata channel.
+                        * The metadata channel is not cleared per se, but we still
+                        * need to perform a rotation operation on it behind the scenes.
+                        */
+                       ret = consumer_clear_channel(socket,
+                                       reg->registry->reg.ust->metadata_key);
+                       if (ret < 0) {
+                               goto error;
+                       }
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct consumer_socket *socket;
+                       struct lttng_ht_iter chan_iter;
+                       struct ust_app_channel *ua_chan;
+                       struct ust_app_session *ua_sess;
+                       struct ust_registry_session *registry;
+
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* Session not associated with this app. */
+                               continue;
+                       }
+
+                       /* Get the right consumer socket for the application. */
+                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                                       usess->consumer);
+                       if (!socket) {
+                               cmd_ret = LTTNG_ERR_INVALID;
+                               goto error_socket;
+                       }
+
+                       registry = get_session_registry(ua_sess);
+                       if (!registry) {
+                               DBG("Application session is being torn down. Skip application.");
+                               continue;
+                       }
+
+                       /* Clear the data channels. */
+                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+                                       ua_chan, node.node) {
+                               ret = consumer_clear_channel(socket, ua_chan->key);
+                               if (ret < 0) {
+                                       /* Per-PID buffer and application going away. */
+                                       if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+                                               continue;
+                                       }
+                                       goto error;
+                               }
+                       }
+
+                       (void) push_metadata(registry, usess->consumer);
+
+                       /*
+                        * Clear the metadata channel.
+                        * The metadata channel is not cleared per se, but we still
+                        * need to perform a rotation operation on it behind the scenes.
+                        */
+                       ret = consumer_clear_channel(socket, registry->metadata_key);
+                       if (ret < 0) {
+                               /* Per-PID buffer and application going away. */
+                               if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+                                       continue;
+                               }
+                               goto error;
+                       }
+               }
+               break;
+       }
+       default:
+               abort();
+               break;
+       }
+
+       cmd_ret = LTTNG_OK;
+       goto end;
+
+error:
+       switch (-ret) {
+       case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
+               cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
+               break;
+       default:
+               cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
+       }
+
+error_socket:
+end:
+       rcu_read_unlock();
+       return cmd_ret;
+}
+
+/*
+ * This function skips the metadata channel as the begin/end timestamps of a
+ * metadata packet are useless.
+ *
+ * Moreover, opening a packet after a "clear" will cause problems for live
+ * sessions as it will introduce padding that was not part of the first trace
+ * chunk. The relay daemon expects the metadata stream of each successive
+ * trace chunk to be a strict superset of the previous chunk's metadata stream.
+ *
+ * For example, flushing a packet at the beginning of the metadata stream of
+ * a trace chunk resulting from a "clear" session command will cause the
+ * size of the metadata stream of the new trace chunk to not match the size of
+ * the metadata stream of the original chunk. This will confuse the relay
+ * daemon as the same "offset" in a metadata stream will no longer point
+ * to the same content.
+ */
+enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
+{
+       enum lttng_error_code ret = LTTNG_OK;
+       struct lttng_ht_iter iter;
+       struct ltt_ust_session *usess = session->ust_session;
+
+       LTTNG_ASSERT(usess);
+
+       rcu_read_lock();
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg;
+
+               cds_list_for_each_entry (
+                               reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct buffer_reg_channel *buf_reg_chan;
+                       struct consumer_socket *socket;
+
+                       socket = consumer_find_socket_by_bitness(
+                                       reg->bits_per_long, usess->consumer);
+                       if (!socket) {
+                               ret = LTTNG_ERR_FATAL;
+                               goto error;
+                       }
+
+                       cds_lfht_for_each_entry(reg->registry->channels->ht,
+                                       &iter.iter, buf_reg_chan, node.node) {
+                               const int open_ret =
+                                               consumer_open_channel_packets(
+                                                       socket,
+                                                       buf_reg_chan->consumer_key);
+
+                               if (open_ret < 0) {
+                                       ret = LTTNG_ERR_UNK;
+                                       goto error;
+                               }
+                       }
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               struct ust_app *app;
+
+               cds_lfht_for_each_entry (
+                               ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct consumer_socket *socket;
+                       struct lttng_ht_iter chan_iter;
+                       struct ust_app_channel *ua_chan;
+                       struct ust_app_session *ua_sess;
+                       struct ust_registry_session *registry;
+
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* Session not associated with this app. */
+                               continue;
+                       }
+
+                       /* Get the right consumer socket for the application. */
+                       socket = consumer_find_socket_by_bitness(
+                                       app->bits_per_long, usess->consumer);
+                       if (!socket) {
+                               ret = LTTNG_ERR_FATAL;
+                               goto error;
+                       }
+
+                       registry = get_session_registry(ua_sess);
+                       if (!registry) {
+                               DBG("Application session is being torn down. Skip application.");
+                               continue;
+                       }
+
+                       cds_lfht_for_each_entry(ua_sess->channels->ht,
+                                       &chan_iter.iter, ua_chan, node.node) {
+                               const int open_ret =
+                                               consumer_open_channel_packets(
+                                                       socket,
+                                                       ua_chan->key);
+
+                               if (open_ret < 0) {
+                                       /*
+                                        * Per-PID buffer and application going
+                                        * away.
+                                        */
+                                       if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+                                               continue;
+                                       }
+
+                                       ret = LTTNG_ERR_UNK;
+                                       goto error;
+                               }
+                       }
+               }
+               break;
+       }
+       default:
+               abort();
+               break;
+       }
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
index 77a365409780a36c6bcd35ca8bef6bf15632b38c..8d2d289864ed1a3114bf846f7a67d5baa1538dc8 100644 (file)
@@ -572,7 +572,7 @@ static inline
 enum lttng_error_code ust_app_snapshot_record(struct ltt_ust_session *usess,
                const struct consumer_output *output, int wait, uint64_t max_stream_size)
 {
-       return 0;
+       return LTTNG_ERR_UNK;
 }
 static inline
 unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
@@ -642,14 +642,14 @@ int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
 static inline
 enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
 {
-       return 0;
+       return LTTNG_ERR_UNK;
 }
 
 static inline
 enum lttng_error_code ust_app_create_channel_subdirectories(
                const struct ltt_ust_session *session)
 {
-       return 0;
+       return LTTNG_ERR_UNK;
 }
 
 static inline
@@ -661,13 +661,13 @@ int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data
 static inline
 enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
 {
-       return 0;
+       return LTTNG_ERR_UNK;
 }
 
 static inline
 enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
 {
-       return 0;
+       return LTTNG_ERR_UNK;
 }
 
 #endif /* HAVE_LIBLTTNG_UST_CTL */
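The stub changes above follow from a C/C++ difference: C accepts an integer
returned from a function whose return type is an enum, but C++ has no implicit
int-to-enum conversion, so "return 0;" no longer compiles for these
lttng_error_code stubs (and 0 is not among the lttng_error_code enumerators in
any case). A minimal sketch of the difference, with illustrative enumerator
values rather than the real ones from <lttng/lttng-error.h>:

    enum lttng_error_code { LTTNG_OK = 10, LTTNG_ERR_UNK = 11 };

    enum lttng_error_code stub(void)
    {
            /* Valid C; ill-formed C++ (no implicit int -> enum conversion). */
            return 0;
    }

    enum lttng_error_code fixed_stub(void)
    {
            /* An enumerator must be named explicitly in C++. */
            return LTTNG_ERR_UNK;
    }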
diff --git a/src/bin/lttng-sessiond/ust-consumer.c b/src/bin/lttng-sessiond/ust-consumer.c
deleted file mode 100644 (file)
index a68c5d9..0000000
+++ /dev/null
@@ -1,513 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <inttypes.h>
-
-#include <common/compat/errno.h>
-#include <common/common.h>
-#include <common/consumer/consumer.h>
-#include <common/defaults.h>
-
-#include "consumer.h"
-#include "health-sessiond.h"
-#include "ust-consumer.h"
-#include "lttng-ust-error.h"
-#include "buffer-registry.h"
-#include "session.h"
-#include "lttng-sessiond.h"
-
-/*
- * Send a single channel to the consumer using command ASK_CHANNEL_CREATION.
- *
- * Consumer socket lock MUST be acquired before calling this.
- */
-static int ask_channel_creation(struct ust_app_session *ua_sess,
-               struct ust_app_channel *ua_chan,
-               struct consumer_output *consumer,
-               struct consumer_socket *socket,
-               struct ust_registry_session *registry,
-               struct lttng_trace_chunk *trace_chunk)
-{
-       int ret, output;
-       uint32_t chan_id;
-       uint64_t key, chan_reg_key;
-       char *pathname = NULL;
-       struct lttcomm_consumer_msg msg;
-       struct ust_registry_channel *ust_reg_chan;
-       char shm_path[PATH_MAX] = "";
-       char root_shm_path[PATH_MAX] = "";
-       bool is_local_trace;
-       size_t consumer_path_offset = 0;
-
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(ua_chan);
-       LTTNG_ASSERT(socket);
-       LTTNG_ASSERT(consumer);
-       LTTNG_ASSERT(registry);
-
-       DBG2("Asking UST consumer for channel");
-
-       is_local_trace = consumer->net_seq_index == -1ULL;
-       /* Format the channel's path (relative to the current trace chunk). */
-       pathname = setup_channel_trace_path(consumer, ua_sess->path,
-                       &consumer_path_offset);
-       if (!pathname) {
-               ret = -1;
-               goto error;
-       }
-
-       if (is_local_trace && trace_chunk) {
-               enum lttng_trace_chunk_status chunk_status;
-               char *pathname_index;
-
-               ret = asprintf(&pathname_index, "%s/" DEFAULT_INDEX_DIR,
-                               pathname);
-               if (ret < 0) {
-                       ERR("Failed to format channel index directory");
-                       ret = -1;
-                       goto error;
-               }
-
-               /*
-                * Create the index subdirectory which will take care
-                * of implicitly creating the channel's path.
-                */
-               chunk_status = lttng_trace_chunk_create_subdirectory(
-                               trace_chunk, pathname_index);
-               free(pathname_index);
-               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
-                       ret = -1;
-                       goto error;
-               }
-       }
-
-       /* Depending on the buffer type, a different channel key is used. */
-       if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
-               chan_reg_key = ua_chan->tracing_channel_id;
-       } else {
-               chan_reg_key = ua_chan->key;
-       }
-
-       if (ua_chan->attr.type == LTTNG_UST_ABI_CHAN_METADATA) {
-               chan_id = -1U;
-               /*
-                * Metadata channels shm_path (buffers) are handled within
-                * session daemon. Consumer daemon should not try to create
-                * those buffer files.
-                */
-       } else {
-               ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
-               LTTNG_ASSERT(ust_reg_chan);
-               chan_id = ust_reg_chan->chan_id;
-               if (ua_sess->shm_path[0]) {
-                       strncpy(shm_path, ua_sess->shm_path, sizeof(shm_path));
-                       shm_path[sizeof(shm_path) - 1] = '\0';
-                       strncat(shm_path, "/",
-                               sizeof(shm_path) - strlen(shm_path) - 1);
-                       strncat(shm_path, ua_chan->name,
-                                       sizeof(shm_path) - strlen(shm_path) - 1);
-                               strncat(shm_path, "_",
-                                       sizeof(shm_path) - strlen(shm_path) - 1);
-               }
-               strncpy(root_shm_path, ua_sess->root_shm_path, sizeof(root_shm_path));
-               root_shm_path[sizeof(root_shm_path) - 1] = '\0';
-       }
-
-       switch (ua_chan->attr.output) {
-       case LTTNG_UST_ABI_MMAP:
-       default:
-               output = LTTNG_EVENT_MMAP;
-               break;
-       }
-
-       consumer_init_ask_channel_comm_msg(&msg,
-                       ua_chan->attr.subbuf_size,
-                       ua_chan->attr.num_subbuf,
-                       ua_chan->attr.overwrite,
-                       ua_chan->attr.switch_timer_interval,
-                       ua_chan->attr.read_timer_interval,
-                       ua_sess->live_timer_interval,
-                       ua_sess->live_timer_interval != 0,
-                       ua_chan->monitor_timer_interval,
-                       output,
-                       (int) ua_chan->attr.type,
-                       ua_sess->tracing_id,
-                       &pathname[consumer_path_offset],
-                       ua_chan->name,
-                       consumer->net_seq_index,
-                       ua_chan->key,
-                       registry->uuid,
-                       chan_id,
-                       ua_chan->tracefile_size,
-                       ua_chan->tracefile_count,
-                       ua_sess->id,
-                       ua_sess->output_traces,
-                       lttng_credentials_get_uid(&ua_sess->real_credentials),
-                       ua_chan->attr.blocking_timeout,
-                       root_shm_path, shm_path,
-                       trace_chunk,
-                       &ua_sess->effective_credentials);
-
-       health_code_update();
-
-       ret = consumer_socket_send(socket, &msg, sizeof(msg));
-       if (ret < 0) {
-               goto error;
-       }
-
-       ret = consumer_recv_status_channel(socket, &key,
-                       &ua_chan->expected_stream_count);
-       if (ret < 0) {
-               goto error;
-       }
-       /* Communication protocol error. */
-       LTTNG_ASSERT(key == ua_chan->key);
-       /* We need at least one where 1 stream for 1 cpu. */
-       if (ua_sess->output_traces) {
-               LTTNG_ASSERT(ua_chan->expected_stream_count > 0);
-       }
-
-       DBG2("UST ask channel %" PRIu64 " successfully done with %u stream(s)", key,
-                       ua_chan->expected_stream_count);
-
-error:
-       free(pathname);
-       health_code_update();
-       return ret;
-}
-
-/*
- * Ask consumer to create a channel for a given session.
- *
- * Session list and rcu read side locks must be held by the caller.
- *
- * Returns 0 on success else a negative value.
- */
-int ust_consumer_ask_channel(struct ust_app_session *ua_sess,
-               struct ust_app_channel *ua_chan,
-               struct consumer_output *consumer,
-               struct consumer_socket *socket,
-               struct ust_registry_session *registry,
-               struct lttng_trace_chunk * trace_chunk)
-{
-       int ret;
-
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(ua_chan);
-       LTTNG_ASSERT(consumer);
-       LTTNG_ASSERT(socket);
-       LTTNG_ASSERT(registry);
-
-       if (!consumer->enabled) {
-               ret = -LTTNG_ERR_NO_CONSUMER;
-               DBG3("Consumer is disabled");
-               goto error;
-       }
-
-       pthread_mutex_lock(socket->lock);
-       ret = ask_channel_creation(ua_sess, ua_chan, consumer, socket, registry,
-                       trace_chunk);
-       pthread_mutex_unlock(socket->lock);
-       if (ret < 0) {
-               ERR("ask_channel_creation consumer command failed");
-               goto error;
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Send a get channel command to consumer using the given channel key.  The
- * channel object is populated and the stream list.
- *
- * Return 0 on success else a negative value.
- */
-int ust_consumer_get_channel(struct consumer_socket *socket,
-               struct ust_app_channel *ua_chan)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(ua_chan);
-       LTTNG_ASSERT(socket);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_GET_CHANNEL;
-       msg.u.get_channel.key = ua_chan->key;
-
-       pthread_mutex_lock(socket->lock);
-       health_code_update();
-
-       /* Send command and wait for OK reply. */
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0) {
-               goto error;
-       }
-
-       /* First, get the channel from consumer. */
-       ret = lttng_ust_ctl_recv_channel_from_consumer(*socket->fd_ptr, &ua_chan->obj);
-       if (ret < 0) {
-               if (ret != -EPIPE) {
-                       ERR("Error recv channel from consumer %d with ret %d",
-                                       *socket->fd_ptr, ret);
-               } else {
-                       DBG3("UST app recv channel from consumer. Consumer is dead.");
-               }
-               goto error;
-       }
-
-       /* Next, get all streams. */
-       while (1) {
-               struct ust_app_stream *stream;
-
-               /* Create UST stream */
-               stream = ust_app_alloc_stream();
-               if (stream == NULL) {
-                       ret = -ENOMEM;
-                       goto error;
-               }
-
-               /* Stream object is populated by this call if successful. */
-               ret = lttng_ust_ctl_recv_stream_from_consumer(*socket->fd_ptr, &stream->obj);
-               if (ret < 0) {
-                       free(stream);
-                       if (ret == -LTTNG_UST_ERR_NOENT) {
-                               DBG3("UST app consumer has no more stream available");
-                               break;
-                       }
-                       if (ret != -EPIPE) {
-                               ERR("Recv stream from consumer %d with ret %d",
-                                               *socket->fd_ptr, ret);
-                       } else {
-                               DBG3("UST app recv stream from consumer. Consumer is dead.");
-                       }
-                       goto error;
-               }
-
-               /* Order is important this is why a list is used. */
-               cds_list_add_tail(&stream->list, &ua_chan->streams.head);
-               ua_chan->streams.count++;
-
-               DBG2("UST app stream %d received successfully", ua_chan->streams.count);
-       }
-
-       /* This MUST match or else we have a synchronization problem. */
-       LTTNG_ASSERT(ua_chan->expected_stream_count == ua_chan->streams.count);
-
-       /* Wait for confirmation that we can proceed with the streams. */
-       ret = consumer_recv_status_reply(socket);
-       if (ret < 0) {
-               goto error;
-       }
-
-error:
-       health_code_update();
-       pthread_mutex_unlock(socket->lock);
-       return ret;
-}
-
-/*
- * Send a destroy channel command to consumer using the given channel key.
- *
- * Note that this command MUST be used prior to a successful
- * LTTNG_CONSUMER_GET_CHANNEL because once this command is done successfully,
- * the streams are dispatched to the consumer threads and MUST be teardown
- * through the hang up process.
- *
- * Return 0 on success else a negative value.
- */
-int ust_consumer_destroy_channel(struct consumer_socket *socket,
-               struct ust_app_channel *ua_chan)
-{
-       int ret;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(ua_chan);
-       LTTNG_ASSERT(socket);
-
-       memset(&msg, 0, sizeof(msg));
-       msg.cmd_type = LTTNG_CONSUMER_DESTROY_CHANNEL;
-       msg.u.destroy_channel.key = ua_chan->key;
-
-       pthread_mutex_lock(socket->lock);
-       health_code_update();
-
-       ret = consumer_send_msg(socket, &msg);
-       if (ret < 0) {
-               goto error;
-       }
-
-error:
-       health_code_update();
-       pthread_mutex_unlock(socket->lock);
-       return ret;
-}
-
-/*
- * Send a given stream to UST tracer.
- *
- * On success return 0 else a negative value.
- */
-int ust_consumer_send_stream_to_ust(struct ust_app *app,
-               struct ust_app_channel *channel, struct ust_app_stream *stream)
-{
-       int ret;
-
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(stream);
-       LTTNG_ASSERT(channel);
-
-       DBG2("UST consumer send stream to app %d", app->sock);
-
-       /* Relay stream to application. */
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_send_stream_to_ust(app->sock, channel->obj, stream->obj);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app send stream to ust failed. Application is dead. (pid: %d, sock: %d).",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app send stream to ust failed. Communication time out (pid: %d, sock: %d).",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app send stream, handle %d, to ust failed with ret %d (pid: %d, sock: %d).",
-                                       stream->obj->handle, ret, app->pid,
-                                       app->sock);
-               }
-               goto error;
-       }
-       channel->handle = channel->obj->handle;
-
-error:
-       return ret;
-}
-
-/*
- * Send channel previously received from the consumer to the UST tracer.
- *
- * On success return 0 else a negative value.
- */
-int ust_consumer_send_channel_to_ust(struct ust_app *app,
-               struct ust_app_session *ua_sess, struct ust_app_channel *channel)
-{
-       int ret;
-
-       LTTNG_ASSERT(app);
-       LTTNG_ASSERT(ua_sess);
-       LTTNG_ASSERT(channel);
-       LTTNG_ASSERT(channel->obj);
-
-       DBG2("UST app send channel to sock %d pid %d (name: %s, key: %" PRIu64 ")",
-                       app->sock, app->pid, channel->name, channel->tracing_channel_id);
-
-       /* Send stream to application. */
-       pthread_mutex_lock(&app->sock_lock);
-       ret = lttng_ust_ctl_send_channel_to_ust(app->sock, ua_sess->handle, channel->obj);
-       pthread_mutex_unlock(&app->sock_lock);
-       if (ret < 0) {
-               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app send channel to ust failed. Application is dead (pid: %d, sock: %d).",
-                                       app->pid, app->sock);
-               } else if (ret == -EAGAIN) {
-                       WARN("UST app send channel to ust failed. Communication timeout (pid: %d, sock: %d).",
-                                       app->pid, app->sock);
-               } else {
-                       ERR("UST app send channel %s, to ust failed with ret %d (pid: %d, sock: %d).",
-                                       channel->name, ret, app->pid,
-                                       app->sock);
-               }
-               goto error;
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Handle the metadata requests from the UST consumer
- *
- * Return 0 on success else a negative value.
- */
-int ust_consumer_metadata_request(struct consumer_socket *socket)
-{
-       int ret;
-       ssize_t ret_push;
-       struct lttcomm_metadata_request_msg request;
-       struct buffer_reg_uid *reg_uid;
-       struct ust_registry_session *ust_reg;
-       struct lttcomm_consumer_msg msg;
-
-       LTTNG_ASSERT(socket);
-
-       rcu_read_lock();
-       health_code_update();
-
-       /* Wait for a metadata request */
-       pthread_mutex_lock(socket->lock);
-       ret = consumer_socket_recv(socket, &request, sizeof(request));
-       pthread_mutex_unlock(socket->lock);
-       if (ret < 0) {
-               goto end;
-       }
-
-       DBG("Metadata request received for session %" PRIu64 ", key %" PRIu64,
-                       request.session_id, request.key);
-
-       reg_uid = buffer_reg_uid_find(request.session_id,
-                       request.bits_per_long, request.uid);
-       if (reg_uid) {
-               ust_reg = reg_uid->registry->reg.ust;
-       } else {
-               struct buffer_reg_pid *reg_pid =
-                       buffer_reg_pid_find(request.session_id_per_pid);
-               if (!reg_pid) {
-                       DBG("PID registry not found for session id %" PRIu64,
-                                       request.session_id_per_pid);
-
-                       memset(&msg, 0, sizeof(msg));
-                       msg.cmd_type = LTTNG_ERR_UND;
-                       pthread_mutex_lock(socket->lock);
-                       (void) consumer_send_msg(socket, &msg);
-                       pthread_mutex_unlock(socket->lock);
-                       /*
-                        * This is possible since the session might have been destroyed
-                        * during a consumer metadata request. So here, return gracefully
-                        * because the destroy session will push the remaining metadata to
-                        * the consumer.
-                        */
-                       ret = 0;
-                       goto end;
-               }
-               ust_reg = reg_pid->registry->reg.ust;
-       }
-       LTTNG_ASSERT(ust_reg);
-
-       pthread_mutex_lock(&ust_reg->lock);
-       ret_push = ust_app_push_metadata(ust_reg, socket, 1);
-       pthread_mutex_unlock(&ust_reg->lock);
-       if (ret_push == -EPIPE) {
-               DBG("Application or relay closed while pushing metadata");
-       } else if (ret_push < 0) {
-               ERR("Pushing metadata");
-               ret = -1;
-               goto end;
-       } else {
-               DBG("UST Consumer metadata pushed successfully");
-       }
-       ret = 0;
-
-end:
-       rcu_read_unlock();
-       return ret;
-}
diff --git a/src/bin/lttng-sessiond/ust-consumer.cpp b/src/bin/lttng-sessiond/ust-consumer.cpp
new file mode 100644 (file)
index 0000000..a68c5d9
--- /dev/null
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <common/compat/errno.h>
+#include <common/common.h>
+#include <common/consumer/consumer.h>
+#include <common/defaults.h>
+
+#include "consumer.h"
+#include "health-sessiond.h"
+#include "ust-consumer.h"
+#include "lttng-ust-error.h"
+#include "buffer-registry.h"
+#include "session.h"
+#include "lttng-sessiond.h"
+
+/*
+ * Send a single channel to the consumer using command ASK_CHANNEL_CREATION.
+ *
+ * Consumer socket lock MUST be acquired before calling this.
+ */
+static int ask_channel_creation(struct ust_app_session *ua_sess,
+               struct ust_app_channel *ua_chan,
+               struct consumer_output *consumer,
+               struct consumer_socket *socket,
+               struct ust_registry_session *registry,
+               struct lttng_trace_chunk *trace_chunk)
+{
+       int ret, output;
+       uint32_t chan_id;
+       uint64_t key, chan_reg_key;
+       char *pathname = NULL;
+       struct lttcomm_consumer_msg msg;
+       struct ust_registry_channel *ust_reg_chan;
+       char shm_path[PATH_MAX] = "";
+       char root_shm_path[PATH_MAX] = "";
+       bool is_local_trace;
+       size_t consumer_path_offset = 0;
+
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(ua_chan);
+       LTTNG_ASSERT(socket);
+       LTTNG_ASSERT(consumer);
+       LTTNG_ASSERT(registry);
+
+       DBG2("Asking UST consumer for channel");
+
+       is_local_trace = consumer->net_seq_index == -1ULL;
+       /* Format the channel's path (relative to the current trace chunk). */
+       pathname = setup_channel_trace_path(consumer, ua_sess->path,
+                       &consumer_path_offset);
+       if (!pathname) {
+               ret = -1;
+               goto error;
+       }
+
+       if (is_local_trace && trace_chunk) {
+               enum lttng_trace_chunk_status chunk_status;
+               char *pathname_index;
+
+               ret = asprintf(&pathname_index, "%s/" DEFAULT_INDEX_DIR,
+                               pathname);
+               if (ret < 0) {
+                       ERR("Failed to format channel index directory");
+                       ret = -1;
+                       goto error;
+               }
+
+               /*
+                * Create the index subdirectory which will take care
+                * of implicitly creating the channel's path.
+                */
+               chunk_status = lttng_trace_chunk_create_subdirectory(
+                               trace_chunk, pathname_index);
+               free(pathname_index);
+               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       ret = -1;
+                       goto error;
+               }
+       }
+
+       /* Depending on the buffer type, a different channel key is used. */
+       if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
+               chan_reg_key = ua_chan->tracing_channel_id;
+       } else {
+               chan_reg_key = ua_chan->key;
+       }
+
+       if (ua_chan->attr.type == LTTNG_UST_ABI_CHAN_METADATA) {
+               chan_id = -1U;
+               /*
+                * Metadata channels shm_path (buffers) are handled within
+                * session daemon. Consumer daemon should not try to create
+                * those buffer files.
+                */
+       } else {
+               ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
+               LTTNG_ASSERT(ust_reg_chan);
+               chan_id = ust_reg_chan->chan_id;
+               if (ua_sess->shm_path[0]) {
+                       strncpy(shm_path, ua_sess->shm_path, sizeof(shm_path));
+                       shm_path[sizeof(shm_path) - 1] = '\0';
+                       strncat(shm_path, "/",
+                                       sizeof(shm_path) - strlen(shm_path) - 1);
+                       strncat(shm_path, ua_chan->name,
+                                       sizeof(shm_path) - strlen(shm_path) - 1);
+                       strncat(shm_path, "_",
+                                       sizeof(shm_path) - strlen(shm_path) - 1);
+               }
+               strncpy(root_shm_path, ua_sess->root_shm_path, sizeof(root_shm_path));
+               root_shm_path[sizeof(root_shm_path) - 1] = '\0';
+       }
+
+       switch (ua_chan->attr.output) {
+       case LTTNG_UST_ABI_MMAP:
+       default:
+               output = LTTNG_EVENT_MMAP;
+               break;
+       }
+
+       consumer_init_ask_channel_comm_msg(&msg,
+                       ua_chan->attr.subbuf_size,
+                       ua_chan->attr.num_subbuf,
+                       ua_chan->attr.overwrite,
+                       ua_chan->attr.switch_timer_interval,
+                       ua_chan->attr.read_timer_interval,
+                       ua_sess->live_timer_interval,
+                       ua_sess->live_timer_interval != 0,
+                       ua_chan->monitor_timer_interval,
+                       output,
+                       (int) ua_chan->attr.type,
+                       ua_sess->tracing_id,
+                       &pathname[consumer_path_offset],
+                       ua_chan->name,
+                       consumer->net_seq_index,
+                       ua_chan->key,
+                       registry->uuid,
+                       chan_id,
+                       ua_chan->tracefile_size,
+                       ua_chan->tracefile_count,
+                       ua_sess->id,
+                       ua_sess->output_traces,
+                       lttng_credentials_get_uid(&ua_sess->real_credentials),
+                       ua_chan->attr.blocking_timeout,
+                       root_shm_path, shm_path,
+                       trace_chunk,
+                       &ua_sess->effective_credentials);
+
+       health_code_update();
+
+       ret = consumer_socket_send(socket, &msg, sizeof(msg));
+       if (ret < 0) {
+               goto error;
+       }
+
+       ret = consumer_recv_status_channel(socket, &key,
+                       &ua_chan->expected_stream_count);
+       if (ret < 0) {
+               goto error;
+       }
+       /* Communication protocol error. */
+       LTTNG_ASSERT(key == ua_chan->key);
+       /* We expect at least one stream, as there is one stream per CPU. */
+       if (ua_sess->output_traces) {
+               LTTNG_ASSERT(ua_chan->expected_stream_count > 0);
+       }
+
+       DBG2("UST ask channel %" PRIu64 " successfully done with %u stream(s)", key,
+                       ua_chan->expected_stream_count);
+
+error:
+       free(pathname);
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Ask consumer to create a channel for a given session.
+ *
+ * Session list and rcu read side locks must be held by the caller.
+ *
+ * Returns 0 on success else a negative value.
+ */
+int ust_consumer_ask_channel(struct ust_app_session *ua_sess,
+               struct ust_app_channel *ua_chan,
+               struct consumer_output *consumer,
+               struct consumer_socket *socket,
+               struct ust_registry_session *registry,
+               struct lttng_trace_chunk * trace_chunk)
+{
+       int ret;
+
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(ua_chan);
+       LTTNG_ASSERT(consumer);
+       LTTNG_ASSERT(socket);
+       LTTNG_ASSERT(registry);
+
+       if (!consumer->enabled) {
+               ret = -LTTNG_ERR_NO_CONSUMER;
+               DBG3("Consumer is disabled");
+               goto error;
+       }
+
+       pthread_mutex_lock(socket->lock);
+       ret = ask_channel_creation(ua_sess, ua_chan, consumer, socket, registry,
+                       trace_chunk);
+       pthread_mutex_unlock(socket->lock);
+       if (ret < 0) {
+               ERR("ask_channel_creation consumer command failed");
+               goto error;
+       }
+
+error:
+       return ret;
+}
+
+/*
+ * Send a get channel command to the consumer using the given channel key. The
+ * channel object and its stream list are populated.
+ *
+ * Return 0 on success else a negative value.
+ */
+int ust_consumer_get_channel(struct consumer_socket *socket,
+               struct ust_app_channel *ua_chan)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(ua_chan);
+       LTTNG_ASSERT(socket);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_GET_CHANNEL;
+       msg.u.get_channel.key = ua_chan->key;
+
+       pthread_mutex_lock(socket->lock);
+       health_code_update();
+
+       /* Send command and wait for OK reply. */
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /* First, get the channel from consumer. */
+       ret = lttng_ust_ctl_recv_channel_from_consumer(*socket->fd_ptr, &ua_chan->obj);
+       if (ret < 0) {
+               if (ret != -EPIPE) {
+                       ERR("Error recv channel from consumer %d with ret %d",
+                                       *socket->fd_ptr, ret);
+               } else {
+                       DBG3("UST app recv channel from consumer. Consumer is dead.");
+               }
+               goto error;
+       }
+
+       /* Next, get all streams. */
+       while (1) {
+               struct ust_app_stream *stream;
+
+               /* Create UST stream */
+               stream = ust_app_alloc_stream();
+               if (stream == NULL) {
+                       ret = -ENOMEM;
+                       goto error;
+               }
+
+               /* Stream object is populated by this call if successful. */
+               ret = lttng_ust_ctl_recv_stream_from_consumer(*socket->fd_ptr, &stream->obj);
+               if (ret < 0) {
+                       free(stream);
+                       if (ret == -LTTNG_UST_ERR_NOENT) {
+                               DBG3("UST app consumer has no more stream available");
+                               break;
+                       }
+                       if (ret != -EPIPE) {
+                               ERR("Recv stream from consumer %d with ret %d",
+                                               *socket->fd_ptr, ret);
+                       } else {
+                               DBG3("UST app recv stream from consumer. Consumer is dead.");
+                       }
+                       goto error;
+               }
+
+               /* Order is important; this is why a list is used. */
+               cds_list_add_tail(&stream->list, &ua_chan->streams.head);
+               ua_chan->streams.count++;
+
+               DBG2("UST app stream %d received successfully", ua_chan->streams.count);
+       }
+
+       /* This MUST match or else we have a synchronization problem. */
+       LTTNG_ASSERT(ua_chan->expected_stream_count == ua_chan->streams.count);
+
+       /* Wait for confirmation that we can proceed with the streams. */
+       ret = consumer_recv_status_reply(socket);
+       if (ret < 0) {
+               goto error;
+       }
+
+error:
+       health_code_update();
+       pthread_mutex_unlock(socket->lock);
+       return ret;
+}
+
+/*
+ * Send a destroy channel command to consumer using the given channel key.
+ *
+ * Note that this command MUST be used prior to a successful
+ * LTTNG_CONSUMER_GET_CHANNEL because, once GET_CHANNEL completes successfully,
+ * the streams are dispatched to the consumer threads and MUST be torn down
+ * through the hang-up process.
+ *
+ * Return 0 on success else a negative value.
+ */
+int ust_consumer_destroy_channel(struct consumer_socket *socket,
+               struct ust_app_channel *ua_chan)
+{
+       int ret;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(ua_chan);
+       LTTNG_ASSERT(socket);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd_type = LTTNG_CONSUMER_DESTROY_CHANNEL;
+       msg.u.destroy_channel.key = ua_chan->key;
+
+       pthread_mutex_lock(socket->lock);
+       health_code_update();
+
+       ret = consumer_send_msg(socket, &msg);
+       if (ret < 0) {
+               goto error;
+       }
+
+error:
+       health_code_update();
+       pthread_mutex_unlock(socket->lock);
+       return ret;
+}
+
+/*
+ * Send a given stream to UST tracer.
+ *
+ * On success return 0 else a negative value.
+ */
+int ust_consumer_send_stream_to_ust(struct ust_app *app,
+               struct ust_app_channel *channel, struct ust_app_stream *stream)
+{
+       int ret;
+
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(stream);
+       LTTNG_ASSERT(channel);
+
+       DBG2("UST consumer send stream to app %d", app->sock);
+
+       /* Relay stream to application. */
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_send_stream_to_ust(app->sock, channel->obj, stream->obj);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       DBG3("UST app send stream to ust failed. Application is dead. (pid: %d, sock: %d).",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app send stream to ust failed. Communication timeout (pid: %d, sock: %d).",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app send stream, handle %d, to ust failed with ret %d (pid: %d, sock: %d).",
+                                       stream->obj->handle, ret, app->pid,
+                                       app->sock);
+               }
+               goto error;
+       }
+       channel->handle = channel->obj->handle;
+
+error:
+       return ret;
+}
+
+/*
+ * Send channel previously received from the consumer to the UST tracer.
+ *
+ * On success return 0 else a negative value.
+ */
+int ust_consumer_send_channel_to_ust(struct ust_app *app,
+               struct ust_app_session *ua_sess, struct ust_app_channel *channel)
+{
+       int ret;
+
+       LTTNG_ASSERT(app);
+       LTTNG_ASSERT(ua_sess);
+       LTTNG_ASSERT(channel);
+       LTTNG_ASSERT(channel->obj);
+
+       DBG2("UST app send channel to sock %d pid %d (name: %s, key: %" PRIu64 ")",
+                       app->sock, app->pid, channel->name, channel->tracing_channel_id);
+
+       /* Send stream to application. */
+       pthread_mutex_lock(&app->sock_lock);
+       ret = lttng_ust_ctl_send_channel_to_ust(app->sock, ua_sess->handle, channel->obj);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                       DBG3("UST app send channel to ust failed. Application is dead (pid: %d, sock: %d).",
+                                       app->pid, app->sock);
+               } else if (ret == -EAGAIN) {
+                       WARN("UST app send channel to ust failed. Communication timeout (pid: %d, sock: %d).",
+                                       app->pid, app->sock);
+               } else {
+                       ERR("UST app send channel %s, to ust failed with ret %d (pid: %d, sock: %d).",
+                                       channel->name, ret, app->pid,
+                                       app->sock);
+               }
+               goto error;
+       }
+
+error:
+       return ret;
+}
+
+/*
+ * Handle the metadata requests from the UST consumer
+ *
+ * Return 0 on success else a negative value.
+ */
+int ust_consumer_metadata_request(struct consumer_socket *socket)
+{
+       int ret;
+       ssize_t ret_push;
+       struct lttcomm_metadata_request_msg request;
+       struct buffer_reg_uid *reg_uid;
+       struct ust_registry_session *ust_reg;
+       struct lttcomm_consumer_msg msg;
+
+       LTTNG_ASSERT(socket);
+
+       rcu_read_lock();
+       health_code_update();
+
+       /* Wait for a metadata request */
+       pthread_mutex_lock(socket->lock);
+       ret = consumer_socket_recv(socket, &request, sizeof(request));
+       pthread_mutex_unlock(socket->lock);
+       if (ret < 0) {
+               goto end;
+       }
+
+       DBG("Metadata request received for session %" PRIu64 ", key %" PRIu64,
+                       request.session_id, request.key);
+
+       reg_uid = buffer_reg_uid_find(request.session_id,
+                       request.bits_per_long, request.uid);
+       if (reg_uid) {
+               ust_reg = reg_uid->registry->reg.ust;
+       } else {
+               struct buffer_reg_pid *reg_pid =
+                       buffer_reg_pid_find(request.session_id_per_pid);
+               if (!reg_pid) {
+                       DBG("PID registry not found for session id %" PRIu64,
+                                       request.session_id_per_pid);
+
+                       memset(&msg, 0, sizeof(msg));
+                       msg.cmd_type = LTTNG_ERR_UND;
+                       pthread_mutex_lock(socket->lock);
+                       (void) consumer_send_msg(socket, &msg);
+                       pthread_mutex_unlock(socket->lock);
+                       /*
+                        * This is possible since the session might have been destroyed
+                        * during a consumer metadata request. So here, return gracefully
+                        * because the destroy session will push the remaining metadata to
+                        * the consumer.
+                        */
+                       ret = 0;
+                       goto end;
+               }
+               ust_reg = reg_pid->registry->reg.ust;
+       }
+       LTTNG_ASSERT(ust_reg);
+
+       pthread_mutex_lock(&ust_reg->lock);
+       ret_push = ust_app_push_metadata(ust_reg, socket, 1);
+       pthread_mutex_unlock(&ust_reg->lock);
+       if (ret_push == -EPIPE) {
+               DBG("Application or relay closed while pushing metadata");
+       } else if (ret_push < 0) {
+               ERR("Pushing metadata");
+               ret = -1;
+               goto end;
+       } else {
+               DBG("UST Consumer metadata pushed successfully");
+       }
+       ret = 0;
+
+end:
+       rcu_read_unlock();
+       return ret;
+}
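To tie the preconditions documented above together, here is a hypothetical
caller sketch (not code from this patch) showing the intended ordering: the
channel is first requested with ust_consumer_ask_channel(), then retrieved
with ust_consumer_get_channel(), and ust_consumer_destroy_channel() is only a
valid clean-up path while GET_CHANNEL has not yet succeeded. The helper name
and error handling are illustrative; the RCU read-side and session list locks
are assumed to be held by the caller, as ust_consumer_ask_channel() requires.

    /* Hypothetical helper, for illustration only. */
    static int create_and_fetch_channel(struct ust_app_session *ua_sess,
                    struct ust_app_channel *ua_chan,
                    struct consumer_output *consumer,
                    struct consumer_socket *socket,
                    struct ust_registry_session *registry,
                    struct lttng_trace_chunk *trace_chunk)
    {
            int ret;

            /* Ask the consumer to create the channel (ASK_CHANNEL_CREATION). */
            ret = ust_consumer_ask_channel(ua_sess, ua_chan, consumer, socket,
                            registry, trace_chunk);
            if (ret < 0) {
                    goto end;
            }

            /* Receive the channel object and its streams (GET_CHANNEL). */
            ret = ust_consumer_get_channel(socket, ua_chan);
            if (ret < 0) {
                    /*
                     * GET_CHANNEL did not complete: the streams were never
                     * dispatched to the consumer threads, so DESTROY_CHANNEL
                     * is still the valid clean-up path.
                     */
                    (void) ust_consumer_destroy_channel(socket, ua_chan);
                    goto end;
            }

            /*
             * From here on, clean-up must go through the hang-up process,
             * not DESTROY_CHANNEL.
             */
    end:
            return ret;
    }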
index 610aa7f363ba208efdfe0c7321d105a9722c0097..d85af28684d6dc034cc93d11cf362a0a0aa9cbcc 100644 (file)
 
 #include "lttng-ust-abi.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #ifndef LTTNG_UST_UUID_LEN
 #define LTTNG_UST_UUID_LEN     16
 #endif
@@ -655,4 +659,8 @@ int lttng_ust_ctl_counter_clear(struct lttng_ust_ctl_daemon_counter *counter,
 
 void lttng_ust_ctl_sigbus_handle(void *addr);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_UST_CTL_INTERNAL_H */
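The extern "C" guard added above keeps these declarations on C linkage when
the header is included from the now-C++ sessiond sources; without it, the C++
compiler would mangle the function names and the references would not resolve
against the symbols exported by the C-compiled lttng-ust-ctl library. A
minimal, hypothetical C++ user of one declaration from this header:

    /* Hypothetical C++ translation unit inside lttng-sessiond. */
    void handle_sigbus(void *addr)
    {
            /*
             * Thanks to the extern "C" guard, this call is emitted against
             * the unmangled C symbol "lttng_ust_ctl_sigbus_handle".
             */
            lttng_ust_ctl_sigbus_handle(addr);
    }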
diff --git a/src/bin/lttng-sessiond/ust-field-utils.c b/src/bin/lttng-sessiond/ust-field-utils.c
deleted file mode 100644 (file)
index a89fade..0000000
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Copyright (C) 2018 Francis Deslauriers <francis.deslauriers@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <stdbool.h>
-#include <string.h>
-
-#include "ust-field-utils.h"
-
-/*
- * The lttng_ust_ctl_field is made of a combination of C basic types
- * lttng_ust_ctl_basic_type and _lttng_ust_ctl_basic_type.
- *
- * lttng_ust_ctl_basic_type contains an enumeration describing the abstract type.
- * _lttng_ust_ctl_basic_type does _NOT_ contain an enumeration describing the
- * abstract type.
- *
- * A layer is needed to use the same code for both structures.
- * When dealing with _lttng_ust_ctl_basic_type, we need to use the abstract type of
- * the lttng_ust_ctl_type struct.
- */
-
-/*
- * Compare two lttng_ust_ctl_integer_type fields.
- * Returns 1 if both are identical.
- */
-static bool match_lttng_ust_ctl_field_integer(const struct lttng_ust_ctl_integer_type *first,
-                       const struct lttng_ust_ctl_integer_type *second)
-{
-       if (first->size != second->size) {
-               goto no_match;
-       }
-       if (first->alignment != second->alignment) {
-               goto no_match;
-       }
-       if (first->signedness != second->signedness) {
-               goto no_match;
-       }
-       if (first->encoding != second->encoding) {
-               goto no_match;
-       }
-       if (first->base != second->base) {
-               goto no_match;
-       }
-       if (first->reverse_byte_order != second->reverse_byte_order) {
-               goto no_match;
-       }
-
-       return true;
-
-no_match:
-       return false;
-}
-
-/*
- * Compare two _lttng_ust_ctl_basic_type fields known to be of type integer.
- * Returns 1 if both are identical.
- */
-static bool match_lttng_ust_ctl_field_integer_from_raw_basic_type(
-                       const union _lttng_ust_ctl_basic_type *first,
-                       const union _lttng_ust_ctl_basic_type *second)
-{
-       return match_lttng_ust_ctl_field_integer(&first->integer, &second->integer);
-}
-
-/*
- * Compare two _lttng_ust_ctl_basic_type fields known to be of type enum.
- * Returns 1 if both are identical.
- */
-static bool match_lttng_ust_ctl_field_enum_from_raw_basic_type(
-               const union _lttng_ust_ctl_basic_type *first,
-               const union _lttng_ust_ctl_basic_type *second)
-{
-       /*
-        * Compare enumeration ID. Enumeration ID is provided to the application by
-        * the session daemon before event registration.
-        */
-       if (first->enumeration.id != second->enumeration.id) {
-               goto no_match;
-       }
-
-       /*
-        * Sanity check of the name and container type. Those were already checked
-        * during enum registration.
-        */
-       if (strncmp(first->enumeration.name, second->enumeration.name,
-                               LTTNG_UST_ABI_SYM_NAME_LEN)) {
-               goto no_match;
-       }
-       if (!match_lttng_ust_ctl_field_integer(&first->enumeration.container_type,
-                               &second->enumeration.container_type)) {
-               goto no_match;
-       }
-
-       return true;
-
-no_match:
-       return false;
-}
-
-/*
- * Compare two _lttng_ust_ctl_basic_type fields known to be of type string.
- * Returns 1 if both are identical.
- */
-static bool match_lttng_ust_ctl_field_string_from_raw_basic_type(
-               const union _lttng_ust_ctl_basic_type *first,
-               const union _lttng_ust_ctl_basic_type *second)
-{
-       return first->string.encoding == second->string.encoding;
-}
-
-/*
- * Compare two _lttng_ust_ctl_basic_type fields known to be of type float.
- * Returns 1 if both are identical.
- */
-static bool match_lttng_ust_ctl_field_float_from_raw_basic_type(
-               const union _lttng_ust_ctl_basic_type *first,
-               const union _lttng_ust_ctl_basic_type *second)
-{
-       if (first->_float.exp_dig != second->_float.exp_dig) {
-               goto no_match;
-       }
-
-       if (first->_float.mant_dig != second->_float.mant_dig) {
-               goto no_match;
-       }
-
-       if (first->_float.reverse_byte_order !=
-                       second->_float.reverse_byte_order) {
-               goto no_match;
-       }
-
-       if (first->_float.alignment != second->_float.alignment) {
-               goto no_match;
-       }
-
-       return true;
-
-no_match:
-       return false;
-}
-
-/*
- * Compare two _lttng_ust_ctl_basic_type fields given their respective abstract types.
- * Returns 1 if both are identical.
- */
-static bool match_lttng_ust_ctl_field_raw_basic_type(
-               enum lttng_ust_ctl_abstract_types first_atype,
-               const union _lttng_ust_ctl_basic_type *first,
-               enum lttng_ust_ctl_abstract_types second_atype,
-               const union _lttng_ust_ctl_basic_type *second)
-{
-       if (first_atype != second_atype) {
-               goto no_match;
-       }
-
-       switch (first_atype) {
-       case lttng_ust_ctl_atype_integer:
-               if (!match_lttng_ust_ctl_field_integer_from_raw_basic_type(first, second)) {
-                       goto no_match;
-               }
-               break;
-       case lttng_ust_ctl_atype_enum:
-               if (!match_lttng_ust_ctl_field_enum_from_raw_basic_type(first, second)) {
-                       goto no_match;
-               }
-               break;
-       case lttng_ust_ctl_atype_string:
-               if (!match_lttng_ust_ctl_field_string_from_raw_basic_type(first, second)) {
-                       goto no_match;
-               }
-               break;
-       case lttng_ust_ctl_atype_float:
-               if (!match_lttng_ust_ctl_field_float_from_raw_basic_type(first, second)) {
-                       goto no_match;
-               }
-               break;
-       default:
-               goto no_match;
-       }
-
-       return true;
-
-no_match:
-       return false;
-}
-
-/*
- * Compatibility layer between the lttng_ust_ctl_basic_type struct and
- * _lttng_ust_ctl_basic_type union.
- */
-static bool match_lttng_ust_ctl_field_basic_type(const struct lttng_ust_ctl_basic_type *first,
-               const struct lttng_ust_ctl_basic_type *second)
-{
-       return match_lttng_ust_ctl_field_raw_basic_type(first->atype, &first->u.basic,
-                               second->atype, &second->u.basic);
-}
-
-int match_lttng_ust_ctl_field(const struct lttng_ust_ctl_field *first,
-               const struct lttng_ust_ctl_field *second)
-{
-       /* Check the name of the field is identical. */
-       if (strncmp(first->name, second->name, LTTNG_UST_ABI_SYM_NAME_LEN)) {
-               goto no_match;
-       }
-
-       /* Check the field type is identical. */
-       if (first->type.atype != second->type.atype) {
-               goto no_match;
-       }
-
-       /* Check the field layout. */
-       switch (first->type.atype) {
-       case lttng_ust_ctl_atype_integer:
-       case lttng_ust_ctl_atype_enum:
-       case lttng_ust_ctl_atype_string:
-       case lttng_ust_ctl_atype_float:
-               if (!match_lttng_ust_ctl_field_raw_basic_type(first->type.atype,
-                                       &first->type.u.legacy.basic, second->type.atype,
-                                       &second->type.u.legacy.basic)) {
-                       goto no_match;
-               }
-               break;
-       case lttng_ust_ctl_atype_sequence:
-               /* Match element type of the sequence. */
-               if (!match_lttng_ust_ctl_field_basic_type(&first->type.u.legacy.sequence.elem_type,
-                                       &second->type.u.legacy.sequence.elem_type)) {
-                       goto no_match;
-               }
-
-               /* Match length type of the sequence. */
-               if (!match_lttng_ust_ctl_field_basic_type(&first->type.u.legacy.sequence.length_type,
-                                       &second->type.u.legacy.sequence.length_type)) {
-                       goto no_match;
-               }
-               break;
-       case lttng_ust_ctl_atype_array:
-               /* Match element type of the array. */
-               if (!match_lttng_ust_ctl_field_basic_type(&first->type.u.legacy.array.elem_type,
-                                       &second->type.u.legacy.array.elem_type)) {
-                       goto no_match;
-               }
-
-               /* Match length of the array. */
-               if (first->type.u.legacy.array.length != second->type.u.legacy.array.length) {
-                       goto no_match;
-               }
-               break;
-       case lttng_ust_ctl_atype_variant:
-               /* Compare number of choice of the variants. */
-               if (first->type.u.legacy.variant.nr_choices !=
-                                       second->type.u.legacy.variant.nr_choices) {
-                       goto no_match;
-               }
-
-               /* Compare tag name of the variants. */
-               if (strncmp(first->type.u.legacy.variant.tag_name,
-                                       second->type.u.legacy.variant.tag_name,
-                                       LTTNG_UST_ABI_SYM_NAME_LEN)) {
-                       goto no_match;
-               }
-               break;
-       case lttng_ust_ctl_atype_struct:
-               /* Compare number of fields of the structs. */
-               if (first->type.u.legacy._struct.nr_fields != second->type.u.legacy._struct.nr_fields) {
-                       goto no_match;
-               }
-               break;
-       case lttng_ust_ctl_atype_sequence_nestable:
-               if (first->type.u.sequence_nestable.alignment != second->type.u.sequence_nestable.alignment) {
-                       goto no_match;
-               }
-               /* Compare length_name of the sequences. */
-               if (strncmp(first->type.u.sequence_nestable.length_name,
-                                       second->type.u.sequence_nestable.length_name,
-                                       LTTNG_UST_ABI_SYM_NAME_LEN)) {
-                       goto no_match;
-               }
-               /* Comparison will be done when marshalling following items. */
-               break;
-       case lttng_ust_ctl_atype_array_nestable:
-               if (first->type.u.array_nestable.alignment != second->type.u.array_nestable.alignment) {
-                       goto no_match;
-               }
-               /* Match length of the array. */
-               if (first->type.u.array_nestable.length != second->type.u.array_nestable.length) {
-                       goto no_match;
-               }
-               /* Comparison of element type will be done when marshalling following item. */
-               break;
-       case lttng_ust_ctl_atype_enum_nestable:
-               if (first->type.u.enum_nestable.id != second->type.u.enum_nestable.id) {
-                       goto no_match;
-               }
-               /* Compare name of the enums. */
-               if (strncmp(first->type.u.enum_nestable.name,
-                                       second->type.u.enum_nestable.name,
-                                       LTTNG_UST_ABI_SYM_NAME_LEN)) {
-                       goto no_match;
-               }
-               /* Comparison of element type will be done when marshalling following item. */
-               break;
-       case lttng_ust_ctl_atype_struct_nestable:
-               if (first->type.u.struct_nestable.alignment != second->type.u.struct_nestable.alignment) {
-                       goto no_match;
-               }
-               /* Compare number of fields of the structs. */
-               if (first->type.u.struct_nestable.nr_fields != second->type.u.struct_nestable.nr_fields) {
-                       goto no_match;
-               }
-               break;
-       case lttng_ust_ctl_atype_variant_nestable:
-               if (first->type.u.variant_nestable.alignment != second->type.u.variant_nestable.alignment) {
-                       goto no_match;
-               }
-               /* Compare number of choice of the variants. */
-               if (first->type.u.variant_nestable.nr_choices !=
-                                       second->type.u.variant_nestable.nr_choices) {
-                       goto no_match;
-               }
-
-               /* Compare tag name of the variants. */
-               if (strncmp(first->type.u.variant_nestable.tag_name,
-                                       second->type.u.variant_nestable.tag_name,
-                                       LTTNG_UST_ABI_SYM_NAME_LEN)) {
-                       goto no_match;
-               }
-               break;
-       default:
-               goto no_match;
-       }
-
-       return true;
-
-no_match:
-       return false;
-}
diff --git a/src/bin/lttng-sessiond/ust-field-utils.cpp b/src/bin/lttng-sessiond/ust-field-utils.cpp
new file mode 100644 (file)
index 0000000..a89fade
--- /dev/null
@@ -0,0 +1,340 @@
+/*
+ * Copyright (C) 2018 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <stdbool.h>
+#include <string.h>
+
+#include "ust-field-utils.h"
+
+/*
+ * The basic types of an lttng_ust_ctl_field are expressed with two C types:
+ * the lttng_ust_ctl_basic_type struct and the _lttng_ust_ctl_basic_type union.
+ *
+ * lttng_ust_ctl_basic_type contains an enumeration describing the abstract type.
+ * _lttng_ust_ctl_basic_type does _NOT_ contain an enumeration describing the
+ * abstract type.
+ *
+ * A compatibility layer is needed to use the same code for both representations.
+ * When dealing with _lttng_ust_ctl_basic_type, the abstract type must be taken
+ * from the lttng_ust_ctl_type struct that contains it.
+ */
+
+/*
+ * Compare two lttng_ust_ctl_integer_type fields.
+ * Returns true if both are identical.
+ */
+static bool match_lttng_ust_ctl_field_integer(const struct lttng_ust_ctl_integer_type *first,
+                       const struct lttng_ust_ctl_integer_type *second)
+{
+       if (first->size != second->size) {
+               goto no_match;
+       }
+       if (first->alignment != second->alignment) {
+               goto no_match;
+       }
+       if (first->signedness != second->signedness) {
+               goto no_match;
+       }
+       if (first->encoding != second->encoding) {
+               goto no_match;
+       }
+       if (first->base != second->base) {
+               goto no_match;
+       }
+       if (first->reverse_byte_order != second->reverse_byte_order) {
+               goto no_match;
+       }
+
+       return true;
+
+no_match:
+       return false;
+}
+
+/*
+ * Compare two _lttng_ust_ctl_basic_type fields known to be of type integer.
+ * Returns true if both are identical.
+ */
+static bool match_lttng_ust_ctl_field_integer_from_raw_basic_type(
+                       const union _lttng_ust_ctl_basic_type *first,
+                       const union _lttng_ust_ctl_basic_type *second)
+{
+       return match_lttng_ust_ctl_field_integer(&first->integer, &second->integer);
+}
+
+/*
+ * Compare two _lttng_ust_ctl_basic_type fields known to be of type enum.
+ * Returns true if both are identical.
+ */
+static bool match_lttng_ust_ctl_field_enum_from_raw_basic_type(
+               const union _lttng_ust_ctl_basic_type *first,
+               const union _lttng_ust_ctl_basic_type *second)
+{
+       /*
+        * Compare enumeration ID. Enumeration ID is provided to the application by
+        * the session daemon before event registration.
+        */
+       if (first->enumeration.id != second->enumeration.id) {
+               goto no_match;
+       }
+
+       /*
+        * Sanity check of the name and container type. Those were already checked
+        * during enum registration.
+        */
+       if (strncmp(first->enumeration.name, second->enumeration.name,
+                               LTTNG_UST_ABI_SYM_NAME_LEN)) {
+               goto no_match;
+       }
+       if (!match_lttng_ust_ctl_field_integer(&first->enumeration.container_type,
+                               &second->enumeration.container_type)) {
+               goto no_match;
+       }
+
+       return true;
+
+no_match:
+       return false;
+}
+
+/*
+ * Compare two _lttng_ust_ctl_basic_type fields known to be of type string.
+ * Returns true if both are identical.
+ */
+static bool match_lttng_ust_ctl_field_string_from_raw_basic_type(
+               const union _lttng_ust_ctl_basic_type *first,
+               const union _lttng_ust_ctl_basic_type *second)
+{
+       return first->string.encoding == second->string.encoding;
+}
+
+/*
+ * Compare two _lttng_ust_ctl_basic_type fields known to be of type float.
+ * Returns true if both are identical.
+ */
+static bool match_lttng_ust_ctl_field_float_from_raw_basic_type(
+               const union _lttng_ust_ctl_basic_type *first,
+               const union _lttng_ust_ctl_basic_type *second)
+{
+       if (first->_float.exp_dig != second->_float.exp_dig) {
+               goto no_match;
+       }
+
+       if (first->_float.mant_dig != second->_float.mant_dig) {
+               goto no_match;
+       }
+
+       if (first->_float.reverse_byte_order !=
+                       second->_float.reverse_byte_order) {
+               goto no_match;
+       }
+
+       if (first->_float.alignment != second->_float.alignment) {
+               goto no_match;
+       }
+
+       return true;
+
+no_match:
+       return false;
+}
+
+/*
+ * Compare two _lttng_ust_ctl_basic_type fields given their respective abstract types.
+ * Returns true if both are identical.
+ */
+static bool match_lttng_ust_ctl_field_raw_basic_type(
+               enum lttng_ust_ctl_abstract_types first_atype,
+               const union _lttng_ust_ctl_basic_type *first,
+               enum lttng_ust_ctl_abstract_types second_atype,
+               const union _lttng_ust_ctl_basic_type *second)
+{
+       if (first_atype != second_atype) {
+               goto no_match;
+       }
+
+       switch (first_atype) {
+       case lttng_ust_ctl_atype_integer:
+               if (!match_lttng_ust_ctl_field_integer_from_raw_basic_type(first, second)) {
+                       goto no_match;
+               }
+               break;
+       case lttng_ust_ctl_atype_enum:
+               if (!match_lttng_ust_ctl_field_enum_from_raw_basic_type(first, second)) {
+                       goto no_match;
+               }
+               break;
+       case lttng_ust_ctl_atype_string:
+               if (!match_lttng_ust_ctl_field_string_from_raw_basic_type(first, second)) {
+                       goto no_match;
+               }
+               break;
+       case lttng_ust_ctl_atype_float:
+               if (!match_lttng_ust_ctl_field_float_from_raw_basic_type(first, second)) {
+                       goto no_match;
+               }
+               break;
+       default:
+               goto no_match;
+       }
+
+       return true;
+
+no_match:
+       return false;
+}
+
+/*
+ * Compatibility layer between the lttng_ust_ctl_basic_type struct and
+ * _lttng_ust_ctl_basic_type union.
+ */
+static bool match_lttng_ust_ctl_field_basic_type(const struct lttng_ust_ctl_basic_type *first,
+               const struct lttng_ust_ctl_basic_type *second)
+{
+       return match_lttng_ust_ctl_field_raw_basic_type(first->atype, &first->u.basic,
+                               second->atype, &second->u.basic);
+}
+
+int match_lttng_ust_ctl_field(const struct lttng_ust_ctl_field *first,
+               const struct lttng_ust_ctl_field *second)
+{
+       /* Check the name of the field is identical. */
+       if (strncmp(first->name, second->name, LTTNG_UST_ABI_SYM_NAME_LEN)) {
+               goto no_match;
+       }
+
+       /* Check the field type is identical. */
+       if (first->type.atype != second->type.atype) {
+               goto no_match;
+       }
+
+       /* Check the field layout. */
+       switch (first->type.atype) {
+       case lttng_ust_ctl_atype_integer:
+       case lttng_ust_ctl_atype_enum:
+       case lttng_ust_ctl_atype_string:
+       case lttng_ust_ctl_atype_float:
+               if (!match_lttng_ust_ctl_field_raw_basic_type(first->type.atype,
+                                       &first->type.u.legacy.basic, second->type.atype,
+                                       &second->type.u.legacy.basic)) {
+                       goto no_match;
+               }
+               break;
+       case lttng_ust_ctl_atype_sequence:
+               /* Match element type of the sequence. */
+               if (!match_lttng_ust_ctl_field_basic_type(&first->type.u.legacy.sequence.elem_type,
+                                       &second->type.u.legacy.sequence.elem_type)) {
+                       goto no_match;
+               }
+
+               /* Match length type of the sequence. */
+               if (!match_lttng_ust_ctl_field_basic_type(&first->type.u.legacy.sequence.length_type,
+                                       &second->type.u.legacy.sequence.length_type)) {
+                       goto no_match;
+               }
+               break;
+       case lttng_ust_ctl_atype_array:
+               /* Match element type of the array. */
+               if (!match_lttng_ust_ctl_field_basic_type(&first->type.u.legacy.array.elem_type,
+                                       &second->type.u.legacy.array.elem_type)) {
+                       goto no_match;
+               }
+
+               /* Match length of the array. */
+               if (first->type.u.legacy.array.length != second->type.u.legacy.array.length) {
+                       goto no_match;
+               }
+               break;
+       case lttng_ust_ctl_atype_variant:
+               /* Compare the number of choices of the variants. */
+               if (first->type.u.legacy.variant.nr_choices !=
+                                       second->type.u.legacy.variant.nr_choices) {
+                       goto no_match;
+               }
+
+               /* Compare tag name of the variants. */
+               if (strncmp(first->type.u.legacy.variant.tag_name,
+                                       second->type.u.legacy.variant.tag_name,
+                                       LTTNG_UST_ABI_SYM_NAME_LEN)) {
+                       goto no_match;
+               }
+               break;
+       case lttng_ust_ctl_atype_struct:
+               /* Compare number of fields of the structs. */
+               if (first->type.u.legacy._struct.nr_fields != second->type.u.legacy._struct.nr_fields) {
+                       goto no_match;
+               }
+               break;
+       case lttng_ust_ctl_atype_sequence_nestable:
+               if (first->type.u.sequence_nestable.alignment != second->type.u.sequence_nestable.alignment) {
+                       goto no_match;
+               }
+               /* Compare length_name of the sequences. */
+               if (strncmp(first->type.u.sequence_nestable.length_name,
+                                       second->type.u.sequence_nestable.length_name,
+                                       LTTNG_UST_ABI_SYM_NAME_LEN)) {
+                       goto no_match;
+               }
+               /* Comparison will be done when marshalling the following items. */
+               break;
+       case lttng_ust_ctl_atype_array_nestable:
+               if (first->type.u.array_nestable.alignment != second->type.u.array_nestable.alignment) {
+                       goto no_match;
+               }
+               /* Match length of the array. */
+               if (first->type.u.array_nestable.length != second->type.u.array_nestable.length) {
+                       goto no_match;
+               }
+               /* Comparison of the element type will be done when marshalling the following item. */
+               break;
+       case lttng_ust_ctl_atype_enum_nestable:
+               if (first->type.u.enum_nestable.id != second->type.u.enum_nestable.id) {
+                       goto no_match;
+               }
+               /* Compare name of the enums. */
+               if (strncmp(first->type.u.enum_nestable.name,
+                                       second->type.u.enum_nestable.name,
+                                       LTTNG_UST_ABI_SYM_NAME_LEN)) {
+                       goto no_match;
+               }
+               /* Comparison of the element type will be done when marshalling the following item. */
+               break;
+       case lttng_ust_ctl_atype_struct_nestable:
+               if (first->type.u.struct_nestable.alignment != second->type.u.struct_nestable.alignment) {
+                       goto no_match;
+               }
+               /* Compare number of fields of the structs. */
+               if (first->type.u.struct_nestable.nr_fields != second->type.u.struct_nestable.nr_fields) {
+                       goto no_match;
+               }
+               break;
+       case lttng_ust_ctl_atype_variant_nestable:
+               if (first->type.u.variant_nestable.alignment != second->type.u.variant_nestable.alignment) {
+                       goto no_match;
+               }
+               /* Compare the number of choices of the variants. */
+               if (first->type.u.variant_nestable.nr_choices !=
+                                       second->type.u.variant_nestable.nr_choices) {
+                       goto no_match;
+               }
+
+               /* Compare tag name of the variants. */
+               if (strncmp(first->type.u.variant_nestable.tag_name,
+                                       second->type.u.variant_nestable.tag_name,
+                                       LTTNG_UST_ABI_SYM_NAME_LEN)) {
+                       goto no_match;
+               }
+               break;
+       default:
+               goto no_match;
+       }
+
+       return true;
+
+no_match:
+       return false;
+}
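
The struct/union split documented at the top of ust-field-utils.cpp can be
summed up with a small standalone sketch. The names below (abstract_type,
raw_basic_type, tagged_basic_type, match_raw_basic_type) are hypothetical
stand-ins, not the lttng-ust ABI; the point is only that the untagged union
needs its abstract type passed in explicitly, while the tagged struct can
forward its own:

    /* Hypothetical stand-ins for the real lttng-ust types. */
    enum abstract_type { atype_integer, atype_string };

    struct integer_type { unsigned int size; unsigned int alignment; };
    struct string_type { int encoding; };

    /* Untagged: the caller must indicate which member is valid. */
    union raw_basic_type {
            struct integer_type integer;
            struct string_type string;
    };

    /* Tagged: carries the abstract type next to the union. */
    struct tagged_basic_type {
            enum abstract_type atype;
            union raw_basic_type u;
    };

    static bool match_raw_basic_type(enum abstract_type first_atype,
                    const union raw_basic_type *first,
                    enum abstract_type second_atype,
                    const union raw_basic_type *second)
    {
            if (first_atype != second_atype) {
                    return false;
            }

            switch (first_atype) {
            case atype_integer:
                    return first->integer.size == second->integer.size &&
                                    first->integer.alignment == second->integer.alignment;
            case atype_string:
                    return first->string.encoding == second->string.encoding;
            default:
                    return false;
            }
    }

    /* The tagged wrapper simply forwards its own abstract type. */
    static bool match_tagged_basic_type(const struct tagged_basic_type *first,
                    const struct tagged_basic_type *second)
    {
            return match_raw_basic_type(first->atype, &first->u,
                            second->atype, &second->u);
    }

This mirrors how match_lttng_ust_ctl_field_basic_type() forwards its atype to
match_lttng_ust_ctl_field_raw_basic_type() above.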
diff --git a/src/bin/lttng-sessiond/ust-metadata.c b/src/bin/lttng-sessiond/ust-metadata.c
deleted file mode 100644 (file)
index 7110662..0000000
+++ /dev/null
@@ -1,1344 +0,0 @@
-/*
- * Copyright (C) 2010-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <stdint.h>
-#include <string.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <limits.h>
-#include <unistd.h>
-#include <inttypes.h>
-#include <common/common.h>
-#include <common/time.h>
-
-#include "ust-registry.h"
-#include "ust-clock.h"
-#include "ust-app.h"
-
-#ifndef max_t
-#define max_t(type, a, b)      ((type) ((a) > (b) ? (a) : (b)))
-#endif
-
-#define NR_CLOCK_OFFSET_SAMPLES                10
-
-struct offset_sample {
-       int64_t offset;                 /* correlation offset */
-       uint64_t measure_delta;         /* lower is better */
-};
-
-static
-int _lttng_field_statedump(struct ust_registry_session *session,
-               const struct lttng_ust_ctl_field *fields, size_t nr_fields,
-               size_t *iter_field, size_t nesting);
-
-static inline
-int get_count_order(unsigned int count)
-{
-       int order;
-
-       order = lttng_fls(count) - 1;
-       if (count & (count - 1)) {
-               order++;
-       }
-       LTTNG_ASSERT(order >= 0);
-       return order;
-}
-
-/*
- * Returns offset where to write in metadata array, or negative error value on error.
- */
-static
-ssize_t metadata_reserve(struct ust_registry_session *session, size_t len)
-{
-       size_t new_len = session->metadata_len + len;
-       size_t new_alloc_len = new_len;
-       size_t old_alloc_len = session->metadata_alloc_len;
-       ssize_t ret;
-
-       if (new_alloc_len > (UINT32_MAX >> 1))
-               return -EINVAL;
-       if ((old_alloc_len << 1) > (UINT32_MAX >> 1))
-               return -EINVAL;
-
-       if (new_alloc_len > old_alloc_len) {
-               char *newptr;
-
-               new_alloc_len =
-                       max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
-               newptr = realloc(session->metadata, new_alloc_len);
-               if (!newptr)
-                       return -ENOMEM;
-               session->metadata = newptr;
-               /* We zero directly the memory from start of allocation. */
-               memset(&session->metadata[old_alloc_len], 0, new_alloc_len - old_alloc_len);
-               session->metadata_alloc_len = new_alloc_len;
-       }
-       ret = session->metadata_len;
-       session->metadata_len += len;
-       return ret;
-}
-
-static
-int metadata_file_append(struct ust_registry_session *session,
-               const char *str, size_t len)
-{
-       ssize_t written;
-
-       if (session->metadata_fd < 0) {
-               return 0;
-       }
-       /* Write to metadata file */
-       written = lttng_write(session->metadata_fd, str, len);
-       if (written != len) {
-               return -1;
-       }
-       return 0;
-}
-
-/*
- * We have exclusive access to our metadata buffer (protected by the
- * ust_lock), so we can do racy operations such as looking for
- * remaining space left in packet and write, since mutual exclusion
- * protects us from concurrent writes.
- */
-static
-int lttng_metadata_printf(struct ust_registry_session *session,
-               const char *fmt, ...)
-{
-       char *str = NULL;
-       size_t len;
-       va_list ap;
-       ssize_t offset;
-       int ret;
-
-       va_start(ap, fmt);
-       ret = vasprintf(&str, fmt, ap);
-       va_end(ap);
-       if (ret < 0)
-               return -ENOMEM;
-
-       len = strlen(str);
-       offset = metadata_reserve(session, len);
-       if (offset < 0) {
-               ret = offset;
-               goto end;
-       }
-       memcpy(&session->metadata[offset], str, len);
-       ret = metadata_file_append(session, str, len);
-       if (ret) {
-               PERROR("Error appending to metadata file");
-               goto end;
-       }
-       DBG3("Append to metadata: \"%s\"", str);
-       ret = 0;
-
-end:
-       free(str);
-       return ret;
-}
-
-static
-int print_tabs(struct ust_registry_session *session, size_t nesting)
-{
-       size_t i;
-
-       for (i = 0; i < nesting; i++) {
-               int ret;
-
-               ret = lttng_metadata_printf(session, "  ");
-               if (ret) {
-                       return ret;
-               }
-       }
-       return 0;
-}
-
-static
-void sanitize_ctf_identifier(char *out, const char *in)
-{
-       size_t i;
-
-       for (i = 0; i < LTTNG_UST_ABI_SYM_NAME_LEN; i++) {
-               switch (in[i]) {
-               case '.':
-               case '$':
-               case ':':
-                       out[i] = '_';
-                       break;
-               default:
-                       out[i] = in[i];
-               }
-       }
-}
-
-static
-int print_escaped_ctf_string(struct ust_registry_session *session, const char *string)
-{
-       int ret = 0;
-       size_t i;
-       char cur;
-
-       i = 0;
-       cur = string[i];
-       while (cur != '\0') {
-               switch (cur) {
-               case '\n':
-                       ret = lttng_metadata_printf(session, "%s", "\\n");
-                       break;
-               case '\\':
-               case '"':
-                       ret = lttng_metadata_printf(session, "%c", '\\');
-                       if (ret) {
-                               goto error;
-                       }
-                       /* We still print the current char */
-                       /* Fallthrough */
-               default:
-                       ret = lttng_metadata_printf(session, "%c", cur);
-                       break;
-               }
-
-               if (ret) {
-                       goto error;
-               }
-
-               cur = string[++i];
-       }
-error:
-       return ret;
-}
-
-/* Called with session registry mutex held. */
-static
-int ust_metadata_enum_statedump(struct ust_registry_session *session,
-               const char *enum_name,
-               uint64_t enum_id,
-               const struct lttng_ust_ctl_integer_type *container_type,
-               const char *field_name, size_t *iter_field, size_t nesting)
-{
-       struct ust_registry_enum *reg_enum;
-       const struct lttng_ust_ctl_enum_entry *entries;
-       size_t nr_entries;
-       int ret = 0;
-       size_t i;
-       char identifier[LTTNG_UST_ABI_SYM_NAME_LEN];
-
-       rcu_read_lock();
-       reg_enum = ust_registry_lookup_enum_by_id(session, enum_name, enum_id);
-       rcu_read_unlock();
-       /* reg_enum can still be used because session registry mutex is held. */
-       if (!reg_enum) {
-               ret = -ENOENT;
-               goto end;
-       }
-       entries = reg_enum->entries;
-       nr_entries = reg_enum->nr_entries;
-
-       ret = print_tabs(session, nesting);
-       if (ret) {
-               goto end;
-       }
-       ret = lttng_metadata_printf(session,
-               "enum : integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u; } {\n",
-               container_type->size,
-               container_type->alignment,
-               container_type->signedness,
-               (container_type->encoding == lttng_ust_ctl_encode_none)
-                       ? "none"
-                       : (container_type->encoding == lttng_ust_ctl_encode_UTF8)
-                               ? "UTF8"
-                               : "ASCII",
-               container_type->base);
-       if (ret) {
-               goto end;
-       }
-       nesting++;
-       /* Dump all entries */
-       for (i = 0; i < nr_entries; i++) {
-               const struct lttng_ust_ctl_enum_entry *entry = &entries[i];
-               int j, len;
-
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-               ret = lttng_metadata_printf(session,
-                               "\"");
-               if (ret) {
-                       goto end;
-               }
-               len = strlen(entry->string);
-               /* Escape the character '"' */
-               for (j = 0; j < len; j++) {
-                       char c = entry->string[j];
-
-                       switch (c) {
-                       case '"':
-                               ret = lttng_metadata_printf(session,
-                                               "\\\"");
-                               break;
-                       case '\\':
-                               ret = lttng_metadata_printf(session,
-                                               "\\\\");
-                               break;
-                       default:
-                               ret = lttng_metadata_printf(session,
-                                               "%c", c);
-                               break;
-                       }
-                       if (ret) {
-                               goto end;
-                       }
-               }
-               ret = lttng_metadata_printf(session, "\"");
-               if (ret) {
-                       goto end;
-               }
-
-               if (entry->u.extra.options &
-                               LTTNG_UST_CTL_UST_ENUM_ENTRY_OPTION_IS_AUTO) {
-                       ret = lttng_metadata_printf(session, ",\n");
-                       if (ret) {
-                               goto end;
-                       }
-               } else {
-                       ret = lttng_metadata_printf(session,
-                                       " = ");
-                       if (ret) {
-                               goto end;
-                       }
-
-                       if (entry->start.signedness) {
-                               ret = lttng_metadata_printf(session,
-                                       "%lld", (long long) entry->start.value);
-                       } else {
-                               ret = lttng_metadata_printf(session,
-                                       "%llu", entry->start.value);
-                       }
-                       if (ret) {
-                               goto end;
-                       }
-
-                       if (entry->start.signedness == entry->end.signedness &&
-                                       entry->start.value ==
-                                               entry->end.value) {
-                               ret = lttng_metadata_printf(session, ",\n");
-                       } else {
-                               if (entry->end.signedness) {
-                                       ret = lttng_metadata_printf(session,
-                                               " ... %lld,\n",
-                                               (long long) entry->end.value);
-                               } else {
-                                       ret = lttng_metadata_printf(session,
-                                               " ... %llu,\n",
-                                               entry->end.value);
-                               }
-                       }
-                       if (ret) {
-                               goto end;
-                       }
-               }
-       }
-       nesting--;
-       sanitize_ctf_identifier(identifier, field_name);
-       ret = print_tabs(session, nesting);
-       if (ret) {
-               goto end;
-       }
-       ret = lttng_metadata_printf(session, "} _%s;\n",
-                       identifier);
-end:
-       (*iter_field)++;
-       return ret;
-}
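
For reference, the removed ust_metadata_enum_statedump() emitted CTF (TSDL)
text of the following shape. The entry names, values, container attributes and
field name below are hypothetical; only the layout follows the function's
format strings (container line at the current nesting, one quoted entry per
line one level deeper, then a closing line carrying the sanitized field name
prefixed with an underscore):

        enum : integer { size = 32; align = 32; signed = 0; encoding = none; base = 10; } {
          "IDLE" = 0,
          "RUNNING" = 1,
          "ERROR" = 2 ... 7,
        } _my_state;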
-
-static
-int _lttng_variant_statedump(struct ust_registry_session *session,
-               uint32_t nr_choices, const char *tag_name,
-               uint32_t alignment,
-               const struct lttng_ust_ctl_field *fields, size_t nr_fields,
-               size_t *iter_field, size_t nesting)
-{
-       const struct lttng_ust_ctl_field *variant = &fields[*iter_field];
-       uint32_t i;
-       int ret;
-       char identifier[LTTNG_UST_ABI_SYM_NAME_LEN];
-
-       if (variant->type.atype != lttng_ust_ctl_atype_variant) {
-               ret = -EINVAL;
-               goto end;
-       }
-       (*iter_field)++;
-       sanitize_ctf_identifier(identifier, tag_name);
-       if (alignment) {
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-               ret = lttng_metadata_printf(session,
-               "struct { } align(%u) _%s_padding;\n",
-                               alignment * CHAR_BIT,
-                               variant->name);
-               if (ret) {
-                       goto end;
-               }
-       }
-       ret = print_tabs(session, nesting);
-       if (ret) {
-               goto end;
-       }
-       ret = lttng_metadata_printf(session,
-                       "variant <_%s> {\n",
-                       identifier);
-       if (ret) {
-               goto end;
-       }
-
-       for (i = 0; i < nr_choices; i++) {
-               if (*iter_field >= nr_fields) {
-                       ret = -EOVERFLOW;
-                       goto end;
-               }
-               ret = _lttng_field_statedump(session,
-                               fields, nr_fields,
-                               iter_field, nesting + 1);
-               if (ret) {
-                       goto end;
-               }
-       }
-       sanitize_ctf_identifier(identifier, variant->name);
-       ret = print_tabs(session, nesting);
-       if (ret) {
-               goto end;
-       }
-       ret = lttng_metadata_printf(session,
-                       "} _%s;\n",
-                       identifier);
-       if (ret) {
-               goto end;
-       }
-end:
-       return ret;
-}
-
-static
-int _lttng_field_statedump(struct ust_registry_session *session,
-               const struct lttng_ust_ctl_field *fields, size_t nr_fields,
-               size_t *iter_field, size_t nesting)
-{
-       int ret = 0;
-       const char *bo_be = " byte_order = be;";
-       const char *bo_le = " byte_order = le;";
-       const char *bo_native = "";
-       const char *bo_reverse;
-       const struct lttng_ust_ctl_field *field;
-
-       if (*iter_field >= nr_fields) {
-               ret = -EOVERFLOW;
-               goto end;
-       }
-       field = &fields[*iter_field];
-
-       if (session->byte_order == BIG_ENDIAN) {
-               bo_reverse = bo_le;
-       } else {
-               bo_reverse = bo_be;
-       }
-
-       switch (field->type.atype) {
-       case lttng_ust_ctl_atype_integer:
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-               ret = lttng_metadata_printf(session,
-                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
-                       field->type.u.integer.size,
-                       field->type.u.integer.alignment,
-                       field->type.u.integer.signedness,
-                       (field->type.u.integer.encoding == lttng_ust_ctl_encode_none)
-                               ? "none"
-                               : (field->type.u.integer.encoding == lttng_ust_ctl_encode_UTF8)
-                                       ? "UTF8"
-                                       : "ASCII",
-                       field->type.u.integer.base,
-                       field->type.u.integer.reverse_byte_order ? bo_reverse : bo_native,
-                       field->name);
-               (*iter_field)++;
-               break;
-       case lttng_ust_ctl_atype_enum:
-               ret = ust_metadata_enum_statedump(session,
-                       field->type.u.legacy.basic.enumeration.name,
-                       field->type.u.legacy.basic.enumeration.id,
-                       &field->type.u.legacy.basic.enumeration.container_type,
-                       field->name, iter_field, nesting);
-               break;
-       case lttng_ust_ctl_atype_float:
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-               ret = lttng_metadata_printf(session,
-                       "floating_point { exp_dig = %u; mant_dig = %u; align = %u;%s } _%s;\n",
-                       field->type.u._float.exp_dig,
-                       field->type.u._float.mant_dig,
-                       field->type.u._float.alignment,
-                       field->type.u._float.reverse_byte_order ? bo_reverse : bo_native,
-                       field->name);
-               (*iter_field)++;
-               break;
-       case lttng_ust_ctl_atype_array:
-       {
-               const struct lttng_ust_ctl_basic_type *elem_type;
-
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-               elem_type = &field->type.u.legacy.array.elem_type;
-               /* Only integers are currently supported in arrays. */
-               if (elem_type->atype != lttng_ust_ctl_atype_integer) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               ret = lttng_metadata_printf(session,
-                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
-                       elem_type->u.basic.integer.size,
-                       elem_type->u.basic.integer.alignment,
-                       elem_type->u.basic.integer.signedness,
-                       (elem_type->u.basic.integer.encoding == lttng_ust_ctl_encode_none)
-                               ? "none"
-                               : (elem_type->u.basic.integer.encoding == lttng_ust_ctl_encode_UTF8)
-                                       ? "UTF8"
-                                       : "ASCII",
-                       elem_type->u.basic.integer.base,
-                       elem_type->u.basic.integer.reverse_byte_order ? bo_reverse : bo_native,
-                       field->name, field->type.u.legacy.array.length);
-               (*iter_field)++;
-               break;
-       }
-       case lttng_ust_ctl_atype_array_nestable:
-       {
-               uint32_t array_length;
-               const struct lttng_ust_ctl_field *array_nestable;
-               const struct lttng_ust_ctl_type *elem_type;
-
-               array_length = field->type.u.array_nestable.length;
-               (*iter_field)++;
-
-               if (*iter_field >= nr_fields) {
-                       ret = -EOVERFLOW;
-                       goto end;
-               }
-               array_nestable = &fields[*iter_field];
-               elem_type = &array_nestable->type;
-
-               /* Only integers are currently supported in arrays. */
-               if (elem_type->atype != lttng_ust_ctl_atype_integer) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               if (field->type.u.array_nestable.alignment) {
-                       ret = print_tabs(session, nesting);
-                       if (ret) {
-                               goto end;
-                       }
-                       ret = lttng_metadata_printf(session,
-                               "struct { } align(%u) _%s_padding;\n",
-                               field->type.u.array_nestable.alignment * CHAR_BIT,
-                               field->name);
-                       if (ret) {
-                               goto end;
-                       }
-               }
-
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-               ret = lttng_metadata_printf(session,
-                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
-                       elem_type->u.integer.size,
-                       elem_type->u.integer.alignment,
-                       elem_type->u.integer.signedness,
-                       (elem_type->u.integer.encoding == lttng_ust_ctl_encode_none)
-                               ? "none"
-                               : (elem_type->u.integer.encoding == lttng_ust_ctl_encode_UTF8)
-                                       ? "UTF8"
-                                       : "ASCII",
-                       elem_type->u.integer.base,
-                       elem_type->u.integer.reverse_byte_order ? bo_reverse : bo_native,
-                       field->name, array_length);
-               (*iter_field)++;
-               break;
-       }
-       case lttng_ust_ctl_atype_sequence:
-       {
-               const struct lttng_ust_ctl_basic_type *elem_type;
-               const struct lttng_ust_ctl_basic_type *length_type;
-
-               elem_type = &field->type.u.legacy.sequence.elem_type;
-               length_type = &field->type.u.legacy.sequence.length_type;
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-
-               /* Only integers are currently supported in sequences. */
-               if (elem_type->atype != lttng_ust_ctl_atype_integer) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               ret = lttng_metadata_printf(session,
-                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
-                       length_type->u.basic.integer.size,
-                       (unsigned int) length_type->u.basic.integer.alignment,
-                       length_type->u.basic.integer.signedness,
-                       (length_type->u.basic.integer.encoding == lttng_ust_ctl_encode_none)
-                               ? "none"
-                               : ((length_type->u.basic.integer.encoding == lttng_ust_ctl_encode_UTF8)
-                                       ? "UTF8"
-                                       : "ASCII"),
-                       length_type->u.basic.integer.base,
-                       length_type->u.basic.integer.reverse_byte_order ? bo_reverse : bo_native,
-                       field->name);
-               if (ret) {
-                       goto end;
-               }
-
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-               ret = lttng_metadata_printf(session,
-                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
-                       elem_type->u.basic.integer.size,
-                       (unsigned int) elem_type->u.basic.integer.alignment,
-                       elem_type->u.basic.integer.signedness,
-                       (elem_type->u.basic.integer.encoding == lttng_ust_ctl_encode_none)
-                               ? "none"
-                               : ((elem_type->u.basic.integer.encoding == lttng_ust_ctl_encode_UTF8)
-                                       ? "UTF8"
-                                       : "ASCII"),
-                       elem_type->u.basic.integer.base,
-                       elem_type->u.basic.integer.reverse_byte_order ? bo_reverse : bo_native,
-                       field->name,
-                       field->name);
-               (*iter_field)++;
-               break;
-       }
-       case lttng_ust_ctl_atype_sequence_nestable:
-       {
-               const struct lttng_ust_ctl_field *sequence_nestable;
-               const struct lttng_ust_ctl_type *elem_type;
-
-               (*iter_field)++;
-               if (*iter_field >= nr_fields) {
-                       ret = -EOVERFLOW;
-                       goto end;
-               }
-               sequence_nestable = &fields[*iter_field];
-               elem_type = &sequence_nestable->type;
-
-               /* Only integers are currently supported in sequences. */
-               if (elem_type->atype != lttng_ust_ctl_atype_integer) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               if (field->type.u.sequence_nestable.alignment) {
-                       ret = print_tabs(session, nesting);
-                       if (ret) {
-                               goto end;
-                       }
-                       ret = lttng_metadata_printf(session,
-                               "struct { } align(%u) _%s_padding;\n",
-                               field->type.u.sequence_nestable.alignment * CHAR_BIT,
-                               field->name);
-                       if (ret) {
-                               goto end;
-                       }
-               }
-
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-               ret = lttng_metadata_printf(session,
-                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ _%s ];\n",
-                       elem_type->u.integer.size,
-                       (unsigned int) elem_type->u.integer.alignment,
-                       elem_type->u.integer.signedness,
-                       (elem_type->u.integer.encoding == lttng_ust_ctl_encode_none)
-                               ? "none"
-                               : ((elem_type->u.integer.encoding == lttng_ust_ctl_encode_UTF8)
-                                       ? "UTF8"
-                                       : "ASCII"),
-                       elem_type->u.integer.base,
-                       elem_type->u.integer.reverse_byte_order ? bo_reverse : bo_native,
-                       field->name,
-                       field->type.u.sequence_nestable.length_name);
-               (*iter_field)++;
-               break;
-       }
-       case lttng_ust_ctl_atype_string:
-               /* Default encoding is UTF8 */
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-               ret = lttng_metadata_printf(session,
-                       "string%s _%s;\n",
-                       field->type.u.string.encoding == lttng_ust_ctl_encode_ASCII ?
-                               " { encoding = ASCII; }" : "",
-                       field->name);
-               (*iter_field)++;
-               break;
-       case lttng_ust_ctl_atype_variant:
-               ret = _lttng_variant_statedump(session,
-                               field->type.u.legacy.variant.nr_choices,
-                               field->type.u.legacy.variant.tag_name,
-                               0,
-                               fields, nr_fields, iter_field, nesting);
-               if (ret) {
-                       goto end;
-               }
-               break;
-       case lttng_ust_ctl_atype_variant_nestable:
-               ret = _lttng_variant_statedump(session,
-                               field->type.u.variant_nestable.nr_choices,
-                               field->type.u.variant_nestable.tag_name,
-                               field->type.u.variant_nestable.alignment,
-                               fields, nr_fields, iter_field, nesting);
-               if (ret) {
-                       goto end;
-               }
-               break;
-       case lttng_ust_ctl_atype_struct:
-               if (field->type.u.legacy._struct.nr_fields != 0) {
-                       /* Currently only 0-length structures are supported. */
-                       ret = -EINVAL;
-                       goto end;
-               }
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-               ret = lttng_metadata_printf(session,
-                       "struct {} _%s;\n",
-                       field->name);
-               (*iter_field)++;
-               break;
-       case lttng_ust_ctl_atype_struct_nestable:
-               if (field->type.u.struct_nestable.nr_fields != 0) {
-                       /* Currently only 0-length structures are supported. */
-                       ret = -EINVAL;
-                       goto end;
-               }
-               ret = print_tabs(session, nesting);
-               if (ret) {
-                       goto end;
-               }
-               if (field->type.u.struct_nestable.alignment) {
-                       ret = lttng_metadata_printf(session,
-                               "struct {} align(%u) _%s;\n",
-                               field->type.u.struct_nestable.alignment * CHAR_BIT,
-                               field->name);
-                       if (ret) {
-                               goto end;
-                       }
-               } else {
-                       ret = lttng_metadata_printf(session,
-                               "struct {} _%s;\n",
-                               field->name);
-               }
-               (*iter_field)++;
-               break;
-       case lttng_ust_ctl_atype_enum_nestable:
-       {
-               const struct lttng_ust_ctl_field *container_field;
-               const struct lttng_ust_ctl_type *container_type;
-
-               (*iter_field)++;
-               if (*iter_field >= nr_fields) {
-                       ret = -EOVERFLOW;
-                       goto end;
-               }
-               container_field = &fields[*iter_field];
-               container_type = &container_field->type;
-
-               /* Only integers are supported as container types. */
-               if (container_type->atype != lttng_ust_ctl_atype_integer) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               ret = ust_metadata_enum_statedump(session,
-                       field->type.u.enum_nestable.name,
-                       field->type.u.enum_nestable.id,
-                       &container_type->u.integer,
-                       field->name, iter_field, nesting);
-               break;
-       }
-       default:
-               ret = -EINVAL;
-       }
-end:
-       return ret;
-}
-
-static
-int _lttng_context_metadata_statedump(struct ust_registry_session *session,
-               size_t nr_ctx_fields,
-               struct lttng_ust_ctl_field *ctx)
-{
-       int ret = 0;
-       size_t i = 0;
-
-       if (!ctx)
-               return 0;
-       for (;;) {
-               if (i >= nr_ctx_fields) {
-                       break;
-               }
-               ret = _lttng_field_statedump(session, ctx,
-                               nr_ctx_fields, &i, 2);
-               if (ret) {
-                       break;
-               }
-       }
-       return ret;
-}
-
-static
-int _lttng_fields_metadata_statedump(struct ust_registry_session *session,
-               struct ust_registry_event *event)
-{
-       int ret = 0;
-       size_t i = 0;
-
-       for (;;) {
-               if (i >= event->nr_fields) {
-                       break;
-               }
-               ret = _lttng_field_statedump(session, event->fields,
-                               event->nr_fields, &i, 2);
-               if (ret) {
-                       break;
-               }
-       }
-       return ret;
-}
-
-/*
- * Should be called with session registry mutex held.
- */
-int ust_metadata_event_statedump(struct ust_registry_session *session,
-               struct ust_registry_channel *chan,
-               struct ust_registry_event *event)
-{
-       int ret = 0;
-
-       /* Don't dump metadata events */
-       if (chan->chan_id == -1U)
-               return 0;
-
-       ret = lttng_metadata_printf(session,
-               "event {\n"
-               "       name = \"%s\";\n"
-               "       id = %u;\n"
-               "       stream_id = %u;\n",
-               event->name,
-               event->id,
-               chan->chan_id);
-       if (ret) {
-               goto end;
-       }
-
-       ret = lttng_metadata_printf(session,
-               "       loglevel = %d;\n",
-               event->loglevel_value);
-       if (ret) {
-               goto end;
-       }
-
-       if (event->model_emf_uri) {
-               ret = lttng_metadata_printf(session,
-                       "       model.emf.uri = \"%s\";\n",
-                       event->model_emf_uri);
-               if (ret) {
-                       goto end;
-               }
-       }
-
-       ret = lttng_metadata_printf(session,
-               "       fields := struct {\n"
-               );
-       if (ret) {
-               goto end;
-       }
-
-       ret = _lttng_fields_metadata_statedump(session, event);
-       if (ret) {
-               goto end;
-       }
-
-       ret = lttng_metadata_printf(session,
-               "       };\n"
-               "};\n\n");
-       if (ret) {
-               goto end;
-       }
-       event->metadata_dumped = 1;
-
-end:
-       return ret;
-}
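
The removed ust_metadata_event_statedump() emitted one event declaration per
registered event. The name, ids, loglevel and fields below are hypothetical
values; the field lines come from _lttng_field_statedump() at nesting level 2
and carry the usual leading underscore:

        event {
                name = "my_app:my_event";
                id = 0;
                stream_id = 0;
                loglevel = 13;
                fields := struct {
                    integer { size = 32; align = 32; signed = 1; encoding = none; base = 10; } _count;
                    string _msg;
                };
        };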
-
-/*
- * Should be called with session registry mutex held.
- */
-int ust_metadata_channel_statedump(struct ust_registry_session *session,
-               struct ust_registry_channel *chan)
-{
-       int ret = 0;
-
-       /* Don't dump metadata events */
-       if (chan->chan_id == -1U)
-               return 0;
-
-       if (!chan->header_type)
-               return -EINVAL;
-
-       ret = lttng_metadata_printf(session,
-               "stream {\n"
-               "       id = %u;\n"
-               "       event.header := %s;\n"
-               "       packet.context := struct packet_context;\n",
-               chan->chan_id,
-               chan->header_type == LTTNG_UST_CTL_CHANNEL_HEADER_COMPACT ?
-                       "struct event_header_compact" :
-                       "struct event_header_large");
-       if (ret) {
-               goto end;
-       }
-
-       if (chan->ctx_fields) {
-               ret = lttng_metadata_printf(session,
-                       "       event.context := struct {\n");
-               if (ret) {
-                       goto end;
-               }
-       }
-       ret = _lttng_context_metadata_statedump(session,
-               chan->nr_ctx_fields,
-               chan->ctx_fields);
-       if (ret) {
-               goto end;
-       }
-       if (chan->ctx_fields) {
-               ret = lttng_metadata_printf(session,
-                       "       };\n");
-               if (ret) {
-                       goto end;
-               }
-       }
-
-       ret = lttng_metadata_printf(session,
-               "};\n\n");
-       /* Flag success of metadata dump. */
-       chan->metadata_dumped = 1;
-
-end:
-       return ret;
-}
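
ust_metadata_channel_statedump() emitted the matching stream declaration; the
stream id, the header type and the optional event.context block below are
hypothetical and depend on the channel's configuration:

        stream {
                id = 0;
                event.header := struct event_header_compact;
                packet.context := struct packet_context;
                event.context := struct {
                    integer { size = 32; align = 32; signed = 1; encoding = none; base = 10; } _vpid;
                };
        };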
-
-static
-int _lttng_stream_packet_context_declare(struct ust_registry_session *session)
-{
-       return lttng_metadata_printf(session,
-               "struct packet_context {\n"
-               "       uint64_clock_monotonic_t timestamp_begin;\n"
-               "       uint64_clock_monotonic_t timestamp_end;\n"
-               "       uint64_t content_size;\n"
-               "       uint64_t packet_size;\n"
-               "       uint64_t packet_seq_num;\n"
-               "       unsigned long events_discarded;\n"
-               "       uint32_t cpu_id;\n"
-               "};\n\n"
-               );
-}
-
-/*
- * Compact header:
- * id: range: 0 - 30.
- * id 31 is reserved to indicate an extended header.
- *
- * Large header:
- * id: range: 0 - 65534.
- * id 65535 is reserved to indicate an extended header.
- */
-static
-int _lttng_event_header_declare(struct ust_registry_session *session)
-{
-       return lttng_metadata_printf(session,
-       "struct event_header_compact {\n"
-       "       enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
-       "       variant <id> {\n"
-       "               struct {\n"
-       "                       uint27_clock_monotonic_t timestamp;\n"
-       "               } compact;\n"
-       "               struct {\n"
-       "                       uint32_t id;\n"
-       "                       uint64_clock_monotonic_t timestamp;\n"
-       "               } extended;\n"
-       "       } v;\n"
-       "} align(%u);\n"
-       "\n"
-       "struct event_header_large {\n"
-       "       enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
-       "       variant <id> {\n"
-       "               struct {\n"
-       "                       uint32_clock_monotonic_t timestamp;\n"
-       "               } compact;\n"
-       "               struct {\n"
-       "                       uint32_t id;\n"
-       "                       uint64_clock_monotonic_t timestamp;\n"
-       "               } extended;\n"
-       "       } v;\n"
-       "} align(%u);\n\n",
-       session->uint32_t_alignment,
-       session->uint16_t_alignment
-       );
-}
-
-/*
- * The offset between monotonic and realtime clock can be negative if
- * the system sets the REALTIME clock to 0 after boot.
- */
-static
-int measure_single_clock_offset(struct offset_sample *sample)
-{
-       uint64_t monotonic_avg, monotonic[2], measure_delta, realtime;
-       uint64_t tcf = trace_clock_freq();
-       struct timespec rts = { 0, 0 };
-       int ret;
-
-       monotonic[0] = trace_clock_read64();
-       ret = lttng_clock_gettime(CLOCK_REALTIME, &rts);
-       if (ret < 0) {
-               return ret;
-       }
-       monotonic[1] = trace_clock_read64();
-       measure_delta = monotonic[1] - monotonic[0];
-       if (measure_delta > sample->measure_delta) {
-               /*
-                * Discard value if it took longer to read than the best
-                * sample so far.
-                */
-               return 0;
-       }
-       monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
-       realtime = (uint64_t) rts.tv_sec * tcf;
-       if (tcf == NSEC_PER_SEC) {
-               realtime += rts.tv_nsec;
-       } else {
-               realtime += (uint64_t) rts.tv_nsec * tcf / NSEC_PER_SEC;
-       }
-       sample->offset = (int64_t) realtime - monotonic_avg;
-       sample->measure_delta = measure_delta;
-       return 0;
-}
-
-/*
- * Approximation of NTP time of day to clock monotonic correlation,
- * taken at start of trace. Keep the measurement that took the least time
- * to complete, thus removing imprecision caused by preemption.
- * May return a negative offset.
- */
-static
-int64_t measure_clock_offset(void)
-{
-       int i;
-       struct offset_sample offset_best_sample = {
-               .offset = 0,
-               .measure_delta = UINT64_MAX,
-       };
-
-       for (i = 0; i < NR_CLOCK_OFFSET_SAMPLES; i++) {
-               if (measure_single_clock_offset(&offset_best_sample)) {
-                       return 0;
-               }
-       }
-       return offset_best_sample.offset;
-}
-
-static
-int print_metadata_session_information(struct ust_registry_session *registry)
-{
-       int ret;
-       struct ltt_session *session = NULL;
-       char creation_datetime[ISO8601_STR_LEN];
-
-       rcu_read_lock();
-       session = session_find_by_id(registry->tracing_id);
-       if (!session) {
-               ret = -1;
-               goto error;
-       }
-
-       /* Print the trace name */
-       ret = lttng_metadata_printf(registry, " trace_name = \"");
-       if (ret) {
-               goto error;
-       }
-
-       /*
-        * This is necessary since the creation time is present in the session
-        * name when it is generated.
-        */
-       if (session->has_auto_generated_name) {
-               ret = print_escaped_ctf_string(registry, DEFAULT_SESSION_NAME);
-       } else {
-               ret = print_escaped_ctf_string(registry, session->name);
-       }
-       if (ret) {
-               goto error;
-       }
-
-       ret = lttng_metadata_printf(registry, "\";\n");
-       if (ret) {
-               goto error;
-       }
-
-       /* Prepare creation time */
-       ret = time_to_iso8601_str(session->creation_time, creation_datetime,
-                       sizeof(creation_datetime));
-       if (ret) {
-               goto error;
-       }
-
-       /* Output the rest of the information */
-       ret = lttng_metadata_printf(registry,
-                       "       trace_creation_datetime = \"%s\";\n"
-                       "       hostname = \"%s\";\n",
-                       creation_datetime, session->hostname);
-       if (ret) {
-               goto error;
-       }
-
-error:
-       if (session) {
-               session_put(session);
-       }
-       rcu_read_unlock();
-       return ret;
-}
-
-static
-int print_metadata_app_information(struct ust_registry_session *registry,
-               struct ust_app *app)
-{
-       int ret;
-       char datetime[ISO8601_STR_LEN];
-
-       if (!app) {
-               ret = 0;
-               goto end;
-       }
-
-       ret = time_to_iso8601_str(
-                       app->registration_time, datetime, sizeof(datetime));
-       if (ret) {
-               goto end;
-       }
-
-       ret = lttng_metadata_printf(registry,
-                       "       tracer_patchlevel = %u;\n"
-                       "       vpid = %d;\n"
-                       "       procname = \"%s\";\n"
-                       "       vpid_datetime = \"%s\";\n",
-                       app->version.patchlevel, (int) app->pid, app->name,
-                       datetime);
-
-end:
-       return ret;
-}
-
-/*
- * Should be called with session registry mutex held.
- */
-int ust_metadata_session_statedump(struct ust_registry_session *session,
-               struct ust_app *app,
-               uint32_t major,
-               uint32_t minor)
-{
-       char uuid_s[LTTNG_UUID_STR_LEN],
-               clock_uuid_s[LTTNG_UUID_STR_LEN];
-       int ret = 0;
-
-       LTTNG_ASSERT(session);
-
-       lttng_uuid_to_str(session->uuid, uuid_s);
-
-       /* For crash ABI */
-       ret = lttng_metadata_printf(session,
-               "/* CTF %u.%u */\n\n",
-               CTF_SPEC_MAJOR,
-               CTF_SPEC_MINOR);
-       if (ret) {
-               goto end;
-       }
-
-       ret = lttng_metadata_printf(session,
-               "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
-               "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
-               "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
-               "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
-               "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
-               "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
-               "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
-               "\n"
-               "trace {\n"
-               "       major = %u;\n"
-               "       minor = %u;\n"
-               "       uuid = \"%s\";\n"
-               "       byte_order = %s;\n"
-               "       packet.header := struct {\n"
-               "               uint32_t magic;\n"
-               "               uint8_t  uuid[16];\n"
-               "               uint32_t stream_id;\n"
-               "               uint64_t stream_instance_id;\n"
-               "       };\n"
-               "};\n\n",
-               session->uint8_t_alignment,
-               session->uint16_t_alignment,
-               session->uint32_t_alignment,
-               session->uint64_t_alignment,
-               session->bits_per_long,
-               session->long_alignment,
-               CTF_SPEC_MAJOR,
-               CTF_SPEC_MINOR,
-               uuid_s,
-               session->byte_order == BIG_ENDIAN ? "be" : "le"
-               );
-       if (ret) {
-               goto end;
-       }
-
-       ret = lttng_metadata_printf(session,
-               "env {\n"
-               "       domain = \"ust\";\n"
-               "       tracer_name = \"lttng-ust\";\n"
-               "       tracer_major = %u;\n"
-               "       tracer_minor = %u;\n"
-               "       tracer_buffering_scheme = \"%s\";\n"
-               "       tracer_buffering_id = %u;\n"
-               "       architecture_bit_width = %u;\n",
-               major,
-               minor,
-               app ? "pid" : "uid",
-               app ? (int) app->pid : (int) session->tracing_uid,
-               session->bits_per_long);
-       if (ret) {
-               goto end;
-       }
-
-       ret = print_metadata_session_information(session);
-       if (ret) {
-               goto end;
-       }
-
-       /*
-        * If per-application registry, we can output extra information
-        * about the application.
-        */
-       ret = print_metadata_app_information(session, app);
-       if (ret) {
-               goto end;
-       }
-
-       ret = lttng_metadata_printf(session,
-               "};\n\n"
-               );
-       if (ret) {
-               goto end;
-       }
-
-       ret = lttng_metadata_printf(session,
-               "clock {\n"
-               "       name = \"%s\";\n",
-               trace_clock_name()
-               );
-       if (ret) {
-               goto end;
-       }
-
-       if (!trace_clock_uuid(clock_uuid_s)) {
-               ret = lttng_metadata_printf(session,
-                       "       uuid = \"%s\";\n",
-                       clock_uuid_s
-                       );
-               if (ret) {
-                       goto end;
-               }
-       }
-
-       ret = lttng_metadata_printf(session,
-               "       description = \"%s\";\n"
-               "       freq = %" PRIu64 "; /* Frequency, in Hz */\n"
-               "       /* clock value offset from Epoch is: offset * (1/freq) */\n"
-               "       offset = %" PRId64 ";\n"
-               "};\n\n",
-               trace_clock_description(),
-               trace_clock_freq(),
-               measure_clock_offset()
-               );
-       if (ret) {
-               goto end;
-       }
-
-       ret = lttng_metadata_printf(session,
-               "typealias integer {\n"
-               "       size = 27; align = 1; signed = false;\n"
-               "       map = clock.%s.value;\n"
-               "} := uint27_clock_monotonic_t;\n"
-               "\n"
-               "typealias integer {\n"
-               "       size = 32; align = %u; signed = false;\n"
-               "       map = clock.%s.value;\n"
-               "} := uint32_clock_monotonic_t;\n"
-               "\n"
-               "typealias integer {\n"
-               "       size = 64; align = %u; signed = false;\n"
-               "       map = clock.%s.value;\n"
-               "} := uint64_clock_monotonic_t;\n\n",
-               trace_clock_name(),
-               session->uint32_t_alignment,
-               trace_clock_name(),
-               session->uint64_t_alignment,
-               trace_clock_name()
-               );
-       if (ret) {
-               goto end;
-       }
-
-       ret = _lttng_stream_packet_context_declare(session);
-       if (ret) {
-               goto end;
-       }
-
-       ret = _lttng_event_header_declare(session);
-       if (ret) {
-               goto end;
-       }
-
-end:
-       return ret;
-}
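
(Aside, not part of the patch: the clock-offset estimation removed above — measure_single_clock_offset() and measure_clock_offset() — and re-added in the C++ file below can be summarized by the self-contained sketch that follows. It assumes a trace clock running at NSEC_PER_SEC, i.e. the tcf == NSEC_PER_SEC fast path, uses CLOCK_MONOTONIC as a stand-in for the trace clock, and all names other than the CLOCK_* constants are illustrative.)

    /*
     * Minimal sketch of the offset estimation: bracket one CLOCK_REALTIME
     * read between two CLOCK_MONOTONIC reads, keep the sample with the
     * narrowest monotonic bracket, and report
     * realtime - average(monotonic), in nanoseconds.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    namespace {

    constexpr int nr_clock_offset_samples = 10;
    constexpr uint64_t nsec_per_sec = 1000000000ULL;

    /* Read the given clock and convert the result to nanoseconds. */
    uint64_t read_clock_ns(clockid_t clock)
    {
    	timespec ts = {};

    	clock_gettime(clock, &ts);
    	return (uint64_t) ts.tv_sec * nsec_per_sec + (uint64_t) ts.tv_nsec;
    }

    int64_t estimate_clock_offset()
    {
    	int64_t best_offset = 0;
    	uint64_t best_delta = UINT64_MAX;

    	for (int i = 0; i < nr_clock_offset_samples; i++) {
    		const uint64_t t0 = read_clock_ns(CLOCK_MONOTONIC);
    		const uint64_t rt = read_clock_ns(CLOCK_REALTIME);
    		const uint64_t t1 = read_clock_ns(CLOCK_MONOTONIC);
    		const uint64_t delta = t1 - t0;

    		/* Discard samples that took longer to read than the best one. */
    		if (delta > best_delta) {
    			continue;
    		}

    		best_delta = delta;
    		best_offset = (int64_t) rt - (int64_t) ((t0 + t1) >> 1);
    	}

    	return best_offset;
    }

    } /* namespace */

    int main()
    {
    	printf("realtime - monotonic offset: %" PRId64 " ns\n",
    			estimate_clock_offset());
    	return 0;
    }
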
diff --git a/src/bin/lttng-sessiond/ust-metadata.cpp b/src/bin/lttng-sessiond/ust-metadata.cpp
new file mode 100644 (file)
index 0000000..abe473f
--- /dev/null
@@ -0,0 +1,1344 @@
+/*
+ * Copyright (C) 2010-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <stdint.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <limits.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <common/common.h>
+#include <common/time.h>
+
+#include "ust-registry.h"
+#include "ust-clock.h"
+#include "ust-app.h"
+
+#ifndef max_t
+#define max_t(type, a, b)      ((type) ((a) > (b) ? (a) : (b)))
+#endif
+
+#define NR_CLOCK_OFFSET_SAMPLES                10
+
+struct offset_sample {
+       int64_t offset;                 /* correlation offset */
+       uint64_t measure_delta;         /* lower is better */
+};
+
+static
+int _lttng_field_statedump(struct ust_registry_session *session,
+               const struct lttng_ust_ctl_field *fields, size_t nr_fields,
+               size_t *iter_field, size_t nesting);
+
+static inline
+int get_count_order(unsigned int count)
+{
+       int order;
+
+       order = lttng_fls(count) - 1;
+       if (count & (count - 1)) {
+               order++;
+       }
+       LTTNG_ASSERT(order >= 0);
+       return order;
+}
+
+/*
+ * Returns offset where to write in metadata array, or negative error value on error.
+ */
+static
+ssize_t metadata_reserve(struct ust_registry_session *session, size_t len)
+{
+       size_t new_len = session->metadata_len + len;
+       size_t new_alloc_len = new_len;
+       size_t old_alloc_len = session->metadata_alloc_len;
+       ssize_t ret;
+
+       if (new_alloc_len > (UINT32_MAX >> 1))
+               return -EINVAL;
+       if ((old_alloc_len << 1) > (UINT32_MAX >> 1))
+               return -EINVAL;
+
+       if (new_alloc_len > old_alloc_len) {
+               char *newptr;
+
+               new_alloc_len =
+                       max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
+               newptr = (char *) realloc(session->metadata, new_alloc_len);
+               if (!newptr)
+                       return -ENOMEM;
+               session->metadata = newptr;
+               /* Zero the newly allocated memory, past the end of the previous allocation. */
+               memset(&session->metadata[old_alloc_len], 0, new_alloc_len - old_alloc_len);
+               session->metadata_alloc_len = new_alloc_len;
+       }
+       ret = session->metadata_len;
+       session->metadata_len += len;
+       return ret;
+}
+
+static
+int metadata_file_append(struct ust_registry_session *session,
+               const char *str, size_t len)
+{
+       ssize_t written;
+
+       if (session->metadata_fd < 0) {
+               return 0;
+       }
+       /* Write to metadata file */
+       written = lttng_write(session->metadata_fd, str, len);
+       if (written != len) {
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * We have exclusive access to our metadata buffer (protected by the
+ * ust_lock), so we can do racy operations such as looking for
+ * remaining space left in packet and write, since mutual exclusion
+ * protects us from concurrent writes.
+ */
+static
+int lttng_metadata_printf(struct ust_registry_session *session,
+               const char *fmt, ...)
+{
+       char *str = NULL;
+       size_t len;
+       va_list ap;
+       ssize_t offset;
+       int ret;
+
+       va_start(ap, fmt);
+       ret = vasprintf(&str, fmt, ap);
+       va_end(ap);
+       if (ret < 0)
+               return -ENOMEM;
+
+       len = strlen(str);
+       offset = metadata_reserve(session, len);
+       if (offset < 0) {
+               ret = offset;
+               goto end;
+       }
+       memcpy(&session->metadata[offset], str, len);
+       ret = metadata_file_append(session, str, len);
+       if (ret) {
+               PERROR("Error appending to metadata file");
+               goto end;
+       }
+       DBG3("Append to metadata: \"%s\"", str);
+       ret = 0;
+
+end:
+       free(str);
+       return ret;
+}
+
+static
+int print_tabs(struct ust_registry_session *session, size_t nesting)
+{
+       size_t i;
+
+       for (i = 0; i < nesting; i++) {
+               int ret;
+
+               ret = lttng_metadata_printf(session, "  ");
+               if (ret) {
+                       return ret;
+               }
+       }
+       return 0;
+}
+
+static
+void sanitize_ctf_identifier(char *out, const char *in)
+{
+       size_t i;
+
+       for (i = 0; i < LTTNG_UST_ABI_SYM_NAME_LEN; i++) {
+               switch (in[i]) {
+               case '.':
+               case '$':
+               case ':':
+                       out[i] = '_';
+                       break;
+               default:
+                       out[i] = in[i];
+               }
+       }
+}
+
+static
+int print_escaped_ctf_string(struct ust_registry_session *session, const char *string)
+{
+       int ret = 0;
+       size_t i;
+       char cur;
+
+       i = 0;
+       cur = string[i];
+       while (cur != '\0') {
+               switch (cur) {
+               case '\n':
+                       ret = lttng_metadata_printf(session, "%s", "\\n");
+                       break;
+               case '\\':
+               case '"':
+                       ret = lttng_metadata_printf(session, "%c", '\\');
+                       if (ret) {
+                               goto error;
+                       }
+                       /* We still print the current char */
+                       /* Fallthrough */
+               default:
+                       ret = lttng_metadata_printf(session, "%c", cur);
+                       break;
+               }
+
+               if (ret) {
+                       goto error;
+               }
+
+               cur = string[++i];
+       }
+error:
+       return ret;
+}
+
+/* Called with session registry mutex held. */
+static
+int ust_metadata_enum_statedump(struct ust_registry_session *session,
+               const char *enum_name,
+               uint64_t enum_id,
+               const struct lttng_ust_ctl_integer_type *container_type,
+               const char *field_name, size_t *iter_field, size_t nesting)
+{
+       struct ust_registry_enum *reg_enum;
+       const struct lttng_ust_ctl_enum_entry *entries;
+       size_t nr_entries;
+       int ret = 0;
+       size_t i;
+       char identifier[LTTNG_UST_ABI_SYM_NAME_LEN];
+
+       rcu_read_lock();
+       reg_enum = ust_registry_lookup_enum_by_id(session, enum_name, enum_id);
+       rcu_read_unlock();
+       /* reg_enum can still be used because session registry mutex is held. */
+       if (!reg_enum) {
+               ret = -ENOENT;
+               goto end;
+       }
+       entries = reg_enum->entries;
+       nr_entries = reg_enum->nr_entries;
+
+       ret = print_tabs(session, nesting);
+       if (ret) {
+               goto end;
+       }
+       ret = lttng_metadata_printf(session,
+               "enum : integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u; } {\n",
+               container_type->size,
+               container_type->alignment,
+               container_type->signedness,
+               (container_type->encoding == lttng_ust_ctl_encode_none)
+                       ? "none"
+                       : (container_type->encoding == lttng_ust_ctl_encode_UTF8)
+                               ? "UTF8"
+                               : "ASCII",
+               container_type->base);
+       if (ret) {
+               goto end;
+       }
+       nesting++;
+       /* Dump all entries */
+       for (i = 0; i < nr_entries; i++) {
+               const struct lttng_ust_ctl_enum_entry *entry = &entries[i];
+               int j, len;
+
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+               ret = lttng_metadata_printf(session,
+                               "\"");
+               if (ret) {
+                       goto end;
+               }
+               len = strlen(entry->string);
+               /* Escape the character '"' */
+               for (j = 0; j < len; j++) {
+                       char c = entry->string[j];
+
+                       switch (c) {
+                       case '"':
+                               ret = lttng_metadata_printf(session,
+                                               "\\\"");
+                               break;
+                       case '\\':
+                               ret = lttng_metadata_printf(session,
+                                               "\\\\");
+                               break;
+                       default:
+                               ret = lttng_metadata_printf(session,
+                                               "%c", c);
+                               break;
+                       }
+                       if (ret) {
+                               goto end;
+                       }
+               }
+               ret = lttng_metadata_printf(session, "\"");
+               if (ret) {
+                       goto end;
+               }
+
+               if (entry->u.extra.options &
+                               LTTNG_UST_CTL_UST_ENUM_ENTRY_OPTION_IS_AUTO) {
+                       ret = lttng_metadata_printf(session, ",\n");
+                       if (ret) {
+                               goto end;
+                       }
+               } else {
+                       ret = lttng_metadata_printf(session,
+                                       " = ");
+                       if (ret) {
+                               goto end;
+                       }
+
+                       if (entry->start.signedness) {
+                               ret = lttng_metadata_printf(session,
+                                       "%lld", (long long) entry->start.value);
+                       } else {
+                               ret = lttng_metadata_printf(session,
+                                       "%llu", entry->start.value);
+                       }
+                       if (ret) {
+                               goto end;
+                       }
+
+                       if (entry->start.signedness == entry->end.signedness &&
+                                       entry->start.value ==
+                                               entry->end.value) {
+                               ret = lttng_metadata_printf(session, ",\n");
+                       } else {
+                               if (entry->end.signedness) {
+                                       ret = lttng_metadata_printf(session,
+                                               " ... %lld,\n",
+                                               (long long) entry->end.value);
+                               } else {
+                                       ret = lttng_metadata_printf(session,
+                                               " ... %llu,\n",
+                                               entry->end.value);
+                               }
+                       }
+                       if (ret) {
+                               goto end;
+                       }
+               }
+       }
+       nesting--;
+       sanitize_ctf_identifier(identifier, field_name);
+       ret = print_tabs(session, nesting);
+       if (ret) {
+               goto end;
+       }
+       ret = lttng_metadata_printf(session, "} _%s;\n",
+                       identifier);
+end:
+       (*iter_field)++;
+       return ret;
+}
+
+static
+int _lttng_variant_statedump(struct ust_registry_session *session,
+               uint32_t nr_choices, const char *tag_name,
+               uint32_t alignment,
+               const struct lttng_ust_ctl_field *fields, size_t nr_fields,
+               size_t *iter_field, size_t nesting)
+{
+       const struct lttng_ust_ctl_field *variant = &fields[*iter_field];
+       uint32_t i;
+       int ret;
+       char identifier[LTTNG_UST_ABI_SYM_NAME_LEN];
+
+       if (variant->type.atype != lttng_ust_ctl_atype_variant) {
+               ret = -EINVAL;
+               goto end;
+       }
+       (*iter_field)++;
+       sanitize_ctf_identifier(identifier, tag_name);
+       if (alignment) {
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+               ret = lttng_metadata_printf(session,
+               "struct { } align(%u) _%s_padding;\n",
+                               alignment * CHAR_BIT,
+                               variant->name);
+               if (ret) {
+                       goto end;
+               }
+       }
+       ret = print_tabs(session, nesting);
+       if (ret) {
+               goto end;
+       }
+       ret = lttng_metadata_printf(session,
+                       "variant <_%s> {\n",
+                       identifier);
+       if (ret) {
+               goto end;
+       }
+
+       for (i = 0; i < nr_choices; i++) {
+               if (*iter_field >= nr_fields) {
+                       ret = -EOVERFLOW;
+                       goto end;
+               }
+               ret = _lttng_field_statedump(session,
+                               fields, nr_fields,
+                               iter_field, nesting + 1);
+               if (ret) {
+                       goto end;
+               }
+       }
+       sanitize_ctf_identifier(identifier, variant->name);
+       ret = print_tabs(session, nesting);
+       if (ret) {
+               goto end;
+       }
+       ret = lttng_metadata_printf(session,
+                       "} _%s;\n",
+                       identifier);
+       if (ret) {
+               goto end;
+       }
+end:
+       return ret;
+}
+
+static
+int _lttng_field_statedump(struct ust_registry_session *session,
+               const struct lttng_ust_ctl_field *fields, size_t nr_fields,
+               size_t *iter_field, size_t nesting)
+{
+       int ret = 0;
+       const char *bo_be = " byte_order = be;";
+       const char *bo_le = " byte_order = le;";
+       const char *bo_native = "";
+       const char *bo_reverse;
+       const struct lttng_ust_ctl_field *field;
+
+       if (*iter_field >= nr_fields) {
+               ret = -EOVERFLOW;
+               goto end;
+       }
+       field = &fields[*iter_field];
+
+       if (session->byte_order == BIG_ENDIAN) {
+               bo_reverse = bo_le;
+       } else {
+               bo_reverse = bo_be;
+       }
+
+       switch (field->type.atype) {
+       case lttng_ust_ctl_atype_integer:
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+               ret = lttng_metadata_printf(session,
+                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
+                       field->type.u.integer.size,
+                       field->type.u.integer.alignment,
+                       field->type.u.integer.signedness,
+                       (field->type.u.integer.encoding == lttng_ust_ctl_encode_none)
+                               ? "none"
+                               : (field->type.u.integer.encoding == lttng_ust_ctl_encode_UTF8)
+                                       ? "UTF8"
+                                       : "ASCII",
+                       field->type.u.integer.base,
+                       field->type.u.integer.reverse_byte_order ? bo_reverse : bo_native,
+                       field->name);
+               (*iter_field)++;
+               break;
+       case lttng_ust_ctl_atype_enum:
+               ret = ust_metadata_enum_statedump(session,
+                       field->type.u.legacy.basic.enumeration.name,
+                       field->type.u.legacy.basic.enumeration.id,
+                       &field->type.u.legacy.basic.enumeration.container_type,
+                       field->name, iter_field, nesting);
+               break;
+       case lttng_ust_ctl_atype_float:
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+               ret = lttng_metadata_printf(session,
+                       "floating_point { exp_dig = %u; mant_dig = %u; align = %u;%s } _%s;\n",
+                       field->type.u._float.exp_dig,
+                       field->type.u._float.mant_dig,
+                       field->type.u._float.alignment,
+                       field->type.u._float.reverse_byte_order ? bo_reverse : bo_native,
+                       field->name);
+               (*iter_field)++;
+               break;
+       case lttng_ust_ctl_atype_array:
+       {
+               const struct lttng_ust_ctl_basic_type *elem_type;
+
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+               elem_type = &field->type.u.legacy.array.elem_type;
+               /* Only integers are currently supported in arrays. */
+               if (elem_type->atype != lttng_ust_ctl_atype_integer) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               ret = lttng_metadata_printf(session,
+                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
+                       elem_type->u.basic.integer.size,
+                       elem_type->u.basic.integer.alignment,
+                       elem_type->u.basic.integer.signedness,
+                       (elem_type->u.basic.integer.encoding == lttng_ust_ctl_encode_none)
+                               ? "none"
+                               : (elem_type->u.basic.integer.encoding == lttng_ust_ctl_encode_UTF8)
+                                       ? "UTF8"
+                                       : "ASCII",
+                       elem_type->u.basic.integer.base,
+                       elem_type->u.basic.integer.reverse_byte_order ? bo_reverse : bo_native,
+                       field->name, field->type.u.legacy.array.length);
+               (*iter_field)++;
+               break;
+       }
+       case lttng_ust_ctl_atype_array_nestable:
+       {
+               uint32_t array_length;
+               const struct lttng_ust_ctl_field *array_nestable;
+               const struct lttng_ust_ctl_type *elem_type;
+
+               array_length = field->type.u.array_nestable.length;
+               (*iter_field)++;
+
+               if (*iter_field >= nr_fields) {
+                       ret = -EOVERFLOW;
+                       goto end;
+               }
+               array_nestable = &fields[*iter_field];
+               elem_type = &array_nestable->type;
+
+               /* Only integers are currently supported in arrays. */
+               if (elem_type->atype != lttng_ust_ctl_atype_integer) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               if (field->type.u.array_nestable.alignment) {
+                       ret = print_tabs(session, nesting);
+                       if (ret) {
+                               goto end;
+                       }
+                       ret = lttng_metadata_printf(session,
+                               "struct { } align(%u) _%s_padding;\n",
+                               field->type.u.array_nestable.alignment * CHAR_BIT,
+                               field->name);
+                       if (ret) {
+                               goto end;
+                       }
+               }
+
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+               ret = lttng_metadata_printf(session,
+                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
+                       elem_type->u.integer.size,
+                       elem_type->u.integer.alignment,
+                       elem_type->u.integer.signedness,
+                       (elem_type->u.integer.encoding == lttng_ust_ctl_encode_none)
+                               ? "none"
+                               : (elem_type->u.integer.encoding == lttng_ust_ctl_encode_UTF8)
+                                       ? "UTF8"
+                                       : "ASCII",
+                       elem_type->u.integer.base,
+                       elem_type->u.integer.reverse_byte_order ? bo_reverse : bo_native,
+                       field->name, array_length);
+               (*iter_field)++;
+               break;
+       }
+       case lttng_ust_ctl_atype_sequence:
+       {
+               const struct lttng_ust_ctl_basic_type *elem_type;
+               const struct lttng_ust_ctl_basic_type *length_type;
+
+               elem_type = &field->type.u.legacy.sequence.elem_type;
+               length_type = &field->type.u.legacy.sequence.length_type;
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+
+               /* Only integers are currently supported in sequences. */
+               if (elem_type->atype != lttng_ust_ctl_atype_integer) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               ret = lttng_metadata_printf(session,
+                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
+                       length_type->u.basic.integer.size,
+                       (unsigned int) length_type->u.basic.integer.alignment,
+                       length_type->u.basic.integer.signedness,
+                       (length_type->u.basic.integer.encoding == lttng_ust_ctl_encode_none)
+                               ? "none"
+                               : ((length_type->u.basic.integer.encoding == lttng_ust_ctl_encode_UTF8)
+                                       ? "UTF8"
+                                       : "ASCII"),
+                       length_type->u.basic.integer.base,
+                       length_type->u.basic.integer.reverse_byte_order ? bo_reverse : bo_native,
+                       field->name);
+               if (ret) {
+                       goto end;
+               }
+
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+               ret = lttng_metadata_printf(session,
+                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
+                       elem_type->u.basic.integer.size,
+                       (unsigned int) elem_type->u.basic.integer.alignment,
+                       elem_type->u.basic.integer.signedness,
+                       (elem_type->u.basic.integer.encoding == lttng_ust_ctl_encode_none)
+                               ? "none"
+                               : ((elem_type->u.basic.integer.encoding == lttng_ust_ctl_encode_UTF8)
+                                       ? "UTF8"
+                                       : "ASCII"),
+                       elem_type->u.basic.integer.base,
+                       elem_type->u.basic.integer.reverse_byte_order ? bo_reverse : bo_native,
+                       field->name,
+                       field->name);
+               (*iter_field)++;
+               break;
+       }
+       case lttng_ust_ctl_atype_sequence_nestable:
+       {
+               const struct lttng_ust_ctl_field *sequence_nestable;
+               const struct lttng_ust_ctl_type *elem_type;
+
+               (*iter_field)++;
+               if (*iter_field >= nr_fields) {
+                       ret = -EOVERFLOW;
+                       goto end;
+               }
+               sequence_nestable = &fields[*iter_field];
+               elem_type = &sequence_nestable->type;
+
+               /* Only integers are currently supported in sequences. */
+               if (elem_type->atype != lttng_ust_ctl_atype_integer) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               if (field->type.u.sequence_nestable.alignment) {
+                       ret = print_tabs(session, nesting);
+                       if (ret) {
+                               goto end;
+                       }
+                       ret = lttng_metadata_printf(session,
+                               "struct { } align(%u) _%s_padding;\n",
+                               field->type.u.sequence_nestable.alignment * CHAR_BIT,
+                               field->name);
+                       if (ret) {
+                               goto end;
+                       }
+               }
+
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+               ret = lttng_metadata_printf(session,
+                       "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ _%s ];\n",
+                       elem_type->u.integer.size,
+                       (unsigned int) elem_type->u.integer.alignment,
+                       elem_type->u.integer.signedness,
+                       (elem_type->u.integer.encoding == lttng_ust_ctl_encode_none)
+                               ? "none"
+                               : ((elem_type->u.integer.encoding == lttng_ust_ctl_encode_UTF8)
+                                       ? "UTF8"
+                                       : "ASCII"),
+                       elem_type->u.integer.base,
+                       elem_type->u.integer.reverse_byte_order ? bo_reverse : bo_native,
+                       field->name,
+                       field->type.u.sequence_nestable.length_name);
+               (*iter_field)++;
+               break;
+       }
+       case lttng_ust_ctl_atype_string:
+               /* Default encoding is UTF8 */
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+               ret = lttng_metadata_printf(session,
+                       "string%s _%s;\n",
+                       field->type.u.string.encoding == lttng_ust_ctl_encode_ASCII ?
+                               " { encoding = ASCII; }" : "",
+                       field->name);
+               (*iter_field)++;
+               break;
+       case lttng_ust_ctl_atype_variant:
+               ret = _lttng_variant_statedump(session,
+                               field->type.u.legacy.variant.nr_choices,
+                               field->type.u.legacy.variant.tag_name,
+                               0,
+                               fields, nr_fields, iter_field, nesting);
+               if (ret) {
+                       goto end;
+               }
+               break;
+       case lttng_ust_ctl_atype_variant_nestable:
+               ret = _lttng_variant_statedump(session,
+                               field->type.u.variant_nestable.nr_choices,
+                               field->type.u.variant_nestable.tag_name,
+                               field->type.u.variant_nestable.alignment,
+                               fields, nr_fields, iter_field, nesting);
+               if (ret) {
+                       goto end;
+               }
+               break;
+       case lttng_ust_ctl_atype_struct:
+               if (field->type.u.legacy._struct.nr_fields != 0) {
+                       /* Currently only 0-length structures are supported. */
+                       ret = -EINVAL;
+                       goto end;
+               }
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+               ret = lttng_metadata_printf(session,
+                       "struct {} _%s;\n",
+                       field->name);
+               (*iter_field)++;
+               break;
+       case lttng_ust_ctl_atype_struct_nestable:
+               if (field->type.u.struct_nestable.nr_fields != 0) {
+                       /* Currently only 0-length structures are supported. */
+                       ret = -EINVAL;
+                       goto end;
+               }
+               ret = print_tabs(session, nesting);
+               if (ret) {
+                       goto end;
+               }
+               if (field->type.u.struct_nestable.alignment) {
+                       ret = lttng_metadata_printf(session,
+                               "struct {} align(%u) _%s;\n",
+                               field->type.u.struct_nestable.alignment * CHAR_BIT,
+                               field->name);
+                       if (ret) {
+                               goto end;
+                       }
+               } else {
+                       ret = lttng_metadata_printf(session,
+                               "struct {} _%s;\n",
+                               field->name);
+               }
+               (*iter_field)++;
+               break;
+       case lttng_ust_ctl_atype_enum_nestable:
+       {
+               const struct lttng_ust_ctl_field *container_field;
+               const struct lttng_ust_ctl_type *container_type;
+
+               (*iter_field)++;
+               if (*iter_field >= nr_fields) {
+                       ret = -EOVERFLOW;
+                       goto end;
+               }
+               container_field = &fields[*iter_field];
+               container_type = &container_field->type;
+
+               /* Only integers are supported as container types. */
+               if (container_type->atype != lttng_ust_ctl_atype_integer) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               ret = ust_metadata_enum_statedump(session,
+                       field->type.u.enum_nestable.name,
+                       field->type.u.enum_nestable.id,
+                       &container_type->u.integer,
+                       field->name, iter_field, nesting);
+               break;
+       }
+       default:
+               ret = -EINVAL;
+       }
+end:
+       return ret;
+}
+
+static
+int _lttng_context_metadata_statedump(struct ust_registry_session *session,
+               size_t nr_ctx_fields,
+               struct lttng_ust_ctl_field *ctx)
+{
+       int ret = 0;
+       size_t i = 0;
+
+       if (!ctx)
+               return 0;
+       for (;;) {
+               if (i >= nr_ctx_fields) {
+                       break;
+               }
+               ret = _lttng_field_statedump(session, ctx,
+                               nr_ctx_fields, &i, 2);
+               if (ret) {
+                       break;
+               }
+       }
+       return ret;
+}
+
+static
+int _lttng_fields_metadata_statedump(struct ust_registry_session *session,
+               struct ust_registry_event *event)
+{
+       int ret = 0;
+       size_t i = 0;
+
+       for (;;) {
+               if (i >= event->nr_fields) {
+                       break;
+               }
+               ret = _lttng_field_statedump(session, event->fields,
+                               event->nr_fields, &i, 2);
+               if (ret) {
+                       break;
+               }
+       }
+       return ret;
+}
+
+/*
+ * Should be called with session registry mutex held.
+ */
+int ust_metadata_event_statedump(struct ust_registry_session *session,
+               struct ust_registry_channel *chan,
+               struct ust_registry_event *event)
+{
+       int ret = 0;
+
+       /* Don't dump metadata events */
+       if (chan->chan_id == -1U)
+               return 0;
+
+       ret = lttng_metadata_printf(session,
+               "event {\n"
+               "       name = \"%s\";\n"
+               "       id = %u;\n"
+               "       stream_id = %u;\n",
+               event->name,
+               event->id,
+               chan->chan_id);
+       if (ret) {
+               goto end;
+       }
+
+       ret = lttng_metadata_printf(session,
+               "       loglevel = %d;\n",
+               event->loglevel_value);
+       if (ret) {
+               goto end;
+       }
+
+       if (event->model_emf_uri) {
+               ret = lttng_metadata_printf(session,
+                       "       model.emf.uri = \"%s\";\n",
+                       event->model_emf_uri);
+               if (ret) {
+                       goto end;
+               }
+       }
+
+       ret = lttng_metadata_printf(session,
+               "       fields := struct {\n"
+               );
+       if (ret) {
+               goto end;
+       }
+
+       ret = _lttng_fields_metadata_statedump(session, event);
+       if (ret) {
+               goto end;
+       }
+
+       ret = lttng_metadata_printf(session,
+               "       };\n"
+               "};\n\n");
+       if (ret) {
+               goto end;
+       }
+       event->metadata_dumped = 1;
+
+end:
+       return ret;
+}
+
+/*
+ * Should be called with session registry mutex held.
+ */
+int ust_metadata_channel_statedump(struct ust_registry_session *session,
+               struct ust_registry_channel *chan)
+{
+       int ret = 0;
+
+       /* Don't dump metadata events */
+       if (chan->chan_id == -1U)
+               return 0;
+
+       if (!chan->header_type)
+               return -EINVAL;
+
+       ret = lttng_metadata_printf(session,
+               "stream {\n"
+               "       id = %u;\n"
+               "       event.header := %s;\n"
+               "       packet.context := struct packet_context;\n",
+               chan->chan_id,
+               chan->header_type == LTTNG_UST_CTL_CHANNEL_HEADER_COMPACT ?
+                       "struct event_header_compact" :
+                       "struct event_header_large");
+       if (ret) {
+               goto end;
+       }
+
+       if (chan->ctx_fields) {
+               ret = lttng_metadata_printf(session,
+                       "       event.context := struct {\n");
+               if (ret) {
+                       goto end;
+               }
+       }
+       ret = _lttng_context_metadata_statedump(session,
+               chan->nr_ctx_fields,
+               chan->ctx_fields);
+       if (ret) {
+               goto end;
+       }
+       if (chan->ctx_fields) {
+               ret = lttng_metadata_printf(session,
+                       "       };\n");
+               if (ret) {
+                       goto end;
+               }
+       }
+
+       ret = lttng_metadata_printf(session,
+               "};\n\n");
+       /* Flag success of metadata dump. */
+       chan->metadata_dumped = 1;
+
+end:
+       return ret;
+}
+
+static
+int _lttng_stream_packet_context_declare(struct ust_registry_session *session)
+{
+       return lttng_metadata_printf(session,
+               "struct packet_context {\n"
+               "       uint64_clock_monotonic_t timestamp_begin;\n"
+               "       uint64_clock_monotonic_t timestamp_end;\n"
+               "       uint64_t content_size;\n"
+               "       uint64_t packet_size;\n"
+               "       uint64_t packet_seq_num;\n"
+               "       unsigned long events_discarded;\n"
+               "       uint32_t cpu_id;\n"
+               "};\n\n"
+               );
+}
+
+/*
+ * Compact header:
+ * id: range: 0 - 30.
+ * id 31 is reserved to indicate an extended header.
+ *
+ * Large header:
+ * id: range: 0 - 65534.
+ * id 65535 is reserved to indicate an extended header.
+ */
+static
+int _lttng_event_header_declare(struct ust_registry_session *session)
+{
+       return lttng_metadata_printf(session,
+       "struct event_header_compact {\n"
+       "       enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
+       "       variant <id> {\n"
+       "               struct {\n"
+       "                       uint27_clock_monotonic_t timestamp;\n"
+       "               } compact;\n"
+       "               struct {\n"
+       "                       uint32_t id;\n"
+       "                       uint64_clock_monotonic_t timestamp;\n"
+       "               } extended;\n"
+       "       } v;\n"
+       "} align(%u);\n"
+       "\n"
+       "struct event_header_large {\n"
+       "       enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
+       "       variant <id> {\n"
+       "               struct {\n"
+       "                       uint32_clock_monotonic_t timestamp;\n"
+       "               } compact;\n"
+       "               struct {\n"
+       "                       uint32_t id;\n"
+       "                       uint64_clock_monotonic_t timestamp;\n"
+       "               } extended;\n"
+       "       } v;\n"
+       "} align(%u);\n\n",
+       session->uint32_t_alignment,
+       session->uint16_t_alignment
+       );
+}
+
+/*
+ * The offset between monotonic and realtime clock can be negative if
+ * the system sets the REALTIME clock to 0 after boot.
+ */
+static
+int measure_single_clock_offset(struct offset_sample *sample)
+{
+       uint64_t monotonic_avg, monotonic[2], measure_delta, realtime;
+       uint64_t tcf = trace_clock_freq();
+       struct timespec rts = { 0, 0 };
+       int ret;
+
+       monotonic[0] = trace_clock_read64();
+       ret = lttng_clock_gettime(CLOCK_REALTIME, &rts);
+       if (ret < 0) {
+               return ret;
+       }
+       monotonic[1] = trace_clock_read64();
+       measure_delta = monotonic[1] - monotonic[0];
+       if (measure_delta > sample->measure_delta) {
+               /*
+                * Discard value if it took longer to read than the best
+                * sample so far.
+                */
+               return 0;
+       }
+       monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
+       realtime = (uint64_t) rts.tv_sec * tcf;
+       if (tcf == NSEC_PER_SEC) {
+               realtime += rts.tv_nsec;
+       } else {
+               realtime += (uint64_t) rts.tv_nsec * tcf / NSEC_PER_SEC;
+       }
+       sample->offset = (int64_t) realtime - monotonic_avg;
+       sample->measure_delta = measure_delta;
+       return 0;
+}
+
+/*
+ * Approximation of NTP time of day to clock monotonic correlation,
+ * taken at start of trace. Keep the measurement that took the least time
+ * to complete, thus removing imprecision caused by preemption.
+ * May return a negative offset.
+ */
+static
+int64_t measure_clock_offset(void)
+{
+       int i;
+       struct offset_sample offset_best_sample = {
+               .offset = 0,
+               .measure_delta = UINT64_MAX,
+       };
+
+       for (i = 0; i < NR_CLOCK_OFFSET_SAMPLES; i++) {
+               if (measure_single_clock_offset(&offset_best_sample)) {
+                       return 0;
+               }
+       }
+       return offset_best_sample.offset;
+}
+
+static
+int print_metadata_session_information(struct ust_registry_session *registry)
+{
+       int ret;
+       struct ltt_session *session = NULL;
+       char creation_datetime[ISO8601_STR_LEN];
+
+       rcu_read_lock();
+       session = session_find_by_id(registry->tracing_id);
+       if (!session) {
+               ret = -1;
+               goto error;
+       }
+
+       /* Print the trace name */
+       ret = lttng_metadata_printf(registry, " trace_name = \"");
+       if (ret) {
+               goto error;
+       }
+
+       /*
+        * This is necessary since the creation time is present in the session
+        * name when it is generated.
+        */
+       if (session->has_auto_generated_name) {
+               ret = print_escaped_ctf_string(registry, DEFAULT_SESSION_NAME);
+       } else {
+               ret = print_escaped_ctf_string(registry, session->name);
+       }
+       if (ret) {
+               goto error;
+       }
+
+       ret = lttng_metadata_printf(registry, "\";\n");
+       if (ret) {
+               goto error;
+       }
+
+       /* Prepare creation time */
+       ret = time_to_iso8601_str(session->creation_time, creation_datetime,
+                       sizeof(creation_datetime));
+       if (ret) {
+               goto error;
+       }
+
+       /* Output the rest of the information */
+       ret = lttng_metadata_printf(registry,
+                       "       trace_creation_datetime = \"%s\";\n"
+                       "       hostname = \"%s\";\n",
+                       creation_datetime, session->hostname);
+       if (ret) {
+               goto error;
+       }
+
+error:
+       if (session) {
+               session_put(session);
+       }
+       rcu_read_unlock();
+       return ret;
+}
+
+static
+int print_metadata_app_information(struct ust_registry_session *registry,
+               struct ust_app *app)
+{
+       int ret;
+       char datetime[ISO8601_STR_LEN];
+
+       if (!app) {
+               ret = 0;
+               goto end;
+       }
+
+       ret = time_to_iso8601_str(
+                       app->registration_time, datetime, sizeof(datetime));
+       if (ret) {
+               goto end;
+       }
+
+       ret = lttng_metadata_printf(registry,
+                       "       tracer_patchlevel = %u;\n"
+                       "       vpid = %d;\n"
+                       "       procname = \"%s\";\n"
+                       "       vpid_datetime = \"%s\";\n",
+                       app->version.patchlevel, (int) app->pid, app->name,
+                       datetime);
+
+end:
+       return ret;
+}
+
+/*
+ * Should be called with session registry mutex held.
+ */
+int ust_metadata_session_statedump(struct ust_registry_session *session,
+               struct ust_app *app,
+               uint32_t major,
+               uint32_t minor)
+{
+       char uuid_s[LTTNG_UUID_STR_LEN],
+               clock_uuid_s[LTTNG_UUID_STR_LEN];
+       int ret = 0;
+
+       LTTNG_ASSERT(session);
+
+       lttng_uuid_to_str(session->uuid, uuid_s);
+
+       /* For crash ABI */
+       ret = lttng_metadata_printf(session,
+               "/* CTF %u.%u */\n\n",
+               CTF_SPEC_MAJOR,
+               CTF_SPEC_MINOR);
+       if (ret) {
+               goto end;
+       }
+
+       ret = lttng_metadata_printf(session,
+               "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
+               "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
+               "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
+               "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
+               "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
+               "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
+               "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
+               "\n"
+               "trace {\n"
+               "       major = %u;\n"
+               "       minor = %u;\n"
+               "       uuid = \"%s\";\n"
+               "       byte_order = %s;\n"
+               "       packet.header := struct {\n"
+               "               uint32_t magic;\n"
+               "               uint8_t  uuid[16];\n"
+               "               uint32_t stream_id;\n"
+               "               uint64_t stream_instance_id;\n"
+               "       };\n"
+               "};\n\n",
+               session->uint8_t_alignment,
+               session->uint16_t_alignment,
+               session->uint32_t_alignment,
+               session->uint64_t_alignment,
+               session->bits_per_long,
+               session->long_alignment,
+               CTF_SPEC_MAJOR,
+               CTF_SPEC_MINOR,
+               uuid_s,
+               session->byte_order == BIG_ENDIAN ? "be" : "le"
+               );
+       if (ret) {
+               goto end;
+       }
+
+       ret = lttng_metadata_printf(session,
+               "env {\n"
+               "       domain = \"ust\";\n"
+               "       tracer_name = \"lttng-ust\";\n"
+               "       tracer_major = %u;\n"
+               "       tracer_minor = %u;\n"
+               "       tracer_buffering_scheme = \"%s\";\n"
+               "       tracer_buffering_id = %u;\n"
+               "       architecture_bit_width = %u;\n",
+               major,
+               minor,
+               app ? "pid" : "uid",
+               app ? (int) app->pid : (int) session->tracing_uid,
+               session->bits_per_long);
+       if (ret) {
+               goto end;
+       }
+
+       ret = print_metadata_session_information(session);
+       if (ret) {
+               goto end;
+       }
+
+       /*
+        * If per-application registry, we can output extra information
+        * about the application.
+        */
+       ret = print_metadata_app_information(session, app);
+       if (ret) {
+               goto end;
+       }
+
+       ret = lttng_metadata_printf(session,
+               "};\n\n"
+               );
+       if (ret) {
+               goto end;
+       }
+
+       ret = lttng_metadata_printf(session,
+               "clock {\n"
+               "       name = \"%s\";\n",
+               trace_clock_name()
+               );
+       if (ret) {
+               goto end;
+       }
+
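+       /* The clock UUID is optional; only emit it when available. */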
+       if (!trace_clock_uuid(clock_uuid_s)) {
+               ret = lttng_metadata_printf(session,
+                       "       uuid = \"%s\";\n",
+                       clock_uuid_s
+                       );
+               if (ret) {
+                       goto end;
+               }
+       }
+
+       ret = lttng_metadata_printf(session,
+               "       description = \"%s\";\n"
+               "       freq = %" PRIu64 "; /* Frequency, in Hz */\n"
+               "       /* clock value offset from Epoch is: offset * (1/freq) */\n"
+               "       offset = %" PRId64 ";\n"
+               "};\n\n",
+               trace_clock_description(),
+               trace_clock_freq(),
+               measure_clock_offset()
+               );
+       if (ret) {
+               goto end;
+       }
+
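+       /* Integer type aliases mapped onto the trace clock value. */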
+       ret = lttng_metadata_printf(session,
+               "typealias integer {\n"
+               "       size = 27; align = 1; signed = false;\n"
+               "       map = clock.%s.value;\n"
+               "} := uint27_clock_monotonic_t;\n"
+               "\n"
+               "typealias integer {\n"
+               "       size = 32; align = %u; signed = false;\n"
+               "       map = clock.%s.value;\n"
+               "} := uint32_clock_monotonic_t;\n"
+               "\n"
+               "typealias integer {\n"
+               "       size = 64; align = %u; signed = false;\n"
+               "       map = clock.%s.value;\n"
+               "} := uint64_clock_monotonic_t;\n\n",
+               trace_clock_name(),
+               session->uint32_t_alignment,
+               trace_clock_name(),
+               session->uint64_t_alignment,
+               trace_clock_name()
+               );
+       if (ret) {
+               goto end;
+       }
+
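+       /* Finally, declare the stream packet context and event header. */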
+       ret = _lttng_stream_packet_context_declare(session);
+       if (ret) {
+               goto end;
+       }
+
+       ret = _lttng_event_header_declare(session);
+       if (ret) {
+               goto end;
+       }
+
+end:
+       return ret;
+}
diff --git a/src/bin/lttng-sessiond/ust-registry.c b/src/bin/lttng-sessiond/ust-registry.c
deleted file mode 100644 (file)
index 93131ab..0000000
+++ /dev/null
@@ -1,1056 +0,0 @@
-/*
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <inttypes.h>
-
-#include <common/common.h>
-#include <common/hashtable/utils.h>
-#include <lttng/lttng.h>
-
-#include "ust-registry.h"
-#include "ust-app.h"
-#include "ust-field-utils.h"
-#include "utils.h"
-#include "lttng-sessiond.h"
-#include "notification-thread-commands.h"
-
-
-/*
- * Hash table match function for event in the registry.
- */
-static int ht_match_event(struct cds_lfht_node *node, const void *_key)
-{
-       const struct ust_registry_event *key;
-       struct ust_registry_event *event;
-       int i;
-
-       LTTNG_ASSERT(node);
-       LTTNG_ASSERT(_key);
-
-       event = caa_container_of(node, struct ust_registry_event, node.node);
-       LTTNG_ASSERT(event);
-       key = _key;
-
-       /* It has to be a perfect match. First, compare the event names. */
-       if (strncmp(event->name, key->name, sizeof(event->name))) {
-               goto no_match;
-       }
-
-       /* Compare log levels. */
-       if (event->loglevel_value != key->loglevel_value) {
-               goto no_match;
-       }
-
-       /* Compare the number of fields. */
-       if (event->nr_fields != key->nr_fields) {
-               goto no_match;
-       }
-
-       /* Compare each field individually. */
-       for (i = 0; i < event->nr_fields; i++) {
-               if (!match_lttng_ust_ctl_field(&event->fields[i], &key->fields[i])) {
-                       goto no_match;
-               }
-       }
-
-       /* Compare model URI. */
-       if (event->model_emf_uri != NULL && key->model_emf_uri == NULL) {
-               goto no_match;
-       } else if(event->model_emf_uri == NULL && key->model_emf_uri != NULL) {
-               goto no_match;
-       } else if (event->model_emf_uri != NULL && key->model_emf_uri != NULL) {
-               if (strcmp(event->model_emf_uri, key->model_emf_uri)) {
-                       goto no_match;
-               }
-       }
-
-       /* Match */
-       return 1;
-
-no_match:
-       return 0;
-}
-
-static unsigned long ht_hash_event(const void *_key, unsigned long seed)
-{
-       uint64_t hashed_key;
-       const struct ust_registry_event *key = _key;
-
-       LTTNG_ASSERT(key);
-
-       hashed_key = (uint64_t) hash_key_str(key->name, seed);
-
-       return hash_key_u64(&hashed_key, seed);
-}
-
-static int compare_enums(const struct ust_registry_enum *reg_enum_a,
-               const struct ust_registry_enum *reg_enum_b)
-{
-       int ret = 0;
-       size_t i;
-
-       LTTNG_ASSERT(strcmp(reg_enum_a->name, reg_enum_b->name) == 0);
-       if (reg_enum_a->nr_entries != reg_enum_b->nr_entries) {
-               ret = -1;
-               goto end;
-       }
-       for (i = 0; i < reg_enum_a->nr_entries; i++) {
-               const struct lttng_ust_ctl_enum_entry *entries_a, *entries_b;
-
-               entries_a = &reg_enum_a->entries[i];
-               entries_b = &reg_enum_b->entries[i];
-               if (entries_a->start.value != entries_b->start.value) {
-                       ret = -1;
-                       goto end;
-               }
-               if (entries_a->end.value != entries_b->end.value) {
-                       ret = -1;
-                       goto end;
-               }
-               if (entries_a->start.signedness != entries_b->start.signedness) {
-                       ret = -1;
-                       goto end;
-               }
-               if (entries_a->end.signedness != entries_b->end.signedness) {
-                       ret = -1;
-                       goto end;
-               }
-
-               if (strcmp(entries_a->string, entries_b->string)) {
-                       ret = -1;
-                       goto end;
-               }
-       }
-end:
-       return ret;
-}
-
-/*
- * Hash table match function for enumerations in the session. Match is
- * performed on enumeration name, and confirmed by comparing the enum
- * entries.
- */
-static int ht_match_enum(struct cds_lfht_node *node, const void *_key)
-{
-       struct ust_registry_enum *_enum;
-       const struct ust_registry_enum *key;
-
-       LTTNG_ASSERT(node);
-       LTTNG_ASSERT(_key);
-
-       _enum = caa_container_of(node, struct ust_registry_enum,
-                       node.node);
-       LTTNG_ASSERT(_enum);
-       key = _key;
-
-       if (strncmp(_enum->name, key->name, LTTNG_UST_ABI_SYM_NAME_LEN)) {
-               goto no_match;
-       }
-       if (compare_enums(_enum, key)) {
-               goto no_match;
-       }
-
-       /* Match. */
-       return 1;
-
-no_match:
-       return 0;
-}
-
-/*
- * Hash table match function for enumerations in the session. Match is
- * performed by enumeration ID.
- */
-static int ht_match_enum_id(struct cds_lfht_node *node, const void *_key)
-{
-       struct ust_registry_enum *_enum;
-       const struct ust_registry_enum *key = _key;
-
-       LTTNG_ASSERT(node);
-       LTTNG_ASSERT(_key);
-
-       _enum = caa_container_of(node, struct ust_registry_enum, node.node);
-       LTTNG_ASSERT(_enum);
-
-       if (_enum->id != key->id) {
-               goto no_match;
-       }
-
-       /* Match. */
-       return 1;
-
-no_match:
-       return 0;
-}
-
-/*
- * Hash table hash function for enumerations in the session. The
- * enumeration name is used for hashing.
- */
-static unsigned long ht_hash_enum(void *_key, unsigned long seed)
-{
-       struct ust_registry_enum *key = _key;
-
-       LTTNG_ASSERT(key);
-       return hash_key_str(key->name, seed);
-}
-
-/*
- * Return negative value on error, 0 if OK.
- *
- * TODO: we could add stricter verification of more types to catch
- * errors in liblttng-ust implementation earlier than consumption by the
- * trace reader.
- */
-static
-int validate_event_field(struct lttng_ust_ctl_field *field,
-               const char *event_name,
-               struct ust_app *app)
-{
-       int ret = 0;
-
-       switch(field->type.atype) {
-       case lttng_ust_ctl_atype_integer:
-       case lttng_ust_ctl_atype_enum:
-       case lttng_ust_ctl_atype_array:
-       case lttng_ust_ctl_atype_sequence:
-       case lttng_ust_ctl_atype_string:
-       case lttng_ust_ctl_atype_variant:
-       case lttng_ust_ctl_atype_array_nestable:
-       case lttng_ust_ctl_atype_sequence_nestable:
-       case lttng_ust_ctl_atype_enum_nestable:
-       case lttng_ust_ctl_atype_variant_nestable:
-               break;
-       case lttng_ust_ctl_atype_struct:
-               if (field->type.u.legacy._struct.nr_fields != 0) {
-                       WARN("Unsupported non-empty struct field.");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               break;
-       case lttng_ust_ctl_atype_struct_nestable:
-               if (field->type.u.struct_nestable.nr_fields != 0) {
-                       WARN("Unsupported non-empty struct field.");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               break;
-
-       case lttng_ust_ctl_atype_float:
-               switch (field->type.u._float.mant_dig) {
-               case 0:
-                       WARN("UST application '%s' (pid: %d) has unknown float mantissa '%u' "
-                               "in field '%s', rejecting event '%s'",
-                               app->name, app->pid,
-                               field->type.u._float.mant_dig,
-                               field->name,
-                               event_name);
-                       ret = -EINVAL;
-                       goto end;
-               default:
-                       break;
-               }
-               break;
-
-       default:
-               ret = -ENOENT;
-               goto end;
-       }
-end:
-       return ret;
-}
-
-static
-int validate_event_fields(size_t nr_fields, struct lttng_ust_ctl_field *fields,
-               const char *event_name, struct ust_app *app)
-{
-       unsigned int i;
-
-       for (i = 0; i < nr_fields; i++) {
-               if (validate_event_field(&fields[i], event_name, app) < 0)
-                       return -EINVAL;
-       }
-       return 0;
-}
-
-/*
- * Allocate event and initialize it. This does NOT set a valid event id from a
- * registry.
- */
-static struct ust_registry_event *alloc_event(int session_objd,
-               int channel_objd, char *name, char *sig, size_t nr_fields,
-               struct lttng_ust_ctl_field *fields, int loglevel_value,
-               char *model_emf_uri, struct ust_app *app)
-{
-       struct ust_registry_event *event = NULL;
-
-       /*
-        * Ensure that the field content is valid.
-        */
-       if (validate_event_fields(nr_fields, fields, name, app) < 0) {
-               return NULL;
-       }
-
-       event = zmalloc(sizeof(*event));
-       if (!event) {
-               PERROR("zmalloc ust registry event");
-               goto error;
-       }
-
-       event->session_objd = session_objd;
-       event->channel_objd = channel_objd;
-       /* Allocated by ustctl. */
-       event->signature = sig;
-       event->nr_fields = nr_fields;
-       event->fields = fields;
-       event->loglevel_value = loglevel_value;
-       event->model_emf_uri = model_emf_uri;
-       if (name) {
-               /* Copy event name and force NULL byte. */
-               strncpy(event->name, name, sizeof(event->name));
-               event->name[sizeof(event->name) - 1] = '\0';
-       }
-       cds_lfht_node_init(&event->node.node);
-
-error:
-       return event;
-}
-
-/*
- * Free event data structure. This does NOT delete it from any hash table. It's
- * safe to pass a NULL pointer. This shoudl be called inside a call RCU if the
- * event is previously deleted from a rcu hash table.
- */
-static void destroy_event(struct ust_registry_event *event)
-{
-       if (!event) {
-               return;
-       }
-
-       free(event->fields);
-       free(event->model_emf_uri);
-       free(event->signature);
-       free(event);
-}
-
-/*
- * Destroy event function call of the call RCU.
- */
-static void destroy_event_rcu(struct rcu_head *head)
-{
-       struct lttng_ht_node_u64 *node =
-               caa_container_of(head, struct lttng_ht_node_u64, head);
-       struct ust_registry_event *event =
-               caa_container_of(node, struct ust_registry_event, node);
-
-       destroy_event(event);
-}
-
-/*
- * Find an event using the name and signature in the given registry. RCU read
- * side lock MUST be acquired before calling this function and as long as the
- * event reference is kept by the caller.
- *
- * On success, the event pointer is returned else NULL.
- */
-struct ust_registry_event *ust_registry_find_event(
-               struct ust_registry_channel *chan, char *name, char *sig)
-{
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-       struct ust_registry_event *event = NULL;
-       struct ust_registry_event key;
-
-       LTTNG_ASSERT(chan);
-       LTTNG_ASSERT(name);
-       LTTNG_ASSERT(sig);
-
-       /* Setup key for the match function. */
-       strncpy(key.name, name, sizeof(key.name));
-       key.name[sizeof(key.name) - 1] = '\0';
-       key.signature = sig;
-
-       cds_lfht_lookup(chan->ht->ht, chan->ht->hash_fct(&key, lttng_ht_seed),
-                       chan->ht->match_fct, &key, &iter.iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (!node) {
-               goto end;
-       }
-       event = caa_container_of(node, struct ust_registry_event, node);
-
-end:
-       return event;
-}
-
-/*
- * Create a ust_registry_event from the given parameters and add it to the
- * registry hash table. If event_id is valid, it is set with the newly created
- * event id.
- *
- * On success, return 0 else a negative value. The created event MUST be unique
- * so on duplicate entry -EINVAL is returned. On error, event_id is untouched.
- *
- * Should be called with session registry mutex held.
- */
-int ust_registry_create_event(struct ust_registry_session *session,
-               uint64_t chan_key, int session_objd, int channel_objd, char *name,
-               char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields,
-               int loglevel_value, char *model_emf_uri, int buffer_type,
-               uint32_t *event_id_p, struct ust_app *app)
-{
-       int ret;
-       uint32_t event_id;
-       struct cds_lfht_node *nptr;
-       struct ust_registry_event *event = NULL;
-       struct ust_registry_channel *chan;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(name);
-       LTTNG_ASSERT(sig);
-       LTTNG_ASSERT(event_id_p);
-
-       rcu_read_lock();
-
-       /*
-        * This should not happen but since it comes from the UST tracer, an
-        * external party, don't assert and simply validate values.
-        */
-       if (session_objd < 0 || channel_objd < 0) {
-               ret = -EINVAL;
-               goto error_free;
-       }
-
-       chan = ust_registry_channel_find(session, chan_key);
-       if (!chan) {
-               ret = -EINVAL;
-               goto error_free;
-       }
-
-       /* Check if we've reached the maximum possible id. */
-       if (ust_registry_is_max_id(chan->used_event_id)) {
-               ret = -ENOENT;
-               goto error_free;
-       }
-
-       event = alloc_event(session_objd, channel_objd, name, sig, nr_fields,
-                       fields, loglevel_value, model_emf_uri, app);
-       if (!event) {
-               ret = -ENOMEM;
-               goto error_free;
-       }
-
-       DBG3("UST registry creating event with event: %s, sig: %s, id: %u, "
-                       "chan_objd: %u, sess_objd: %u, chan_id: %u", event->name,
-                       event->signature, event->id, event->channel_objd,
-                       event->session_objd, chan->chan_id);
-
-       /*
-        * This is an add unique with a custom match function for event. The node
-        * are matched using the event name and signature.
-        */
-       nptr = cds_lfht_add_unique(chan->ht->ht, chan->ht->hash_fct(event,
-                               lttng_ht_seed), chan->ht->match_fct, event, &event->node.node);
-       if (nptr != &event->node.node) {
-               if (buffer_type == LTTNG_BUFFER_PER_UID) {
-                       /*
-                        * This is normal, we just have to send the event id of the
-                        * returned node and make sure we destroy the previously allocated
-                        * event object.
-                        */
-                       destroy_event(event);
-                       event = caa_container_of(nptr, struct ust_registry_event,
-                                       node.node);
-                       LTTNG_ASSERT(event);
-                       event_id = event->id;
-               } else {
-                       ERR("UST registry create event add unique failed for event: %s, "
-                                       "sig: %s, id: %u, chan_objd: %u, sess_objd: %u",
-                                       event->name, event->signature, event->id,
-                                       event->channel_objd, event->session_objd);
-                       ret = -EINVAL;
-                       goto error_unlock;
-               }
-       } else {
-               /* Request next event id if the node was successfully added. */
-               event_id = event->id = ust_registry_get_next_event_id(chan);
-       }
-
-       *event_id_p = event_id;
-
-       if (!event->metadata_dumped) {
-               /* Append to metadata */
-               ret = ust_metadata_event_statedump(session, chan, event);
-               if (ret) {
-                       ERR("Error appending event metadata (errno = %d)", ret);
-                       rcu_read_unlock();
-                       return ret;
-               }
-       }
-
-       rcu_read_unlock();
-       return 0;
-
-error_free:
-       free(sig);
-       free(fields);
-       free(model_emf_uri);
-error_unlock:
-       rcu_read_unlock();
-       destroy_event(event);
-       return ret;
-}
-
-/*
- * For a given event in a registry, delete the entry and destroy the event.
- * This MUST be called within a RCU read side lock section.
- */
-void ust_registry_destroy_event(struct ust_registry_channel *chan,
-               struct ust_registry_event *event)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(chan);
-       LTTNG_ASSERT(event);
-
-       /* Delete the node first. */
-       iter.iter.node = &event->node.node;
-       ret = lttng_ht_del(chan->ht, &iter);
-       LTTNG_ASSERT(!ret);
-
-       call_rcu(&event->node.head, destroy_event_rcu);
-
-       return;
-}
-
-static void destroy_enum(struct ust_registry_enum *reg_enum)
-{
-       if (!reg_enum) {
-               return;
-       }
-       free(reg_enum->entries);
-       free(reg_enum);
-}
-
-static void destroy_enum_rcu(struct rcu_head *head)
-{
-       struct ust_registry_enum *reg_enum =
-               caa_container_of(head, struct ust_registry_enum, rcu_head);
-
-       destroy_enum(reg_enum);
-}
-
-/*
- * Lookup enumeration by name and comparing enumeration entries.
- * Needs to be called from RCU read-side critical section.
- */
-static struct ust_registry_enum *ust_registry_lookup_enum(
-               struct ust_registry_session *session,
-               const struct ust_registry_enum *reg_enum_lookup)
-{
-       struct ust_registry_enum *reg_enum = NULL;
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-
-       cds_lfht_lookup(session->enums->ht,
-                       ht_hash_enum((void *) reg_enum_lookup, lttng_ht_seed),
-                       ht_match_enum, reg_enum_lookup, &iter.iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-       if (!node) {
-               goto end;
-       }
-       reg_enum = caa_container_of(node, struct ust_registry_enum, node);
-end:
-       return reg_enum;
-}
-
-/*
- * Lookup enumeration by enum ID.
- * Needs to be called from RCU read-side critical section.
- */
-struct ust_registry_enum *
-       ust_registry_lookup_enum_by_id(struct ust_registry_session *session,
-               const char *enum_name, uint64_t enum_id)
-{
-       struct ust_registry_enum *reg_enum = NULL;
-       struct lttng_ht_node_str *node;
-       struct lttng_ht_iter iter;
-       struct ust_registry_enum reg_enum_lookup;
-
-       memset(&reg_enum_lookup, 0, sizeof(reg_enum_lookup));
-       strncpy(reg_enum_lookup.name, enum_name, LTTNG_UST_ABI_SYM_NAME_LEN);
-       reg_enum_lookup.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
-       reg_enum_lookup.id = enum_id;
-       cds_lfht_lookup(session->enums->ht,
-                       ht_hash_enum((void *) &reg_enum_lookup, lttng_ht_seed),
-                       ht_match_enum_id, &reg_enum_lookup, &iter.iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-       if (!node) {
-               goto end;
-       }
-       reg_enum = caa_container_of(node, struct ust_registry_enum, node);
-end:
-       return reg_enum;
-}
-
-/*
- * Create a ust_registry_enum from the given parameters and add it to the
- * registry hash table, or find it if already there.
- *
- * On success, return 0 else a negative value.
- *
- * Should be called with session registry mutex held.
- *
- * We receive ownership of entries.
- */
-int ust_registry_create_or_find_enum(struct ust_registry_session *session,
-               int session_objd, char *enum_name,
-               struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries,
-               uint64_t *enum_id)
-{
-       int ret = 0;
-       struct cds_lfht_node *nodep;
-       struct ust_registry_enum *reg_enum = NULL, *old_reg_enum;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(enum_name);
-
-       rcu_read_lock();
-
-       /*
-        * This should not happen but since it comes from the UST tracer, an
-        * external party, don't assert and simply validate values.
-        */
-       if (session_objd < 0) {
-               ret = -EINVAL;
-               goto end;
-       }
-
-       /* Check if the enumeration was already dumped */
-       reg_enum = zmalloc(sizeof(*reg_enum));
-       if (!reg_enum) {
-               PERROR("zmalloc ust registry enumeration");
-               ret = -ENOMEM;
-               goto end;
-       }
-       strncpy(reg_enum->name, enum_name, LTTNG_UST_ABI_SYM_NAME_LEN);
-       reg_enum->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
-       /* entries will be owned by reg_enum. */
-       reg_enum->entries = entries;
-       reg_enum->nr_entries = nr_entries;
-       entries = NULL;
-
-       old_reg_enum = ust_registry_lookup_enum(session, reg_enum);
-       if (old_reg_enum) {
-               DBG("enum %s already in sess_objd: %u", enum_name, session_objd);
-               /* Fall through. Use prior enum. */
-               destroy_enum(reg_enum);
-               reg_enum = old_reg_enum;
-       } else {
-               DBG("UST registry creating enum: %s, sess_objd: %u",
-                               enum_name, session_objd);
-               if (session->next_enum_id == -1ULL) {
-                       ret = -EOVERFLOW;
-                       destroy_enum(reg_enum);
-                       goto end;
-               }
-               reg_enum->id = session->next_enum_id++;
-               cds_lfht_node_init(&reg_enum->node.node);
-               nodep = cds_lfht_add_unique(session->enums->ht,
-                               ht_hash_enum(reg_enum, lttng_ht_seed),
-                               ht_match_enum_id, reg_enum,
-                               &reg_enum->node.node);
-               LTTNG_ASSERT(nodep == &reg_enum->node.node);
-       }
-       DBG("UST registry reply with enum %s with id %" PRIu64 " in sess_objd: %u",
-                       enum_name, reg_enum->id, session_objd);
-       *enum_id = reg_enum->id;
-end:
-       free(entries);
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * For a given enumeration in a registry, delete the entry and destroy
- * the enumeration.
- * This MUST be called within a RCU read side lock section.
- */
-static void ust_registry_destroy_enum(struct ust_registry_session *reg_session,
-               struct ust_registry_enum *reg_enum)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-
-       LTTNG_ASSERT(reg_session);
-       LTTNG_ASSERT(reg_enum);
-
-       /* Delete the node first. */
-       iter.iter.node = &reg_enum->node.node;
-       ret = lttng_ht_del(reg_session->enums, &iter);
-       LTTNG_ASSERT(!ret);
-       call_rcu(&reg_enum->rcu_head, destroy_enum_rcu);
-}
-
-/*
- * We need to execute ht_destroy outside of RCU read-side critical
- * section and outside of call_rcu thread, so we postpone its execution
- * using ht_cleanup_push. It is simpler than to change the semantic of
- * the many callers of delete_ust_app_session().
- */
-static
-void destroy_channel_rcu(struct rcu_head *head)
-{
-       struct ust_registry_channel *chan =
-               caa_container_of(head, struct ust_registry_channel, rcu_head);
-
-       if (chan->ht) {
-               ht_cleanup_push(chan->ht);
-       }
-       free(chan->ctx_fields);
-       free(chan);
-}
-
-/*
- * Destroy every element of the registry and free the memory. This does NOT
- * free the registry pointer since it might not have been allocated before so
- * it's the caller responsability.
- */
-static void destroy_channel(struct ust_registry_channel *chan, bool notif)
-{
-       struct lttng_ht_iter iter;
-       struct ust_registry_event *event;
-       enum lttng_error_code cmd_ret;
-
-       LTTNG_ASSERT(chan);
-
-       if (notif) {
-               cmd_ret = notification_thread_command_remove_channel(
-                               the_notification_thread_handle,
-                               chan->consumer_key, LTTNG_DOMAIN_UST);
-               if (cmd_ret != LTTNG_OK) {
-                       ERR("Failed to remove channel from notification thread");
-               }
-       }
-
-       if (chan->ht) {
-               rcu_read_lock();
-               /* Destroy all event associated with this registry. */
-               cds_lfht_for_each_entry(
-                               chan->ht->ht, &iter.iter, event, node.node) {
-                       /* Delete the node from the ht and free it. */
-                       ust_registry_destroy_event(chan, event);
-               }
-               rcu_read_unlock();
-       }
-       call_rcu(&chan->rcu_head, destroy_channel_rcu);
-}
-
-/*
- * Initialize registry with default values.
- */
-int ust_registry_channel_add(struct ust_registry_session *session,
-               uint64_t key)
-{
-       int ret = 0;
-       struct ust_registry_channel *chan;
-
-       LTTNG_ASSERT(session);
-
-       chan = zmalloc(sizeof(*chan));
-       if (!chan) {
-               PERROR("zmalloc ust registry channel");
-               ret = -ENOMEM;
-               goto error_alloc;
-       }
-
-       chan->ht = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
-       if (!chan->ht) {
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       /* Set custom match function. */
-       chan->ht->match_fct = ht_match_event;
-       chan->ht->hash_fct = ht_hash_event;
-
-       /*
-        * Assign a channel ID right now since the event notification comes
-        * *before* the channel notify so the ID needs to be set at this point so
-        * the metadata can be dumped for that event.
-        */
-       if (ust_registry_is_max_id(session->used_channel_id)) {
-               ret = -1;
-               goto error;
-       }
-       chan->chan_id = ust_registry_get_next_chan_id(session);
-
-       rcu_read_lock();
-       lttng_ht_node_init_u64(&chan->node, key);
-       lttng_ht_add_unique_u64(session->channels, &chan->node);
-       rcu_read_unlock();
-
-       return 0;
-
-error:
-       destroy_channel(chan, false);
-error_alloc:
-       return ret;
-}
-
-/*
- * Find a channel in the given registry. RCU read side lock MUST be acquired
- * before calling this function and as long as the event reference is kept by
- * the caller.
- *
- * On success, the pointer is returned else NULL.
- */
-struct ust_registry_channel *ust_registry_channel_find(
-               struct ust_registry_session *session, uint64_t key)
-{
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-       struct ust_registry_channel *chan = NULL;
-
-       LTTNG_ASSERT(session);
-       LTTNG_ASSERT(session->channels);
-
-       DBG3("UST registry channel finding key %" PRIu64, key);
-
-       lttng_ht_lookup(session->channels, &key, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (!node) {
-               goto end;
-       }
-       chan = caa_container_of(node, struct ust_registry_channel, node);
-
-end:
-       return chan;
-}
-
-/*
- * Remove channel using key from registry and free memory.
- */
-void ust_registry_channel_del_free(struct ust_registry_session *session,
-               uint64_t key, bool notif)
-{
-       struct lttng_ht_iter iter;
-       struct ust_registry_channel *chan;
-       int ret;
-
-       LTTNG_ASSERT(session);
-
-       rcu_read_lock();
-       chan = ust_registry_channel_find(session, key);
-       if (!chan) {
-               rcu_read_unlock();
-               goto end;
-       }
-
-       iter.iter.node = &chan->node.node;
-       ret = lttng_ht_del(session->channels, &iter);
-       LTTNG_ASSERT(!ret);
-       rcu_read_unlock();
-       destroy_channel(chan, notif);
-
-end:
-       return;
-}
-
-/*
- * Initialize registry with default values and set the newly allocated session
- * pointer to sessionp.
- *
- * Return 0 on success and sessionp is set or else return -1 and sessionp is
- * kept untouched.
- */
-int ust_registry_session_init(struct ust_registry_session **sessionp,
-               struct ust_app *app,
-               uint32_t bits_per_long,
-               uint32_t uint8_t_alignment,
-               uint32_t uint16_t_alignment,
-               uint32_t uint32_t_alignment,
-               uint32_t uint64_t_alignment,
-               uint32_t long_alignment,
-               int byte_order,
-               uint32_t major,
-               uint32_t minor,
-               const char *root_shm_path,
-               const char *shm_path,
-               uid_t euid,
-               gid_t egid,
-               uint64_t tracing_id,
-               uid_t tracing_uid)
-{
-       int ret;
-       struct ust_registry_session *session;
-
-       LTTNG_ASSERT(sessionp);
-
-       session = zmalloc(sizeof(*session));
-       if (!session) {
-               PERROR("zmalloc ust registry session");
-               goto error_alloc;
-       }
-
-       pthread_mutex_init(&session->lock, NULL);
-       session->bits_per_long = bits_per_long;
-       session->uint8_t_alignment = uint8_t_alignment;
-       session->uint16_t_alignment = uint16_t_alignment;
-       session->uint32_t_alignment = uint32_t_alignment;
-       session->uint64_t_alignment = uint64_t_alignment;
-       session->long_alignment = long_alignment;
-       session->byte_order = byte_order;
-       session->metadata_fd = -1;
-       session->uid = euid;
-       session->gid = egid;
-       session->next_enum_id = 0;
-       session->major = major;
-       session->minor = minor;
-       strncpy(session->root_shm_path, root_shm_path,
-               sizeof(session->root_shm_path));
-       session->root_shm_path[sizeof(session->root_shm_path) - 1] = '\0';
-       if (shm_path[0]) {
-               strncpy(session->shm_path, shm_path,
-                       sizeof(session->shm_path));
-               session->shm_path[sizeof(session->shm_path) - 1] = '\0';
-               strncpy(session->metadata_path, shm_path,
-                       sizeof(session->metadata_path));
-               session->metadata_path[sizeof(session->metadata_path) - 1] = '\0';
-               strncat(session->metadata_path, "/metadata",
-                       sizeof(session->metadata_path)
-                               - strlen(session->metadata_path) - 1);
-       }
-       if (session->shm_path[0]) {
-               ret = run_as_mkdir_recursive(session->shm_path,
-                       S_IRWXU | S_IRWXG,
-                       euid, egid);
-               if (ret) {
-                       PERROR("run_as_mkdir_recursive");
-                       goto error;
-               }
-       }
-       if (session->metadata_path[0]) {
-               /* Create metadata file */
-               ret = run_as_open(session->metadata_path,
-                       O_WRONLY | O_CREAT | O_EXCL,
-                       S_IRUSR | S_IWUSR, euid, egid);
-               if (ret < 0) {
-                       PERROR("Opening metadata file");
-                       goto error;
-               }
-               session->metadata_fd = ret;
-       }
-
-       session->enums = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
-       if (!session->enums) {
-               ERR("Failed to create enums hash table");
-               goto error;
-       }
-       /* hash/match functions are specified at call site. */
-       session->enums->match_fct = NULL;
-       session->enums->hash_fct = NULL;
-
-       session->channels = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-       if (!session->channels) {
-               goto error;
-       }
-
-       ret = lttng_uuid_generate(session->uuid);
-       if (ret) {
-               ERR("Failed to generate UST uuid (errno = %d)", ret);
-               goto error;
-       }
-
-       session->tracing_id = tracing_id;
-       session->tracing_uid = tracing_uid;
-
-       pthread_mutex_lock(&session->lock);
-       ret = ust_metadata_session_statedump(session, app, major, minor);
-       pthread_mutex_unlock(&session->lock);
-       if (ret) {
-               ERR("Failed to generate session metadata (errno = %d)", ret);
-               goto error;
-       }
-
-       *sessionp = session;
-
-       return 0;
-
-error:
-       ust_registry_session_destroy(session);
-       free(session);
-error_alloc:
-       return -1;
-}
-
-/*
- * Destroy session registry. This does NOT free the given pointer since it
- * might get passed as a reference. The registry lock should NOT be acquired.
- */
-void ust_registry_session_destroy(struct ust_registry_session *reg)
-{
-       int ret;
-       struct lttng_ht_iter iter;
-       struct ust_registry_channel *chan;
-       struct ust_registry_enum *reg_enum;
-
-       if (!reg) {
-               return;
-       }
-
-       /* On error, EBUSY can be returned if lock. Code flow error. */
-       ret = pthread_mutex_destroy(&reg->lock);
-       LTTNG_ASSERT(!ret);
-
-       if (reg->channels) {
-               rcu_read_lock();
-               /* Destroy all event associated with this registry. */
-               cds_lfht_for_each_entry(reg->channels->ht, &iter.iter, chan,
-                               node.node) {
-                       /* Delete the node from the ht and free it. */
-                       ret = lttng_ht_del(reg->channels, &iter);
-                       LTTNG_ASSERT(!ret);
-                       destroy_channel(chan, true);
-               }
-               rcu_read_unlock();
-               ht_cleanup_push(reg->channels);
-       }
-
-       free(reg->metadata);
-       if (reg->metadata_fd >= 0) {
-               ret = close(reg->metadata_fd);
-               if (ret) {
-                       PERROR("close");
-               }
-               ret = run_as_unlink(reg->metadata_path,
-                               reg->uid, reg->gid);
-               if (ret) {
-                       PERROR("unlink");
-               }
-       }
-       if (reg->root_shm_path[0]) {
-               /*
-                * Try deleting the directory hierarchy.
-                */
-               (void) run_as_rmdir_recursive(reg->root_shm_path,
-                               reg->uid, reg->gid,
-                               LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
-       }
-       /* Destroy the enum hash table */
-       if (reg->enums) {
-               rcu_read_lock();
-               /* Destroy all enum entries associated with this registry. */
-               cds_lfht_for_each_entry(reg->enums->ht, &iter.iter, reg_enum,
-                               node.node) {
-                       ust_registry_destroy_enum(reg, reg_enum);
-               }
-               rcu_read_unlock();
-               ht_cleanup_push(reg->enums);
-       }
-}
diff --git a/src/bin/lttng-sessiond/ust-registry.cpp b/src/bin/lttng-sessiond/ust-registry.cpp
new file mode 100644 (file)
index 0000000..1734652
--- /dev/null
@@ -0,0 +1,1056 @@
+/*
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <inttypes.h>
+
+#include <common/common.h>
+#include <common/hashtable/utils.h>
+#include <lttng/lttng.h>
+
+#include "ust-registry.h"
+#include "ust-app.h"
+#include "ust-field-utils.h"
+#include "utils.h"
+#include "lttng-sessiond.h"
+#include "notification-thread-commands.h"
+
+
+/*
+ * Hash table match function for event in the registry.
+ */
+static int ht_match_event(struct cds_lfht_node *node, const void *_key)
+{
+       const struct ust_registry_event *key;
+       struct ust_registry_event *event;
+       int i;
+
+       LTTNG_ASSERT(node);
+       LTTNG_ASSERT(_key);
+
+       event = caa_container_of(node, struct ust_registry_event, node.node);
+       LTTNG_ASSERT(event);
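+       /* C++ does not implicitly convert from 'void *'; cast explicitly. */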
+       key = (ust_registry_event *) _key;
+
+       /* It has to be a perfect match. First, compare the event names. */
+       if (strncmp(event->name, key->name, sizeof(event->name))) {
+               goto no_match;
+       }
+
+       /* Compare log levels. */
+       if (event->loglevel_value != key->loglevel_value) {
+               goto no_match;
+       }
+
+       /* Compare the number of fields. */
+       if (event->nr_fields != key->nr_fields) {
+               goto no_match;
+       }
+
+       /* Compare each field individually. */
+       for (i = 0; i < event->nr_fields; i++) {
+               if (!match_lttng_ust_ctl_field(&event->fields[i], &key->fields[i])) {
+                       goto no_match;
+               }
+       }
+
+       /* Compare model URI. */
+       if (event->model_emf_uri != NULL && key->model_emf_uri == NULL) {
+               goto no_match;
+       } else if (event->model_emf_uri == NULL && key->model_emf_uri != NULL) {
+               goto no_match;
+       } else if (event->model_emf_uri != NULL && key->model_emf_uri != NULL) {
+               if (strcmp(event->model_emf_uri, key->model_emf_uri)) {
+                       goto no_match;
+               }
+       }
+
+       /* Match */
+       return 1;
+
+no_match:
+       return 0;
+}
+
+static unsigned long ht_hash_event(const void *_key, unsigned long seed)
+{
+       uint64_t hashed_key;
+       const struct ust_registry_event *key = (ust_registry_event *) _key;
+
+       LTTNG_ASSERT(key);
+
+       hashed_key = (uint64_t) hash_key_str(key->name, seed);
+
+       return hash_key_u64(&hashed_key, seed);
+}
+
+static int compare_enums(const struct ust_registry_enum *reg_enum_a,
+               const struct ust_registry_enum *reg_enum_b)
+{
+       int ret = 0;
+       size_t i;
+
+       LTTNG_ASSERT(strcmp(reg_enum_a->name, reg_enum_b->name) == 0);
+       if (reg_enum_a->nr_entries != reg_enum_b->nr_entries) {
+               ret = -1;
+               goto end;
+       }
+       for (i = 0; i < reg_enum_a->nr_entries; i++) {
+               const struct lttng_ust_ctl_enum_entry *entries_a, *entries_b;
+
+               entries_a = &reg_enum_a->entries[i];
+               entries_b = &reg_enum_b->entries[i];
+               if (entries_a->start.value != entries_b->start.value) {
+                       ret = -1;
+                       goto end;
+               }
+               if (entries_a->end.value != entries_b->end.value) {
+                       ret = -1;
+                       goto end;
+               }
+               if (entries_a->start.signedness != entries_b->start.signedness) {
+                       ret = -1;
+                       goto end;
+               }
+               if (entries_a->end.signedness != entries_b->end.signedness) {
+                       ret = -1;
+                       goto end;
+               }
+
+               if (strcmp(entries_a->string, entries_b->string)) {
+                       ret = -1;
+                       goto end;
+               }
+       }
+end:
+       return ret;
+}
+
+/*
+ * Hash table match function for enumerations in the session. Match is
+ * performed on enumeration name, and confirmed by comparing the enum
+ * entries.
+ */
+static int ht_match_enum(struct cds_lfht_node *node, const void *_key)
+{
+       struct ust_registry_enum *_enum;
+       const struct ust_registry_enum *key;
+
+       LTTNG_ASSERT(node);
+       LTTNG_ASSERT(_key);
+
+       _enum = caa_container_of(node, struct ust_registry_enum,
+                       node.node);
+       LTTNG_ASSERT(_enum);
+       key = (ust_registry_enum *) _key;
+
+       if (strncmp(_enum->name, key->name, LTTNG_UST_ABI_SYM_NAME_LEN)) {
+               goto no_match;
+       }
+       if (compare_enums(_enum, key)) {
+               goto no_match;
+       }
+
+       /* Match. */
+       return 1;
+
+no_match:
+       return 0;
+}
+
+/*
+ * Hash table match function for enumerations in the session. Match is
+ * performed by enumeration ID.
+ */
+static int ht_match_enum_id(struct cds_lfht_node *node, const void *_key)
+{
+       struct ust_registry_enum *_enum;
+       const struct ust_registry_enum *key = (ust_registry_enum *) _key;
+
+       LTTNG_ASSERT(node);
+       LTTNG_ASSERT(_key);
+
+       _enum = caa_container_of(node, struct ust_registry_enum, node.node);
+       LTTNG_ASSERT(_enum);
+
+       if (_enum->id != key->id) {
+               goto no_match;
+       }
+
+       /* Match. */
+       return 1;
+
+no_match:
+       return 0;
+}
+
+/*
+ * Hash table hash function for enumerations in the session. The
+ * enumeration name is used for hashing.
+ */
+static unsigned long ht_hash_enum(void *_key, unsigned long seed)
+{
+       struct ust_registry_enum *key = (ust_registry_enum *) _key;
+
+       LTTNG_ASSERT(key);
+       return hash_key_str(key->name, seed);
+}
+
+/*
+ * Return negative value on error, 0 if OK.
+ *
+ * TODO: we could add stricter verification of more types to catch
+ * errors in liblttng-ust implementation earlier than consumption by the
+ * trace reader.
+ */
+static
+int validate_event_field(struct lttng_ust_ctl_field *field,
+               const char *event_name,
+               struct ust_app *app)
+{
+       int ret = 0;
+
+       switch (field->type.atype) {
+       case lttng_ust_ctl_atype_integer:
+       case lttng_ust_ctl_atype_enum:
+       case lttng_ust_ctl_atype_array:
+       case lttng_ust_ctl_atype_sequence:
+       case lttng_ust_ctl_atype_string:
+       case lttng_ust_ctl_atype_variant:
+       case lttng_ust_ctl_atype_array_nestable:
+       case lttng_ust_ctl_atype_sequence_nestable:
+       case lttng_ust_ctl_atype_enum_nestable:
+       case lttng_ust_ctl_atype_variant_nestable:
+               break;
+       case lttng_ust_ctl_atype_struct:
+               if (field->type.u.legacy._struct.nr_fields != 0) {
+                       WARN("Unsupported non-empty struct field.");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       case lttng_ust_ctl_atype_struct_nestable:
+               if (field->type.u.struct_nestable.nr_fields != 0) {
+                       WARN("Unsupported non-empty struct field.");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+
+       case lttng_ust_ctl_atype_float:
+               switch (field->type.u._float.mant_dig) {
+               case 0:
+                       WARN("UST application '%s' (pid: %d) has unknown float mantissa '%u' "
+                               "in field '%s', rejecting event '%s'",
+                               app->name, app->pid,
+                               field->type.u._float.mant_dig,
+                               field->name,
+                               event_name);
+                       ret = -EINVAL;
+                       goto end;
+               default:
+                       break;
+               }
+               break;
+
+       default:
+               ret = -ENOENT;
+               goto end;
+       }
+end:
+       return ret;
+}
+
+static
+int validate_event_fields(size_t nr_fields, struct lttng_ust_ctl_field *fields,
+               const char *event_name, struct ust_app *app)
+{
+       unsigned int i;
+
+       for (i = 0; i < nr_fields; i++) {
+               if (validate_event_field(&fields[i], event_name, app) < 0)
+                       return -EINVAL;
+       }
+       return 0;
+}
+
+/*
+ * Allocate event and initialize it. This does NOT set a valid event id from a
+ * registry.
+ */
+static struct ust_registry_event *alloc_event(int session_objd,
+               int channel_objd, char *name, char *sig, size_t nr_fields,
+               struct lttng_ust_ctl_field *fields, int loglevel_value,
+               char *model_emf_uri, struct ust_app *app)
+{
+       struct ust_registry_event *event = NULL;
+
+       /*
+        * Ensure that the field content is valid.
+        */
+       if (validate_event_fields(nr_fields, fields, name, app) < 0) {
+               return NULL;
+       }
+
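+       /* zmalloc() returns 'void *', which must be cast explicitly in C++. */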
+       event = (ust_registry_event *) zmalloc(sizeof(*event));
+       if (!event) {
+               PERROR("zmalloc ust registry event");
+               goto error;
+       }
+
+       event->session_objd = session_objd;
+       event->channel_objd = channel_objd;
+       /* Allocated by ustctl. */
+       event->signature = sig;
+       event->nr_fields = nr_fields;
+       event->fields = fields;
+       event->loglevel_value = loglevel_value;
+       event->model_emf_uri = model_emf_uri;
+       if (name) {
+               /* Copy event name and force NULL byte. */
+               strncpy(event->name, name, sizeof(event->name));
+               event->name[sizeof(event->name) - 1] = '\0';
+       }
+       cds_lfht_node_init(&event->node.node);
+
+error:
+       return event;
+}
+
+/*
+ * Free event data structure. This does NOT delete it from any hash table. It's
+ * safe to pass a NULL pointer. This should be called from a call_rcu callback
+ * if the event was previously deleted from an RCU hash table.
+ */
+static void destroy_event(struct ust_registry_event *event)
+{
+       if (!event) {
+               return;
+       }
+
+       free(event->fields);
+       free(event->model_emf_uri);
+       free(event->signature);
+       free(event);
+}
+
+/*
+ * Destroy event function call of the call RCU.
+ */
+static void destroy_event_rcu(struct rcu_head *head)
+{
+       struct lttng_ht_node_u64 *node =
+               caa_container_of(head, struct lttng_ht_node_u64, head);
+       struct ust_registry_event *event =
+               caa_container_of(node, struct ust_registry_event, node);
+
+       destroy_event(event);
+}
+
+/*
+ * Find an event using the name and signature in the given registry. RCU read
+ * side lock MUST be acquired before calling this function and as long as the
+ * event reference is kept by the caller.
+ *
+ * On success, the event pointer is returned else NULL.
+ */
+struct ust_registry_event *ust_registry_find_event(
+               struct ust_registry_channel *chan, char *name, char *sig)
+{
+       struct lttng_ht_node_u64 *node;
+       struct lttng_ht_iter iter;
+       struct ust_registry_event *event = NULL;
+       struct ust_registry_event key;
+
+       LTTNG_ASSERT(chan);
+       LTTNG_ASSERT(name);
+       LTTNG_ASSERT(sig);
+
+       /* Setup key for the match function. */
+       strncpy(key.name, name, sizeof(key.name));
+       key.name[sizeof(key.name) - 1] = '\0';
+       key.signature = sig;
+
+       cds_lfht_lookup(chan->ht->ht, chan->ht->hash_fct(&key, lttng_ht_seed),
+                       chan->ht->match_fct, &key, &iter.iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (!node) {
+               goto end;
+       }
+       event = caa_container_of(node, struct ust_registry_event, node);
+
+end:
+       return event;
+}
+
+/*
+ * Create a ust_registry_event from the given parameters and add it to the
+ * registry hash table. If event_id is valid, it is set with the newly created
+ * event id.
+ *
+ * On success, return 0; otherwise, return a negative value. The created event
+ * MUST be unique, so -EINVAL is returned on a duplicate entry. On error,
+ * event_id is untouched.
+ *
+ * Should be called with session registry mutex held.
+ */
+int ust_registry_create_event(struct ust_registry_session *session,
+               uint64_t chan_key, int session_objd, int channel_objd, char *name,
+               char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields,
+               int loglevel_value, char *model_emf_uri, int buffer_type,
+               uint32_t *event_id_p, struct ust_app *app)
+{
+       int ret;
+       uint32_t event_id;
+       struct cds_lfht_node *nptr;
+       struct ust_registry_event *event = NULL;
+       struct ust_registry_channel *chan;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(name);
+       LTTNG_ASSERT(sig);
+       LTTNG_ASSERT(event_id_p);
+
+       rcu_read_lock();
+
+       /*
+        * This should not happen but since it comes from the UST tracer, an
+        * This should not happen, but since the values come from the UST
+        * tracer, an external party, don't assert; simply validate them.
+       if (session_objd < 0 || channel_objd < 0) {
+               ret = -EINVAL;
+               goto error_free;
+       }
+
+       chan = ust_registry_channel_find(session, chan_key);
+       if (!chan) {
+               ret = -EINVAL;
+               goto error_free;
+       }
+
+       /* Check if we've reached the maximum possible id. */
+       if (ust_registry_is_max_id(chan->used_event_id)) {
+               ret = -ENOENT;
+               goto error_free;
+       }
+
+       event = alloc_event(session_objd, channel_objd, name, sig, nr_fields,
+                       fields, loglevel_value, model_emf_uri, app);
+       if (!event) {
+               ret = -ENOMEM;
+               goto error_free;
+       }
+
+       DBG3("UST registry creating event with event: %s, sig: %s, id: %u, "
+                       "chan_objd: %u, sess_objd: %u, chan_id: %u", event->name,
+                       event->signature, event->id, event->channel_objd,
+                       event->session_objd, chan->chan_id);
+
+       /*
+        * This is an add-unique operation with a custom match function for
+        * events. The nodes are matched using the event name and signature.
+        */
+       nptr = cds_lfht_add_unique(chan->ht->ht, chan->ht->hash_fct(event,
+                               lttng_ht_seed), chan->ht->match_fct, event, &event->node.node);
+       if (nptr != &event->node.node) {
+               if (buffer_type == LTTNG_BUFFER_PER_UID) {
+                       /*
+                        * This is normal, we just have to send the event id of the
+                        * returned node and make sure we destroy the previously allocated
+                        * event object.
+                        */
+                       destroy_event(event);
+                       event = caa_container_of(nptr, struct ust_registry_event,
+                                       node.node);
+                       LTTNG_ASSERT(event);
+                       event_id = event->id;
+               } else {
+                       ERR("UST registry create event add unique failed for event: %s, "
+                                       "sig: %s, id: %u, chan_objd: %u, sess_objd: %u",
+                                       event->name, event->signature, event->id,
+                                       event->channel_objd, event->session_objd);
+                       ret = -EINVAL;
+                       goto error_unlock;
+               }
+       } else {
+               /* Request next event id if the node was successfully added. */
+               event_id = event->id = ust_registry_get_next_event_id(chan);
+       }
+
+       *event_id_p = event_id;
+
+       if (!event->metadata_dumped) {
+               /* Append to metadata */
+               ret = ust_metadata_event_statedump(session, chan, event);
+               if (ret) {
+                       ERR("Error appending event metadata (errno = %d)", ret);
+                       rcu_read_unlock();
+                       return ret;
+               }
+       }
+
+       rcu_read_unlock();
+       return 0;
+
+error_free:
+       free(sig);
+       free(fields);
+       free(model_emf_uri);
+error_unlock:
+       rcu_read_unlock();
+       destroy_event(event);
+       return ret;
+}
+
+/*
+ * For a given event in a registry, delete the entry and destroy the event.
+ * This MUST be called from within an RCU read-side lock section.
+ */
+void ust_registry_destroy_event(struct ust_registry_channel *chan,
+               struct ust_registry_event *event)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(chan);
+       LTTNG_ASSERT(event);
+
+       /* Delete the node first. */
+       iter.iter.node = &event->node.node;
+       ret = lttng_ht_del(chan->ht, &iter);
+       LTTNG_ASSERT(!ret);
+
+       call_rcu(&event->node.head, destroy_event_rcu);
+
+       return;
+}
+
+static void destroy_enum(struct ust_registry_enum *reg_enum)
+{
+       if (!reg_enum) {
+               return;
+       }
+       free(reg_enum->entries);
+       free(reg_enum);
+}
+
+static void destroy_enum_rcu(struct rcu_head *head)
+{
+       struct ust_registry_enum *reg_enum =
+               caa_container_of(head, struct ust_registry_enum, rcu_head);
+
+       destroy_enum(reg_enum);
+}
+
+/*
+ * Lookup enumeration by name and comparing enumeration entries.
+ * Needs to be called from RCU read-side critical section.
+ */
+static struct ust_registry_enum *ust_registry_lookup_enum(
+               struct ust_registry_session *session,
+               const struct ust_registry_enum *reg_enum_lookup)
+{
+       struct ust_registry_enum *reg_enum = NULL;
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+
+       cds_lfht_lookup(session->enums->ht,
+                       ht_hash_enum((void *) reg_enum_lookup, lttng_ht_seed),
+                       ht_match_enum, reg_enum_lookup, &iter.iter);
+       node = lttng_ht_iter_get_node_str(&iter);
+       if (!node) {
+               goto end;
+       }
+       reg_enum = caa_container_of(node, struct ust_registry_enum, node);
+end:
+       return reg_enum;
+}
+
+/*
+ * Lookup enumeration by enum ID.
+ * Needs to be called from RCU read-side critical section.
+ */
+struct ust_registry_enum *
+       ust_registry_lookup_enum_by_id(struct ust_registry_session *session,
+               const char *enum_name, uint64_t enum_id)
+{
+       struct ust_registry_enum *reg_enum = NULL;
+       struct lttng_ht_node_str *node;
+       struct lttng_ht_iter iter;
+       struct ust_registry_enum reg_enum_lookup;
+
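+       /* The enum name is used for hashing; the id is used for matching. */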
+       memset(&reg_enum_lookup, 0, sizeof(reg_enum_lookup));
+       strncpy(reg_enum_lookup.name, enum_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+       reg_enum_lookup.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+       reg_enum_lookup.id = enum_id;
+       cds_lfht_lookup(session->enums->ht,
+                       ht_hash_enum((void *) &reg_enum_lookup, lttng_ht_seed),
+                       ht_match_enum_id, &reg_enum_lookup, &iter.iter);
+       node = lttng_ht_iter_get_node_str(&iter);
+       if (!node) {
+               goto end;
+       }
+       reg_enum = caa_container_of(node, struct ust_registry_enum, node);
+end:
+       return reg_enum;
+}
+
+/*
+ * Create a ust_registry_enum from the given parameters and add it to the
+ * registry hash table, or find it if already there.
+ *
+ * Return 0 on success, or a negative value on error.
+ *
+ * Should be called with session registry mutex held.
+ *
+ * This function takes ownership of the entries array.
+ */
+int ust_registry_create_or_find_enum(struct ust_registry_session *session,
+               int session_objd, char *enum_name,
+               struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries,
+               uint64_t *enum_id)
+{
+       int ret = 0;
+       struct cds_lfht_node *nodep;
+       struct ust_registry_enum *reg_enum = NULL, *old_reg_enum;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(enum_name);
+
+       rcu_read_lock();
+
+       /*
+        * This should not happen but since it comes from the UST tracer, an
+        * external party, don't assert and simply validate values.
+        */
+       if (session_objd < 0) {
+               ret = -EINVAL;
+               goto end;
+       }
+
+       /* Check if the enumeration was already dumped */
+       reg_enum = (ust_registry_enum *) zmalloc(sizeof(*reg_enum));
+       if (!reg_enum) {
+               PERROR("zmalloc ust registry enumeration");
+               ret = -ENOMEM;
+               goto end;
+       }
+       strncpy(reg_enum->name, enum_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+       reg_enum->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+       /* entries will be owned by reg_enum. */
+       reg_enum->entries = entries;
+       reg_enum->nr_entries = nr_entries;
+       entries = NULL;
+
+       old_reg_enum = ust_registry_lookup_enum(session, reg_enum);
+       if (old_reg_enum) {
+               DBG("enum %s already in sess_objd: %u", enum_name, session_objd);
+               /* Fall through. Use prior enum. */
+               destroy_enum(reg_enum);
+               reg_enum = old_reg_enum;
+       } else {
+               DBG("UST registry creating enum: %s, sess_objd: %u",
+                               enum_name, session_objd);
+               if (session->next_enum_id == -1ULL) {
+                       ret = -EOVERFLOW;
+                       destroy_enum(reg_enum);
+                       goto end;
+               }
+               reg_enum->id = session->next_enum_id++;
+               cds_lfht_node_init(&reg_enum->node.node);
+               nodep = cds_lfht_add_unique(session->enums->ht,
+                               ht_hash_enum(reg_enum, lttng_ht_seed),
+                               ht_match_enum_id, reg_enum,
+                               &reg_enum->node.node);
+               LTTNG_ASSERT(nodep == &reg_enum->node.node);
+       }
+       DBG("UST registry reply with enum %s with id %" PRIu64 " in sess_objd: %u",
+                       enum_name, reg_enum->id, session_objd);
+       *enum_id = reg_enum->id;
+end:
+       free(entries);
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * For a given enumeration in a registry, delete the entry and destroy
+ * the enumeration.
+ * This MUST be called from within an RCU read-side lock section.
+ */
+static void ust_registry_destroy_enum(struct ust_registry_session *reg_session,
+               struct ust_registry_enum *reg_enum)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+
+       LTTNG_ASSERT(reg_session);
+       LTTNG_ASSERT(reg_enum);
+
+       /* Delete the node first. */
+       iter.iter.node = &reg_enum->node.node;
+       ret = lttng_ht_del(reg_session->enums, &iter);
+       LTTNG_ASSERT(!ret);
+       call_rcu(&reg_enum->rcu_head, destroy_enum_rcu);
+}
+
+/*
+ * We need to execute ht_destroy outside of the RCU read-side critical
+ * section and outside of the call_rcu thread, so we postpone its execution
+ * using ht_cleanup_push. It is simpler than changing the semantics of the
+ * many callers of delete_ust_app_session().
+ */
+static
+void destroy_channel_rcu(struct rcu_head *head)
+{
+       struct ust_registry_channel *chan =
+               caa_container_of(head, struct ust_registry_channel, rcu_head);
+
+       if (chan->ht) {
+               ht_cleanup_push(chan->ht);
+       }
+       free(chan->ctx_fields);
+       free(chan);
+}
+
+/*
+ * Destroy every element of the registry and free the memory. This does NOT
+ * free the registry pointer itself since it might not have been dynamically
+ * allocated; freeing it is the caller's responsibility.
+ */
+static void destroy_channel(struct ust_registry_channel *chan, bool notif)
+{
+       struct lttng_ht_iter iter;
+       struct ust_registry_event *event;
+       enum lttng_error_code cmd_ret;
+
+       LTTNG_ASSERT(chan);
+
+       if (notif) {
+               cmd_ret = notification_thread_command_remove_channel(
+                               the_notification_thread_handle,
+                               chan->consumer_key, LTTNG_DOMAIN_UST);
+               if (cmd_ret != LTTNG_OK) {
+                       ERR("Failed to remove channel from notification thread");
+               }
+       }
+
+       if (chan->ht) {
+               rcu_read_lock();
+               /* Destroy all events associated with this channel. */
+               cds_lfht_for_each_entry(
+                               chan->ht->ht, &iter.iter, event, node.node) {
+                       /* Delete the node from the ht and free it. */
+                       ust_registry_destroy_event(chan, event);
+               }
+               rcu_read_unlock();
+       }
+       call_rcu(&chan->rcu_head, destroy_channel_rcu);
+}
+
+/*
+ * Add a new channel, initialized with default values, to the given registry.
+ */
+int ust_registry_channel_add(struct ust_registry_session *session,
+               uint64_t key)
+{
+       int ret = 0;
+       struct ust_registry_channel *chan;
+
+       LTTNG_ASSERT(session);
+
+       chan = (ust_registry_channel *) zmalloc(sizeof(*chan));
+       if (!chan) {
+               PERROR("zmalloc ust registry channel");
+               ret = -ENOMEM;
+               goto error_alloc;
+       }
+
+       chan->ht = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+       if (!chan->ht) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       /* Set custom match function. */
+       chan->ht->match_fct = ht_match_event;
+       chan->ht->hash_fct = ht_hash_event;
+
+       /*
+        * Assign a channel ID right now: the event notification comes
+        * *before* the channel notification, so the ID must already be set
+        * for that event's metadata to be dumped.
+        */
+       if (ust_registry_is_max_id(session->used_channel_id)) {
+               ret = -1;
+               goto error;
+       }
+       chan->chan_id = ust_registry_get_next_chan_id(session);
+
+       rcu_read_lock();
+       lttng_ht_node_init_u64(&chan->node, key);
+       lttng_ht_add_unique_u64(session->channels, &chan->node);
+       rcu_read_unlock();
+
+       return 0;
+
+error:
+       destroy_channel(chan, false);
+error_alloc:
+       return ret;
+}
+
+/*
+ * Find a channel in the given registry. The RCU read-side lock MUST be held
+ * before calling this function and for as long as the caller keeps a
+ * reference to the returned channel.
+ *
+ * On success, return the channel pointer; otherwise return NULL.
+ */
+struct ust_registry_channel *ust_registry_channel_find(
+               struct ust_registry_session *session, uint64_t key)
+{
+       struct lttng_ht_node_u64 *node;
+       struct lttng_ht_iter iter;
+       struct ust_registry_channel *chan = NULL;
+
+       LTTNG_ASSERT(session);
+       LTTNG_ASSERT(session->channels);
+
+       DBG3("UST registry channel finding key %" PRIu64, key);
+
+       lttng_ht_lookup(session->channels, &key, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (!node) {
+               goto end;
+       }
+       chan = caa_container_of(node, struct ust_registry_channel, node);
+
+end:
+       return chan;
+}
+
+/*
+ * Remove channel using key from registry and free memory.
+ */
+void ust_registry_channel_del_free(struct ust_registry_session *session,
+               uint64_t key, bool notif)
+{
+       struct lttng_ht_iter iter;
+       struct ust_registry_channel *chan;
+       int ret;
+
+       LTTNG_ASSERT(session);
+
+       rcu_read_lock();
+       chan = ust_registry_channel_find(session, key);
+       if (!chan) {
+               rcu_read_unlock();
+               goto end;
+       }
+
+       iter.iter.node = &chan->node.node;
+       ret = lttng_ht_del(session->channels, &iter);
+       LTTNG_ASSERT(!ret);
+       rcu_read_unlock();
+       destroy_channel(chan, notif);
+
+end:
+       return;
+}
+
+/*
+ * Initialize a registry with default values and store the newly allocated
+ * session in *sessionp.
+ *
+ * Return 0 on success, with *sessionp set; otherwise return -1 and leave
+ * *sessionp untouched.
+ */
+int ust_registry_session_init(struct ust_registry_session **sessionp,
+               struct ust_app *app,
+               uint32_t bits_per_long,
+               uint32_t uint8_t_alignment,
+               uint32_t uint16_t_alignment,
+               uint32_t uint32_t_alignment,
+               uint32_t uint64_t_alignment,
+               uint32_t long_alignment,
+               int byte_order,
+               uint32_t major,
+               uint32_t minor,
+               const char *root_shm_path,
+               const char *shm_path,
+               uid_t euid,
+               gid_t egid,
+               uint64_t tracing_id,
+               uid_t tracing_uid)
+{
+       int ret;
+       struct ust_registry_session *session;
+
+       LTTNG_ASSERT(sessionp);
+
+       session = (ust_registry_session *) zmalloc(sizeof(*session));
+       if (!session) {
+               PERROR("zmalloc ust registry session");
+               goto error_alloc;
+       }
+
+       pthread_mutex_init(&session->lock, NULL);
+       session->bits_per_long = bits_per_long;
+       session->uint8_t_alignment = uint8_t_alignment;
+       session->uint16_t_alignment = uint16_t_alignment;
+       session->uint32_t_alignment = uint32_t_alignment;
+       session->uint64_t_alignment = uint64_t_alignment;
+       session->long_alignment = long_alignment;
+       session->byte_order = byte_order;
+       session->metadata_fd = -1;
+       session->uid = euid;
+       session->gid = egid;
+       session->next_enum_id = 0;
+       session->major = major;
+       session->minor = minor;
+       strncpy(session->root_shm_path, root_shm_path,
+               sizeof(session->root_shm_path));
+       session->root_shm_path[sizeof(session->root_shm_path) - 1] = '\0';
+       if (shm_path[0]) {
+               strncpy(session->shm_path, shm_path,
+                       sizeof(session->shm_path));
+               session->shm_path[sizeof(session->shm_path) - 1] = '\0';
+               strncpy(session->metadata_path, shm_path,
+                       sizeof(session->metadata_path));
+               session->metadata_path[sizeof(session->metadata_path) - 1] = '\0';
+               strncat(session->metadata_path, "/metadata",
+                       sizeof(session->metadata_path)
+                               - strlen(session->metadata_path) - 1);
+       }
+       if (session->shm_path[0]) {
+               ret = run_as_mkdir_recursive(session->shm_path,
+                       S_IRWXU | S_IRWXG,
+                       euid, egid);
+               if (ret) {
+                       PERROR("run_as_mkdir_recursive");
+                       goto error;
+               }
+       }
+       if (session->metadata_path[0]) {
+               /* Create metadata file */
+               ret = run_as_open(session->metadata_path,
+                       O_WRONLY | O_CREAT | O_EXCL,
+                       S_IRUSR | S_IWUSR, euid, egid);
+               if (ret < 0) {
+                       PERROR("Opening metadata file");
+                       goto error;
+               }
+               session->metadata_fd = ret;
+       }
+
+       session->enums = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+       if (!session->enums) {
+               ERR("Failed to create enums hash table");
+               goto error;
+       }
+       /* hash/match functions are specified at call site. */
+       session->enums->match_fct = NULL;
+       session->enums->hash_fct = NULL;
+
+       session->channels = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       if (!session->channels) {
+               goto error;
+       }
+
+       ret = lttng_uuid_generate(session->uuid);
+       if (ret) {
+               ERR("Failed to generate UST uuid (errno = %d)", ret);
+               goto error;
+       }
+
+       session->tracing_id = tracing_id;
+       session->tracing_uid = tracing_uid;
+
+       pthread_mutex_lock(&session->lock);
+       ret = ust_metadata_session_statedump(session, app, major, minor);
+       pthread_mutex_unlock(&session->lock);
+       if (ret) {
+               ERR("Failed to generate session metadata (errno = %d)", ret);
+               goto error;
+       }
+
+       *sessionp = session;
+
+       return 0;
+
+error:
+       ust_registry_session_destroy(session);
+       free(session);
+error_alloc:
+       return -1;
+}
+
+/*
+ * Destroy a session registry. This does NOT free the given pointer since it
+ * might have been passed by reference; the caller owns it. The registry lock
+ * must NOT be held when calling this function.
+ */
+void ust_registry_session_destroy(struct ust_registry_session *reg)
+{
+       int ret;
+       struct lttng_ht_iter iter;
+       struct ust_registry_channel *chan;
+       struct ust_registry_enum *reg_enum;
+
+       if (!reg) {
+               return;
+       }
+
+       /* EBUSY is returned if the mutex is still locked, which is a code flow error. */
+       ret = pthread_mutex_destroy(&reg->lock);
+       LTTNG_ASSERT(!ret);
+
+       if (reg->channels) {
+               rcu_read_lock();
+               /* Destroy all channels associated with this registry. */
+               cds_lfht_for_each_entry(reg->channels->ht, &iter.iter, chan,
+                               node.node) {
+                       /* Delete the node from the ht and free it. */
+                       ret = lttng_ht_del(reg->channels, &iter);
+                       LTTNG_ASSERT(!ret);
+                       destroy_channel(chan, true);
+               }
+               rcu_read_unlock();
+               ht_cleanup_push(reg->channels);
+       }
+
+       free(reg->metadata);
+       if (reg->metadata_fd >= 0) {
+               ret = close(reg->metadata_fd);
+               if (ret) {
+                       PERROR("close");
+               }
+               ret = run_as_unlink(reg->metadata_path,
+                               reg->uid, reg->gid);
+               if (ret) {
+                       PERROR("unlink");
+               }
+       }
+       if (reg->root_shm_path[0]) {
+               /*
+                * Try deleting the directory hierarchy.
+                */
+               (void) run_as_rmdir_recursive(reg->root_shm_path,
+                               reg->uid, reg->gid,
+                               LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
+       }
+       /* Destroy the enum hash table */
+       if (reg->enums) {
+               rcu_read_lock();
+               /* Destroy all enum entries associated with this registry. */
+               cds_lfht_for_each_entry(reg->enums->ht, &iter.iter, reg_enum,
+                               node.node) {
+                       ust_registry_destroy_enum(reg, reg_enum);
+               }
+               rcu_read_unlock();
+               ht_cleanup_push(reg->enums);
+       }
+}
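
Many of the allocation sites above now cast the result of zmalloc(): C
implicitly converts void * to any object pointer type, but C++ does not, so
each allocation needs an explicit cast once the file is compiled as C++. A
minimal sketch of the difference, using a made-up widget type rather than
anything from the tree:

    #include <stdlib.h>

    struct widget {
            int id;
    };

    static struct widget *widget_create(void)
    {
            /*
             * Valid C, but rejected by a C++ compiler (no implicit
             * conversion from void *):
             *
             *     struct widget *w = calloc(1, sizeof(*w));
             */
            struct widget *w = (struct widget *) calloc(1, sizeof(*w));

            return w;       /* may be NULL; the caller must check */
    }
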
diff --git a/src/bin/lttng-sessiond/ust-sigbus.c b/src/bin/lttng-sessiond/ust-sigbus.c
deleted file mode 100644 (file)
index 52a7ac2..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (C) 2021 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <lttng/ust-sigbus.h>
-#include <lttng/ust-ctl.h>
-#include "ust-sigbus.h"
-
-DEFINE_LTTNG_UST_SIGBUS_STATE();
-
-void lttng_ust_handle_sigbus(void *address)
-{
-        lttng_ust_ctl_sigbus_handle(address);
-}
diff --git a/src/bin/lttng-sessiond/ust-sigbus.cpp b/src/bin/lttng-sessiond/ust-sigbus.cpp
new file mode 100644 (file)
index 0000000..52a7ac2
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2021 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <lttng/ust-sigbus.h>
+#include <lttng/ust-ctl.h>
+#include "ust-sigbus.h"
+
+DEFINE_LTTNG_UST_SIGBUS_STATE();
+
+void lttng_ust_handle_sigbus(void *address)
+{
+        lttng_ust_ctl_sigbus_handle(address);
+}
diff --git a/src/bin/lttng-sessiond/utils.c b/src/bin/lttng-sessiond/utils.c
deleted file mode 100644 (file)
index 6936a03..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#define _LGPL_SOURCE
-#include <stdlib.h>
-#include <unistd.h>
-
-#include <common/error.h>
-
-#include "utils.h"
-#include "snapshot.h"
-#include "lttng-sessiond.h"
-
-int the_ht_cleanup_pipe[2] = {-1, -1};
-
-/*
- * Write to writable pipe used to notify a thread.
- */
-int notify_thread_pipe(int wpipe)
-{
-       ssize_t ret;
-
-       /* Ignore if the pipe is invalid. */
-       if (wpipe < 0) {
-               return 0;
-       }
-
-       ret = lttng_write(wpipe, "!", 1);
-       if (ret < 1) {
-               PERROR("write poll pipe");
-       }
-
-       return (int) ret;
-}
-
-void ht_cleanup_push(struct lttng_ht *ht)
-{
-       ssize_t ret;
-       int fd = the_ht_cleanup_pipe[1];
-
-       if (!ht) {
-               return;
-       }
-       if (fd < 0)
-               return;
-       ret = lttng_write(fd, &ht, sizeof(ht));
-       if (ret < sizeof(ht)) {
-               PERROR("write ht cleanup pipe %d", fd);
-               if (ret < 0) {
-                       ret = -errno;
-               }
-               goto error;
-       }
-
-       /* All good. Don't send back the write positive ret value. */
-       ret = 0;
-error:
-       LTTNG_ASSERT(!ret);
-}
-
-int loglevels_match(int a_loglevel_type, int a_loglevel_value,
-       int b_loglevel_type, int b_loglevel_value, int loglevel_all_type)
-{
-       int match = 1;
-
-       if (a_loglevel_type == b_loglevel_type) {
-               /* Same loglevel type. */
-               if (b_loglevel_type != loglevel_all_type) {
-                       /*
-                        * Loglevel value must also match since the loglevel
-                        * type is not all.
-                        */
-                       if (a_loglevel_value != b_loglevel_value) {
-                               match = 0;
-                       }
-               }
-       } else {
-               /* Loglevel type is different: no match. */
-               match = 0;
-       }
-
-       return match;
-}
-
-const char *session_get_base_path(const struct ltt_session *session)
-{
-       return consumer_output_get_base_path(session->consumer);
-}
-
-const char *consumer_output_get_base_path(const struct consumer_output *output)
-{
-       return output->type == CONSUMER_DST_LOCAL ?
-                       output->dst.session_root_path :
-                       output->dst.net.base_dir;
-}
diff --git a/src/bin/lttng-sessiond/utils.cpp b/src/bin/lttng-sessiond/utils.cpp
new file mode 100644 (file)
index 0000000..6936a03
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#define _LGPL_SOURCE
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <common/error.h>
+
+#include "utils.h"
+#include "snapshot.h"
+#include "lttng-sessiond.h"
+
+int the_ht_cleanup_pipe[2] = {-1, -1};
+
+/*
+ * Write to the writable end of the pipe used to notify a thread.
+ */
+int notify_thread_pipe(int wpipe)
+{
+       ssize_t ret;
+
+       /* Ignore if the pipe is invalid. */
+       if (wpipe < 0) {
+               return 0;
+       }
+
+       ret = lttng_write(wpipe, "!", 1);
+       if (ret < 1) {
+               PERROR("write poll pipe");
+       }
+
+       return (int) ret;
+}
+
+void ht_cleanup_push(struct lttng_ht *ht)
+{
+       ssize_t ret;
+       int fd = the_ht_cleanup_pipe[1];
+
+       if (!ht) {
+               return;
+       }
+       if (fd < 0)
+               return;
+       ret = lttng_write(fd, &ht, sizeof(ht));
+       if (ret < sizeof(ht)) {
+               PERROR("write ht cleanup pipe %d", fd);
+               if (ret < 0) {
+                       ret = -errno;
+               }
+               goto error;
+       }
+
+       /* All good. Don't send back the write positive ret value. */
+       ret = 0;
+error:
+       LTTNG_ASSERT(!ret);
+}
+
+int loglevels_match(int a_loglevel_type, int a_loglevel_value,
+       int b_loglevel_type, int b_loglevel_value, int loglevel_all_type)
+{
+       int match = 1;
+
+       if (a_loglevel_type == b_loglevel_type) {
+               /* Same loglevel type. */
+               if (b_loglevel_type != loglevel_all_type) {
+                       /*
+                        * Loglevel value must also match since the loglevel
+                        * type is not all.
+                        */
+                       if (a_loglevel_value != b_loglevel_value) {
+                               match = 0;
+                       }
+               }
+       } else {
+               /* Loglevel type is different: no match. */
+               match = 0;
+       }
+
+       return match;
+}
+
+const char *session_get_base_path(const struct ltt_session *session)
+{
+       return consumer_output_get_base_path(session->consumer);
+}
+
+const char *consumer_output_get_base_path(const struct consumer_output *output)
+{
+       return output->type == CONSUMER_DST_LOCAL ?
+                       output->dst.session_root_path :
+                       output->dst.net.base_dir;
+}
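
ht_cleanup_push() defers hash-table destruction by writing the table pointer
to a pipe that a dedicated cleanup thread drains, so the destruction never
runs inside an RCU read-side critical section or the call_rcu thread. A
reduced sketch of that hand-off, with invented names (cleanup_fd, my_table)
that are not part of lttng-tools:

    #include <stdlib.h>
    #include <unistd.h>

    struct my_table {
            int dummy;
    };

    /* Write end of the cleanup pipe, set up at initialization time. */
    static int cleanup_fd = -1;

    static void my_table_destroy(struct my_table *t)
    {
            free(t);
    }

    /* Producer side: defer destruction to the cleanup thread. */
    static void cleanup_push(struct my_table *t)
    {
            if (!t || cleanup_fd < 0) {
                    return;
            }
            /* The pointer value itself is the message. */
            (void) write(cleanup_fd, &t, sizeof(t));
    }

    /* Consumer side: the cleanup thread reads pointers and destroys tables. */
    static void cleanup_loop(int read_fd)
    {
            struct my_table *t;

            while (read(read_fd, &t, sizeof(t)) == sizeof(t)) {
                    my_table_destroy(t);
            }
    }
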
index 58f1ede2e94c34f9d745a65d2e94014645a19ea8..ecd764d7c019f1c5e5902b848676f78df93ef223 100644 (file)
 #include <stddef.h>
 #include <stdint.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_dynamic_buffer;
 
 struct lttng_buffer_view {
@@ -92,4 +96,8 @@ bool lttng_buffer_view_contains_string(const struct lttng_buffer_view *buf,
                const char *str,
                size_t len_with_null_terminator);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_BUFFER_VIEW_H */
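
The same extern "C" guard is added to every exported C header touched by this
change so that the declarations keep C linkage when the headers are included
from C++ translation units. The shape of the guard, shown on a hypothetical
header (example_do_something is not a real lttng-tools symbol):

    #ifndef EXAMPLE_H
    #define EXAMPLE_H

    #ifdef __cplusplus
    extern "C" {
    #endif

    /* Declarations keep C linkage when included from C++. */
    int example_do_something(int fd);

    #ifdef __cplusplus
    }
    #endif

    #endif /* EXAMPLE_H */
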
index d7ef4aeddd27b8d581a691a5f842179f2ca16374..54ed7363ec493b6c0c2b14db940d2402edf6ccf4 100644 (file)
 #include "common/macros.h"
 #include "common/sessiond-comm/sessiond-comm.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * offsets are absolute from start of bytecode.
  */
@@ -43,10 +47,6 @@ struct literal_double {
        double v;
 } LTTNG_PACKED;
 
-struct literal_string {
-       char string[0];
-} LTTNG_PACKED;
-
 enum bytecode_op {
        BYTECODE_OP_UNKNOWN                             = 0,
 
@@ -262,4 +262,8 @@ unsigned int bytecode_get_len(struct lttng_bytecode *bytecode)
        return bytecode->len;
 }
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_COMMON_BYTECODE_H */
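
The removed literal_string struct only contained a zero-length array, and such
a struct does not necessarily have the same size in C and in C++, which makes
it unsafe to share across the language boundary (and it was unused anyway). A
small illustration, with an invented struct name:

    #include <stdio.h>

    /* GNU extension: a struct whose only member is a zero-length array. */
    struct empty_payload {
            char data[0];
    };

    int main(void)
    {
            /*
             * Prints 0 when built as GNU C; a C++ compiler may give the
             * struct a nonzero size, since C++ objects normally cannot be
             * empty.
             */
            printf("%zu\n", sizeof(struct empty_payload));
            return 0;
    }
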
index 50185a526ac5468a9aca446694c3bdf209479684..9a107201987f129ee4bd7dbdd12a359893fffebd 100644 (file)
 #include <sys/stat.h>
 #include <urcu/ref.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 enum lttng_directory_handle_rmdir_recursive_flags {
        LTTNG_DIRECTORY_HANDLE_FAIL_NON_EMPTY_FLAG = (1U << 0),
        LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG = (1U << 1),
@@ -259,4 +263,9 @@ bool lttng_directory_handle_uses_fd(
 bool lttng_directory_handle_equals(const struct lttng_directory_handle *lhs,
                const struct lttng_directory_handle *rhs);
 
+
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _COMPAT_PATH_HANDLE_H */
index e2a424abe132d564769d69de28aba40b50512324..75de9ba5fcf0a3ea60cbc086c9c60df3d680f0be 100644 (file)
 
 #include <common/common.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * Used by lttng_poll_clean to free the events structure in a lttng_poll_event.
  */
@@ -21,6 +25,10 @@ static inline void __lttng_poll_free(void *events)
        free(events);
 }
 
+#ifdef __cplusplus
+}
+#endif
+
 /*
  * epoll(7) implementation.
  */
@@ -30,6 +38,10 @@ static inline void __lttng_poll_free(void *events)
 #include <features.h>
 #include <common/compat/fcntl.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /* See man epoll(7) for this define path */
 #define COMPAT_EPOLL_PROC_PATH "/proc/sys/fs/epoll/max_user_watches"
 
@@ -208,6 +220,10 @@ static inline void lttng_poll_clean(struct lttng_poll_event *events)
        __lttng_poll_free((void *) events->events);
 }
 
+#ifdef __cplusplus
+}
+#endif
+
 #else  /* HAVE_EPOLL */
 /*
  * Fallback on poll(2) API
@@ -226,6 +242,10 @@ static inline void lttng_poll_clean(struct lttng_poll_event *events)
 #include <poll.h>
 #include <stdint.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 enum {
        /* Polling variables compatibility for poll */
        LPOLLIN = POLLIN,
@@ -377,6 +397,10 @@ static inline void lttng_poll_clean(struct lttng_poll_event *events)
        }
 }
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* HAVE_EPOLL */
 
 #endif /* _LTT_POLL_H */
index a6955b0b95b4e9435114187c6ebeb1adfd8093fd..ff74eb0992bfd8c7e76729d15602c5e0dcd604b2 100644 (file)
 #include <common/macros.h>
 #include <stdint.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct config_entry {
        /* section is NULL if the entry is not in a section */
        const char *section;
@@ -229,4 +233,8 @@ int config_load_session(const char *path, const char *session_name,
                int overwrite, unsigned int autoload,
                const struct config_load_session_override_attr *overrides);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _CONFIG_H */
index f4abd841726d41b1ad48fcf52d8eea9ec6b6d1f9..b6755addacb4453325aa2b122587677fe9d40638 100644 (file)
 
 #include <common/macros.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * Parse string as an application context of the form
  * "$app.provider_name:context_name" and return the provider name and context
@@ -24,4 +28,8 @@
 int parse_application_context(const char *str, char **provider_name,
                char **ctx_name);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_COMMON_CONTEXT_H */
index b1576a75795a5bd924f13fd7003e224808b7591b..21aa33416f50bacb38aae862a3a28ceab3839307 100644 (file)
 
 #include "optional.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_credentials {
        LTTNG_OPTIONAL(uid_t) uid;
        LTTNG_OPTIONAL(gid_t) gid;
@@ -32,4 +36,8 @@ bool lttng_credentials_is_equal_gid(const struct lttng_credentials *a,
 bool lttng_credentials_is_equal(const struct lttng_credentials *a,
                const struct lttng_credentials *b);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_CREDENTIALS_H */
index 2c88f06b7ddaa1dd1ac9224997e220e0dda8f3cb..8590779c27c3b62d1f2e5e3dced53a57914b4666 100644 (file)
 #include <unistd.h>
 #include <common/macros.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * Daemonize this process by forking and making the parent wait for the child
  * to signal it indicating readiness. Once received, the parent successfully
@@ -25,4 +29,8 @@
 int lttng_daemonize(pid_t *child_ppid, int *completion_flag,
                int close_fds);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_DAEMONIZE_H */
index 39b77470d68a4aa294858205cc7b365349f055c7..95b1dc29ba16fa5c87df7d00f798eec68ccc1a85 100644 (file)
 #include <pthread.h>
 #include <common/macros.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /* Default unix group name for tracing. */
 #define DEFAULT_TRACING_GROUP                   "tracing"
 
@@ -424,4 +428,8 @@ size_t default_get_ust_uid_channel_subbuf_size(void);
  */
 pthread_attr_t *default_pthread_attr(void);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _DEFAULTS_H */
index 0e0dedc9b01b9af35b10e3c74a5f21e9afc557c7..d4813fa3919b6c3d43dc536d308368f8f186b837 100644 (file)
 #include <stdint.h>
 #include <common/macros.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_buffer_view;
 
 struct lttng_dynamic_buffer {
@@ -92,4 +96,8 @@ void lttng_dynamic_buffer_reset(struct lttng_dynamic_buffer *buffer);
 size_t lttng_dynamic_buffer_get_capacity_left(
                struct lttng_dynamic_buffer *buffer);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_DYNAMIC_BUFFER_H */
index dab9e2bfaccab4017502f079aba83f5b2fe27e04..36a92473a0b6ec7c2c1162e779448f0885d3d4d7 100644 (file)
 
 #include <common/macros.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * Wrapper around a file descriptor providing reference counting semantics.
  *
@@ -44,4 +48,8 @@ int fd_handle_get_fd(struct fd_handle *handle);
  */
 struct fd_handle *fd_handle_copy(const struct fd_handle *handle);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* FS_HANDLE_H */
index 305434bf6e0d1a5de3e626d6e6ed114ec403ac69..f5dcdf319f8f6fdfd26a82210e9d8e7c0da46105 100644 (file)
 
 #include <common/sessiond-comm/sessiond-comm.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct bytecode_symbol_iterator;
 
 /*
@@ -33,4 +37,8 @@ const char *bytecode_symbol_iterator_get_name(
 
 void bytecode_symbol_iterator_destroy(struct bytecode_symbol_iterator *it);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_COMMON_FILTER_H */
index 602be318647fe751b86bc9b2205e3bb79b4255f3..92a8446746265daf53066e6bde370deea699a262 100644 (file)
@@ -9,9 +9,17 @@
 #ifndef _LTT_FUTEX_H
 #define _LTT_FUTEX_H
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 void futex_wait_update(int32_t *futex, int active);
 void futex_nto1_prepare(int32_t *futex);
 void futex_nto1_wait(int32_t *futex);
 void futex_nto1_wake(int32_t *futex);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _LTT_FUTEX_H */
index 2c77f0afcd8d7dc62f2f2b2b7d07f2ff27a4897e..e2dae968d0040de1136f40a689018099eb2fc538 100644 (file)
 #include <common/macros.h>
 #include <urcu/rculfhash.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 extern unsigned long lttng_ht_seed;
 
-typedef unsigned long (*hash_fct)(const void *_key, unsigned long seed);
+typedef unsigned long (*hash_fct_type)(const void *_key, unsigned long seed);
 typedef cds_lfht_match_fct hash_match_fct;
 
 enum lttng_ht_type {
@@ -29,7 +33,7 @@ enum lttng_ht_type {
 struct lttng_ht {
        struct cds_lfht *ht;
        cds_lfht_match_fct match_fct;
-       hash_fct hash_fct;
+       hash_fct_type hash_fct;
 };
 
 struct lttng_ht_iter {
@@ -122,4 +126,8 @@ struct lttng_ht_node_u64 *lttng_ht_iter_get_node_u64(
 struct lttng_ht_node_two_u64 *lttng_ht_iter_get_node_two_u64(
                struct lttng_ht_iter *iter);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _LTT_HT_H */
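
The hash_fct typedef is renamed to hash_fct_type because struct lttng_ht also
has a member named hash_fct: C tolerates the member and the typedef sharing a
name, but a C++ compiler treats the member declaration as changing the meaning
of the type name inside the struct and rejects it. A sketch of the conflict,
with invented names:

    typedef unsigned long (*hash_fn)(const void *key, unsigned long seed);

    struct lookup_table {
            /*
             * Fine in C, but a C++ compiler diagnoses the member below as
             * changing the meaning of the type name 'hash_fn' within the
             * struct:
             *
             *     hash_fn hash_fn;
             *
             * Renaming either the typedef or the member avoids the clash.
             */
            hash_fn hash;
    };
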
index 992643931f5e42568fa2bd06dd6f16bb260c072a..44f80ce4fb2b65a585f59626d3ba20e470208d3c 100644 (file)
 
 #include <stdint.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 unsigned long hash_key_ulong(const void *_key, unsigned long seed);
 unsigned long hash_key_u64(const void *_key, unsigned long seed);
 unsigned long hash_key_str(const void *key, unsigned long seed);
@@ -19,4 +23,8 @@ int hash_match_key_u64(const void *key1, const void *key2);
 int hash_match_key_str(const void *key1, const void *key2);
 int hash_match_key_two_u64(const void *key1, const void *key2);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _LTT_HT_UTILS_H */
index 3da733e9ef03db82c4f08a33345de07dbef0df8a..9dbd745e6bdaf52ef1ccebae2dffd7c3b1308e82 100644 (file)
 
 #include <inttypes.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_index_allocator;
 
 enum lttng_index_allocator_status {
@@ -49,4 +53,8 @@ enum lttng_index_allocator_status lttng_index_allocator_release(
  */
 void lttng_index_allocator_destroy(struct lttng_index_allocator *allocator);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _COMMON_INDEX_ALLOCATOR_H */
index e6805a983b706e4a31a414ab704fd65133fa59f0..c2fa6408bd53c4bfa623e2ad97f20ed66284f32f 100644 (file)
 #include <common/lttng-kernel-old.h>
 #include <common/sessiond-comm/sessiond-comm.h>        /* for struct lttng_filter_bytecode */
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 int kernctl_create_session(int fd);
 int kernctl_open_metadata(int fd, struct lttng_channel_attr *chops);
 int kernctl_create_channel(int fd, struct lttng_channel_attr *chops);
@@ -125,4 +129,8 @@ int kernctl_get_current_timestamp(int fd, uint64_t *ts);
 int kernctl_get_sequence_number(int fd, uint64_t *seq);
 int kernctl_get_instance_id(int fd, uint64_t *seq);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _LTTNG_KERNEL_CTL_H */
index 874501ef1749325b38ba6d4214990da1396cd74a..7530c912a98251160b608e0bc0b4bf4daefff42e 100644 (file)
@@ -101,13 +101,6 @@ void *zmalloc(size_t len)
 
 #define ASSERT_LOCKED(lock) LTTNG_ASSERT(pthread_mutex_trylock(&lock))
 
-/*
- * Get an aligned pointer to a value. This is meant
- * as a helper to pass an aligned pointer to a member in a packed structure
- * to a function.
- */
-#define ALIGNED_CONST_PTR(value) (((const typeof(value) []) { value }))
-
 /*
  * lttng_strncpy returns 0 on success, or nonzero on failure.
  * It checks that the @src string fits into @dst_len before performing
index dcb7ca8639fd67ddac916ea84816e24e0b2b2b5e..0501904cbc4d1cfa52a926be158afe5219712646 100644 (file)
 /*
  * Initialize an optional field as 'set' with a given value.
  */
-#define LTTNG_OPTIONAL_INIT_VALUE(val) { .value = val, .is_set = 1 }
+#define LTTNG_OPTIONAL_INIT_VALUE(val) { .is_set = 1, .value = val }
 
 /* Set the value of an optional field. */
 #define LTTNG_OPTIONAL_SET(field_ptr, val)     \
        do {                                    \
-               (field_ptr)->value = (val);     \
                (field_ptr)->is_set = 1;        \
+               (field_ptr)->value = (val);     \
        } while (0)
 
 /* Put an optional field in the "unset" (NULL-ed) state. */
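
The member order in LTTNG_OPTIONAL_INIT_VALUE is swapped so that the
designated initializers follow the struct's declaration order (is_set before
value): C accepts designators in any order, but C++ compilers that accept
designated initializers require declaration order. A reduced example with an
invented optional type; building it as C++ assumes C++20 (or a compiler
extension) for the designators:

    #include <stdint.h>

    struct optional_int {
            uint8_t is_set;
            int value;
    };

    /* 'is_set' must be designated before 'value' to satisfy C++. */
    #define OPTIONAL_INT_INIT(val) { .is_set = 1, .value = (val) }

    static struct optional_int answer = OPTIONAL_INT_INIT(42);
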
index 57a8342d70578c7be479c3bacfc6035a203937d9..af8519ab32641aaab739fa66985179ee025b47ba 100644 (file)
 #include <common/buffer-view.h>
 #include <common/dynamic-array.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_payload;
 struct fd_handle;
 
@@ -46,7 +50,26 @@ struct fd_handle;
 struct lttng_payload_view {
        struct lttng_buffer_view buffer;
        /* private */
-       const struct lttng_dynamic_pointer_array _fd_handles;
+
+       /*
+        * Avoid a -Wreturn-type-c-linkage warning with clang.
+        * gcc is more permissive with regards to this warning, but
+        * gcc is more permissive with regard to this warning, but
+        * clang is right that a structure containing a _const_ structure member
+        * is not a trivial type in the eyes of the C++ standard, theoretically affecting its
+        *   A trivial class is a class that is trivially copyable and has one or
+        *   more default constructors, all of which are either trivial or deleted and
+        *   at least one of which is not deleted.
+        *
+        * A const member implicitly deletes lttng_payload_view's constructor,
+        * making it non-trivial. This is not a problem for the moment as we are
+        * transitioning all code to C++11.
+        */
+#if !defined(__cplusplus)
+       const
+#endif
+       struct lttng_dynamic_pointer_array _fd_handles;
+
        struct {
                size_t *p_fd_handles_position;
                size_t fd_handles_position;
@@ -159,4 +182,8 @@ int lttng_payload_view_get_fd_handle_count(
 struct fd_handle *lttng_payload_view_pop_fd_handle(
                struct lttng_payload_view *payload_view);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_PAYLOAD_VIEW_H */
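
A stripped-down version of what the comment above describes, using invented
types: once the structure has a const member, the C++ type is no longer
trivial, which is what makes clang question returning it from a function with
C linkage. The property can be checked directly (C++11 or later):

    #include <type_traits>

    struct pointer_array {
            void *slots;
    };

    struct view_with_const {
            const struct pointer_array handles;     /* const member */
    };

    struct view_without_const {
            struct pointer_array handles;
    };

    /* The const member deletes the default constructor, so the type is not trivial. */
    static_assert(!std::is_trivial<view_with_const>::value,
                    "const member makes the type non-trivial");
    static_assert(std::is_trivial<view_without_const>::value,
                    "the plain aggregate stays trivial");
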
index d6c0cc19ea8a214bdc58db403adad58155da3186..e9bd8be9d5d2a48ad0a44c8db58edad3e3e4cffb 100644 (file)
 #include <common/dynamic-array.h>
 #include <common/fd-handle.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * An lttng_payload encompasses the 'data' (bytes) and any passed file
  * descriptors as part of a message between liblttng-ctl and the session
@@ -57,4 +61,8 @@ void lttng_payload_clear(struct lttng_payload *payload);
 int lttng_payload_push_fd_handle(struct lttng_payload *payload,
                struct fd_handle *fd_handle);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_PAYLOAD_H */
index d9f43d66e03265d71342aeace9e4d91b5dd8bebc..b94d871e48426d57d9c88bc419bb659f46babebc 100644 (file)
 #include <common/macros.h>
 #include <sys/types.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 enum lttng_pipe_state {
        LTTNG_PIPE_STATE_OPENED = 1,
        LTTNG_PIPE_STATE_CLOSED = 2,
@@ -81,4 +85,8 @@ int lttng_pipe_release_readfd(struct lttng_pipe *pipe);
 /* Returns and releases the write end of the pipe. */
 int lttng_pipe_release_writefd(struct lttng_pipe *pipe);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_PIPE_H */
index ac3668a5ae801c8461b3a0a0a0cd0413935f1c51..9923e8128e7234474376a0563dd8cb7442f95c14 100644 (file)
 #include <unistd.h>
 #include <common/macros.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * lttng_read and lttng_write take care of EINTR and partial read/write.
  * Upon success, they return the "count" received as parameter.
@@ -22,4 +26,8 @@
 ssize_t lttng_read(int fd, void *buf, size_t count);
 ssize_t lttng_write(int fd, const void *buf, size_t count);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_COMMON_READWRITE_H */
index 1fbe22676445ed8defb5d3299a3219858a3579f1..6053be796b0024277674fa0b5064e75a32e53572 100644 (file)
 #include <common/trace-chunk.h>
 #include <common/dynamic-array.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct relayd_stream_rotation_position {
        uint64_t stream_id;
        /*
@@ -77,4 +81,8 @@ int relayd_get_configuration(struct lttcomm_relayd_sock *sock,
                uint64_t query_flags,
                uint64_t *result_flags);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _RELAYD_H */
index 406dfca2219220f86a91d0cae26017ac82f3e9e1..ca084102e4ae8c17d424465c97016fb3c03a3698 100644 (file)
 #include <common/macros.h>
 #include <common/sessiond-comm/sessiond-comm.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * The run-as process is launched by forking without an exec*() call. This means
  * that any resource allocated before the run-as worker is launched should be
@@ -61,4 +65,8 @@ int run_as_create_worker(const char *procname,
                post_fork_cleanup_cb clean_up_func, void *clean_up_user_data);
 void run_as_destroy_worker(void);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _RUNAS_H */
index 357e10b08a938cfd9c0252872ec0d95ae6d4a528..93fb19bb51acb611cf9a4fd4d9af2b6532acd5cc 100644 (file)
@@ -87,19 +87,17 @@ error:
  */
 int lttcomm_bind_inet_sock(struct lttcomm_sock *sock)
 {
-       return bind(sock->fd,
-                       (const struct sockaddr *) ALIGNED_CONST_PTR(
-                                       sock->sockaddr.addr.sin),
-                       sizeof(sock->sockaddr.addr.sin));
+       struct sockaddr_in sockaddr = sock->sockaddr.addr.sin;
+
+       return bind(sock->fd, (const struct sockaddr *) &sockaddr, sizeof(sockaddr));
 }
 
 static
 int connect_no_timeout(struct lttcomm_sock *sock)
 {
-       return connect(sock->fd,
-                       (const struct sockaddr *) ALIGNED_CONST_PTR(
-                                       sock->sockaddr.addr.sin),
-                       sizeof(sock->sockaddr.addr.sin));
+       struct sockaddr_in sockaddr = sock->sockaddr.addr.sin;
+
+       return connect(sock->fd, (const struct sockaddr *) &sockaddr, sizeof(sockaddr));
 }
 
 static
@@ -109,6 +107,7 @@ int connect_with_timeout(struct lttcomm_sock *sock)
        int ret, flags, connect_ret;
        struct timespec orig_time, cur_time;
        unsigned long diff_ms;
+       struct sockaddr_in sockaddr;
 
        ret = fcntl(sock->fd, F_GETFL, 0);
        if (ret == -1) {
@@ -130,10 +129,8 @@ int connect_with_timeout(struct lttcomm_sock *sock)
                return -1;
        }
 
-       connect_ret = connect(sock->fd,
-                       (const struct sockaddr *) ALIGNED_CONST_PTR(
-                                       sock->sockaddr.addr.sin),
-                       sizeof(sock->sockaddr.addr.sin));
+       sockaddr = sock->sockaddr.addr.sin;
+       connect_ret = connect(sock->fd, (const struct sockaddr *) &sockaddr,
+                       sizeof(sockaddr));
        if (connect_ret == -1 && errno != EAGAIN && errno != EWOULDBLOCK &&
                        errno != EINPROGRESS) {
                goto error;
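
ALIGNED_CONST_PTR relied on a C compound literal, which is not valid C++, so
the call sites above now copy the packed address member into a properly
aligned local variable and pass that local's address instead. The general
shape of the workaround, with invented names and assuming the GCC/clang
packed attribute:

    struct packed_header {
            unsigned char tag;
            unsigned int length;    /* potentially misaligned member */
    } __attribute__((packed));

    /* Stand-in for an API that expects a pointer to a properly aligned value. */
    static unsigned int read_length(const unsigned int *length)
    {
            return *length;
    }

    static unsigned int header_length(const struct packed_header *hdr)
    {
            /* Copy to an aligned local rather than pointing into the packed struct. */
            unsigned int aligned_length = hdr->length;

            return read_length(&aligned_length);
    }
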
index 2db8c0890da184a0f429d13710977e507aede0e8..5c0828b2b3246c6ea9518474378f34378e4a08b6 100644 (file)
 
 #include "sessiond-comm.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /* See man tcp(7) for more detail about this value. */
 #define LTTCOMM_INET_PROC_SYN_RETRIES_PATH "/proc/sys/net/ipv4/tcp_syn_retries"
 #define LTTCOMM_INET_PROC_FIN_TIMEOUT_PATH "/proc/sys/net/ipv4/tcp_fin_timeout"
@@ -53,4 +57,8 @@ extern ssize_t lttcomm_sendmsg_inet_sock(struct lttcomm_sock *sock,
 /* Initialize inet communication layer. */
 extern void lttcomm_inet_init(void);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _LTTCOMM_INET_H */
index f705bc0c06176f42fd7375b99144cc765a2eeac7..8c0d0b795c64ebf48f534427387443fbdcf2ba08 100644 (file)
@@ -85,19 +85,15 @@ error:
  */
 int lttcomm_bind_inet6_sock(struct lttcomm_sock *sock)
 {
-       return bind(sock->fd,
-                       (const struct sockaddr *) ALIGNED_CONST_PTR(
-                                       sock->sockaddr.addr.sin6),
-                       sizeof(sock->sockaddr.addr.sin6));
+       struct sockaddr_in6 sockaddr = sock->sockaddr.addr.sin6;
+       return bind(sock->fd, (const struct sockaddr *) &sockaddr, sizeof(sockaddr));
 }
 
 static
 int connect_no_timeout(struct lttcomm_sock *sock)
 {
-       return connect(sock->fd,
-                       (const struct sockaddr *) ALIGNED_CONST_PTR(
-                                       sock->sockaddr.addr.sin6),
-                       sizeof(sock->sockaddr.addr.sin6));
+       struct sockaddr_in6 sockaddr = sock->sockaddr.addr.sin6;
+       return connect(sock->fd, (const struct sockaddr *) &sockaddr, sizeof(sockaddr));
 }
 
 static
@@ -107,6 +103,7 @@ int connect_with_timeout(struct lttcomm_sock *sock)
        int ret, flags, connect_ret;
        struct timespec orig_time, cur_time;
        unsigned long diff_ms;
+       struct sockaddr_in6 sockaddr;
 
        ret = fcntl(sock->fd, F_GETFL, 0);
        if (ret == -1) {
@@ -128,10 +125,8 @@ int connect_with_timeout(struct lttcomm_sock *sock)
                return -1;
        }
 
-       connect_ret = connect(sock->fd,
-                       (const struct sockaddr *) ALIGNED_CONST_PTR(
-                                       sock->sockaddr.addr.sin6),
-                       sizeof(sock->sockaddr.addr.sin6));
+       sockaddr = sock->sockaddr.addr.sin6;
+       connect_ret = connect(sock->fd, (const struct sockaddr *) &sockaddr,
+                       sizeof(sockaddr));
        if (connect_ret == -1 && errno != EAGAIN && errno != EWOULDBLOCK &&
                        errno != EINPROGRESS) {
                goto error;
index c8e1e1ae17cbb50b7d3da01e68e43cade5ae9158..65020bbfcd098a9b22f4d1e15abfa6b41e8ed9cd 100644 (file)
 #include "inet6.h"
 #include <common/unix.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /* Queue size of listen(2) */
 #define LTTNG_SESSIOND_COMM_MAX_LISTEN 64
 
@@ -931,4 +935,8 @@ void lttcomm_init(void);
 /* Get network timeout, in milliseconds */
 unsigned long lttcomm_get_network_timeout(void);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _LTTNG_SESSIOND_COMM_H */
index d714506b85b2f4a472a8186fad47c621126ed6ca..530507c35ae8ed8c839638501776f5749753f124 100644 (file)
@@ -9,8 +9,16 @@
 #ifndef _LTT_SHM_H
 #define _LTT_SHM_H
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 char *shm_ust_get_mmap(char *shm_path, int global);
 
 int shm_create_anonymous(const char *owner_name);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _LTT_SHM_H */
index 1f856f39761e61129328180849116e9b08486525..b613b1749dab4d4258bffc0857ad745d4883fe74 100644 (file)
 
 #include <urcu.h> /* for caa_likely/unlikely */
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 extern int lttng_testpoint_activated;
 
 void *lttng_testpoint_lookup(const char *name);
@@ -43,7 +47,7 @@ void *lttng_testpoint_lookup(const char *name);
                        ret = tp();                                     \
                } else {                                                \
                        if (!found) {                                   \
-                               tp = lttng_testpoint_lookup(tp_name);   \
+                               tp = (int (*)(void)) lttng_testpoint_lookup(tp_name);   \
                                if (tp) {                               \
                                        found = 1;                      \
                                        ret = tp();                     \
@@ -59,4 +63,8 @@ void *lttng_testpoint_lookup(const char *name);
 #define TESTPOINT_DECL(name)   \
        _TESTPOINT_DECL(name)
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* NTESTPOINT */
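
The new cast in the testpoint macro is needed because C++ will not implicitly
convert the void * returned by lttng_testpoint_lookup() to a function pointer.
A minimal sketch of the same conversion around a made-up dlsym()-style lookup
function:

    #include <stddef.h>

    /* Stand-in for a dlsym()-style lookup returning void *. */
    static void *lookup_symbol(const char *name)
    {
            (void) name;
            return NULL;
    }

    typedef int (*testpoint_fn)(void);

    static int run_testpoint(const char *name)
    {
            /*
             * C compilers commonly accept the implicit void * conversion
             * (this is how dlsym() results are typically used); C++ requires
             * the explicit function-pointer cast.
             */
            testpoint_fn tp = (testpoint_fn) lookup_symbol(name);

            return tp ? tp() : 0;
    }
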
index 4e4440b6afc19db02a810e7181183147305b1fc6..9bb4f3708319ea03f46a47532da772315f5f73d3 100644 (file)
 #include <stddef.h>
 #include <stdint.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * A trace chunk is a group of directories and files forming a (or a set of)
  * complete and independant trace(s). For instance, a trace archive chunk,
@@ -182,4 +186,8 @@ bool lttng_trace_chunk_get(struct lttng_trace_chunk *chunk);
 
 void lttng_trace_chunk_put(struct lttng_trace_chunk *chunk);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_TRACE_CHUNK_H */
index 820dff0b56893f86edeb1f40f8e289a8a0b6d6b3..e5c7435e34eb7256047304133d642eeeaf26e178 100644 (file)
 #include <common/payload.h>
 #include <common/payload-view.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 int lttcomm_create_unix_sock(const char *pathname);
 int lttcomm_create_anon_unix_socketpair(int *fds);
 int lttcomm_connect_unix_sock(const char *pathname);
@@ -51,4 +55,8 @@ ssize_t lttcomm_recv_creds_unix_sock(int sock, void *buf, size_t len,
 
 int lttcomm_setsockopt_creds_unix_sock(int sock);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _LTTCOMM_UNIX_H */
index 0d3e79aeeb4ad572a99c11680051750c8c304289..d95ea062e20e9d38d3141e54d7704efdb36591d1 100644 (file)
 #include <stdint.h>
 #include <inttypes.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * Includes final \0.
  */
@@ -59,4 +63,8 @@ void lttng_uuid_copy(lttng_uuid dst, const lttng_uuid src);
  */
 int lttng_uuid_generate(lttng_uuid uuid_out);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_UUID_H */
index ed870aeb071eb5095b9980417c5dba8c7c4ad5b1..30b51ee803b4e1f61efced2d700cbc74265f44a8 100644 (file)
 #include <stdbool.h>
 #include "macros.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct lttng_waiter {
        struct cds_wfs_node wait_queue_node;
        int32_t state;
@@ -33,4 +37,8 @@ void lttng_waiter_wait(struct lttng_waiter *waiter);
  */
 void lttng_waiter_wake_up(struct lttng_waiter *waiter);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LTTNG_WAITER_H */
index 7ff30d2662b897571123b25795dde0470d453480..c8deb2ec337026b2dc2a5024479f023c68c26500 100644 (file)
@@ -83,7 +83,7 @@ test_uri_LDADD = $(LIBTAP) $(LIBCOMMON) $(LIBHASHTABLE) $(DL_LIBS)
 
 RELAYD_OBJS = $(top_builddir)/src/bin/lttng-relayd/backward-compatibility-group-by.$(OBJEXT)
 
-test_session_SOURCES = test_session.c
+test_session_SOURCES = test_session.cpp
 test_session_LDADD = $(LIBTAP) $(LIBLTTNG_SESSIOND_COMMON) $(DL_LIBS)
 
 if HAVE_LIBLTTNG_UST_CTL
@@ -93,11 +93,11 @@ endif
 
 # UST data structures unit test
 if HAVE_LIBLTTNG_UST_CTL
-test_ust_data_SOURCES = test_ust_data.c
+test_ust_data_SOURCES = test_ust_data.cpp
 test_ust_data_LDADD = $(LIBTAP) $(LIBLTTNG_SESSIOND_COMMON) $(DL_LIBS)
 endif
 
-test_kernel_data_SOURCES = test_kernel_data.c
+test_kernel_data_SOURCES = test_kernel_data.cpp
 test_kernel_data_LDADD = $(LIBTAP) $(LIBLTTNG_SESSIOND_COMMON) $(DL_LIBS)
 
 # utils suffix for unit test
diff --git a/tests/unit/test_kernel_data.c b/tests/unit/test_kernel_data.c
deleted file mode 100644 (file)
index 79489e3..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <time.h>
-
-#include <common/compat/errno.h>
-#include <bin/lttng-sessiond/trace-kernel.h>
-#include <common/defaults.h>
-
-#include <tap/tap.h>
-
-#define RANDOM_STRING_LEN      11
-
-/* Number of TAP tests in this file */
-#define NUM_TESTS 11
-
-#ifdef HAVE_LIBLTTNG_UST_CTL
-#include <lttng/ust-sigbus.h>
-DEFINE_LTTNG_UST_SIGBUS_STATE();
-#endif
-
-static const char alphanum[] =
-       "0123456789"
-       "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-       "abcdefghijklmnopqrstuvwxyz";
-
-static struct ltt_kernel_session *kern;
-static char random_string[RANDOM_STRING_LEN];
-
-/*
- * Return random string of 10 characters.
- * Not thread-safe.
- */
-static char *get_random_string(void)
-{
-       int i;
-
-       for (i = 0; i < RANDOM_STRING_LEN - 1; i++) {
-               random_string[i] = alphanum[rand() % (sizeof(alphanum) - 1)];
-       }
-
-       random_string[RANDOM_STRING_LEN - 1] = '\0';
-
-       return random_string;
-}
-
-static void test_create_one_kernel_session(void)
-{
-       kern = trace_kernel_create_session();
-       ok(kern != NULL, "Create kernel session");
-
-       if (!kern) {
-               skip(1, "Kernel session is null");
-               return;
-       }
-       ok(kern->fd == -1 &&
-          kern->metadata_stream_fd == -1 &&
-          kern->consumer_fds_sent == 0 &&
-          kern->channel_count == 0 &&
-          kern->stream_count_global == 0 &&
-          kern->metadata == NULL,
-          "Validate kernel session");
-}
-
-static void test_create_kernel_metadata(void)
-{
-       LTTNG_ASSERT(kern != NULL);
-
-       kern->metadata = trace_kernel_create_metadata();
-       ok(kern->metadata != NULL, "Create kernel metadata");
-
-       ok(kern->metadata->fd == -1 &&
-          kern->metadata->conf != NULL &&
-          kern->metadata->conf->attr.overwrite
-                       == DEFAULT_METADATA_OVERWRITE &&
-          kern->metadata->conf->attr.subbuf_size
-                       == default_get_metadata_subbuf_size() &&
-          kern->metadata->conf->attr.num_subbuf
-                       == DEFAULT_METADATA_SUBBUF_NUM &&
-          kern->metadata->conf->attr.switch_timer_interval
-                       == DEFAULT_METADATA_SWITCH_TIMER &&
-          kern->metadata->conf->attr.read_timer_interval
-                       == DEFAULT_METADATA_READ_TIMER &&
-          kern->metadata->conf->attr.output
-                       == LTTNG_EVENT_MMAP,
-          "Validate kernel session metadata");
-
-       trace_kernel_destroy_metadata(kern->metadata);
-}
-
-static void test_create_kernel_channel(void)
-{
-       struct ltt_kernel_channel *chan;
-       struct lttng_channel attr;
-       struct lttng_channel_extended extended;
-
-       memset(&attr, 0, sizeof(attr));
-       memset(&extended, 0, sizeof(extended));
-       attr.attr.extended.ptr = &extended;
-
-       chan = trace_kernel_create_channel(&attr);
-       ok(chan != NULL, "Create kernel channel");
-
-       if (!chan) {
-               skip(1, "Channel is null");
-               return;
-       }
-
-       ok(chan->fd == -1 &&
-          chan->enabled == 1 &&
-          chan->stream_count == 0 &&
-          chan->channel->attr.overwrite  == attr.attr.overwrite,
-          "Validate kernel channel");
-
-       /* Init list in order to avoid sefaults from cds_list_del */
-       CDS_INIT_LIST_HEAD(&chan->list);
-       trace_kernel_destroy_channel(chan);
-}
-
-static void test_create_kernel_event(void)
-{
-       enum lttng_error_code ret;
-       struct ltt_kernel_event *event;
-       struct lttng_event ev;
-
-       memset(&ev, 0, sizeof(ev));
-       ok(!lttng_strncpy(ev.name, get_random_string(),
-                       RANDOM_STRING_LEN),
-               "Validate string length");
-       ev.type = LTTNG_EVENT_TRACEPOINT;
-       ev.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
-
-       ret = trace_kernel_create_event(&ev, NULL, NULL, &event);
-       ok(ret == LTTNG_OK, "Create kernel event");
-
-       if (!event) {
-               skip(1, "Event is null");
-               return;
-       }
-
-       ok(event->fd == -1 &&
-          event->enabled == 1 &&
-          event->event->instrumentation == LTTNG_KERNEL_ABI_TRACEPOINT &&
-          strlen(event->event->name),
-          "Validate kernel event");
-
-       /* Init list in order to avoid sefaults from cds_list_del */
-       CDS_INIT_LIST_HEAD(&event->list);
-       trace_kernel_destroy_event(event);
-}
-
-static void test_create_kernel_stream(void)
-{
-       struct ltt_kernel_stream *stream;
-
-       stream = trace_kernel_create_stream("stream1", 0);
-       ok(stream != NULL, "Create kernel stream");
-
-       if (!stream) {
-               skip(1, "Stream is null");
-               return;
-       }
-
-       ok(stream->fd == -1 &&
-          stream->state == 0,
-          "Validate kernel stream");
-
-       /* Init list in order to avoid sefaults from cds_list_del */
-       CDS_INIT_LIST_HEAD(&stream->list);
-       trace_kernel_destroy_stream(stream);
-}
-
-int main(int argc, char **argv)
-{
-       plan_tests(NUM_TESTS);
-
-       diag("Kernel data structure unit test");
-
-       test_create_one_kernel_session();
-       test_create_kernel_metadata();
-       test_create_kernel_channel();
-       test_create_kernel_event();
-       test_create_kernel_stream();
-
-       return exit_status();
-}
diff --git a/tests/unit/test_kernel_data.cpp b/tests/unit/test_kernel_data.cpp
new file mode 100644 (file)
index 0000000..79489e3
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <time.h>
+
+#include <common/compat/errno.h>
+#include <bin/lttng-sessiond/trace-kernel.h>
+#include <common/defaults.h>
+
+#include <tap/tap.h>
+
+#define RANDOM_STRING_LEN      11
+
+/* Number of TAP tests in this file */
+#define NUM_TESTS 11
+
+#ifdef HAVE_LIBLTTNG_UST_CTL
+#include <lttng/ust-sigbus.h>
+DEFINE_LTTNG_UST_SIGBUS_STATE();
+#endif
+
+static const char alphanum[] =
+       "0123456789"
+       "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+       "abcdefghijklmnopqrstuvwxyz";
+
+static struct ltt_kernel_session *kern;
+static char random_string[RANDOM_STRING_LEN];
+
+/*
+ * Return random string of 10 characters.
+ * Not thread-safe.
+ */
+static char *get_random_string(void)
+{
+       int i;
+
+       for (i = 0; i < RANDOM_STRING_LEN - 1; i++) {
+               random_string[i] = alphanum[rand() % (sizeof(alphanum) - 1)];
+       }
+
+       random_string[RANDOM_STRING_LEN - 1] = '\0';
+
+       return random_string;
+}
+
+static void test_create_one_kernel_session(void)
+{
+       kern = trace_kernel_create_session();
+       ok(kern != NULL, "Create kernel session");
+
+       if (!kern) {
+               skip(1, "Kernel session is null");
+               return;
+       }
+       ok(kern->fd == -1 &&
+          kern->metadata_stream_fd == -1 &&
+          kern->consumer_fds_sent == 0 &&
+          kern->channel_count == 0 &&
+          kern->stream_count_global == 0 &&
+          kern->metadata == NULL,
+          "Validate kernel session");
+}
+
+static void test_create_kernel_metadata(void)
+{
+       LTTNG_ASSERT(kern != NULL);
+
+       kern->metadata = trace_kernel_create_metadata();
+       ok(kern->metadata != NULL, "Create kernel metadata");
+
+       ok(kern->metadata->fd == -1 &&
+          kern->metadata->conf != NULL &&
+          kern->metadata->conf->attr.overwrite
+                       == DEFAULT_METADATA_OVERWRITE &&
+          kern->metadata->conf->attr.subbuf_size
+                       == default_get_metadata_subbuf_size() &&
+          kern->metadata->conf->attr.num_subbuf
+                       == DEFAULT_METADATA_SUBBUF_NUM &&
+          kern->metadata->conf->attr.switch_timer_interval
+                       == DEFAULT_METADATA_SWITCH_TIMER &&
+          kern->metadata->conf->attr.read_timer_interval
+                       == DEFAULT_METADATA_READ_TIMER &&
+          kern->metadata->conf->attr.output
+                       == LTTNG_EVENT_MMAP,
+          "Validate kernel session metadata");
+
+       trace_kernel_destroy_metadata(kern->metadata);
+}
+
+static void test_create_kernel_channel(void)
+{
+       struct ltt_kernel_channel *chan;
+       struct lttng_channel attr;
+       struct lttng_channel_extended extended;
+
+       memset(&attr, 0, sizeof(attr));
+       memset(&extended, 0, sizeof(extended));
+       attr.attr.extended.ptr = &extended;
+
+       chan = trace_kernel_create_channel(&attr);
+       ok(chan != NULL, "Create kernel channel");
+
+       if (!chan) {
+               skip(1, "Channel is null");
+               return;
+       }
+
+       ok(chan->fd == -1 &&
+          chan->enabled == 1 &&
+          chan->stream_count == 0 &&
+          chan->channel->attr.overwrite  == attr.attr.overwrite,
+          "Validate kernel channel");
+
+       /* Init list in order to avoid segfaults from cds_list_del */
+       CDS_INIT_LIST_HEAD(&chan->list);
+       trace_kernel_destroy_channel(chan);
+}
+
+static void test_create_kernel_event(void)
+{
+       enum lttng_error_code ret;
+       struct ltt_kernel_event *event;
+       struct lttng_event ev;
+
+       memset(&ev, 0, sizeof(ev));
+       ok(!lttng_strncpy(ev.name, get_random_string(),
+                       RANDOM_STRING_LEN),
+               "Validate string length");
+       ev.type = LTTNG_EVENT_TRACEPOINT;
+       ev.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
+
+       ret = trace_kernel_create_event(&ev, NULL, NULL, &event);
+       ok(ret == LTTNG_OK, "Create kernel event");
+
+       if (!event) {
+               skip(1, "Event is null");
+               return;
+       }
+
+       ok(event->fd == -1 &&
+          event->enabled == 1 &&
+          event->event->instrumentation == LTTNG_KERNEL_ABI_TRACEPOINT &&
+          strlen(event->event->name),
+          "Validate kernel event");
+
+       /* Init list in order to avoid segfaults from cds_list_del */
+       CDS_INIT_LIST_HEAD(&event->list);
+       trace_kernel_destroy_event(event);
+}
+
+static void test_create_kernel_stream(void)
+{
+       struct ltt_kernel_stream *stream;
+
+       stream = trace_kernel_create_stream("stream1", 0);
+       ok(stream != NULL, "Create kernel stream");
+
+       if (!stream) {
+               skip(1, "Stream is null");
+               return;
+       }
+
+       ok(stream->fd == -1 &&
+          stream->state == 0,
+          "Validate kernel stream");
+
+       /* Init list in order to avoid segfaults from cds_list_del */
+       CDS_INIT_LIST_HEAD(&stream->list);
+       trace_kernel_destroy_stream(stream);
+}
+
+int main(int argc, char **argv)
+{
+       plan_tests(NUM_TESTS);
+
+       diag("Kernel data structure unit test");
+
+       test_create_one_kernel_session();
+       test_create_kernel_metadata();
+       test_create_kernel_channel();
+       test_create_kernel_event();
+       test_create_kernel_stream();
+
+       return exit_status();
+}
diff --git a/tests/unit/test_session.c b/tests/unit/test_session.c
deleted file mode 100644
index 4fc2379..0000000
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <time.h>
-#include <sys/types.h>
-#include <urcu.h>
-
-#include <tap/tap.h>
-
-#include <common/compat/errno.h>
-#include <bin/lttng-sessiond/session.h>
-#include <bin/lttng-sessiond/ust-app.h>
-#include <bin/lttng-sessiond/ht-cleanup.h>
-#include <bin/lttng-sessiond/health-sessiond.h>
-#include <bin/lttng-sessiond/thread.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-#include <common/common.h>
-
-#define SESSION1 "test1"
-
-#define MAX_SESSIONS 10000
-#define RANDOM_STRING_LEN      11
-
-/* Number of TAP tests in this file */
-#define NUM_TESTS 11
-
-static struct ltt_session_list *session_list;
-
-static const char alphanum[] =
-       "0123456789"
-       "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-       "abcdefghijklmnopqrstuvwxyz";
-static char random_string[RANDOM_STRING_LEN];
-
-/*
- * Return random string of 10 characters.
- * Not thread-safe.
- */
-static char *get_random_string(void)
-{
-       int i;
-
-       for (i = 0; i < RANDOM_STRING_LEN - 1; i++) {
-               random_string[i] = alphanum[rand() % (sizeof(alphanum) - 1)];
-       }
-
-       random_string[RANDOM_STRING_LEN - 1] = '\0';
-
-       return random_string;
-}
-
-/*
- * Return 0 if session name is found, else -1
- */
-static int find_session_name(const char *name)
-{
-       struct ltt_session *iter;
-
-       cds_list_for_each_entry(iter, &session_list->head, list) {
-               if (strcmp(iter->name, name) == 0) {
-                       return 0;
-               }
-       }
-
-       return -1;
-}
-
-static int session_list_count(void)
-{
-       int count = 0;
-       struct ltt_session *iter;
-
-       cds_list_for_each_entry(iter, &session_list->head, list) {
-               count++;
-       }
-       return count;
-}
-
-/*
- * Empty session list manually.
- */
-static void empty_session_list(void)
-{
-       struct ltt_session *iter, *tmp;
-
-       session_lock_list();
-       cds_list_for_each_entry_safe(iter, tmp, &session_list->head, list) {
-               session_destroy(iter);
-       }
-       session_unlock_list();
-
-       /* Session list must be 0 */
-       LTTNG_ASSERT(!session_list_count());
-}
-
-/*
- * Test creation of 1 session
- */
-static int create_one_session(const char *name)
-{
-       int ret;
-       enum lttng_error_code ret_code;
-       struct ltt_session *session = NULL;
-
-       session_lock_list();
-       ret_code = session_create(name, geteuid(), getegid(), &session);
-       session_put(session);
-       if (ret_code == LTTNG_OK) {
-               /* Validate */
-               ret = find_session_name(name);
-               if (ret < 0) {
-                       /* Session not found by name */
-                       printf("session not found after creation\n");
-                       ret = -1;
-               } else {
-                       /* Success */
-                       ret = 0;
-               }
-       } else {
-               if (ret_code == LTTNG_ERR_EXIST_SESS) {
-                       printf("(session already exists) ");
-               }
-               ret = -1;
-       }
-
-       session_unlock_list();
-       return ret;
-}
-
-/*
- * Test deletion of 1 session
- */
-static int destroy_one_session(struct ltt_session *session)
-{
-       int ret;
-       char session_name[NAME_MAX];
-
-       strncpy(session_name, session->name, sizeof(session_name));
-       session_name[sizeof(session_name) - 1] = '\0';
-
-       session_destroy(session);
-       session_put(session);
-
-       ret = find_session_name(session_name);
-       if (ret < 0) {
-               /* Success, -1 means that the sesion is NOT found */
-               ret = 0;
-       } else {
-               /* Fail */
-               ret = -1;
-       }
-       return ret;
-}
-
-/*
- * This test is supposed to fail at the second create call. If so, return 0 for
- * test success, else -1.
- */
-static int two_session_same_name(void)
-{
-       int ret;
-       struct ltt_session *sess;
-
-       ret = create_one_session(SESSION1);
-       if (ret < 0) {
-               /* Fail */
-               ret = -1;
-               goto end;
-       }
-
-       session_lock_list();
-       sess = session_find_by_name(SESSION1);
-       if (sess) {
-               /* Success */
-               session_put(sess);
-               session_unlock_list();
-               ret = 0;
-               goto end_unlock;
-       } else {
-               /* Fail */
-               ret = -1;
-               goto end_unlock;
-       }
-end_unlock:
-       session_unlock_list();
-end:
-       return ret;
-}
-
-static void test_session_list(void)
-{
-       session_list = session_get_list();
-       ok(session_list != NULL, "Session list: not NULL");
-}
-
-static void test_create_one_session(void)
-{
-       ok(create_one_session(SESSION1) == 0,
-          "Create session: %s",
-          SESSION1);
-}
-
-static void test_validate_session(void)
-{
-       struct ltt_session *tmp;
-
-       session_lock_list();
-       tmp = session_find_by_name(SESSION1);
-
-       ok(tmp != NULL,
-          "Validating session: session found");
-
-       if (tmp) {
-               ok(tmp->kernel_session == NULL &&
-                  strlen(tmp->name),
-                  "Validating session: basic sanity check");
-       } else {
-               skip(1, "Skipping session validation check as session was not found");
-               goto end;
-       }
-
-       session_lock(tmp);
-       session_unlock(tmp);
-       session_put(tmp);
-end:
-       session_unlock_list();
-}
-
-static void test_destroy_session(void)
-{
-       struct ltt_session *tmp;
-
-       session_lock_list();
-       tmp = session_find_by_name(SESSION1);
-
-       ok(tmp != NULL,
-          "Destroying session: session found");
-
-       if (tmp) {
-               ok(destroy_one_session(tmp) == 0,
-                  "Destroying session: %s destroyed",
-                  SESSION1);
-       } else {
-               skip(1, "Skipping session destruction as it was not found");
-       }
-       session_unlock_list();
-}
-
-static void test_duplicate_session(void)
-{
-       ok(two_session_same_name() == 0,
-          "Duplicate session creation");
-}
-
-static void test_session_name_generation(void)
-{
-       struct ltt_session *session = NULL;
-       enum lttng_error_code ret_code;
-       const char *expected_session_name_prefix = DEFAULT_SESSION_NAME;
-
-       session_lock_list();
-       ret_code = session_create(NULL, geteuid(), getegid(), &session);
-       ok(ret_code == LTTNG_OK,
-               "Create session with a NULL name (auto-generate a name)");
-       if (!session) {
-               skip(1, "Skipping session name generation tests as session_create() failed.");
-               goto end;
-       }
-       diag("Automatically-generated session name: %s", *session->name ?
-               session->name : "ERROR");
-       ok(*session->name && !strncmp(expected_session_name_prefix, session->name,
-                       sizeof(DEFAULT_SESSION_NAME) - 1),
-                       "Auto-generated session name starts with %s",
-                       DEFAULT_SESSION_NAME);
-end:
-       session_put(session);
-       session_unlock_list();
-}
-
-static void test_large_session_number(void)
-{
-       int ret, i, failed = 0;
-       struct ltt_session *iter, *tmp;
-
-       for (i = 0; i < MAX_SESSIONS; i++) {
-               char *tmp_name = get_random_string();
-               ret = create_one_session(tmp_name);
-               if (ret < 0) {
-                       diag("session %d (name: %s) creation failed", i, tmp_name);
-                       ++failed;
-               }
-       }
-
-       ok(failed == 0,
-          "Large sessions number: created %u sessions",
-          MAX_SESSIONS);
-
-       failed = 0;
-
-       session_lock_list();
-       for (i = 0; i < MAX_SESSIONS; i++) {
-               cds_list_for_each_entry_safe(iter, tmp, &session_list->head, list) {
-                       LTTNG_ASSERT(session_get(iter));
-                       ret = destroy_one_session(iter);
-                       if (ret < 0) {
-                               diag("session %d destroy failed", i);
-                               ++failed;
-                       }
-               }
-       }
-       session_unlock_list();
-
-       ok(failed == 0 && session_list_count() == 0,
-          "Large sessions number: destroyed %u sessions",
-          MAX_SESSIONS);
-}
-
-int main(int argc, char **argv)
-{
-       struct lttng_thread *ht_cleanup_thread;
-
-       plan_tests(NUM_TESTS);
-
-       the_health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
-       ht_cleanup_thread = launch_ht_cleanup_thread();
-       LTTNG_ASSERT(ht_cleanup_thread);
-       lttng_thread_put(ht_cleanup_thread);
-
-       diag("Sessions unit tests");
-
-       rcu_register_thread();
-
-       test_session_list();
-
-       test_create_one_session();
-
-       test_validate_session();
-
-       test_destroy_session();
-
-       test_duplicate_session();
-
-       empty_session_list();
-
-       test_session_name_generation();
-
-       test_large_session_number();
-
-       rcu_unregister_thread();
-       lttng_thread_list_shutdown_orphans();
-
-       return exit_status();
-}
diff --git a/tests/unit/test_session.cpp b/tests/unit/test_session.cpp
new file mode 100644
index 0000000..4fc2379
--- /dev/null
@@ -0,0 +1,361 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/types.h>
+#include <urcu.h>
+
+#include <tap/tap.h>
+
+#include <common/compat/errno.h>
+#include <bin/lttng-sessiond/session.h>
+#include <bin/lttng-sessiond/ust-app.h>
+#include <bin/lttng-sessiond/ht-cleanup.h>
+#include <bin/lttng-sessiond/health-sessiond.h>
+#include <bin/lttng-sessiond/thread.h>
+#include <common/sessiond-comm/sessiond-comm.h>
+#include <common/common.h>
+
+#define SESSION1 "test1"
+
+#define MAX_SESSIONS 10000
+#define RANDOM_STRING_LEN      11
+
+/* Number of TAP tests in this file */
+#define NUM_TESTS 11
+
+static struct ltt_session_list *session_list;
+
+static const char alphanum[] =
+       "0123456789"
+       "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+       "abcdefghijklmnopqrstuvwxyz";
+static char random_string[RANDOM_STRING_LEN];
+
+/*
+ * Return random string of 10 characters.
+ * Not thread-safe.
+ */
+static char *get_random_string(void)
+{
+       int i;
+
+       for (i = 0; i < RANDOM_STRING_LEN - 1; i++) {
+               random_string[i] = alphanum[rand() % (sizeof(alphanum) - 1)];
+       }
+
+       random_string[RANDOM_STRING_LEN - 1] = '\0';
+
+       return random_string;
+}
+
+/*
+ * Return 0 if session name is found, else -1
+ */
+static int find_session_name(const char *name)
+{
+       struct ltt_session *iter;
+
+       cds_list_for_each_entry(iter, &session_list->head, list) {
+               if (strcmp(iter->name, name) == 0) {
+                       return 0;
+               }
+       }
+
+       return -1;
+}
+
+static int session_list_count(void)
+{
+       int count = 0;
+       struct ltt_session *iter;
+
+       cds_list_for_each_entry(iter, &session_list->head, list) {
+               count++;
+       }
+       return count;
+}
+
+/*
+ * Empty session list manually.
+ */
+static void empty_session_list(void)
+{
+       struct ltt_session *iter, *tmp;
+
+       session_lock_list();
+       cds_list_for_each_entry_safe(iter, tmp, &session_list->head, list) {
+               session_destroy(iter);
+       }
+       session_unlock_list();
+
+       /* Session list must be 0 */
+       LTTNG_ASSERT(!session_list_count());
+}
+
+/*
+ * Test creation of 1 session
+ */
+static int create_one_session(const char *name)
+{
+       int ret;
+       enum lttng_error_code ret_code;
+       struct ltt_session *session = NULL;
+
+       session_lock_list();
+       ret_code = session_create(name, geteuid(), getegid(), &session);
+       session_put(session);
+       if (ret_code == LTTNG_OK) {
+               /* Validate */
+               ret = find_session_name(name);
+               if (ret < 0) {
+                       /* Session not found by name */
+                       printf("session not found after creation\n");
+                       ret = -1;
+               } else {
+                       /* Success */
+                       ret = 0;
+               }
+       } else {
+               if (ret_code == LTTNG_ERR_EXIST_SESS) {
+                       printf("(session already exists) ");
+               }
+               ret = -1;
+       }
+
+       session_unlock_list();
+       return ret;
+}
+
+/*
+ * Test deletion of 1 session
+ */
+static int destroy_one_session(struct ltt_session *session)
+{
+       int ret;
+       char session_name[NAME_MAX];
+
+       strncpy(session_name, session->name, sizeof(session_name));
+       session_name[sizeof(session_name) - 1] = '\0';
+
+       session_destroy(session);
+       session_put(session);
+
+       ret = find_session_name(session_name);
+       if (ret < 0) {
+               /* Success, -1 means that the session is NOT found */
+               ret = 0;
+       } else {
+               /* Fail */
+               ret = -1;
+       }
+       return ret;
+}
+
+/*
+ * This test is supposed to fail at the second create call. If so, return 0 for
+ * test success, else -1.
+ */
+static int two_session_same_name(void)
+{
+       int ret;
+       struct ltt_session *sess;
+
+       ret = create_one_session(SESSION1);
+       if (ret < 0) {
+               /* Fail */
+               ret = -1;
+               goto end;
+       }
+
+       session_lock_list();
+       sess = session_find_by_name(SESSION1);
+       if (sess) {
+               /* Success */
+               session_put(sess);
+               session_unlock_list();
+               ret = 0;
+               goto end_unlock;
+       } else {
+               /* Fail */
+               ret = -1;
+               goto end_unlock;
+       }
+end_unlock:
+       session_unlock_list();
+end:
+       return ret;
+}
+
+static void test_session_list(void)
+{
+       session_list = session_get_list();
+       ok(session_list != NULL, "Session list: not NULL");
+}
+
+static void test_create_one_session(void)
+{
+       ok(create_one_session(SESSION1) == 0,
+          "Create session: %s",
+          SESSION1);
+}
+
+static void test_validate_session(void)
+{
+       struct ltt_session *tmp;
+
+       session_lock_list();
+       tmp = session_find_by_name(SESSION1);
+
+       ok(tmp != NULL,
+          "Validating session: session found");
+
+       if (tmp) {
+               ok(tmp->kernel_session == NULL &&
+                  strlen(tmp->name),
+                  "Validating session: basic sanity check");
+       } else {
+               skip(1, "Skipping session validation check as session was not found");
+               goto end;
+       }
+
+       session_lock(tmp);
+       session_unlock(tmp);
+       session_put(tmp);
+end:
+       session_unlock_list();
+}
+
+static void test_destroy_session(void)
+{
+       struct ltt_session *tmp;
+
+       session_lock_list();
+       tmp = session_find_by_name(SESSION1);
+
+       ok(tmp != NULL,
+          "Destroying session: session found");
+
+       if (tmp) {
+               ok(destroy_one_session(tmp) == 0,
+                  "Destroying session: %s destroyed",
+                  SESSION1);
+       } else {
+               skip(1, "Skipping session destruction as it was not found");
+       }
+       session_unlock_list();
+}
+
+static void test_duplicate_session(void)
+{
+       ok(two_session_same_name() == 0,
+          "Duplicate session creation");
+}
+
+static void test_session_name_generation(void)
+{
+       struct ltt_session *session = NULL;
+       enum lttng_error_code ret_code;
+       const char *expected_session_name_prefix = DEFAULT_SESSION_NAME;
+
+       session_lock_list();
+       ret_code = session_create(NULL, geteuid(), getegid(), &session);
+       ok(ret_code == LTTNG_OK,
+               "Create session with a NULL name (auto-generate a name)");
+       if (!session) {
+               skip(1, "Skipping session name generation tests as session_create() failed.");
+               goto end;
+       }
+       diag("Automatically-generated session name: %s", *session->name ?
+               session->name : "ERROR");
+       ok(*session->name && !strncmp(expected_session_name_prefix, session->name,
+                       sizeof(DEFAULT_SESSION_NAME) - 1),
+                       "Auto-generated session name starts with %s",
+                       DEFAULT_SESSION_NAME);
+end:
+       session_put(session);
+       session_unlock_list();
+}
+
+static void test_large_session_number(void)
+{
+       int ret, i, failed = 0;
+       struct ltt_session *iter, *tmp;
+
+       for (i = 0; i < MAX_SESSIONS; i++) {
+               char *tmp_name = get_random_string();
+               ret = create_one_session(tmp_name);
+               if (ret < 0) {
+                       diag("session %d (name: %s) creation failed", i, tmp_name);
+                       ++failed;
+               }
+       }
+
+       ok(failed == 0,
+          "Large sessions number: created %u sessions",
+          MAX_SESSIONS);
+
+       failed = 0;
+
+       session_lock_list();
+       for (i = 0; i < MAX_SESSIONS; i++) {
+               cds_list_for_each_entry_safe(iter, tmp, &session_list->head, list) {
+                       LTTNG_ASSERT(session_get(iter));
+                       ret = destroy_one_session(iter);
+                       if (ret < 0) {
+                               diag("session %d destroy failed", i);
+                               ++failed;
+                       }
+               }
+       }
+       session_unlock_list();
+
+       ok(failed == 0 && session_list_count() == 0,
+          "Large sessions number: destroyed %u sessions",
+          MAX_SESSIONS);
+}
+
+int main(int argc, char **argv)
+{
+       struct lttng_thread *ht_cleanup_thread;
+
+       plan_tests(NUM_TESTS);
+
+       the_health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
+       ht_cleanup_thread = launch_ht_cleanup_thread();
+       LTTNG_ASSERT(ht_cleanup_thread);
+       lttng_thread_put(ht_cleanup_thread);
+
+       diag("Sessions unit tests");
+
+       rcu_register_thread();
+
+       test_session_list();
+
+       test_create_one_session();
+
+       test_validate_session();
+
+       test_destroy_session();
+
+       test_duplicate_session();
+
+       empty_session_list();
+
+       test_session_name_generation();
+
+       test_large_session_number();
+
+       rcu_unregister_thread();
+       lttng_thread_list_shutdown_orphans();
+
+       return exit_status();
+}
diff --git a/tests/unit/test_ust_data.c b/tests/unit/test_ust_data.c
deleted file mode 100644
index 95c6fe1..0000000
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- *
- * SPDX-License-Identifier: GPL-2.0-only
- *
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <time.h>
-#include <urcu.h>
-
-#include <lttng/lttng.h>
-#include <bin/lttng-sessiond/lttng-ust-abi.h>
-#include <common/defaults.h>
-#include <common/compat/errno.h>
-#include <bin/lttng-sessiond/trace-ust.h>
-#include <bin/lttng-sessiond/ust-app.h>
-#include <bin/lttng-sessiond/notification-thread.h>
-
-#include <lttng/ust-sigbus.h>
-
-#include <tap/tap.h>
-
-/* This path will NEVER be created in this test */
-#define PATH1 "/tmp/.test-junk-lttng"
-
-#define RANDOM_STRING_LEN      11
-
-/* Number of TAP tests in this file */
-#define NUM_TESTS 16
-
-DEFINE_LTTNG_UST_SIGBUS_STATE();
-
-static const char alphanum[] =
-       "0123456789"
-       "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-       "abcdefghijklmnopqrstuvwxyz";
-static char random_string[RANDOM_STRING_LEN];
-
-/*
- * Return random string of 10 characters.
- * Not thread-safe.
- */
-static char *get_random_string(void)
-{
-       int i;
-
-       for (i = 0; i < RANDOM_STRING_LEN - 1; i++) {
-               random_string[i] = alphanum[rand() % (sizeof(alphanum) - 1)];
-       }
-
-       random_string[RANDOM_STRING_LEN - 1] = '\0';
-
-       return random_string;
-}
-
-static void test_create_one_ust_session(void)
-{
-       struct ltt_ust_session *usess =
-               trace_ust_create_session(42);
-
-       ok(usess != NULL, "Create UST session");
-
-       if (!usess) {
-               skip(1, "UST session is null");
-               return;
-       }
-
-       ok(usess->id == 42 &&
-          usess->active == 0 &&
-          usess->domain_global.channels != NULL &&
-          usess->uid == 0 &&
-          usess->gid == 0,
-          "Validate UST session");
-
-       trace_ust_destroy_session(usess);
-}
-
-static void test_create_ust_channel(void)
-{
-       struct ltt_ust_channel *uchan;
-       struct lttng_channel attr;
-       struct lttng_channel_extended extended;
-
-       memset(&attr, 0, sizeof(attr));
-       memset(&extended, 0, sizeof(extended));
-       attr.attr.extended.ptr = &extended;
-
-       ok(lttng_strncpy(attr.name, "channel0", sizeof(attr.name)) == 0,
-               "Validate channel name length");
-       uchan = trace_ust_create_channel(&attr, LTTNG_DOMAIN_UST);
-       ok(uchan != NULL, "Create UST channel");
-
-       if (!uchan) {
-               skip(1, "UST channel is null");
-               return;
-       }
-
-       ok(uchan->enabled == 0 &&
-          strncmp(uchan->name, "channel0", 8) == 0 &&
-          uchan->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] == '\0' &&
-          uchan->ctx != NULL &&
-          uchan->events != NULL &&
-          uchan->attr.overwrite  == attr.attr.overwrite,
-          "Validate UST channel");
-
-       trace_ust_destroy_channel(uchan);
-}
-
-static void test_create_ust_event(void)
-{
-       struct ltt_ust_event *event;
-       struct lttng_event ev;
-       enum lttng_error_code ret;
-
-       memset(&ev, 0, sizeof(ev));
-       ok(lttng_strncpy(ev.name, get_random_string(),
-                       LTTNG_SYMBOL_NAME_LEN) == 0,
-               "Validate string length");
-       ev.type = LTTNG_EVENT_TRACEPOINT;
-       ev.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
-
-       ret = trace_ust_create_event(&ev, NULL, NULL, NULL, false, &event);
-
-       ok(ret == LTTNG_OK, "Create UST event");
-
-       if (!event) {
-               skip(1, "UST event is null");
-               return;
-       }
-
-       ok(event->enabled == 0 &&
-          event->attr.instrumentation == LTTNG_UST_ABI_TRACEPOINT &&
-          strcmp(event->attr.name, ev.name) == 0 &&
-          event->attr.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] == '\0',
-          "Validate UST event");
-
-       trace_ust_destroy_event(event);
-}
-
-static void test_create_ust_event_exclusion(void)
-{
-       enum lttng_error_code ret;
-       struct ltt_ust_event *event;
-       struct lttng_event ev;
-       char *name;
-       char *random_name;
-       struct lttng_event_exclusion *exclusion = NULL;
-       struct lttng_event_exclusion *exclusion_copy = NULL;
-       const int exclusion_count = 2;
-
-       memset(&ev, 0, sizeof(ev));
-
-       /* make a wildcarded event name */
-       name = get_random_string();
-       name[strlen(name) - 1] = '*';
-       ok(lttng_strncpy(ev.name, name, LTTNG_SYMBOL_NAME_LEN) == 0,
-               "Validate string length");
-
-       ev.type = LTTNG_EVENT_TRACEPOINT;
-       ev.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
-
-       /* set up an exclusion set */
-       exclusion = zmalloc(sizeof(*exclusion) +
-               LTTNG_SYMBOL_NAME_LEN * exclusion_count);
-       ok(exclusion != NULL, "Create UST exclusion");
-       if (!exclusion) {
-               skip(4, "zmalloc failed");
-               goto end;
-       }
-
-       exclusion->count = exclusion_count;
-       random_name = get_random_string();
-       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 0), random_name,
-               LTTNG_SYMBOL_NAME_LEN);
-       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 1), random_name,
-               LTTNG_SYMBOL_NAME_LEN);
-
-       ret = trace_ust_create_event(&ev, NULL, NULL, exclusion, false, &event);
-       exclusion = NULL;
-
-       ok(ret != LTTNG_OK, "Create UST event with identical exclusion names fails");
-
-       exclusion = zmalloc(sizeof(*exclusion) +
-               LTTNG_SYMBOL_NAME_LEN * exclusion_count);
-       ok(exclusion != NULL, "Create UST exclusion");
-       if (!exclusion) {
-               skip(2, "zmalloc failed");
-               goto end;
-       }
-
-       exclusion_copy = zmalloc(sizeof(*exclusion) +
-               LTTNG_SYMBOL_NAME_LEN * exclusion_count);
-       if (!exclusion_copy) {
-               skip(2, "zmalloc failed");
-               goto end;
-       }
-
-       /*
-        * We are giving ownership of the exclusion struct to the
-        * trace_ust_create_event() function. Make a copy of the exclusion struct
-        * so we can compare it later.
-        */
-
-       exclusion->count = exclusion_count;
-       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 0),
-               get_random_string(), LTTNG_SYMBOL_NAME_LEN);
-       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 1),
-               get_random_string(), LTTNG_SYMBOL_NAME_LEN);
-
-       exclusion_copy->count = exclusion_count;
-       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion_copy, 0),
-               LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 0), LTTNG_SYMBOL_NAME_LEN);
-       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion_copy, 1),
-               LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 1), LTTNG_SYMBOL_NAME_LEN);
-
-       ret = trace_ust_create_event(&ev, NULL, NULL, exclusion, false, &event);
-       exclusion = NULL;
-       ok(ret == LTTNG_OK, "Create UST event with different exclusion names");
-
-       if (!event) {
-               skip(1, "UST event with exclusion is null");
-               goto end;
-       }
-
-       ok(event->enabled == 0 &&
-               event->attr.instrumentation == LTTNG_UST_ABI_TRACEPOINT &&
-               strcmp(event->attr.name, ev.name) == 0 &&
-               event->exclusion != NULL &&
-               event->exclusion->count == exclusion_count &&
-               !memcmp(event->exclusion->names, exclusion_copy->names,
-                       LTTNG_SYMBOL_NAME_LEN * exclusion_count) &&
-               event->attr.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] == '\0',
-               "Validate UST event and exclusion");
-
-       trace_ust_destroy_event(event);
-end:
-       free(exclusion);
-       free(exclusion_copy);
-       return;
-}
-
-
-static void test_create_ust_context(void)
-{
-       struct lttng_event_context ectx;
-       struct ltt_ust_context *uctx;
-
-       ectx.ctx = LTTNG_EVENT_CONTEXT_VTID;
-
-       uctx = trace_ust_create_context(&ectx);
-       ok(uctx != NULL, "Create UST context");
-
-       if (uctx) {
-               ok((int) uctx->ctx.ctx == LTTNG_UST_ABI_CONTEXT_VTID,
-                  "Validate UST context");
-       } else {
-               skip(1, "Skipping UST context validation as creation failed");
-       }
-       free(uctx);
-}
-
-int main(int argc, char **argv)
-{
-       plan_tests(NUM_TESTS);
-
-       diag("UST data structures unit test");
-
-       rcu_register_thread();
-
-       test_create_one_ust_session();
-       test_create_ust_channel();
-       test_create_ust_event();
-       test_create_ust_context();
-       test_create_ust_event_exclusion();
-
-       rcu_unregister_thread();
-
-       return exit_status();
-}
diff --git a/tests/unit/test_ust_data.cpp b/tests/unit/test_ust_data.cpp
new file mode 100644
index 0000000..a6dfba4
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <time.h>
+#include <urcu.h>
+
+#include <lttng/lttng.h>
+#include <bin/lttng-sessiond/lttng-ust-abi.h>
+#include <common/defaults.h>
+#include <common/compat/errno.h>
+#include <bin/lttng-sessiond/trace-ust.h>
+#include <bin/lttng-sessiond/ust-app.h>
+#include <bin/lttng-sessiond/notification-thread.h>
+
+#include <lttng/ust-sigbus.h>
+
+#include <tap/tap.h>
+
+/* This path will NEVER be created in this test */
+#define PATH1 "/tmp/.test-junk-lttng"
+
+#define RANDOM_STRING_LEN      11
+
+/* Number of TAP tests in this file */
+#define NUM_TESTS 16
+
+DEFINE_LTTNG_UST_SIGBUS_STATE();
+
+static const char alphanum[] =
+       "0123456789"
+       "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+       "abcdefghijklmnopqrstuvwxyz";
+static char random_string[RANDOM_STRING_LEN];
+
+/*
+ * Return random string of 10 characters.
+ * Not thread-safe.
+ */
+static char *get_random_string(void)
+{
+       int i;
+
+       for (i = 0; i < RANDOM_STRING_LEN - 1; i++) {
+               random_string[i] = alphanum[rand() % (sizeof(alphanum) - 1)];
+       }
+
+       random_string[RANDOM_STRING_LEN - 1] = '\0';
+
+       return random_string;
+}
+
+static void test_create_one_ust_session(void)
+{
+       struct ltt_ust_session *usess =
+               trace_ust_create_session(42);
+
+       ok(usess != NULL, "Create UST session");
+
+       if (!usess) {
+               skip(1, "UST session is null");
+               return;
+       }
+
+       ok(usess->id == 42 &&
+          usess->active == 0 &&
+          usess->domain_global.channels != NULL &&
+          usess->uid == 0 &&
+          usess->gid == 0,
+          "Validate UST session");
+
+       trace_ust_destroy_session(usess);
+}
+
+static void test_create_ust_channel(void)
+{
+       struct ltt_ust_channel *uchan;
+       struct lttng_channel attr;
+       struct lttng_channel_extended extended;
+
+       memset(&attr, 0, sizeof(attr));
+       memset(&extended, 0, sizeof(extended));
+       attr.attr.extended.ptr = &extended;
+
+       ok(lttng_strncpy(attr.name, "channel0", sizeof(attr.name)) == 0,
+               "Validate channel name length");
+       uchan = trace_ust_create_channel(&attr, LTTNG_DOMAIN_UST);
+       ok(uchan != NULL, "Create UST channel");
+
+       if (!uchan) {
+               skip(1, "UST channel is null");
+               return;
+       }
+
+       ok(uchan->enabled == 0 &&
+          strncmp(uchan->name, "channel0", 8) == 0 &&
+          uchan->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] == '\0' &&
+          uchan->ctx != NULL &&
+          uchan->events != NULL &&
+          uchan->attr.overwrite  == attr.attr.overwrite,
+          "Validate UST channel");
+
+       trace_ust_destroy_channel(uchan);
+}
+
+static void test_create_ust_event(void)
+{
+       struct ltt_ust_event *event;
+       struct lttng_event ev;
+       enum lttng_error_code ret;
+
+       memset(&ev, 0, sizeof(ev));
+       ok(lttng_strncpy(ev.name, get_random_string(),
+                       LTTNG_SYMBOL_NAME_LEN) == 0,
+               "Validate string length");
+       ev.type = LTTNG_EVENT_TRACEPOINT;
+       ev.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
+
+       ret = trace_ust_create_event(&ev, NULL, NULL, NULL, false, &event);
+
+       ok(ret == LTTNG_OK, "Create UST event");
+
+       if (!event) {
+               skip(1, "UST event is null");
+               return;
+       }
+
+       ok(event->enabled == 0 &&
+          event->attr.instrumentation == LTTNG_UST_ABI_TRACEPOINT &&
+          strcmp(event->attr.name, ev.name) == 0 &&
+          event->attr.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] == '\0',
+          "Validate UST event");
+
+       trace_ust_destroy_event(event);
+}
+
+static void test_create_ust_event_exclusion(void)
+{
+       enum lttng_error_code ret;
+       struct ltt_ust_event *event;
+       struct lttng_event ev;
+       char *name;
+       char *random_name;
+       struct lttng_event_exclusion *exclusion = NULL;
+       struct lttng_event_exclusion *exclusion_copy = NULL;
+       const int exclusion_count = 2;
+
+       memset(&ev, 0, sizeof(ev));
+
+       /* make a wildcarded event name */
+       name = get_random_string();
+       name[strlen(name) - 1] = '*';
+       ok(lttng_strncpy(ev.name, name, LTTNG_SYMBOL_NAME_LEN) == 0,
+               "Validate string length");
+
+       ev.type = LTTNG_EVENT_TRACEPOINT;
+       ev.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
+
+       /* set up an exclusion set */
+       exclusion = (lttng_event_exclusion *) zmalloc(sizeof(*exclusion) +
+               LTTNG_SYMBOL_NAME_LEN * exclusion_count);
+       ok(exclusion != NULL, "Create UST exclusion");
+       if (!exclusion) {
+               skip(4, "zmalloc failed");
+               goto end;
+       }
+
+       exclusion->count = exclusion_count;
+       random_name = get_random_string();
+       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 0), random_name,
+               LTTNG_SYMBOL_NAME_LEN);
+       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 1), random_name,
+               LTTNG_SYMBOL_NAME_LEN);
+
+       ret = trace_ust_create_event(&ev, NULL, NULL, exclusion, false, &event);
+       exclusion = NULL;
+
+       ok(ret != LTTNG_OK, "Create UST event with identical exclusion names fails");
+
+       exclusion = (lttng_event_exclusion *) zmalloc(sizeof(*exclusion) +
+               LTTNG_SYMBOL_NAME_LEN * exclusion_count);
+       ok(exclusion != NULL, "Create UST exclusion");
+       if (!exclusion) {
+               skip(2, "zmalloc failed");
+               goto end;
+       }
+
+       exclusion_copy = (lttng_event_exclusion *) zmalloc(sizeof(*exclusion) +
+               LTTNG_SYMBOL_NAME_LEN * exclusion_count);
+       if (!exclusion_copy) {
+               skip(2, "zmalloc failed");
+               goto end;
+       }
+
+       /*
+        * We are giving ownership of the exclusion struct to the
+        * trace_ust_create_event() function. Make a copy of the exclusion struct
+        * so we can compare it later.
+        */
+
+       exclusion->count = exclusion_count;
+       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 0),
+               get_random_string(), LTTNG_SYMBOL_NAME_LEN);
+       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 1),
+               get_random_string(), LTTNG_SYMBOL_NAME_LEN);
+
+       exclusion_copy->count = exclusion_count;
+       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion_copy, 0),
+               LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 0), LTTNG_SYMBOL_NAME_LEN);
+       strncpy(LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion_copy, 1),
+               LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, 1), LTTNG_SYMBOL_NAME_LEN);
+
+       ret = trace_ust_create_event(&ev, NULL, NULL, exclusion, false, &event);
+       exclusion = NULL;
+       ok(ret == LTTNG_OK, "Create UST event with different exclusion names");
+
+       if (!event) {
+               skip(1, "UST event with exclusion is null");
+               goto end;
+       }
+
+       ok(event->enabled == 0 &&
+               event->attr.instrumentation == LTTNG_UST_ABI_TRACEPOINT &&
+               strcmp(event->attr.name, ev.name) == 0 &&
+               event->exclusion != NULL &&
+               event->exclusion->count == exclusion_count &&
+               !memcmp(event->exclusion->names, exclusion_copy->names,
+                       LTTNG_SYMBOL_NAME_LEN * exclusion_count) &&
+               event->attr.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] == '\0',
+               "Validate UST event and exclusion");
+
+       trace_ust_destroy_event(event);
+end:
+       free(exclusion);
+       free(exclusion_copy);
+       return;
+}
+
+
+static void test_create_ust_context(void)
+{
+       struct lttng_event_context ectx;
+       struct ltt_ust_context *uctx;
+
+       ectx.ctx = LTTNG_EVENT_CONTEXT_VTID;
+
+       uctx = trace_ust_create_context(&ectx);
+       ok(uctx != NULL, "Create UST context");
+
+       if (uctx) {
+               ok((int) uctx->ctx.ctx == LTTNG_UST_ABI_CONTEXT_VTID,
+                  "Validate UST context");
+       } else {
+               skip(1, "Skipping UST context validation as creation failed");
+       }
+       free(uctx);
+}
+
+int main(int argc, char **argv)
+{
+       plan_tests(NUM_TESTS);
+
+       diag("UST data structures unit test");
+
+       rcu_register_thread();
+
+       test_create_one_ust_session();
+       test_create_ust_channel();
+       test_create_ust_event();
+       test_create_ust_context();
+       test_create_ust_event_exclusion();
+
+       rcu_unregister_thread();
+
+       return exit_status();
+}
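
Note on the casts in the test_ust_data.cpp hunk above: the .c version called
zmalloc() without a cast, but C++ does not implicitly convert void * to other
object pointer types, which is presumably why the .cpp version adds the
explicit (lttng_event_exclusion *) casts. A minimal sketch of the issue, using
a hypothetical stand-in type and allocator rather than the real
lttng_event_exclusion and zmalloc():

    #include <cstdlib>
    #include <cstring>

    /* Hypothetical stand-in for a C allocation helper returning void *. */
    static void *zmalloc_sketch(std::size_t len)
    {
            void *ptr = std::malloc(len);

            if (ptr) {
                    std::memset(ptr, 0, len);
            }

            return ptr;
    }

    /* Hypothetical stand-in; not the real lttng_event_exclusion. */
    struct exclusion_sketch {
            unsigned int count;
    };

    int main()
    {
            /* Valid C, but a compile error in C++ without the explicit cast. */
            struct exclusion_sketch *e = (struct exclusion_sketch *) zmalloc_sketch(
                    sizeof(*e) + 2 * 256);

            if (e) {
                    e->count = 2;
            }

            std::free(e);
            return 0;
    }
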
index c15909d897d7e7285a67cfd443be50dd502a60be..84288f03848831f8f148c4612d9cd73608af8c75 100644
  * SUCH DAMAGE.
  */
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /* '## __VA_ARGS__' is a gcc'ism. C99 doesn't allow the token pasting
    and requires the caller to add the final comma if they've ommitted
    the optional arguments */
@@ -89,3 +93,7 @@ void todo_start(char *, ...);
 void todo_end(void);
 
 int exit_status(void);
+
+#ifdef __cplusplus
+}
+#endif
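
Note on the hunk above: wrapping the TAP declarations in extern "C" keeps them
with C linkage, so the unit tests now built as C++ can still link against the
C-compiled TAP library without name-mangling mismatches. A minimal usage
sketch (hypothetical test file; it only relies on the plan_tests()/ok()/
exit_status() API already used by the tests in this patch):

    #include <tap/tap.h>

    int main(void)
    {
            /* With the extern "C" guards, these C symbols resolve as-is
             * even though this translation unit is compiled as C++. */
            plan_tests(1);
            ok(1 == 1, "TAP header usable from C++");
            return exit_status();
    }
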