/*
 * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */
10 #include "agent-thread.hpp"
12 #include "buffer-registry.hpp"
13 #include "channel.hpp"
15 #include "consumer-output.hpp"
16 #include "consumer.hpp"
17 #include "event-notifier-error-accounting.hpp"
19 #include "health-sessiond.hpp"
20 #include "kernel-consumer.hpp"
22 #include "lttng-sessiond.hpp"
23 #include "lttng-syscall.hpp"
24 #include "notification-thread-commands.hpp"
25 #include "notification-thread.hpp"
26 #include "rotation-thread.hpp"
27 #include "session.hpp"
29 #include "tracker.hpp"
32 #include <common/buffer-view.hpp>
33 #include <common/common.hpp>
34 #include <common/compat/string.hpp>
35 #include <common/defaults.hpp>
36 #include <common/dynamic-buffer.hpp>
37 #include <common/kernel-ctl/kernel-ctl.hpp>
38 #include <common/payload-view.hpp>
39 #include <common/payload.hpp>
40 #include <common/relayd/relayd.hpp>
41 #include <common/sessiond-comm/sessiond-comm.hpp>
42 #include <common/string-utils/string-utils.hpp>
43 #include <common/trace-chunk.hpp>
44 #include <common/urcu.hpp>
45 #include <common/utils.hpp>
47 #include <lttng/action/action-internal.hpp>
48 #include <lttng/action/action.h>
49 #include <lttng/channel-internal.hpp>
50 #include <lttng/channel.h>
51 #include <lttng/condition/condition-internal.hpp>
52 #include <lttng/condition/condition.h>
53 #include <lttng/condition/event-rule-matches-internal.hpp>
54 #include <lttng/condition/event-rule-matches.h>
55 #include <lttng/error-query-internal.hpp>
56 #include <lttng/event-internal.hpp>
57 #include <lttng/event-rule/event-rule-internal.hpp>
58 #include <lttng/event-rule/event-rule.h>
59 #include <lttng/kernel.h>
60 #include <lttng/location-internal.hpp>
61 #include <lttng/lttng-error.h>
62 #include <lttng/rotate-internal.hpp>
63 #include <lttng/session-descriptor-internal.hpp>
64 #include <lttng/session-internal.hpp>
65 #include <lttng/tracker.h>
66 #include <lttng/trigger/trigger-internal.hpp>
67 #include <lttng/userspace-probe-internal.hpp>
73 #include <urcu/list.h>
74 #include <urcu/uatomic.h>
/* Sleep for 100ms between each check for the shm path's deletion. */
#define SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US 100000
79 namespace lsu
= lttng::sessiond::ust
;
81 static enum lttng_error_code
wait_on_path(void *path
);
84 struct cmd_destroy_session_reply_context
{
86 bool implicit_rotation_on_destroy
;
88 * Indicates whether or not an error occurred while launching the
89 * destruction of a session.
91 enum lttng_error_code destruction_status
;
95 * Command completion handler that is used by the destroy command
96 * when a session that has a non-default shm_path is being destroyed.
98 * See comment in cmd_destroy_session() for the rationale.
100 struct destroy_completion_handler
{
101 struct cmd_completion_handler handler
;
102 char shm_path
[member_sizeof(struct ltt_session
, shm_path
)];
103 } destroy_completion_handler
= {
104 .handler
= { .run
= wait_on_path
, .data
= destroy_completion_handler
.shm_path
},
/*
 * Used to keep a unique index for each relayd socket created where this value
 * is associated with streams on the consumer so it can match the right relayd
 * to send to. It must be accessed with the relayd_net_seq_idx_lock
 * held.
 */
pthread_mutex_t relayd_net_seq_idx_lock = PTHREAD_MUTEX_INITIALIZER;
uint64_t relayd_net_seq_idx;
/* Completion handler, if any, to run after the current command; file-local. */
static struct cmd_completion_handler *current_completion_handler;
/* Forward declaration; defined later in this file. */
static int validate_ust_event_name(const char *);
120 static int cmd_enable_event_internal(ltt_session::locked_ref
& session
,
121 const struct lttng_domain
*domain
,
123 struct lttng_event
*event
,
124 char *filter_expression
,
125 struct lttng_bytecode
*filter
,
126 struct lttng_event_exclusion
*exclusion
,
128 static enum lttng_error_code
cmd_enable_channel_internal(ltt_session::locked_ref
& session
,
129 const struct lttng_domain
*domain
,
130 const struct lttng_channel
*_attr
,
134 * Create a session path used by list_lttng_sessions for the case that the
135 * session consumer is on the network.
138 build_network_session_path(char *dst
, size_t size
, const ltt_session::locked_ref
& session
)
140 int ret
, kdata_port
, udata_port
;
141 struct lttng_uri
*kuri
= nullptr, *uuri
= nullptr, *uri
= nullptr;
142 char tmp_uurl
[PATH_MAX
], tmp_urls
[PATH_MAX
];
146 memset(tmp_urls
, 0, sizeof(tmp_urls
));
147 memset(tmp_uurl
, 0, sizeof(tmp_uurl
));
149 kdata_port
= udata_port
= DEFAULT_NETWORK_DATA_PORT
;
151 if (session
->kernel_session
&& session
->kernel_session
->consumer
) {
152 kuri
= &session
->kernel_session
->consumer
->dst
.net
.control
;
153 kdata_port
= session
->kernel_session
->consumer
->dst
.net
.data
.port
;
156 if (session
->ust_session
&& session
->ust_session
->consumer
) {
157 uuri
= &session
->ust_session
->consumer
->dst
.net
.control
;
158 udata_port
= session
->ust_session
->consumer
->dst
.net
.data
.port
;
161 if (uuri
== nullptr && kuri
== nullptr) {
162 uri
= &session
->consumer
->dst
.net
.control
;
163 kdata_port
= session
->consumer
->dst
.net
.data
.port
;
164 } else if (kuri
&& uuri
) {
165 ret
= uri_compare(kuri
, uuri
);
169 /* Build uuri URL string */
170 ret
= uri_to_str_url(uuri
, tmp_uurl
, sizeof(tmp_uurl
));
177 } else if (kuri
&& uuri
== nullptr) {
179 } else if (uuri
&& kuri
== nullptr) {
183 ret
= uri_to_str_url(uri
, tmp_urls
, sizeof(tmp_urls
));
189 * Do we have a UST url set. If yes, this means we have both kernel and UST
192 if (*tmp_uurl
!= '\0') {
195 "[K]: %s [data: %d] -- [U]: %s [data: %d]",
202 if (kuri
|| (!kuri
&& !uuri
)) {
205 /* No kernel URI, use the UST port. */
208 ret
= snprintf(dst
, size
, "%s [data: %d]", tmp_urls
, dport
);
216 * Get run-time attributes if the session has been started (discarded events,
219 static int get_kernel_runtime_stats(const ltt_session::locked_ref
& session
,
220 struct ltt_kernel_channel
*kchan
,
221 uint64_t *discarded_events
,
222 uint64_t *lost_packets
)
226 if (!session
->has_been_started
) {
228 *discarded_events
= 0;
233 ret
= consumer_get_discarded_events(
234 session
->id
, kchan
->key
, session
->kernel_session
->consumer
, discarded_events
);
239 ret
= consumer_get_lost_packets(
240 session
->id
, kchan
->key
, session
->kernel_session
->consumer
, lost_packets
);
250 * Get run-time attributes if the session has been started (discarded events,
253 static int get_ust_runtime_stats(const ltt_session::locked_ref
& session
,
254 struct ltt_ust_channel
*uchan
,
255 uint64_t *discarded_events
,
256 uint64_t *lost_packets
)
259 struct ltt_ust_session
*usess
;
261 if (!discarded_events
|| !lost_packets
) {
266 usess
= session
->ust_session
;
267 LTTNG_ASSERT(discarded_events
);
268 LTTNG_ASSERT(lost_packets
);
270 if (!usess
|| !session
->has_been_started
) {
271 *discarded_events
= 0;
277 if (usess
->buffer_type
== LTTNG_BUFFER_PER_UID
) {
278 ret
= ust_app_uid_get_channel_runtime_stats(usess
->id
,
279 &usess
->buffer_reg_uid_list
,
282 uchan
->attr
.overwrite
,
285 } else if (usess
->buffer_type
== LTTNG_BUFFER_PER_PID
) {
286 ret
= ust_app_pid_get_channel_runtime_stats(usess
,
289 uchan
->attr
.overwrite
,
295 *discarded_events
+= uchan
->per_pid_closed_app_discarded
;
296 *lost_packets
+= uchan
->per_pid_closed_app_lost
;
298 ERR("Unsupported buffer type");
309 * Create a list of agent domain events.
311 * Return number of events in list on success or else a negative value.
313 static enum lttng_error_code
list_lttng_agent_events(struct agent
*agt
,
314 struct lttng_payload
*reply_payload
,
315 unsigned int *nb_events
)
317 enum lttng_error_code ret_code
;
319 unsigned int local_nb_events
= 0;
320 struct agent_event
*event
;
321 struct lttng_ht_iter iter
;
322 unsigned long agent_event_count
;
325 assert(reply_payload
);
327 DBG3("Listing agent events");
329 agent_event_count
= lttng_ht_get_count(agt
->events
);
330 if (agent_event_count
== 0) {
335 if (agent_event_count
> UINT_MAX
) {
336 ret_code
= LTTNG_ERR_OVERFLOW
;
340 local_nb_events
= (unsigned int) agent_event_count
;
343 const lttng::urcu::read_lock_guard read_lock
;
345 cds_lfht_for_each_entry (agt
->events
->ht
, &iter
.iter
, event
, node
.node
) {
346 struct lttng_event
*tmp_event
= lttng_event_create();
349 ret_code
= LTTNG_ERR_NOMEM
;
353 if (lttng_strncpy(tmp_event
->name
, event
->name
, sizeof(tmp_event
->name
))) {
354 lttng_event_destroy(tmp_event
);
355 ret_code
= LTTNG_ERR_FATAL
;
359 tmp_event
->name
[sizeof(tmp_event
->name
) - 1] = '\0';
360 tmp_event
->enabled
= !!event
->enabled_count
;
361 tmp_event
->loglevel
= event
->loglevel_value
;
362 tmp_event
->loglevel_type
= event
->loglevel_type
;
364 ret
= lttng_event_serialize(tmp_event
,
367 event
->filter_expression
,
371 lttng_event_destroy(tmp_event
);
373 ret_code
= LTTNG_ERR_FATAL
;
380 *nb_events
= local_nb_events
;
386 * Create a list of ust global domain events.
388 static enum lttng_error_code
list_lttng_ust_global_events(char *channel_name
,
389 struct ltt_ust_domain_global
*ust_global
,
390 struct lttng_payload
*reply_payload
,
391 unsigned int *nb_events
)
393 enum lttng_error_code ret_code
;
395 struct lttng_ht_iter iter
;
396 struct lttng_ht_node_str
*node
;
397 struct ltt_ust_channel
*uchan
;
398 struct ltt_ust_event
*uevent
;
399 unsigned long channel_event_count
;
400 unsigned int local_nb_events
= 0;
402 assert(reply_payload
);
405 DBG("Listing UST global events for channel %s", channel_name
);
407 const lttng::urcu::read_lock_guard read_lock
;
409 lttng_ht_lookup(ust_global
->channels
, (void *) channel_name
, &iter
);
410 node
= lttng_ht_iter_get_node
<lttng_ht_node_str
>(&iter
);
411 if (node
== nullptr) {
412 ret_code
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
416 uchan
= caa_container_of(&node
->node
, struct ltt_ust_channel
, node
.node
);
418 channel_event_count
= lttng_ht_get_count(uchan
->events
);
419 if (channel_event_count
== 0) {
425 if (channel_event_count
> UINT_MAX
) {
426 ret_code
= LTTNG_ERR_OVERFLOW
;
430 local_nb_events
= (unsigned int) channel_event_count
;
432 DBG3("Listing UST global %d events", *nb_events
);
434 cds_lfht_for_each_entry (uchan
->events
->ht
, &iter
.iter
, uevent
, node
.node
) {
435 struct lttng_event
*tmp_event
= nullptr;
437 if (uevent
->internal
) {
438 /* This event should remain hidden from clients */
443 tmp_event
= lttng_event_create();
445 ret_code
= LTTNG_ERR_NOMEM
;
449 if (lttng_strncpy(tmp_event
->name
, uevent
->attr
.name
, LTTNG_SYMBOL_NAME_LEN
)) {
450 ret_code
= LTTNG_ERR_FATAL
;
451 lttng_event_destroy(tmp_event
);
455 tmp_event
->name
[LTTNG_SYMBOL_NAME_LEN
- 1] = '\0';
456 tmp_event
->enabled
= uevent
->enabled
;
458 switch (uevent
->attr
.instrumentation
) {
459 case LTTNG_UST_ABI_TRACEPOINT
:
460 tmp_event
->type
= LTTNG_EVENT_TRACEPOINT
;
462 case LTTNG_UST_ABI_PROBE
:
463 tmp_event
->type
= LTTNG_EVENT_PROBE
;
465 case LTTNG_UST_ABI_FUNCTION
:
466 tmp_event
->type
= LTTNG_EVENT_FUNCTION
;
470 tmp_event
->loglevel
= uevent
->attr
.loglevel
;
471 switch (uevent
->attr
.loglevel_type
) {
472 case LTTNG_UST_ABI_LOGLEVEL_ALL
:
473 tmp_event
->loglevel_type
= LTTNG_EVENT_LOGLEVEL_ALL
;
475 case LTTNG_UST_ABI_LOGLEVEL_RANGE
:
476 tmp_event
->loglevel_type
= LTTNG_EVENT_LOGLEVEL_RANGE
;
478 case LTTNG_UST_ABI_LOGLEVEL_SINGLE
:
479 tmp_event
->loglevel_type
= LTTNG_EVENT_LOGLEVEL_SINGLE
;
482 if (uevent
->filter
) {
483 tmp_event
->filter
= 1;
485 if (uevent
->exclusion
) {
486 tmp_event
->exclusion
= 1;
489 std::vector
<const char *> exclusion_names
;
490 if (uevent
->exclusion
) {
491 for (int i
= 0; i
< uevent
->exclusion
->count
; i
++) {
492 exclusion_names
.emplace_back(
493 LTTNG_EVENT_EXCLUSION_NAME_AT(uevent
->exclusion
, i
));
498 * We do not care about the filter bytecode and the fd from the
499 * userspace_probe_location.
501 ret
= lttng_event_serialize(tmp_event
,
502 exclusion_names
.size(),
503 exclusion_names
.size() ? exclusion_names
.data() :
505 uevent
->filter_expression
,
509 lttng_event_destroy(tmp_event
);
511 ret_code
= LTTNG_ERR_FATAL
;
517 /* nb_events is already set at this point. */
519 *nb_events
= local_nb_events
;
525 * Fill lttng_event array of all kernel events in the channel.
527 static enum lttng_error_code
list_lttng_kernel_events(char *channel_name
,
528 struct ltt_kernel_session
*kernel_session
,
529 struct lttng_payload
*reply_payload
,
530 unsigned int *nb_events
)
532 enum lttng_error_code ret_code
;
534 struct ltt_kernel_event
*event
;
535 struct ltt_kernel_channel
*kchan
;
537 assert(reply_payload
);
539 kchan
= trace_kernel_get_channel_by_name(channel_name
, kernel_session
);
540 if (kchan
== nullptr) {
541 ret_code
= LTTNG_ERR_KERN_CHAN_NOT_FOUND
;
545 *nb_events
= kchan
->event_count
;
547 DBG("Listing events for channel %s", kchan
->channel
->name
);
549 if (*nb_events
== 0) {
554 /* Kernel channels */
555 cds_list_for_each_entry (event
, &kchan
->events_list
.head
, list
) {
556 struct lttng_event
*tmp_event
= lttng_event_create();
559 ret_code
= LTTNG_ERR_NOMEM
;
563 if (lttng_strncpy(tmp_event
->name
, event
->event
->name
, LTTNG_SYMBOL_NAME_LEN
)) {
564 lttng_event_destroy(tmp_event
);
565 ret_code
= LTTNG_ERR_FATAL
;
569 tmp_event
->name
[LTTNG_SYMBOL_NAME_LEN
- 1] = '\0';
570 tmp_event
->enabled
= event
->enabled
;
571 tmp_event
->filter
= (unsigned char) !!event
->filter_expression
;
573 switch (event
->event
->instrumentation
) {
574 case LTTNG_KERNEL_ABI_TRACEPOINT
:
575 tmp_event
->type
= LTTNG_EVENT_TRACEPOINT
;
577 case LTTNG_KERNEL_ABI_KRETPROBE
:
578 tmp_event
->type
= LTTNG_EVENT_FUNCTION
;
579 memcpy(&tmp_event
->attr
.probe
,
580 &event
->event
->u
.kprobe
,
581 sizeof(struct lttng_kernel_abi_kprobe
));
583 case LTTNG_KERNEL_ABI_KPROBE
:
584 tmp_event
->type
= LTTNG_EVENT_PROBE
;
585 memcpy(&tmp_event
->attr
.probe
,
586 &event
->event
->u
.kprobe
,
587 sizeof(struct lttng_kernel_abi_kprobe
));
589 case LTTNG_KERNEL_ABI_UPROBE
:
590 tmp_event
->type
= LTTNG_EVENT_USERSPACE_PROBE
;
592 case LTTNG_KERNEL_ABI_FUNCTION
:
593 tmp_event
->type
= LTTNG_EVENT_FUNCTION
;
594 memcpy(&(tmp_event
->attr
.ftrace
),
595 &event
->event
->u
.ftrace
,
596 sizeof(struct lttng_kernel_abi_function
));
598 case LTTNG_KERNEL_ABI_NOOP
:
599 tmp_event
->type
= LTTNG_EVENT_NOOP
;
601 case LTTNG_KERNEL_ABI_SYSCALL
:
602 tmp_event
->type
= LTTNG_EVENT_SYSCALL
;
604 case LTTNG_KERNEL_ABI_ALL
:
611 if (event
->userspace_probe_location
) {
612 struct lttng_userspace_probe_location
*location_copy
=
613 lttng_userspace_probe_location_copy(
614 event
->userspace_probe_location
);
616 if (!location_copy
) {
617 lttng_event_destroy(tmp_event
);
618 ret_code
= LTTNG_ERR_NOMEM
;
622 ret
= lttng_event_set_userspace_probe_location(tmp_event
, location_copy
);
624 lttng_event_destroy(tmp_event
);
625 lttng_userspace_probe_location_destroy(location_copy
);
626 ret_code
= LTTNG_ERR_INVALID
;
631 ret
= lttng_event_serialize(
632 tmp_event
, 0, nullptr, event
->filter_expression
, 0, nullptr, reply_payload
);
633 lttng_event_destroy(tmp_event
);
635 ret_code
= LTTNG_ERR_FATAL
;
646 * Add URI so the consumer output object. Set the correct path depending on the
647 * domain adding the default trace directory.
649 static enum lttng_error_code
add_uri_to_consumer(const ltt_session::locked_ref
& session
,
650 struct consumer_output
*consumer
,
651 struct lttng_uri
*uri
,
652 enum lttng_domain_type domain
)
655 enum lttng_error_code ret_code
= LTTNG_OK
;
659 if (consumer
== nullptr) {
660 DBG("No consumer detected. Don't add URI. Stopping.");
661 ret_code
= LTTNG_ERR_NO_CONSUMER
;
666 case LTTNG_DOMAIN_KERNEL
:
667 ret
= lttng_strncpy(consumer
->domain_subdir
,
668 DEFAULT_KERNEL_TRACE_DIR
,
669 sizeof(consumer
->domain_subdir
));
671 case LTTNG_DOMAIN_UST
:
672 ret
= lttng_strncpy(consumer
->domain_subdir
,
673 DEFAULT_UST_TRACE_DIR
,
674 sizeof(consumer
->domain_subdir
));
678 * This case is possible is we try to add the URI to the global
679 * tracing session consumer object which in this case there is
682 memset(consumer
->domain_subdir
, 0, sizeof(consumer
->domain_subdir
));
686 ERR("Failed to initialize consumer output domain subdirectory");
687 ret_code
= LTTNG_ERR_FATAL
;
691 switch (uri
->dtype
) {
694 DBG2("Setting network URI to consumer");
696 if (consumer
->type
== CONSUMER_DST_NET
) {
697 if ((uri
->stype
== LTTNG_STREAM_CONTROL
&&
698 consumer
->dst
.net
.control_isset
) ||
699 (uri
->stype
== LTTNG_STREAM_DATA
&& consumer
->dst
.net
.data_isset
)) {
700 ret_code
= LTTNG_ERR_URL_EXIST
;
704 memset(&consumer
->dst
, 0, sizeof(consumer
->dst
));
707 /* Set URI into consumer output object */
708 ret
= consumer_set_network_uri(session
, consumer
, uri
);
710 ret_code
= (lttng_error_code
) -ret
;
712 } else if (ret
== 1) {
714 * URI was the same in the consumer so we do not append the subdir
715 * again so to not duplicate output dir.
722 if (*uri
->dst
.path
!= '/' || strstr(uri
->dst
.path
, "../")) {
723 ret_code
= LTTNG_ERR_INVALID
;
726 DBG2("Setting trace directory path from URI to %s", uri
->dst
.path
);
727 memset(&consumer
->dst
, 0, sizeof(consumer
->dst
));
729 ret
= lttng_strncpy(consumer
->dst
.session_root_path
,
731 sizeof(consumer
->dst
.session_root_path
));
733 ret_code
= LTTNG_ERR_FATAL
;
736 consumer
->type
= CONSUMER_DST_LOCAL
;
746 * Init tracing by creating trace directory and sending fds kernel consumer.
748 static int init_kernel_tracing(struct ltt_kernel_session
*session
)
751 struct lttng_ht_iter iter
;
752 struct consumer_socket
*socket
;
754 LTTNG_ASSERT(session
);
756 if (session
->consumer_fds_sent
== 0 && session
->consumer
!= nullptr) {
757 const lttng::urcu::read_lock_guard read_lock
;
759 cds_lfht_for_each_entry (
760 session
->consumer
->socks
->ht
, &iter
.iter
, socket
, node
.node
) {
761 pthread_mutex_lock(socket
->lock
);
762 ret
= kernel_consumer_send_session(socket
, session
);
763 pthread_mutex_unlock(socket
->lock
);
765 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
776 * Create a socket to the relayd using the URI.
778 * On success, the relayd_sock pointer is set to the created socket.
779 * Else, it remains untouched and an LTTng error code is returned.
781 static enum lttng_error_code
create_connect_relayd(struct lttng_uri
*uri
,
782 struct lttcomm_relayd_sock
**relayd_sock
,
783 struct consumer_output
*consumer
)
786 enum lttng_error_code status
= LTTNG_OK
;
787 struct lttcomm_relayd_sock
*rsock
;
789 rsock
= lttcomm_alloc_relayd_sock(
790 uri
, RELAYD_VERSION_COMM_MAJOR
, RELAYD_VERSION_COMM_MINOR
);
792 status
= LTTNG_ERR_FATAL
;
797 * Connect to relayd so we can proceed with a session creation. This call
798 * can possibly block for an arbitrary amount of time to set the health
799 * state to be in poll execution.
802 ret
= relayd_connect(rsock
);
805 ERR("Unable to reach lttng-relayd");
806 status
= LTTNG_ERR_RELAYD_CONNECT_FAIL
;
810 /* Create socket for control stream. */
811 if (uri
->stype
== LTTNG_STREAM_CONTROL
) {
812 uint64_t result_flags
;
814 DBG3("Creating relayd stream socket from URI");
816 /* Check relayd version */
817 ret
= relayd_version_check(rsock
);
818 if (ret
== LTTNG_ERR_RELAYD_VERSION_FAIL
) {
819 status
= LTTNG_ERR_RELAYD_VERSION_FAIL
;
821 } else if (ret
< 0) {
822 ERR("Unable to reach lttng-relayd");
823 status
= LTTNG_ERR_RELAYD_CONNECT_FAIL
;
826 consumer
->relay_major_version
= rsock
->major
;
827 consumer
->relay_minor_version
= rsock
->minor
;
828 ret
= relayd_get_configuration(rsock
, 0, &result_flags
);
830 ERR("Unable to get relayd configuration");
831 status
= LTTNG_ERR_RELAYD_CONNECT_FAIL
;
834 if (result_flags
& LTTCOMM_RELAYD_CONFIGURATION_FLAG_CLEAR_ALLOWED
) {
835 consumer
->relay_allows_clear
= true;
837 } else if (uri
->stype
== LTTNG_STREAM_DATA
) {
838 DBG3("Creating relayd data socket from URI");
840 /* Command is not valid */
841 ERR("Relayd invalid stream type: %d", uri
->stype
);
842 status
= LTTNG_ERR_INVALID
;
846 *relayd_sock
= rsock
;
851 /* The returned value is not useful since we are on an error path. */
852 (void) relayd_close(rsock
);
860 * Connect to the relayd using URI and send the socket to the right consumer.
862 * The consumer socket lock must be held by the caller.
864 * Returns LTTNG_OK on success or an LTTng error code on failure.
866 static enum lttng_error_code
send_consumer_relayd_socket(unsigned int session_id
,
867 struct lttng_uri
*relayd_uri
,
868 struct consumer_output
*consumer
,
869 struct consumer_socket
*consumer_sock
,
870 const char *session_name
,
871 const char *hostname
,
872 const char *base_path
,
873 int session_live_timer
,
874 const uint64_t *current_chunk_id
,
875 time_t session_creation_time
,
876 bool session_name_contains_creation_time
)
879 struct lttcomm_relayd_sock
*rsock
= nullptr;
880 enum lttng_error_code status
;
882 /* Connect to relayd and make version check if uri is the control. */
883 status
= create_connect_relayd(relayd_uri
, &rsock
, consumer
);
884 if (status
!= LTTNG_OK
) {
885 goto relayd_comm_error
;
889 /* Set the network sequence index if not set. */
890 if (consumer
->net_seq_index
== (uint64_t) -1ULL) {
891 pthread_mutex_lock(&relayd_net_seq_idx_lock
);
893 * Increment net_seq_idx because we are about to transfer the
894 * new relayd socket to the consumer.
895 * Assign unique key so the consumer can match streams.
897 consumer
->net_seq_index
= ++relayd_net_seq_idx
;
898 pthread_mutex_unlock(&relayd_net_seq_idx_lock
);
901 /* Send relayd socket to consumer. */
902 ret
= consumer_send_relayd_socket(consumer_sock
,
912 session_creation_time
,
913 session_name_contains_creation_time
);
915 status
= LTTNG_ERR_ENABLE_CONSUMER_FAIL
;
919 /* Flag that the corresponding socket was sent. */
920 if (relayd_uri
->stype
== LTTNG_STREAM_CONTROL
) {
921 consumer_sock
->control_sock_sent
= 1;
922 } else if (relayd_uri
->stype
== LTTNG_STREAM_DATA
) {
923 consumer_sock
->data_sock_sent
= 1;
927 * Close socket which was dup on the consumer side. The session daemon does
928 * NOT keep track of the relayd socket(s) once transfer to the consumer.
932 if (status
!= LTTNG_OK
) {
934 * The consumer output for this session should not be used anymore
935 * since the relayd connection failed thus making any tracing or/and
936 * streaming not usable.
938 consumer
->enabled
= false;
940 (void) relayd_close(rsock
);
948 * Send both relayd sockets to a specific consumer and domain. This is a
949 * helper function to facilitate sending the information to the consumer for a
952 * The consumer socket lock must be held by the caller.
954 * Returns LTTNG_OK, or an LTTng error code on failure.
956 static enum lttng_error_code
send_consumer_relayd_sockets(unsigned int session_id
,
957 struct consumer_output
*consumer
,
958 struct consumer_socket
*sock
,
959 const char *session_name
,
960 const char *hostname
,
961 const char *base_path
,
962 int session_live_timer
,
963 const uint64_t *current_chunk_id
,
964 time_t session_creation_time
,
965 bool session_name_contains_creation_time
)
967 enum lttng_error_code status
= LTTNG_OK
;
969 LTTNG_ASSERT(consumer
);
972 /* Sending control relayd socket. */
973 if (!sock
->control_sock_sent
) {
974 status
= send_consumer_relayd_socket(session_id
,
975 &consumer
->dst
.net
.control
,
983 session_creation_time
,
984 session_name_contains_creation_time
);
985 if (status
!= LTTNG_OK
) {
990 /* Sending data relayd socket. */
991 if (!sock
->data_sock_sent
) {
992 status
= send_consumer_relayd_socket(session_id
,
993 &consumer
->dst
.net
.data
,
1001 session_creation_time
,
1002 session_name_contains_creation_time
);
1003 if (status
!= LTTNG_OK
) {
1013 * Setup relayd connections for a tracing session. First creates the socket to
1014 * the relayd and send them to the right domain consumer. Consumer type MUST be
1017 int cmd_setup_relayd(const ltt_session::locked_ref
& session
)
1020 struct ltt_ust_session
*usess
;
1021 struct ltt_kernel_session
*ksess
;
1022 struct consumer_socket
*socket
;
1023 struct lttng_ht_iter iter
;
1024 LTTNG_OPTIONAL(uint64_t) current_chunk_id
= {};
1026 usess
= session
->ust_session
;
1027 ksess
= session
->kernel_session
;
1029 DBG("Setting relayd for session %s", session
->name
);
1031 if (session
->current_trace_chunk
) {
1032 const lttng_trace_chunk_status status
= lttng_trace_chunk_get_id(
1033 session
->current_trace_chunk
, ¤t_chunk_id
.value
);
1035 if (status
== LTTNG_TRACE_CHUNK_STATUS_OK
) {
1036 current_chunk_id
.is_set
= true;
1038 ERR("Failed to get current trace chunk id");
1039 ret
= LTTNG_ERR_UNK
;
1044 if (usess
&& usess
->consumer
&& usess
->consumer
->type
== CONSUMER_DST_NET
&&
1045 usess
->consumer
->enabled
) {
1046 /* For each consumer socket, send relayd sockets */
1047 const lttng::urcu::read_lock_guard read_lock
;
1049 cds_lfht_for_each_entry (
1050 usess
->consumer
->socks
->ht
, &iter
.iter
, socket
, node
.node
) {
1051 pthread_mutex_lock(socket
->lock
);
1052 ret
= send_consumer_relayd_sockets(
1059 session
->live_timer
,
1060 current_chunk_id
.is_set
? ¤t_chunk_id
.value
: nullptr,
1061 session
->creation_time
,
1062 session
->name_contains_creation_time
);
1063 pthread_mutex_unlock(socket
->lock
);
1064 if (ret
!= LTTNG_OK
) {
1067 /* Session is now ready for network streaming. */
1068 session
->net_handle
= 1;
1071 session
->consumer
->relay_major_version
= usess
->consumer
->relay_major_version
;
1072 session
->consumer
->relay_minor_version
= usess
->consumer
->relay_minor_version
;
1073 session
->consumer
->relay_allows_clear
= usess
->consumer
->relay_allows_clear
;
1076 if (ksess
&& ksess
->consumer
&& ksess
->consumer
->type
== CONSUMER_DST_NET
&&
1077 ksess
->consumer
->enabled
) {
1078 const lttng::urcu::read_lock_guard read_lock
;
1080 cds_lfht_for_each_entry (
1081 ksess
->consumer
->socks
->ht
, &iter
.iter
, socket
, node
.node
) {
1082 pthread_mutex_lock(socket
->lock
);
1083 ret
= send_consumer_relayd_sockets(
1090 session
->live_timer
,
1091 current_chunk_id
.is_set
? ¤t_chunk_id
.value
: nullptr,
1092 session
->creation_time
,
1093 session
->name_contains_creation_time
);
1094 pthread_mutex_unlock(socket
->lock
);
1095 if (ret
!= LTTNG_OK
) {
1098 /* Session is now ready for network streaming. */
1099 session
->net_handle
= 1;
1102 session
->consumer
->relay_major_version
= ksess
->consumer
->relay_major_version
;
1103 session
->consumer
->relay_minor_version
= ksess
->consumer
->relay_minor_version
;
1104 session
->consumer
->relay_allows_clear
= ksess
->consumer
->relay_allows_clear
;
1112 * Start a kernel session by opening all necessary streams.
1114 int start_kernel_session(struct ltt_kernel_session
*ksess
)
1117 struct ltt_kernel_channel
*kchan
;
1119 /* Open kernel metadata */
1120 if (ksess
->metadata
== nullptr && ksess
->output_traces
) {
1121 ret
= kernel_open_metadata(ksess
);
1123 ret
= LTTNG_ERR_KERN_META_FAIL
;
1128 /* Open kernel metadata stream */
1129 if (ksess
->metadata
&& ksess
->metadata_stream_fd
< 0) {
1130 ret
= kernel_open_metadata_stream(ksess
);
1132 ERR("Kernel create metadata stream failed");
1133 ret
= LTTNG_ERR_KERN_STREAM_FAIL
;
1138 /* For each channel */
1139 cds_list_for_each_entry (kchan
, &ksess
->channel_list
.head
, list
) {
1140 if (kchan
->stream_count
== 0) {
1141 ret
= kernel_open_channel_stream(kchan
);
1143 ret
= LTTNG_ERR_KERN_STREAM_FAIL
;
1146 /* Update the stream global counter */
1147 ksess
->stream_count_global
+= ret
;
1151 /* Setup kernel consumer socket and send fds to it */
1152 ret
= init_kernel_tracing(ksess
);
1154 ret
= LTTNG_ERR_KERN_START_FAIL
;
1158 /* This start the kernel tracing */
1159 ret
= kernel_start_session(ksess
);
1161 ret
= LTTNG_ERR_KERN_START_FAIL
;
1165 /* Quiescent wait after starting trace */
1166 kernel_wait_quiescent();
1168 ksess
->active
= true;
1176 int stop_kernel_session(struct ltt_kernel_session
*ksess
)
1178 struct ltt_kernel_channel
*kchan
;
1179 bool error_occurred
= false;
1182 if (!ksess
|| !ksess
->active
) {
1185 DBG("Stopping kernel tracing");
1187 ret
= kernel_stop_session(ksess
);
1189 ret
= LTTNG_ERR_KERN_STOP_FAIL
;
1193 kernel_wait_quiescent();
1195 /* Flush metadata after stopping (if exists) */
1196 if (ksess
->metadata_stream_fd
>= 0) {
1197 ret
= kernel_metadata_flush_buffer(ksess
->metadata_stream_fd
);
1199 ERR("Kernel metadata flush failed");
1200 error_occurred
= true;
1204 /* Flush all buffers after stopping */
1205 cds_list_for_each_entry (kchan
, &ksess
->channel_list
.head
, list
) {
1206 ret
= kernel_flush_buffer(kchan
);
1208 ERR("Kernel flush buffer error");
1209 error_occurred
= true;
1213 ksess
->active
= false;
1214 if (error_occurred
) {
1215 ret
= LTTNG_ERR_UNK
;
1224 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
1226 int cmd_disable_channel(const ltt_session::locked_ref
& session
,
1227 enum lttng_domain_type domain
,
1231 struct ltt_ust_session
*usess
;
1233 usess
= session
->ust_session
;
1235 const lttng::urcu::read_lock_guard read_lock
;
1238 case LTTNG_DOMAIN_KERNEL
:
1240 ret
= channel_kernel_disable(session
->kernel_session
, channel_name
);
1241 if (ret
!= LTTNG_OK
) {
1245 kernel_wait_quiescent();
1248 case LTTNG_DOMAIN_UST
:
1250 struct ltt_ust_channel
*uchan
;
1251 struct lttng_ht
*chan_ht
;
1253 chan_ht
= usess
->domain_global
.channels
;
1255 uchan
= trace_ust_find_channel_by_name(chan_ht
, channel_name
);
1256 if (uchan
== nullptr) {
1257 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1261 ret
= channel_ust_disable(usess
, uchan
);
1262 if (ret
!= LTTNG_OK
) {
1268 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1279 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
1281 * The wpipe arguments is used as a notifier for the kernel thread.
1283 int cmd_enable_channel(command_ctx
*cmd_ctx
, ltt_session::locked_ref
& session
, int sock
, int wpipe
)
1287 ssize_t sock_recv_len
;
1288 struct lttng_channel
*channel
= nullptr;
1289 struct lttng_buffer_view view
;
1290 struct lttng_dynamic_buffer channel_buffer
;
1291 const struct lttng_domain command_domain
= cmd_ctx
->lsm
.domain
;
1293 lttng_dynamic_buffer_init(&channel_buffer
);
1294 channel_len
= (size_t) cmd_ctx
->lsm
.u
.channel
.length
;
1295 ret
= lttng_dynamic_buffer_set_size(&channel_buffer
, channel_len
);
1297 ret
= LTTNG_ERR_NOMEM
;
1301 sock_recv_len
= lttcomm_recv_unix_sock(sock
, channel_buffer
.data
, channel_len
);
1302 if (sock_recv_len
< 0 || sock_recv_len
!= channel_len
) {
1303 ERR("Failed to receive \"enable channel\" command payload");
1304 ret
= LTTNG_ERR_INVALID
;
1308 view
= lttng_buffer_view_from_dynamic_buffer(&channel_buffer
, 0, channel_len
);
1309 if (!lttng_buffer_view_is_valid(&view
)) {
1310 ret
= LTTNG_ERR_INVALID
;
1314 if (lttng_channel_create_from_buffer(&view
, &channel
) != channel_len
) {
1315 ERR("Invalid channel payload received in \"enable channel\" command");
1316 ret
= LTTNG_ERR_INVALID
;
1320 ret
= cmd_enable_channel_internal(session
, &command_domain
, channel
, wpipe
);
1323 lttng_dynamic_buffer_reset(&channel_buffer
);
1324 lttng_channel_destroy(channel
);
1328 static enum lttng_error_code
cmd_enable_channel_internal(ltt_session::locked_ref
& session
,
1329 const struct lttng_domain
*domain
,
1330 const struct lttng_channel
*_attr
,
1333 enum lttng_error_code ret_code
;
1334 struct ltt_ust_session
*usess
= session
->ust_session
;
1335 struct lttng_ht
*chan_ht
;
1337 struct lttng_channel
*attr
= nullptr;
1339 LTTNG_ASSERT(_attr
);
1340 LTTNG_ASSERT(domain
);
1342 const lttng::urcu::read_lock_guard read_lock
;
1344 attr
= lttng_channel_copy(_attr
);
1346 ret_code
= LTTNG_ERR_NOMEM
;
1350 len
= lttng_strnlen(attr
->name
, sizeof(attr
->name
));
1352 /* Validate channel name */
1353 if (attr
->name
[0] == '.' || memchr(attr
->name
, '/', len
) != nullptr) {
1354 ret_code
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1358 DBG("Enabling channel %s for session %s", attr
->name
, session
->name
);
1361 * If the session is a live session, remove the switch timer, the
1362 * live timer does the same thing but sends also synchronisation
1363 * beacons for inactive streams.
1365 if (session
->live_timer
> 0) {
1366 attr
->attr
.live_timer_interval
= session
->live_timer
;
1367 attr
->attr
.switch_timer_interval
= 0;
1370 /* Check for feature support */
1371 switch (domain
->type
) {
1372 case LTTNG_DOMAIN_KERNEL
:
1374 if (kernel_supports_ring_buffer_snapshot_sample_positions() != 1) {
1375 /* Sampling position of buffer is not supported */
1376 WARN("Kernel tracer does not support buffer monitoring. "
1377 "Setting the monitor interval timer to 0 "
1378 "(disabled) for channel '%s' of session '%s'",
1381 lttng_channel_set_monitor_timer_interval(attr
, 0);
1385 case LTTNG_DOMAIN_UST
:
1387 case LTTNG_DOMAIN_JUL
:
1388 case LTTNG_DOMAIN_LOG4J
:
1389 case LTTNG_DOMAIN_PYTHON
:
1390 if (!agent_tracing_is_enabled()) {
1391 DBG("Attempted to enable a channel in an agent domain but the agent thread is not running");
1392 ret_code
= LTTNG_ERR_AGENT_TRACING_DISABLED
;
1397 ret_code
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1401 switch (domain
->type
) {
1402 case LTTNG_DOMAIN_KERNEL
:
1404 struct ltt_kernel_channel
*kchan
;
1406 kchan
= trace_kernel_get_channel_by_name(attr
->name
, session
->kernel_session
);
1407 if (kchan
== nullptr) {
1409 * Don't try to create a channel if the session has been started at
1410 * some point in time before. The tracer does not allow it.
1412 if (session
->has_been_started
) {
1413 ret_code
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
1417 if (session
->snapshot
.nb_output
> 0 || session
->snapshot_mode
) {
1418 /* Enforce mmap output for snapshot sessions. */
1419 attr
->attr
.output
= LTTNG_EVENT_MMAP
;
1421 ret_code
= channel_kernel_create(session
->kernel_session
, attr
, wpipe
);
1422 if (attr
->name
[0] != '\0') {
1423 session
->kernel_session
->has_non_default_channel
= 1;
1426 ret_code
= channel_kernel_enable(session
->kernel_session
, kchan
);
1429 if (ret_code
!= LTTNG_OK
) {
1433 kernel_wait_quiescent();
1436 case LTTNG_DOMAIN_UST
:
1437 case LTTNG_DOMAIN_JUL
:
1438 case LTTNG_DOMAIN_LOG4J
:
1439 case LTTNG_DOMAIN_PYTHON
:
1441 struct ltt_ust_channel
*uchan
;
1446 * Current agent implementation limitations force us to allow
1447 * only one channel at once in "agent" subdomains. Each
1448 * subdomain has a default channel name which must be strictly
1451 if (domain
->type
== LTTNG_DOMAIN_JUL
) {
1452 if (strncmp(attr
->name
,
1453 DEFAULT_JUL_CHANNEL_NAME
,
1454 LTTNG_SYMBOL_NAME_LEN
- 1) != 0) {
1455 ret_code
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1458 } else if (domain
->type
== LTTNG_DOMAIN_LOG4J
) {
1459 if (strncmp(attr
->name
,
1460 DEFAULT_LOG4J_CHANNEL_NAME
,
1461 LTTNG_SYMBOL_NAME_LEN
- 1) != 0) {
1462 ret_code
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1465 } else if (domain
->type
== LTTNG_DOMAIN_PYTHON
) {
1466 if (strncmp(attr
->name
,
1467 DEFAULT_PYTHON_CHANNEL_NAME
,
1468 LTTNG_SYMBOL_NAME_LEN
- 1) != 0) {
1469 ret_code
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1474 chan_ht
= usess
->domain_global
.channels
;
1476 uchan
= trace_ust_find_channel_by_name(chan_ht
, attr
->name
);
1477 if (uchan
== nullptr) {
1479 * Don't try to create a channel if the session has been started at
1480 * some point in time before. The tracer does not allow it.
1482 if (session
->has_been_started
) {
1483 ret_code
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
1487 ret_code
= channel_ust_create(usess
, attr
, domain
->buf_type
);
1488 if (attr
->name
[0] != '\0') {
1489 usess
->has_non_default_channel
= 1;
1492 ret_code
= channel_ust_enable(usess
, uchan
);
1497 ret_code
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1501 if (ret_code
== LTTNG_OK
&& attr
->attr
.output
!= LTTNG_EVENT_MMAP
) {
1502 session
->has_non_mmap_channel
= true;
1506 lttng_channel_destroy(attr
);
1510 enum lttng_error_code
1511 cmd_process_attr_tracker_get_tracking_policy(const ltt_session::locked_ref
& session
,
1512 enum lttng_domain_type domain
,
1513 enum lttng_process_attr process_attr
,
1514 enum lttng_tracking_policy
*policy
)
1516 enum lttng_error_code ret_code
= LTTNG_OK
;
1517 const struct process_attr_tracker
*tracker
;
1520 case LTTNG_DOMAIN_KERNEL
:
1521 if (!session
->kernel_session
) {
1522 ret_code
= LTTNG_ERR_INVALID
;
1525 tracker
= kernel_get_process_attr_tracker(session
->kernel_session
, process_attr
);
1527 case LTTNG_DOMAIN_UST
:
1528 if (!session
->ust_session
) {
1529 ret_code
= LTTNG_ERR_INVALID
;
1532 tracker
= trace_ust_get_process_attr_tracker(session
->ust_session
, process_attr
);
1535 ret_code
= LTTNG_ERR_UNSUPPORTED_DOMAIN
;
1539 *policy
= process_attr_tracker_get_tracking_policy(tracker
);
1541 ret_code
= LTTNG_ERR_INVALID
;
1547 enum lttng_error_code
1548 cmd_process_attr_tracker_set_tracking_policy(const ltt_session::locked_ref
& session
,
1549 enum lttng_domain_type domain
,
1550 enum lttng_process_attr process_attr
,
1551 enum lttng_tracking_policy policy
)
1553 enum lttng_error_code ret_code
= LTTNG_OK
;
1556 case LTTNG_TRACKING_POLICY_INCLUDE_SET
:
1557 case LTTNG_TRACKING_POLICY_EXCLUDE_ALL
:
1558 case LTTNG_TRACKING_POLICY_INCLUDE_ALL
:
1561 ret_code
= LTTNG_ERR_INVALID
;
1566 case LTTNG_DOMAIN_KERNEL
:
1567 if (!session
->kernel_session
) {
1568 ret_code
= LTTNG_ERR_INVALID
;
1571 ret_code
= kernel_process_attr_tracker_set_tracking_policy(
1572 session
->kernel_session
, process_attr
, policy
);
1574 case LTTNG_DOMAIN_UST
:
1575 if (!session
->ust_session
) {
1576 ret_code
= LTTNG_ERR_INVALID
;
1579 ret_code
= trace_ust_process_attr_tracker_set_tracking_policy(
1580 session
->ust_session
, process_attr
, policy
);
1583 ret_code
= LTTNG_ERR_UNSUPPORTED_DOMAIN
;
1590 enum lttng_error_code
1591 cmd_process_attr_tracker_inclusion_set_add_value(const ltt_session::locked_ref
& session
,
1592 enum lttng_domain_type domain
,
1593 enum lttng_process_attr process_attr
,
1594 const struct process_attr_value
*value
)
1596 enum lttng_error_code ret_code
= LTTNG_OK
;
1599 case LTTNG_DOMAIN_KERNEL
:
1600 if (!session
->kernel_session
) {
1601 ret_code
= LTTNG_ERR_INVALID
;
1604 ret_code
= kernel_process_attr_tracker_inclusion_set_add_value(
1605 session
->kernel_session
, process_attr
, value
);
1607 case LTTNG_DOMAIN_UST
:
1608 if (!session
->ust_session
) {
1609 ret_code
= LTTNG_ERR_INVALID
;
1612 ret_code
= trace_ust_process_attr_tracker_inclusion_set_add_value(
1613 session
->ust_session
, process_attr
, value
);
1616 ret_code
= LTTNG_ERR_UNSUPPORTED_DOMAIN
;
1623 enum lttng_error_code
1624 cmd_process_attr_tracker_inclusion_set_remove_value(const ltt_session::locked_ref
& session
,
1625 enum lttng_domain_type domain
,
1626 enum lttng_process_attr process_attr
,
1627 const struct process_attr_value
*value
)
1629 enum lttng_error_code ret_code
= LTTNG_OK
;
1632 case LTTNG_DOMAIN_KERNEL
:
1633 if (!session
->kernel_session
) {
1634 ret_code
= LTTNG_ERR_INVALID
;
1637 ret_code
= kernel_process_attr_tracker_inclusion_set_remove_value(
1638 session
->kernel_session
, process_attr
, value
);
1640 case LTTNG_DOMAIN_UST
:
1641 if (!session
->ust_session
) {
1642 ret_code
= LTTNG_ERR_INVALID
;
1645 ret_code
= trace_ust_process_attr_tracker_inclusion_set_remove_value(
1646 session
->ust_session
, process_attr
, value
);
1649 ret_code
= LTTNG_ERR_UNSUPPORTED_DOMAIN
;
1656 enum lttng_error_code
1657 cmd_process_attr_tracker_get_inclusion_set(const ltt_session::locked_ref
& session
,
1658 enum lttng_domain_type domain
,
1659 enum lttng_process_attr process_attr
,
1660 struct lttng_process_attr_values
**values
)
1662 enum lttng_error_code ret_code
= LTTNG_OK
;
1663 const struct process_attr_tracker
*tracker
;
1664 enum process_attr_tracker_status status
;
1667 case LTTNG_DOMAIN_KERNEL
:
1668 if (!session
->kernel_session
) {
1669 ret_code
= LTTNG_ERR_INVALID
;
1672 tracker
= kernel_get_process_attr_tracker(session
->kernel_session
, process_attr
);
1674 case LTTNG_DOMAIN_UST
:
1675 if (!session
->ust_session
) {
1676 ret_code
= LTTNG_ERR_INVALID
;
1679 tracker
= trace_ust_get_process_attr_tracker(session
->ust_session
, process_attr
);
1682 ret_code
= LTTNG_ERR_UNSUPPORTED_DOMAIN
;
1687 ret_code
= LTTNG_ERR_INVALID
;
1691 status
= process_attr_tracker_get_inclusion_set(tracker
, values
);
1693 case PROCESS_ATTR_TRACKER_STATUS_OK
:
1694 ret_code
= LTTNG_OK
;
1696 case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY
:
1697 ret_code
= LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY
;
1699 case PROCESS_ATTR_TRACKER_STATUS_ERROR
:
1700 ret_code
= LTTNG_ERR_NOMEM
;
1703 ret_code
= LTTNG_ERR_UNK
;
1712 * Command LTTNG_DISABLE_EVENT processed by the client thread.
1714 int cmd_disable_event(struct command_ctx
*cmd_ctx
,
1715 ltt_session::locked_ref
& locked_session
,
1716 struct lttng_event
*event
,
1717 char *filter_expression
,
1718 struct lttng_bytecode
*bytecode
,
1719 struct lttng_event_exclusion
*exclusion
)
1722 const ltt_session
& session
= *locked_session
;
1723 const char *event_name
;
1724 const char *channel_name
= cmd_ctx
->lsm
.u
.disable
.channel_name
;
1725 const enum lttng_domain_type domain
= cmd_ctx
->lsm
.domain
.type
;
1727 DBG("Disable event command for event \'%s\'", event
->name
);
1730 * Filter and exclusions are simply not handled by the
1731 * disable event command at this time.
1735 (void) filter_expression
;
1738 /* Ignore the presence of filter or exclusion for the event */
1740 event
->exclusion
= 0;
1742 event_name
= event
->name
;
1744 const lttng::urcu::read_lock_guard read_lock
;
1746 /* Error out on unhandled search criteria */
1747 if (event
->loglevel_type
|| event
->loglevel
!= -1 || event
->enabled
|| event
->pid
||
1748 event
->filter
|| event
->exclusion
) {
1749 ret
= LTTNG_ERR_UNK
;
1754 case LTTNG_DOMAIN_KERNEL
:
1756 struct ltt_kernel_channel
*kchan
;
1757 struct ltt_kernel_session
*ksess
;
1759 ksess
= session
.kernel_session
;
1762 * If a non-default channel has been created in the
1763 * session, explicitely require that -c chan_name needs
1766 if (ksess
->has_non_default_channel
&& channel_name
[0] == '\0') {
1767 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
1771 kchan
= trace_kernel_get_channel_by_name(channel_name
, ksess
);
1772 if (kchan
== nullptr) {
1773 ret
= LTTNG_ERR_KERN_CHAN_NOT_FOUND
;
1777 switch (event
->type
) {
1778 case LTTNG_EVENT_ALL
:
1779 case LTTNG_EVENT_TRACEPOINT
:
1780 case LTTNG_EVENT_SYSCALL
:
1781 case LTTNG_EVENT_PROBE
:
1782 case LTTNG_EVENT_FUNCTION
:
1783 case LTTNG_EVENT_FUNCTION_ENTRY
: /* fall-through */
1784 if (event_name
[0] == '\0') {
1785 ret
= event_kernel_disable_event(kchan
, nullptr, event
->type
);
1787 ret
= event_kernel_disable_event(kchan
, event_name
, event
->type
);
1789 if (ret
!= LTTNG_OK
) {
1794 ret
= LTTNG_ERR_UNK
;
1798 kernel_wait_quiescent();
1801 case LTTNG_DOMAIN_UST
:
1803 struct ltt_ust_channel
*uchan
;
1804 struct ltt_ust_session
*usess
;
1806 usess
= session
.ust_session
;
1808 if (validate_ust_event_name(event_name
)) {
1809 ret
= LTTNG_ERR_INVALID_EVENT_NAME
;
1814 * If a non-default channel has been created in the
1815 * session, explicitly require that -c chan_name needs
1818 if (usess
->has_non_default_channel
&& channel_name
[0] == '\0') {
1819 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
1823 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
, channel_name
);
1824 if (uchan
== nullptr) {
1825 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1829 switch (event
->type
) {
1830 case LTTNG_EVENT_ALL
:
1832 * An empty event name means that everything
1833 * should be disabled.
1835 if (event
->name
[0] == '\0') {
1836 ret
= event_ust_disable_all_tracepoints(usess
, uchan
);
1838 ret
= event_ust_disable_tracepoint(usess
, uchan
, event_name
);
1840 if (ret
!= LTTNG_OK
) {
1845 ret
= LTTNG_ERR_UNK
;
1849 DBG3("Disable UST event %s in channel %s completed", event_name
, channel_name
);
1852 case LTTNG_DOMAIN_LOG4J
:
1853 case LTTNG_DOMAIN_JUL
:
1854 case LTTNG_DOMAIN_PYTHON
:
1857 struct ltt_ust_session
*usess
= session
.ust_session
;
1859 LTTNG_ASSERT(usess
);
1861 switch (event
->type
) {
1862 case LTTNG_EVENT_ALL
:
1865 ret
= LTTNG_ERR_UNK
;
1869 agt
= trace_ust_find_agent(usess
, domain
);
1871 ret
= -LTTNG_ERR_UST_EVENT_NOT_FOUND
;
1875 * An empty event name means that everything
1876 * should be disabled.
1878 if (event
->name
[0] == '\0') {
1879 ret
= event_agent_disable_all(usess
, agt
);
1881 ret
= event_agent_disable(usess
, agt
, event_name
);
1883 if (ret
!= LTTNG_OK
) {
1890 ret
= LTTNG_ERR_UND
;
1900 free(filter_expression
);
1905 * Command LTTNG_ADD_CONTEXT processed by the client thread.
1907 int cmd_add_context(struct command_ctx
*cmd_ctx
,
1908 ltt_session::locked_ref
& locked_session
,
1909 const struct lttng_event_context
*event_context
,
1912 int ret
, chan_kern_created
= 0, chan_ust_created
= 0;
1913 const enum lttng_domain_type domain
= cmd_ctx
->lsm
.domain
.type
;
1914 const struct ltt_session
& session
= *locked_session
;
1915 const char *channel_name
= cmd_ctx
->lsm
.u
.context
.channel_name
;
1918 * Don't try to add a context if the session has been started at
1919 * some point in time before. The tracer does not allow it and would
1920 * result in a corrupted trace.
1922 if (session
.has_been_started
) {
1923 ret
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
1928 case LTTNG_DOMAIN_KERNEL
:
1929 LTTNG_ASSERT(session
.kernel_session
);
1931 if (session
.kernel_session
->channel_count
== 0) {
1932 /* Create default channel */
1933 ret
= channel_kernel_create(session
.kernel_session
, nullptr, kwpipe
);
1934 if (ret
!= LTTNG_OK
) {
1937 chan_kern_created
= 1;
1939 /* Add kernel context to kernel tracer */
1940 ret
= context_kernel_add(session
.kernel_session
, event_context
, channel_name
);
1941 if (ret
!= LTTNG_OK
) {
1945 case LTTNG_DOMAIN_JUL
:
1946 case LTTNG_DOMAIN_LOG4J
:
1949 * Validate channel name.
1950 * If no channel name is given and the domain is JUL or LOG4J,
1951 * set it to the appropriate domain-specific channel name. If
1952 * a name is provided but does not match the expexted channel
1953 * name, return an error.
1955 if (domain
== LTTNG_DOMAIN_JUL
&& *channel_name
&&
1956 strcmp(channel_name
, DEFAULT_JUL_CHANNEL_NAME
) != 0) {
1957 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1959 } else if (domain
== LTTNG_DOMAIN_LOG4J
&& *channel_name
&&
1960 strcmp(channel_name
, DEFAULT_LOG4J_CHANNEL_NAME
) != 0) {
1961 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1966 case LTTNG_DOMAIN_UST
:
1968 struct ltt_ust_session
*usess
= session
.ust_session
;
1969 unsigned int chan_count
;
1971 LTTNG_ASSERT(usess
);
1973 chan_count
= lttng_ht_get_count(usess
->domain_global
.channels
);
1974 if (chan_count
== 0) {
1975 struct lttng_channel
*attr
;
1976 /* Create default channel */
1977 attr
= channel_new_default_attr(domain
, usess
->buffer_type
);
1978 if (attr
== nullptr) {
1979 ret
= LTTNG_ERR_FATAL
;
1983 ret
= channel_ust_create(usess
, attr
, usess
->buffer_type
);
1984 if (ret
!= LTTNG_OK
) {
1988 channel_attr_destroy(attr
);
1989 chan_ust_created
= 1;
1992 ret
= context_ust_add(usess
, domain
, event_context
, channel_name
);
1993 if (ret
!= LTTNG_OK
) {
1999 ret
= LTTNG_ERR_UND
;
2007 if (chan_kern_created
) {
2008 struct ltt_kernel_channel
*kchan
= trace_kernel_get_channel_by_name(
2009 DEFAULT_CHANNEL_NAME
, session
.kernel_session
);
2010 /* Created previously, this should NOT fail. */
2011 LTTNG_ASSERT(kchan
);
2012 kernel_destroy_channel(kchan
);
2015 if (chan_ust_created
) {
2016 struct ltt_ust_channel
*uchan
= trace_ust_find_channel_by_name(
2017 session
.ust_session
->domain_global
.channels
, DEFAULT_CHANNEL_NAME
);
2018 /* Created previously, this should NOT fail. */
2019 LTTNG_ASSERT(uchan
);
2020 /* Remove from the channel list of the session. */
2021 trace_ust_delete_channel(session
.ust_session
->domain_global
.channels
, uchan
);
2022 trace_ust_destroy_channel(uchan
);
2028 static inline bool name_starts_with(const char *name
, const char *prefix
)
2030 const size_t max_cmp_len
= std::min(strlen(prefix
), (size_t) LTTNG_SYMBOL_NAME_LEN
);
2032 return !strncmp(name
, prefix
, max_cmp_len
);
2035 /* Perform userspace-specific event name validation */
2036 static int validate_ust_event_name(const char *name
)
2046 * Check name against all internal UST event component namespaces used
2049 if (name_starts_with(name
, DEFAULT_JUL_EVENT_COMPONENT
) ||
2050 name_starts_with(name
, DEFAULT_LOG4J_EVENT_COMPONENT
) ||
2051 name_starts_with(name
, DEFAULT_PYTHON_EVENT_COMPONENT
)) {
2060 * Internal version of cmd_enable_event() with a supplemental
2061 * "internal_event" flag which is used to enable internal events which should
2062 * be hidden from clients. Such events are used in the agent implementation to
2063 * enable the events through which all "agent" events are funeled.
2065 static int _cmd_enable_event(ltt_session::locked_ref
& locked_session
,
2066 const struct lttng_domain
*domain
,
2068 struct lttng_event
*event
,
2069 char *filter_expression
,
2070 struct lttng_bytecode
*filter
,
2071 struct lttng_event_exclusion
*exclusion
,
2073 bool internal_event
)
2075 int ret
= 0, channel_created
= 0;
2076 struct lttng_channel
*attr
= nullptr;
2077 const ltt_session
& session
= *locked_session
;
2079 LTTNG_ASSERT(event
);
2080 LTTNG_ASSERT(channel_name
);
2082 /* If we have a filter, we must have its filter expression */
2083 LTTNG_ASSERT(!(!!filter_expression
^ !!filter
));
2085 /* Normalize event name as a globbing pattern */
2086 strutils_normalize_star_glob_pattern(event
->name
);
2088 /* Normalize exclusion names as globbing patterns */
2092 for (i
= 0; i
< exclusion
->count
; i
++) {
2093 char *name
= LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion
, i
);
2095 strutils_normalize_star_glob_pattern(name
);
2099 const lttng::urcu::read_lock_guard read_lock
;
2101 switch (domain
->type
) {
2102 case LTTNG_DOMAIN_KERNEL
:
2104 struct ltt_kernel_channel
*kchan
;
2107 * If a non-default channel has been created in the
2108 * session, explicitely require that -c chan_name needs
2111 if (session
.kernel_session
->has_non_default_channel
&& channel_name
[0] == '\0') {
2112 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
2116 kchan
= trace_kernel_get_channel_by_name(channel_name
, session
.kernel_session
);
2117 if (kchan
== nullptr) {
2118 attr
= channel_new_default_attr(LTTNG_DOMAIN_KERNEL
, LTTNG_BUFFER_GLOBAL
);
2119 if (attr
== nullptr) {
2120 ret
= LTTNG_ERR_FATAL
;
2123 if (lttng_strncpy(attr
->name
, channel_name
, sizeof(attr
->name
))) {
2124 ret
= LTTNG_ERR_INVALID
;
2128 ret
= cmd_enable_channel_internal(locked_session
, domain
, attr
, wpipe
);
2129 if (ret
!= LTTNG_OK
) {
2132 channel_created
= 1;
2135 /* Get the newly created kernel channel pointer */
2136 kchan
= trace_kernel_get_channel_by_name(channel_name
, session
.kernel_session
);
2137 if (kchan
== nullptr) {
2138 /* This sould not happen... */
2139 ret
= LTTNG_ERR_FATAL
;
2143 switch (event
->type
) {
2144 case LTTNG_EVENT_ALL
:
2146 char *filter_expression_a
= nullptr;
2147 struct lttng_bytecode
*filter_a
= nullptr;
2150 * We need to duplicate filter_expression and filter,
2151 * because ownership is passed to first enable
2154 if (filter_expression
) {
2155 filter_expression_a
= strdup(filter_expression
);
2156 if (!filter_expression_a
) {
2157 ret
= LTTNG_ERR_FATAL
;
2162 filter_a
= zmalloc
<lttng_bytecode
>(sizeof(*filter_a
) + filter
->len
);
2164 free(filter_expression_a
);
2165 ret
= LTTNG_ERR_FATAL
;
2168 memcpy(filter_a
, filter
, sizeof(*filter_a
) + filter
->len
);
2170 event
->type
= LTTNG_EVENT_TRACEPOINT
; /* Hack */
2171 ret
= event_kernel_enable_event(kchan
, event
, filter_expression
, filter
);
2172 /* We have passed ownership */
2173 filter_expression
= nullptr;
2175 if (ret
!= LTTNG_OK
) {
2176 if (channel_created
) {
2177 /* Let's not leak a useless channel. */
2178 kernel_destroy_channel(kchan
);
2180 free(filter_expression_a
);
2184 event
->type
= LTTNG_EVENT_SYSCALL
; /* Hack */
2185 ret
= event_kernel_enable_event(
2186 kchan
, event
, filter_expression_a
, filter_a
);
2187 /* We have passed ownership */
2188 filter_expression_a
= nullptr;
2190 if (ret
!= LTTNG_OK
) {
2195 case LTTNG_EVENT_PROBE
:
2196 case LTTNG_EVENT_USERSPACE_PROBE
:
2197 case LTTNG_EVENT_FUNCTION
:
2198 case LTTNG_EVENT_FUNCTION_ENTRY
:
2199 case LTTNG_EVENT_TRACEPOINT
:
2200 ret
= event_kernel_enable_event(kchan
, event
, filter_expression
, filter
);
2201 /* We have passed ownership */
2202 filter_expression
= nullptr;
2204 if (ret
!= LTTNG_OK
) {
2205 if (channel_created
) {
2206 /* Let's not leak a useless channel. */
2207 kernel_destroy_channel(kchan
);
2212 case LTTNG_EVENT_SYSCALL
:
2213 ret
= event_kernel_enable_event(kchan
, event
, filter_expression
, filter
);
2214 /* We have passed ownership */
2215 filter_expression
= nullptr;
2217 if (ret
!= LTTNG_OK
) {
2222 ret
= LTTNG_ERR_UNK
;
2226 kernel_wait_quiescent();
2229 case LTTNG_DOMAIN_UST
:
2231 struct ltt_ust_channel
*uchan
;
2232 struct ltt_ust_session
*usess
= session
.ust_session
;
2234 LTTNG_ASSERT(usess
);
2237 * If a non-default channel has been created in the
2238 * session, explicitely require that -c chan_name needs
2241 if (usess
->has_non_default_channel
&& channel_name
[0] == '\0') {
2242 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
2246 /* Get channel from global UST domain */
2247 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
, channel_name
);
2248 if (uchan
== nullptr) {
2249 /* Create default channel */
2250 attr
= channel_new_default_attr(LTTNG_DOMAIN_UST
, usess
->buffer_type
);
2251 if (attr
== nullptr) {
2252 ret
= LTTNG_ERR_FATAL
;
2255 if (lttng_strncpy(attr
->name
, channel_name
, sizeof(attr
->name
))) {
2256 ret
= LTTNG_ERR_INVALID
;
2260 ret
= cmd_enable_channel_internal(locked_session
, domain
, attr
, wpipe
);
2261 if (ret
!= LTTNG_OK
) {
2265 /* Get the newly created channel reference back */
2266 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
2268 LTTNG_ASSERT(uchan
);
2271 if (uchan
->domain
!= LTTNG_DOMAIN_UST
&& !internal_event
) {
2273 * Don't allow users to add UST events to channels which
2274 * are assigned to a userspace subdomain (JUL, Log4J,
2277 ret
= LTTNG_ERR_INVALID_CHANNEL_DOMAIN
;
2281 if (!internal_event
) {
2283 * Ensure the event name is not reserved for internal
2286 ret
= validate_ust_event_name(event
->name
);
2288 WARN("Userspace event name %s failed validation.", event
->name
);
2289 ret
= LTTNG_ERR_INVALID_EVENT_NAME
;
2294 /* At this point, the session and channel exist on the tracer */
2295 ret
= event_ust_enable_tracepoint(
2296 usess
, uchan
, event
, filter_expression
, filter
, exclusion
, internal_event
);
2297 /* We have passed ownership */
2298 filter_expression
= nullptr;
2300 exclusion
= nullptr;
2301 if (ret
== LTTNG_ERR_UST_EVENT_ENABLED
) {
2302 goto already_enabled
;
2303 } else if (ret
!= LTTNG_OK
) {
2308 case LTTNG_DOMAIN_LOG4J
:
2309 case LTTNG_DOMAIN_JUL
:
2310 case LTTNG_DOMAIN_PYTHON
:
2312 const char *default_event_name
, *default_chan_name
;
2314 struct lttng_event uevent
;
2315 struct lttng_domain tmp_dom
;
2316 struct ltt_ust_session
*usess
= session
.ust_session
;
2318 LTTNG_ASSERT(usess
);
2320 if (!agent_tracing_is_enabled()) {
2321 DBG("Attempted to enable an event in an agent domain but the agent thread is not running");
2322 ret
= LTTNG_ERR_AGENT_TRACING_DISABLED
;
2326 agt
= trace_ust_find_agent(usess
, domain
->type
);
2328 agt
= agent_create(domain
->type
);
2330 ret
= LTTNG_ERR_NOMEM
;
2333 agent_add(agt
, usess
->agents
);
2336 /* Create the default tracepoint. */
2337 memset(&uevent
, 0, sizeof(uevent
));
2338 uevent
.type
= LTTNG_EVENT_TRACEPOINT
;
2339 uevent
.loglevel_type
= LTTNG_EVENT_LOGLEVEL_ALL
;
2340 uevent
.loglevel
= -1;
2341 default_event_name
= event_get_default_agent_ust_name(domain
->type
);
2342 if (!default_event_name
) {
2343 ret
= LTTNG_ERR_FATAL
;
2346 strncpy(uevent
.name
, default_event_name
, sizeof(uevent
.name
));
2347 uevent
.name
[sizeof(uevent
.name
) - 1] = '\0';
2350 * The domain type is changed because we are about to enable the
2351 * default channel and event for the JUL domain that are hardcoded.
2352 * This happens in the UST domain.
2354 memcpy(&tmp_dom
, domain
, sizeof(tmp_dom
));
2355 tmp_dom
.type
= LTTNG_DOMAIN_UST
;
2357 switch (domain
->type
) {
2358 case LTTNG_DOMAIN_LOG4J
:
2359 default_chan_name
= DEFAULT_LOG4J_CHANNEL_NAME
;
2361 case LTTNG_DOMAIN_JUL
:
2362 default_chan_name
= DEFAULT_JUL_CHANNEL_NAME
;
2364 case LTTNG_DOMAIN_PYTHON
:
2365 default_chan_name
= DEFAULT_PYTHON_CHANNEL_NAME
;
2368 /* The switch/case we are in makes this impossible */
2373 char *filter_expression_copy
= nullptr;
2374 struct lttng_bytecode
*filter_copy
= nullptr;
2377 const size_t filter_size
=
2378 sizeof(struct lttng_bytecode
) + filter
->len
;
2380 filter_copy
= zmalloc
<lttng_bytecode
>(filter_size
);
2382 ret
= LTTNG_ERR_NOMEM
;
2385 memcpy(filter_copy
, filter
, filter_size
);
2387 filter_expression_copy
= strdup(filter_expression
);
2388 if (!filter_expression
) {
2389 ret
= LTTNG_ERR_NOMEM
;
2392 if (!filter_expression_copy
|| !filter_copy
) {
2393 free(filter_expression_copy
);
2399 ret
= cmd_enable_event_internal(locked_session
,
2401 (char *) default_chan_name
,
2403 filter_expression_copy
,
2409 if (ret
== LTTNG_ERR_UST_EVENT_ENABLED
) {
2410 goto already_enabled
;
2411 } else if (ret
!= LTTNG_OK
) {
2415 /* The wild card * means that everything should be enabled. */
2416 if (strncmp(event
->name
, "*", 1) == 0 && strlen(event
->name
) == 1) {
2417 ret
= event_agent_enable_all(usess
, agt
, event
, filter
, filter_expression
);
2419 ret
= event_agent_enable(usess
, agt
, event
, filter
, filter_expression
);
2422 filter_expression
= nullptr;
2423 if (ret
!= LTTNG_OK
) {
2430 ret
= LTTNG_ERR_UND
;
2438 free(filter_expression
);
2441 channel_attr_destroy(attr
);
2446 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2447 * We own filter, exclusion, and filter_expression.
2449 int cmd_enable_event(struct command_ctx
*cmd_ctx
,
2450 ltt_session::locked_ref
& locked_session
,
2451 struct lttng_event
*event
,
2452 char *filter_expression
,
2453 struct lttng_event_exclusion
*exclusion
,
2454 struct lttng_bytecode
*bytecode
,
2459 * Copied to ensure proper alignment since 'lsm' is a packed structure.
2461 const lttng_domain command_domain
= cmd_ctx
->lsm
.domain
;
2464 * The ownership of the following parameters is transferred to
2465 * _cmd_enable_event:
2467 * - filter_expression,
2471 ret
= _cmd_enable_event(locked_session
,
2473 cmd_ctx
->lsm
.u
.enable
.channel_name
,
2480 filter_expression
= nullptr;
2482 exclusion
= nullptr;
2487 * Enable an event which is internal to LTTng. An internal should
2488 * never be made visible to clients and are immune to checks such as
2491 static int cmd_enable_event_internal(ltt_session::locked_ref
& locked_session
,
2492 const struct lttng_domain
*domain
,
2494 struct lttng_event
*event
,
2495 char *filter_expression
,
2496 struct lttng_bytecode
*filter
,
2497 struct lttng_event_exclusion
*exclusion
,
2500 return _cmd_enable_event(locked_session
,
2512 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
2514 enum lttng_error_code
cmd_list_tracepoints(enum lttng_domain_type domain
,
2515 struct lttng_payload
*reply_payload
)
2517 enum lttng_error_code ret_code
;
2519 ssize_t i
, nb_events
= 0;
2520 struct lttng_event
*events
= nullptr;
2521 struct lttcomm_list_command_header reply_command_header
= {};
2522 size_t reply_command_header_offset
;
2524 assert(reply_payload
);
2526 /* Reserve space for command reply header. */
2527 reply_command_header_offset
= reply_payload
->buffer
.size
;
2528 ret
= lttng_dynamic_buffer_set_size(&reply_payload
->buffer
,
2529 reply_command_header_offset
+
2530 sizeof(struct lttcomm_list_command_header
));
2532 ret_code
= LTTNG_ERR_NOMEM
;
2537 case LTTNG_DOMAIN_KERNEL
:
2538 nb_events
= kernel_list_events(&events
);
2539 if (nb_events
< 0) {
2540 ret_code
= LTTNG_ERR_KERN_LIST_FAIL
;
2544 case LTTNG_DOMAIN_UST
:
2545 nb_events
= ust_app_list_events(&events
);
2546 if (nb_events
< 0) {
2547 ret_code
= LTTNG_ERR_UST_LIST_FAIL
;
2551 case LTTNG_DOMAIN_LOG4J
:
2552 case LTTNG_DOMAIN_JUL
:
2553 case LTTNG_DOMAIN_PYTHON
:
2554 nb_events
= agent_list_events(&events
, domain
);
2555 if (nb_events
< 0) {
2556 ret_code
= LTTNG_ERR_UST_LIST_FAIL
;
2561 ret_code
= LTTNG_ERR_UND
;
2565 for (i
= 0; i
< nb_events
; i
++) {
2566 ret
= lttng_event_serialize(
2567 &events
[i
], 0, nullptr, nullptr, 0, nullptr, reply_payload
);
2569 ret_code
= LTTNG_ERR_NOMEM
;
2574 if (nb_events
> UINT32_MAX
) {
2575 ERR("Tracepoint count would overflow the tracepoint listing command's reply");
2576 ret_code
= LTTNG_ERR_OVERFLOW
;
2580 /* Update command reply header. */
2581 reply_command_header
.count
= (uint32_t) nb_events
;
2582 memcpy(reply_payload
->buffer
.data
+ reply_command_header_offset
,
2583 &reply_command_header
,
2584 sizeof(reply_command_header
));
2586 ret_code
= LTTNG_OK
;
2593 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
2595 enum lttng_error_code
cmd_list_tracepoint_fields(enum lttng_domain_type domain
,
2596 struct lttng_payload
*reply
)
2598 enum lttng_error_code ret_code
;
2600 unsigned int i
, nb_fields
;
2601 struct lttng_event_field
*fields
= nullptr;
2602 struct lttcomm_list_command_header reply_command_header
= {};
2603 size_t reply_command_header_offset
;
2607 /* Reserve space for command reply header. */
2608 reply_command_header_offset
= reply
->buffer
.size
;
2609 ret
= lttng_dynamic_buffer_set_size(&reply
->buffer
,
2610 reply_command_header_offset
+
2611 sizeof(struct lttcomm_list_command_header
));
2613 ret_code
= LTTNG_ERR_NOMEM
;
2618 case LTTNG_DOMAIN_UST
:
2619 ret
= ust_app_list_event_fields(&fields
);
2621 ret_code
= LTTNG_ERR_UST_LIST_FAIL
;
2626 case LTTNG_DOMAIN_KERNEL
:
2627 default: /* fall-through */
2628 ret_code
= LTTNG_ERR_UND
;
2634 for (i
= 0; i
< nb_fields
; i
++) {
2635 ret
= lttng_event_field_serialize(&fields
[i
], reply
);
2637 ret_code
= LTTNG_ERR_NOMEM
;
2642 if (nb_fields
> UINT32_MAX
) {
2643 ERR("Tracepoint field count would overflow the tracepoint field listing command's reply");
2644 ret_code
= LTTNG_ERR_OVERFLOW
;
2648 /* Update command reply header. */
2649 reply_command_header
.count
= (uint32_t) nb_fields
;
2651 memcpy(reply
->buffer
.data
+ reply_command_header_offset
,
2652 &reply_command_header
,
2653 sizeof(reply_command_header
));
2655 ret_code
= LTTNG_OK
;
2662 enum lttng_error_code
cmd_list_syscalls(struct lttng_payload
*reply_payload
)
2664 enum lttng_error_code ret_code
;
2665 ssize_t nb_events
, i
;
2667 struct lttng_event
*events
= nullptr;
2668 struct lttcomm_list_command_header reply_command_header
= {};
2669 size_t reply_command_header_offset
;
2671 assert(reply_payload
);
2673 /* Reserve space for command reply header. */
2674 reply_command_header_offset
= reply_payload
->buffer
.size
;
2675 ret
= lttng_dynamic_buffer_set_size(&reply_payload
->buffer
,
2676 reply_command_header_offset
+
2677 sizeof(struct lttcomm_list_command_header
));
2679 ret_code
= LTTNG_ERR_NOMEM
;
2683 nb_events
= syscall_table_list(&events
);
2684 if (nb_events
< 0) {
2685 ret_code
= (enum lttng_error_code
) - nb_events
;
2689 for (i
= 0; i
< nb_events
; i
++) {
2690 ret
= lttng_event_serialize(
2691 &events
[i
], 0, nullptr, nullptr, 0, nullptr, reply_payload
);
2693 ret_code
= LTTNG_ERR_NOMEM
;
2698 if (nb_events
> UINT32_MAX
) {
2699 ERR("Syscall count would overflow the syscall listing command's reply");
2700 ret_code
= LTTNG_ERR_OVERFLOW
;
2704 /* Update command reply header. */
2705 reply_command_header
.count
= (uint32_t) nb_events
;
2706 memcpy(reply_payload
->buffer
.data
+ reply_command_header_offset
,
2707 &reply_command_header
,
2708 sizeof(reply_command_header
));
2710 ret_code
= LTTNG_OK
;
/*
 * Command LTTNG_START_TRACE processed by the client thread.
 *
 * Activate tracing on `session`: refuse if already active or if a rotation
 * launched after the last stop is still ongoing, require at least one
 * channel, create (or quietly rotate into) the session's trace chunk when
 * output is enabled, then start the kernel and UST domains, open a packet
 * in every stream so viewers can delimit active tracing periods, and arm
 * the rotation-schedule timer when a rotation period is configured.
 *
 * Returns LTTNG_OK on success or an LTTNG_ERR_* code.  On failure the
 * `active`, `rotated_after_last_stop` and `cleared_after_last_stop` flags
 * are restored to their values at entry (see tail of the function).
 */
2719 int cmd_start_trace(const ltt_session::locked_ref
& session
)
2721 enum lttng_error_code ret
;
2722 unsigned long nb_chan
= 0;
2723 struct ltt_kernel_session
*ksession
;
2724 struct ltt_ust_session
*usess
;
/* Snapshot of the flags to restore should the start fail. */
2725 const bool session_rotated_after_last_stop
= session
->rotated_after_last_stop
;
2726 const bool session_cleared_after_last_stop
= session
->cleared_after_last_stop
;
2728 /* Ease our life a bit ;) */
2729 ksession
= session
->kernel_session
;
2730 usess
= session
->ust_session
;
2732 /* Is the session already started? */
2733 if (session
->active
) {
2734 ret
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
2735 /* Perform nothing */
2739 if (session
->rotation_state
== LTTNG_ROTATION_STATE_ONGOING
&&
2740 !session
->current_trace_chunk
) {
2742 * A rotation was launched while the session was stopped and
2743 * it has not been completed yet. It is not possible to start
2744 * the session since starting the session here would require a
2745 * rotation from "NULL" to a new trace chunk. That rotation
2746 * would overlap with the ongoing rotation, which is not
2749 WARN("Refusing to start session \"%s\" as a rotation launched after the last \"stop\" is still ongoing",
2751 ret
= LTTNG_ERR_ROTATION_PENDING
;
2756 * Starting a session without channel is useless since after that it's not
2757 * possible to enable channel thus inform the client.
2759 if (usess
&& usess
->domain_global
.channels
) {
2760 nb_chan
+= lttng_ht_get_count(usess
->domain_global
.channels
);
2763 nb_chan
+= ksession
->channel_count
;
2766 ret
= LTTNG_ERR_NO_CHANNEL
;
/* Tentatively flag the session active; rolled back on error below. */
2770 session
->active
= true;
2771 session
->rotated_after_last_stop
= false;
2772 session
->cleared_after_last_stop
= false;
2773 if (session
->output_traces
&& !session
->current_trace_chunk
) {
2774 if (!session
->has_been_started
) {
2775 struct lttng_trace_chunk
*trace_chunk
;
2777 DBG("Creating initial trace chunk of session \"%s\"", session
->name
);
2779 session_create_new_trace_chunk(session
, nullptr, nullptr, nullptr);
2781 ret
= LTTNG_ERR_CREATE_DIR_FAIL
;
2784 LTTNG_ASSERT(!session
->current_trace_chunk
);
2785 ret
= (lttng_error_code
) session_set_trace_chunk(
2786 session
, trace_chunk
, nullptr);
2787 lttng_trace_chunk_put(trace_chunk
);
2789 ret
= LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER
;
2793 DBG("Rotating session \"%s\" from its current \"NULL\" trace chunk to a new chunk",
2796 * Rotate existing streams into the new chunk.
2797 * This is a "quiet" rotation has no client has
2798 * explicitly requested this operation.
2800 * There is also no need to wait for the rotation
2801 * to complete as it will happen immediately. No data
2802 * was produced as the session was stopped, so the
2803 * rotation should happen on reception of the command.
2805 ret
= (lttng_error_code
) cmd_rotate_session(
2806 session
, nullptr, true, LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION
);
2807 if (ret
!= LTTNG_OK
) {
2813 /* Kernel tracing */
2814 if (ksession
!= nullptr) {
2815 DBG("Start kernel tracing session %s", session
->name
);
2816 ret
= (lttng_error_code
) start_kernel_session(ksession
);
2817 if (ret
!= LTTNG_OK
) {
2822 /* Flag session that trace should start automatically */
2824 const int int_ret
= ust_app_start_trace_all(usess
);
2827 ret
= LTTNG_ERR_UST_START_FAIL
;
2833 * Open a packet in every stream of the session to ensure that viewers
2834 * can correctly identify the boundaries of the periods during which
2835 * tracing was active for this session.
2837 ret
= session_open_packets(session
);
2838 if (ret
!= LTTNG_OK
) {
2843 * Clear the flag that indicates that a rotation was done while the
2844 * session was stopped.
2846 session
->rotated_after_last_stop
= false;
2848 if (session
->rotate_timer_period
&& !session
->rotation_schedule_timer_enabled
) {
2849 const int int_ret
= timer_session_rotation_schedule_timer_start(
2850 session
, session
->rotate_timer_period
);
2853 ERR("Failed to enable rotate timer");
2854 ret
= LTTNG_ERR_UNK
;
2862 if (ret
== LTTNG_OK
) {
2863 /* Flag this after a successful start. */
2864 session
->has_been_started
= true;
2866 session
->active
= false;
2867 /* Restore initial state on error. */
2868 session
->rotated_after_last_stop
= session_rotated_after_last_stop
;
2869 session
->cleared_after_last_stop
= session_cleared_after_last_stop
;
/*
 * Command LTTNG_STOP_TRACE processed by the client thread.
 *
 * Stop tracing for every domain of `session` (kernel first, then UST)
 * and flag the session inactive on success.
 *
 * Returns LTTNG_OK on success, LTTNG_ERR_TRACE_ALREADY_STOPPED when the
 * session is not active, or an LTTNG_ERR_* code when a domain fails to
 * stop.
 */
2878 int cmd_stop_trace(const ltt_session::locked_ref
& session
)
2881 struct ltt_kernel_session
*ksession
;
2882 struct ltt_ust_session
*usess
;
2884 DBG("Begin stop session \"%s\" (id %" PRIu64
")", session
->name
, session
->id
);
2886 ksession
= session
->kernel_session
;
2887 usess
= session
->ust_session
;
2889 /* Session is not active. Skip everything and inform the client. */
2890 if (!session
->active
) {
2891 ret
= LTTNG_ERR_TRACE_ALREADY_STOPPED
;
/* Kernel domain first. */
2895 ret
= stop_kernel_session(ksession
);
2896 if (ret
!= LTTNG_OK
) {
/* Then stop all registered UST applications. */
2900 if (usess
&& usess
->active
) {
2901 ret
= ust_app_stop_trace_all(usess
);
2903 ret
= LTTNG_ERR_UST_STOP_FAIL
;
2908 DBG("Completed stop session \"%s\" (id %" PRIu64
")", session
->name
, session
->id
);
2909 /* Flag inactive after a successful stop. */
2910 session
->active
= false;
/*
 * Set the base_path of the session only if the subdir of a control URI is
 * set; other URIs are ignored.  Any previously-set base path is freed and
 * replaced.
 *
 * @session: locked session to update.
 * @uris:    array of `nb_uri` URIs received from the client.
 *
 * Return LTTNG_OK on success, otherwise LTTNG_ERR_* (LTTNG_ERR_NOMEM when
 * duplicating the subdir fails).
 */
2921 static int set_session_base_path_from_uris(const ltt_session::locked_ref
& session
,
2923 struct lttng_uri
*uris
)
2928 for (i
= 0; i
< nb_uri
; i
++) {
2929 if (uris
[i
].stype
!= LTTNG_STREAM_CONTROL
|| uris
[i
].subdir
[0] == '\0') {
2930 /* Not interested in these URIs */
/* Replace any base path set by a previous URI. */
2934 if (session
->base_path
!= nullptr) {
2935 free(session
->base_path
);
2936 session
->base_path
= nullptr;
2939 /* Set session base_path */
2940 session
->base_path
= strdup(uris
[i
].subdir
);
2941 if (!session
->base_path
) {
2942 PERROR("Failed to copy base path \"%s\" to session \"%s\"",
2945 ret
= LTTNG_ERR_NOMEM
;
2948 DBG2("Setting base path \"%s\" for session \"%s\"",
/*
 * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
 *
 * Apply `nb_uri` URIs to the session: set the session base path from any
 * control URI subdir, then register each URI with the session's "global"
 * consumer and with the UST and kernel domain consumers when those
 * domains exist.  Finally flag the session (and per-domain sessions) as
 * producing output.
 *
 * Refused with LTTNG_ERR_TRACE_ALREADY_STARTED when the session is
 * active.  Returns LTTNG_OK on success or an LTTNG_ERR_* code.
 */
2960 int cmd_set_consumer_uri(const ltt_session::locked_ref
& session
,
2962 struct lttng_uri
*uris
)
2965 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
2966 struct ltt_ust_session
*usess
= session
->ust_session
;
2969 LTTNG_ASSERT(nb_uri
> 0);
2971 /* Can't set consumer URI if the session is active. */
2972 if (session
->active
) {
2973 ret
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
2978 * Set the session base path if any. This is done inside
2979 * cmd_set_consumer_uri to preserve backward compatibility of the
2980 * previous session creation api vs the session descriptor api.
2982 ret
= set_session_base_path_from_uris(session
, nb_uri
, uris
);
2983 if (ret
!= LTTNG_OK
) {
2987 /* Set the "global" consumer URIs */
2988 for (i
= 0; i
< nb_uri
; i
++) {
2989 ret
= add_uri_to_consumer(session
, session
->consumer
, &uris
[i
], LTTNG_DOMAIN_NONE
);
2990 if (ret
!= LTTNG_OK
) {
2995 /* Set UST session URIs */
2996 if (session
->ust_session
) {
2997 for (i
= 0; i
< nb_uri
; i
++) {
2998 ret
= add_uri_to_consumer(session
,
2999 session
->ust_session
->consumer
,
3002 if (ret
!= LTTNG_OK
) {
3008 /* Set kernel session URIs */
3009 if (session
->kernel_session
) {
3010 for (i
= 0; i
< nb_uri
; i
++) {
3011 ret
= add_uri_to_consumer(session
,
3012 session
->kernel_session
->consumer
,
3014 LTTNG_DOMAIN_KERNEL
);
3015 if (ret
!= LTTNG_OK
) {
3022 * Make sure to set the session in output mode after we set URI since a
3023 * session can be created without URL (thus flagged in no output mode).
3025 session
->output_traces
= 1;
3027 ksess
->output_traces
= 1;
3031 usess
->output_traces
= 1;
/*
 * Configure the session's output from a session descriptor.
 *
 * Extract the output URIs (none / local / network) from `descriptor`,
 * then apply them according to the session type: snapshot sessions get a
 * snapshot_output built from the URIs, while regular and live sessions
 * delegate to cmd_set_consumer_uri().
 *
 * Returns LTTNG_OK on success, LTTNG_ERR_NOMEM on allocation failure or
 * LTTNG_ERR_INVALID on unknown output/session types or bad URIs.
 */
3041 static enum lttng_error_code
3042 set_session_output_from_descriptor(const ltt_session::locked_ref
& session
,
3043 const struct lttng_session_descriptor
*descriptor
)
3046 enum lttng_error_code ret_code
= LTTNG_OK
;
3047 const lttng_session_descriptor_type session_type
=
3048 lttng_session_descriptor_get_type(descriptor
);
3049 const lttng_session_descriptor_output_type output_type
=
3050 lttng_session_descriptor_get_output_type(descriptor
);
3051 struct lttng_uri uris
[2] = {};
3052 size_t uri_count
= 0;
/* First, collect the descriptor's URIs (0, 1 or 2 of them). */
3054 switch (output_type
) {
3055 case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_NONE
:
3057 case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_LOCAL
:
3058 lttng_session_descriptor_get_local_output_uri(descriptor
, &uris
[0]);
3061 case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_NETWORK
:
3062 lttng_session_descriptor_get_network_output_uris(descriptor
, &uris
[0], &uris
[1]);
3066 ret_code
= LTTNG_ERR_INVALID
;
/* Then apply them according to the session type. */
3070 switch (session_type
) {
3071 case LTTNG_SESSION_DESCRIPTOR_TYPE_SNAPSHOT
:
3073 struct snapshot_output
*new_output
= nullptr;
3075 new_output
= snapshot_output_alloc();
3077 ret_code
= LTTNG_ERR_NOMEM
;
3081 ret
= snapshot_output_init_with_uri(session
,
3082 DEFAULT_SNAPSHOT_MAX_SIZE
,
3088 &session
->snapshot
);
3090 ret_code
= (ret
== -ENOMEM
) ? LTTNG_ERR_NOMEM
: LTTNG_ERR_INVALID
;
3091 snapshot_output_destroy(new_output
);
3094 snapshot_add_output(&session
->snapshot
, new_output
);
3097 case LTTNG_SESSION_DESCRIPTOR_TYPE_REGULAR
:
3098 case LTTNG_SESSION_DESCRIPTOR_TYPE_LIVE
:
3100 ret_code
= (lttng_error_code
) cmd_set_consumer_uri(session
, uri_count
, uris
);
3104 ret_code
= LTTNG_ERR_INVALID
;
/*
 * Create a session from a client-provided session descriptor.
 *
 * Takes the session-list lock for the whole operation; creates the
 * session under the client's credentials, registers it with the
 * notification thread (and registers a destroy notifier to un-register
 * it), fills in the descriptor's auto-generated name/output when unset,
 * then applies snapshot/live attributes and the descriptor's output.
 *
 * @descriptor: descriptor received from the client (may be updated with
 *              the generated session name / default output).
 * @creds:      client credentials used as the session's owner.
 * @home_path:  client home directory, used for default outputs; must be
 *              absolute.  NOTE(review): a guard for a null home_path is
 *              not visible in this excerpt — confirm callers/elided code
 *              validate it before the dereference below.
 *
 * Returns LTTNG_OK on success or an LTTNG_ERR_* code; on error the
 * partially-created session is destroyed (see tail of the function).
 */
3111 static enum lttng_error_code
3112 cmd_create_session_from_descriptor(struct lttng_session_descriptor
*descriptor
,
3113 const lttng_sock_cred
*creds
,
3114 const char *home_path
)
3117 enum lttng_error_code ret_code
;
3118 const char *session_name
;
3119 struct ltt_session
*new_session
= nullptr;
3120 enum lttng_session_descriptor_status descriptor_status
;
3122 const auto list_lock
= lttng::sessiond::lock_session_list();
3124 if (*home_path
!= '/') {
3125 ERR("Home path provided by client is not absolute");
3126 ret_code
= LTTNG_ERR_INVALID
;
/* A descriptor without a name means the daemon generates one. */
3131 descriptor_status
= lttng_session_descriptor_get_session_name(descriptor
, &session_name
);
3132 switch (descriptor_status
) {
3133 case LTTNG_SESSION_DESCRIPTOR_STATUS_OK
:
3135 case LTTNG_SESSION_DESCRIPTOR_STATUS_UNSET
:
3136 session_name
= nullptr;
3139 ret_code
= LTTNG_ERR_INVALID
;
3143 ret_code
= session_create(session_name
, creds
->uid
, creds
->gid
, &new_session
);
3144 if (ret_code
!= LTTNG_OK
) {
3148 ret_code
= notification_thread_command_add_session(the_notification_thread_handle
,
3153 if (ret_code
!= LTTNG_OK
) {
3157 /* Announce the session's destruction to the notification thread when it is destroyed. */
3158 ret
= session_add_destroy_notifier(
3160 session_get(new_session
);
3161 new_session
->lock();
3162 return ltt_session::make_locked_ref(*new_session
);
3164 [](const ltt_session::locked_ref
& session
,
3165 void *user_data
__attribute__((unused
))) {
3166 (void) notification_thread_command_remove_session(
3167 the_notification_thread_handle
, session
->id
);
3171 PERROR("Failed to add notification thread command to session's destroy notifiers: session name = %s",
3173 ret
= LTTNG_ERR_NOMEM
;
/* Propagate the daemon-generated name back into the descriptor. */
3177 if (!session_name
) {
3178 ret
= lttng_session_descriptor_set_session_name(descriptor
, new_session
->name
);
3180 ret_code
= LTTNG_ERR_SESSION_FAIL
;
3185 if (!lttng_session_descriptor_is_output_destination_initialized(descriptor
)) {
3187 * Only include the session's creation time in the output
3188 * destination if the name of the session itself was
3189 * not auto-generated.
3191 ret_code
= lttng_session_descriptor_set_default_output(
3193 session_name
? &new_session
->creation_time
: nullptr,
3195 if (ret_code
!= LTTNG_OK
) {
3199 new_session
->has_user_specified_directory
=
3200 lttng_session_descriptor_has_output_directory(descriptor
);
/* Apply mode-specific attributes (snapshot flag, live timer). */
3203 switch (lttng_session_descriptor_get_type(descriptor
)) {
3204 case LTTNG_SESSION_DESCRIPTOR_TYPE_SNAPSHOT
:
3205 new_session
->snapshot_mode
= 1;
3207 case LTTNG_SESSION_DESCRIPTOR_TYPE_LIVE
:
3208 new_session
->live_timer
=
3209 lttng_session_descriptor_live_get_timer_interval(descriptor
);
3215 ret_code
= set_session_output_from_descriptor(
3217 session_get(new_session
);
3218 new_session
->lock();
3219 return ltt_session::make_locked_ref(*new_session
);
3222 if (ret_code
!= LTTNG_OK
) {
3225 new_session
->consumer
->enabled
= true;
3226 ret_code
= LTTNG_OK
;
3228 /* Release reference provided by the session_create function. */
3229 session_put(new_session
);
3230 if (ret_code
!= LTTNG_OK
&& new_session
) {
3231 /* Release the global reference on error. */
3232 session_destroy(new_session
);
/*
 * Command LTTNG_CREATE_SESSION_EXT processed by the client thread.
 *
 * Validate the sizes announced in the command header, receive the
 * variable-length payload (client home directory + serialized session
 * descriptor) from the client socket, carve it into buffer views,
 * deserialize the descriptor and delegate the actual creation to
 * cmd_create_session_from_descriptor().  On success, ownership of the
 * (possibly updated) descriptor is transferred to the caller through
 * `return_descriptor` so it can be sent back to the client.
 *
 * NOTE(review): the declarations of `ret` and the `sock` used for
 * lttcomm_recv_unix_sock() are not visible in this excerpt — presumably
 * elided parameters/locals; confirm against the full source.
 *
 * Returns LTTNG_OK on success or an LTTNG_ERR_* code; the payload buffer
 * and any non-transferred descriptor are released at the end.
 */
3238 enum lttng_error_code
cmd_create_session(struct command_ctx
*cmd_ctx
,
3240 struct lttng_session_descriptor
**return_descriptor
)
3243 size_t payload_size
;
3244 struct lttng_dynamic_buffer payload
;
3245 struct lttng_buffer_view home_dir_view
;
3246 struct lttng_buffer_view session_descriptor_view
;
3247 struct lttng_session_descriptor
*session_descriptor
= nullptr;
3248 enum lttng_error_code ret_code
;
3250 lttng_dynamic_buffer_init(&payload
);
/* Bound both announced sizes before allocating/receiving anything. */
3251 if (cmd_ctx
->lsm
.u
.create_session
.home_dir_size
>= LTTNG_PATH_MAX
) {
3252 ret_code
= LTTNG_ERR_INVALID
;
3255 if (cmd_ctx
->lsm
.u
.create_session
.session_descriptor_size
>
3256 LTTNG_SESSION_DESCRIPTOR_MAX_LEN
) {
3257 ret_code
= LTTNG_ERR_INVALID
;
3261 payload_size
= cmd_ctx
->lsm
.u
.create_session
.home_dir_size
+
3262 cmd_ctx
->lsm
.u
.create_session
.session_descriptor_size
;
3263 ret
= lttng_dynamic_buffer_set_size(&payload
, payload_size
);
3265 ret_code
= LTTNG_ERR_NOMEM
;
3269 ret
= lttcomm_recv_unix_sock(sock
, payload
.data
, payload
.size
);
3271 ERR("Reception of session descriptor failed, aborting.");
3272 ret_code
= LTTNG_ERR_SESSION_FAIL
;
/* Home directory occupies the first home_dir_size bytes of the payload. */
3276 home_dir_view
= lttng_buffer_view_from_dynamic_buffer(
3277 &payload
, 0, cmd_ctx
->lsm
.u
.create_session
.home_dir_size
);
3278 if (cmd_ctx
->lsm
.u
.create_session
.home_dir_size
> 0 &&
3279 !lttng_buffer_view_is_valid(&home_dir_view
)) {
3280 ERR("Invalid payload in \"create session\" command: buffer too short to contain home directory");
3281 ret_code
= LTTNG_ERR_INVALID_PROTOCOL
;
/* The serialized descriptor immediately follows the home directory. */
3285 session_descriptor_view
= lttng_buffer_view_from_dynamic_buffer(
3287 cmd_ctx
->lsm
.u
.create_session
.home_dir_size
,
3288 cmd_ctx
->lsm
.u
.create_session
.session_descriptor_size
);
3289 if (!lttng_buffer_view_is_valid(&session_descriptor_view
)) {
3290 ERR("Invalid payload in \"create session\" command: buffer too short to contain session descriptor");
3291 ret_code
= LTTNG_ERR_INVALID_PROTOCOL
;
3295 ret
= lttng_session_descriptor_create_from_buffer(&session_descriptor_view
,
3296 &session_descriptor
);
3298 ERR("Failed to create session descriptor from payload of \"create session\" command");
3299 ret_code
= LTTNG_ERR_INVALID
;
3304 * Sets the descriptor's auto-generated properties (name, output) if
3307 ret_code
= cmd_create_session_from_descriptor(session_descriptor
,
3309 home_dir_view
.size
? home_dir_view
.data
:
3311 if (ret_code
!= LTTNG_OK
) {
3315 ret_code
= LTTNG_OK
;
/* Transfer descriptor ownership to the caller. */
3316 *return_descriptor
= session_descriptor
;
3317 session_descriptor
= nullptr;
3319 lttng_dynamic_buffer_reset(&payload
);
3320 lttng_session_descriptor_destroy(session_descriptor
);
/*
 * Deferred reply for LTTNG_DESTROY_SESSION, invoked as a session destroy
 * notifier (see cmd_destroy_session()).
 *
 * Build the reply in a dynamic buffer: the generic lttcomm_lttng_msg
 * header (carrying the recorded destruction status), followed by a
 * session-destroy command header with the rotation state, and — when an
 * implicit rotation completed during destruction — the serialized
 * location of the resulting trace archive.  The reply is then sent on the
 * client socket kept in the reply context, the socket is closed and the
 * heap-allocated reply context freed.
 */
3324 static void cmd_destroy_session_reply(const ltt_session::locked_ref
& session
, void *_reply_context
)
3328 const struct cmd_destroy_session_reply_context
*reply_context
=
3329 (cmd_destroy_session_reply_context
*) _reply_context
;
3330 struct lttng_dynamic_buffer payload
;
3331 struct lttcomm_session_destroy_command_header cmd_header
;
3332 struct lttng_trace_archive_location
*location
= nullptr;
3333 struct lttcomm_lttng_msg llm
= {
3334 .cmd_type
= LTTCOMM_SESSIOND_COMMAND_DESTROY_SESSION
,
3335 .ret_code
= reply_context
->destruction_status
,
3337 .cmd_header_size
= sizeof(struct lttcomm_session_destroy_command_header
),
3341 size_t payload_size_before_location
;
3343 lttng_dynamic_buffer_init(&payload
);
3345 ret
= lttng_dynamic_buffer_append(&payload
, &llm
, sizeof(llm
));
3347 ERR("Failed to append session destruction message");
/* Only report a rotation state if an implicit rotation took place. */
3351 cmd_header
.rotation_state
= (int32_t) (reply_context
->implicit_rotation_on_destroy
?
3352 session
->rotation_state
:
3353 LTTNG_ROTATION_STATE_NO_ROTATION
);
3354 ret
= lttng_dynamic_buffer_append(&payload
, &cmd_header
, sizeof(cmd_header
));
3356 ERR("Failed to append session destruction command header");
3360 if (!reply_context
->implicit_rotation_on_destroy
) {
3361 DBG("No implicit rotation performed during the destruction of session \"%s\", sending reply",
3365 if (session
->rotation_state
!= LTTNG_ROTATION_STATE_COMPLETED
) {
3366 DBG("Rotation state of session \"%s\" is not \"completed\", sending session destruction reply",
/* Rotation completed: append the trace archive location to the reply. */
3371 location
= session_get_trace_archive_location(session
);
3373 ERR("Failed to get the location of the trace archive produced during the destruction of session \"%s\"",
3378 payload_size_before_location
= payload
.size
;
3379 comm_ret
= lttng_trace_archive_location_serialize(location
, &payload
);
3380 lttng_trace_archive_location_put(location
);
3382 ERR("Failed to serialize the location of the trace archive produced during the destruction of session \"%s\"",
3386 /* Update the message to indicate the location's length. */
3387 ((struct lttcomm_lttng_msg
*) payload
.data
)->data_size
=
3388 payload
.size
- payload_size_before_location
;
3390 comm_ret
= lttcomm_send_unix_sock(reply_context
->reply_sock_fd
, payload
.data
, payload
.size
);
3391 if (comm_ret
!= (ssize_t
) payload
.size
) {
3392 ERR("Failed to send result of the destruction of session \"%s\" to client",
3396 ret
= close(reply_context
->reply_sock_fd
);
3398 PERROR("Failed to close client socket in deferred session destroy reply");
3400 lttng_dynamic_buffer_reset(&payload
);
3401 free(_reply_context
);
/*
 * Command LTTNG_DESTROY_SESSION processed by the client thread.
 *
 * Called with session lock held.
 *
 * Tear down `session`: stop it if still active, stop the rotation
 * schedule timer, unsubscribe any size-based rotation, perform a final
 * (explicit or quiet) rotation so all produced data is consumed, handle
 * the --shm-path completion dance (see the long comment below), then
 * destroy the session and defer the client reply through a destroy
 * notifier (cmd_destroy_session_reply) that takes ownership of
 * `*sock_fd`.  Non-fatal errors are accumulated in
 * `destruction_last_error` and reported in the deferred reply.
 */
3409 int cmd_destroy_session(const ltt_session::locked_ref
& session
, int *sock_fd
)
3412 enum lttng_error_code destruction_last_error
= LTTNG_OK
;
3413 struct cmd_destroy_session_reply_context
*reply_context
= nullptr;
3416 reply_context
= zmalloc
<cmd_destroy_session_reply_context
>();
3417 if (!reply_context
) {
3418 ret
= LTTNG_ERR_NOMEM
;
/* The reply context now owns the client socket fd. */
3422 reply_context
->reply_sock_fd
= *sock_fd
;
3425 DBG("Begin destroy session %s (id %" PRIu64
")", session
->name
, session
->id
);
3426 if (session
->active
) {
3427 DBG("Session \"%s\" is active, attempting to stop it before destroying it",
3429 ret
= cmd_stop_trace(session
);
3430 if (ret
!= LTTNG_OK
&& ret
!= LTTNG_ERR_TRACE_ALREADY_STOPPED
) {
3431 /* Carry on with the destruction of the session. */
3432 ERR("Failed to stop session \"%s\" as part of its destruction: %s",
3434 lttng_strerror(-ret
));
3435 destruction_last_error
= (lttng_error_code
) ret
;
3439 if (session
->rotation_schedule_timer_enabled
) {
3440 if (timer_session_rotation_schedule_timer_stop(session
)) {
3441 ERR("Failed to stop the \"rotation schedule\" timer of session %s",
3443 destruction_last_error
= LTTNG_ERR_TIMER_STOP_ERROR
;
3447 if (session
->rotate_size
) {
3449 the_rotation_thread_handle
->unsubscribe_session_consumed_size_rotation(
3451 } catch (const std::exception
& e
) {
3452 /* Continue the destruction of the session anyway. */
3453 ERR("Failed to unsubscribe rotation thread notification channel from consumed size condition during session destruction: %s",
3457 session
->rotate_size
= 0;
3460 if (session
->rotated
&& session
->current_trace_chunk
&& session
->output_traces
) {
3462 * Perform a last rotation on destruction if rotations have
3463 * occurred during the session's lifetime.
3465 ret
= cmd_rotate_session(
3466 session
, nullptr, false, LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED
);
3467 if (ret
!= LTTNG_OK
) {
3468 ERR("Failed to perform an implicit rotation as part of the destruction of session \"%s\": %s",
3470 lttng_strerror(-ret
));
3471 destruction_last_error
= (lttng_error_code
) -ret
;
3473 if (reply_context
) {
3474 reply_context
->implicit_rotation_on_destroy
= true;
3476 } else if (session
->has_been_started
&& session
->current_trace_chunk
) {
3478 * The user has not triggered a session rotation. However, to
3479 * ensure all data has been consumed, the session is rotated
3480 * to a 'null' trace chunk before it is destroyed.
3482 * This is a "quiet" rotation meaning that no notification is
3483 * emitted and no renaming of the current trace chunk takes
3486 ret
= cmd_rotate_session(
3487 session
, nullptr, true, LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION
);
3489 * Rotation operations may not be supported by the kernel
3490 * tracer. Hence, do not consider this implicit rotation as
3491 * a session destruction error. The library has already stopped
3492 * the session and waited for pending data; there is nothing
3493 * left to do but complete the destruction of the session.
3495 if (ret
!= LTTNG_OK
&& ret
!= -LTTNG_ERR_ROTATION_NOT_AVAILABLE_KERNEL
) {
3496 ERR("Failed to perform a quiet rotation as part of the destruction of session \"%s\": %s",
3498 lttng_strerror(ret
));
3499 destruction_last_error
= (lttng_error_code
) -ret
;
3503 if (session
->shm_path
[0]) {
3505 * When a session is created with an explicit shm_path,
3506 * the consumer daemon will create its shared memory files
3507 * at that location and will *not* unlink them. This is normal
3508 * as the intention of that feature is to make it possible
3509 * to retrieve the content of those files should a crash occur.
3511 * To ensure the content of those files can be used, the
3512 * sessiond daemon will replicate the content of the metadata
3513 * cache in a metadata file.
3515 * On clean-up, it is expected that the consumer daemon will
3516 * unlink the shared memory files and that the session daemon
3517 * will unlink the metadata file. Then, the session's directory
3518 * in the shm path can be removed.
3520 * Unfortunately, a flaw in the design of the sessiond's and
3521 * consumerd's tear down of channels makes it impossible to
3522 * determine when the sessiond _and_ the consumerd have both
3523 * destroyed their representation of a channel. For one, the
3524 * unlinking, close, and rmdir happen in deferred 'call_rcu'
3525 * callbacks in both daemons.
3527 * However, it is also impossible for the sessiond to know when
3528 * the consumer daemon is done destroying its channel(s) since
3529 * it occurs as a reaction to the closing of the channel's file
3530 * descriptor. There is no resulting communication initiated
3531 * from the consumerd to the sessiond to confirm that the
3532 * operation is completed (and was successful).
3534 * Until this is all fixed, the session daemon checks for the
3535 * removal of the session's shm path which makes it possible
3536 * to safely advertise a session as having been destroyed.
3538 * Prior to this fix, it was not possible to reliably save
3539 * a session making use of the --shm-path option, destroy it,
3540 * and load it again. This is because the creation of the
3541 * session would fail upon seeing the session's shm path
3542 * already in existence.
3544 * Note that none of the error paths in the check for the
3545 * directory's existence return an error. This is normal
3546 * as there isn't much that can be done. The session will
3547 * be destroyed properly, except that we can't offer the
3548 * guarantee that the same session can be re-created.
3550 current_completion_handler
= &destroy_completion_handler
.handler
;
3551 ret
= lttng_strncpy(destroy_completion_handler
.shm_path
,
3553 sizeof(destroy_completion_handler
.shm_path
));
3558 * The session is destroyed. However, note that the command context
3559 * still holds a reference to the session, thus delaying its destruction
3560 * _at least_ up to the point when that reference is released.
3562 session_destroy(&session
.get());
3563 if (reply_context
) {
3564 reply_context
->destruction_status
= destruction_last_error
;
3565 ret
= session_add_destroy_notifier(
3566 session
, cmd_destroy_session_reply
, (void *) reply_context
);
3568 ret
= LTTNG_ERR_FATAL
;
/*
 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
 *
 * Register an external consumer daemon for the given domain: connect to
 * its command socket at `sock_path`, allocate and initialize a
 * consumer_socket (with its own mutex) and attach it to the domain's
 * consumer output.  Only the kernel domain is supported; other domains
 * return LTTNG_ERR_UND (see the TODO below).
 *
 * Returns LTTNG_OK on success or an LTTNG_ERR_* code; on failure the
 * allocated socket is destroyed (see tail of the function).
 */
3582 int cmd_register_consumer(const ltt_session::locked_ref
& session
,
3583 enum lttng_domain_type domain
,
3584 const char *sock_path
,
3585 struct consumer_data
*cdata
)
3588 struct consumer_socket
*socket
= nullptr;
3590 LTTNG_ASSERT(cdata
);
3591 LTTNG_ASSERT(sock_path
);
3594 case LTTNG_DOMAIN_KERNEL
:
3596 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
3598 LTTNG_ASSERT(ksess
);
3600 /* Can't register a consumer if there is already one */
3601 if (ksess
->consumer_fds_sent
!= 0) {
3602 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
3606 sock
= lttcomm_connect_unix_sock(sock_path
);
3608 ret
= LTTNG_ERR_CONNECT_FAIL
;
3611 cdata
->cmd_sock
= sock
;
3613 socket
= consumer_allocate_socket(&cdata
->cmd_sock
);
3614 if (socket
== nullptr) {
3617 PERROR("close register consumer");
3619 cdata
->cmd_sock
= -1;
3620 ret
= LTTNG_ERR_FATAL
;
/* Each consumer socket carries its own heap-allocated mutex. */
3624 socket
->lock
= zmalloc
<pthread_mutex_t
>();
3625 if (socket
->lock
== nullptr) {
3626 PERROR("zmalloc pthread mutex");
3627 ret
= LTTNG_ERR_FATAL
;
3631 pthread_mutex_init(socket
->lock
, nullptr);
3632 socket
->registered
= 1;
/* Publish the socket under the RCU read lock. */
3634 const lttng::urcu::read_lock_guard read_lock
;
3635 consumer_add_socket(socket
, ksess
->consumer
);
3637 pthread_mutex_lock(&cdata
->pid_mutex
);
3639 pthread_mutex_unlock(&cdata
->pid_mutex
);
3644 /* TODO: Userspace tracing */
3645 ret
= LTTNG_ERR_UND
;
3653 consumer_destroy_socket(socket
);
/*
 * Command LTTNG_LIST_DOMAINS processed by the client thread.
 *
 * Count the session's active domains (kernel, UST global, and each agent
 * domain in use), allocate an lttng_domain array into `*domains` and fill
 * it in the same order.  The caller owns the returned array.
 *
 * Returns the number of domains on success, or a negative value to
 * differentiate an error code from a count (see comment at the end).
 */
3661 ssize_t
cmd_list_domains(const ltt_session::locked_ref
& session
, struct lttng_domain
**domains
)
3666 struct lttng_ht_iter iter
;
/* First pass: count the domains so the array can be sized. */
3668 if (session
->kernel_session
!= nullptr) {
3669 DBG3("Listing domains found kernel domain");
3673 if (session
->ust_session
!= nullptr) {
3674 DBG3("Listing domains found UST global domain");
3677 const lttng::urcu::read_lock_guard read_lock
;
3679 cds_lfht_for_each_entry (
3680 session
->ust_session
->agents
->ht
, &iter
.iter
, agt
, node
.node
) {
3681 if (agt
->being_used
) {
3691 *domains
= calloc
<lttng_domain
>(nb_dom
);
3692 if (*domains
== nullptr) {
3693 ret
= LTTNG_ERR_FATAL
;
/* Second pass: populate the array in the same order as the count. */
3697 if (session
->kernel_session
!= nullptr) {
3698 (*domains
)[index
].type
= LTTNG_DOMAIN_KERNEL
;
3700 /* Kernel session buffer type is always GLOBAL */
3701 (*domains
)[index
].buf_type
= LTTNG_BUFFER_GLOBAL
;
3706 if (session
->ust_session
!= nullptr) {
3707 (*domains
)[index
].type
= LTTNG_DOMAIN_UST
;
3708 (*domains
)[index
].buf_type
= session
->ust_session
->buffer_type
;
3712 const lttng::urcu::read_lock_guard read_lock
;
3714 cds_lfht_for_each_entry (
3715 session
->ust_session
->agents
->ht
, &iter
.iter
, agt
, node
.node
) {
3716 if (agt
->being_used
) {
3717 (*domains
)[index
].type
= agt
->domain
;
3718 (*domains
)[index
].buf_type
=
3719 session
->ust_session
->buffer_type
;
3729 /* Return negative value to differentiate return code */
/*
 * Command LTTNG_LIST_CHANNELS processed by the client thread.
 *
 * Serialize every channel of the requested domain into `payload`,
 * preceded by a lttcomm_list_command_header whose `count` field is
 * patched in once the channels have been appended.  For each channel the
 * runtime statistics (discarded events / lost packets) are fetched and
 * stored in the channel's extended attributes before serialization.
 *
 * Returns LTTNG_OK on success, LTTNG_ERR_NOMEM on buffer growth or
 * channel conversion failure, LTTNG_ERR_UNK on stats/serialization
 * failure, or LTTNG_ERR_OVERFLOW when the count exceeds uint32_t.
 */
3736 enum lttng_error_code
cmd_list_channels(enum lttng_domain_type domain
,
3737 const ltt_session::locked_ref
& session
,
3738 struct lttng_payload
*payload
)
3742 struct lttcomm_list_command_header cmd_header
= {};
3743 size_t cmd_header_offset
;
3744 enum lttng_error_code ret_code
;
3746 LTTNG_ASSERT(payload
);
3748 DBG("Listing channels for session %s", session
->name
);
3750 cmd_header_offset
= payload
->buffer
.size
;
3752 /* Reserve space for command reply header. */
3753 ret
= lttng_dynamic_buffer_set_size(&payload
->buffer
,
3754 cmd_header_offset
+ sizeof(cmd_header
));
3756 ret_code
= LTTNG_ERR_NOMEM
;
3761 case LTTNG_DOMAIN_KERNEL
:
3763 /* Kernel channels */
3764 struct ltt_kernel_channel
*kchan
;
3765 if (session
->kernel_session
!= nullptr) {
3766 cds_list_for_each_entry (
3767 kchan
, &session
->kernel_session
->channel_list
.head
, list
) {
3768 uint64_t discarded_events
, lost_packets
;
3769 struct lttng_channel_extended
*extended
;
3771 extended
= (struct lttng_channel_extended
*)
3772 kchan
->channel
->attr
.extended
.ptr
;
3774 ret
= get_kernel_runtime_stats(
3775 session
, kchan
, &discarded_events
, &lost_packets
);
3777 ret_code
= LTTNG_ERR_UNK
;
3782 * Update the discarded_events and lost_packets
3783 * count for the channel
3785 extended
->discarded_events
= discarded_events
;
3786 extended
->lost_packets
= lost_packets
;
3788 ret
= lttng_channel_serialize(kchan
->channel
, &payload
->buffer
);
3790 ERR("Failed to serialize lttng_channel: channel name = '%s'",
3791 kchan
->channel
->name
);
3792 ret_code
= LTTNG_ERR_UNK
;
3801 case LTTNG_DOMAIN_UST
:
3803 struct lttng_ht_iter iter
;
3804 struct ltt_ust_channel
*uchan
;
/* Hash table traversal requires the RCU read lock. */
3807 const lttng::urcu::read_lock_guard read_lock
;
3809 cds_lfht_for_each_entry (session
->ust_session
->domain_global
.channels
->ht
,
3813 uint64_t discarded_events
= 0, lost_packets
= 0;
3814 struct lttng_channel
*channel
= nullptr;
3815 struct lttng_channel_extended
*extended
;
/* Convert the internal UST channel to its public representation. */
3817 channel
= trace_ust_channel_to_lttng_channel(uchan
);
3819 ret_code
= LTTNG_ERR_NOMEM
;
3823 extended
= (struct lttng_channel_extended
*)
3824 channel
->attr
.extended
.ptr
;
3826 ret
= get_ust_runtime_stats(
3827 session
, uchan
, &discarded_events
, &lost_packets
);
3829 lttng_channel_destroy(channel
);
3830 ret_code
= LTTNG_ERR_UNK
;
3834 extended
->discarded_events
= discarded_events
;
3835 extended
->lost_packets
= lost_packets
;
3837 ret
= lttng_channel_serialize(channel
, &payload
->buffer
);
3839 ERR("Failed to serialize lttng_channel: channel name = '%s'",
3841 lttng_channel_destroy(channel
);
3842 ret_code
= LTTNG_ERR_UNK
;
/* The converted channel is a temporary copy; always release it. */
3846 lttng_channel_destroy(channel
);
3857 if (i
> UINT32_MAX
) {
3858 ERR("Channel count would overflow the channel listing command's reply");
3859 ret_code
= LTTNG_ERR_OVERFLOW
;
3863 /* Update command reply header. */
3864 cmd_header
.count
= (uint32_t) i
;
3865 memcpy(payload
->buffer
.data
+ cmd_header_offset
, &cmd_header
, sizeof(cmd_header
));
3866 ret_code
= LTTNG_OK
;
/*
 * Command LTTNG_LIST_EVENTS processed by the client thread.
 *
 * Serialize the events of the requested domain (kernel, UST global, or
 * one of the agent domains LOG4J/JUL/PYTHON) into `reply_payload`,
 * preceded by a lttcomm_list_command_header whose `count` field is
 * patched in afterwards.
 *
 * Returns LTTNG_OK on success, LTTNG_ERR_NOMEM on buffer growth failure,
 * LTTNG_ERR_UND for unsupported domains, LTTNG_ERR_OVERFLOW when the
 * event count exceeds uint32_t, or the error of the per-domain listing
 * helper.
 */
3875 enum lttng_error_code
cmd_list_events(enum lttng_domain_type domain
,
3876 const ltt_session::locked_ref
& session
,
3878 struct lttng_payload
*reply_payload
)
3880 int buffer_resize_ret
;
3881 enum lttng_error_code ret_code
= LTTNG_OK
;
3882 struct lttcomm_list_command_header reply_command_header
= {};
3883 size_t reply_command_header_offset
;
3884 unsigned int nb_events
= 0;
/*
 * NOTE(review): the rest of this file uses LTTNG_ASSERT(); the plain
 * assert() here is inconsistent — confirm and unify.
 */
3886 assert(reply_payload
);
3888 /* Reserve space for command reply header. */
3889 reply_command_header_offset
= reply_payload
->buffer
.size
;
3890 buffer_resize_ret
= lttng_dynamic_buffer_set_size(
3891 &reply_payload
->buffer
,
3892 reply_command_header_offset
+ sizeof(struct lttcomm_list_command_header
));
3893 if (buffer_resize_ret
) {
3894 ret_code
= LTTNG_ERR_NOMEM
;
3899 case LTTNG_DOMAIN_KERNEL
:
3900 if (session
->kernel_session
!= nullptr) {
3901 ret_code
= list_lttng_kernel_events(
3902 channel_name
, session
->kernel_session
, reply_payload
, &nb_events
);
3906 case LTTNG_DOMAIN_UST
:
3908 if (session
->ust_session
!= nullptr) {
3910 list_lttng_ust_global_events(channel_name
,
3911 &session
->ust_session
->domain_global
,
3918 case LTTNG_DOMAIN_LOG4J
:
3919 case LTTNG_DOMAIN_JUL
:
3920 case LTTNG_DOMAIN_PYTHON
:
3921 if (session
->ust_session
) {
3922 struct lttng_ht_iter iter
;
/* Agent hash table traversal requires the RCU read lock. */
3925 const lttng::urcu::read_lock_guard read_lock
;
3927 cds_lfht_for_each_entry (
3928 session
->ust_session
->agents
->ht
, &iter
.iter
, agt
, node
.node
) {
3929 if (agt
->domain
== domain
) {
3930 ret_code
= list_lttng_agent_events(
3931 agt
, reply_payload
, &nb_events
);
3938 ret_code
= LTTNG_ERR_UND
;
3942 if (nb_events
> UINT32_MAX
) {
3943 ret_code
= LTTNG_ERR_OVERFLOW
;
3947 /* Update command reply header. */
3948 reply_command_header
.count
= (uint32_t) nb_events
;
3949 memcpy(reply_payload
->buffer
.data
+ reply_command_header_offset
,
3950 &reply_command_header
,
3951 sizeof(reply_command_header
));
/*
 * Using the session list, fill an lttng_session array to send back to the
 * client for session listing.  The `sessions` buffer holds
 * `session_count` entries followed by a matching array of
 * lttng_session_extended records (see the `extended` pointer below).
 *
 * Only sessions the caller's UID may control, and that are not already
 * destroyed, are included.  The path is either a network session path or
 * the local session root path, depending on the consumer outputs.
 *
 * The session list lock MUST be acquired before calling this function.
 */
3963 void cmd_list_lttng_sessions(struct lttng_session
*sessions
,
3964 size_t session_count
,
3970 struct ltt_session
*raw_session_ptr
;
3971 struct ltt_session_list
*list
= session_get_list();
/* Extended records live immediately after the fixed-size array. */
3972 struct lttng_session_extended
*extended
= (typeof(extended
)) (&sessions
[session_count
]);
3974 DBG("Getting all available session for UID %d GID %d", uid
, gid
);
3976 * Iterate over session list and append data after the control struct in
3979 cds_list_for_each_entry (raw_session_ptr
, &list
->head
, list
) {
/* Take a reference and lock the session for the iteration's duration. */
3980 auto session
= [raw_session_ptr
]() {
3981 session_get(raw_session_ptr
);
3982 raw_session_ptr
->lock();
3983 return ltt_session::make_locked_ref(*raw_session_ptr
);
3987 * Only list the sessions the user can control.
3989 if (!session_access_ok(session
, uid
) || session
->destroyed
) {
3993 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
3994 struct ltt_ust_session
*usess
= session
->ust_session
;
/* Network output in any domain means a network session path. */
3996 if (session
->consumer
->type
== CONSUMER_DST_NET
||
3997 (ksess
&& ksess
->consumer
->type
== CONSUMER_DST_NET
) ||
3998 (usess
&& usess
->consumer
->type
== CONSUMER_DST_NET
)) {
3999 ret
= build_network_session_path(
4000 sessions
[i
].path
, sizeof(sessions
[i
].path
), session
);
4002 ret
= snprintf(sessions
[i
].path
,
4003 sizeof(sessions
[i
].path
),
4005 session
->consumer
->dst
.session_root_path
);
4008 PERROR("snprintf session path");
4012 strncpy(sessions
[i
].name
, session
->name
, NAME_MAX
);
4013 sessions
[i
].name
[NAME_MAX
- 1] = '\0';
4014 sessions
[i
].enabled
= session
->active
;
4015 sessions
[i
].snapshot_mode
= session
->snapshot_mode
;
4016 sessions
[i
].live_timer_interval
= session
->live_timer
;
4017 extended
[i
].creation_time
.value
= (uint64_t) session
->creation_time
;
4018 extended
[i
].creation_time
.is_set
= 1;
4024 * Command LTTCOMM_SESSIOND_COMMAND_KERNEL_TRACER_STATUS
/*
 * Handle LTTCOMM_SESSIOND_COMMAND_KERNEL_TRACER_STATUS: report the kernel
 * tracer status through the 'status' out-parameter. Returns
 * LTTNG_ERR_INVALID when 'status' is null.
 */
4026 enum lttng_error_code
cmd_kernel_tracer_status(enum lttng_kernel_tracer_status
*status
)
4028 if (status
== nullptr) {
4029 return LTTNG_ERR_INVALID
;
4032 *status
= get_kernel_tracer_status();
4037 * Command LTTNG_DATA_PENDING returning 0 if the data is NOT pending meaning
4038 * ready for trace analysis (or any kind of reader) or else 1 for pending data.
/*
 * Handle LTTNG_DATA_PENDING: return 0 when the session's data is fully
 * extracted (ready for a reader), 1 while data is still pending. The session
 * must be stopped; an active session yields LTTNG_ERR_SESSION_STARTED, and an
 * ongoing rotation also reports pending.
 */
4040 int cmd_data_pending(const ltt_session::locked_ref
& session
)
4043 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
4044 struct ltt_ust_session
*usess
= session
->ust_session
;
4046 DBG("Data pending for session %s", session
->name
);
4048 /* Session MUST be stopped to ask for data availability. */
4049 if (session
->active
) {
4050 ret
= LTTNG_ERR_SESSION_STARTED
;
4054 * If stopped, just make sure we've started before else the above call
4055 * will always send that there is data pending.
4057 * The consumer assumes that when the data pending command is received,
4058 * the trace has been started before or else no output data is written
4059 * by the streams which is a condition for data pending. So, this is
4060 * *VERY* important that we don't ask the consumer before a start
4063 if (!session
->has_been_started
) {
4069 /* A rotation is still pending, we have to wait. */
4070 if (session
->rotation_state
== LTTNG_ROTATION_STATE_ONGOING
) {
4071 DBG("Rotate still pending for session %s", session
->name
);
/* Query each consumer (kernel, then user space) for pending data. */
4076 if (ksess
&& ksess
->consumer
) {
4077 ret
= consumer_is_data_pending(ksess
->id
, ksess
->consumer
);
4079 /* Data is still being extracted for the kernel. */
4084 if (usess
&& usess
->consumer
) {
4085 ret
= consumer_is_data_pending(usess
->id
, usess
->consumer
);
4087 /* Data is still being extracted for user space. */
4092 /* Data is ready to be read by a viewer */
4100 * Command LTTNG_SNAPSHOT_ADD_OUTPUT from the lttng ctl library.
4102 * Return LTTNG_OK on success or else a LTTNG_ERR code.
/*
 * Handle LTTNG_SNAPSHOT_ADD_OUTPUT: attach a snapshot output to the session.
 * Rejected when the session is not in snapshot (no-output) mode, has a
 * non-mmap channel, or already has an output (only one is allowed until a
 * "tee" feature exists). On success the new output id is returned via 'id'.
 */
4104 int cmd_snapshot_add_output(const ltt_session::locked_ref
& session
,
4105 const struct lttng_snapshot_output
*output
,
4109 struct snapshot_output
*new_output
;
4111 LTTNG_ASSERT(output
);
4113 DBG("Cmd snapshot add output for session %s", session
->name
);
4116 * Can't create an output if the session is not set in no-output mode.
4118 if (session
->output_traces
) {
4119 ret
= LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
4123 if (session
->has_non_mmap_channel
) {
4124 ret
= LTTNG_ERR_SNAPSHOT_UNSUPPORTED
;
4128 /* Only one output is allowed until we have the "tee" feature. */
4129 if (session
->snapshot
.nb_output
== 1) {
4130 ret
= LTTNG_ERR_SNAPSHOT_OUTPUT_EXIST
;
4134 new_output
= snapshot_output_alloc();
4136 ret
= LTTNG_ERR_NOMEM
;
4140 ret
= snapshot_output_init(session
,
4147 &session
->snapshot
);
4149 if (ret
== -ENOMEM
) {
4150 ret
= LTTNG_ERR_NOMEM
;
4152 ret
= LTTNG_ERR_INVALID
;
4157 snapshot_add_output(&session
->snapshot
, new_output
);
4159 *id
= new_output
->id
;
/* Error path: release the partially initialized output. */
4165 snapshot_output_destroy(new_output
);
4171 * Command LTTNG_SNAPSHOT_DEL_OUTPUT from lib lttng ctl.
4173 * Return LTTNG_OK on success or else a LTTNG_ERR code.
/*
 * Handle LTTNG_SNAPSHOT_DEL_OUTPUT: remove a snapshot output from the
 * session, looked up by id or, failing that, by non-empty name. Only valid
 * for snapshot (no-output) sessions.
 */
4175 int cmd_snapshot_del_output(const ltt_session::locked_ref
& session
,
4176 const struct lttng_snapshot_output
*output
)
4179 struct snapshot_output
*sout
= nullptr;
4181 LTTNG_ASSERT(output
);
/* RCU read-side lock covers the snapshot output hash table lookups. */
4183 const lttng::urcu::read_lock_guard read_lock
;
4186 * Permission denied to create an output if the session is not
4187 * set in no output mode.
4189 if (session
->output_traces
) {
4190 ret
= LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
4195 DBG("Cmd snapshot del output id %" PRIu32
" for session %s",
4198 sout
= snapshot_find_output_by_id(output
->id
, &session
->snapshot
);
4199 } else if (*output
->name
!= '\0') {
4200 DBG("Cmd snapshot del output name %s for session %s", output
->name
, session
->name
);
4201 sout
= snapshot_find_output_by_name(output
->name
, &session
->snapshot
);
4204 ret
= LTTNG_ERR_INVALID
;
4208 snapshot_delete_output(&session
->snapshot
, sout
);
4209 snapshot_output_destroy(sout
);
4217 * Command LTTNG_SNAPSHOT_LIST_OUTPUT from lib lttng ctl.
4219 * If no output is available, outputs is untouched and 0 is returned.
4221 * Return the size of the newly allocated outputs or a negative LTTNG_ERR code.
/*
 * Handle LTTNG_SNAPSHOT_LIST_OUTPUT: allocate an array of
 * lttng_snapshot_output describing the session's snapshot outputs and return
 * it through 'outputs'. Returns the number of outputs, 0 when there are
 * none (outputs untouched), or a negative LTTNG_ERR code.
 */
4223 ssize_t
cmd_snapshot_list_outputs(const ltt_session::locked_ref
& session
,
4224 struct lttng_snapshot_output
**outputs
)
4227 struct lttng_snapshot_output
*list
= nullptr;
4228 struct lttng_ht_iter iter
;
4229 struct snapshot_output
*output
;
4231 LTTNG_ASSERT(outputs
);
4233 DBG("Cmd snapshot list outputs for session %s", session
->name
);
4236 * Permission denied to create an output if the session is not
4237 * set in no output mode.
4239 if (session
->output_traces
) {
4240 ret
= -LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
4244 if (session
->snapshot
.nb_output
== 0) {
4249 list
= calloc
<lttng_snapshot_output
>(session
->snapshot
.nb_output
);
4251 ret
= -LTTNG_ERR_NOMEM
;
4255 /* Copy list from session to the new list object. */
4257 const lttng::urcu::read_lock_guard read_lock
;
4259 cds_lfht_for_each_entry (
4260 session
->snapshot
.output_ht
->ht
, &iter
.iter
, output
, node
.node
) {
4261 LTTNG_ASSERT(output
->consumer
);
4262 list
[idx
].id
= output
->id
;
4263 list
[idx
].max_size
= output
->max_size
;
4264 if (lttng_strncpy(list
[idx
].name
, output
->name
, sizeof(list
[idx
].name
))) {
4265 ret
= -LTTNG_ERR_INVALID
;
/* Local destination: root path is the control URL; net: format URIs. */
4269 if (output
->consumer
->type
== CONSUMER_DST_LOCAL
) {
4270 if (lttng_strncpy(list
[idx
].ctrl_url
,
4271 output
->consumer
->dst
.session_root_path
,
4272 sizeof(list
[idx
].ctrl_url
))) {
4273 ret
= -LTTNG_ERR_INVALID
;
4278 ret
= uri_to_str_url(&output
->consumer
->dst
.net
.control
,
4280 sizeof(list
[idx
].ctrl_url
));
4282 ret
= -LTTNG_ERR_NOMEM
;
4287 ret
= uri_to_str_url(&output
->consumer
->dst
.net
.data
,
4289 sizeof(list
[idx
].data_url
));
4291 ret
= -LTTNG_ERR_NOMEM
;
/* Success: return the output count. */
4302 ret
= session
->snapshot
.nb_output
;
4310 * Check if we can regenerate the metadata for this session.
4311 * Only kernel, UST per-uid and non-live sessions are supported.
4313 * Return 0 if the metadata can be generated, a LTTNG_ERR code otherwise.
/*
 * Check whether metadata regeneration is supported for this session:
 * live sessions, inactive sessions, UST per-PID buffering, and network
 * consumers on a relayd older than minor version 8 are all rejected.
 * Returns 0 when supported, an LTTNG_ERR code otherwise.
 */
4315 static int check_regenerate_metadata_support(const ltt_session::locked_ref
& session
)
4319 if (session
->live_timer
!= 0) {
4320 ret
= LTTNG_ERR_LIVE_SESSION
;
4323 if (!session
->active
) {
4324 ret
= LTTNG_ERR_SESSION_NOT_STARTED
;
4327 if (session
->ust_session
) {
4328 switch (session
->ust_session
->buffer_type
) {
4329 case LTTNG_BUFFER_PER_UID
:
4331 case LTTNG_BUFFER_PER_PID
:
4332 ret
= LTTNG_ERR_PER_PID_SESSION
;
4336 ret
= LTTNG_ERR_UNK
;
/* Metadata regeneration over the network requires relayd >= x.8. */
4340 if (session
->consumer
->type
== CONSUMER_DST_NET
&&
4341 session
->consumer
->relay_minor_version
< 8) {
4342 ret
= LTTNG_ERR_RELAYD_VERSION_FAIL
;
4352 * Command LTTNG_REGENERATE_METADATA from the lttng-ctl library.
4354 * Ask the consumer to truncate the existing metadata file(s) and
4355 * then regenerate the metadata. Live and per-pid sessions are not
4356 * supported and return an error.
4358 * Return LTTNG_OK on success or else a LTTNG_ERR code.
/*
 * Handle LTTNG_REGENERATE_METADATA: after validating support
 * (see check_regenerate_metadata_support()), ask the kernel tracer and/or
 * the UST layer to truncate and regenerate the session's metadata.
 */
4360 int cmd_regenerate_metadata(const ltt_session::locked_ref
& session
)
4364 ret
= check_regenerate_metadata_support(session
);
4369 if (session
->kernel_session
) {
4370 ret
= kernctl_session_regenerate_metadata(session
->kernel_session
->fd
);
4372 ERR("Failed to regenerate the kernel metadata");
4377 if (session
->ust_session
) {
4378 ret
= trace_ust_regenerate_metadata(session
->ust_session
);
4380 ERR("Failed to regenerate the UST metadata");
4384 DBG("Cmd metadata regenerate for session %s", session
->name
);
4392 * Command LTTNG_REGENERATE_STATEDUMP from the lttng-ctl library.
4394 * Ask the tracer to regenerate a new statedump.
4396 * Return LTTNG_OK on success or else a LTTNG_ERR code.
/*
 * Handle LTTNG_REGENERATE_STATEDUMP: ask the kernel tracer and/or all UST
 * apps of the session to emit a fresh statedump. The session must be active.
 */
4398 int cmd_regenerate_statedump(const ltt_session::locked_ref
& session
)
4402 if (!session
->active
) {
4403 ret
= LTTNG_ERR_SESSION_NOT_STARTED
;
4407 if (session
->kernel_session
) {
4408 ret
= kernctl_session_regenerate_statedump(session
->kernel_session
->fd
);
4410 * Currently, the statedump in kernel can only fail if out
4414 if (ret
== -ENOMEM
) {
4415 ret
= LTTNG_ERR_REGEN_STATEDUMP_NOMEM
;
4417 ret
= LTTNG_ERR_REGEN_STATEDUMP_FAIL
;
4419 ERR("Failed to regenerate the kernel statedump");
4424 if (session
->ust_session
) {
4425 ret
= ust_app_regenerate_statedump_all(session
->ust_session
);
4427 * Currently, the statedump in UST always returns 0.
4430 ret
= LTTNG_ERR_REGEN_STATEDUMP_FAIL
;
4431 ERR("Failed to regenerate the UST statedump");
4435 DBG("Cmd regenerate statedump for session %s", session
->name
);
/*
 * Propagate a newly registered trigger's event notifier to the tracer that
 * matches its domain (kernel, UST, or an agent domain). Takes the session
 * list lock for the duration. On a kernel registration failure the trigger
 * is unregistered from the notification thread to roll back; the original
 * error code is preserved.
 */
4442 static enum lttng_error_code
4443 synchronize_tracer_notifier_register(struct notification_thread_handle
*notification_thread
,
4444 struct lttng_trigger
*trigger
,
4445 const struct lttng_credentials
*cmd_creds
)
4447 enum lttng_error_code ret_code
;
4448 const struct lttng_condition
*condition
= lttng_trigger_get_const_condition(trigger
);
4449 const char *trigger_name
;
4450 uid_t trigger_owner
;
4451 enum lttng_trigger_status trigger_status
;
4452 const enum lttng_domain_type trigger_domain
=
4453 lttng_trigger_get_underlying_domain_type_restriction(trigger
);
4455 trigger_status
= lttng_trigger_get_owner_uid(trigger
, &trigger_owner
);
4456 LTTNG_ASSERT(trigger_status
== LTTNG_TRIGGER_STATUS_OK
);
/* Only event-rule-matches conditions carry a tracer notifier. */
4458 LTTNG_ASSERT(condition
);
4459 LTTNG_ASSERT(lttng_condition_get_type(condition
) ==
4460 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES
);
4462 trigger_status
= lttng_trigger_get_name(trigger
, &trigger_name
);
4463 trigger_name
= trigger_status
== LTTNG_TRIGGER_STATUS_OK
? trigger_name
: "(anonymous)";
4465 const auto list_lock
= lttng::sessiond::lock_session_list();
4466 switch (trigger_domain
) {
4467 case LTTNG_DOMAIN_KERNEL
:
4469 ret_code
= kernel_register_event_notifier(trigger
, cmd_creds
);
4470 if (ret_code
!= LTTNG_OK
) {
4471 enum lttng_error_code notif_thread_unregister_ret
;
4473 notif_thread_unregister_ret
=
4474 notification_thread_command_unregister_trigger(notification_thread
,
4477 if (notif_thread_unregister_ret
!= LTTNG_OK
) {
4478 /* Return the original error code. */
4479 ERR("Failed to unregister trigger from notification thread during error recovery: trigger name = '%s', trigger owner uid = %d, error code = %d",
4481 (int) trigger_owner
,
4489 case LTTNG_DOMAIN_UST
:
4490 ust_app_global_update_all_event_notifier_rules();
4492 case LTTNG_DOMAIN_JUL
:
4493 case LTTNG_DOMAIN_LOG4J
:
4494 case LTTNG_DOMAIN_PYTHON
:
4496 /* Agent domains. */
4497 struct agent
*agt
= agent_find_by_event_notifier_domain(trigger_domain
);
/* Lazily create the per-domain agent the first time it is needed. */
4500 agt
= agent_create(trigger_domain
);
4502 ret_code
= LTTNG_ERR_NOMEM
;
4506 agent_add(agt
, the_trigger_agents_ht_by_domain
);
4509 ret_code
= (lttng_error_code
) trigger_agent_enable(trigger
, agt
);
4510 if (ret_code
!= LTTNG_OK
) {
4516 case LTTNG_DOMAIN_NONE
:
/*
 * Handle the "register trigger" command: validate credentials (only root may
 * register a trigger owned by another uid), generate/validate the condition's
 * bytecode, register the trigger with the notification thread, then
 * synchronize tracers when the trigger carries an event notifier. Throws a
 * ctl exception on failure; on success returns the (possibly updated)
 * trigger with an extra reference for the caller.
 */
4524 lttng::ctl::trigger
cmd_register_trigger(const struct lttng_credentials
*cmd_creds
,
4525 struct lttng_trigger
*trigger
,
4526 bool is_trigger_anonymous
,
4527 struct notification_thread_handle
*notification_thread
)
4529 enum lttng_error_code ret_code
;
4530 const char *trigger_name
;
4531 uid_t trigger_owner
;
4532 enum lttng_trigger_status trigger_status
;
4534 trigger_status
= lttng_trigger_get_name(trigger
, &trigger_name
);
4535 trigger_name
= trigger_status
== LTTNG_TRIGGER_STATUS_OK
? trigger_name
: "(anonymous)";
4537 trigger_status
= lttng_trigger_get_owner_uid(trigger
, &trigger_owner
);
4538 LTTNG_ASSERT(trigger_status
== LTTNG_TRIGGER_STATUS_OK
);
4540 DBG("Running register trigger command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4542 (int) trigger_owner
,
4543 (int) lttng_credentials_get_uid(cmd_creds
));
4546 * Validate the trigger credentials against the command credentials.
4547 * Only the root user can register a trigger with non-matching
4550 if (!lttng_credentials_is_equal_uid(lttng_trigger_get_credentials(trigger
), cmd_creds
)) {
4551 if (lttng_credentials_get_uid(cmd_creds
) != 0) {
4554 "Trigger credentials do not match the command credentials: trigger_name = `{}`, trigger_owner_uid={}, command_creds_uid={}",
4557 lttng_credentials_get_uid(cmd_creds
)),
4558 LTTNG_ERR_INVALID_TRIGGER
);
4563 * The bytecode generation also serves as a validation step for the
4564 * bytecode expressions.
4566 ret_code
= lttng_trigger_generate_bytecode(trigger
, cmd_creds
);
4567 if (ret_code
!= LTTNG_OK
) {
4570 "Failed to generate bytecode of trigger: trigger_name=`{}`, trigger_owner_uid={}",
4577 * A reference to the trigger is acquired by the notification thread.
4578 * It is safe to return the same trigger to the caller since the
4579 * other user holds a reference.
4581 * The trigger is modified during the execution of the
4582 * "register trigger" command. However, by the time the command returns,
4583 * it is safe to use without any locking as its properties are
4586 ret_code
= notification_thread_command_register_trigger(
4587 notification_thread
, trigger
, is_trigger_anonymous
);
4588 if (ret_code
!= LTTNG_OK
) {
4591 "Failed to register trigger to notification thread: trigger_name=`{}`, trigger_owner_uid={}",
/* Re-fetch the name: registration may have assigned one. */
4597 trigger_status
= lttng_trigger_get_name(trigger
, &trigger_name
);
4598 trigger_name
= trigger_status
== LTTNG_TRIGGER_STATUS_OK
? trigger_name
: "(anonymous)";
4601 * Synchronize tracers if the trigger adds an event notifier.
4603 if (lttng_trigger_needs_tracer_notifier(trigger
)) {
4604 ret_code
= synchronize_tracer_notifier_register(
4605 notification_thread
, trigger
, cmd_creds
);
4606 if (ret_code
!= LTTNG_OK
) {
4607 LTTNG_THROW_CTL("Failed to register tracer notifier", ret_code
);
4612 * Return an updated trigger to the client.
4614 * Since a modified version of the same trigger is returned, acquire a
4615 * reference to the trigger so the caller doesn't have to care if those
4616 * are distinct instances or not.
4618 LTTNG_ASSERT(ret_code
== LTTNG_OK
);
4619 lttng_trigger_get(trigger
);
4620 return lttng::ctl::trigger(trigger
);
/*
 * Remove a trigger's event notifier from the tracer matching its domain
 * (kernel, UST, or an agent domain). Counterpart of
 * synchronize_tracer_notifier_register(); takes the session list lock.
 */
4623 static enum lttng_error_code
4624 synchronize_tracer_notifier_unregister(const struct lttng_trigger
*trigger
)
4626 enum lttng_error_code ret_code
;
4627 const struct lttng_condition
*condition
= lttng_trigger_get_const_condition(trigger
);
4628 const enum lttng_domain_type trigger_domain
=
4629 lttng_trigger_get_underlying_domain_type_restriction(trigger
);
/* Only event-rule-matches conditions carry a tracer notifier. */
4631 LTTNG_ASSERT(condition
);
4632 LTTNG_ASSERT(lttng_condition_get_type(condition
) ==
4633 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES
);
4635 const auto list_lock
= lttng::sessiond::lock_session_list();
4636 switch (trigger_domain
) {
4637 case LTTNG_DOMAIN_KERNEL
:
4638 ret_code
= kernel_unregister_event_notifier(trigger
);
4639 if (ret_code
!= LTTNG_OK
) {
4644 case LTTNG_DOMAIN_UST
:
4645 ust_app_global_update_all_event_notifier_rules();
4647 case LTTNG_DOMAIN_JUL
:
4648 case LTTNG_DOMAIN_LOG4J
:
4649 case LTTNG_DOMAIN_PYTHON
:
4651 /* Agent domains. */
4652 struct agent
*agt
= agent_find_by_event_notifier_domain(trigger_domain
);
4655 * This trigger was never registered in the first place. Calling
4656 * this function under those circumstances is an internal error.
4659 ret_code
= (lttng_error_code
) trigger_agent_disable(trigger
, agt
);
4660 if (ret_code
!= LTTNG_OK
) {
4666 case LTTNG_DOMAIN_NONE
:
/*
 * Handle the "unregister trigger" command: validate credentials (only root
 * may unregister another uid's trigger), fetch the sessiond-side trigger,
 * mark it unregistered, remove it from the notification thread, and
 * synchronize tracers when it carried an event notifier (even if the
 * unregistration itself failed, to stop notifier output).
 */
4674 enum lttng_error_code
cmd_unregister_trigger(const struct lttng_credentials
*cmd_creds
,
4675 const struct lttng_trigger
*trigger
,
4676 struct notification_thread_handle
*notification_thread
)
4678 enum lttng_error_code ret_code
;
4679 const char *trigger_name
;
4680 uid_t trigger_owner
;
4681 enum lttng_trigger_status trigger_status
;
4682 struct lttng_trigger
*sessiond_trigger
= nullptr;
4684 trigger_status
= lttng_trigger_get_name(trigger
, &trigger_name
);
4685 trigger_name
= trigger_status
== LTTNG_TRIGGER_STATUS_OK
? trigger_name
: "(anonymous)";
4686 trigger_status
= lttng_trigger_get_owner_uid(trigger
, &trigger_owner
);
4687 LTTNG_ASSERT(trigger_status
== LTTNG_TRIGGER_STATUS_OK
);
4689 DBG("Running unregister trigger command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4691 (int) trigger_owner
,
4692 (int) lttng_credentials_get_uid(cmd_creds
));
4695 * Validate the trigger credentials against the command credentials.
4696 * Only the root user can unregister a trigger with non-matching
4699 if (!lttng_credentials_is_equal_uid(lttng_trigger_get_credentials(trigger
), cmd_creds
)) {
4700 if (lttng_credentials_get_uid(cmd_creds
) != 0) {
4701 ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4703 (int) trigger_owner
,
4704 (int) lttng_credentials_get_uid(cmd_creds
));
4705 ret_code
= LTTNG_ERR_INVALID_TRIGGER
;
4710 /* Fetch the sessiond side trigger object. */
4711 ret_code
= notification_thread_command_get_trigger(
4712 notification_thread
, trigger
, &sessiond_trigger
);
4713 if (ret_code
!= LTTNG_OK
) {
4714 DBG("Failed to get trigger from notification thread during unregister: trigger name = '%s', trigger owner uid = %d, error code = %d",
4716 (int) trigger_owner
,
4721 LTTNG_ASSERT(sessiond_trigger
);
4724 * From this point on, no matter what, consider the trigger
4727 * We set the unregistered state of the sessiond side trigger object in
4728 * the client thread since we want to minimize the possibility of the
4729 * notification thread being stalled due to a long execution of an
4730 * action that required the trigger lock.
4732 lttng_trigger_set_as_unregistered(sessiond_trigger
);
4734 ret_code
= notification_thread_command_unregister_trigger(notification_thread
, trigger
);
4735 if (ret_code
!= LTTNG_OK
) {
4736 DBG("Failed to unregister trigger from notification thread: trigger name = '%s', trigger owner uid = %d, error code = %d",
4738 (int) trigger_owner
,
4744 * Synchronize tracers if the trigger removes an event notifier.
4745 * Do this even if the trigger unregistration failed to at least stop
4746 * the tracers from producing notifications associated with this
4749 if (lttng_trigger_needs_tracer_notifier(trigger
)) {
4750 ret_code
= synchronize_tracer_notifier_unregister(trigger
);
4751 if (ret_code
!= LTTNG_OK
) {
4752 ERR("Error unregistering trigger to tracer.");
/* Drop the reference taken by the get_trigger lookup above. */
4758 lttng_trigger_put(sessiond_trigger
);
/*
 * Handle the "list triggers" command: obtain the triggers visible to the
 * client's uid from the notification thread, strip hidden (internal)
 * triggers, and hand the set back through 'return_triggers'. The triggers
 * object is destroyed on the error path.
 */
4762 enum lttng_error_code
cmd_list_triggers(struct command_ctx
*cmd_ctx
,
4763 struct notification_thread_handle
*notification_thread
,
4764 struct lttng_triggers
**return_triggers
)
4767 enum lttng_error_code ret_code
;
4768 struct lttng_triggers
*triggers
= nullptr;
4770 /* Get the set of triggers from the notification thread. */
4771 ret_code
= notification_thread_command_list_triggers(
4772 notification_thread
, cmd_ctx
->creds
.uid
, &triggers
);
4773 if (ret_code
!= LTTNG_OK
) {
4777 ret
= lttng_triggers_remove_hidden_triggers(triggers
);
4779 ret_code
= LTTNG_ERR_UNK
;
4783 *return_triggers
= triggers
;
4785 ret_code
= LTTNG_OK
;
4787 lttng_triggers_destroy(triggers
);
/*
 * Execute an error query targeting a trigger, one of its conditions, or one
 * of its actions. Resolves the sessiond-side trigger matching the query
 * target, validates credentials (only root may query another user's
 * trigger), collects the relevant error counters into a freshly created
 * result set, and returns it through '_results'.
 */
4791 enum lttng_error_code
4792 cmd_execute_error_query(const struct lttng_credentials
*cmd_creds
,
4793 const struct lttng_error_query
*query
,
4794 struct lttng_error_query_results
**_results
,
4795 struct notification_thread_handle
*notification_thread
)
4797 enum lttng_error_code ret_code
;
4798 const struct lttng_trigger
*query_target_trigger
;
4799 const struct lttng_action
*query_target_action
= nullptr;
4800 struct lttng_trigger
*matching_trigger
= nullptr;
4801 const char *trigger_name
;
4802 uid_t trigger_owner
;
4803 enum lttng_trigger_status trigger_status
;
4804 struct lttng_error_query_results
*results
= nullptr;
/* Every target type ultimately resolves to a trigger to look up. */
4806 switch (lttng_error_query_get_target_type(query
)) {
4807 case LTTNG_ERROR_QUERY_TARGET_TYPE_TRIGGER
:
4808 query_target_trigger
= lttng_error_query_trigger_borrow_target(query
);
4810 case LTTNG_ERROR_QUERY_TARGET_TYPE_CONDITION
:
4811 query_target_trigger
= lttng_error_query_condition_borrow_target(query
);
4813 case LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION
:
4814 query_target_trigger
= lttng_error_query_action_borrow_trigger_target(query
);
4820 LTTNG_ASSERT(query_target_trigger
);
4822 ret_code
= notification_thread_command_get_trigger(
4823 notification_thread
, query_target_trigger
, &matching_trigger
);
4824 if (ret_code
!= LTTNG_OK
) {
4828 /* No longer needed. */
4829 query_target_trigger
= nullptr;
4831 if (lttng_error_query_get_target_type(query
) == LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION
) {
4832 /* Get the sessiond-side version of the target action. */
4833 query_target_action
=
4834 lttng_error_query_action_borrow_action_target(query
, matching_trigger
);
4837 trigger_status
= lttng_trigger_get_name(matching_trigger
, &trigger_name
);
4838 trigger_name
= trigger_status
== LTTNG_TRIGGER_STATUS_OK
? trigger_name
: "(anonymous)";
4839 trigger_status
= lttng_trigger_get_owner_uid(matching_trigger
, &trigger_owner
);
4840 LTTNG_ASSERT(trigger_status
== LTTNG_TRIGGER_STATUS_OK
);
4842 results
= lttng_error_query_results_create();
4844 ret_code
= LTTNG_ERR_NOMEM
;
4848 DBG("Running \"execute error query\" command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4850 (int) trigger_owner
,
4851 (int) lttng_credentials_get_uid(cmd_creds
));
4854 * Validate the trigger credentials against the command credentials.
4855 * Only the root user can target a trigger with non-matching
4858 if (!lttng_credentials_is_equal_uid(lttng_trigger_get_credentials(matching_trigger
),
4860 if (lttng_credentials_get_uid(cmd_creds
) != 0) {
4861 ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4863 (int) trigger_owner
,
4864 (int) lttng_credentials_get_uid(cmd_creds
));
4865 ret_code
= LTTNG_ERR_INVALID_TRIGGER
;
/* Accumulate the error counters matching the query's target type. */
4870 switch (lttng_error_query_get_target_type(query
)) {
4871 case LTTNG_ERROR_QUERY_TARGET_TYPE_TRIGGER
:
4872 trigger_status
= lttng_trigger_add_error_results(matching_trigger
, results
);
4874 switch (trigger_status
) {
4875 case LTTNG_TRIGGER_STATUS_OK
:
4878 ret_code
= LTTNG_ERR_UNK
;
4883 case LTTNG_ERROR_QUERY_TARGET_TYPE_CONDITION
:
4886 lttng_trigger_condition_add_error_results(matching_trigger
, results
);
4888 switch (trigger_status
) {
4889 case LTTNG_TRIGGER_STATUS_OK
:
4892 ret_code
= LTTNG_ERR_UNK
;
4898 case LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION
:
4900 const enum lttng_action_status action_status
=
4901 lttng_action_add_error_query_results(query_target_action
, results
);
4903 switch (action_status
) {
4904 case LTTNG_ACTION_STATUS_OK
:
4907 ret_code
= LTTNG_ERR_UNK
;
4918 *_results
= results
;
4920 ret_code
= LTTNG_OK
;
/* Release the trigger reference and the result set (on error paths). */
4922 lttng_trigger_put(matching_trigger
);
4923 lttng_error_query_results_destroy(results
);
4928 * Send relayd sockets from snapshot output to consumer. Ignore request if the
4929 * snapshot output is *not* set with a remote destination.
4931 * Return LTTNG_OK on success or a LTTNG_ERR code.
/*
 * Send relayd sockets from a snapshot output to the consumer. The request is
 * ignored when the snapshot output is not a network destination. The
 * snapshot output's control subdir, when non-empty, overrides the session
 * base path.
 */
4933 static enum lttng_error_code
set_relayd_for_snapshot(struct consumer_output
*output
,
4934 const ltt_session::locked_ref
& session
)
4936 enum lttng_error_code status
= LTTNG_OK
;
4937 struct lttng_ht_iter iter
;
4938 struct consumer_socket
*socket
;
4939 LTTNG_OPTIONAL(uint64_t) current_chunk_id
= {};
4940 const char *base_path
;
4942 LTTNG_ASSERT(output
);
4944 DBG2("Set relayd object from snapshot output");
/*
 * NOTE(review): the token '¤t_chunk_id' below (two occurrences) is
 * mojibake for '&current_chunk_id' — the '&curren' run was decoded as the
 * HTML entity for '¤'. Restore before compiling.
 */
4946 if (session
->current_trace_chunk
) {
4947 const lttng_trace_chunk_status chunk_status
= lttng_trace_chunk_get_id(
4948 session
->current_trace_chunk
, ¤t_chunk_id
.value
);
4950 if (chunk_status
== LTTNG_TRACE_CHUNK_STATUS_OK
) {
4951 current_chunk_id
.is_set
= true;
4953 ERR("Failed to get current trace chunk id");
4954 status
= LTTNG_ERR_UNK
;
4959 /* Ignore if snapshot consumer output is not network. */
4960 if (output
->type
!= CONSUMER_DST_NET
) {
4965 * The snapshot record URI base path overrides the session
4968 if (output
->dst
.net
.control
.subdir
[0] != '\0') {
4969 base_path
= output
->dst
.net
.control
.subdir
;
4971 base_path
= session
->base_path
;
4975 * For each consumer socket, create and send the relayd object of the
4979 const lttng::urcu::read_lock_guard read_lock
;
4981 cds_lfht_for_each_entry (output
->socks
->ht
, &iter
.iter
, socket
, node
.node
) {
/* Each socket has its own lock; hold it across the send. */
4982 pthread_mutex_lock(socket
->lock
);
4983 status
= send_consumer_relayd_sockets(
4990 session
->live_timer
,
4991 current_chunk_id
.is_set
? ¤t_chunk_id
.value
: nullptr,
4992 session
->creation_time
,
4993 session
->name_contains_creation_time
);
4994 pthread_mutex_unlock(socket
->lock
);
4995 if (status
!= LTTNG_OK
) {
5006 * Record a kernel snapshot.
5008 * Return LTTNG_OK on success or a LTTNG_ERR code.
/*
 * Record a kernel snapshot: thin wrapper over kernel_snapshot_record().
 * Returns LTTNG_OK on success or an LTTNG_ERR code.
 */
5010 static enum lttng_error_code
record_kernel_snapshot(struct ltt_kernel_session
*ksess
,
5011 const struct consumer_output
*output
,
5012 uint64_t nb_packets_per_stream
)
5014 enum lttng_error_code status
;
5016 LTTNG_ASSERT(ksess
);
5017 LTTNG_ASSERT(output
);
5019 status
= kernel_snapshot_record(ksess
, output
, nb_packets_per_stream
);
5024 * Record a UST snapshot.
5026 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
/*
 * Record a UST snapshot: thin wrapper over ust_app_snapshot_record().
 * Returns LTTNG_OK on success or an LTTNG_ERR code.
 */
5028 static enum lttng_error_code
record_ust_snapshot(struct ltt_ust_session
*usess
,
5029 const struct consumer_output
*output
,
5030 uint64_t nb_packets_per_stream
)
5032 enum lttng_error_code status
;
5034 LTTNG_ASSERT(usess
);
5035 LTTNG_ASSERT(output
);
5037 status
= ust_app_snapshot_record(usess
, output
, nb_packets_per_stream
);
/*
 * Compute the total extra bytes needed to grab one more packet per stream,
 * given that 'cur_nr_packets' packets per stream are already accounted for.
 * Kernel channels whose sub-buffer count is already exhausted contribute
 * nothing; the UST contribution is delegated to the UST app layer.
 */
5041 static uint64_t get_session_size_one_more_packet_per_stream(const ltt_session::locked_ref
& session
,
5042 uint64_t cur_nr_packets
)
5044 uint64_t tot_size
= 0;
5046 if (session
->kernel_session
) {
5047 struct ltt_kernel_channel
*chan
;
5048 const struct ltt_kernel_session
*ksess
= session
->kernel_session
;
5050 cds_list_for_each_entry (chan
, &ksess
->channel_list
.head
, list
) {
5051 if (cur_nr_packets
>= chan
->channel
->attr
.num_subbuf
) {
5053 * Don't take channel into account if we
5054 * already grab all its packets.
5058 tot_size
+= chan
->channel
->attr
.subbuf_size
* chan
->stream_count
;
5062 if (session
->ust_session
) {
5063 const struct ltt_ust_session
*usess
= session
->ust_session
;
5065 tot_size
+= ust_app_get_size_one_more_packet_per_stream(usess
, cur_nr_packets
);
5072 * Calculate the number of packets we can grab from each stream that
5073 * fits within the overall snapshot max size.
5075 * Returns -1 on error, 0 means infinite number of packets, else > 0 is
5076 * the number of packets per stream.
5078 * TODO: this approach is not perfect: we consider the worse case
5079 * (packet filling the sub-buffers) as an upper bound, but we could do
5080 * better if we do this calculation while we actually grab the packet
5081 * content: we would know how much padding we don't actually store into
5084 * This algorithm is currently bounded by the number of packets per
5087 * Since we call this algorithm before actually grabbing the data, it's
5088 * an approximation: for instance, applications could appear/disappear
5089 * in between this call and actually grabbing data.
/*
 * Compute how many packets can be grabbed from each stream while staying
 * within the snapshot max size. Returns -1 on error (not even one packet per
 * stream fits), 0 for "infinite" (no max size), otherwise the packet count.
 * This is a worst-case estimate: each packet is assumed to fill its
 * sub-buffer (see the comment above this definition in the original file).
 */
5091 static int64_t get_session_nb_packets_per_stream(const ltt_session::locked_ref
& session
,
5095 uint64_t cur_nb_packets
= 0;
5098 return 0; /* Infinite */
5101 size_left
= max_size
;
/* Greedily add one packet per stream until the budget is exhausted. */
5103 uint64_t one_more_packet_tot_size
;
5105 one_more_packet_tot_size
=
5106 get_session_size_one_more_packet_per_stream(session
, cur_nb_packets
);
5107 if (!one_more_packet_tot_size
) {
5108 /* We are already grabbing all packets. */
5111 size_left
-= one_more_packet_tot_size
;
5112 if (size_left
< 0) {
5117 if (!cur_nb_packets
&& size_left
!= max_size
) {
5118 /* Not enough room to grab one packet of each stream, error. */
5121 return cur_nb_packets
;
5124 static enum lttng_error_code
snapshot_record(const ltt_session::locked_ref
& session
,
5125 const struct snapshot_output
*snapshot_output
)
5127 int64_t nb_packets_per_stream
;
5128 char snapshot_chunk_name
[LTTNG_NAME_MAX
];
5130 enum lttng_error_code ret_code
= LTTNG_OK
;
5131 struct lttng_trace_chunk
*snapshot_trace_chunk
;
5132 struct consumer_output
*original_ust_consumer_output
= nullptr;
5133 struct consumer_output
*original_kernel_consumer_output
= nullptr;
5134 struct consumer_output
*snapshot_ust_consumer_output
= nullptr;
5135 struct consumer_output
*snapshot_kernel_consumer_output
= nullptr;
5137 ret
= snprintf(snapshot_chunk_name
,
5138 sizeof(snapshot_chunk_name
),
5140 snapshot_output
->name
,
5141 snapshot_output
->datetime
,
5142 snapshot_output
->nb_snapshot
);
5143 if (ret
< 0 || ret
>= sizeof(snapshot_chunk_name
)) {
5144 ERR("Failed to format snapshot name");
5145 ret_code
= LTTNG_ERR_INVALID
;
5148 DBG("Recording snapshot \"%s\" for session \"%s\" with chunk name \"%s\"",
5149 snapshot_output
->name
,
5151 snapshot_chunk_name
);
5152 if (!session
->kernel_session
&& !session
->ust_session
) {
5153 ERR("Failed to record snapshot as no channels exist");
5154 ret_code
= LTTNG_ERR_NO_CHANNEL
;
5158 if (session
->kernel_session
) {
5159 original_kernel_consumer_output
= session
->kernel_session
->consumer
;
5160 snapshot_kernel_consumer_output
= consumer_copy_output(snapshot_output
->consumer
);
5161 strcpy(snapshot_kernel_consumer_output
->chunk_path
, snapshot_chunk_name
);
5163 /* Copy the original domain subdir. */
5164 strcpy(snapshot_kernel_consumer_output
->domain_subdir
,
5165 original_kernel_consumer_output
->domain_subdir
);
5167 ret
= consumer_copy_sockets(snapshot_kernel_consumer_output
,
5168 original_kernel_consumer_output
);
5170 ERR("Failed to copy consumer sockets from snapshot output configuration");
5171 ret_code
= LTTNG_ERR_NOMEM
;
5174 ret_code
= set_relayd_for_snapshot(snapshot_kernel_consumer_output
, session
);
5175 if (ret_code
!= LTTNG_OK
) {
5176 ERR("Failed to setup relay daemon for kernel tracer snapshot");
5179 session
->kernel_session
->consumer
= snapshot_kernel_consumer_output
;
5181 if (session
->ust_session
) {
5182 original_ust_consumer_output
= session
->ust_session
->consumer
;
5183 snapshot_ust_consumer_output
= consumer_copy_output(snapshot_output
->consumer
);
5184 strcpy(snapshot_ust_consumer_output
->chunk_path
, snapshot_chunk_name
);
5186 /* Copy the original domain subdir. */
5187 strcpy(snapshot_ust_consumer_output
->domain_subdir
,
5188 original_ust_consumer_output
->domain_subdir
);
5190 ret
= consumer_copy_sockets(snapshot_ust_consumer_output
,
5191 original_ust_consumer_output
);
5193 ERR("Failed to copy consumer sockets from snapshot output configuration");
5194 ret_code
= LTTNG_ERR_NOMEM
;
5197 ret_code
= set_relayd_for_snapshot(snapshot_ust_consumer_output
, session
);
5198 if (ret_code
!= LTTNG_OK
) {
5199 ERR("Failed to setup relay daemon for userspace tracer snapshot");
5202 session
->ust_session
->consumer
= snapshot_ust_consumer_output
;
5205 snapshot_trace_chunk
= session_create_new_trace_chunk(
5207 snapshot_kernel_consumer_output
?: snapshot_ust_consumer_output
,
5208 consumer_output_get_base_path(snapshot_output
->consumer
),
5209 snapshot_chunk_name
);
5210 if (!snapshot_trace_chunk
) {
5211 ERR("Failed to create temporary trace chunk to record a snapshot of session \"%s\"",
5213 ret_code
= LTTNG_ERR_CREATE_DIR_FAIL
;
5216 LTTNG_ASSERT(!session
->current_trace_chunk
);
5217 ret
= session_set_trace_chunk(session
, snapshot_trace_chunk
, nullptr);
5218 lttng_trace_chunk_put(snapshot_trace_chunk
);
5219 snapshot_trace_chunk
= nullptr;
5221 ERR("Failed to set temporary trace chunk to record a snapshot of session \"%s\"",
5223 ret_code
= LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER
;
5227 nb_packets_per_stream
=
5228 get_session_nb_packets_per_stream(session
, snapshot_output
->max_size
);
5229 if (nb_packets_per_stream
< 0) {
5230 ret_code
= LTTNG_ERR_MAX_SIZE_INVALID
;
5231 goto error_close_trace_chunk
;
5234 if (session
->kernel_session
) {
5235 ret_code
= record_kernel_snapshot(session
->kernel_session
,
5236 snapshot_kernel_consumer_output
,
5237 nb_packets_per_stream
);
5238 if (ret_code
!= LTTNG_OK
) {
5239 goto error_close_trace_chunk
;
5243 if (session
->ust_session
) {
5244 ret_code
= record_ust_snapshot(
5245 session
->ust_session
, snapshot_ust_consumer_output
, nb_packets_per_stream
);
5246 if (ret_code
!= LTTNG_OK
) {
5247 goto error_close_trace_chunk
;
5251 error_close_trace_chunk
:
5252 if (session_set_trace_chunk(session
, nullptr, &snapshot_trace_chunk
)) {
5253 ERR("Failed to release the current trace chunk of session \"%s\"", session
->name
);
5254 ret_code
= LTTNG_ERR_UNK
;
5257 if (session_close_trace_chunk(session
,
5258 snapshot_trace_chunk
,
5259 LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION
,
5262 * Don't goto end; make sure the chunk is closed for the session
5263 * to allow future snapshots.
5265 ERR("Failed to close snapshot trace chunk of session \"%s\"", session
->name
);
5266 ret_code
= LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER
;
5269 lttng_trace_chunk_put(snapshot_trace_chunk
);
5270 snapshot_trace_chunk
= nullptr;
5272 if (original_ust_consumer_output
) {
5273 session
->ust_session
->consumer
= original_ust_consumer_output
;
5275 if (original_kernel_consumer_output
) {
5276 session
->kernel_session
->consumer
= original_kernel_consumer_output
;
5278 consumer_output_put(snapshot_ust_consumer_output
);
5279 consumer_output_put(snapshot_kernel_consumer_output
);
5284 * Command LTTNG_SNAPSHOT_RECORD from lib lttng ctl.
5286 * The wait parameter is ignored so this call always wait for the snapshot to
5287 * complete before returning.
5289 * Return LTTNG_OK on success or else a LTTNG_ERR code.
5291 int cmd_snapshot_record(const ltt_session::locked_ref
& session
,
5292 const struct lttng_snapshot_output
*output
,
5293 int wait
__attribute__((unused
)))
5295 enum lttng_error_code cmd_ret
= LTTNG_OK
;
5297 unsigned int snapshot_success
= 0;
5299 struct snapshot_output
*tmp_output
= nullptr;
5301 LTTNG_ASSERT(output
);
5303 DBG("Cmd snapshot record for session %s", session
->name
);
5305 /* Get the datetime for the snapshot output directory. */
5306 ret
= utils_get_current_time_str("%Y%m%d-%H%M%S", datetime
, sizeof(datetime
));
5308 cmd_ret
= LTTNG_ERR_INVALID
;
5313 * Permission denied to create an output if the session is not
5314 * set in no output mode.
5316 if (session
->output_traces
) {
5317 cmd_ret
= LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
5321 /* The session needs to be started at least once. */
5322 if (!session
->has_been_started
) {
5323 cmd_ret
= LTTNG_ERR_START_SESSION_ONCE
;
5327 /* Use temporary output for the session. */
5328 if (*output
->ctrl_url
!= '\0') {
5329 tmp_output
= snapshot_output_alloc();
5331 cmd_ret
= LTTNG_ERR_NOMEM
;
5335 ret
= snapshot_output_init(session
,
5344 if (ret
== -ENOMEM
) {
5345 cmd_ret
= LTTNG_ERR_NOMEM
;
5347 cmd_ret
= LTTNG_ERR_INVALID
;
5351 /* Use the global session count for the temporary snapshot. */
5352 tmp_output
->nb_snapshot
= session
->snapshot
.nb_snapshot
;
5354 /* Use the global datetime */
5355 memcpy(tmp_output
->datetime
, datetime
, sizeof(datetime
));
5356 cmd_ret
= snapshot_record(session
, tmp_output
);
5357 if (cmd_ret
!= LTTNG_OK
) {
5360 snapshot_success
= 1;
5362 struct snapshot_output
*sout
;
5363 struct lttng_ht_iter iter
;
5365 const lttng::urcu::read_lock_guard read_lock
;
5367 cds_lfht_for_each_entry (
5368 session
->snapshot
.output_ht
->ht
, &iter
.iter
, sout
, node
.node
) {
5369 struct snapshot_output output_copy
;
5372 * Make a local copy of the output and override output
5373 * parameters with those provided as part of the
5376 memcpy(&output_copy
, sout
, sizeof(output_copy
));
5378 if (output
->max_size
!= (uint64_t) -1ULL) {
5379 output_copy
.max_size
= output
->max_size
;
5382 output_copy
.nb_snapshot
= session
->snapshot
.nb_snapshot
;
5383 memcpy(output_copy
.datetime
, datetime
, sizeof(datetime
));
5385 /* Use temporary name. */
5386 if (*output
->name
!= '\0') {
5387 if (lttng_strncpy(output_copy
.name
,
5389 sizeof(output_copy
.name
))) {
5390 cmd_ret
= LTTNG_ERR_INVALID
;
5395 cmd_ret
= snapshot_record(session
, &output_copy
);
5396 if (cmd_ret
!= LTTNG_OK
) {
5400 snapshot_success
= 1;
5404 if (snapshot_success
) {
5405 session
->snapshot
.nb_snapshot
++;
5407 cmd_ret
= LTTNG_ERR_SNAPSHOT_FAIL
;
5412 snapshot_output_destroy(tmp_output
);
5419 * Command LTTNG_SET_SESSION_SHM_PATH processed by the client thread.
5421 int cmd_set_session_shm_path(const ltt_session::locked_ref
& session
, const char *shm_path
)
5424 * Can only set shm path before session is started.
5426 if (session
->has_been_started
) {
5427 return LTTNG_ERR_SESSION_STARTED
;
5430 strncpy(session
->shm_path
, shm_path
, sizeof(session
->shm_path
));
5431 session
->shm_path
[sizeof(session
->shm_path
) - 1] = '\0';
5437 * Command LTTNG_ROTATE_SESSION from the lttng-ctl library.
5439 * Ask the consumer to rotate the session output directory.
5440 * The session lock must be held.
5442 * Returns LTTNG_OK on success or else a negative LTTng error code.
5444 int cmd_rotate_session(const ltt_session::locked_ref
& session
,
5445 struct lttng_rotate_session_return
*rotate_return
,
5446 bool quiet_rotation
,
5447 enum lttng_trace_chunk_command_type command
)
5450 uint64_t ongoing_rotation_chunk_id
;
5451 enum lttng_error_code cmd_ret
= LTTNG_OK
;
5452 struct lttng_trace_chunk
*chunk_being_archived
= nullptr;
5453 struct lttng_trace_chunk
*new_trace_chunk
= nullptr;
5454 enum lttng_trace_chunk_status chunk_status
;
5455 bool failed_to_rotate
= false;
5456 enum lttng_error_code rotation_fail_code
= LTTNG_OK
;
5458 if (!session
->has_been_started
) {
5459 cmd_ret
= LTTNG_ERR_START_SESSION_ONCE
;
5464 * Explicit rotation is not supported for live sessions.
5465 * However, live sessions can perform a quiet rotation on
5467 * Rotation is not supported for snapshot traces (no output).
5469 if ((!quiet_rotation
&& session
->live_timer
) || !session
->output_traces
) {
5470 cmd_ret
= LTTNG_ERR_ROTATION_NOT_AVAILABLE
;
5474 /* Unsupported feature in lttng-relayd before 2.11. */
5475 if (!quiet_rotation
&& session
->consumer
->type
== CONSUMER_DST_NET
&&
5476 (session
->consumer
->relay_major_version
== 2 &&
5477 session
->consumer
->relay_minor_version
< 11)) {
5478 cmd_ret
= LTTNG_ERR_ROTATION_NOT_AVAILABLE_RELAY
;
5482 /* Unsupported feature in lttng-modules before 2.8 (lack of sequence number). */
5483 if (session
->kernel_session
&& !kernel_supports_ring_buffer_packet_sequence_number()) {
5484 cmd_ret
= LTTNG_ERR_ROTATION_NOT_AVAILABLE_KERNEL
;
5488 if (session
->rotation_state
== LTTNG_ROTATION_STATE_ONGOING
) {
5489 DBG("Refusing to launch a rotation; a rotation is already in progress for session %s",
5491 cmd_ret
= LTTNG_ERR_ROTATION_PENDING
;
5496 * After a stop, we only allow one rotation to occur, the other ones are
5497 * useless until a new start.
5499 if (session
->rotated_after_last_stop
) {
5500 DBG("Session \"%s\" was already rotated after stop, refusing rotation",
5502 cmd_ret
= LTTNG_ERR_ROTATION_MULTIPLE_AFTER_STOP
;
5507 * After a stop followed by a clear, disallow following rotations a they would
5508 * generate empty chunks.
5510 if (session
->cleared_after_last_stop
) {
5511 DBG("Session \"%s\" was already cleared after stop, refusing rotation",
5513 cmd_ret
= LTTNG_ERR_ROTATION_AFTER_STOP_CLEAR
;
5517 if (session
->active
) {
5519 session_create_new_trace_chunk(session
, nullptr, nullptr, nullptr);
5520 if (!new_trace_chunk
) {
5521 cmd_ret
= LTTNG_ERR_CREATE_DIR_FAIL
;
5527 * The current trace chunk becomes the chunk being archived.
5529 * After this point, "chunk_being_archived" must absolutely
5530 * be closed on the consumer(s), otherwise it will never be
5531 * cleaned-up, which will result in a leak.
5533 ret
= session_set_trace_chunk(session
, new_trace_chunk
, &chunk_being_archived
);
5535 cmd_ret
= LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER
;
5539 if (session
->kernel_session
) {
5540 cmd_ret
= kernel_rotate_session(session
);
5541 if (cmd_ret
!= LTTNG_OK
) {
5542 failed_to_rotate
= true;
5543 rotation_fail_code
= cmd_ret
;
5546 if (session
->ust_session
) {
5547 cmd_ret
= ust_app_rotate_session(session
);
5548 if (cmd_ret
!= LTTNG_OK
) {
5549 failed_to_rotate
= true;
5550 rotation_fail_code
= cmd_ret
;
5554 if (!session
->active
) {
5555 session
->rotated_after_last_stop
= true;
5558 if (!chunk_being_archived
) {
5559 DBG("Rotating session \"%s\" from a \"NULL\" trace chunk to a new trace chunk, skipping completion check",
5561 if (failed_to_rotate
) {
5562 cmd_ret
= rotation_fail_code
;
5569 session
->rotation_state
= LTTNG_ROTATION_STATE_ONGOING
;
5570 chunk_status
= lttng_trace_chunk_get_id(chunk_being_archived
, &ongoing_rotation_chunk_id
);
5571 LTTNG_ASSERT(chunk_status
== LTTNG_TRACE_CHUNK_STATUS_OK
);
5573 ret
= session_close_trace_chunk(
5574 session
, chunk_being_archived
, command
, session
->last_chunk_path
);
5576 cmd_ret
= LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER
;
5580 if (failed_to_rotate
) {
5581 cmd_ret
= rotation_fail_code
;
5585 session
->quiet_rotation
= quiet_rotation
;
5586 ret
= timer_session_rotation_pending_check_start(session
, DEFAULT_ROTATE_PENDING_TIMER
);
5588 cmd_ret
= LTTNG_ERR_UNK
;
5592 if (rotate_return
) {
5593 rotate_return
->rotation_id
= ongoing_rotation_chunk_id
;
5596 session
->chunk_being_archived
= chunk_being_archived
;
5597 chunk_being_archived
= nullptr;
5598 if (!quiet_rotation
) {
5599 ret
= notification_thread_command_session_rotation_ongoing(
5600 the_notification_thread_handle
, session
->id
, ongoing_rotation_chunk_id
);
5601 if (ret
!= LTTNG_OK
) {
5602 ERR("Failed to notify notification thread that a session rotation is ongoing for session %s",
5604 cmd_ret
= (lttng_error_code
) ret
;
5608 DBG("Cmd rotate session %s, archive_id %" PRIu64
" sent",
5610 ongoing_rotation_chunk_id
);
5612 lttng_trace_chunk_put(new_trace_chunk
);
5613 lttng_trace_chunk_put(chunk_being_archived
);
5614 ret
= (cmd_ret
== LTTNG_OK
) ? cmd_ret
: -((int) cmd_ret
);
5617 if (session_reset_rotation_state(session
, LTTNG_ROTATION_STATE_ERROR
)) {
5618 ERR("Failed to reset rotation state of session \"%s\"", session
->name
);
5624 * Command LTTNG_ROTATION_GET_INFO from the lttng-ctl library.
5626 * Check if the session has finished its rotation.
5628 * Return LTTNG_OK on success or else an LTTNG_ERR code.
5630 int cmd_rotate_get_info(const ltt_session::locked_ref
& session
,
5631 struct lttng_rotation_get_info_return
*info_return
,
5632 uint64_t rotation_id
)
5634 enum lttng_error_code cmd_ret
= LTTNG_OK
;
5635 enum lttng_rotation_state rotation_state
;
5637 DBG("Cmd rotate_get_info session %s, rotation id %" PRIu64
,
5639 session
->most_recent_chunk_id
.value
);
5641 if (session
->chunk_being_archived
) {
5642 enum lttng_trace_chunk_status chunk_status
;
5645 chunk_status
= lttng_trace_chunk_get_id(session
->chunk_being_archived
, &chunk_id
);
5646 LTTNG_ASSERT(chunk_status
== LTTNG_TRACE_CHUNK_STATUS_OK
);
5648 rotation_state
= rotation_id
== chunk_id
? LTTNG_ROTATION_STATE_ONGOING
:
5649 LTTNG_ROTATION_STATE_EXPIRED
;
5651 if (session
->last_archived_chunk_id
.is_set
&&
5652 rotation_id
!= session
->last_archived_chunk_id
.value
) {
5653 rotation_state
= LTTNG_ROTATION_STATE_EXPIRED
;
5655 rotation_state
= session
->rotation_state
;
5659 switch (rotation_state
) {
5660 case LTTNG_ROTATION_STATE_NO_ROTATION
:
5661 DBG("Reporting that no rotation has occurred within the lifetime of session \"%s\"",
5664 case LTTNG_ROTATION_STATE_EXPIRED
:
5665 DBG("Reporting that the rotation state of rotation id %" PRIu64
5666 " of session \"%s\" has expired",
5670 case LTTNG_ROTATION_STATE_ONGOING
:
5671 DBG("Reporting that rotation id %" PRIu64
" of session \"%s\" is still pending",
5675 case LTTNG_ROTATION_STATE_COMPLETED
:
5679 char *current_tracing_path_reply
;
5680 size_t current_tracing_path_reply_len
;
5682 DBG("Reporting that rotation id %" PRIu64
" of session \"%s\" is completed",
5686 switch (session_get_consumer_destination_type(session
)) {
5687 case CONSUMER_DST_LOCAL
:
5688 current_tracing_path_reply
= info_return
->location
.local
.absolute_path
;
5689 current_tracing_path_reply_len
=
5690 sizeof(info_return
->location
.local
.absolute_path
);
5691 info_return
->location_type
=
5692 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_LOCAL
;
5693 fmt_ret
= asprintf(&chunk_path
,
5694 "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY
"/%s",
5695 session_get_base_path(session
),
5696 session
->last_archived_chunk_name
);
5697 if (fmt_ret
== -1) {
5698 PERROR("Failed to format the path of the last archived trace chunk");
5699 info_return
->status
= LTTNG_ROTATION_STATUS_ERROR
;
5700 cmd_ret
= LTTNG_ERR_UNK
;
5704 case CONSUMER_DST_NET
:
5706 uint16_t ctrl_port
, data_port
;
5708 current_tracing_path_reply
= info_return
->location
.relay
.relative_path
;
5709 current_tracing_path_reply_len
=
5710 sizeof(info_return
->location
.relay
.relative_path
);
5711 /* Currently the only supported relay protocol. */
5712 info_return
->location
.relay
.protocol
=
5713 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_RELAY_PROTOCOL_TYPE_TCP
;
5715 fmt_ret
= lttng_strncpy(info_return
->location
.relay
.host
,
5716 session_get_net_consumer_hostname(session
),
5717 sizeof(info_return
->location
.relay
.host
));
5719 ERR("Failed to copy host name to rotate_get_info reply");
5720 info_return
->status
= LTTNG_ROTATION_STATUS_ERROR
;
5721 cmd_ret
= LTTNG_ERR_SET_URL
;
5725 session_get_net_consumer_ports(session
, &ctrl_port
, &data_port
);
5726 info_return
->location
.relay
.ports
.control
= ctrl_port
;
5727 info_return
->location
.relay
.ports
.data
= data_port
;
5728 info_return
->location_type
=
5729 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_RELAY
;
5730 chunk_path
= strdup(session
->last_chunk_path
);
5732 ERR("Failed to allocate the path of the last archived trace chunk");
5733 info_return
->status
= LTTNG_ROTATION_STATUS_ERROR
;
5734 cmd_ret
= LTTNG_ERR_UNK
;
5743 fmt_ret
= lttng_strncpy(
5744 current_tracing_path_reply
, chunk_path
, current_tracing_path_reply_len
);
5747 ERR("Failed to copy path of the last archived trace chunk to rotate_get_info reply");
5748 info_return
->status
= LTTNG_ROTATION_STATUS_ERROR
;
5749 cmd_ret
= LTTNG_ERR_UNK
;
5755 case LTTNG_ROTATION_STATE_ERROR
:
5756 DBG("Reporting that an error occurred during rotation %" PRIu64
5757 " of session \"%s\"",
5767 info_return
->status
= (int32_t) rotation_state
;
5772 * Command LTTNG_ROTATION_SET_SCHEDULE from the lttng-ctl library.
5774 * Configure the automatic rotation parameters.
5775 * 'activate' to true means activate the rotation schedule type with 'new_value'.
5776 * 'activate' to false means deactivate the rotation schedule and validate that
5777 * 'new_value' has the same value as the currently active value.
5779 * Return LTTNG_OK on success or else a positive LTTNG_ERR code.
5781 int cmd_rotation_set_schedule(const ltt_session::locked_ref
& session
,
5783 enum lttng_rotation_schedule_type schedule_type
,
5787 uint64_t *parameter_value
;
5789 DBG("Cmd rotate set schedule session %s", session
->name
);
5791 if (session
->live_timer
|| !session
->output_traces
) {
5792 DBG("Failing ROTATION_SET_SCHEDULE command as the rotation feature is not available for this session");
5793 ret
= LTTNG_ERR_ROTATION_NOT_AVAILABLE
;
5797 switch (schedule_type
) {
5798 case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD
:
5799 parameter_value
= &session
->rotate_size
;
5801 case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC
:
5802 parameter_value
= &session
->rotate_timer_period
;
5803 if (new_value
>= UINT_MAX
) {
5804 DBG("Failing ROTATION_SET_SCHEDULE command as the value requested for a periodic rotation schedule is invalid: %" PRIu64
5808 ret
= LTTNG_ERR_INVALID
;
5813 WARN("Failing ROTATION_SET_SCHEDULE command on unknown schedule type");
5814 ret
= LTTNG_ERR_INVALID
;
5818 /* Improper use of the API. */
5819 if (new_value
== -1ULL) {
5820 WARN("Failing ROTATION_SET_SCHEDULE command as the value requested is -1");
5821 ret
= LTTNG_ERR_INVALID
;
5826 * As indicated in struct ltt_session's comments, a value of == 0 means
5827 * this schedule rotation type is not in use.
5829 * Reject the command if we were asked to activate a schedule that was
5832 if (activate
&& *parameter_value
!= 0) {
5833 DBG("Failing ROTATION_SET_SCHEDULE (activate) command as the schedule is already active");
5834 ret
= LTTNG_ERR_ROTATION_SCHEDULE_SET
;
5839 * Reject the command if we were asked to deactivate a schedule that was
5842 if (!activate
&& *parameter_value
== 0) {
5843 DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as the schedule is already inactive");
5844 ret
= LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET
;
5849 * Reject the command if we were asked to deactivate a schedule that
5852 if (!activate
&& *parameter_value
!= new_value
) {
5853 DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as an inexistant schedule was provided");
5854 ret
= LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET
;
5858 *parameter_value
= activate
? new_value
: 0;
5860 switch (schedule_type
) {
5861 case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC
:
5862 if (activate
&& session
->active
) {
5864 * Only start the timer if the session is active,
5865 * otherwise it will be started when the session starts.
5867 ret
= timer_session_rotation_schedule_timer_start(session
, new_value
);
5869 ERR("Failed to enable session rotation timer in ROTATION_SET_SCHEDULE command");
5870 ret
= LTTNG_ERR_UNK
;
5874 ret
= timer_session_rotation_schedule_timer_stop(session
);
5876 ERR("Failed to disable session rotation timer in ROTATION_SET_SCHEDULE command");
5877 ret
= LTTNG_ERR_UNK
;
5882 case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD
:
5885 the_rotation_thread_handle
->subscribe_session_consumed_size_rotation(
5886 *session
, new_value
);
5887 } catch (const std::exception
& e
) {
5888 ERR("Failed to enable consumed-size notification in ROTATION_SET_SCHEDULE command: %s",
5890 ret
= LTTNG_ERR_UNK
;
5895 the_rotation_thread_handle
5896 ->unsubscribe_session_consumed_size_rotation(*session
);
5897 } catch (const std::exception
& e
) {
5898 ERR("Failed to disable consumed-size notification in ROTATION_SET_SCHEDULE command: %s",
5900 ret
= LTTNG_ERR_UNK
;
5906 /* Would have been caught before. */
5918 /* Wait for a given path to be removed before continuing. */
5919 static enum lttng_error_code
wait_on_path(void *path_data
)
5921 const char *shm_path
= (const char *) path_data
;
5923 DBG("Waiting for the shm path at %s to be removed before completing session destruction",
5929 ret
= stat(shm_path
, &st
);
5931 if (errno
!= ENOENT
) {
5932 PERROR("stat() returned an error while checking for the existence of the shm path");
5934 DBG("shm path no longer exists, completing the destruction of session");
5938 if (!S_ISDIR(st
.st_mode
)) {
5939 ERR("The type of shm path %s returned by stat() is not a directory; aborting the wait for shm path removal",
5944 usleep(SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US
);
5950 * Returns a pointer to a handler to run on completion of a command.
5951 * Returns NULL if no handler has to be run for the last command executed.
5953 const struct cmd_completion_handler
*cmd_pop_completion_handler()
5955 struct cmd_completion_handler
*handler
= current_completion_handler
;
5957 current_completion_handler
= nullptr;
5962 * Init command subsystem.
5967 * Set network sequence index to 1 for streams to match a relayd
5968 * socket on the consumer side.
5970 pthread_mutex_lock(&relayd_net_seq_idx_lock
);
5971 relayd_net_seq_idx
= 1;
5972 pthread_mutex_unlock(&relayd_net_seq_idx_lock
);
5974 DBG("Command subsystem initialized");