2 * Copyright (C) 2012 - David Goulet <dgoulet@efficios.com>
3 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License, version 2 only, as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 51
16 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 #include <urcu/list.h>
23 #include <urcu/uatomic.h>
26 #include <common/defaults.h>
27 #include <common/common.h>
28 #include <common/sessiond-comm/sessiond-comm.h>
29 #include <common/relayd/relayd.h>
30 #include <common/utils.h>
31 #include <common/compat/string.h>
32 #include <common/kernel-ctl/kernel-ctl.h>
33 #include <common/dynamic-buffer.h>
34 #include <common/buffer-view.h>
35 #include <lttng/trigger/trigger-internal.h>
36 #include <lttng/condition/condition.h>
37 #include <lttng/action/action.h>
38 #include <lttng/channel.h>
39 #include <lttng/channel-internal.h>
40 #include <lttng/rotate-internal.h>
41 #include <lttng/location-internal.h>
42 #include <common/string-utils/string-utils.h>
47 #include "health-sessiond.h"
49 #include "kernel-consumer.h"
50 #include "lttng-sessiond.h"
52 #include "lttng-syscall.h"
54 #include "buffer-registry.h"
55 #include "notification-thread.h"
56 #include "notification-thread-commands.h"
58 #include "rotation-thread.h"
59 #include "sessiond-timer.h"
60 #include "agent-thread.h"
/* Sleep for 100ms between each check for the shm path's deletion. */
#define SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US 100000

/*
 * Completion handler run when destroying a session with a non-default
 * shm_path; presumably polls for the shm path's removal using the delay
 * above — confirm against the definition later in this file.
 */
static enum lttng_error_code wait_on_path(void *path);
70 * Command completion handler that is used by the destroy command
71 * when a session that has a non-default shm_path is being destroyed.
73 * See comment in cmd_destroy_session() for the rationale.
75 static struct destroy_completion_handler
{
76 struct cmd_completion_handler handler
;
77 char shm_path
[member_sizeof(struct ltt_session
, shm_path
)];
78 } destroy_completion_handler
= {
81 .data
= destroy_completion_handler
.shm_path
86 static struct cmd_completion_handler
*current_completion_handler
;
/*
 * Used to keep a unique index for each relayd socket created where this value
 * is associated with streams on the consumer so it can match the right relayd
 * to send to. It must be accessed with the relayd_net_seq_idx_lock
 * mutex held.
 */
static pthread_mutex_t relayd_net_seq_idx_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t relayd_net_seq_idx;
/* Forward declarations of helpers defined later in this file. */
static int validate_ust_event_name(const char *);
/*
 * NOTE(review): the tail of this declaration was lost in extraction;
 * the trailing "int wpipe" parameter is reconstructed — confirm against
 * the definition.
 */
static int cmd_enable_event_internal(struct ltt_session *session,
		struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe);
107 * Create a session path used by list_lttng_sessions for the case that the
108 * session consumer is on the network.
110 static int build_network_session_path(char *dst
, size_t size
,
111 struct ltt_session
*session
)
113 int ret
, kdata_port
, udata_port
;
114 struct lttng_uri
*kuri
= NULL
, *uuri
= NULL
, *uri
= NULL
;
115 char tmp_uurl
[PATH_MAX
], tmp_urls
[PATH_MAX
];
120 memset(tmp_urls
, 0, sizeof(tmp_urls
));
121 memset(tmp_uurl
, 0, sizeof(tmp_uurl
));
123 kdata_port
= udata_port
= DEFAULT_NETWORK_DATA_PORT
;
125 if (session
->kernel_session
&& session
->kernel_session
->consumer
) {
126 kuri
= &session
->kernel_session
->consumer
->dst
.net
.control
;
127 kdata_port
= session
->kernel_session
->consumer
->dst
.net
.data
.port
;
130 if (session
->ust_session
&& session
->ust_session
->consumer
) {
131 uuri
= &session
->ust_session
->consumer
->dst
.net
.control
;
132 udata_port
= session
->ust_session
->consumer
->dst
.net
.data
.port
;
135 if (uuri
== NULL
&& kuri
== NULL
) {
136 uri
= &session
->consumer
->dst
.net
.control
;
137 kdata_port
= session
->consumer
->dst
.net
.data
.port
;
138 } else if (kuri
&& uuri
) {
139 ret
= uri_compare(kuri
, uuri
);
143 /* Build uuri URL string */
144 ret
= uri_to_str_url(uuri
, tmp_uurl
, sizeof(tmp_uurl
));
151 } else if (kuri
&& uuri
== NULL
) {
153 } else if (uuri
&& kuri
== NULL
) {
157 ret
= uri_to_str_url(uri
, tmp_urls
, sizeof(tmp_urls
));
163 * Do we have a UST url set. If yes, this means we have both kernel and UST
166 if (*tmp_uurl
!= '\0') {
167 ret
= snprintf(dst
, size
, "[K]: %s [data: %d] -- [U]: %s [data: %d]",
168 tmp_urls
, kdata_port
, tmp_uurl
, udata_port
);
171 if (kuri
|| (!kuri
&& !uuri
)) {
174 /* No kernel URI, use the UST port. */
177 ret
= snprintf(dst
, size
, "%s [data: %d]", tmp_urls
, dport
);
185 * Get run-time attributes if the session has been started (discarded events,
188 static int get_kernel_runtime_stats(struct ltt_session
*session
,
189 struct ltt_kernel_channel
*kchan
, uint64_t *discarded_events
,
190 uint64_t *lost_packets
)
194 if (!session
->has_been_started
) {
196 *discarded_events
= 0;
201 ret
= consumer_get_discarded_events(session
->id
, kchan
->key
,
202 session
->kernel_session
->consumer
,
208 ret
= consumer_get_lost_packets(session
->id
, kchan
->key
,
209 session
->kernel_session
->consumer
,
220 * Get run-time attributes if the session has been started (discarded events,
223 static int get_ust_runtime_stats(struct ltt_session
*session
,
224 struct ltt_ust_channel
*uchan
, uint64_t *discarded_events
,
225 uint64_t *lost_packets
)
228 struct ltt_ust_session
*usess
;
230 if (!discarded_events
|| !lost_packets
) {
235 usess
= session
->ust_session
;
236 assert(discarded_events
);
237 assert(lost_packets
);
239 if (!usess
|| !session
->has_been_started
) {
240 *discarded_events
= 0;
246 if (usess
->buffer_type
== LTTNG_BUFFER_PER_UID
) {
247 ret
= ust_app_uid_get_channel_runtime_stats(usess
->id
,
248 &usess
->buffer_reg_uid_list
,
249 usess
->consumer
, uchan
->id
,
250 uchan
->attr
.overwrite
,
253 } else if (usess
->buffer_type
== LTTNG_BUFFER_PER_PID
) {
254 ret
= ust_app_pid_get_channel_runtime_stats(usess
,
255 uchan
, usess
->consumer
,
256 uchan
->attr
.overwrite
,
262 *discarded_events
+= uchan
->per_pid_closed_app_discarded
;
263 *lost_packets
+= uchan
->per_pid_closed_app_lost
;
265 ERR("Unsupported buffer type");
276 * Fill lttng_channel array of all channels.
278 static ssize_t
list_lttng_channels(enum lttng_domain_type domain
,
279 struct ltt_session
*session
, struct lttng_channel
*channels
,
280 struct lttng_channel_extended
*chan_exts
)
283 struct ltt_kernel_channel
*kchan
;
285 DBG("Listing channels for session %s", session
->name
);
288 case LTTNG_DOMAIN_KERNEL
:
289 /* Kernel channels */
290 if (session
->kernel_session
!= NULL
) {
291 cds_list_for_each_entry(kchan
,
292 &session
->kernel_session
->channel_list
.head
, list
) {
293 uint64_t discarded_events
, lost_packets
;
294 struct lttng_channel_extended
*extended
;
296 extended
= (struct lttng_channel_extended
*)
297 kchan
->channel
->attr
.extended
.ptr
;
299 ret
= get_kernel_runtime_stats(session
, kchan
,
300 &discarded_events
, &lost_packets
);
304 /* Copy lttng_channel struct to array */
305 memcpy(&channels
[i
], kchan
->channel
, sizeof(struct lttng_channel
));
306 channels
[i
].enabled
= kchan
->enabled
;
307 chan_exts
[i
].discarded_events
=
309 chan_exts
[i
].lost_packets
= lost_packets
;
310 chan_exts
[i
].monitor_timer_interval
=
311 extended
->monitor_timer_interval
;
312 chan_exts
[i
].blocking_timeout
= 0;
317 case LTTNG_DOMAIN_UST
:
319 struct lttng_ht_iter iter
;
320 struct ltt_ust_channel
*uchan
;
323 cds_lfht_for_each_entry(session
->ust_session
->domain_global
.channels
->ht
,
324 &iter
.iter
, uchan
, node
.node
) {
325 uint64_t discarded_events
= 0, lost_packets
= 0;
327 if (lttng_strncpy(channels
[i
].name
, uchan
->name
,
328 LTTNG_SYMBOL_NAME_LEN
)) {
331 channels
[i
].attr
.overwrite
= uchan
->attr
.overwrite
;
332 channels
[i
].attr
.subbuf_size
= uchan
->attr
.subbuf_size
;
333 channels
[i
].attr
.num_subbuf
= uchan
->attr
.num_subbuf
;
334 channels
[i
].attr
.switch_timer_interval
=
335 uchan
->attr
.switch_timer_interval
;
336 channels
[i
].attr
.read_timer_interval
=
337 uchan
->attr
.read_timer_interval
;
338 channels
[i
].enabled
= uchan
->enabled
;
339 channels
[i
].attr
.tracefile_size
= uchan
->tracefile_size
;
340 channels
[i
].attr
.tracefile_count
= uchan
->tracefile_count
;
343 * Map enum lttng_ust_output to enum lttng_event_output.
345 switch (uchan
->attr
.output
) {
347 channels
[i
].attr
.output
= LTTNG_EVENT_MMAP
;
351 * LTTNG_UST_MMAP is the only supported UST
358 chan_exts
[i
].monitor_timer_interval
=
359 uchan
->monitor_timer_interval
;
360 chan_exts
[i
].blocking_timeout
=
361 uchan
->attr
.u
.s
.blocking_timeout
;
363 ret
= get_ust_runtime_stats(session
, uchan
,
364 &discarded_events
, &lost_packets
);
368 chan_exts
[i
].discarded_events
= discarded_events
;
369 chan_exts
[i
].lost_packets
= lost_packets
;
381 return -LTTNG_ERR_FATAL
;
387 static void increment_extended_len(const char *filter_expression
,
388 struct lttng_event_exclusion
*exclusion
, size_t *extended_len
)
390 *extended_len
+= sizeof(struct lttcomm_event_extended_header
);
392 if (filter_expression
) {
393 *extended_len
+= strlen(filter_expression
) + 1;
397 *extended_len
+= exclusion
->count
* LTTNG_SYMBOL_NAME_LEN
;
401 static void append_extended_info(const char *filter_expression
,
402 struct lttng_event_exclusion
*exclusion
, void **extended_at
)
404 struct lttcomm_event_extended_header extended_header
;
405 size_t filter_len
= 0;
406 size_t nb_exclusions
= 0;
408 if (filter_expression
) {
409 filter_len
= strlen(filter_expression
) + 1;
413 nb_exclusions
= exclusion
->count
;
416 /* Set header fields */
417 extended_header
.filter_len
= filter_len
;
418 extended_header
.nb_exclusions
= nb_exclusions
;
421 memcpy(*extended_at
, &extended_header
, sizeof(extended_header
));
422 *extended_at
+= sizeof(extended_header
);
424 /* Copy filter string */
425 if (filter_expression
) {
426 memcpy(*extended_at
, filter_expression
, filter_len
);
427 *extended_at
+= filter_len
;
430 /* Copy exclusion names */
432 size_t len
= nb_exclusions
* LTTNG_SYMBOL_NAME_LEN
;
434 memcpy(*extended_at
, &exclusion
->names
, len
);
440 * Create a list of agent domain events.
442 * Return number of events in list on success or else a negative value.
444 static int list_lttng_agent_events(struct agent
*agt
,
445 struct lttng_event
**events
, size_t *total_size
)
448 unsigned int nb_event
= 0;
449 struct agent_event
*event
;
450 struct lttng_event
*tmp_events
;
451 struct lttng_ht_iter iter
;
452 size_t extended_len
= 0;
458 DBG3("Listing agent events");
461 nb_event
= lttng_ht_get_count(agt
->events
);
469 /* Compute required extended infos size */
470 extended_len
= nb_event
* sizeof(struct lttcomm_event_extended_header
);
473 * This is only valid because the commands which add events are
474 * processed in the same thread as the listing.
477 cds_lfht_for_each_entry(agt
->events
->ht
, &iter
.iter
, event
, node
.node
) {
478 increment_extended_len(event
->filter_expression
, NULL
,
483 *total_size
= nb_event
* sizeof(*tmp_events
) + extended_len
;
484 tmp_events
= zmalloc(*total_size
);
486 PERROR("zmalloc agent events session");
487 ret
= -LTTNG_ERR_FATAL
;
491 extended_at
= ((uint8_t *) tmp_events
) +
492 nb_event
* sizeof(struct lttng_event
);
495 cds_lfht_for_each_entry(agt
->events
->ht
, &iter
.iter
, event
, node
.node
) {
496 strncpy(tmp_events
[i
].name
, event
->name
, sizeof(tmp_events
[i
].name
));
497 tmp_events
[i
].name
[sizeof(tmp_events
[i
].name
) - 1] = '\0';
498 tmp_events
[i
].enabled
= event
->enabled
;
499 tmp_events
[i
].loglevel
= event
->loglevel_value
;
500 tmp_events
[i
].loglevel_type
= event
->loglevel_type
;
503 /* Append extended info */
504 append_extended_info(event
->filter_expression
, NULL
,
509 *events
= tmp_events
;
513 assert(nb_event
== i
);
518 * Create a list of ust global domain events.
520 static int list_lttng_ust_global_events(char *channel_name
,
521 struct ltt_ust_domain_global
*ust_global
,
522 struct lttng_event
**events
, size_t *total_size
)
525 unsigned int nb_event
= 0;
526 struct lttng_ht_iter iter
;
527 struct lttng_ht_node_str
*node
;
528 struct ltt_ust_channel
*uchan
;
529 struct ltt_ust_event
*uevent
;
530 struct lttng_event
*tmp
;
531 size_t extended_len
= 0;
534 DBG("Listing UST global events for channel %s", channel_name
);
538 lttng_ht_lookup(ust_global
->channels
, (void *)channel_name
, &iter
);
539 node
= lttng_ht_iter_get_node_str(&iter
);
541 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
545 uchan
= caa_container_of(&node
->node
, struct ltt_ust_channel
, node
.node
);
547 nb_event
= lttng_ht_get_count(uchan
->events
);
554 DBG3("Listing UST global %d events", nb_event
);
556 /* Compute required extended infos size */
557 cds_lfht_for_each_entry(uchan
->events
->ht
, &iter
.iter
, uevent
, node
.node
) {
558 if (uevent
->internal
) {
563 increment_extended_len(uevent
->filter_expression
,
564 uevent
->exclusion
, &extended_len
);
567 /* All events are internal, skip. */
573 *total_size
= nb_event
* sizeof(struct lttng_event
) + extended_len
;
574 tmp
= zmalloc(*total_size
);
576 ret
= -LTTNG_ERR_FATAL
;
580 extended_at
= ((uint8_t *) tmp
) + nb_event
* sizeof(struct lttng_event
);
582 cds_lfht_for_each_entry(uchan
->events
->ht
, &iter
.iter
, uevent
, node
.node
) {
583 if (uevent
->internal
) {
584 /* This event should remain hidden from clients */
587 strncpy(tmp
[i
].name
, uevent
->attr
.name
, LTTNG_SYMBOL_NAME_LEN
);
588 tmp
[i
].name
[LTTNG_SYMBOL_NAME_LEN
- 1] = '\0';
589 tmp
[i
].enabled
= uevent
->enabled
;
591 switch (uevent
->attr
.instrumentation
) {
592 case LTTNG_UST_TRACEPOINT
:
593 tmp
[i
].type
= LTTNG_EVENT_TRACEPOINT
;
595 case LTTNG_UST_PROBE
:
596 tmp
[i
].type
= LTTNG_EVENT_PROBE
;
598 case LTTNG_UST_FUNCTION
:
599 tmp
[i
].type
= LTTNG_EVENT_FUNCTION
;
603 tmp
[i
].loglevel
= uevent
->attr
.loglevel
;
604 switch (uevent
->attr
.loglevel_type
) {
605 case LTTNG_UST_LOGLEVEL_ALL
:
606 tmp
[i
].loglevel_type
= LTTNG_EVENT_LOGLEVEL_ALL
;
608 case LTTNG_UST_LOGLEVEL_RANGE
:
609 tmp
[i
].loglevel_type
= LTTNG_EVENT_LOGLEVEL_RANGE
;
611 case LTTNG_UST_LOGLEVEL_SINGLE
:
612 tmp
[i
].loglevel_type
= LTTNG_EVENT_LOGLEVEL_SINGLE
;
615 if (uevent
->filter
) {
618 if (uevent
->exclusion
) {
619 tmp
[i
].exclusion
= 1;
623 /* Append extended info */
624 append_extended_info(uevent
->filter_expression
,
625 uevent
->exclusion
, &extended_at
);
636 * Fill lttng_event array of all kernel events in the channel.
638 static int list_lttng_kernel_events(char *channel_name
,
639 struct ltt_kernel_session
*kernel_session
,
640 struct lttng_event
**events
, size_t *total_size
)
643 unsigned int nb_event
;
644 struct ltt_kernel_event
*event
;
645 struct ltt_kernel_channel
*kchan
;
646 size_t extended_len
= 0;
649 kchan
= trace_kernel_get_channel_by_name(channel_name
, kernel_session
);
651 ret
= LTTNG_ERR_KERN_CHAN_NOT_FOUND
;
655 nb_event
= kchan
->event_count
;
657 DBG("Listing events for channel %s", kchan
->channel
->name
);
665 /* Compute required extended infos size */
666 cds_list_for_each_entry(event
, &kchan
->events_list
.head
, list
) {
667 increment_extended_len(event
->filter_expression
, NULL
,
671 *total_size
= nb_event
* sizeof(struct lttng_event
) + extended_len
;
672 *events
= zmalloc(*total_size
);
673 if (*events
== NULL
) {
674 ret
= LTTNG_ERR_FATAL
;
678 extended_at
= ((void *) *events
) +
679 nb_event
* sizeof(struct lttng_event
);
681 /* Kernel channels */
682 cds_list_for_each_entry(event
, &kchan
->events_list
.head
, list
) {
683 strncpy((*events
)[i
].name
, event
->event
->name
, LTTNG_SYMBOL_NAME_LEN
);
684 (*events
)[i
].name
[LTTNG_SYMBOL_NAME_LEN
- 1] = '\0';
685 (*events
)[i
].enabled
= event
->enabled
;
686 (*events
)[i
].filter
=
687 (unsigned char) !!event
->filter_expression
;
689 switch (event
->event
->instrumentation
) {
690 case LTTNG_KERNEL_TRACEPOINT
:
691 (*events
)[i
].type
= LTTNG_EVENT_TRACEPOINT
;
693 case LTTNG_KERNEL_KRETPROBE
:
694 (*events
)[i
].type
= LTTNG_EVENT_FUNCTION
;
695 memcpy(&(*events
)[i
].attr
.probe
, &event
->event
->u
.kprobe
,
696 sizeof(struct lttng_kernel_kprobe
));
698 case LTTNG_KERNEL_KPROBE
:
699 (*events
)[i
].type
= LTTNG_EVENT_PROBE
;
700 memcpy(&(*events
)[i
].attr
.probe
, &event
->event
->u
.kprobe
,
701 sizeof(struct lttng_kernel_kprobe
));
703 case LTTNG_KERNEL_FUNCTION
:
704 (*events
)[i
].type
= LTTNG_EVENT_FUNCTION
;
705 memcpy(&((*events
)[i
].attr
.ftrace
), &event
->event
->u
.ftrace
,
706 sizeof(struct lttng_kernel_function
));
708 case LTTNG_KERNEL_NOOP
:
709 (*events
)[i
].type
= LTTNG_EVENT_NOOP
;
711 case LTTNG_KERNEL_SYSCALL
:
712 (*events
)[i
].type
= LTTNG_EVENT_SYSCALL
;
714 case LTTNG_KERNEL_ALL
:
722 /* Append extended info */
723 append_extended_info(event
->filter_expression
, NULL
,
731 /* Negate the error code to differentiate the size from an error */
736 * Add URI so the consumer output object. Set the correct path depending on the
737 * domain adding the default trace directory.
739 static int add_uri_to_consumer(struct consumer_output
*consumer
,
740 struct lttng_uri
*uri
, enum lttng_domain_type domain
,
741 const char *session_name
)
744 const char *default_trace_dir
;
748 if (consumer
== NULL
) {
749 DBG("No consumer detected. Don't add URI. Stopping.");
750 ret
= LTTNG_ERR_NO_CONSUMER
;
755 case LTTNG_DOMAIN_KERNEL
:
756 default_trace_dir
= DEFAULT_KERNEL_TRACE_DIR
;
758 case LTTNG_DOMAIN_UST
:
759 default_trace_dir
= DEFAULT_UST_TRACE_DIR
;
763 * This case is possible is we try to add the URI to the global tracing
764 * session consumer object which in this case there is no subdir.
766 default_trace_dir
= "";
769 switch (uri
->dtype
) {
772 DBG2("Setting network URI to consumer");
774 if (consumer
->type
== CONSUMER_DST_NET
) {
775 if ((uri
->stype
== LTTNG_STREAM_CONTROL
&&
776 consumer
->dst
.net
.control_isset
) ||
777 (uri
->stype
== LTTNG_STREAM_DATA
&&
778 consumer
->dst
.net
.data_isset
)) {
779 ret
= LTTNG_ERR_URL_EXIST
;
783 memset(&consumer
->dst
.net
, 0, sizeof(consumer
->dst
.net
));
786 consumer
->type
= CONSUMER_DST_NET
;
788 /* Set URI into consumer output object */
789 ret
= consumer_set_network_uri(consumer
, uri
);
793 } else if (ret
== 1) {
795 * URI was the same in the consumer so we do not append the subdir
796 * again so to not duplicate output dir.
802 if (uri
->stype
== LTTNG_STREAM_CONTROL
&& strlen(uri
->subdir
) == 0) {
803 ret
= consumer_set_subdir(consumer
, session_name
);
805 ret
= LTTNG_ERR_FATAL
;
810 if (uri
->stype
== LTTNG_STREAM_CONTROL
) {
811 /* On a new subdir, reappend the default trace dir. */
812 strncat(consumer
->subdir
, default_trace_dir
,
813 sizeof(consumer
->subdir
) - strlen(consumer
->subdir
) - 1);
814 DBG3("Append domain trace name to subdir %s", consumer
->subdir
);
819 DBG2("Setting trace directory path from URI to %s", uri
->dst
.path
);
820 memset(consumer
->dst
.session_root_path
, 0,
821 sizeof(consumer
->dst
.session_root_path
));
822 /* Explicit length checks for strcpy and strcat. */
823 if (strlen(uri
->dst
.path
) + strlen(default_trace_dir
)
824 >= sizeof(consumer
->dst
.session_root_path
)) {
825 ret
= LTTNG_ERR_FATAL
;
828 strcpy(consumer
->dst
.session_root_path
, uri
->dst
.path
);
829 /* Append default trace dir */
830 strcat(consumer
->dst
.session_root_path
, default_trace_dir
);
831 /* Flag consumer as local. */
832 consumer
->type
= CONSUMER_DST_LOCAL
;
843 * Init tracing by creating trace directory and sending fds kernel consumer.
845 static int init_kernel_tracing(struct ltt_kernel_session
*session
)
848 struct lttng_ht_iter iter
;
849 struct consumer_socket
*socket
;
855 if (session
->consumer_fds_sent
== 0 && session
->consumer
!= NULL
) {
856 cds_lfht_for_each_entry(session
->consumer
->socks
->ht
, &iter
.iter
,
858 pthread_mutex_lock(socket
->lock
);
859 ret
= kernel_consumer_send_session(socket
, session
);
860 pthread_mutex_unlock(socket
->lock
);
862 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
874 * Create a socket to the relayd using the URI.
876 * On success, the relayd_sock pointer is set to the created socket.
877 * Else, it's stays untouched and a lttcomm error code is returned.
879 static int create_connect_relayd(struct lttng_uri
*uri
,
880 struct lttcomm_relayd_sock
**relayd_sock
,
881 struct consumer_output
*consumer
)
884 struct lttcomm_relayd_sock
*rsock
;
886 rsock
= lttcomm_alloc_relayd_sock(uri
, RELAYD_VERSION_COMM_MAJOR
,
887 RELAYD_VERSION_COMM_MINOR
);
889 ret
= LTTNG_ERR_FATAL
;
894 * Connect to relayd so we can proceed with a session creation. This call
895 * can possibly block for an arbitrary amount of time to set the health
896 * state to be in poll execution.
899 ret
= relayd_connect(rsock
);
902 ERR("Unable to reach lttng-relayd");
903 ret
= LTTNG_ERR_RELAYD_CONNECT_FAIL
;
907 /* Create socket for control stream. */
908 if (uri
->stype
== LTTNG_STREAM_CONTROL
) {
909 DBG3("Creating relayd stream socket from URI");
911 /* Check relayd version */
912 ret
= relayd_version_check(rsock
);
913 if (ret
== LTTNG_ERR_RELAYD_VERSION_FAIL
) {
915 } else if (ret
< 0) {
916 ERR("Unable to reach lttng-relayd");
917 ret
= LTTNG_ERR_RELAYD_CONNECT_FAIL
;
920 consumer
->relay_major_version
= rsock
->major
;
921 consumer
->relay_minor_version
= rsock
->minor
;
922 } else if (uri
->stype
== LTTNG_STREAM_DATA
) {
923 DBG3("Creating relayd data socket from URI");
925 /* Command is not valid */
926 ERR("Relayd invalid stream type: %d", uri
->stype
);
927 ret
= LTTNG_ERR_INVALID
;
931 *relayd_sock
= rsock
;
936 /* The returned value is not useful since we are on an error path. */
937 (void) relayd_close(rsock
);
945 * Connect to the relayd using URI and send the socket to the right consumer.
947 * The consumer socket lock must be held by the caller.
949 static int send_consumer_relayd_socket(unsigned int session_id
,
950 struct lttng_uri
*relayd_uri
,
951 struct consumer_output
*consumer
,
952 struct consumer_socket
*consumer_sock
,
953 char *session_name
, char *hostname
, int session_live_timer
)
956 struct lttcomm_relayd_sock
*rsock
= NULL
;
958 /* Connect to relayd and make version check if uri is the control. */
959 ret
= create_connect_relayd(relayd_uri
, &rsock
, consumer
);
960 if (ret
!= LTTNG_OK
) {
961 goto relayd_comm_error
;
965 /* Set the network sequence index if not set. */
966 if (consumer
->net_seq_index
== (uint64_t) -1ULL) {
967 pthread_mutex_lock(&relayd_net_seq_idx_lock
);
969 * Increment net_seq_idx because we are about to transfer the
970 * new relayd socket to the consumer.
971 * Assign unique key so the consumer can match streams.
973 consumer
->net_seq_index
= ++relayd_net_seq_idx
;
974 pthread_mutex_unlock(&relayd_net_seq_idx_lock
);
977 /* Send relayd socket to consumer. */
978 ret
= consumer_send_relayd_socket(consumer_sock
, rsock
, consumer
,
979 relayd_uri
->stype
, session_id
,
980 session_name
, hostname
, session_live_timer
);
982 ret
= LTTNG_ERR_ENABLE_CONSUMER_FAIL
;
986 /* Flag that the corresponding socket was sent. */
987 if (relayd_uri
->stype
== LTTNG_STREAM_CONTROL
) {
988 consumer_sock
->control_sock_sent
= 1;
989 } else if (relayd_uri
->stype
== LTTNG_STREAM_DATA
) {
990 consumer_sock
->data_sock_sent
= 1;
996 * Close socket which was dup on the consumer side. The session daemon does
997 * NOT keep track of the relayd socket(s) once transfer to the consumer.
1001 if (ret
!= LTTNG_OK
) {
1003 * The consumer output for this session should not be used anymore
1004 * since the relayd connection failed thus making any tracing or/and
1005 * streaming not usable.
1007 consumer
->enabled
= 0;
1009 (void) relayd_close(rsock
);
1017 * Send both relayd sockets to a specific consumer and domain. This is a
1018 * helper function to facilitate sending the information to the consumer for a
1021 * The consumer socket lock must be held by the caller.
1023 static int send_consumer_relayd_sockets(enum lttng_domain_type domain
,
1024 unsigned int session_id
, struct consumer_output
*consumer
,
1025 struct consumer_socket
*sock
, char *session_name
,
1026 char *hostname
, int session_live_timer
)
1033 /* Sending control relayd socket. */
1034 if (!sock
->control_sock_sent
) {
1035 ret
= send_consumer_relayd_socket(session_id
,
1036 &consumer
->dst
.net
.control
, consumer
, sock
,
1037 session_name
, hostname
, session_live_timer
);
1038 if (ret
!= LTTNG_OK
) {
1043 /* Sending data relayd socket. */
1044 if (!sock
->data_sock_sent
) {
1045 ret
= send_consumer_relayd_socket(session_id
,
1046 &consumer
->dst
.net
.data
, consumer
, sock
,
1047 session_name
, hostname
, session_live_timer
);
1048 if (ret
!= LTTNG_OK
) {
1058 * Setup relayd connections for a tracing session. First creates the socket to
1059 * the relayd and send them to the right domain consumer. Consumer type MUST be
1062 int cmd_setup_relayd(struct ltt_session
*session
)
1065 struct ltt_ust_session
*usess
;
1066 struct ltt_kernel_session
*ksess
;
1067 struct consumer_socket
*socket
;
1068 struct lttng_ht_iter iter
;
1072 usess
= session
->ust_session
;
1073 ksess
= session
->kernel_session
;
1075 DBG("Setting relayd for session %s", session
->name
);
1079 if (usess
&& usess
->consumer
&& usess
->consumer
->type
== CONSUMER_DST_NET
1080 && usess
->consumer
->enabled
) {
1081 /* For each consumer socket, send relayd sockets */
1082 cds_lfht_for_each_entry(usess
->consumer
->socks
->ht
, &iter
.iter
,
1083 socket
, node
.node
) {
1084 pthread_mutex_lock(socket
->lock
);
1085 ret
= send_consumer_relayd_sockets(LTTNG_DOMAIN_UST
, session
->id
,
1086 usess
->consumer
, socket
,
1087 session
->name
, session
->hostname
,
1088 session
->live_timer
);
1089 pthread_mutex_unlock(socket
->lock
);
1090 if (ret
!= LTTNG_OK
) {
1093 /* Session is now ready for network streaming. */
1094 session
->net_handle
= 1;
1096 session
->consumer
->relay_major_version
=
1097 usess
->consumer
->relay_major_version
;
1098 session
->consumer
->relay_minor_version
=
1099 usess
->consumer
->relay_minor_version
;
1102 if (ksess
&& ksess
->consumer
&& ksess
->consumer
->type
== CONSUMER_DST_NET
1103 && ksess
->consumer
->enabled
) {
1104 cds_lfht_for_each_entry(ksess
->consumer
->socks
->ht
, &iter
.iter
,
1105 socket
, node
.node
) {
1106 pthread_mutex_lock(socket
->lock
);
1107 ret
= send_consumer_relayd_sockets(LTTNG_DOMAIN_KERNEL
, session
->id
,
1108 ksess
->consumer
, socket
,
1109 session
->name
, session
->hostname
,
1110 session
->live_timer
);
1111 pthread_mutex_unlock(socket
->lock
);
1112 if (ret
!= LTTNG_OK
) {
1115 /* Session is now ready for network streaming. */
1116 session
->net_handle
= 1;
1118 session
->consumer
->relay_major_version
=
1119 ksess
->consumer
->relay_major_version
;
1120 session
->consumer
->relay_minor_version
=
1121 ksess
->consumer
->relay_minor_version
;
1130 * Start a kernel session by opening all necessary streams.
1132 static int start_kernel_session(struct ltt_kernel_session
*ksess
, int wpipe
)
1135 struct ltt_kernel_channel
*kchan
;
1137 /* Open kernel metadata */
1138 if (ksess
->metadata
== NULL
&& ksess
->output_traces
) {
1139 ret
= kernel_open_metadata(ksess
);
1141 ret
= LTTNG_ERR_KERN_META_FAIL
;
1146 /* Open kernel metadata stream */
1147 if (ksess
->metadata
&& ksess
->metadata_stream_fd
< 0) {
1148 ret
= kernel_open_metadata_stream(ksess
);
1150 ERR("Kernel create metadata stream failed");
1151 ret
= LTTNG_ERR_KERN_STREAM_FAIL
;
1156 /* For each channel */
1157 cds_list_for_each_entry(kchan
, &ksess
->channel_list
.head
, list
) {
1158 if (kchan
->stream_count
== 0) {
1159 ret
= kernel_open_channel_stream(kchan
);
1161 ret
= LTTNG_ERR_KERN_STREAM_FAIL
;
1164 /* Update the stream global counter */
1165 ksess
->stream_count_global
+= ret
;
1169 /* Setup kernel consumer socket and send fds to it */
1170 ret
= init_kernel_tracing(ksess
);
1172 ret
= LTTNG_ERR_KERN_START_FAIL
;
1176 /* This start the kernel tracing */
1177 ret
= kernel_start_session(ksess
);
1179 ret
= LTTNG_ERR_KERN_START_FAIL
;
1183 /* Quiescent wait after starting trace */
1184 kernel_wait_quiescent(wpipe
);
1195 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
1197 int cmd_disable_channel(struct ltt_session
*session
,
1198 enum lttng_domain_type domain
, char *channel_name
)
1201 struct ltt_ust_session
*usess
;
1203 usess
= session
->ust_session
;
1208 case LTTNG_DOMAIN_KERNEL
:
1210 ret
= channel_kernel_disable(session
->kernel_session
,
1212 if (ret
!= LTTNG_OK
) {
1216 kernel_wait_quiescent(kernel_tracer_fd
);
1219 case LTTNG_DOMAIN_UST
:
1221 struct ltt_ust_channel
*uchan
;
1222 struct lttng_ht
*chan_ht
;
1224 chan_ht
= usess
->domain_global
.channels
;
1226 uchan
= trace_ust_find_channel_by_name(chan_ht
, channel_name
);
1227 if (uchan
== NULL
) {
1228 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1232 ret
= channel_ust_disable(usess
, uchan
);
1233 if (ret
!= LTTNG_OK
) {
1239 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1251 * Command LTTNG_TRACK_PID processed by the client thread.
1253 * Called with session lock held.
1255 int cmd_track_pid(struct ltt_session
*session
, enum lttng_domain_type domain
,
1263 case LTTNG_DOMAIN_KERNEL
:
1265 struct ltt_kernel_session
*ksess
;
1267 ksess
= session
->kernel_session
;
1269 ret
= kernel_track_pid(ksess
, pid
);
1270 if (ret
!= LTTNG_OK
) {
1274 kernel_wait_quiescent(kernel_tracer_fd
);
1277 case LTTNG_DOMAIN_UST
:
1279 struct ltt_ust_session
*usess
;
1281 usess
= session
->ust_session
;
1283 ret
= trace_ust_track_pid(usess
, pid
);
1284 if (ret
!= LTTNG_OK
) {
1290 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1302 * Command LTTNG_UNTRACK_PID processed by the client thread.
1304 * Called with session lock held.
1306 int cmd_untrack_pid(struct ltt_session
*session
, enum lttng_domain_type domain
,
1314 case LTTNG_DOMAIN_KERNEL
:
1316 struct ltt_kernel_session
*ksess
;
1318 ksess
= session
->kernel_session
;
1320 ret
= kernel_untrack_pid(ksess
, pid
);
1321 if (ret
!= LTTNG_OK
) {
1325 kernel_wait_quiescent(kernel_tracer_fd
);
1328 case LTTNG_DOMAIN_UST
:
1330 struct ltt_ust_session
*usess
;
1332 usess
= session
->ust_session
;
1334 ret
= trace_ust_untrack_pid(usess
, pid
);
1335 if (ret
!= LTTNG_OK
) {
1341 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1353 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
1355 * The wpipe arguments is used as a notifier for the kernel thread.
1357 int cmd_enable_channel(struct ltt_session
*session
,
1358 struct lttng_domain
*domain
, struct lttng_channel
*attr
, int wpipe
)
1361 struct ltt_ust_session
*usess
= session
->ust_session
;
1362 struct lttng_ht
*chan_ht
;
1369 len
= lttng_strnlen(attr
->name
, sizeof(attr
->name
));
1371 /* Validate channel name */
1372 if (attr
->name
[0] == '.' ||
1373 memchr(attr
->name
, '/', len
) != NULL
) {
1374 ret
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1378 DBG("Enabling channel %s for session %s", attr
->name
, session
->name
);
1383 * Don't try to enable a channel if the session has been started at
1384 * some point in time before. The tracer does not allow it.
1386 if (session
->has_been_started
) {
1387 ret
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
1392 * If the session is a live session, remove the switch timer, the
1393 * live timer does the same thing but sends also synchronisation
1394 * beacons for inactive streams.
1396 if (session
->live_timer
> 0) {
1397 attr
->attr
.live_timer_interval
= session
->live_timer
;
1398 attr
->attr
.switch_timer_interval
= 0;
1401 /* Check for feature support */
1402 switch (domain
->type
) {
1403 case LTTNG_DOMAIN_KERNEL
:
1405 if (kernel_supports_ring_buffer_snapshot_sample_positions(kernel_tracer_fd
) != 1) {
1406 /* Sampling position of buffer is not supported */
1407 WARN("Kernel tracer does not support buffer monitoring. "
1408 "Setting the monitor interval timer to 0 "
1409 "(disabled) for channel '%s' of session '%s'",
1410 attr
-> name
, session
->name
);
1411 lttng_channel_set_monitor_timer_interval(attr
, 0);
1415 case LTTNG_DOMAIN_UST
:
1417 case LTTNG_DOMAIN_JUL
:
1418 case LTTNG_DOMAIN_LOG4J
:
1419 case LTTNG_DOMAIN_PYTHON
:
1420 if (!agent_tracing_is_enabled()) {
1421 DBG("Attempted to enable a channel in an agent domain but the agent thread is not running");
1422 ret
= LTTNG_ERR_AGENT_TRACING_DISABLED
;
1427 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1431 switch (domain
->type
) {
1432 case LTTNG_DOMAIN_KERNEL
:
1434 struct ltt_kernel_channel
*kchan
;
1436 kchan
= trace_kernel_get_channel_by_name(attr
->name
,
1437 session
->kernel_session
);
1438 if (kchan
== NULL
) {
1439 ret
= channel_kernel_create(session
->kernel_session
, attr
, wpipe
);
1440 if (attr
->name
[0] != '\0') {
1441 session
->kernel_session
->has_non_default_channel
= 1;
1444 ret
= channel_kernel_enable(session
->kernel_session
, kchan
);
1447 if (ret
!= LTTNG_OK
) {
1451 kernel_wait_quiescent(kernel_tracer_fd
);
1454 case LTTNG_DOMAIN_UST
:
1455 case LTTNG_DOMAIN_JUL
:
1456 case LTTNG_DOMAIN_LOG4J
:
1457 case LTTNG_DOMAIN_PYTHON
:
1459 struct ltt_ust_channel
*uchan
;
1464 * Current agent implementation limitations force us to allow
1465 * only one channel at once in "agent" subdomains. Each
1466 * subdomain has a default channel name which must be strictly
1469 if (domain
->type
== LTTNG_DOMAIN_JUL
) {
1470 if (strncmp(attr
->name
, DEFAULT_JUL_CHANNEL_NAME
,
1471 LTTNG_SYMBOL_NAME_LEN
)) {
1472 ret
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1475 } else if (domain
->type
== LTTNG_DOMAIN_LOG4J
) {
1476 if (strncmp(attr
->name
, DEFAULT_LOG4J_CHANNEL_NAME
,
1477 LTTNG_SYMBOL_NAME_LEN
)) {
1478 ret
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1481 } else if (domain
->type
== LTTNG_DOMAIN_PYTHON
) {
1482 if (strncmp(attr
->name
, DEFAULT_PYTHON_CHANNEL_NAME
,
1483 LTTNG_SYMBOL_NAME_LEN
)) {
1484 ret
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1489 chan_ht
= usess
->domain_global
.channels
;
1491 uchan
= trace_ust_find_channel_by_name(chan_ht
, attr
->name
);
1492 if (uchan
== NULL
) {
1493 ret
= channel_ust_create(usess
, attr
, domain
->buf_type
);
1494 if (attr
->name
[0] != '\0') {
1495 usess
->has_non_default_channel
= 1;
1498 ret
= channel_ust_enable(usess
, uchan
);
1503 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1514 * Command LTTNG_DISABLE_EVENT processed by the client thread.
1516 int cmd_disable_event(struct ltt_session
*session
,
1517 enum lttng_domain_type domain
, char *channel_name
,
1518 struct lttng_event
*event
)
1523 DBG("Disable event command for event \'%s\'", event
->name
);
1525 event_name
= event
->name
;
1527 /* Error out on unhandled search criteria */
1528 if (event
->loglevel_type
|| event
->loglevel
!= -1 || event
->enabled
1529 || event
->pid
|| event
->filter
|| event
->exclusion
) {
1530 ret
= LTTNG_ERR_UNK
;
1537 case LTTNG_DOMAIN_KERNEL
:
1539 struct ltt_kernel_channel
*kchan
;
1540 struct ltt_kernel_session
*ksess
;
1542 ksess
= session
->kernel_session
;
1545 * If a non-default channel has been created in the
1546 * session, explicitely require that -c chan_name needs
1549 if (ksess
->has_non_default_channel
&& channel_name
[0] == '\0') {
1550 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
1554 kchan
= trace_kernel_get_channel_by_name(channel_name
, ksess
);
1555 if (kchan
== NULL
) {
1556 ret
= LTTNG_ERR_KERN_CHAN_NOT_FOUND
;
1560 switch (event
->type
) {
1561 case LTTNG_EVENT_ALL
:
1562 case LTTNG_EVENT_TRACEPOINT
:
1563 case LTTNG_EVENT_SYSCALL
:
1564 case LTTNG_EVENT_PROBE
:
1565 case LTTNG_EVENT_FUNCTION
:
1566 case LTTNG_EVENT_FUNCTION_ENTRY
:/* fall-through */
1567 if (event_name
[0] == '\0') {
1568 ret
= event_kernel_disable_event(kchan
,
1571 ret
= event_kernel_disable_event(kchan
,
1572 event_name
, event
->type
);
1574 if (ret
!= LTTNG_OK
) {
1579 ret
= LTTNG_ERR_UNK
;
1583 kernel_wait_quiescent(kernel_tracer_fd
);
1586 case LTTNG_DOMAIN_UST
:
1588 struct ltt_ust_channel
*uchan
;
1589 struct ltt_ust_session
*usess
;
1591 usess
= session
->ust_session
;
1593 if (validate_ust_event_name(event_name
)) {
1594 ret
= LTTNG_ERR_INVALID_EVENT_NAME
;
1599 * If a non-default channel has been created in the
1600 * session, explicitly require that -c chan_name needs
1603 if (usess
->has_non_default_channel
&& channel_name
[0] == '\0') {
1604 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
1608 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
1610 if (uchan
== NULL
) {
1611 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1615 switch (event
->type
) {
1616 case LTTNG_EVENT_ALL
:
1618 * An empty event name means that everything
1619 * should be disabled.
1621 if (event
->name
[0] == '\0') {
1622 ret
= event_ust_disable_all_tracepoints(usess
, uchan
);
1624 ret
= event_ust_disable_tracepoint(usess
, uchan
,
1627 if (ret
!= LTTNG_OK
) {
1632 ret
= LTTNG_ERR_UNK
;
1636 DBG3("Disable UST event %s in channel %s completed", event_name
,
1640 case LTTNG_DOMAIN_LOG4J
:
1641 case LTTNG_DOMAIN_JUL
:
1642 case LTTNG_DOMAIN_PYTHON
:
1645 struct ltt_ust_session
*usess
= session
->ust_session
;
1649 switch (event
->type
) {
1650 case LTTNG_EVENT_ALL
:
1653 ret
= LTTNG_ERR_UNK
;
1657 agt
= trace_ust_find_agent(usess
, domain
);
1659 ret
= -LTTNG_ERR_UST_EVENT_NOT_FOUND
;
1663 * An empty event name means that everything
1664 * should be disabled.
1666 if (event
->name
[0] == '\0') {
1667 ret
= event_agent_disable_all(usess
, agt
);
1669 ret
= event_agent_disable(usess
, agt
, event_name
);
1671 if (ret
!= LTTNG_OK
) {
1678 ret
= LTTNG_ERR_UND
;
1691 * Command LTTNG_ADD_CONTEXT processed by the client thread.
1693 int cmd_add_context(struct ltt_session
*session
, enum lttng_domain_type domain
,
1694 char *channel_name
, struct lttng_event_context
*ctx
, int kwpipe
)
1696 int ret
, chan_kern_created
= 0, chan_ust_created
= 0;
1697 char *app_ctx_provider_name
= NULL
, *app_ctx_name
= NULL
;
1700 * Don't try to add a context if the session has been started at
1701 * some point in time before. The tracer does not allow it and would
1702 * result in a corrupted trace.
1704 if (session
->has_been_started
) {
1705 ret
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
1709 if (ctx
->ctx
== LTTNG_EVENT_CONTEXT_APP_CONTEXT
) {
1710 app_ctx_provider_name
= ctx
->u
.app_ctx
.provider_name
;
1711 app_ctx_name
= ctx
->u
.app_ctx
.ctx_name
;
1715 case LTTNG_DOMAIN_KERNEL
:
1716 assert(session
->kernel_session
);
1718 if (session
->kernel_session
->channel_count
== 0) {
1719 /* Create default channel */
1720 ret
= channel_kernel_create(session
->kernel_session
, NULL
, kwpipe
);
1721 if (ret
!= LTTNG_OK
) {
1724 chan_kern_created
= 1;
1726 /* Add kernel context to kernel tracer */
1727 ret
= context_kernel_add(session
->kernel_session
, ctx
, channel_name
);
1728 if (ret
!= LTTNG_OK
) {
1732 case LTTNG_DOMAIN_JUL
:
1733 case LTTNG_DOMAIN_LOG4J
:
1736 * Validate channel name.
1737 * If no channel name is given and the domain is JUL or LOG4J,
1738 * set it to the appropriate domain-specific channel name. If
1739 * a name is provided but does not match the expexted channel
1740 * name, return an error.
1742 if (domain
== LTTNG_DOMAIN_JUL
&& *channel_name
&&
1743 strcmp(channel_name
,
1744 DEFAULT_JUL_CHANNEL_NAME
)) {
1745 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1747 } else if (domain
== LTTNG_DOMAIN_LOG4J
&& *channel_name
&&
1748 strcmp(channel_name
,
1749 DEFAULT_LOG4J_CHANNEL_NAME
)) {
1750 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1753 /* break is _not_ missing here. */
1755 case LTTNG_DOMAIN_UST
:
1757 struct ltt_ust_session
*usess
= session
->ust_session
;
1758 unsigned int chan_count
;
1762 chan_count
= lttng_ht_get_count(usess
->domain_global
.channels
);
1763 if (chan_count
== 0) {
1764 struct lttng_channel
*attr
;
1765 /* Create default channel */
1766 attr
= channel_new_default_attr(domain
, usess
->buffer_type
);
1768 ret
= LTTNG_ERR_FATAL
;
1772 ret
= channel_ust_create(usess
, attr
, usess
->buffer_type
);
1773 if (ret
!= LTTNG_OK
) {
1777 channel_attr_destroy(attr
);
1778 chan_ust_created
= 1;
1781 ret
= context_ust_add(usess
, domain
, ctx
, channel_name
);
1782 free(app_ctx_provider_name
);
1784 app_ctx_name
= NULL
;
1785 app_ctx_provider_name
= NULL
;
1786 if (ret
!= LTTNG_OK
) {
1792 ret
= LTTNG_ERR_UND
;
1800 if (chan_kern_created
) {
1801 struct ltt_kernel_channel
*kchan
=
1802 trace_kernel_get_channel_by_name(DEFAULT_CHANNEL_NAME
,
1803 session
->kernel_session
);
1804 /* Created previously, this should NOT fail. */
1806 kernel_destroy_channel(kchan
);
1809 if (chan_ust_created
) {
1810 struct ltt_ust_channel
*uchan
=
1811 trace_ust_find_channel_by_name(
1812 session
->ust_session
->domain_global
.channels
,
1813 DEFAULT_CHANNEL_NAME
);
1814 /* Created previously, this should NOT fail. */
1816 /* Remove from the channel list of the session. */
1817 trace_ust_delete_channel(session
->ust_session
->domain_global
.channels
,
1819 trace_ust_destroy_channel(uchan
);
1822 free(app_ctx_provider_name
);
1827 static inline bool name_starts_with(const char *name
, const char *prefix
)
1829 const size_t max_cmp_len
= min(strlen(prefix
), LTTNG_SYMBOL_NAME_LEN
);
1831 return !strncmp(name
, prefix
, max_cmp_len
);
1834 /* Perform userspace-specific event name validation */
1835 static int validate_ust_event_name(const char *name
)
1845 * Check name against all internal UST event component namespaces used
1848 if (name_starts_with(name
, DEFAULT_JUL_EVENT_COMPONENT
) ||
1849 name_starts_with(name
, DEFAULT_LOG4J_EVENT_COMPONENT
) ||
1850 name_starts_with(name
, DEFAULT_PYTHON_EVENT_COMPONENT
)) {
1859 * Internal version of cmd_enable_event() with a supplemental
1860 * "internal_event" flag which is used to enable internal events which should
1861 * be hidden from clients. Such events are used in the agent implementation to
1862 * enable the events through which all "agent" events are funeled.
1864 static int _cmd_enable_event(struct ltt_session
*session
,
1865 struct lttng_domain
*domain
,
1866 char *channel_name
, struct lttng_event
*event
,
1867 char *filter_expression
,
1868 struct lttng_filter_bytecode
*filter
,
1869 struct lttng_event_exclusion
*exclusion
,
1870 int wpipe
, bool internal_event
)
1872 int ret
= 0, channel_created
= 0;
1873 struct lttng_channel
*attr
= NULL
;
1877 assert(channel_name
);
1879 /* If we have a filter, we must have its filter expression */
1880 assert(!(!!filter_expression
^ !!filter
));
1882 /* Normalize event name as a globbing pattern */
1883 strutils_normalize_star_glob_pattern(event
->name
);
1885 /* Normalize exclusion names as globbing patterns */
1889 for (i
= 0; i
< exclusion
->count
; i
++) {
1890 char *name
= LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion
, i
);
1892 strutils_normalize_star_glob_pattern(name
);
1896 DBG("Enable event command for event \'%s\'", event
->name
);
1900 switch (domain
->type
) {
1901 case LTTNG_DOMAIN_KERNEL
:
1903 struct ltt_kernel_channel
*kchan
;
1906 * If a non-default channel has been created in the
1907 * session, explicitely require that -c chan_name needs
1910 if (session
->kernel_session
->has_non_default_channel
1911 && channel_name
[0] == '\0') {
1912 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
1916 kchan
= trace_kernel_get_channel_by_name(channel_name
,
1917 session
->kernel_session
);
1918 if (kchan
== NULL
) {
1919 attr
= channel_new_default_attr(LTTNG_DOMAIN_KERNEL
,
1920 LTTNG_BUFFER_GLOBAL
);
1922 ret
= LTTNG_ERR_FATAL
;
1925 if (lttng_strncpy(attr
->name
, channel_name
,
1926 sizeof(attr
->name
))) {
1927 ret
= LTTNG_ERR_INVALID
;
1931 ret
= cmd_enable_channel(session
, domain
, attr
, wpipe
);
1932 if (ret
!= LTTNG_OK
) {
1935 channel_created
= 1;
1938 /* Get the newly created kernel channel pointer */
1939 kchan
= trace_kernel_get_channel_by_name(channel_name
,
1940 session
->kernel_session
);
1941 if (kchan
== NULL
) {
1942 /* This sould not happen... */
1943 ret
= LTTNG_ERR_FATAL
;
1947 switch (event
->type
) {
1948 case LTTNG_EVENT_ALL
:
1950 char *filter_expression_a
= NULL
;
1951 struct lttng_filter_bytecode
*filter_a
= NULL
;
1954 * We need to duplicate filter_expression and filter,
1955 * because ownership is passed to first enable
1958 if (filter_expression
) {
1959 filter_expression_a
= strdup(filter_expression
);
1960 if (!filter_expression_a
) {
1961 ret
= LTTNG_ERR_FATAL
;
1966 filter_a
= zmalloc(sizeof(*filter_a
) + filter
->len
);
1968 free(filter_expression_a
);
1969 ret
= LTTNG_ERR_FATAL
;
1972 memcpy(filter_a
, filter
, sizeof(*filter_a
) + filter
->len
);
1974 event
->type
= LTTNG_EVENT_TRACEPOINT
; /* Hack */
1975 ret
= event_kernel_enable_event(kchan
, event
,
1976 filter_expression
, filter
);
1977 /* We have passed ownership */
1978 filter_expression
= NULL
;
1980 if (ret
!= LTTNG_OK
) {
1981 if (channel_created
) {
1982 /* Let's not leak a useless channel. */
1983 kernel_destroy_channel(kchan
);
1985 free(filter_expression_a
);
1989 event
->type
= LTTNG_EVENT_SYSCALL
; /* Hack */
1990 ret
= event_kernel_enable_event(kchan
, event
,
1991 filter_expression_a
, filter_a
);
1992 /* We have passed ownership */
1993 filter_expression_a
= NULL
;
1995 if (ret
!= LTTNG_OK
) {
2000 case LTTNG_EVENT_PROBE
:
2001 case LTTNG_EVENT_FUNCTION
:
2002 case LTTNG_EVENT_FUNCTION_ENTRY
:
2003 case LTTNG_EVENT_TRACEPOINT
:
2004 ret
= event_kernel_enable_event(kchan
, event
,
2005 filter_expression
, filter
);
2006 /* We have passed ownership */
2007 filter_expression
= NULL
;
2009 if (ret
!= LTTNG_OK
) {
2010 if (channel_created
) {
2011 /* Let's not leak a useless channel. */
2012 kernel_destroy_channel(kchan
);
2017 case LTTNG_EVENT_SYSCALL
:
2018 ret
= event_kernel_enable_event(kchan
, event
,
2019 filter_expression
, filter
);
2020 /* We have passed ownership */
2021 filter_expression
= NULL
;
2023 if (ret
!= LTTNG_OK
) {
2028 ret
= LTTNG_ERR_UNK
;
2032 kernel_wait_quiescent(kernel_tracer_fd
);
2035 case LTTNG_DOMAIN_UST
:
2037 struct ltt_ust_channel
*uchan
;
2038 struct ltt_ust_session
*usess
= session
->ust_session
;
2043 * If a non-default channel has been created in the
2044 * session, explicitely require that -c chan_name needs
2047 if (usess
->has_non_default_channel
&& channel_name
[0] == '\0') {
2048 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
2052 /* Get channel from global UST domain */
2053 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
2055 if (uchan
== NULL
) {
2056 /* Create default channel */
2057 attr
= channel_new_default_attr(LTTNG_DOMAIN_UST
,
2058 usess
->buffer_type
);
2060 ret
= LTTNG_ERR_FATAL
;
2063 if (lttng_strncpy(attr
->name
, channel_name
,
2064 sizeof(attr
->name
))) {
2065 ret
= LTTNG_ERR_INVALID
;
2069 ret
= cmd_enable_channel(session
, domain
, attr
, wpipe
);
2070 if (ret
!= LTTNG_OK
) {
2074 /* Get the newly created channel reference back */
2075 uchan
= trace_ust_find_channel_by_name(
2076 usess
->domain_global
.channels
, channel_name
);
2080 if (uchan
->domain
!= LTTNG_DOMAIN_UST
&& !internal_event
) {
2082 * Don't allow users to add UST events to channels which
2083 * are assigned to a userspace subdomain (JUL, Log4J,
2086 ret
= LTTNG_ERR_INVALID_CHANNEL_DOMAIN
;
2090 if (!internal_event
) {
2092 * Ensure the event name is not reserved for internal
2095 ret
= validate_ust_event_name(event
->name
);
2097 WARN("Userspace event name %s failed validation.",
2099 ret
= LTTNG_ERR_INVALID_EVENT_NAME
;
2104 /* At this point, the session and channel exist on the tracer */
2105 ret
= event_ust_enable_tracepoint(usess
, uchan
, event
,
2106 filter_expression
, filter
, exclusion
,
2108 /* We have passed ownership */
2109 filter_expression
= NULL
;
2112 if (ret
== LTTNG_ERR_UST_EVENT_ENABLED
) {
2113 goto already_enabled
;
2114 } else if (ret
!= LTTNG_OK
) {
2119 case LTTNG_DOMAIN_LOG4J
:
2120 case LTTNG_DOMAIN_JUL
:
2121 case LTTNG_DOMAIN_PYTHON
:
2123 const char *default_event_name
, *default_chan_name
;
2125 struct lttng_event uevent
;
2126 struct lttng_domain tmp_dom
;
2127 struct ltt_ust_session
*usess
= session
->ust_session
;
2131 if (!agent_tracing_is_enabled()) {
2132 DBG("Attempted to enable an event in an agent domain but the agent thread is not running");
2133 ret
= LTTNG_ERR_AGENT_TRACING_DISABLED
;
2137 agt
= trace_ust_find_agent(usess
, domain
->type
);
2139 agt
= agent_create(domain
->type
);
2141 ret
= LTTNG_ERR_NOMEM
;
2144 agent_add(agt
, usess
->agents
);
2147 /* Create the default tracepoint. */
2148 memset(&uevent
, 0, sizeof(uevent
));
2149 uevent
.type
= LTTNG_EVENT_TRACEPOINT
;
2150 uevent
.loglevel_type
= LTTNG_EVENT_LOGLEVEL_ALL
;
2151 default_event_name
= event_get_default_agent_ust_name(
2153 if (!default_event_name
) {
2154 ret
= LTTNG_ERR_FATAL
;
2157 strncpy(uevent
.name
, default_event_name
, sizeof(uevent
.name
));
2158 uevent
.name
[sizeof(uevent
.name
) - 1] = '\0';
2161 * The domain type is changed because we are about to enable the
2162 * default channel and event for the JUL domain that are hardcoded.
2163 * This happens in the UST domain.
2165 memcpy(&tmp_dom
, domain
, sizeof(tmp_dom
));
2166 tmp_dom
.type
= LTTNG_DOMAIN_UST
;
2168 switch (domain
->type
) {
2169 case LTTNG_DOMAIN_LOG4J
:
2170 default_chan_name
= DEFAULT_LOG4J_CHANNEL_NAME
;
2172 case LTTNG_DOMAIN_JUL
:
2173 default_chan_name
= DEFAULT_JUL_CHANNEL_NAME
;
2175 case LTTNG_DOMAIN_PYTHON
:
2176 default_chan_name
= DEFAULT_PYTHON_CHANNEL_NAME
;
2179 /* The switch/case we are in makes this impossible */
2184 char *filter_expression_copy
= NULL
;
2185 struct lttng_filter_bytecode
*filter_copy
= NULL
;
2188 const size_t filter_size
= sizeof(
2189 struct lttng_filter_bytecode
)
2192 filter_copy
= zmalloc(filter_size
);
2194 ret
= LTTNG_ERR_NOMEM
;
2197 memcpy(filter_copy
, filter
, filter_size
);
2199 filter_expression_copy
=
2200 strdup(filter_expression
);
2201 if (!filter_expression
) {
2202 ret
= LTTNG_ERR_NOMEM
;
2205 if (!filter_expression_copy
|| !filter_copy
) {
2206 free(filter_expression_copy
);
2212 ret
= cmd_enable_event_internal(session
, &tmp_dom
,
2213 (char *) default_chan_name
,
2214 &uevent
, filter_expression_copy
,
2215 filter_copy
, NULL
, wpipe
);
2218 if (ret
== LTTNG_ERR_UST_EVENT_ENABLED
) {
2219 goto already_enabled
;
2220 } else if (ret
!= LTTNG_OK
) {
2224 /* The wild card * means that everything should be enabled. */
2225 if (strncmp(event
->name
, "*", 1) == 0 && strlen(event
->name
) == 1) {
2226 ret
= event_agent_enable_all(usess
, agt
, event
, filter
,
2229 ret
= event_agent_enable(usess
, agt
, event
, filter
,
2233 filter_expression
= NULL
;
2234 if (ret
!= LTTNG_OK
) {
2241 ret
= LTTNG_ERR_UND
;
2249 free(filter_expression
);
2252 channel_attr_destroy(attr
);
2258 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2259 * We own filter, exclusion, and filter_expression.
2261 int cmd_enable_event(struct ltt_session
*session
, struct lttng_domain
*domain
,
2262 char *channel_name
, struct lttng_event
*event
,
2263 char *filter_expression
,
2264 struct lttng_filter_bytecode
*filter
,
2265 struct lttng_event_exclusion
*exclusion
,
2268 return _cmd_enable_event(session
, domain
, channel_name
, event
,
2269 filter_expression
, filter
, exclusion
, wpipe
, false);
2273 * Enable an event which is internal to LTTng. An internal should
2274 * never be made visible to clients and are immune to checks such as
2277 static int cmd_enable_event_internal(struct ltt_session
*session
,
2278 struct lttng_domain
*domain
,
2279 char *channel_name
, struct lttng_event
*event
,
2280 char *filter_expression
,
2281 struct lttng_filter_bytecode
*filter
,
2282 struct lttng_event_exclusion
*exclusion
,
2285 return _cmd_enable_event(session
, domain
, channel_name
, event
,
2286 filter_expression
, filter
, exclusion
, wpipe
, true);
2290 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
2292 ssize_t
cmd_list_tracepoints(enum lttng_domain_type domain
,
2293 struct lttng_event
**events
)
2296 ssize_t nb_events
= 0;
2299 case LTTNG_DOMAIN_KERNEL
:
2300 nb_events
= kernel_list_events(kernel_tracer_fd
, events
);
2301 if (nb_events
< 0) {
2302 ret
= LTTNG_ERR_KERN_LIST_FAIL
;
2306 case LTTNG_DOMAIN_UST
:
2307 nb_events
= ust_app_list_events(events
);
2308 if (nb_events
< 0) {
2309 ret
= LTTNG_ERR_UST_LIST_FAIL
;
2313 case LTTNG_DOMAIN_LOG4J
:
2314 case LTTNG_DOMAIN_JUL
:
2315 case LTTNG_DOMAIN_PYTHON
:
2316 nb_events
= agent_list_events(events
, domain
);
2317 if (nb_events
< 0) {
2318 ret
= LTTNG_ERR_UST_LIST_FAIL
;
2323 ret
= LTTNG_ERR_UND
;
2330 /* Return negative value to differentiate return code */
2335 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
2337 ssize_t
cmd_list_tracepoint_fields(enum lttng_domain_type domain
,
2338 struct lttng_event_field
**fields
)
2341 ssize_t nb_fields
= 0;
2344 case LTTNG_DOMAIN_UST
:
2345 nb_fields
= ust_app_list_event_fields(fields
);
2346 if (nb_fields
< 0) {
2347 ret
= LTTNG_ERR_UST_LIST_FAIL
;
2351 case LTTNG_DOMAIN_KERNEL
:
2352 default: /* fall-through */
2353 ret
= LTTNG_ERR_UND
;
2360 /* Return negative value to differentiate return code */
/*
 * Command LTTNG_LIST_SYSCALLS processed by the client thread.
 *
 * Thin wrapper: the return value of syscall_table_list() is propagated
 * to the caller unchanged.
 */
ssize_t cmd_list_syscalls(struct lttng_event **events)
{
	ssize_t nb_events;

	nb_events = syscall_table_list(events);
	return nb_events;
}
2370 * Command LTTNG_LIST_TRACKER_PIDS processed by the client thread.
2372 * Called with session lock held.
2374 ssize_t
cmd_list_tracker_pids(struct ltt_session
*session
,
2375 enum lttng_domain_type domain
, int32_t **pids
)
2378 ssize_t nr_pids
= 0;
2381 case LTTNG_DOMAIN_KERNEL
:
2383 struct ltt_kernel_session
*ksess
;
2385 ksess
= session
->kernel_session
;
2386 nr_pids
= kernel_list_tracker_pids(ksess
, pids
);
2388 ret
= LTTNG_ERR_KERN_LIST_FAIL
;
2393 case LTTNG_DOMAIN_UST
:
2395 struct ltt_ust_session
*usess
;
2397 usess
= session
->ust_session
;
2398 nr_pids
= trace_ust_list_tracker_pids(usess
, pids
);
2400 ret
= LTTNG_ERR_UST_LIST_FAIL
;
2405 case LTTNG_DOMAIN_LOG4J
:
2406 case LTTNG_DOMAIN_JUL
:
2407 case LTTNG_DOMAIN_PYTHON
:
2409 ret
= LTTNG_ERR_UND
;
2416 /* Return negative value to differentiate return code */
2421 int domain_mkdir(const struct consumer_output
*output
,
2422 const struct ltt_session
*session
,
2423 uid_t uid
, gid_t gid
)
2425 struct consumer_socket
*socket
;
2426 struct lttng_ht_iter iter
;
2430 if (!output
|| !output
->socks
) {
2431 ERR("No consumer output found");
2436 path
= zmalloc(LTTNG_PATH_MAX
* sizeof(char));
2438 ERR("Cannot allocate mkdir path");
2443 ret
= snprintf(path
, LTTNG_PATH_MAX
, "%s%s%s",
2444 session_get_base_path(session
),
2445 output
->chunk_path
, output
->subdir
);
2446 if (ret
< 0 || ret
>= LTTNG_PATH_MAX
) {
2452 DBG("Domain mkdir %s for session %" PRIu64
, path
, session
->id
);
2455 * We have to iterate to find a socket, but we only need to send the
2456 * rename command to one consumer, so we break after the first one.
2458 cds_lfht_for_each_entry(output
->socks
->ht
, &iter
.iter
, socket
, node
.node
) {
2459 pthread_mutex_lock(socket
->lock
);
2460 ret
= consumer_mkdir(socket
, session
->id
, output
, path
, uid
, gid
);
2461 pthread_mutex_unlock(socket
->lock
);
2463 ERR("Consumer mkdir");
2480 int session_mkdir(const struct ltt_session
*session
)
2483 struct consumer_output
*output
;
2488 * Unsupported feature in lttng-relayd before 2.11, not an error since it
2489 * is only needed for session rotation and the user will get an error
2492 if (session
->consumer
->type
== CONSUMER_DST_NET
&&
2493 session
->consumer
->relay_major_version
== 2 &&
2494 session
->consumer
->relay_minor_version
< 11) {
2499 if (session
->kernel_session
) {
2500 output
= session
->kernel_session
->consumer
;
2501 uid
= session
->kernel_session
->uid
;
2502 gid
= session
->kernel_session
->gid
;
2503 ret
= domain_mkdir(output
, session
, uid
, gid
);
2505 ERR("Mkdir kernel");
2510 if (session
->ust_session
) {
2511 output
= session
->ust_session
->consumer
;
2512 uid
= session
->ust_session
->uid
;
2513 gid
= session
->ust_session
->gid
;
2514 ret
= domain_mkdir(output
, session
, uid
, gid
);
2528 * Command LTTNG_START_TRACE processed by the client thread.
2530 * Called with session mutex held.
2532 int cmd_start_trace(struct ltt_session
*session
)
2535 unsigned long nb_chan
= 0;
2536 struct ltt_kernel_session
*ksession
;
2537 struct ltt_ust_session
*usess
;
2541 /* Ease our life a bit ;) */
2542 ksession
= session
->kernel_session
;
2543 usess
= session
->ust_session
;
2545 /* Is the session already started? */
2546 if (session
->active
) {
2547 ret
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
2552 * Starting a session without channel is useless since after that it's not
2553 * possible to enable channel thus inform the client.
2555 if (usess
&& usess
->domain_global
.channels
) {
2556 nb_chan
+= lttng_ht_get_count(usess
->domain_global
.channels
);
2559 nb_chan
+= ksession
->channel_count
;
2562 ret
= LTTNG_ERR_NO_CHANNEL
;
2567 * Record the timestamp of the first time the session is started for
2568 * an eventual session rotation call.
2570 if (!session
->has_been_started
) {
2571 session
->current_chunk_start_ts
= time(NULL
);
2572 if (session
->current_chunk_start_ts
== (time_t) -1) {
2573 PERROR("Failed to retrieve the \"%s\" session's start time",
2575 ret
= LTTNG_ERR_FATAL
;
2578 if (!session
->snapshot_mode
&& session
->output_traces
) {
2579 ret
= session_mkdir(session
);
2581 ERR("Failed to create the session directories");
2582 ret
= LTTNG_ERR_CREATE_DIR_FAIL
;
2588 /* Kernel tracing */
2589 if (ksession
!= NULL
) {
2590 DBG("Start kernel tracing session %s", session
->name
);
2591 ret
= start_kernel_session(ksession
, kernel_tracer_fd
);
2592 if (ret
!= LTTNG_OK
) {
2597 /* Flag session that trace should start automatically */
2600 * Even though the start trace might fail, flag this session active so
2601 * other application coming in are started by default.
2605 ret
= ust_app_start_trace_all(usess
);
2607 ret
= LTTNG_ERR_UST_START_FAIL
;
2612 /* Flag this after a successful start. */
2613 session
->has_been_started
= 1;
2614 session
->active
= 1;
2617 * Clear the flag that indicates that a rotation was done while the
2618 * session was stopped.
2620 session
->rotated_after_last_stop
= false;
2622 if (session
->rotate_timer_period
) {
2623 ret
= sessiond_rotate_timer_start(session
,
2624 session
->rotate_timer_period
);
2626 ERR("Failed to enable rotate timer");
2627 ret
= LTTNG_ERR_UNK
;
2639 int rename_active_chunk(struct ltt_session
*session
)
2643 session
->current_archive_id
++;
2646 * The currently active tracing path is now the folder we
2649 ret
= lttng_strncpy(session
->rotation_chunk
.current_rotate_path
,
2650 session
->rotation_chunk
.active_tracing_path
,
2651 sizeof(session
->rotation_chunk
.current_rotate_path
));
2653 ERR("Failed to copy active tracing path");
2657 ret
= rename_complete_chunk(session
, time(NULL
));
2659 ERR("Failed to rename current rotate path");
2664 * We just renamed, the folder, we didn't do an actual rotation, so
2665 * the active tracing path is now the renamed folder and we have to
2666 * restore the rotate count.
2668 ret
= lttng_strncpy(session
->rotation_chunk
.active_tracing_path
,
2669 session
->rotation_chunk
.current_rotate_path
,
2670 sizeof(session
->rotation_chunk
.active_tracing_path
));
2672 ERR("Failed to rename active session chunk tracing path");
2676 session
->current_archive_id
--;
2681 * Command LTTNG_STOP_TRACE processed by the client thread.
2683 int cmd_stop_trace(struct ltt_session
*session
)
2686 struct ltt_kernel_channel
*kchan
;
2687 struct ltt_kernel_session
*ksession
;
2688 struct ltt_ust_session
*usess
;
2689 bool error_occured
= false;
2693 DBG("Begin stop session %s (id %" PRIu64
")", session
->name
, session
->id
);
2695 ksession
= session
->kernel_session
;
2696 usess
= session
->ust_session
;
2698 /* Session is not active. Skip everythong and inform the client. */
2699 if (!session
->active
) {
2700 ret
= LTTNG_ERR_TRACE_ALREADY_STOPPED
;
2704 if (session
->rotate_relay_pending_timer_enabled
) {
2705 sessiond_timer_rotate_pending_stop(session
);
2708 if (session
->rotate_timer_enabled
) {
2709 sessiond_rotate_timer_stop(session
);
2712 if (session
->current_archive_id
> 0 && !session
->rotate_pending
) {
2713 ret
= rename_active_chunk(session
);
2716 * This error should not prevent the user from stopping
2717 * the session. However, it will be reported at the end.
2719 error_occured
= true;
2724 if (ksession
&& ksession
->active
) {
2725 DBG("Stop kernel tracing");
2727 ret
= kernel_stop_session(ksession
);
2729 ret
= LTTNG_ERR_KERN_STOP_FAIL
;
2733 kernel_wait_quiescent(kernel_tracer_fd
);
2735 /* Flush metadata after stopping (if exists) */
2736 if (ksession
->metadata_stream_fd
>= 0) {
2737 ret
= kernel_metadata_flush_buffer(ksession
->metadata_stream_fd
);
2739 ERR("Kernel metadata flush failed");
2743 /* Flush all buffers after stopping */
2744 cds_list_for_each_entry(kchan
, &ksession
->channel_list
.head
, list
) {
2745 ret
= kernel_flush_buffer(kchan
);
2747 ERR("Kernel flush buffer error");
2751 ksession
->active
= 0;
2752 DBG("Kernel session stopped %s (id %" PRIu64
")", session
->name
,
2756 if (usess
&& usess
->active
) {
2758 * Even though the stop trace might fail, flag this session inactive so
2759 * other application coming in are not started by default.
2763 ret
= ust_app_stop_trace_all(usess
);
2765 ret
= LTTNG_ERR_UST_STOP_FAIL
;
2770 /* Flag inactive after a successful stop. */
2771 session
->active
= 0;
2772 ret
= !error_occured
? LTTNG_OK
: LTTNG_ERR_UNK
;
2779 * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
2781 int cmd_set_consumer_uri(struct ltt_session
*session
, size_t nb_uri
,
2782 struct lttng_uri
*uris
)
2785 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
2786 struct ltt_ust_session
*usess
= session
->ust_session
;
2792 /* Can't set consumer URI if the session is active. */
2793 if (session
->active
) {
2794 ret
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
2798 /* Set the "global" consumer URIs */
2799 for (i
= 0; i
< nb_uri
; i
++) {
2800 ret
= add_uri_to_consumer(session
->consumer
,
2801 &uris
[i
], 0, session
->name
);
2802 if (ret
!= LTTNG_OK
) {
2807 /* Set UST session URIs */
2808 if (session
->ust_session
) {
2809 for (i
= 0; i
< nb_uri
; i
++) {
2810 ret
= add_uri_to_consumer(
2811 session
->ust_session
->consumer
,
2812 &uris
[i
], LTTNG_DOMAIN_UST
,
2814 if (ret
!= LTTNG_OK
) {
2820 /* Set kernel session URIs */
2821 if (session
->kernel_session
) {
2822 for (i
= 0; i
< nb_uri
; i
++) {
2823 ret
= add_uri_to_consumer(
2824 session
->kernel_session
->consumer
,
2825 &uris
[i
], LTTNG_DOMAIN_KERNEL
,
2827 if (ret
!= LTTNG_OK
) {
2834 * Make sure to set the session in output mode after we set URI since a
2835 * session can be created without URL (thus flagged in no output mode).
2837 session
->output_traces
= 1;
2839 ksess
->output_traces
= 1;
2843 usess
->output_traces
= 1;
2854 * Command LTTNG_CREATE_SESSION processed by the client thread.
2856 int cmd_create_session_uri(char *name
, struct lttng_uri
*uris
,
2857 size_t nb_uri
, lttng_sock_cred
*creds
, unsigned int live_timer
)
2860 struct ltt_session
*session
;
2866 * Verify if the session already exist
2868 * XXX: There is no need for the session lock list here since the caller
2869 * (process_client_msg) is holding it. We might want to change that so a
2870 * single command does not lock the entire session list.
2872 session
= session_find_by_name(name
);
2873 if (session
!= NULL
) {
2874 ret
= LTTNG_ERR_EXIST_SESS
;
2878 /* Create tracing session in the registry */
2879 ret
= session_create(name
, LTTNG_SOCK_GET_UID_CRED(creds
),
2880 LTTNG_SOCK_GET_GID_CRED(creds
));
2881 if (ret
!= LTTNG_OK
) {
2886 * Get the newly created session pointer back
2888 * XXX: There is no need for the session lock list here since the caller
2889 * (process_client_msg) is holding it. We might want to change that so a
2890 * single command does not lock the entire session list.
2892 session
= session_find_by_name(name
);
2895 session
->live_timer
= live_timer
;
2896 /* Create default consumer output for the session not yet created. */
2897 session
->consumer
= consumer_create_output(CONSUMER_DST_LOCAL
);
2898 if (session
->consumer
== NULL
) {
2899 ret
= LTTNG_ERR_FATAL
;
2900 goto consumer_error
;
2904 ret
= cmd_set_consumer_uri(session
, nb_uri
, uris
);
2905 if (ret
!= LTTNG_OK
) {
2906 goto consumer_error
;
2908 session
->output_traces
= 1;
2910 session
->output_traces
= 0;
2911 DBG2("Session %s created with no output", session
->name
);
2914 session
->consumer
->enabled
= 1;
2919 session_destroy(session
);
2926 * Command LTTNG_CREATE_SESSION_SNAPSHOT processed by the client thread.
2928 int cmd_create_session_snapshot(char *name
, struct lttng_uri
*uris
,
2929 size_t nb_uri
, lttng_sock_cred
*creds
)
2932 struct ltt_session
*session
;
2933 struct snapshot_output
*new_output
= NULL
;
2939 * Create session in no output mode with URIs set to NULL. The uris we've
2940 * received are for a default snapshot output if one.
2942 ret
= cmd_create_session_uri(name
, NULL
, 0, creds
, 0);
2943 if (ret
!= LTTNG_OK
) {
2947 /* Get the newly created session pointer back. This should NEVER fail. */
2948 session
= session_find_by_name(name
);
2951 /* Flag session for snapshot mode. */
2952 session
->snapshot_mode
= 1;
2954 /* Skip snapshot output creation if no URI is given. */
2959 new_output
= snapshot_output_alloc();
2961 ret
= LTTNG_ERR_NOMEM
;
2962 goto error_snapshot_alloc
;
2965 ret
= snapshot_output_init_with_uri(DEFAULT_SNAPSHOT_MAX_SIZE
, NULL
,
2966 uris
, nb_uri
, session
->consumer
, new_output
, &session
->snapshot
);
2968 if (ret
== -ENOMEM
) {
2969 ret
= LTTNG_ERR_NOMEM
;
2971 ret
= LTTNG_ERR_INVALID
;
2973 goto error_snapshot
;
2977 snapshot_add_output(&session
->snapshot
, new_output
);
2984 snapshot_output_destroy(new_output
);
2985 error_snapshot_alloc
:
2986 session_destroy(session
);
2992 * Command LTTNG_DESTROY_SESSION processed by the client thread.
2994 * Called with session lock held.
2996 int cmd_destroy_session(struct ltt_session
*session
, int wpipe
,
2997 struct notification_thread_handle
*notification_thread_handle
)
3000 struct ltt_ust_session
*usess
;
3001 struct ltt_kernel_session
*ksess
;
3006 usess
= session
->ust_session
;
3007 ksess
= session
->kernel_session
;
3009 DBG("Begin destroy session %s (id %" PRIu64
")", session
->name
, session
->id
);
3011 if (session
->rotate_relay_pending_timer_enabled
) {
3012 sessiond_timer_rotate_pending_stop(session
);
3015 if (session
->rotate_timer_enabled
) {
3016 sessiond_rotate_timer_stop(session
);
3019 if (session
->rotate_size
) {
3020 unsubscribe_session_consumed_size_rotation(session
, notification_thread_handle
);
3021 session
->rotate_size
= 0;
3025 * The rename of the current chunk is performed at stop, but if we rotated
3026 * the session after the previous stop command, we need to rename the
3027 * new (and empty) chunk that was started in between.
3029 if (session
->rotated_after_last_stop
) {
3030 rename_active_chunk(session
);
3033 /* Clean kernel session teardown */
3034 kernel_destroy_session(ksess
);
3036 /* UST session teardown */
3038 /* Close any relayd session */
3039 consumer_output_send_destroy_relayd(usess
->consumer
);
3041 /* Destroy every UST application related to this session. */
3042 ret
= ust_app_destroy_trace_all(usess
);
3044 ERR("Error in ust_app_destroy_trace_all");
3047 /* Clean up the rest. */
3048 trace_ust_destroy_session(usess
);
3052 * Must notify the kernel thread here to update it's poll set in order to
3053 * remove the channel(s)' fd just destroyed.
3055 ret
= notify_thread_pipe(wpipe
);
3057 PERROR("write kernel poll pipe");
3060 if (session
->shm_path
[0]) {
3062 * When a session is created with an explicit shm_path,
3063 * the consumer daemon will create its shared memory files
3064 * at that location and will *not* unlink them. This is normal
3065 * as the intention of that feature is to make it possible
3066 * to retrieve the content of those files should a crash occur.
3068 * To ensure the content of those files can be used, the
3069 * sessiond daemon will replicate the content of the metadata
3070 * cache in a metadata file.
3072 * On clean-up, it is expected that the consumer daemon will
3073 * unlink the shared memory files and that the session daemon
3074 * will unlink the metadata file. Then, the session's directory
3075 * in the shm path can be removed.
3077 * Unfortunately, a flaw in the design of the sessiond's and
3078 * consumerd's tear down of channels makes it impossible to
3079 * determine when the sessiond _and_ the consumerd have both
3080 * destroyed their representation of a channel. For one, the
3081 * unlinking, close, and rmdir happen in deferred 'call_rcu'
3082 * callbacks in both daemons.
3084 * However, it is also impossible for the sessiond to know when
3085 * the consumer daemon is done destroying its channel(s) since
3086 * it occurs as a reaction to the closing of the channel's file
3087 * descriptor. There is no resulting communication initiated
3088 * from the consumerd to the sessiond to confirm that the
3089 * operation is completed (and was successful).
3091 * Until this is all fixed, the session daemon checks for the
3092 * removal of the session's shm path which makes it possible
3093 * to safely advertise a session as having been destroyed.
3095 * Prior to this fix, it was not possible to reliably save
3096 * a session making use of the --shm-path option, destroy it,
3097 * and load it again. This is because the creation of the
3098 * session would fail upon seeing the session's shm path
3099 * already in existence.
3101 * Note that none of the error paths in the check for the
3102 * directory's existence return an error. This is normal
3103 * as there isn't much that can be done. The session will
3104 * be destroyed properly, except that we can't offer the
3105 * guarantee that the same session can be re-created.
3107 current_completion_handler
= &destroy_completion_handler
.handler
;
3108 ret
= lttng_strncpy(destroy_completion_handler
.shm_path
,
3110 sizeof(destroy_completion_handler
.shm_path
));
3113 ret
= session_destroy(session
);
3119 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
3121 int cmd_register_consumer(struct ltt_session
*session
,
3122 enum lttng_domain_type domain
, const char *sock_path
,
3123 struct consumer_data
*cdata
)
3126 struct consumer_socket
*socket
= NULL
;
3133 case LTTNG_DOMAIN_KERNEL
:
3135 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
3139 /* Can't register a consumer if there is already one */
3140 if (ksess
->consumer_fds_sent
!= 0) {
3141 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
3145 sock
= lttcomm_connect_unix_sock(sock_path
);
3147 ret
= LTTNG_ERR_CONNECT_FAIL
;
3150 cdata
->cmd_sock
= sock
;
3152 socket
= consumer_allocate_socket(&cdata
->cmd_sock
);
3153 if (socket
== NULL
) {
3156 PERROR("close register consumer");
3158 cdata
->cmd_sock
= -1;
3159 ret
= LTTNG_ERR_FATAL
;
3163 socket
->lock
= zmalloc(sizeof(pthread_mutex_t
));
3164 if (socket
->lock
== NULL
) {
3165 PERROR("zmalloc pthread mutex");
3166 ret
= LTTNG_ERR_FATAL
;
3169 pthread_mutex_init(socket
->lock
, NULL
);
3170 socket
->registered
= 1;
3173 consumer_add_socket(socket
, ksess
->consumer
);
3176 pthread_mutex_lock(&cdata
->pid_mutex
);
3178 pthread_mutex_unlock(&cdata
->pid_mutex
);
3183 /* TODO: Userspace tracing */
3184 ret
= LTTNG_ERR_UND
;
3192 consumer_destroy_socket(socket
);
3198 * Command LTTNG_LIST_DOMAINS processed by the client thread.
3200 ssize_t
cmd_list_domains(struct ltt_session
*session
,
3201 struct lttng_domain
**domains
)
3206 struct lttng_ht_iter iter
;
3208 if (session
->kernel_session
!= NULL
) {
3209 DBG3("Listing domains found kernel domain");
3213 if (session
->ust_session
!= NULL
) {
3214 DBG3("Listing domains found UST global domain");
3218 cds_lfht_for_each_entry(session
->ust_session
->agents
->ht
, &iter
.iter
,
3220 if (agt
->being_used
) {
3231 *domains
= zmalloc(nb_dom
* sizeof(struct lttng_domain
));
3232 if (*domains
== NULL
) {
3233 ret
= LTTNG_ERR_FATAL
;
3237 if (session
->kernel_session
!= NULL
) {
3238 (*domains
)[index
].type
= LTTNG_DOMAIN_KERNEL
;
3240 /* Kernel session buffer type is always GLOBAL */
3241 (*domains
)[index
].buf_type
= LTTNG_BUFFER_GLOBAL
;
3246 if (session
->ust_session
!= NULL
) {
3247 (*domains
)[index
].type
= LTTNG_DOMAIN_UST
;
3248 (*domains
)[index
].buf_type
= session
->ust_session
->buffer_type
;
3252 cds_lfht_for_each_entry(session
->ust_session
->agents
->ht
, &iter
.iter
,
3254 if (agt
->being_used
) {
3255 (*domains
)[index
].type
= agt
->domain
;
3256 (*domains
)[index
].buf_type
= session
->ust_session
->buffer_type
;
3266 /* Return negative value to differentiate return code */
3272 * Command LTTNG_LIST_CHANNELS processed by the client thread.
3274 ssize_t
cmd_list_channels(enum lttng_domain_type domain
,
3275 struct ltt_session
*session
, struct lttng_channel
**channels
)
3277 ssize_t nb_chan
= 0, payload_size
= 0, ret
;
3280 case LTTNG_DOMAIN_KERNEL
:
3281 if (session
->kernel_session
!= NULL
) {
3282 nb_chan
= session
->kernel_session
->channel_count
;
3284 DBG3("Number of kernel channels %zd", nb_chan
);
3286 ret
= -LTTNG_ERR_KERN_CHAN_NOT_FOUND
;
3290 case LTTNG_DOMAIN_UST
:
3291 if (session
->ust_session
!= NULL
) {
3293 nb_chan
= lttng_ht_get_count(
3294 session
->ust_session
->domain_global
.channels
);
3297 DBG3("Number of UST global channels %zd", nb_chan
);
3299 ret
= -LTTNG_ERR_UST_CHAN_NOT_FOUND
;
3304 ret
= -LTTNG_ERR_UND
;
3309 const size_t channel_size
= sizeof(struct lttng_channel
) +
3310 sizeof(struct lttng_channel_extended
);
3311 struct lttng_channel_extended
*channel_exts
;
3313 payload_size
= nb_chan
* channel_size
;
3314 *channels
= zmalloc(payload_size
);
3315 if (*channels
== NULL
) {
3316 ret
= -LTTNG_ERR_FATAL
;
3320 channel_exts
= ((void *) *channels
) +
3321 (nb_chan
* sizeof(struct lttng_channel
));
3322 ret
= list_lttng_channels(domain
, session
, *channels
, channel_exts
);
3323 if (ret
!= LTTNG_OK
) {
3338 * Command LTTNG_LIST_EVENTS processed by the client thread.
3340 ssize_t
cmd_list_events(enum lttng_domain_type domain
,
3341 struct ltt_session
*session
, char *channel_name
,
3342 struct lttng_event
**events
, size_t *total_size
)
3345 ssize_t nb_event
= 0;
3348 case LTTNG_DOMAIN_KERNEL
:
3349 if (session
->kernel_session
!= NULL
) {
3350 nb_event
= list_lttng_kernel_events(channel_name
,
3351 session
->kernel_session
, events
,
3355 case LTTNG_DOMAIN_UST
:
3357 if (session
->ust_session
!= NULL
) {
3358 nb_event
= list_lttng_ust_global_events(channel_name
,
3359 &session
->ust_session
->domain_global
, events
,
3364 case LTTNG_DOMAIN_LOG4J
:
3365 case LTTNG_DOMAIN_JUL
:
3366 case LTTNG_DOMAIN_PYTHON
:
3367 if (session
->ust_session
) {
3368 struct lttng_ht_iter iter
;
3372 cds_lfht_for_each_entry(session
->ust_session
->agents
->ht
,
3373 &iter
.iter
, agt
, node
.node
) {
3374 if (agt
->domain
== domain
) {
3375 nb_event
= list_lttng_agent_events(
3385 ret
= LTTNG_ERR_UND
;
3392 /* Return negative value to differentiate return code */
3397 * Using the session list, filled a lttng_session array to send back to the
3398 * client for session listing.
3400 * The session list lock MUST be acquired before calling this function. Use
3401 * session_lock_list() and session_unlock_list().
3403 void cmd_list_lttng_sessions(struct lttng_session
*sessions
, uid_t uid
,
3408 struct ltt_session
*session
;
3409 struct ltt_session_list
*list
= session_get_list();
3411 DBG("Getting all available session for UID %d GID %d",
3414 * Iterate over session list and append data after the control struct in
3417 cds_list_for_each_entry(session
, &list
->head
, list
) {
3419 * Only list the sessions the user can control.
3421 if (!session_access_ok(session
, uid
, gid
)) {
3425 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
3426 struct ltt_ust_session
*usess
= session
->ust_session
;
3428 if (session
->consumer
->type
== CONSUMER_DST_NET
||
3429 (ksess
&& ksess
->consumer
->type
== CONSUMER_DST_NET
) ||
3430 (usess
&& usess
->consumer
->type
== CONSUMER_DST_NET
)) {
3431 ret
= build_network_session_path(sessions
[i
].path
,
3432 sizeof(sessions
[i
].path
), session
);
3434 ret
= snprintf(sessions
[i
].path
, sizeof(sessions
[i
].path
), "%s",
3435 session
->consumer
->dst
.session_root_path
);
3438 PERROR("snprintf session path");
3442 strncpy(sessions
[i
].name
, session
->name
, NAME_MAX
);
3443 sessions
[i
].name
[NAME_MAX
- 1] = '\0';
3444 sessions
[i
].enabled
= session
->active
;
3445 sessions
[i
].snapshot_mode
= session
->snapshot_mode
;
3446 sessions
[i
].live_timer_interval
= session
->live_timer
;
3452 * Command LTTNG_DATA_PENDING returning 0 if the data is NOT pending meaning
3453 * ready for trace analysis (or any kind of reader) or else 1 for pending data.
3455 int cmd_data_pending(struct ltt_session
*session
)
3458 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
3459 struct ltt_ust_session
*usess
= session
->ust_session
;
3463 DBG("Data pending for session %s", session
->name
);
3465 /* Session MUST be stopped to ask for data availability. */
3466 if (session
->active
) {
3467 ret
= LTTNG_ERR_SESSION_STARTED
;
3471 * If stopped, just make sure we've started before else the above call
3472 * will always send that there is data pending.
3474 * The consumer assumes that when the data pending command is received,
3475 * the trace has been started before or else no output data is written
3476 * by the streams which is a condition for data pending. So, this is
3477 * *VERY* important that we don't ask the consumer before a start
3480 if (!session
->has_been_started
) {
3487 * A rotation is still pending, we have to wait.
3489 if (session
->rotate_pending
) {
3490 DBG("Rotate still pending for session %s", session
->name
);
3495 if (ksess
&& ksess
->consumer
) {
3496 ret
= consumer_is_data_pending(ksess
->id
, ksess
->consumer
);
3498 /* Data is still being extracted for the kernel. */
3503 if (usess
&& usess
->consumer
) {
3504 ret
= consumer_is_data_pending(usess
->id
, usess
->consumer
);
3506 /* Data is still being extracted for the kernel. */
3511 /* Data is ready to be read by a viewer */
3519 * Command LTTNG_SNAPSHOT_ADD_OUTPUT from the lttng ctl library.
3521 * Return LTTNG_OK on success or else a LTTNG_ERR code.
3523 int cmd_snapshot_add_output(struct ltt_session
*session
,
3524 struct lttng_snapshot_output
*output
, uint32_t *id
)
3527 struct snapshot_output
*new_output
;
3532 DBG("Cmd snapshot add output for session %s", session
->name
);
3535 * Can't create an output if the session is not set in no-output mode.
3537 if (session
->output_traces
) {
3538 ret
= LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
3542 /* Only one output is allowed until we have the "tee" feature. */
3543 if (session
->snapshot
.nb_output
== 1) {
3544 ret
= LTTNG_ERR_SNAPSHOT_OUTPUT_EXIST
;
3548 new_output
= snapshot_output_alloc();
3550 ret
= LTTNG_ERR_NOMEM
;
3554 ret
= snapshot_output_init(output
->max_size
, output
->name
,
3555 output
->ctrl_url
, output
->data_url
, session
->consumer
, new_output
,
3556 &session
->snapshot
);
3558 if (ret
== -ENOMEM
) {
3559 ret
= LTTNG_ERR_NOMEM
;
3561 ret
= LTTNG_ERR_INVALID
;
3567 snapshot_add_output(&session
->snapshot
, new_output
);
3569 *id
= new_output
->id
;
3576 snapshot_output_destroy(new_output
);
3582 * Command LTTNG_SNAPSHOT_DEL_OUTPUT from lib lttng ctl.
3584 * Return LTTNG_OK on success or else a LTTNG_ERR code.
3586 int cmd_snapshot_del_output(struct ltt_session
*session
,
3587 struct lttng_snapshot_output
*output
)
3590 struct snapshot_output
*sout
= NULL
;
3598 * Permission denied to create an output if the session is not
3599 * set in no output mode.
3601 if (session
->output_traces
) {
3602 ret
= LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
3607 DBG("Cmd snapshot del output id %" PRIu32
" for session %s", output
->id
,
3609 sout
= snapshot_find_output_by_id(output
->id
, &session
->snapshot
);
3610 } else if (*output
->name
!= '\0') {
3611 DBG("Cmd snapshot del output name %s for session %s", output
->name
,
3613 sout
= snapshot_find_output_by_name(output
->name
, &session
->snapshot
);
3616 ret
= LTTNG_ERR_INVALID
;
3620 snapshot_delete_output(&session
->snapshot
, sout
);
3621 snapshot_output_destroy(sout
);
3630 * Command LTTNG_SNAPSHOT_LIST_OUTPUT from lib lttng ctl.
3632 * If no output is available, outputs is untouched and 0 is returned.
3634 * Return the size of the newly allocated outputs or a negative LTTNG_ERR code.
3636 ssize_t
cmd_snapshot_list_outputs(struct ltt_session
*session
,
3637 struct lttng_snapshot_output
**outputs
)
3640 struct lttng_snapshot_output
*list
= NULL
;
3641 struct lttng_ht_iter iter
;
3642 struct snapshot_output
*output
;
3647 DBG("Cmd snapshot list outputs for session %s", session
->name
);
3650 * Permission denied to create an output if the session is not
3651 * set in no output mode.
3653 if (session
->output_traces
) {
3654 ret
= -LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
3658 if (session
->snapshot
.nb_output
== 0) {
3663 list
= zmalloc(session
->snapshot
.nb_output
* sizeof(*list
));
3665 ret
= -LTTNG_ERR_NOMEM
;
3669 /* Copy list from session to the new list object. */
3671 cds_lfht_for_each_entry(session
->snapshot
.output_ht
->ht
, &iter
.iter
,
3672 output
, node
.node
) {
3673 assert(output
->consumer
);
3674 list
[idx
].id
= output
->id
;
3675 list
[idx
].max_size
= output
->max_size
;
3676 if (lttng_strncpy(list
[idx
].name
, output
->name
,
3677 sizeof(list
[idx
].name
))) {
3678 ret
= -LTTNG_ERR_INVALID
;
3681 if (output
->consumer
->type
== CONSUMER_DST_LOCAL
) {
3682 if (lttng_strncpy(list
[idx
].ctrl_url
,
3683 output
->consumer
->dst
.session_root_path
,
3684 sizeof(list
[idx
].ctrl_url
))) {
3685 ret
= -LTTNG_ERR_INVALID
;
3690 ret
= uri_to_str_url(&output
->consumer
->dst
.net
.control
,
3691 list
[idx
].ctrl_url
, sizeof(list
[idx
].ctrl_url
));
3693 ret
= -LTTNG_ERR_NOMEM
;
3698 ret
= uri_to_str_url(&output
->consumer
->dst
.net
.data
,
3699 list
[idx
].data_url
, sizeof(list
[idx
].data_url
));
3701 ret
= -LTTNG_ERR_NOMEM
;
3710 ret
= session
->snapshot
.nb_output
;
3719 * Check if we can regenerate the metadata for this session.
3720 * Only kernel, UST per-uid and non-live sessions are supported.
3722 * Return 0 if the metadata can be generated, a LTTNG_ERR code otherwise.
3725 int check_regenerate_metadata_support(struct ltt_session
*session
)
3731 if (session
->live_timer
!= 0) {
3732 ret
= LTTNG_ERR_LIVE_SESSION
;
3735 if (!session
->active
) {
3736 ret
= LTTNG_ERR_SESSION_NOT_STARTED
;
3739 if (session
->ust_session
) {
3740 switch (session
->ust_session
->buffer_type
) {
3741 case LTTNG_BUFFER_PER_UID
:
3743 case LTTNG_BUFFER_PER_PID
:
3744 ret
= LTTNG_ERR_PER_PID_SESSION
;
3748 ret
= LTTNG_ERR_UNK
;
3752 if (session
->consumer
->type
== CONSUMER_DST_NET
&&
3753 session
->consumer
->relay_minor_version
< 8) {
3754 ret
= LTTNG_ERR_RELAYD_VERSION_FAIL
;
3764 int clear_metadata_file(int fd
)
3769 lseek_ret
= lseek(fd
, 0, SEEK_SET
);
3770 if (lseek_ret
< 0) {
3776 ret
= ftruncate(fd
, 0);
3778 PERROR("ftruncate");
3787 int ust_regenerate_metadata(struct ltt_ust_session
*usess
)
3790 struct buffer_reg_uid
*uid_reg
= NULL
;
3791 struct buffer_reg_session
*session_reg
= NULL
;
3794 cds_list_for_each_entry(uid_reg
, &usess
->buffer_reg_uid_list
, lnode
) {
3795 struct ust_registry_session
*registry
;
3796 struct ust_registry_channel
*chan
;
3797 struct lttng_ht_iter iter_chan
;
3799 session_reg
= uid_reg
->registry
;
3800 registry
= session_reg
->reg
.ust
;
3802 pthread_mutex_lock(®istry
->lock
);
3803 registry
->metadata_len_sent
= 0;
3804 memset(registry
->metadata
, 0, registry
->metadata_alloc_len
);
3805 registry
->metadata_len
= 0;
3806 registry
->metadata_version
++;
3807 if (registry
->metadata_fd
> 0) {
3808 /* Clear the metadata file's content. */
3809 ret
= clear_metadata_file(registry
->metadata_fd
);
3811 pthread_mutex_unlock(®istry
->lock
);
3816 ret
= ust_metadata_session_statedump(registry
, NULL
,
3817 registry
->major
, registry
->minor
);
3819 pthread_mutex_unlock(®istry
->lock
);
3820 ERR("Failed to generate session metadata (err = %d)",
3824 cds_lfht_for_each_entry(registry
->channels
->ht
, &iter_chan
.iter
,
3826 struct ust_registry_event
*event
;
3827 struct lttng_ht_iter iter_event
;
3829 ret
= ust_metadata_channel_statedump(registry
, chan
);
3831 pthread_mutex_unlock(®istry
->lock
);
3832 ERR("Failed to generate channel metadata "
3836 cds_lfht_for_each_entry(chan
->ht
->ht
, &iter_event
.iter
,
3838 ret
= ust_metadata_event_statedump(registry
,
3841 pthread_mutex_unlock(®istry
->lock
);
3842 ERR("Failed to generate event metadata "
3848 pthread_mutex_unlock(®istry
->lock
);
3857 * Command LTTNG_REGENERATE_METADATA from the lttng-ctl library.
3859 * Ask the consumer to truncate the existing metadata file(s) and
3860 * then regenerate the metadata. Live and per-pid sessions are not
3861 * supported and return an error.
3863 * Return 0 on success or else a LTTNG_ERR code.
3865 int cmd_regenerate_metadata(struct ltt_session
*session
)
3871 ret
= check_regenerate_metadata_support(session
);
3876 if (session
->kernel_session
) {
3877 ret
= kernctl_session_regenerate_metadata(
3878 session
->kernel_session
->fd
);
3880 ERR("Failed to regenerate the kernel metadata");
3885 if (session
->ust_session
) {
3886 ret
= ust_regenerate_metadata(session
->ust_session
);
3888 ERR("Failed to regenerate the UST metadata");
3892 DBG("Cmd metadata regenerate for session %s", session
->name
);
3900 * Command LTTNG_REGENERATE_STATEDUMP from the lttng-ctl library.
3902 * Ask the tracer to regenerate a new statedump.
3904 * Return 0 on success or else a LTTNG_ERR code.
3906 int cmd_regenerate_statedump(struct ltt_session
*session
)
3912 if (!session
->active
) {
3913 ret
= LTTNG_ERR_SESSION_NOT_STARTED
;
3917 if (session
->kernel_session
) {
3918 ret
= kernctl_session_regenerate_statedump(
3919 session
->kernel_session
->fd
);
3921 * Currently, the statedump in kernel can only fail if out
3925 if (ret
== -ENOMEM
) {
3926 ret
= LTTNG_ERR_REGEN_STATEDUMP_NOMEM
;
3928 ret
= LTTNG_ERR_REGEN_STATEDUMP_FAIL
;
3930 ERR("Failed to regenerate the kernel statedump");
3935 if (session
->ust_session
) {
3936 ret
= ust_app_regenerate_statedump_all(session
->ust_session
);
3938 * Currently, the statedump in UST always returns 0.
3941 ret
= LTTNG_ERR_REGEN_STATEDUMP_FAIL
;
3942 ERR("Failed to regenerate the UST statedump");
3946 DBG("Cmd regenerate statedump for session %s", session
->name
);
3953 int cmd_register_trigger(struct command_ctx
*cmd_ctx
, int sock
,
3954 struct notification_thread_handle
*notification_thread
)
3958 ssize_t sock_recv_len
;
3959 struct lttng_trigger
*trigger
= NULL
;
3960 struct lttng_buffer_view view
;
3961 struct lttng_dynamic_buffer trigger_buffer
;
3963 lttng_dynamic_buffer_init(&trigger_buffer
);
3964 trigger_len
= (size_t) cmd_ctx
->lsm
->u
.trigger
.length
;
3965 ret
= lttng_dynamic_buffer_set_size(&trigger_buffer
, trigger_len
);
3967 ret
= LTTNG_ERR_NOMEM
;
3971 sock_recv_len
= lttcomm_recv_unix_sock(sock
, trigger_buffer
.data
,
3973 if (sock_recv_len
< 0 || sock_recv_len
!= trigger_len
) {
3974 ERR("Failed to receive \"register trigger\" command payload");
3975 /* TODO: should this be a new error enum ? */
3976 ret
= LTTNG_ERR_INVALID_TRIGGER
;
3980 view
= lttng_buffer_view_from_dynamic_buffer(&trigger_buffer
, 0, -1);
3981 if (lttng_trigger_create_from_buffer(&view
, &trigger
) !=
3983 ERR("Invalid trigger payload received in \"register trigger\" command");
3984 ret
= LTTNG_ERR_INVALID_TRIGGER
;
3988 ret
= notification_thread_command_register_trigger(notification_thread
,
3990 /* Ownership of trigger was transferred. */
3993 lttng_trigger_destroy(trigger
);
3994 lttng_dynamic_buffer_reset(&trigger_buffer
);
3998 int cmd_unregister_trigger(struct command_ctx
*cmd_ctx
, int sock
,
3999 struct notification_thread_handle
*notification_thread
)
4003 ssize_t sock_recv_len
;
4004 struct lttng_trigger
*trigger
= NULL
;
4005 struct lttng_buffer_view view
;
4006 struct lttng_dynamic_buffer trigger_buffer
;
4008 lttng_dynamic_buffer_init(&trigger_buffer
);
4009 trigger_len
= (size_t) cmd_ctx
->lsm
->u
.trigger
.length
;
4010 ret
= lttng_dynamic_buffer_set_size(&trigger_buffer
, trigger_len
);
4012 ret
= LTTNG_ERR_NOMEM
;
4016 sock_recv_len
= lttcomm_recv_unix_sock(sock
, trigger_buffer
.data
,
4018 if (sock_recv_len
< 0 || sock_recv_len
!= trigger_len
) {
4019 ERR("Failed to receive \"unregister trigger\" command payload");
4020 /* TODO: should this be a new error enum ? */
4021 ret
= LTTNG_ERR_INVALID_TRIGGER
;
4025 view
= lttng_buffer_view_from_dynamic_buffer(&trigger_buffer
, 0, -1);
4026 if (lttng_trigger_create_from_buffer(&view
, &trigger
) !=
4028 ERR("Invalid trigger payload received in \"unregister trigger\" command");
4029 ret
= LTTNG_ERR_INVALID_TRIGGER
;
4033 ret
= notification_thread_command_unregister_trigger(notification_thread
,
4036 lttng_trigger_destroy(trigger
);
4037 lttng_dynamic_buffer_reset(&trigger_buffer
);
4042 * Send relayd sockets from snapshot output to consumer. Ignore request if the
4043 * snapshot output is *not* set with a remote destination.
4045 * Return 0 on success or a LTTNG_ERR code.
4047 static int set_relayd_for_snapshot(struct consumer_output
*consumer
,
4048 struct snapshot_output
*snap_output
, struct ltt_session
*session
)
4051 struct lttng_ht_iter iter
;
4052 struct consumer_socket
*socket
;
4055 assert(snap_output
);
4058 DBG2("Set relayd object from snapshot output");
4060 /* Ignore if snapshot consumer output is not network. */
4061 if (snap_output
->consumer
->type
!= CONSUMER_DST_NET
) {
4066 * For each consumer socket, create and send the relayd object of the
4070 cds_lfht_for_each_entry(snap_output
->consumer
->socks
->ht
, &iter
.iter
,
4071 socket
, node
.node
) {
4072 pthread_mutex_lock(socket
->lock
);
4073 ret
= send_consumer_relayd_sockets(0, session
->id
,
4074 snap_output
->consumer
, socket
,
4075 session
->name
, session
->hostname
,
4076 session
->live_timer
);
4077 pthread_mutex_unlock(socket
->lock
);
4078 if (ret
!= LTTNG_OK
) {
4090 * Record a kernel snapshot.
4092 * Return LTTNG_OK on success or a LTTNG_ERR code.
4094 static int record_kernel_snapshot(struct ltt_kernel_session
*ksess
,
4095 struct snapshot_output
*output
, struct ltt_session
*session
,
4096 int wait
, uint64_t nb_packets_per_stream
)
4106 * Copy kernel session sockets so we can communicate with the right
4107 * consumer for the snapshot record command.
4109 ret
= consumer_copy_sockets(output
->consumer
, ksess
->consumer
);
4111 ret
= LTTNG_ERR_NOMEM
;
4115 ret
= set_relayd_for_snapshot(ksess
->consumer
, output
, session
);
4116 if (ret
!= LTTNG_OK
) {
4117 goto error_snapshot
;
4120 ret
= kernel_snapshot_record(ksess
, output
, wait
, nb_packets_per_stream
);
4121 if (ret
!= LTTNG_OK
) {
4122 goto error_snapshot
;
4129 /* Clean up copied sockets so this output can use some other later on. */
4130 consumer_destroy_output_sockets(output
->consumer
);
4137 * Record a UST snapshot.
4139 * Return 0 on success or a LTTNG_ERR error code.
4141 static int record_ust_snapshot(struct ltt_ust_session
*usess
,
4142 struct snapshot_output
*output
, struct ltt_session
*session
,
4143 int wait
, uint64_t nb_packets_per_stream
)
4152 * Copy UST session sockets so we can communicate with the right
4153 * consumer for the snapshot record command.
4155 ret
= consumer_copy_sockets(output
->consumer
, usess
->consumer
);
4157 ret
= LTTNG_ERR_NOMEM
;
4161 ret
= set_relayd_for_snapshot(usess
->consumer
, output
, session
);
4162 if (ret
!= LTTNG_OK
) {
4163 goto error_snapshot
;
4166 ret
= ust_app_snapshot_record(usess
, output
, wait
, nb_packets_per_stream
);
4170 ret
= LTTNG_ERR_INVALID
;
4173 ret
= LTTNG_ERR_SNAPSHOT_FAIL
;
4176 goto error_snapshot
;
4182 /* Clean up copied sockets so this output can use some other later on. */
4183 consumer_destroy_output_sockets(output
->consumer
);
4189 uint64_t get_session_size_one_more_packet_per_stream(struct ltt_session
*session
,
4190 uint64_t cur_nr_packets
)
4192 uint64_t tot_size
= 0;
4194 if (session
->kernel_session
) {
4195 struct ltt_kernel_channel
*chan
;
4196 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
4198 cds_list_for_each_entry(chan
, &ksess
->channel_list
.head
, list
) {
4199 if (cur_nr_packets
>= chan
->channel
->attr
.num_subbuf
) {
4201 * Don't take channel into account if we
4202 * already grab all its packets.
4206 tot_size
+= chan
->channel
->attr
.subbuf_size
4207 * chan
->stream_count
;
4211 if (session
->ust_session
) {
4212 struct ltt_ust_session
*usess
= session
->ust_session
;
4214 tot_size
+= ust_app_get_size_one_more_packet_per_stream(usess
,
4222 * Calculate the number of packets we can grab from each stream that
4223 * fits within the overall snapshot max size.
4225 * Returns -1 on error, 0 means infinite number of packets, else > 0 is
4226 * the number of packets per stream.
4228 * TODO: this approach is not perfect: we consider the worse case
4229 * (packet filling the sub-buffers) as an upper bound, but we could do
4230 * better if we do this calculation while we actually grab the packet
4231 * content: we would know how much padding we don't actually store into
4234 * This algorithm is currently bounded by the number of packets per
4237 * Since we call this algorithm before actually grabbing the data, it's
4238 * an approximation: for instance, applications could appear/disappear
4239 * in between this call and actually grabbing data.
4242 int64_t get_session_nb_packets_per_stream(struct ltt_session
*session
, uint64_t max_size
)
4245 uint64_t cur_nb_packets
= 0;
4248 return 0; /* Infinite */
4251 size_left
= max_size
;
4253 uint64_t one_more_packet_tot_size
;
4255 one_more_packet_tot_size
= get_session_size_one_more_packet_per_stream(session
,
4257 if (!one_more_packet_tot_size
) {
4258 /* We are already grabbing all packets. */
4261 size_left
-= one_more_packet_tot_size
;
4262 if (size_left
< 0) {
4267 if (!cur_nb_packets
) {
4268 /* Not enough room to grab one packet of each stream, error. */
4271 return cur_nb_packets
;
4275 * Command LTTNG_SNAPSHOT_RECORD from lib lttng ctl.
4277 * The wait parameter is ignored so this call always wait for the snapshot to
4278 * complete before returning.
4280 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4282 int cmd_snapshot_record(struct ltt_session
*session
,
4283 struct lttng_snapshot_output
*output
, int wait
)
4286 unsigned int use_tmp_output
= 0;
4287 struct snapshot_output tmp_output
;
4288 unsigned int snapshot_success
= 0;
4294 DBG("Cmd snapshot record for session %s", session
->name
);
4296 /* Get the datetime for the snapshot output directory. */
4297 ret
= utils_get_current_time_str("%Y%m%d-%H%M%S", datetime
,
4300 ret
= LTTNG_ERR_INVALID
;
4305 * Permission denied to create an output if the session is not
4306 * set in no output mode.
4308 if (session
->output_traces
) {
4309 ret
= LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
4313 /* The session needs to be started at least once. */
4314 if (!session
->has_been_started
) {
4315 ret
= LTTNG_ERR_START_SESSION_ONCE
;
4319 /* Use temporary output for the session. */
4320 if (*output
->ctrl_url
!= '\0') {
4321 ret
= snapshot_output_init(output
->max_size
, output
->name
,
4322 output
->ctrl_url
, output
->data_url
, session
->consumer
,
4325 if (ret
== -ENOMEM
) {
4326 ret
= LTTNG_ERR_NOMEM
;
4328 ret
= LTTNG_ERR_INVALID
;
4332 /* Use the global session count for the temporary snapshot. */
4333 tmp_output
.nb_snapshot
= session
->snapshot
.nb_snapshot
;
4335 /* Use the global datetime */
4336 memcpy(tmp_output
.datetime
, datetime
, sizeof(datetime
));
4340 if (use_tmp_output
) {
4341 int64_t nb_packets_per_stream
;
4343 nb_packets_per_stream
= get_session_nb_packets_per_stream(session
,
4344 tmp_output
.max_size
);
4345 if (nb_packets_per_stream
< 0) {
4346 ret
= LTTNG_ERR_MAX_SIZE_INVALID
;
4350 if (session
->kernel_session
) {
4351 ret
= record_kernel_snapshot(session
->kernel_session
,
4352 &tmp_output
, session
,
4353 wait
, nb_packets_per_stream
);
4354 if (ret
!= LTTNG_OK
) {
4359 if (session
->ust_session
) {
4360 ret
= record_ust_snapshot(session
->ust_session
,
4361 &tmp_output
, session
,
4362 wait
, nb_packets_per_stream
);
4363 if (ret
!= LTTNG_OK
) {
4368 snapshot_success
= 1;
4370 struct snapshot_output
*sout
;
4371 struct lttng_ht_iter iter
;
4374 cds_lfht_for_each_entry(session
->snapshot
.output_ht
->ht
,
4375 &iter
.iter
, sout
, node
.node
) {
4376 int64_t nb_packets_per_stream
;
4379 * Make a local copy of the output and assign the possible
4380 * temporary value given by the caller.
4382 memset(&tmp_output
, 0, sizeof(tmp_output
));
4383 memcpy(&tmp_output
, sout
, sizeof(tmp_output
));
4385 if (output
->max_size
!= (uint64_t) -1ULL) {
4386 tmp_output
.max_size
= output
->max_size
;
4389 nb_packets_per_stream
= get_session_nb_packets_per_stream(session
,
4390 tmp_output
.max_size
);
4391 if (nb_packets_per_stream
< 0) {
4392 ret
= LTTNG_ERR_MAX_SIZE_INVALID
;
4397 /* Use temporary name. */
4398 if (*output
->name
!= '\0') {
4399 if (lttng_strncpy(tmp_output
.name
, output
->name
,
4400 sizeof(tmp_output
.name
))) {
4401 ret
= LTTNG_ERR_INVALID
;
4407 tmp_output
.nb_snapshot
= session
->snapshot
.nb_snapshot
;
4408 memcpy(tmp_output
.datetime
, datetime
, sizeof(datetime
));
4410 if (session
->kernel_session
) {
4411 ret
= record_kernel_snapshot(session
->kernel_session
,
4412 &tmp_output
, session
,
4413 wait
, nb_packets_per_stream
);
4414 if (ret
!= LTTNG_OK
) {
4420 if (session
->ust_session
) {
4421 ret
= record_ust_snapshot(session
->ust_session
,
4422 &tmp_output
, session
,
4423 wait
, nb_packets_per_stream
);
4424 if (ret
!= LTTNG_OK
) {
4429 snapshot_success
= 1;
4434 if (snapshot_success
) {
4435 session
->snapshot
.nb_snapshot
++;
4437 ret
= LTTNG_ERR_SNAPSHOT_FAIL
;
4445 * Command LTTNG_SET_SESSION_SHM_PATH processed by the client thread.
4447 int cmd_set_session_shm_path(struct ltt_session
*session
,
4448 const char *shm_path
)
4454 * Can only set shm path before session is started.
4456 if (session
->has_been_started
) {
4457 return LTTNG_ERR_SESSION_STARTED
;
4460 strncpy(session
->shm_path
, shm_path
,
4461 sizeof(session
->shm_path
));
4462 session
->shm_path
[sizeof(session
->shm_path
) - 1] = '\0';
4468 * Command LTTNG_ROTATE_SESSION from the lttng-ctl library.
4470 * Ask the consumer to rotate the session output directory.
4471 * The session lock must be held.
4473 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4475 int cmd_rotate_session(struct ltt_session
*session
,
4476 struct lttng_rotate_session_return
*rotate_return
)
4480 struct tm
*timeinfo
;
4483 bool ust_active
= false;
4487 if (!session
->has_been_started
) {
4488 ret
= -LTTNG_ERR_START_SESSION_ONCE
;
4492 if (session
->live_timer
|| session
->snapshot_mode
||
4493 !session
->output_traces
) {
4494 ret
= -LTTNG_ERR_ROTATION_NOT_AVAILABLE
;
4499 * Unsupported feature in lttng-relayd before 2.11.
4501 if (session
->consumer
->type
== CONSUMER_DST_NET
&&
4502 (session
->consumer
->relay_major_version
== 2 &&
4503 session
->consumer
->relay_minor_version
< 11)) {
4504 ret
= -LTTNG_ERR_ROTATION_NOT_AVAILABLE_RELAY
;
4508 if (session
->rotate_pending
|| session
->rotate_pending_relay
) {
4509 ret
= -LTTNG_ERR_ROTATION_PENDING
;
4510 DBG("Rotate already in progress");
4515 * After a stop, we only allow one rotation to occur, the other ones are
4516 * useless until a new start.
4518 if (session
->rotated_after_last_stop
) {
4519 DBG("Session \"%s\" was already rotated after stop, refusing rotation",
4521 ret
= -LTTNG_ERR_ROTATION_MULTIPLE_AFTER_STOP
;
4525 /* Special case for the first rotation. */
4526 if (session
->current_archive_id
== 0) {
4527 const char *base_path
= NULL
;
4529 /* Either one of the two sessions is enough to get the root path. */
4530 if (session
->kernel_session
) {
4531 base_path
= session_get_base_path(session
);
4532 } else if (session
->ust_session
) {
4533 base_path
= session_get_base_path(session
);
4538 ret
= lttng_strncpy(session
->rotation_chunk
.current_rotate_path
,
4540 sizeof(session
->rotation_chunk
.current_rotate_path
));
4542 ERR("Failed to copy session base path to current rotation chunk path");
4543 ret
= -LTTNG_ERR_UNK
;
4548 * The currently active tracing path is now the folder we
4551 ret
= lttng_strncpy(session
->rotation_chunk
.current_rotate_path
,
4552 session
->rotation_chunk
.active_tracing_path
,
4553 sizeof(session
->rotation_chunk
.current_rotate_path
));
4555 ERR("Failed to copy the active tracing path to the current rotate path");
4556 ret
= -LTTNG_ERR_UNK
;
4560 DBG("Current rotate path %s", session
->rotation_chunk
.current_rotate_path
);
4562 session
->current_archive_id
++;
4563 session
->rotate_pending
= true;
4564 session
->rotation_state
= LTTNG_ROTATION_STATE_ONGOING
;
4565 ret
= notification_thread_command_session_rotation_ongoing(
4566 notification_thread_handle
,
4567 session
->name
, session
->uid
, session
->gid
,
4568 session
->current_archive_id
);
4569 if (ret
!= LTTNG_OK
) {
4570 ERR("Failed to notify notification thread that a session rotation is ongoing for session %s",
4575 * Create the path name for the next chunk.
4578 if (now
== (time_t) -1) {
4579 ret
= -LTTNG_ERR_ROTATION_NOT_AVAILABLE
;
4582 session
->last_chunk_start_ts
= session
->current_chunk_start_ts
;
4583 session
->current_chunk_start_ts
= now
;
4585 timeinfo
= localtime(&now
);
4587 PERROR("Failed to sample local time in rotate session command");
4588 ret
= -LTTNG_ERR_UNK
;
4591 strf_ret
= strftime(datetime
, sizeof(datetime
), "%Y%m%dT%H%M%S%z",
4594 ERR("Failed to format local time timestamp in rotate session command");
4595 ret
= -LTTNG_ERR_UNK
;
4598 if (session
->kernel_session
) {
4600 * The active path for the next rotation/destroy.
4601 * Ex: ~/lttng-traces/auto-20170922-111748/20170922-111754-42
4603 ret
= snprintf(session
->rotation_chunk
.active_tracing_path
,
4604 sizeof(session
->rotation_chunk
.active_tracing_path
),
4606 session_get_base_path(session
),
4607 datetime
, session
->current_archive_id
+ 1);
4608 if (ret
< 0 || ret
== sizeof(session
->rotation_chunk
.active_tracing_path
)) {
4609 ERR("Failed to format active kernel tracing path in rotate session command");
4610 ret
= -LTTNG_ERR_UNK
;
4614 * The sub-directory for the consumer
4615 * Ex: /20170922-111754-42/kernel
4617 ret
= snprintf(session
->kernel_session
->consumer
->chunk_path
,
4618 sizeof(session
->kernel_session
->consumer
->chunk_path
),
4619 "/%s-%" PRIu64
, datetime
,
4620 session
->current_archive_id
+ 1);
4621 if (ret
< 0 || ret
== sizeof(session
->kernel_session
->consumer
->chunk_path
)) {
4622 ERR("Failed to format the kernel consumer's sub-directory in rotate session command");
4623 ret
= -LTTNG_ERR_UNK
;
4627 * Create the new chunk folder, before the rotation begins so we don't
4628 * race with the consumer/tracer activity.
4630 ret
= domain_mkdir(session
->kernel_session
->consumer
, session
,
4631 session
->kernel_session
->uid
,
4632 session
->kernel_session
->gid
);
4634 ERR("Failed to create kernel session tracing path at %s",
4635 session
->kernel_session
->consumer
->chunk_path
);
4636 ret
= -LTTNG_ERR_CREATE_DIR_FAIL
;
4639 ret
= kernel_rotate_session(session
);
4640 if (ret
!= LTTNG_OK
) {
4645 if (session
->ust_session
) {
4646 ret
= snprintf(session
->rotation_chunk
.active_tracing_path
,
4647 PATH_MAX
, "%s/%s-%" PRIu64
,
4648 session_get_base_path(session
),
4649 datetime
, session
->current_archive_id
+ 1);
4651 ERR("Failed to format active UST tracing path in rotate session command");
4652 ret
= -LTTNG_ERR_UNK
;
4655 ret
= snprintf(session
->ust_session
->consumer
->chunk_path
,
4656 PATH_MAX
, "/%s-%" PRIu64
, datetime
,
4657 session
->current_archive_id
+ 1);
4659 ERR("Failed to format the UST consumer's sub-directory in rotate session command");
4660 ret
= -LTTNG_ERR_UNK
;
4664 * Create the new chunk folder, before the rotation begins so we don't
4665 * race with the consumer/tracer activity.
4667 ret
= domain_mkdir(session
->ust_session
->consumer
, session
,
4668 session
->ust_session
->uid
,
4669 session
->ust_session
->gid
);
4671 ret
= -LTTNG_ERR_CREATE_DIR_FAIL
;
4674 ret
= ust_app_rotate_session(session
, &ust_active
);
4675 if (ret
!= LTTNG_OK
) {
4679 * Handle the case where we did not start a rotation on any channel.
4680 * The consumer will never wake up the rotation thread to perform the
4681 * rename, so we have to do it here while we hold the session and
4682 * session_list locks.
4684 if (!session
->kernel_session
&& !ust_active
) {
4685 struct lttng_trace_archive_location
*location
;
4687 session
->rotate_pending
= false;
4688 session
->rotation_state
= LTTNG_ROTATION_STATE_COMPLETED
;
4689 ret
= rename_complete_chunk(session
, now
);
4691 ERR("Failed to rename completed rotation chunk");
4695 /* Ownership of location is transferred. */
4696 location
= session_get_trace_archive_location(session
);
4697 ret
= notification_thread_command_session_rotation_completed(
4698 notification_thread_handle
,
4702 session
->current_archive_id
,
4704 if (ret
!= LTTNG_OK
) {
4705 ERR("Failed to notify notification thread that rotation is complete for session %s",
4711 if (!session
->active
) {
4712 session
->rotated_after_last_stop
= true;
4715 if (rotate_return
) {
4716 rotate_return
->rotation_id
= session
->current_archive_id
;
4719 DBG("Cmd rotate session %s, current_archive_id %" PRIu64
" sent",
4720 session
->name
, session
->current_archive_id
);
4728 * Command LTTNG_ROTATION_GET_INFO from the lttng-ctl library.
4730 * Check if the session has finished its rotation.
4732 * Return 0 on success or else a LTTNG_ERR code.
4734 int cmd_rotate_get_info(struct ltt_session
*session
,
4735 struct lttng_rotation_get_info_return
*info_return
,
4736 uint64_t rotation_id
)
4742 DBG("Cmd rotate_get_info session %s, rotation id %" PRIu64
, session
->name
,
4743 session
->current_archive_id
);
4745 if (session
->current_archive_id
!= rotation_id
) {
4746 info_return
->status
= (int32_t) LTTNG_ROTATION_STATE_EXPIRED
;
4751 switch (session
->rotation_state
) {
4752 case LTTNG_ROTATION_STATE_ONGOING
:
4753 DBG("Reporting that rotation id %" PRIu64
" of session %s is still pending",
4754 rotation_id
, session
->name
);
4756 case LTTNG_ROTATION_STATE_COMPLETED
:
4758 char *current_tracing_path_reply
;
4759 size_t current_tracing_path_reply_len
;
4761 switch (session_get_consumer_destination_type(session
)) {
4762 case CONSUMER_DST_LOCAL
:
4763 current_tracing_path_reply
=
4764 info_return
->location
.local
.absolute_path
;
4765 current_tracing_path_reply_len
=
4766 sizeof(info_return
->location
.local
.absolute_path
);
4767 info_return
->location_type
=
4768 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_LOCAL
;
4770 case CONSUMER_DST_NET
:
4771 current_tracing_path_reply
=
4772 info_return
->location
.relay
.relative_path
;
4773 current_tracing_path_reply_len
=
4774 sizeof(info_return
->location
.relay
.relative_path
);
4775 /* Currently the only supported relay protocol. */
4776 info_return
->location
.relay
.protocol
=
4777 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_RELAY_PROTOCOL_TYPE_TCP
;
4779 ret
= lttng_strncpy(info_return
->location
.relay
.host
,
4780 session_get_net_consumer_hostname(session
),
4781 sizeof(info_return
->location
.relay
.host
));
4783 ERR("Failed to host name to rotate_get_info reply");
4784 info_return
->status
= LTTNG_ROTATION_STATUS_ERROR
;
4785 ret
= -LTTNG_ERR_UNK
;
4789 session_get_net_consumer_ports(session
,
4790 &info_return
->location
.relay
.ports
.control
,
4791 &info_return
->location
.relay
.ports
.data
);
4792 info_return
->location_type
=
4793 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_RELAY
;
4798 ret
= lttng_strncpy(current_tracing_path_reply
,
4799 session
->rotation_chunk
.current_rotate_path
,
4800 current_tracing_path_reply_len
);
4802 ERR("Failed to copy current tracing path to rotate_get_info reply");
4803 info_return
->status
= LTTNG_ROTATION_STATUS_ERROR
;
4804 ret
= -LTTNG_ERR_UNK
;
4810 case LTTNG_ROTATION_STATE_ERROR
:
4811 DBG("Reporting that an error occurred during rotation %" PRIu64
" of session %s",
4812 rotation_id
, session
->name
);
4818 info_return
->status
= (int32_t) session
->rotation_state
;
4825 * Command LTTNG_ROTATION_SET_SCHEDULE from the lttng-ctl library.
4827 * Configure the automatic rotation parameters.
4828 * 'activate' to true means activate the rotation schedule type with 'new_value'.
4829 * 'activate' to false means deactivate the rotation schedule and validate that
4830 * 'new_value' has the same value as the currently active value.
4832 * Return 0 on success or else a positive LTTNG_ERR code.
4834 int cmd_rotation_set_schedule(struct ltt_session
*session
,
4835 bool activate
, enum lttng_rotation_schedule_type schedule_type
,
4837 struct notification_thread_handle
*notification_thread_handle
)
4840 uint64_t *parameter_value
;
4844 DBG("Cmd rotate set schedule session %s", session
->name
);
4846 if (session
->live_timer
|| session
->snapshot_mode
||
4847 !session
->output_traces
) {
4848 DBG("Failing ROTATION_SET_SCHEDULE command as the rotation feature is not available for this session");
4849 ret
= LTTNG_ERR_ROTATION_NOT_AVAILABLE
;
4853 switch (schedule_type
) {
4854 case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD
:
4855 parameter_value
= &session
->rotate_size
;
4857 case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC
:
4858 parameter_value
= &session
->rotate_timer_period
;
4859 if (new_value
>= UINT_MAX
) {
4860 DBG("Failing ROTATION_SET_SCHEDULE command as the value requested for a periodic rotation schedule is invalid: %" PRIu64
" > %u (UINT_MAX)",
4861 new_value
, UINT_MAX
);
4862 ret
= LTTNG_ERR_INVALID
;
4867 WARN("Failing ROTATION_SET_SCHEDULE command on unknown schedule type");
4868 ret
= LTTNG_ERR_INVALID
;
4872 /* Improper use of the API. */
4873 if (new_value
== -1ULL) {
4874 WARN("Failing ROTATION_SET_SCHEDULE command as the value requested is -1");
4875 ret
= LTTNG_ERR_INVALID
;
4880 * As indicated in struct ltt_session's comments, a value of == 0 means
4881 * this schedule rotation type is not in use.
4883 * Reject the command if we were asked to activate a schedule that was
4886 if (activate
&& *parameter_value
!= 0) {
4887 DBG("Failing ROTATION_SET_SCHEDULE (activate) command as the schedule is already active");
4888 ret
= LTTNG_ERR_ROTATION_SCHEDULE_SET
;
4893 * Reject the command if we were asked to deactivate a schedule that was
4896 if (!activate
&& *parameter_value
== 0) {
4897 DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as the schedule is already inactive");
4898 ret
= LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET
;
4903 * Reject the command if we were asked to deactivate a schedule that
4906 if (!activate
&& *parameter_value
!= new_value
) {
4907 DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as an inexistant schedule was provided");
4908 ret
= LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET
;
4912 *parameter_value
= activate
? new_value
: 0;
4914 switch (schedule_type
) {
4915 case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC
:
4916 if (activate
&& session
->active
) {
4918 * Only start the timer if the session is active,
4919 * otherwise it will be started when the session starts.
4921 ret
= sessiond_rotate_timer_start(session
, new_value
);
4923 ERR("Failed to enable session rotation timer in ROTATION_SET_SCHEDULE command");
4924 ret
= LTTNG_ERR_UNK
;
4928 ret
= sessiond_rotate_timer_stop(session
);
4930 ERR("Failed to disable session rotation timer in ROTATION_SET_SCHEDULE command");
4931 ret
= LTTNG_ERR_UNK
;
4936 case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD
:
4938 ret
= subscribe_session_consumed_size_rotation(session
,
4939 new_value
, notification_thread_handle
);
4941 ERR("Failed to enable consumed-size notification in ROTATION_SET_SCHEDULE command");
4942 ret
= LTTNG_ERR_UNK
;
4946 ret
= unsubscribe_session_consumed_size_rotation(session
,
4947 notification_thread_handle
);
4949 ERR("Failed to disable consumed-size notification in ROTATION_SET_SCHEDULE command");
4950 ret
= LTTNG_ERR_UNK
;
4957 /* Would have been caught before. */
4970 * Command ROTATE_GET_CURRENT_PATH from the lttng-ctl library.
4972 * Configure the automatic rotation parameters.
4973 * Set to -1ULL to disable them.
4975 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4977 int cmd_session_get_current_output(struct ltt_session
*session
,
4978 struct lttng_session_get_current_output_return
*output_return
)
4983 if (!session
->snapshot_mode
) {
4984 if (session
->current_archive_id
== 0) {
4985 if (session
->kernel_session
) {
4986 path
= session_get_base_path(session
);
4987 } else if (session
->ust_session
) {
4988 path
= session_get_base_path(session
);
4994 path
= session
->rotation_chunk
.active_tracing_path
;
4998 * A snapshot session does not have a "current" trace archive
5004 DBG("Cmd get current output for session %s, returning %s",
5005 session
->name
, path
);
5007 ret
= lttng_strncpy(output_return
->path
,
5009 sizeof(output_return
->path
));
5011 ERR("Failed to copy trace output path to session get current output command reply");
5012 ret
= -LTTNG_ERR_UNK
;
5021 /* Wait for a given path to be removed before continuing. */
5022 static enum lttng_error_code
wait_on_path(void *path_data
)
5024 const char *shm_path
= path_data
;
5026 DBG("Waiting for the shm path at %s to be removed before completing session destruction",
5032 ret
= stat(shm_path
, &st
);
5034 if (errno
!= ENOENT
) {
5035 PERROR("stat() returned an error while checking for the existence of the shm path");
5037 DBG("shm path no longer exists, completing the destruction of session");
5041 if (!S_ISDIR(st
.st_mode
)) {
5042 ERR("The type of shm path %s returned by stat() is not a directory; aborting the wait for shm path removal",
5047 usleep(SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US
);
5053 * Returns a pointer to a handler to run on completion of a command.
5054 * Returns NULL if no handler has to be run for the last command executed.
5056 const struct cmd_completion_handler
*cmd_pop_completion_handler(void)
5058 struct cmd_completion_handler
*handler
= current_completion_handler
;
5060 current_completion_handler
= NULL
;
5065 * Init command subsystem.
5070 * Set network sequence index to 1 for streams to match a relayd
5071 * socket on the consumer side.
5073 pthread_mutex_lock(&relayd_net_seq_idx_lock
);
5074 relayd_net_seq_idx
= 1;
5075 pthread_mutex_unlock(&relayd_net_seq_idx_lock
);
5077 DBG("Command subsystem initialized");