1 /*
2 * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 *
7 */
8
9 #define _LGPL_SOURCE
10 #include "agent-thread.hpp"
11 #include "agent.hpp"
12 #include "buffer-registry.hpp"
13 #include "channel.hpp"
14 #include "cmd.hpp"
15 #include "consumer-output.hpp"
16 #include "consumer.hpp"
17 #include "event-notifier-error-accounting.hpp"
18 #include "event.hpp"
19 #include "health-sessiond.hpp"
20 #include "kernel-consumer.hpp"
21 #include "kernel.hpp"
22 #include "lttng-sessiond.hpp"
23 #include "lttng-syscall.hpp"
24 #include "notification-thread-commands.hpp"
25 #include "notification-thread.hpp"
26 #include "rotation-thread.hpp"
27 #include "session.hpp"
28 #include "timer.hpp"
29 #include "tracker.hpp"
30 #include "utils.hpp"
31
32 #include <common/buffer-view.hpp>
33 #include <common/common.hpp>
34 #include <common/compat/string.hpp>
35 #include <common/defaults.hpp>
36 #include <common/dynamic-buffer.hpp>
37 #include <common/kernel-ctl/kernel-ctl.hpp>
38 #include <common/payload-view.hpp>
39 #include <common/payload.hpp>
40 #include <common/relayd/relayd.hpp>
41 #include <common/sessiond-comm/sessiond-comm.hpp>
42 #include <common/string-utils/string-utils.hpp>
43 #include <common/trace-chunk.hpp>
44 #include <common/urcu.hpp>
45 #include <common/utils.hpp>
46
47 #include <lttng/action/action-internal.hpp>
48 #include <lttng/action/action.h>
49 #include <lttng/channel-internal.hpp>
50 #include <lttng/channel.h>
51 #include <lttng/condition/condition-internal.hpp>
52 #include <lttng/condition/condition.h>
53 #include <lttng/condition/event-rule-matches-internal.hpp>
54 #include <lttng/condition/event-rule-matches.h>
55 #include <lttng/error-query-internal.hpp>
56 #include <lttng/event-internal.hpp>
57 #include <lttng/event-rule/event-rule-internal.hpp>
58 #include <lttng/event-rule/event-rule.h>
59 #include <lttng/kernel.h>
60 #include <lttng/location-internal.hpp>
61 #include <lttng/lttng-error.h>
62 #include <lttng/rotate-internal.hpp>
63 #include <lttng/session-descriptor-internal.hpp>
64 #include <lttng/session-internal.hpp>
65 #include <lttng/tracker.h>
66 #include <lttng/trigger/trigger-internal.hpp>
67 #include <lttng/userspace-probe-internal.hpp>
68
69 #include <algorithm>
70 #include <inttypes.h>
71 #include <stdio.h>
72 #include <sys/stat.h>
73 #include <urcu/list.h>
74 #include <urcu/uatomic.h>
75
76 /* Sleep for 100ms between each check for the shm path's deletion. */
77 #define SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US 100000
78
79 namespace lsu = lttng::sessiond::ust;
80
81 static enum lttng_error_code wait_on_path(void *path);
82
83 namespace {
84 struct cmd_destroy_session_reply_context {
85 int reply_sock_fd;
86 bool implicit_rotation_on_destroy;
87 /*
88 * Indicates whether or not an error occurred while launching the
89 * destruction of a session.
90 */
91 enum lttng_error_code destruction_status;
92 };
93
94 /*
95 * Command completion handler that is used by the destroy command
96 * when a session that has a non-default shm_path is being destroyed.
97 *
98 * See comment in cmd_destroy_session() for the rationale.
99 */
100 struct destroy_completion_handler {
101 struct cmd_completion_handler handler;
102 char shm_path[member_sizeof(struct ltt_session, shm_path)];
103 } destroy_completion_handler = {
104 .handler = { .run = wait_on_path, .data = destroy_completion_handler.shm_path },
105 .shm_path = { 0 },
106 };
107
108 /*
109 * Used to keep a unique index for each relayd socket created; this value
110 * is associated with streams on the consumer so it can match the right relayd
111 * to send to. It must be accessed with the relayd_net_seq_idx_lock
112 * held.
113 */
114 pthread_mutex_t relayd_net_seq_idx_lock = PTHREAD_MUTEX_INITIALIZER;
115 uint64_t relayd_net_seq_idx;
116 } /* namespace */
117
118 static struct cmd_completion_handler *current_completion_handler;
119 static int validate_ust_event_name(const char *);
120 static int cmd_enable_event_internal(ltt_session::locked_ref& session,
121 const struct lttng_domain *domain,
122 char *channel_name,
123 struct lttng_event *event,
124 char *filter_expression,
125 struct lttng_bytecode *filter,
126 struct lttng_event_exclusion *exclusion,
127 int wpipe);
128 static enum lttng_error_code cmd_enable_channel_internal(ltt_session::locked_ref& session,
129 const struct lttng_domain *domain,
130 const struct lttng_channel *_attr,
131 int wpipe);
132
133 /*
134 * Create a session path used by list_lttng_sessions for the case that the
135 * session consumer is on the network.
136 */
137 static int
138 build_network_session_path(char *dst, size_t size, const ltt_session::locked_ref& session)
139 {
140 int ret, kdata_port, udata_port;
141 struct lttng_uri *kuri = nullptr, *uuri = nullptr, *uri = nullptr;
142 char tmp_uurl[PATH_MAX], tmp_urls[PATH_MAX];
143
144 LTTNG_ASSERT(dst);
145
146 memset(tmp_urls, 0, sizeof(tmp_urls));
147 memset(tmp_uurl, 0, sizeof(tmp_uurl));
148
149 kdata_port = udata_port = DEFAULT_NETWORK_DATA_PORT;
150
151 if (session->kernel_session && session->kernel_session->consumer) {
152 kuri = &session->kernel_session->consumer->dst.net.control;
153 kdata_port = session->kernel_session->consumer->dst.net.data.port;
154 }
155
156 if (session->ust_session && session->ust_session->consumer) {
157 uuri = &session->ust_session->consumer->dst.net.control;
158 udata_port = session->ust_session->consumer->dst.net.data.port;
159 }
160
161 if (uuri == nullptr && kuri == nullptr) {
162 uri = &session->consumer->dst.net.control;
163 kdata_port = session->consumer->dst.net.data.port;
164 } else if (kuri && uuri) {
165 ret = uri_compare(kuri, uuri);
166 if (ret) {
167 /* Not Equal */
168 uri = kuri;
169 /* Build uuri URL string */
170 ret = uri_to_str_url(uuri, tmp_uurl, sizeof(tmp_uurl));
171 if (ret < 0) {
172 goto error;
173 }
174 } else {
175 uri = kuri;
176 }
177 } else if (kuri && uuri == nullptr) {
178 uri = kuri;
179 } else if (uuri && kuri == nullptr) {
180 uri = uuri;
181 }
182
183 ret = uri_to_str_url(uri, tmp_urls, sizeof(tmp_urls));
184 if (ret < 0) {
185 goto error;
186 }
187
188 /*
189 * Do we have a UST URL set? If yes, this means we have both kernel and UST
190 * to print.
191 */
192 if (*tmp_uurl != '\0') {
193 ret = snprintf(dst,
194 size,
195 "[K]: %s [data: %d] -- [U]: %s [data: %d]",
196 tmp_urls,
197 kdata_port,
198 tmp_uurl,
199 udata_port);
200 } else {
201 int dport;
202 if (kuri || (!kuri && !uuri)) {
203 dport = kdata_port;
204 } else {
205 /* No kernel URI, use the UST port. */
206 dport = udata_port;
207 }
208 ret = snprintf(dst, size, "%s [data: %d]", tmp_urls, dport);
209 }
210
211 error:
212 return ret;
213 }
214
215 /*
216 * Get run-time attributes if the session has been started (discarded events,
217 * lost packets).
218 */
219 static int get_kernel_runtime_stats(const ltt_session::locked_ref& session,
220 struct ltt_kernel_channel *kchan,
221 uint64_t *discarded_events,
222 uint64_t *lost_packets)
223 {
224 int ret;
225
226 if (!session->has_been_started) {
227 ret = 0;
228 *discarded_events = 0;
229 *lost_packets = 0;
230 goto end;
231 }
232
233 ret = consumer_get_discarded_events(
234 session->id, kchan->key, session->kernel_session->consumer, discarded_events);
235 if (ret < 0) {
236 goto end;
237 }
238
239 ret = consumer_get_lost_packets(
240 session->id, kchan->key, session->kernel_session->consumer, lost_packets);
241 if (ret < 0) {
242 goto end;
243 }
244
245 end:
246 return ret;
247 }
248
249 /*
250 * Get run-time attributes if the session has been started (discarded events,
251 * lost packets).
252 */
253 static int get_ust_runtime_stats(const ltt_session::locked_ref& session,
254 struct ltt_ust_channel *uchan,
255 uint64_t *discarded_events,
256 uint64_t *lost_packets)
257 {
258 int ret;
259 struct ltt_ust_session *usess;
260
261 if (!discarded_events || !lost_packets) {
262 ret = -1;
263 goto end;
264 }
265
266 usess = session->ust_session;
267 LTTNG_ASSERT(discarded_events);
268 LTTNG_ASSERT(lost_packets);
269
270 if (!usess || !session->has_been_started) {
271 *discarded_events = 0;
272 *lost_packets = 0;
273 ret = 0;
274 goto end;
275 }
276
277 if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
278 ret = ust_app_uid_get_channel_runtime_stats(usess->id,
279 &usess->buffer_reg_uid_list,
280 usess->consumer,
281 uchan->id,
282 uchan->attr.overwrite,
283 discarded_events,
284 lost_packets);
285 } else if (usess->buffer_type == LTTNG_BUFFER_PER_PID) {
286 ret = ust_app_pid_get_channel_runtime_stats(usess,
287 uchan,
288 usess->consumer,
289 uchan->attr.overwrite,
290 discarded_events,
291 lost_packets);
292 if (ret < 0) {
293 goto end;
294 }
295 *discarded_events += uchan->per_pid_closed_app_discarded;
296 *lost_packets += uchan->per_pid_closed_app_lost;
297 } else {
298 ERR("Unsupported buffer type");
299 abort();
300 ret = -1;
301 goto end;
302 }
303
304 end:
305 return ret;
306 }
307
308 /*
309 * Create a list of agent domain events.
310 *
311 * Return LTTNG_OK on success and set *nb_events, or an LTTng error code on failure.
312 */
313 static enum lttng_error_code list_lttng_agent_events(struct agent *agt,
314 struct lttng_payload *reply_payload,
315 unsigned int *nb_events)
316 {
317 enum lttng_error_code ret_code;
318 int ret = 0;
319 unsigned int local_nb_events = 0;
320 struct agent_event *event;
321 struct lttng_ht_iter iter;
322 unsigned long agent_event_count;
323
324 assert(agt);
325 assert(reply_payload);
326
327 DBG3("Listing agent events");
328
329 agent_event_count = lttng_ht_get_count(agt->events);
330 if (agent_event_count == 0) {
331 /* Early exit. */
332 goto end;
333 }
334
335 if (agent_event_count > UINT_MAX) {
336 ret_code = LTTNG_ERR_OVERFLOW;
337 goto error;
338 }
339
340 local_nb_events = (unsigned int) agent_event_count;
341
342 {
343 const lttng::urcu::read_lock_guard read_lock;
344
345 cds_lfht_for_each_entry (agt->events->ht, &iter.iter, event, node.node) {
346 struct lttng_event *tmp_event = lttng_event_create();
347
348 if (!tmp_event) {
349 ret_code = LTTNG_ERR_NOMEM;
350 goto error;
351 }
352
353 if (lttng_strncpy(tmp_event->name, event->name, sizeof(tmp_event->name))) {
354 lttng_event_destroy(tmp_event);
355 ret_code = LTTNG_ERR_FATAL;
356 goto error;
357 }
358
359 tmp_event->name[sizeof(tmp_event->name) - 1] = '\0';
360 tmp_event->enabled = !!event->enabled_count;
361 tmp_event->loglevel = event->loglevel_value;
362 tmp_event->loglevel_type = event->loglevel_type;
363
364 ret = lttng_event_serialize(tmp_event,
365 0,
366 nullptr,
367 event->filter_expression,
368 0,
369 nullptr,
370 reply_payload);
371 lttng_event_destroy(tmp_event);
372 if (ret) {
373 ret_code = LTTNG_ERR_FATAL;
374 goto error;
375 }
376 }
377 }
378 end:
379 ret_code = LTTNG_OK;
380 *nb_events = local_nb_events;
381 error:
382 return ret_code;
383 }
384
385 /*
386 * Create a list of ust global domain events.
387 */
388 static enum lttng_error_code list_lttng_ust_global_events(char *channel_name,
389 struct ltt_ust_domain_global *ust_global,
390 struct lttng_payload *reply_payload,
391 unsigned int *nb_events)
392 {
393 enum lttng_error_code ret_code;
394 int ret;
395 struct lttng_ht_iter iter;
396 struct lttng_ht_node_str *node;
397 struct ltt_ust_channel *uchan;
398 struct ltt_ust_event *uevent;
399 unsigned long channel_event_count;
400 unsigned int local_nb_events = 0;
401
402 assert(reply_payload);
403 assert(nb_events);
404
405 DBG("Listing UST global events for channel %s", channel_name);
406
407 const lttng::urcu::read_lock_guard read_lock;
408
409 lttng_ht_lookup(ust_global->channels, (void *) channel_name, &iter);
410 node = lttng_ht_iter_get_node<lttng_ht_node_str>(&iter);
411 if (node == nullptr) {
412 ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND;
413 goto error;
414 }
415
416 uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);
417
418 channel_event_count = lttng_ht_get_count(uchan->events);
419 if (channel_event_count == 0) {
420 /* Early exit. */
421 ret_code = LTTNG_OK;
422 goto end;
423 }
424
425 if (channel_event_count > UINT_MAX) {
426 ret_code = LTTNG_ERR_OVERFLOW;
427 goto error;
428 }
429
430 local_nb_events = (unsigned int) channel_event_count;
431
432 DBG3("Listing UST global %d events", *nb_events);
433
434 cds_lfht_for_each_entry (uchan->events->ht, &iter.iter, uevent, node.node) {
435 struct lttng_event *tmp_event = nullptr;
436
437 if (uevent->internal) {
438 /* This event should remain hidden from clients */
439 local_nb_events--;
440 continue;
441 }
442
443 tmp_event = lttng_event_create();
444 if (!tmp_event) {
445 ret_code = LTTNG_ERR_NOMEM;
446 goto error;
447 }
448
449 if (lttng_strncpy(tmp_event->name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN)) {
450 ret_code = LTTNG_ERR_FATAL;
451 lttng_event_destroy(tmp_event);
452 goto error;
453 }
454
455 tmp_event->name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
456 tmp_event->enabled = uevent->enabled;
457
458 switch (uevent->attr.instrumentation) {
459 case LTTNG_UST_ABI_TRACEPOINT:
460 tmp_event->type = LTTNG_EVENT_TRACEPOINT;
461 break;
462 case LTTNG_UST_ABI_PROBE:
463 tmp_event->type = LTTNG_EVENT_PROBE;
464 break;
465 case LTTNG_UST_ABI_FUNCTION:
466 tmp_event->type = LTTNG_EVENT_FUNCTION;
467 break;
468 }
469
470 tmp_event->loglevel = uevent->attr.loglevel;
471 switch (uevent->attr.loglevel_type) {
472 case LTTNG_UST_ABI_LOGLEVEL_ALL:
473 tmp_event->loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
474 break;
475 case LTTNG_UST_ABI_LOGLEVEL_RANGE:
476 tmp_event->loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
477 break;
478 case LTTNG_UST_ABI_LOGLEVEL_SINGLE:
479 tmp_event->loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
480 break;
481 }
482 if (uevent->filter) {
483 tmp_event->filter = 1;
484 }
485 if (uevent->exclusion) {
486 tmp_event->exclusion = 1;
487 }
488
489 std::vector<const char *> exclusion_names;
490 if (uevent->exclusion) {
491 for (int i = 0; i < uevent->exclusion->count; i++) {
492 exclusion_names.emplace_back(
493 LTTNG_EVENT_EXCLUSION_NAME_AT(uevent->exclusion, i));
494 }
495 }
496
497 /*
498 * We do not care about the filter bytecode and the fd from the
499 * userspace_probe_location.
500 */
501 ret = lttng_event_serialize(tmp_event,
502 exclusion_names.size(),
503 exclusion_names.size() ? exclusion_names.data() :
504 nullptr,
505 uevent->filter_expression,
506 0,
507 nullptr,
508 reply_payload);
509 lttng_event_destroy(tmp_event);
510 if (ret) {
511 ret_code = LTTNG_ERR_FATAL;
512 goto error;
513 }
514 }
515
516 end:
517 /* nb_events is already set at this point. */
518 ret_code = LTTNG_OK;
519 *nb_events = local_nb_events;
520 error:
521 return ret_code;
522 }
523
524 /*
525 * Fill an lttng_event array with all the kernel events in the channel.
526 */
527 static enum lttng_error_code list_lttng_kernel_events(char *channel_name,
528 struct ltt_kernel_session *kernel_session,
529 struct lttng_payload *reply_payload,
530 unsigned int *nb_events)
531 {
532 enum lttng_error_code ret_code;
533 int ret;
534 struct ltt_kernel_event *event;
535 struct ltt_kernel_channel *kchan;
536
537 assert(reply_payload);
538
539 kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
540 if (kchan == nullptr) {
541 ret_code = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
542 goto end;
543 }
544
545 *nb_events = kchan->event_count;
546
547 DBG("Listing events for channel %s", kchan->channel->name);
548
549 if (*nb_events == 0) {
550 ret_code = LTTNG_OK;
551 goto end;
552 }
553
554 /* Iterate over the events of the kernel channel. */
555 cds_list_for_each_entry (event, &kchan->events_list.head, list) {
556 struct lttng_event *tmp_event = lttng_event_create();
557
558 if (!tmp_event) {
559 ret_code = LTTNG_ERR_NOMEM;
560 goto end;
561 }
562
563 if (lttng_strncpy(tmp_event->name, event->event->name, LTTNG_SYMBOL_NAME_LEN)) {
564 lttng_event_destroy(tmp_event);
565 ret_code = LTTNG_ERR_FATAL;
566 goto end;
567 }
568
569 tmp_event->name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
570 tmp_event->enabled = event->enabled;
571 tmp_event->filter = (unsigned char) !!event->filter_expression;
572
573 switch (event->event->instrumentation) {
574 case LTTNG_KERNEL_ABI_TRACEPOINT:
575 tmp_event->type = LTTNG_EVENT_TRACEPOINT;
576 break;
577 case LTTNG_KERNEL_ABI_KRETPROBE:
578 tmp_event->type = LTTNG_EVENT_FUNCTION;
579 memcpy(&tmp_event->attr.probe,
580 &event->event->u.kprobe,
581 sizeof(struct lttng_kernel_abi_kprobe));
582 break;
583 case LTTNG_KERNEL_ABI_KPROBE:
584 tmp_event->type = LTTNG_EVENT_PROBE;
585 memcpy(&tmp_event->attr.probe,
586 &event->event->u.kprobe,
587 sizeof(struct lttng_kernel_abi_kprobe));
588 break;
589 case LTTNG_KERNEL_ABI_UPROBE:
590 tmp_event->type = LTTNG_EVENT_USERSPACE_PROBE;
591 break;
592 case LTTNG_KERNEL_ABI_FUNCTION:
593 tmp_event->type = LTTNG_EVENT_FUNCTION;
594 memcpy(&(tmp_event->attr.ftrace),
595 &event->event->u.ftrace,
596 sizeof(struct lttng_kernel_abi_function));
597 break;
598 case LTTNG_KERNEL_ABI_NOOP:
599 tmp_event->type = LTTNG_EVENT_NOOP;
600 break;
601 case LTTNG_KERNEL_ABI_SYSCALL:
602 tmp_event->type = LTTNG_EVENT_SYSCALL;
603 break;
604 case LTTNG_KERNEL_ABI_ALL:
605 /* fall-through. */
606 default:
607 abort();
608 break;
609 }
610
611 if (event->userspace_probe_location) {
612 struct lttng_userspace_probe_location *location_copy =
613 lttng_userspace_probe_location_copy(
614 event->userspace_probe_location);
615
616 if (!location_copy) {
617 lttng_event_destroy(tmp_event);
618 ret_code = LTTNG_ERR_NOMEM;
619 goto end;
620 }
621
622 ret = lttng_event_set_userspace_probe_location(tmp_event, location_copy);
623 if (ret) {
624 lttng_event_destroy(tmp_event);
625 lttng_userspace_probe_location_destroy(location_copy);
626 ret_code = LTTNG_ERR_INVALID;
627 goto end;
628 }
629 }
630
631 ret = lttng_event_serialize(
632 tmp_event, 0, nullptr, event->filter_expression, 0, nullptr, reply_payload);
633 lttng_event_destroy(tmp_event);
634 if (ret) {
635 ret_code = LTTNG_ERR_FATAL;
636 goto end;
637 }
638 }
639
640 ret_code = LTTNG_OK;
641 end:
642 return ret_code;
643 }
644
645 /*
646 * Add a URI to the consumer output object. Set the correct path depending on the
647 * domain, adding the default trace directory.
648 */
649 static enum lttng_error_code add_uri_to_consumer(const ltt_session::locked_ref& session,
650 struct consumer_output *consumer,
651 struct lttng_uri *uri,
652 enum lttng_domain_type domain)
653 {
654 int ret;
655 enum lttng_error_code ret_code = LTTNG_OK;
656
657 LTTNG_ASSERT(uri);
658
659 if (consumer == nullptr) {
660 DBG("No consumer detected. Don't add URI. Stopping.");
661 ret_code = LTTNG_ERR_NO_CONSUMER;
662 goto error;
663 }
664
665 switch (domain) {
666 case LTTNG_DOMAIN_KERNEL:
667 ret = lttng_strncpy(consumer->domain_subdir,
668 DEFAULT_KERNEL_TRACE_DIR,
669 sizeof(consumer->domain_subdir));
670 break;
671 case LTTNG_DOMAIN_UST:
672 ret = lttng_strncpy(consumer->domain_subdir,
673 DEFAULT_UST_TRACE_DIR,
674 sizeof(consumer->domain_subdir));
675 break;
676 default:
677 /*
678 * This case is possible if we try to add the URI to the global
679 * tracing session consumer object, in which case there is
680 * no subdir.
681 */
682 memset(consumer->domain_subdir, 0, sizeof(consumer->domain_subdir));
683 ret = 0;
684 }
685 if (ret) {
686 ERR("Failed to initialize consumer output domain subdirectory");
687 ret_code = LTTNG_ERR_FATAL;
688 goto error;
689 }
690
691 switch (uri->dtype) {
692 case LTTNG_DST_IPV4:
693 case LTTNG_DST_IPV6:
694 DBG2("Setting network URI to consumer");
695
696 if (consumer->type == CONSUMER_DST_NET) {
697 if ((uri->stype == LTTNG_STREAM_CONTROL &&
698 consumer->dst.net.control_isset) ||
699 (uri->stype == LTTNG_STREAM_DATA && consumer->dst.net.data_isset)) {
700 ret_code = LTTNG_ERR_URL_EXIST;
701 goto error;
702 }
703 } else {
704 memset(&consumer->dst, 0, sizeof(consumer->dst));
705 }
706
707 /* Set URI into consumer output object */
708 ret = consumer_set_network_uri(session, consumer, uri);
709 if (ret < 0) {
710 ret_code = (lttng_error_code) -ret;
711 goto error;
712 } else if (ret == 1) {
713 /*
714 * The URI was already set in the consumer, so we do not append the subdir
715 * again to avoid duplicating the output directory.
716 */
717 ret_code = LTTNG_OK;
718 goto error;
719 }
720 break;
721 case LTTNG_DST_PATH:
722 if (*uri->dst.path != '/' || strstr(uri->dst.path, "../")) {
723 ret_code = LTTNG_ERR_INVALID;
724 goto error;
725 }
726 DBG2("Setting trace directory path from URI to %s", uri->dst.path);
727 memset(&consumer->dst, 0, sizeof(consumer->dst));
728
729 ret = lttng_strncpy(consumer->dst.session_root_path,
730 uri->dst.path,
731 sizeof(consumer->dst.session_root_path));
732 if (ret) {
733 ret_code = LTTNG_ERR_FATAL;
734 goto error;
735 }
736 consumer->type = CONSUMER_DST_LOCAL;
737 break;
738 }
739
740 ret_code = LTTNG_OK;
741 error:
742 return ret_code;
743 }
744
745 /*
746 * Init tracing by creating the trace directory and sending fds to the kernel consumer.
747 */
748 static int init_kernel_tracing(struct ltt_kernel_session *session)
749 {
750 int ret = 0;
751 struct lttng_ht_iter iter;
752 struct consumer_socket *socket;
753
754 LTTNG_ASSERT(session);
755
756 if (session->consumer_fds_sent == 0 && session->consumer != nullptr) {
757 const lttng::urcu::read_lock_guard read_lock;
758
759 cds_lfht_for_each_entry (
760 session->consumer->socks->ht, &iter.iter, socket, node.node) {
761 pthread_mutex_lock(socket->lock);
762 ret = kernel_consumer_send_session(socket, session);
763 pthread_mutex_unlock(socket->lock);
764 if (ret < 0) {
765 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
766 goto error;
767 }
768 }
769 }
770
771 error:
772 return ret;
773 }
774
775 /*
776 * Create a socket to the relayd using the URI.
777 *
778 * On success, the relayd_sock pointer is set to the created socket.
779 * Else, it remains untouched and an LTTng error code is returned.
780 */
781 static enum lttng_error_code create_connect_relayd(struct lttng_uri *uri,
782 struct lttcomm_relayd_sock **relayd_sock,
783 struct consumer_output *consumer)
784 {
785 int ret;
786 enum lttng_error_code status = LTTNG_OK;
787 struct lttcomm_relayd_sock *rsock;
788
789 rsock = lttcomm_alloc_relayd_sock(
790 uri, RELAYD_VERSION_COMM_MAJOR, RELAYD_VERSION_COMM_MINOR);
791 if (!rsock) {
792 status = LTTNG_ERR_FATAL;
793 goto error;
794 }
795
796 /*
797 * Connect to relayd so we can proceed with a session creation. This call
798 * can possibly block for an arbitrary amount of time, so the health
799 * state is set to 'in poll execution' around it.
800 */
801 health_poll_entry();
802 ret = relayd_connect(rsock);
803 health_poll_exit();
804 if (ret < 0) {
805 ERR("Unable to reach lttng-relayd");
806 status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
807 goto free_sock;
808 }
809
810 /* Create socket for control stream. */
811 if (uri->stype == LTTNG_STREAM_CONTROL) {
812 uint64_t result_flags;
813
814 DBG3("Creating relayd stream socket from URI");
815
816 /* Check relayd version */
817 ret = relayd_version_check(rsock);
818 if (ret == LTTNG_ERR_RELAYD_VERSION_FAIL) {
819 status = LTTNG_ERR_RELAYD_VERSION_FAIL;
820 goto close_sock;
821 } else if (ret < 0) {
822 ERR("Unable to reach lttng-relayd");
823 status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
824 goto close_sock;
825 }
826 consumer->relay_major_version = rsock->major;
827 consumer->relay_minor_version = rsock->minor;
828 ret = relayd_get_configuration(rsock, 0, &result_flags);
829 if (ret < 0) {
830 ERR("Unable to get relayd configuration");
831 status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
832 goto close_sock;
833 }
834 if (result_flags & LTTCOMM_RELAYD_CONFIGURATION_FLAG_CLEAR_ALLOWED) {
835 consumer->relay_allows_clear = true;
836 }
837 } else if (uri->stype == LTTNG_STREAM_DATA) {
838 DBG3("Creating relayd data socket from URI");
839 } else {
840 /* Command is not valid */
841 ERR("Relayd invalid stream type: %d", uri->stype);
842 status = LTTNG_ERR_INVALID;
843 goto close_sock;
844 }
845
846 *relayd_sock = rsock;
847
848 return status;
849
850 close_sock:
851 /* The returned value is not useful since we are on an error path. */
852 (void) relayd_close(rsock);
853 free_sock:
854 free(rsock);
855 error:
856 return status;
857 }
858
859 /*
860 * Connect to the relayd using URI and send the socket to the right consumer.
861 *
862 * The consumer socket lock must be held by the caller.
863 *
864 * Returns LTTNG_OK on success or an LTTng error code on failure.
865 */
866 static enum lttng_error_code send_consumer_relayd_socket(unsigned int session_id,
867 struct lttng_uri *relayd_uri,
868 struct consumer_output *consumer,
869 struct consumer_socket *consumer_sock,
870 const char *session_name,
871 const char *hostname,
872 const char *base_path,
873 int session_live_timer,
874 const uint64_t *current_chunk_id,
875 time_t session_creation_time,
876 bool session_name_contains_creation_time)
877 {
878 int ret;
879 struct lttcomm_relayd_sock *rsock = nullptr;
880 enum lttng_error_code status;
881
882 /* Connect to relayd and perform a version check if the URI is the control stream. */
883 status = create_connect_relayd(relayd_uri, &rsock, consumer);
884 if (status != LTTNG_OK) {
885 goto relayd_comm_error;
886 }
887 LTTNG_ASSERT(rsock);
888
889 /* Set the network sequence index if not set. */
890 if (consumer->net_seq_index == (uint64_t) -1ULL) {
891 pthread_mutex_lock(&relayd_net_seq_idx_lock);
892 /*
893 * Increment net_seq_idx because we are about to transfer the
894 * new relayd socket to the consumer.
895 * Assign unique key so the consumer can match streams.
896 */
897 consumer->net_seq_index = ++relayd_net_seq_idx;
898 pthread_mutex_unlock(&relayd_net_seq_idx_lock);
899 }
900
901 /* Send relayd socket to consumer. */
902 ret = consumer_send_relayd_socket(consumer_sock,
903 rsock,
904 consumer,
905 relayd_uri->stype,
906 session_id,
907 session_name,
908 hostname,
909 base_path,
910 session_live_timer,
911 current_chunk_id,
912 session_creation_time,
913 session_name_contains_creation_time);
914 if (ret < 0) {
915 status = LTTNG_ERR_ENABLE_CONSUMER_FAIL;
916 goto close_sock;
917 }
918
919 /* Flag that the corresponding socket was sent. */
920 if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
921 consumer_sock->control_sock_sent = 1;
922 } else if (relayd_uri->stype == LTTNG_STREAM_DATA) {
923 consumer_sock->data_sock_sent = 1;
924 }
925
926 /*
927 * Close the socket, which was dup'd on the consumer side. The session daemon
928 * does NOT keep track of the relayd socket(s) once transferred to the consumer.
929 */
930
931 close_sock:
932 if (status != LTTNG_OK) {
933 /*
934 * The consumer output for this session should not be used anymore
935 * since the relayd connection failed thus making any tracing or/and
936 * streaming not usable.
937 */
938 consumer->enabled = false;
939 }
940 (void) relayd_close(rsock);
941 free(rsock);
942
943 relayd_comm_error:
944 return status;
945 }
946
947 /*
948 * Send both relayd sockets to a specific consumer and domain. This is a
949 * helper function to facilitate sending the information to the consumer for a
950 * session.
951 *
952 * The consumer socket lock must be held by the caller.
953 *
954 * Returns LTTNG_OK, or an LTTng error code on failure.
955 */
956 static enum lttng_error_code send_consumer_relayd_sockets(unsigned int session_id,
957 struct consumer_output *consumer,
958 struct consumer_socket *sock,
959 const char *session_name,
960 const char *hostname,
961 const char *base_path,
962 int session_live_timer,
963 const uint64_t *current_chunk_id,
964 time_t session_creation_time,
965 bool session_name_contains_creation_time)
966 {
967 enum lttng_error_code status = LTTNG_OK;
968
969 LTTNG_ASSERT(consumer);
970 LTTNG_ASSERT(sock);
971
972 /* Sending control relayd socket. */
973 if (!sock->control_sock_sent) {
974 status = send_consumer_relayd_socket(session_id,
975 &consumer->dst.net.control,
976 consumer,
977 sock,
978 session_name,
979 hostname,
980 base_path,
981 session_live_timer,
982 current_chunk_id,
983 session_creation_time,
984 session_name_contains_creation_time);
985 if (status != LTTNG_OK) {
986 goto error;
987 }
988 }
989
990 /* Sending data relayd socket. */
991 if (!sock->data_sock_sent) {
992 status = send_consumer_relayd_socket(session_id,
993 &consumer->dst.net.data,
994 consumer,
995 sock,
996 session_name,
997 hostname,
998 base_path,
999 session_live_timer,
1000 current_chunk_id,
1001 session_creation_time,
1002 session_name_contains_creation_time);
1003 if (status != LTTNG_OK) {
1004 goto error;
1005 }
1006 }
1007
1008 error:
1009 return status;
1010 }
1011
1012 /*
1013 * Set up relayd connections for a tracing session. First create the sockets to
1014 * the relayd and send them to the right domain consumer. Consumer type MUST be
1015 * network.
1016 */
1017 int cmd_setup_relayd(const ltt_session::locked_ref& session)
1018 {
1019 int ret = LTTNG_OK;
1020 struct ltt_ust_session *usess;
1021 struct ltt_kernel_session *ksess;
1022 struct consumer_socket *socket;
1023 struct lttng_ht_iter iter;
1024 LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
1025
1026 usess = session->ust_session;
1027 ksess = session->kernel_session;
1028
1029 DBG("Setting relayd for session %s", session->name);
1030
1031 if (session->current_trace_chunk) {
1032 const lttng_trace_chunk_status status = lttng_trace_chunk_get_id(
1033 session->current_trace_chunk, &current_chunk_id.value);
1034
1035 if (status == LTTNG_TRACE_CHUNK_STATUS_OK) {
1036 current_chunk_id.is_set = true;
1037 } else {
1038 ERR("Failed to get current trace chunk id");
1039 ret = LTTNG_ERR_UNK;
1040 goto error;
1041 }
1042 }
1043
1044 if (usess && usess->consumer && usess->consumer->type == CONSUMER_DST_NET &&
1045 usess->consumer->enabled) {
1046 /* For each consumer socket, send relayd sockets */
1047 const lttng::urcu::read_lock_guard read_lock;
1048
1049 cds_lfht_for_each_entry (
1050 usess->consumer->socks->ht, &iter.iter, socket, node.node) {
1051 pthread_mutex_lock(socket->lock);
1052 ret = send_consumer_relayd_sockets(
1053 session->id,
1054 usess->consumer,
1055 socket,
1056 session->name,
1057 session->hostname,
1058 session->base_path,
1059 session->live_timer,
1060 current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
1061 session->creation_time,
1062 session->name_contains_creation_time);
1063 pthread_mutex_unlock(socket->lock);
1064 if (ret != LTTNG_OK) {
1065 goto error;
1066 }
1067 /* Session is now ready for network streaming. */
1068 session->net_handle = 1;
1069 }
1070
1071 session->consumer->relay_major_version = usess->consumer->relay_major_version;
1072 session->consumer->relay_minor_version = usess->consumer->relay_minor_version;
1073 session->consumer->relay_allows_clear = usess->consumer->relay_allows_clear;
1074 }
1075
1076 if (ksess && ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET &&
1077 ksess->consumer->enabled) {
1078 const lttng::urcu::read_lock_guard read_lock;
1079
1080 cds_lfht_for_each_entry (
1081 ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
1082 pthread_mutex_lock(socket->lock);
1083 ret = send_consumer_relayd_sockets(
1084 session->id,
1085 ksess->consumer,
1086 socket,
1087 session->name,
1088 session->hostname,
1089 session->base_path,
1090 session->live_timer,
1091 current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
1092 session->creation_time,
1093 session->name_contains_creation_time);
1094 pthread_mutex_unlock(socket->lock);
1095 if (ret != LTTNG_OK) {
1096 goto error;
1097 }
1098 /* Session is now ready for network streaming. */
1099 session->net_handle = 1;
1100 }
1101
1102 session->consumer->relay_major_version = ksess->consumer->relay_major_version;
1103 session->consumer->relay_minor_version = ksess->consumer->relay_minor_version;
1104 session->consumer->relay_allows_clear = ksess->consumer->relay_allows_clear;
1105 }
1106
1107 error:
1108 return ret;
1109 }
1110
1111 /*
1112 * Start a kernel session by opening all necessary streams.
1113 */
1114 int start_kernel_session(struct ltt_kernel_session *ksess)
1115 {
1116 int ret;
1117 struct ltt_kernel_channel *kchan;
1118
1119 /* Open kernel metadata */
1120 if (ksess->metadata == nullptr && ksess->output_traces) {
1121 ret = kernel_open_metadata(ksess);
1122 if (ret < 0) {
1123 ret = LTTNG_ERR_KERN_META_FAIL;
1124 goto error;
1125 }
1126 }
1127
1128 /* Open kernel metadata stream */
1129 if (ksess->metadata && ksess->metadata_stream_fd < 0) {
1130 ret = kernel_open_metadata_stream(ksess);
1131 if (ret < 0) {
1132 ERR("Kernel create metadata stream failed");
1133 ret = LTTNG_ERR_KERN_STREAM_FAIL;
1134 goto error;
1135 }
1136 }
1137
1138 /* For each channel */
1139 cds_list_for_each_entry (kchan, &ksess->channel_list.head, list) {
1140 if (kchan->stream_count == 0) {
1141 ret = kernel_open_channel_stream(kchan);
1142 if (ret < 0) {
1143 ret = LTTNG_ERR_KERN_STREAM_FAIL;
1144 goto error;
1145 }
1146 /* Update the stream global counter */
1147 ksess->stream_count_global += ret;
1148 }
1149 }
1150
1151 /* Setup kernel consumer socket and send fds to it */
1152 ret = init_kernel_tracing(ksess);
1153 if (ret != 0) {
1154 ret = LTTNG_ERR_KERN_START_FAIL;
1155 goto error;
1156 }
1157
1158 /* This starts kernel tracing. */
1159 ret = kernel_start_session(ksess);
1160 if (ret < 0) {
1161 ret = LTTNG_ERR_KERN_START_FAIL;
1162 goto error;
1163 }
1164
1165 /* Quiescent wait after starting trace */
1166 kernel_wait_quiescent();
1167
1168 ksess->active = true;
1169
1170 ret = LTTNG_OK;
1171
1172 error:
1173 return ret;
1174 }
1175
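/*
 * Stop an active kernel session: stop the tracer, flush the metadata stream
 * and all channel buffers, and mark the session as inactive.
 */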
1176 int stop_kernel_session(struct ltt_kernel_session *ksess)
1177 {
1178 struct ltt_kernel_channel *kchan;
1179 bool error_occurred = false;
1180 int ret;
1181
1182 if (!ksess || !ksess->active) {
1183 return LTTNG_OK;
1184 }
1185 DBG("Stopping kernel tracing");
1186
1187 ret = kernel_stop_session(ksess);
1188 if (ret < 0) {
1189 ret = LTTNG_ERR_KERN_STOP_FAIL;
1190 goto error;
1191 }
1192
1193 kernel_wait_quiescent();
1194
1195 /* Flush metadata after stopping (if exists) */
1196 if (ksess->metadata_stream_fd >= 0) {
1197 ret = kernel_metadata_flush_buffer(ksess->metadata_stream_fd);
1198 if (ret < 0) {
1199 ERR("Kernel metadata flush failed");
1200 error_occurred = true;
1201 }
1202 }
1203
1204 /* Flush all buffers after stopping */
1205 cds_list_for_each_entry (kchan, &ksess->channel_list.head, list) {
1206 ret = kernel_flush_buffer(kchan);
1207 if (ret < 0) {
1208 ERR("Kernel flush buffer error");
1209 error_occurred = true;
1210 }
1211 }
1212
1213 ksess->active = false;
1214 if (error_occurred) {
1215 ret = LTTNG_ERR_UNK;
1216 } else {
1217 ret = LTTNG_OK;
1218 }
1219 error:
1220 return ret;
1221 }
1222
1223 /*
1224 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
1225 */
1226 int cmd_disable_channel(const ltt_session::locked_ref& session,
1227 enum lttng_domain_type domain,
1228 char *channel_name)
1229 {
1230 int ret;
1231 struct ltt_ust_session *usess;
1232
1233 usess = session->ust_session;
1234
1235 const lttng::urcu::read_lock_guard read_lock;
1236
1237 switch (domain) {
1238 case LTTNG_DOMAIN_KERNEL:
1239 {
1240 ret = channel_kernel_disable(session->kernel_session, channel_name);
1241 if (ret != LTTNG_OK) {
1242 goto error;
1243 }
1244
1245 kernel_wait_quiescent();
1246 break;
1247 }
1248 case LTTNG_DOMAIN_UST:
1249 {
1250 struct ltt_ust_channel *uchan;
1251 struct lttng_ht *chan_ht;
1252
1253 chan_ht = usess->domain_global.channels;
1254
1255 uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
1256 if (uchan == nullptr) {
1257 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
1258 goto error;
1259 }
1260
1261 ret = channel_ust_disable(usess, uchan);
1262 if (ret != LTTNG_OK) {
1263 goto error;
1264 }
1265 break;
1266 }
1267 default:
1268 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
1269 goto error;
1270 }
1271
1272 ret = LTTNG_OK;
1273
1274 error:
1275 return ret;
1276 }
1277
1278 /*
1279 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
1280 *
1281 * The wpipe argument is used as a notifier for the kernel thread.
1282 */
1283 int cmd_enable_channel(command_ctx *cmd_ctx, ltt_session::locked_ref& session, int sock, int wpipe)
1284 {
1285 int ret;
1286 size_t channel_len;
1287 ssize_t sock_recv_len;
1288 struct lttng_channel *channel = nullptr;
1289 struct lttng_buffer_view view;
1290 struct lttng_dynamic_buffer channel_buffer;
1291 const struct lttng_domain command_domain = cmd_ctx->lsm.domain;
1292
1293 lttng_dynamic_buffer_init(&channel_buffer);
1294 channel_len = (size_t) cmd_ctx->lsm.u.channel.length;
1295 ret = lttng_dynamic_buffer_set_size(&channel_buffer, channel_len);
1296 if (ret) {
1297 ret = LTTNG_ERR_NOMEM;
1298 goto end;
1299 }
1300
1301 sock_recv_len = lttcomm_recv_unix_sock(sock, channel_buffer.data, channel_len);
1302 if (sock_recv_len < 0 || sock_recv_len != channel_len) {
1303 ERR("Failed to receive \"enable channel\" command payload");
1304 ret = LTTNG_ERR_INVALID;
1305 goto end;
1306 }
1307
1308 view = lttng_buffer_view_from_dynamic_buffer(&channel_buffer, 0, channel_len);
1309 if (!lttng_buffer_view_is_valid(&view)) {
1310 ret = LTTNG_ERR_INVALID;
1311 goto end;
1312 }
1313
1314 if (lttng_channel_create_from_buffer(&view, &channel) != channel_len) {
1315 ERR("Invalid channel payload received in \"enable channel\" command");
1316 ret = LTTNG_ERR_INVALID;
1317 goto end;
1318 }
1319
1320 ret = cmd_enable_channel_internal(session, &command_domain, channel, wpipe);
1321
1322 end:
1323 lttng_dynamic_buffer_reset(&channel_buffer);
1324 lttng_channel_destroy(channel);
1325 return ret;
1326 }
1327
1328 static enum lttng_error_code cmd_enable_channel_internal(ltt_session::locked_ref& session,
1329 const struct lttng_domain *domain,
1330 const struct lttng_channel *_attr,
1331 int wpipe)
1332 {
1333 enum lttng_error_code ret_code;
1334 struct ltt_ust_session *usess = session->ust_session;
1335 struct lttng_ht *chan_ht;
1336 size_t len;
1337 struct lttng_channel *attr = nullptr;
1338
1339 LTTNG_ASSERT(_attr);
1340 LTTNG_ASSERT(domain);
1341
1342 const lttng::urcu::read_lock_guard read_lock;
1343
1344 attr = lttng_channel_copy(_attr);
1345 if (!attr) {
1346 ret_code = LTTNG_ERR_NOMEM;
1347 goto end;
1348 }
1349
1350 len = lttng_strnlen(attr->name, sizeof(attr->name));
1351
1352 /* Validate channel name */
1353 if (attr->name[0] == '.' || memchr(attr->name, '/', len) != nullptr) {
1354 ret_code = LTTNG_ERR_INVALID_CHANNEL_NAME;
1355 goto end;
1356 }
1357
1358 DBG("Enabling channel %s for session %s", attr->name, session->name);
1359
1360 /*
1361 * If the session is a live session, remove the switch timer; the
1362 * live timer does the same thing but also sends synchronisation
1363 * beacons for inactive streams.
1364 */
1365 if (session->live_timer > 0) {
1366 attr->attr.live_timer_interval = session->live_timer;
1367 attr->attr.switch_timer_interval = 0;
1368 }
1369
1370 /* Check for feature support */
1371 switch (domain->type) {
1372 case LTTNG_DOMAIN_KERNEL:
1373 {
1374 if (kernel_supports_ring_buffer_snapshot_sample_positions() != 1) {
1375 /* Sampling position of buffer is not supported */
1376 WARN("Kernel tracer does not support buffer monitoring. "
1377 "Setting the monitor interval timer to 0 "
1378 "(disabled) for channel '%s' of session '%s'",
1379 attr->name,
1380 session->name);
1381 lttng_channel_set_monitor_timer_interval(attr, 0);
1382 }
1383 break;
1384 }
1385 case LTTNG_DOMAIN_UST:
1386 break;
1387 case LTTNG_DOMAIN_JUL:
1388 case LTTNG_DOMAIN_LOG4J:
1389 case LTTNG_DOMAIN_PYTHON:
1390 if (!agent_tracing_is_enabled()) {
1391 DBG("Attempted to enable a channel in an agent domain but the agent thread is not running");
1392 ret_code = LTTNG_ERR_AGENT_TRACING_DISABLED;
1393 goto error;
1394 }
1395 break;
1396 default:
1397 ret_code = LTTNG_ERR_UNKNOWN_DOMAIN;
1398 goto error;
1399 }
1400
1401 switch (domain->type) {
1402 case LTTNG_DOMAIN_KERNEL:
1403 {
1404 struct ltt_kernel_channel *kchan;
1405
1406 kchan = trace_kernel_get_channel_by_name(attr->name, session->kernel_session);
1407 if (kchan == nullptr) {
1408 /*
1409 * Don't try to create a channel if the session has been started at
1410 * some point in time before. The tracer does not allow it.
1411 */
1412 if (session->has_been_started) {
1413 ret_code = LTTNG_ERR_TRACE_ALREADY_STARTED;
1414 goto error;
1415 }
1416
1417 if (session->snapshot.nb_output > 0 || session->snapshot_mode) {
1418 /* Enforce mmap output for snapshot sessions. */
1419 attr->attr.output = LTTNG_EVENT_MMAP;
1420 }
1421 ret_code = channel_kernel_create(session->kernel_session, attr, wpipe);
1422 if (attr->name[0] != '\0') {
1423 session->kernel_session->has_non_default_channel = 1;
1424 }
1425 } else {
1426 ret_code = channel_kernel_enable(session->kernel_session, kchan);
1427 }
1428
1429 if (ret_code != LTTNG_OK) {
1430 goto error;
1431 }
1432
1433 kernel_wait_quiescent();
1434 break;
1435 }
1436 case LTTNG_DOMAIN_UST:
1437 case LTTNG_DOMAIN_JUL:
1438 case LTTNG_DOMAIN_LOG4J:
1439 case LTTNG_DOMAIN_PYTHON:
1440 {
1441 struct ltt_ust_channel *uchan;
1442
1443 /*
1444 * FIXME
1445 *
1446 * Current agent implementation limitations force us to allow
1447 * only one channel at a time in "agent" subdomains. Each
1448 * subdomain has a default channel name which must be strictly
1449 * adhered to.
1450 */
1451 if (domain->type == LTTNG_DOMAIN_JUL) {
1452 if (strncmp(attr->name,
1453 DEFAULT_JUL_CHANNEL_NAME,
1454 LTTNG_SYMBOL_NAME_LEN - 1) != 0) {
1455 ret_code = LTTNG_ERR_INVALID_CHANNEL_NAME;
1456 goto error;
1457 }
1458 } else if (domain->type == LTTNG_DOMAIN_LOG4J) {
1459 if (strncmp(attr->name,
1460 DEFAULT_LOG4J_CHANNEL_NAME,
1461 LTTNG_SYMBOL_NAME_LEN - 1) != 0) {
1462 ret_code = LTTNG_ERR_INVALID_CHANNEL_NAME;
1463 goto error;
1464 }
1465 } else if (domain->type == LTTNG_DOMAIN_PYTHON) {
1466 if (strncmp(attr->name,
1467 DEFAULT_PYTHON_CHANNEL_NAME,
1468 LTTNG_SYMBOL_NAME_LEN - 1) != 0) {
1469 ret_code = LTTNG_ERR_INVALID_CHANNEL_NAME;
1470 goto error;
1471 }
1472 }
1473
1474 chan_ht = usess->domain_global.channels;
1475
1476 uchan = trace_ust_find_channel_by_name(chan_ht, attr->name);
1477 if (uchan == nullptr) {
1478 /*
1479 * Don't try to create a channel if the session has been started at
1480 * some point in time before. The tracer does not allow it.
1481 */
1482 if (session->has_been_started) {
1483 ret_code = LTTNG_ERR_TRACE_ALREADY_STARTED;
1484 goto error;
1485 }
1486
1487 ret_code = channel_ust_create(usess, attr, domain->buf_type);
1488 if (attr->name[0] != '\0') {
1489 usess->has_non_default_channel = 1;
1490 }
1491 } else {
1492 ret_code = channel_ust_enable(usess, uchan);
1493 }
1494 break;
1495 }
1496 default:
1497 ret_code = LTTNG_ERR_UNKNOWN_DOMAIN;
1498 goto error;
1499 }
1500
1501 if (ret_code == LTTNG_OK && attr->attr.output != LTTNG_EVENT_MMAP) {
1502 session->has_non_mmap_channel = true;
1503 }
1504 error:
1505 end:
1506 lttng_channel_destroy(attr);
1507 return ret_code;
1508 }
1509
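/*
 * Return, through 'policy', the tracking policy of the process attribute
 * tracker matching the given domain and process attribute.
 */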
1510 enum lttng_error_code
1511 cmd_process_attr_tracker_get_tracking_policy(const ltt_session::locked_ref& session,
1512 enum lttng_domain_type domain,
1513 enum lttng_process_attr process_attr,
1514 enum lttng_tracking_policy *policy)
1515 {
1516 enum lttng_error_code ret_code = LTTNG_OK;
1517 const struct process_attr_tracker *tracker;
1518
1519 switch (domain) {
1520 case LTTNG_DOMAIN_KERNEL:
1521 if (!session->kernel_session) {
1522 ret_code = LTTNG_ERR_INVALID;
1523 goto end;
1524 }
1525 tracker = kernel_get_process_attr_tracker(session->kernel_session, process_attr);
1526 break;
1527 case LTTNG_DOMAIN_UST:
1528 if (!session->ust_session) {
1529 ret_code = LTTNG_ERR_INVALID;
1530 goto end;
1531 }
1532 tracker = trace_ust_get_process_attr_tracker(session->ust_session, process_attr);
1533 break;
1534 default:
1535 ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
1536 goto end;
1537 }
1538 if (tracker) {
1539 *policy = process_attr_tracker_get_tracking_policy(tracker);
1540 } else {
1541 ret_code = LTTNG_ERR_INVALID;
1542 }
1543 end:
1544 return ret_code;
1545 }
1546
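/*
 * Set the tracking policy (include set, include all, or exclude all) of the
 * process attribute tracker matching the given domain and process attribute.
 */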
1547 enum lttng_error_code
1548 cmd_process_attr_tracker_set_tracking_policy(const ltt_session::locked_ref& session,
1549 enum lttng_domain_type domain,
1550 enum lttng_process_attr process_attr,
1551 enum lttng_tracking_policy policy)
1552 {
1553 enum lttng_error_code ret_code = LTTNG_OK;
1554
1555 switch (policy) {
1556 case LTTNG_TRACKING_POLICY_INCLUDE_SET:
1557 case LTTNG_TRACKING_POLICY_EXCLUDE_ALL:
1558 case LTTNG_TRACKING_POLICY_INCLUDE_ALL:
1559 break;
1560 default:
1561 ret_code = LTTNG_ERR_INVALID;
1562 goto end;
1563 }
1564
1565 switch (domain) {
1566 case LTTNG_DOMAIN_KERNEL:
1567 if (!session->kernel_session) {
1568 ret_code = LTTNG_ERR_INVALID;
1569 goto end;
1570 }
1571 ret_code = kernel_process_attr_tracker_set_tracking_policy(
1572 session->kernel_session, process_attr, policy);
1573 break;
1574 case LTTNG_DOMAIN_UST:
1575 if (!session->ust_session) {
1576 ret_code = LTTNG_ERR_INVALID;
1577 goto end;
1578 }
1579 ret_code = trace_ust_process_attr_tracker_set_tracking_policy(
1580 session->ust_session, process_attr, policy);
1581 break;
1582 default:
1583 ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
1584 break;
1585 }
1586 end:
1587 return ret_code;
1588 }
1589
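/*
 * Add a value to the inclusion set of the process attribute tracker matching
 * the given domain and process attribute.
 */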
1590 enum lttng_error_code
1591 cmd_process_attr_tracker_inclusion_set_add_value(const ltt_session::locked_ref& session,
1592 enum lttng_domain_type domain,
1593 enum lttng_process_attr process_attr,
1594 const struct process_attr_value *value)
1595 {
1596 enum lttng_error_code ret_code = LTTNG_OK;
1597
1598 switch (domain) {
1599 case LTTNG_DOMAIN_KERNEL:
1600 if (!session->kernel_session) {
1601 ret_code = LTTNG_ERR_INVALID;
1602 goto end;
1603 }
1604 ret_code = kernel_process_attr_tracker_inclusion_set_add_value(
1605 session->kernel_session, process_attr, value);
1606 break;
1607 case LTTNG_DOMAIN_UST:
1608 if (!session->ust_session) {
1609 ret_code = LTTNG_ERR_INVALID;
1610 goto end;
1611 }
1612 ret_code = trace_ust_process_attr_tracker_inclusion_set_add_value(
1613 session->ust_session, process_attr, value);
1614 break;
1615 default:
1616 ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
1617 break;
1618 }
1619 end:
1620 return ret_code;
1621 }
1622
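/*
 * Remove a value from the inclusion set of the process attribute tracker
 * matching the given domain and process attribute.
 */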
1623 enum lttng_error_code
1624 cmd_process_attr_tracker_inclusion_set_remove_value(const ltt_session::locked_ref& session,
1625 enum lttng_domain_type domain,
1626 enum lttng_process_attr process_attr,
1627 const struct process_attr_value *value)
1628 {
1629 enum lttng_error_code ret_code = LTTNG_OK;
1630
1631 switch (domain) {
1632 case LTTNG_DOMAIN_KERNEL:
1633 if (!session->kernel_session) {
1634 ret_code = LTTNG_ERR_INVALID;
1635 goto end;
1636 }
1637 ret_code = kernel_process_attr_tracker_inclusion_set_remove_value(
1638 session->kernel_session, process_attr, value);
1639 break;
1640 case LTTNG_DOMAIN_UST:
1641 if (!session->ust_session) {
1642 ret_code = LTTNG_ERR_INVALID;
1643 goto end;
1644 }
1645 ret_code = trace_ust_process_attr_tracker_inclusion_set_remove_value(
1646 session->ust_session, process_attr, value);
1647 break;
1648 default:
1649 ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
1650 break;
1651 }
1652 end:
1653 return ret_code;
1654 }
1655
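/*
 * Return, through 'values', the current inclusion set of the process attribute
 * tracker matching the given domain and process attribute.
 */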
1656 enum lttng_error_code
1657 cmd_process_attr_tracker_get_inclusion_set(const ltt_session::locked_ref& session,
1658 enum lttng_domain_type domain,
1659 enum lttng_process_attr process_attr,
1660 struct lttng_process_attr_values **values)
1661 {
1662 enum lttng_error_code ret_code = LTTNG_OK;
1663 const struct process_attr_tracker *tracker;
1664 enum process_attr_tracker_status status;
1665
1666 switch (domain) {
1667 case LTTNG_DOMAIN_KERNEL:
1668 if (!session->kernel_session) {
1669 ret_code = LTTNG_ERR_INVALID;
1670 goto end;
1671 }
1672 tracker = kernel_get_process_attr_tracker(session->kernel_session, process_attr);
1673 break;
1674 case LTTNG_DOMAIN_UST:
1675 if (!session->ust_session) {
1676 ret_code = LTTNG_ERR_INVALID;
1677 goto end;
1678 }
1679 tracker = trace_ust_get_process_attr_tracker(session->ust_session, process_attr);
1680 break;
1681 default:
1682 ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
1683 goto end;
1684 }
1685
1686 if (!tracker) {
1687 ret_code = LTTNG_ERR_INVALID;
1688 goto end;
1689 }
1690
1691 status = process_attr_tracker_get_inclusion_set(tracker, values);
1692 switch (status) {
1693 case PROCESS_ATTR_TRACKER_STATUS_OK:
1694 ret_code = LTTNG_OK;
1695 break;
1696 case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
1697 ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
1698 break;
1699 case PROCESS_ATTR_TRACKER_STATUS_ERROR:
1700 ret_code = LTTNG_ERR_NOMEM;
1701 break;
1702 default:
1703 ret_code = LTTNG_ERR_UNK;
1704 break;
1705 }
1706
1707 end:
1708 return ret_code;
1709 }
1710
1711 /*
1712 * Command LTTNG_DISABLE_EVENT processed by the client thread.
1713 */
1714 int cmd_disable_event(struct command_ctx *cmd_ctx,
1715 ltt_session::locked_ref& locked_session,
1716 struct lttng_event *event,
1717 char *filter_expression,
1718 struct lttng_bytecode *bytecode,
1719 struct lttng_event_exclusion *exclusion)
1720 {
1721 int ret;
1722 const ltt_session& session = *locked_session;
1723 const char *event_name;
1724 const char *channel_name = cmd_ctx->lsm.u.disable.channel_name;
1725 const enum lttng_domain_type domain = cmd_ctx->lsm.domain.type;
1726
1727 DBG("Disable event command for event \'%s\'", event->name);
1728
1729 /*
1730 * Filter and exclusions are simply not handled by the
1731 * disable event command at this time.
1732 *
1733 * FIXME
1734 */
1735 (void) filter_expression;
1736 (void) exclusion;
1737
1738 /* Ignore the presence of filter or exclusion for the event */
1739 event->filter = 0;
1740 event->exclusion = 0;
1741
1742 event_name = event->name;
1743
1744 const lttng::urcu::read_lock_guard read_lock;
1745
1746 /* Error out on unhandled search criteria */
1747 if (event->loglevel_type || event->loglevel != -1 || event->enabled || event->pid ||
1748 event->filter || event->exclusion) {
1749 ret = LTTNG_ERR_UNK;
1750 goto error;
1751 }
1752
1753 switch (domain) {
1754 case LTTNG_DOMAIN_KERNEL:
1755 {
1756 struct ltt_kernel_channel *kchan;
1757 struct ltt_kernel_session *ksess;
1758
1759 ksess = session.kernel_session;
1760
1761 /*
1762 * If a non-default channel has been created in the
1763 * session, explicitly require that -c chan_name needs
1764 * to be provided.
1765 */
1766 if (ksess->has_non_default_channel && channel_name[0] == '\0') {
1767 ret = LTTNG_ERR_NEED_CHANNEL_NAME;
1768 goto error_unlock;
1769 }
1770
1771 kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
1772 if (kchan == nullptr) {
1773 ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
1774 goto error_unlock;
1775 }
1776
1777 switch (event->type) {
1778 case LTTNG_EVENT_ALL:
1779 case LTTNG_EVENT_TRACEPOINT:
1780 case LTTNG_EVENT_SYSCALL:
1781 case LTTNG_EVENT_PROBE:
1782 case LTTNG_EVENT_FUNCTION:
1783 case LTTNG_EVENT_FUNCTION_ENTRY: /* fall-through */
1784 if (event_name[0] == '\0') {
1785 ret = event_kernel_disable_event(kchan, nullptr, event->type);
1786 } else {
1787 ret = event_kernel_disable_event(kchan, event_name, event->type);
1788 }
1789 if (ret != LTTNG_OK) {
1790 goto error_unlock;
1791 }
1792 break;
1793 default:
1794 ret = LTTNG_ERR_UNK;
1795 goto error_unlock;
1796 }
1797
1798 kernel_wait_quiescent();
1799 break;
1800 }
1801 case LTTNG_DOMAIN_UST:
1802 {
1803 struct ltt_ust_channel *uchan;
1804 struct ltt_ust_session *usess;
1805
1806 usess = session.ust_session;
1807
1808 if (validate_ust_event_name(event_name)) {
1809 ret = LTTNG_ERR_INVALID_EVENT_NAME;
1810 goto error_unlock;
1811 }
1812
1813 /*
1814 * If a non-default channel has been created in the
1815 * session, explicitly require that -c chan_name needs
1816 * to be provided.
1817 */
1818 if (usess->has_non_default_channel && channel_name[0] == '\0') {
1819 ret = LTTNG_ERR_NEED_CHANNEL_NAME;
1820 goto error_unlock;
1821 }
1822
1823 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels, channel_name);
1824 if (uchan == nullptr) {
1825 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
1826 goto error_unlock;
1827 }
1828
1829 switch (event->type) {
1830 case LTTNG_EVENT_ALL:
1831 /*
1832 * An empty event name means that everything
1833 * should be disabled.
1834 */
1835 if (event->name[0] == '\0') {
1836 ret = event_ust_disable_all_tracepoints(usess, uchan);
1837 } else {
1838 ret = event_ust_disable_tracepoint(usess, uchan, event_name);
1839 }
1840 if (ret != LTTNG_OK) {
1841 goto error_unlock;
1842 }
1843 break;
1844 default:
1845 ret = LTTNG_ERR_UNK;
1846 goto error_unlock;
1847 }
1848
1849 DBG3("Disable UST event %s in channel %s completed", event_name, channel_name);
1850 break;
1851 }
1852 case LTTNG_DOMAIN_LOG4J:
1853 case LTTNG_DOMAIN_JUL:
1854 case LTTNG_DOMAIN_PYTHON:
1855 {
1856 struct agent *agt;
1857 struct ltt_ust_session *usess = session.ust_session;
1858
1859 LTTNG_ASSERT(usess);
1860
1861 switch (event->type) {
1862 case LTTNG_EVENT_ALL:
1863 break;
1864 default:
1865 ret = LTTNG_ERR_UNK;
1866 goto error_unlock;
1867 }
1868
1869 agt = trace_ust_find_agent(usess, domain);
1870 if (!agt) {
1871 ret = -LTTNG_ERR_UST_EVENT_NOT_FOUND;
1872 goto error_unlock;
1873 }
1874 /*
1875 * An empty event name means that everything
1876 * should be disabled.
1877 */
1878 if (event->name[0] == '\0') {
1879 ret = event_agent_disable_all(usess, agt);
1880 } else {
1881 ret = event_agent_disable(usess, agt, event_name);
1882 }
1883 if (ret != LTTNG_OK) {
1884 goto error_unlock;
1885 }
1886
1887 break;
1888 }
1889 default:
1890 ret = LTTNG_ERR_UND;
1891 goto error_unlock;
1892 }
1893
1894 ret = LTTNG_OK;
1895
1896 error_unlock:
1897 error:
1898 free(exclusion);
1899 free(bytecode);
1900 free(filter_expression);
1901 return ret;
1902 }
1903
1904 /*
1905 * Command LTTNG_ADD_CONTEXT processed by the client thread.
1906 */
1907 int cmd_add_context(struct command_ctx *cmd_ctx,
1908 ltt_session::locked_ref& locked_session,
1909 const struct lttng_event_context *event_context,
1910 int kwpipe)
1911 {
1912 int ret, chan_kern_created = 0, chan_ust_created = 0;
1913 const enum lttng_domain_type domain = cmd_ctx->lsm.domain.type;
1914 const struct ltt_session& session = *locked_session;
1915 const char *channel_name = cmd_ctx->lsm.u.context.channel_name;
1916
1917 /*
1918 * Don't try to add a context if the session has been started at
1919 * some point in time before. The tracer does not allow it and would
1920 * result in a corrupted trace.
1921 */
1922 if (session.has_been_started) {
1923 ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
1924 goto end;
1925 }
1926
1927 switch (domain) {
1928 case LTTNG_DOMAIN_KERNEL:
1929 LTTNG_ASSERT(session.kernel_session);
1930
1931 if (session.kernel_session->channel_count == 0) {
1932 /* Create default channel */
1933 ret = channel_kernel_create(session.kernel_session, nullptr, kwpipe);
1934 if (ret != LTTNG_OK) {
1935 goto error;
1936 }
1937 chan_kern_created = 1;
1938 }
1939 /* Add kernel context to kernel tracer */
1940 ret = context_kernel_add(session.kernel_session, event_context, channel_name);
1941 if (ret != LTTNG_OK) {
1942 goto error;
1943 }
1944 break;
1945 case LTTNG_DOMAIN_JUL:
1946 case LTTNG_DOMAIN_LOG4J:
1947 {
1948 /*
1949 * Validate channel name.
1950 * If no channel name is given and the domain is JUL or LOG4J,
1951 * set it to the appropriate domain-specific channel name. If
1952 * a name is provided but does not match the expected channel
1953 * name, return an error.
1954 */
1955 if (domain == LTTNG_DOMAIN_JUL && *channel_name &&
1956 strcmp(channel_name, DEFAULT_JUL_CHANNEL_NAME) != 0) {
1957 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
1958 goto error;
1959 } else if (domain == LTTNG_DOMAIN_LOG4J && *channel_name &&
1960 strcmp(channel_name, DEFAULT_LOG4J_CHANNEL_NAME) != 0) {
1961 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
1962 goto error;
1963 }
1964 }
1965 /* fall through */
1966 case LTTNG_DOMAIN_UST:
1967 {
1968 struct ltt_ust_session *usess = session.ust_session;
1969 unsigned int chan_count;
1970
1971 LTTNG_ASSERT(usess);
1972
1973 chan_count = lttng_ht_get_count(usess->domain_global.channels);
1974 if (chan_count == 0) {
1975 struct lttng_channel *attr;
1976 /* Create default channel */
1977 attr = channel_new_default_attr(domain, usess->buffer_type);
1978 if (attr == nullptr) {
1979 ret = LTTNG_ERR_FATAL;
1980 goto error;
1981 }
1982
1983 ret = channel_ust_create(usess, attr, usess->buffer_type);
1984 if (ret != LTTNG_OK) {
1985 free(attr);
1986 goto error;
1987 }
1988 channel_attr_destroy(attr);
1989 chan_ust_created = 1;
1990 }
1991
1992 ret = context_ust_add(usess, domain, event_context, channel_name);
1993 if (ret != LTTNG_OK) {
1994 goto error;
1995 }
1996 break;
1997 }
1998 default:
1999 ret = LTTNG_ERR_UND;
2000 goto error;
2001 }
2002
2003 ret = LTTNG_OK;
2004 goto end;
2005
2006 error:
2007 if (chan_kern_created) {
2008 struct ltt_kernel_channel *kchan = trace_kernel_get_channel_by_name(
2009 DEFAULT_CHANNEL_NAME, session.kernel_session);
2010 /* Created previously, this should NOT fail. */
2011 LTTNG_ASSERT(kchan);
2012 kernel_destroy_channel(kchan);
2013 }
2014
2015 if (chan_ust_created) {
2016 struct ltt_ust_channel *uchan = trace_ust_find_channel_by_name(
2017 session.ust_session->domain_global.channels, DEFAULT_CHANNEL_NAME);
2018 /* Created previously, this should NOT fail. */
2019 LTTNG_ASSERT(uchan);
2020 /* Remove from the channel list of the session. */
2021 trace_ust_delete_channel(session.ust_session->domain_global.channels, uchan);
2022 trace_ust_destroy_channel(uchan);
2023 }
2024 end:
2025 return ret;
2026 }
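/*
 * A minimal client-side sketch of how this command is typically reached
 * through liblttng-ctl; the session name, channel name and context type
 * below are illustrative only, not taken from this file.
 *
 *   struct lttng_domain dom = {};
 *   struct lttng_event_context ctx = {};
 *   struct lttng_handle *handle;
 *
 *   dom.type = LTTNG_DOMAIN_KERNEL;
 *   handle = lttng_create_handle("my-session", &dom);
 *   ctx.ctx = LTTNG_EVENT_CONTEXT_PID;
 *   // Fails with LTTNG_ERR_TRACE_ALREADY_STARTED once the session has started.
 *   lttng_add_context(handle, &ctx, NULL, "channel0");
 *   lttng_destroy_handle(handle);
 */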
2027
2028 static inline bool name_starts_with(const char *name, const char *prefix)
2029 {
2030 const size_t max_cmp_len = std::min(strlen(prefix), (size_t) LTTNG_SYMBOL_NAME_LEN);
2031
2032 return !strncmp(name, prefix, max_cmp_len);
2033 }
2034
2035 /* Perform userspace-specific event name validation */
2036 static int validate_ust_event_name(const char *name)
2037 {
2038 int ret = 0;
2039
2040 if (!name) {
2041 ret = -1;
2042 goto end;
2043 }
2044
2045 /*
2046 * Check name against all internal UST event component namespaces used
2047 * by the agents.
2048 */
2049 if (name_starts_with(name, DEFAULT_JUL_EVENT_COMPONENT) ||
2050 name_starts_with(name, DEFAULT_LOG4J_EVENT_COMPONENT) ||
2051 name_starts_with(name, DEFAULT_PYTHON_EVENT_COMPONENT)) {
2052 ret = -1;
2053 }
2054
2055 end:
2056 return ret;
2057 }
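/*
 * A quick sketch of the validation behaviour above. The reserved prefixes
 * mirror the DEFAULT_*_EVENT_COMPONENT defaults; the literal strings shown
 * here are illustrative.
 *
 *   validate_ust_event_name("my_app:my_event");    // 0: accepted
 *   validate_ust_event_name("lttng_jul:event");    // -1: reserved for the JUL agent
 *   validate_ust_event_name("lttng_python:event"); // -1: reserved for the Python agent
 *   validate_ust_event_name(NULL);                 // -1: rejected
 */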
2058
2059 /*
2060 * Internal version of cmd_enable_event() with a supplemental
2061 * "internal_event" flag which is used to enable internal events which should
2062 * be hidden from clients. Such events are used in the agent implementation to
2063	 * enable the events through which all "agent" events are funneled.
2064 */
2065 static int _cmd_enable_event(ltt_session::locked_ref& locked_session,
2066 const struct lttng_domain *domain,
2067 char *channel_name,
2068 struct lttng_event *event,
2069 char *filter_expression,
2070 struct lttng_bytecode *filter,
2071 struct lttng_event_exclusion *exclusion,
2072 int wpipe,
2073 bool internal_event)
2074 {
2075 int ret = 0, channel_created = 0;
2076 struct lttng_channel *attr = nullptr;
2077 const ltt_session& session = *locked_session;
2078
2079 LTTNG_ASSERT(event);
2080 LTTNG_ASSERT(channel_name);
2081
2082 /* If we have a filter, we must have its filter expression */
2083 LTTNG_ASSERT(!(!!filter_expression ^ !!filter));
2084
2085 /* Normalize event name as a globbing pattern */
2086 strutils_normalize_star_glob_pattern(event->name);
2087
2088 /* Normalize exclusion names as globbing patterns */
2089 if (exclusion) {
2090 size_t i;
2091
2092 for (i = 0; i < exclusion->count; i++) {
2093 char *name = LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, i);
2094
2095 strutils_normalize_star_glob_pattern(name);
2096 }
2097 }
2098
2099 const lttng::urcu::read_lock_guard read_lock;
2100
2101 switch (domain->type) {
2102 case LTTNG_DOMAIN_KERNEL:
2103 {
2104 struct ltt_kernel_channel *kchan;
2105
2106 /*
2107 * If a non-default channel has been created in the
2108	 * session, explicitly require that -c chan_name be
2109	 * provided.
2110 */
2111 if (session.kernel_session->has_non_default_channel && channel_name[0] == '\0') {
2112 ret = LTTNG_ERR_NEED_CHANNEL_NAME;
2113 goto error;
2114 }
2115
2116 kchan = trace_kernel_get_channel_by_name(channel_name, session.kernel_session);
2117 if (kchan == nullptr) {
2118 attr = channel_new_default_attr(LTTNG_DOMAIN_KERNEL, LTTNG_BUFFER_GLOBAL);
2119 if (attr == nullptr) {
2120 ret = LTTNG_ERR_FATAL;
2121 goto error;
2122 }
2123 if (lttng_strncpy(attr->name, channel_name, sizeof(attr->name))) {
2124 ret = LTTNG_ERR_INVALID;
2125 goto error;
2126 }
2127
2128 ret = cmd_enable_channel_internal(locked_session, domain, attr, wpipe);
2129 if (ret != LTTNG_OK) {
2130 goto error;
2131 }
2132 channel_created = 1;
2133 }
2134
2135 /* Get the newly created kernel channel pointer */
2136 kchan = trace_kernel_get_channel_by_name(channel_name, session.kernel_session);
2137 if (kchan == nullptr) {
2138	 /* This should not happen... */
2139 ret = LTTNG_ERR_FATAL;
2140 goto error;
2141 }
2142
2143 switch (event->type) {
2144 case LTTNG_EVENT_ALL:
2145 {
2146 char *filter_expression_a = nullptr;
2147 struct lttng_bytecode *filter_a = nullptr;
2148
2149 /*
2150	 * We need to duplicate filter_expression and filter
2151	 * because their ownership is passed to the first
2152	 * enable-event call.
2153 */
2154 if (filter_expression) {
2155 filter_expression_a = strdup(filter_expression);
2156 if (!filter_expression_a) {
2157 ret = LTTNG_ERR_FATAL;
2158 goto error;
2159 }
2160 }
2161 if (filter) {
2162 filter_a = zmalloc<lttng_bytecode>(sizeof(*filter_a) + filter->len);
2163 if (!filter_a) {
2164 free(filter_expression_a);
2165 ret = LTTNG_ERR_FATAL;
2166 goto error;
2167 }
2168 memcpy(filter_a, filter, sizeof(*filter_a) + filter->len);
2169 }
2170 event->type = LTTNG_EVENT_TRACEPOINT; /* Hack */
2171 ret = event_kernel_enable_event(kchan, event, filter_expression, filter);
2172 /* We have passed ownership */
2173 filter_expression = nullptr;
2174 filter = nullptr;
2175 if (ret != LTTNG_OK) {
2176 if (channel_created) {
2177 /* Let's not leak a useless channel. */
2178 kernel_destroy_channel(kchan);
2179 }
2180 free(filter_expression_a);
2181 free(filter_a);
2182 goto error;
2183 }
2184 event->type = LTTNG_EVENT_SYSCALL; /* Hack */
2185 ret = event_kernel_enable_event(
2186 kchan, event, filter_expression_a, filter_a);
2187 /* We have passed ownership */
2188 filter_expression_a = nullptr;
2189 filter_a = nullptr;
2190 if (ret != LTTNG_OK) {
2191 goto error;
2192 }
2193 break;
2194 }
2195 case LTTNG_EVENT_PROBE:
2196 case LTTNG_EVENT_USERSPACE_PROBE:
2197 case LTTNG_EVENT_FUNCTION:
2198 case LTTNG_EVENT_FUNCTION_ENTRY:
2199 case LTTNG_EVENT_TRACEPOINT:
2200 ret = event_kernel_enable_event(kchan, event, filter_expression, filter);
2201 /* We have passed ownership */
2202 filter_expression = nullptr;
2203 filter = nullptr;
2204 if (ret != LTTNG_OK) {
2205 if (channel_created) {
2206 /* Let's not leak a useless channel. */
2207 kernel_destroy_channel(kchan);
2208 }
2209 goto error;
2210 }
2211 break;
2212 case LTTNG_EVENT_SYSCALL:
2213 ret = event_kernel_enable_event(kchan, event, filter_expression, filter);
2214 /* We have passed ownership */
2215 filter_expression = nullptr;
2216 filter = nullptr;
2217 if (ret != LTTNG_OK) {
2218 goto error;
2219 }
2220 break;
2221 default:
2222 ret = LTTNG_ERR_UNK;
2223 goto error;
2224 }
2225
2226 kernel_wait_quiescent();
2227 break;
2228 }
2229 case LTTNG_DOMAIN_UST:
2230 {
2231 struct ltt_ust_channel *uchan;
2232 struct ltt_ust_session *usess = session.ust_session;
2233
2234 LTTNG_ASSERT(usess);
2235
2236 /*
2237 * If a non-default channel has been created in the
2238	 * session, explicitly require that -c chan_name be
2239	 * provided.
2240 */
2241 if (usess->has_non_default_channel && channel_name[0] == '\0') {
2242 ret = LTTNG_ERR_NEED_CHANNEL_NAME;
2243 goto error;
2244 }
2245
2246 /* Get channel from global UST domain */
2247 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels, channel_name);
2248 if (uchan == nullptr) {
2249 /* Create default channel */
2250 attr = channel_new_default_attr(LTTNG_DOMAIN_UST, usess->buffer_type);
2251 if (attr == nullptr) {
2252 ret = LTTNG_ERR_FATAL;
2253 goto error;
2254 }
2255 if (lttng_strncpy(attr->name, channel_name, sizeof(attr->name))) {
2256 ret = LTTNG_ERR_INVALID;
2257 goto error;
2258 }
2259
2260 ret = cmd_enable_channel_internal(locked_session, domain, attr, wpipe);
2261 if (ret != LTTNG_OK) {
2262 goto error;
2263 }
2264
2265 /* Get the newly created channel reference back */
2266 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2267 channel_name);
2268 LTTNG_ASSERT(uchan);
2269 }
2270
2271 if (uchan->domain != LTTNG_DOMAIN_UST && !internal_event) {
2272 /*
2273 * Don't allow users to add UST events to channels which
2274 * are assigned to a userspace subdomain (JUL, Log4J,
2275 * Python, etc.).
2276 */
2277 ret = LTTNG_ERR_INVALID_CHANNEL_DOMAIN;
2278 goto error;
2279 }
2280
2281 if (!internal_event) {
2282 /*
2283 * Ensure the event name is not reserved for internal
2284 * use.
2285 */
2286 ret = validate_ust_event_name(event->name);
2287 if (ret) {
2288 WARN("Userspace event name %s failed validation.", event->name);
2289 ret = LTTNG_ERR_INVALID_EVENT_NAME;
2290 goto error;
2291 }
2292 }
2293
2294 /* At this point, the session and channel exist on the tracer */
2295 ret = event_ust_enable_tracepoint(
2296 usess, uchan, event, filter_expression, filter, exclusion, internal_event);
2297 /* We have passed ownership */
2298 filter_expression = nullptr;
2299 filter = nullptr;
2300 exclusion = nullptr;
2301 if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
2302 goto already_enabled;
2303 } else if (ret != LTTNG_OK) {
2304 goto error;
2305 }
2306 break;
2307 }
2308 case LTTNG_DOMAIN_LOG4J:
2309 case LTTNG_DOMAIN_JUL:
2310 case LTTNG_DOMAIN_PYTHON:
2311 {
2312 const char *default_event_name, *default_chan_name;
2313 struct agent *agt;
2314 struct lttng_event uevent;
2315 struct lttng_domain tmp_dom;
2316 struct ltt_ust_session *usess = session.ust_session;
2317
2318 LTTNG_ASSERT(usess);
2319
2320 if (!agent_tracing_is_enabled()) {
2321 DBG("Attempted to enable an event in an agent domain but the agent thread is not running");
2322 ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
2323 goto error;
2324 }
2325
2326 agt = trace_ust_find_agent(usess, domain->type);
2327 if (!agt) {
2328 agt = agent_create(domain->type);
2329 if (!agt) {
2330 ret = LTTNG_ERR_NOMEM;
2331 goto error;
2332 }
2333 agent_add(agt, usess->agents);
2334 }
2335
2336 /* Create the default tracepoint. */
2337 memset(&uevent, 0, sizeof(uevent));
2338 uevent.type = LTTNG_EVENT_TRACEPOINT;
2339 uevent.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
2340 uevent.loglevel = -1;
2341 default_event_name = event_get_default_agent_ust_name(domain->type);
2342 if (!default_event_name) {
2343 ret = LTTNG_ERR_FATAL;
2344 goto error;
2345 }
2346 strncpy(uevent.name, default_event_name, sizeof(uevent.name));
2347 uevent.name[sizeof(uevent.name) - 1] = '\0';
2348
2349 /*
2350	 * The domain type is changed because we are about to enable the
2351	 * hardcoded default channel and event of the agent domain, and
2352	 * those are implemented in the UST domain.
2353 */
2354 memcpy(&tmp_dom, domain, sizeof(tmp_dom));
2355 tmp_dom.type = LTTNG_DOMAIN_UST;
2356
2357 switch (domain->type) {
2358 case LTTNG_DOMAIN_LOG4J:
2359 default_chan_name = DEFAULT_LOG4J_CHANNEL_NAME;
2360 break;
2361 case LTTNG_DOMAIN_JUL:
2362 default_chan_name = DEFAULT_JUL_CHANNEL_NAME;
2363 break;
2364 case LTTNG_DOMAIN_PYTHON:
2365 default_chan_name = DEFAULT_PYTHON_CHANNEL_NAME;
2366 break;
2367 default:
2368 /* The switch/case we are in makes this impossible */
2369 abort();
2370 }
2371
2372 {
2373 char *filter_expression_copy = nullptr;
2374 struct lttng_bytecode *filter_copy = nullptr;
2375
2376 if (filter) {
2377 const size_t filter_size =
2378 sizeof(struct lttng_bytecode) + filter->len;
2379
2380 filter_copy = zmalloc<lttng_bytecode>(filter_size);
2381 if (!filter_copy) {
2382 ret = LTTNG_ERR_NOMEM;
2383 goto error;
2384 }
2385 memcpy(filter_copy, filter, filter_size);
2386
2387 filter_expression_copy = strdup(filter_expression);
2388	 if (!filter_expression_copy) {
2389 ret = LTTNG_ERR_NOMEM;
2390 }
2391
2392 if (!filter_expression_copy || !filter_copy) {
2393 free(filter_expression_copy);
2394 free(filter_copy);
2395 goto error;
2396 }
2397 }
2398
2399 ret = cmd_enable_event_internal(locked_session,
2400 &tmp_dom,
2401 (char *) default_chan_name,
2402 &uevent,
2403 filter_expression_copy,
2404 filter_copy,
2405 nullptr,
2406 wpipe);
2407 }
2408
2409 if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
2410 goto already_enabled;
2411 } else if (ret != LTTNG_OK) {
2412 goto error;
2413 }
2414
2415 /* The wild card * means that everything should be enabled. */
2416 if (strncmp(event->name, "*", 1) == 0 && strlen(event->name) == 1) {
2417 ret = event_agent_enable_all(usess, agt, event, filter, filter_expression);
2418 } else {
2419 ret = event_agent_enable(usess, agt, event, filter, filter_expression);
2420 }
2421 filter = nullptr;
2422 filter_expression = nullptr;
2423 if (ret != LTTNG_OK) {
2424 goto error;
2425 }
2426
2427 break;
2428 }
2429 default:
2430 ret = LTTNG_ERR_UND;
2431 goto error;
2432 }
2433
2434 ret = LTTNG_OK;
2435
2436 already_enabled:
2437 error:
2438 free(filter_expression);
2439 free(filter);
2440 free(exclusion);
2441 channel_attr_destroy(attr);
2442 return ret;
2443 }
2444
2445 /*
2446 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2447 * We own filter, exclusion, and filter_expression.
2448 */
2449 int cmd_enable_event(struct command_ctx *cmd_ctx,
2450 ltt_session::locked_ref& locked_session,
2451 struct lttng_event *event,
2452 char *filter_expression,
2453 struct lttng_event_exclusion *exclusion,
2454 struct lttng_bytecode *bytecode,
2455 int wpipe)
2456 {
2457 int ret;
2458 /*
2459 * Copied to ensure proper alignment since 'lsm' is a packed structure.
2460 */
2461 const lttng_domain command_domain = cmd_ctx->lsm.domain;
2462
2463 /*
2464 * The ownership of the following parameters is transferred to
2465 * _cmd_enable_event:
2466 *
2467 * - filter_expression,
2468 * - bytecode,
2469 * - exclusion
2470 */
2471 ret = _cmd_enable_event(locked_session,
2472 &command_domain,
2473 cmd_ctx->lsm.u.enable.channel_name,
2474 event,
2475 filter_expression,
2476 bytecode,
2477 exclusion,
2478 wpipe,
2479 false);
2480 filter_expression = nullptr;
2481 bytecode = nullptr;
2482 exclusion = nullptr;
2483 return ret;
2484 }
2485
2486 /*
2487	 * Enable an event which is internal to LTTng. An internal event should
2488	 * never be made visible to clients and is immune to checks such as
2489 * reserved names.
2490 */
2491 static int cmd_enable_event_internal(ltt_session::locked_ref& locked_session,
2492 const struct lttng_domain *domain,
2493 char *channel_name,
2494 struct lttng_event *event,
2495 char *filter_expression,
2496 struct lttng_bytecode *filter,
2497 struct lttng_event_exclusion *exclusion,
2498 int wpipe)
2499 {
2500 return _cmd_enable_event(locked_session,
2501 domain,
2502 channel_name,
2503 event,
2504 filter_expression,
2505 filter,
2506 exclusion,
2507 wpipe,
2508 true);
2509 }
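/*
 * Ownership convention for _cmd_enable_event() and its wrappers above:
 * filter_expression, the filter bytecode and the exclusion are consumed on
 * every return path, so callers must not free them afterwards. A sketch of
 * the caller-side pattern, with hypothetical locals:
 *
 *   char *expr = strdup("intfield > 500");
 *   struct lttng_bytecode *bc = compile_filter(expr); // hypothetical helper
 *
 *   ret = cmd_enable_event(cmd_ctx, locked_session, event, expr,
 *                          NULL, bc, wpipe); // NULL: no exclusion
 *   // expr and bc now belong to the command layer; do not free() them here.
 */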
2510
2511 /*
2512 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
2513 */
2514 enum lttng_error_code cmd_list_tracepoints(enum lttng_domain_type domain,
2515 struct lttng_payload *reply_payload)
2516 {
2517 enum lttng_error_code ret_code;
2518 int ret;
2519 ssize_t i, nb_events = 0;
2520 struct lttng_event *events = nullptr;
2521 struct lttcomm_list_command_header reply_command_header = {};
2522 size_t reply_command_header_offset;
2523
2524 assert(reply_payload);
2525
2526 /* Reserve space for command reply header. */
2527 reply_command_header_offset = reply_payload->buffer.size;
2528 ret = lttng_dynamic_buffer_set_size(&reply_payload->buffer,
2529 reply_command_header_offset +
2530 sizeof(struct lttcomm_list_command_header));
2531 if (ret) {
2532 ret_code = LTTNG_ERR_NOMEM;
2533 goto error;
2534 }
2535
2536 switch (domain) {
2537 case LTTNG_DOMAIN_KERNEL:
2538 nb_events = kernel_list_events(&events);
2539 if (nb_events < 0) {
2540 ret_code = LTTNG_ERR_KERN_LIST_FAIL;
2541 goto error;
2542 }
2543 break;
2544 case LTTNG_DOMAIN_UST:
2545 nb_events = ust_app_list_events(&events);
2546 if (nb_events < 0) {
2547 ret_code = LTTNG_ERR_UST_LIST_FAIL;
2548 goto error;
2549 }
2550 break;
2551 case LTTNG_DOMAIN_LOG4J:
2552 case LTTNG_DOMAIN_JUL:
2553 case LTTNG_DOMAIN_PYTHON:
2554 nb_events = agent_list_events(&events, domain);
2555 if (nb_events < 0) {
2556 ret_code = LTTNG_ERR_UST_LIST_FAIL;
2557 goto error;
2558 }
2559 break;
2560 default:
2561 ret_code = LTTNG_ERR_UND;
2562 goto error;
2563 }
2564
2565 for (i = 0; i < nb_events; i++) {
2566 ret = lttng_event_serialize(
2567 &events[i], 0, nullptr, nullptr, 0, nullptr, reply_payload);
2568 if (ret) {
2569 ret_code = LTTNG_ERR_NOMEM;
2570 goto error;
2571 }
2572 }
2573
2574 if (nb_events > UINT32_MAX) {
2575 ERR("Tracepoint count would overflow the tracepoint listing command's reply");
2576 ret_code = LTTNG_ERR_OVERFLOW;
2577 goto error;
2578 }
2579
2580 /* Update command reply header. */
2581 reply_command_header.count = (uint32_t) nb_events;
2582 memcpy(reply_payload->buffer.data + reply_command_header_offset,
2583 &reply_command_header,
2584 sizeof(reply_command_header));
2585
2586 ret_code = LTTNG_OK;
2587 error:
2588 free(events);
2589 return ret_code;
2590 }
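/*
 * The listing commands in this file all build their reply with the same
 * "reserve, serialize, backpatch" pattern. A condensed sketch (error
 * handling omitted, names taken from the function above):
 *
 *   const size_t header_offset = reply_payload->buffer.size;
 *
 *   // 1) Reserve room for the count header before the count is known.
 *   lttng_dynamic_buffer_set_size(&reply_payload->buffer,
 *       header_offset + sizeof(struct lttcomm_list_command_header));
 *
 *   // 2) Serialize the variable-sized elements after the header.
 *   for (i = 0; i < nb_events; i++) {
 *       lttng_event_serialize(&events[i], 0, nullptr, nullptr, 0, nullptr,
 *                             reply_payload);
 *   }
 *
 *   // 3) Backpatch the header now that the element count is final.
 *   reply_command_header.count = (uint32_t) nb_events;
 *   memcpy(reply_payload->buffer.data + header_offset,
 *          &reply_command_header, sizeof(reply_command_header));
 */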
2591
2592 /*
2593 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
2594 */
2595 enum lttng_error_code cmd_list_tracepoint_fields(enum lttng_domain_type domain,
2596 struct lttng_payload *reply)
2597 {
2598 enum lttng_error_code ret_code;
2599 int ret;
2600 unsigned int i, nb_fields;
2601 struct lttng_event_field *fields = nullptr;
2602 struct lttcomm_list_command_header reply_command_header = {};
2603 size_t reply_command_header_offset;
2604
2605 assert(reply);
2606
2607 /* Reserve space for command reply header. */
2608 reply_command_header_offset = reply->buffer.size;
2609 ret = lttng_dynamic_buffer_set_size(&reply->buffer,
2610 reply_command_header_offset +
2611 sizeof(struct lttcomm_list_command_header));
2612 if (ret) {
2613 ret_code = LTTNG_ERR_NOMEM;
2614 goto error;
2615 }
2616
2617 switch (domain) {
2618 case LTTNG_DOMAIN_UST:
2619 ret = ust_app_list_event_fields(&fields);
2620 if (ret < 0) {
2621 ret_code = LTTNG_ERR_UST_LIST_FAIL;
2622 goto error;
2623 }
2624
2625 break;
2626 case LTTNG_DOMAIN_KERNEL:
2627 default: /* fall-through */
2628 ret_code = LTTNG_ERR_UND;
2629 goto error;
2630 }
2631
2632 nb_fields = ret;
2633
2634 for (i = 0; i < nb_fields; i++) {
2635 ret = lttng_event_field_serialize(&fields[i], reply);
2636 if (ret) {
2637 ret_code = LTTNG_ERR_NOMEM;
2638 goto error;
2639 }
2640 }
2641
2642 if (nb_fields > UINT32_MAX) {
2643 ERR("Tracepoint field count would overflow the tracepoint field listing command's reply");
2644 ret_code = LTTNG_ERR_OVERFLOW;
2645 goto error;
2646 }
2647
2648 /* Update command reply header. */
2649 reply_command_header.count = (uint32_t) nb_fields;
2650
2651 memcpy(reply->buffer.data + reply_command_header_offset,
2652 &reply_command_header,
2653 sizeof(reply_command_header));
2654
2655 ret_code = LTTNG_OK;
2656
2657 error:
2658 free(fields);
2659 return ret_code;
2660 }
2661
2662 enum lttng_error_code cmd_list_syscalls(struct lttng_payload *reply_payload)
2663 {
2664 enum lttng_error_code ret_code;
2665 ssize_t nb_events, i;
2666 int ret;
2667 struct lttng_event *events = nullptr;
2668 struct lttcomm_list_command_header reply_command_header = {};
2669 size_t reply_command_header_offset;
2670
2671 assert(reply_payload);
2672
2673 /* Reserve space for command reply header. */
2674 reply_command_header_offset = reply_payload->buffer.size;
2675 ret = lttng_dynamic_buffer_set_size(&reply_payload->buffer,
2676 reply_command_header_offset +
2677 sizeof(struct lttcomm_list_command_header));
2678 if (ret) {
2679 ret_code = LTTNG_ERR_NOMEM;
2680 goto end;
2681 }
2682
2683 nb_events = syscall_table_list(&events);
2684 if (nb_events < 0) {
2685 ret_code = (enum lttng_error_code) - nb_events;
2686 goto end;
2687 }
2688
2689 for (i = 0; i < nb_events; i++) {
2690 ret = lttng_event_serialize(
2691 &events[i], 0, nullptr, nullptr, 0, nullptr, reply_payload);
2692 if (ret) {
2693 ret_code = LTTNG_ERR_NOMEM;
2694 goto end;
2695 }
2696 }
2697
2698 if (nb_events > UINT32_MAX) {
2699 ERR("Syscall count would overflow the syscall listing command's reply");
2700 ret_code = LTTNG_ERR_OVERFLOW;
2701 goto end;
2702 }
2703
2704 /* Update command reply header. */
2705 reply_command_header.count = (uint32_t) nb_events;
2706 memcpy(reply_payload->buffer.data + reply_command_header_offset,
2707 &reply_command_header,
2708 sizeof(reply_command_header));
2709
2710 ret_code = LTTNG_OK;
2711 end:
2712 free(events);
2713 return ret_code;
2714 }
2715
2716 /*
2717 * Command LTTNG_START_TRACE processed by the client thread.
2718 */
2719 int cmd_start_trace(const ltt_session::locked_ref& session)
2720 {
2721 enum lttng_error_code ret;
2722 unsigned long nb_chan = 0;
2723 struct ltt_kernel_session *ksession;
2724 struct ltt_ust_session *usess;
2725 const bool session_rotated_after_last_stop = session->rotated_after_last_stop;
2726 const bool session_cleared_after_last_stop = session->cleared_after_last_stop;
2727
2728 /* Ease our life a bit ;) */
2729 ksession = session->kernel_session;
2730 usess = session->ust_session;
2731
2732 /* Is the session already started? */
2733 if (session->active) {
2734 ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
2735 /* Perform nothing */
2736 goto end;
2737 }
2738
2739 if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING &&
2740 !session->current_trace_chunk) {
2741 /*
2742 * A rotation was launched while the session was stopped and
2743 * it has not been completed yet. It is not possible to start
2744 * the session since starting the session here would require a
2745 * rotation from "NULL" to a new trace chunk. That rotation
2746 * would overlap with the ongoing rotation, which is not
2747 * supported.
2748 */
2749 WARN("Refusing to start session \"%s\" as a rotation launched after the last \"stop\" is still ongoing",
2750 session->name);
2751 ret = LTTNG_ERR_ROTATION_PENDING;
2752 goto error;
2753 }
2754
2755 /*
2756	 * Starting a session without any channel is pointless since it is not
2757	 * possible to enable a channel afterwards; inform the client.
2758 */
2759 if (usess && usess->domain_global.channels) {
2760 nb_chan += lttng_ht_get_count(usess->domain_global.channels);
2761 }
2762 if (ksession) {
2763 nb_chan += ksession->channel_count;
2764 }
2765 if (!nb_chan) {
2766 ret = LTTNG_ERR_NO_CHANNEL;
2767 goto error;
2768 }
2769
2770 session->active = true;
2771 session->rotated_after_last_stop = false;
2772 session->cleared_after_last_stop = false;
2773 if (session->output_traces && !session->current_trace_chunk) {
2774 if (!session->has_been_started) {
2775 struct lttng_trace_chunk *trace_chunk;
2776
2777 DBG("Creating initial trace chunk of session \"%s\"", session->name);
2778 trace_chunk =
2779 session_create_new_trace_chunk(session, nullptr, nullptr, nullptr);
2780 if (!trace_chunk) {
2781 ret = LTTNG_ERR_CREATE_DIR_FAIL;
2782 goto error;
2783 }
2784 LTTNG_ASSERT(!session->current_trace_chunk);
2785 ret = (lttng_error_code) session_set_trace_chunk(
2786 session, trace_chunk, nullptr);
2787 lttng_trace_chunk_put(trace_chunk);
2788 if (ret) {
2789 ret = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
2790 goto error;
2791 }
2792 } else {
2793 DBG("Rotating session \"%s\" from its current \"NULL\" trace chunk to a new chunk",
2794 session->name);
2795 /*
2796 * Rotate existing streams into the new chunk.
2797	 * This is a "quiet" rotation as no client has
2798 * explicitly requested this operation.
2799 *
2800 * There is also no need to wait for the rotation
2801 * to complete as it will happen immediately. No data
2802 * was produced as the session was stopped, so the
2803 * rotation should happen on reception of the command.
2804 */
2805 ret = (lttng_error_code) cmd_rotate_session(
2806 session, nullptr, true, LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION);
2807 if (ret != LTTNG_OK) {
2808 goto error;
2809 }
2810 }
2811 }
2812
2813 /* Kernel tracing */
2814 if (ksession != nullptr) {
2815 DBG("Start kernel tracing session %s", session->name);
2816 ret = (lttng_error_code) start_kernel_session(ksession);
2817 if (ret != LTTNG_OK) {
2818 goto error;
2819 }
2820 }
2821
2822	 /* Start userspace tracing for all registered applications. */
2823 if (usess) {
2824 const int int_ret = ust_app_start_trace_all(usess);
2825
2826 if (int_ret < 0) {
2827 ret = LTTNG_ERR_UST_START_FAIL;
2828 goto error;
2829 }
2830 }
2831
2832 /*
2833 * Open a packet in every stream of the session to ensure that viewers
2834 * can correctly identify the boundaries of the periods during which
2835 * tracing was active for this session.
2836 */
2837 ret = session_open_packets(session);
2838 if (ret != LTTNG_OK) {
2839 goto error;
2840 }
2841
2842 /*
2843 * Clear the flag that indicates that a rotation was done while the
2844 * session was stopped.
2845 */
2846 session->rotated_after_last_stop = false;
2847
2848 if (session->rotate_timer_period && !session->rotation_schedule_timer_enabled) {
2849 const int int_ret = timer_session_rotation_schedule_timer_start(
2850 session, session->rotate_timer_period);
2851
2852 if (int_ret < 0) {
2853 ERR("Failed to enable rotate timer");
2854 ret = LTTNG_ERR_UNK;
2855 goto error;
2856 }
2857 }
2858
2859 ret = LTTNG_OK;
2860
2861 error:
2862 if (ret == LTTNG_OK) {
2863 /* Flag this after a successful start. */
2864 session->has_been_started = true;
2865 } else {
2866 session->active = false;
2867 /* Restore initial state on error. */
2868 session->rotated_after_last_stop = session_rotated_after_last_stop;
2869 session->cleared_after_last_stop = session_cleared_after_last_stop;
2870 }
2871 end:
2872 return ret;
2873 }
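/*
 * Client-side view of the start command above and its stop counterpart
 * below; the session name is illustrative.
 *
 *   int ret = lttng_start_tracing("my-session");
 *   if (ret == -LTTNG_ERR_TRACE_ALREADY_STARTED) {
 *       // Already running; nothing was changed on the tracer side.
 *   }
 *   ...
 *   ret = lttng_stop_tracing("my-session"); // waits for pending data to be consumed
 */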
2874
2875 /*
2876 * Command LTTNG_STOP_TRACE processed by the client thread.
2877 */
2878 int cmd_stop_trace(const ltt_session::locked_ref& session)
2879 {
2880 int ret;
2881 struct ltt_kernel_session *ksession;
2882 struct ltt_ust_session *usess;
2883
2884 DBG("Begin stop session \"%s\" (id %" PRIu64 ")", session->name, session->id);
2885 /* Short cut */
2886 ksession = session->kernel_session;
2887 usess = session->ust_session;
2888
2889 /* Session is not active. Skip everything and inform the client. */
2890 if (!session->active) {
2891 ret = LTTNG_ERR_TRACE_ALREADY_STOPPED;
2892 goto error;
2893 }
2894
2895 ret = stop_kernel_session(ksession);
2896 if (ret != LTTNG_OK) {
2897 goto error;
2898 }
2899
2900 if (usess && usess->active) {
2901 ret = ust_app_stop_trace_all(usess);
2902 if (ret < 0) {
2903 ret = LTTNG_ERR_UST_STOP_FAIL;
2904 goto error;
2905 }
2906 }
2907
2908 DBG("Completed stop session \"%s\" (id %" PRIu64 ")", session->name, session->id);
2909 /* Flag inactive after a successful stop. */
2910 session->active = false;
2911 ret = LTTNG_OK;
2912
2913 error:
2914 return ret;
2915 }
2916
2917 /*
2918	 * Set the base_path of the session only if the subdir of a control URI is set.
2919 * Return LTTNG_OK on success, otherwise LTTNG_ERR_*.
2920 */
2921 static int set_session_base_path_from_uris(const ltt_session::locked_ref& session,
2922 size_t nb_uri,
2923 struct lttng_uri *uris)
2924 {
2925 int ret;
2926 size_t i;
2927
2928 for (i = 0; i < nb_uri; i++) {
2929 if (uris[i].stype != LTTNG_STREAM_CONTROL || uris[i].subdir[0] == '\0') {
2930 /* Not interested in these URIs */
2931 continue;
2932 }
2933
2934 if (session->base_path != nullptr) {
2935 free(session->base_path);
2936 session->base_path = nullptr;
2937 }
2938
2939 /* Set session base_path */
2940 session->base_path = strdup(uris[i].subdir);
2941 if (!session->base_path) {
2942 PERROR("Failed to copy base path \"%s\" to session \"%s\"",
2943 uris[i].subdir,
2944 session->name);
2945 ret = LTTNG_ERR_NOMEM;
2946 goto error;
2947 }
2948 DBG2("Setting base path \"%s\" for session \"%s\"",
2949 session->base_path,
2950 session->name);
2951 }
2952 ret = LTTNG_OK;
2953 error:
2954 return ret;
2955 }
2956
2957 /*
2958 * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
2959 */
2960 int cmd_set_consumer_uri(const ltt_session::locked_ref& session,
2961 size_t nb_uri,
2962 struct lttng_uri *uris)
2963 {
2964 int ret, i;
2965 struct ltt_kernel_session *ksess = session->kernel_session;
2966 struct ltt_ust_session *usess = session->ust_session;
2967
2968 LTTNG_ASSERT(uris);
2969 LTTNG_ASSERT(nb_uri > 0);
2970
2971 /* Can't set consumer URI if the session is active. */
2972 if (session->active) {
2973 ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
2974 goto error;
2975 }
2976
2977 /*
2978 * Set the session base path if any. This is done inside
2979	 * cmd_set_consumer_uri to preserve backward compatibility between the
2980	 * previous session creation API and the session descriptor API.
2981 */
2982 ret = set_session_base_path_from_uris(session, nb_uri, uris);
2983 if (ret != LTTNG_OK) {
2984 goto error;
2985 }
2986
2987 /* Set the "global" consumer URIs */
2988 for (i = 0; i < nb_uri; i++) {
2989 ret = add_uri_to_consumer(session, session->consumer, &uris[i], LTTNG_DOMAIN_NONE);
2990 if (ret != LTTNG_OK) {
2991 goto error;
2992 }
2993 }
2994
2995 /* Set UST session URIs */
2996 if (session->ust_session) {
2997 for (i = 0; i < nb_uri; i++) {
2998 ret = add_uri_to_consumer(session,
2999 session->ust_session->consumer,
3000 &uris[i],
3001 LTTNG_DOMAIN_UST);
3002 if (ret != LTTNG_OK) {
3003 goto error;
3004 }
3005 }
3006 }
3007
3008 /* Set kernel session URIs */
3009 if (session->kernel_session) {
3010 for (i = 0; i < nb_uri; i++) {
3011 ret = add_uri_to_consumer(session,
3012 session->kernel_session->consumer,
3013 &uris[i],
3014 LTTNG_DOMAIN_KERNEL);
3015 if (ret != LTTNG_OK) {
3016 goto error;
3017 }
3018 }
3019 }
3020
3021 /*
3022	 * Make sure to set the session in output mode after we set the URI, since a
3023	 * session can be created without a URL (and is thus flagged as having no output).
3024 */
3025 session->output_traces = 1;
3026 if (ksess) {
3027 ksess->output_traces = 1;
3028 }
3029
3030 if (usess) {
3031 usess->output_traces = 1;
3032 }
3033
3034 /* All good! */
3035 ret = LTTNG_OK;
3036
3037 error:
3038 return ret;
3039 }
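/*
 * For a network output, this command typically receives a control/data URI
 * pair that the client derived from a single net:// URL. A sketch of the
 * caller-side shape (host name illustrative, default relayd ports shown):
 *
 *   struct lttng_uri uris[2] = {};
 *
 *   uris[0].stype = LTTNG_STREAM_CONTROL; // e.g. tcp://relayd-host:5342
 *   uris[1].stype = LTTNG_STREAM_DATA;    // e.g. tcp://relayd-host:5343
 *   ret = cmd_set_consumer_uri(session, 2, uris);
 */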
3040
3041 static enum lttng_error_code
3042 set_session_output_from_descriptor(const ltt_session::locked_ref& session,
3043 const struct lttng_session_descriptor *descriptor)
3044 {
3045 int ret;
3046 enum lttng_error_code ret_code = LTTNG_OK;
3047 const lttng_session_descriptor_type session_type =
3048 lttng_session_descriptor_get_type(descriptor);
3049 const lttng_session_descriptor_output_type output_type =
3050 lttng_session_descriptor_get_output_type(descriptor);
3051 struct lttng_uri uris[2] = {};
3052 size_t uri_count = 0;
3053
3054 switch (output_type) {
3055 case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_NONE:
3056 goto end;
3057 case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_LOCAL:
3058 lttng_session_descriptor_get_local_output_uri(descriptor, &uris[0]);
3059 uri_count = 1;
3060 break;
3061 case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_NETWORK:
3062 lttng_session_descriptor_get_network_output_uris(descriptor, &uris[0], &uris[1]);
3063 uri_count = 2;
3064 break;
3065 default:
3066 ret_code = LTTNG_ERR_INVALID;
3067 goto end;
3068 }
3069
3070 switch (session_type) {
3071 case LTTNG_SESSION_DESCRIPTOR_TYPE_SNAPSHOT:
3072 {
3073 struct snapshot_output *new_output = nullptr;
3074
3075 new_output = snapshot_output_alloc();
3076 if (!new_output) {
3077 ret_code = LTTNG_ERR_NOMEM;
3078 goto end;
3079 }
3080
3081 ret = snapshot_output_init_with_uri(session,
3082 DEFAULT_SNAPSHOT_MAX_SIZE,
3083 nullptr,
3084 uris,
3085 uri_count,
3086 session->consumer,
3087 new_output,
3088 &session->snapshot);
3089 if (ret < 0) {
3090 ret_code = (ret == -ENOMEM) ? LTTNG_ERR_NOMEM : LTTNG_ERR_INVALID;
3091 snapshot_output_destroy(new_output);
3092 goto end;
3093 }
3094 snapshot_add_output(&session->snapshot, new_output);
3095 break;
3096 }
3097 case LTTNG_SESSION_DESCRIPTOR_TYPE_REGULAR:
3098 case LTTNG_SESSION_DESCRIPTOR_TYPE_LIVE:
3099 {
3100 ret_code = (lttng_error_code) cmd_set_consumer_uri(session, uri_count, uris);
3101 break;
3102 }
3103 default:
3104 ret_code = LTTNG_ERR_INVALID;
3105 goto end;
3106 }
3107 end:
3108 return ret_code;
3109 }
3110
3111 static enum lttng_error_code
3112 cmd_create_session_from_descriptor(struct lttng_session_descriptor *descriptor,
3113 const lttng_sock_cred *creds,
3114 const char *home_path)
3115 {
3116 int ret;
3117 enum lttng_error_code ret_code;
3118 const char *session_name;
3119 struct ltt_session *new_session = nullptr;
3120 enum lttng_session_descriptor_status descriptor_status;
3121
3122 const auto list_lock = lttng::sessiond::lock_session_list();
3123 if (home_path) {
3124 if (*home_path != '/') {
3125 ERR("Home path provided by client is not absolute");
3126 ret_code = LTTNG_ERR_INVALID;
3127 goto end;
3128 }
3129 }
3130
3131 descriptor_status = lttng_session_descriptor_get_session_name(descriptor, &session_name);
3132 switch (descriptor_status) {
3133 case LTTNG_SESSION_DESCRIPTOR_STATUS_OK:
3134 break;
3135 case LTTNG_SESSION_DESCRIPTOR_STATUS_UNSET:
3136 session_name = nullptr;
3137 break;
3138 default:
3139 ret_code = LTTNG_ERR_INVALID;
3140 goto end;
3141 }
3142
3143 ret_code = session_create(session_name, creds->uid, creds->gid, &new_session);
3144 if (ret_code != LTTNG_OK) {
3145 goto end;
3146 }
3147
3148 ret_code = notification_thread_command_add_session(the_notification_thread_handle,
3149 new_session->id,
3150 new_session->name,
3151 new_session->uid,
3152 new_session->gid);
3153 if (ret_code != LTTNG_OK) {
3154 goto end;
3155 }
3156
3157 /* Announce the session's destruction to the notification thread when it is destroyed. */
3158 ret = session_add_destroy_notifier(
3159 [new_session]() {
3160 session_get(new_session);
3161 new_session->lock();
3162 return ltt_session::make_locked_ref(*new_session);
3163 }(),
3164 [](const ltt_session::locked_ref& session,
3165 void *user_data __attribute__((unused))) {
3166 (void) notification_thread_command_remove_session(
3167 the_notification_thread_handle, session->id);
3168 },
3169 nullptr);
3170 if (ret) {
3171 PERROR("Failed to add notification thread command to session's destroy notifiers: session name = %s",
3172 new_session->name);
3173 ret = LTTNG_ERR_NOMEM;
3174 goto end;
3175 }
3176
3177 if (!session_name) {
3178 ret = lttng_session_descriptor_set_session_name(descriptor, new_session->name);
3179 if (ret) {
3180 ret_code = LTTNG_ERR_SESSION_FAIL;
3181 goto end;
3182 }
3183 }
3184
3185 if (!lttng_session_descriptor_is_output_destination_initialized(descriptor)) {
3186 /*
3187 * Only include the session's creation time in the output
3188 * destination if the name of the session itself was
3189 * not auto-generated.
3190 */
3191 ret_code = lttng_session_descriptor_set_default_output(
3192 descriptor,
3193 session_name ? &new_session->creation_time : nullptr,
3194 home_path);
3195 if (ret_code != LTTNG_OK) {
3196 goto end;
3197 }
3198 } else {
3199 new_session->has_user_specified_directory =
3200 lttng_session_descriptor_has_output_directory(descriptor);
3201 }
3202
3203 switch (lttng_session_descriptor_get_type(descriptor)) {
3204 case LTTNG_SESSION_DESCRIPTOR_TYPE_SNAPSHOT:
3205 new_session->snapshot_mode = 1;
3206 break;
3207 case LTTNG_SESSION_DESCRIPTOR_TYPE_LIVE:
3208 new_session->live_timer =
3209 lttng_session_descriptor_live_get_timer_interval(descriptor);
3210 break;
3211 default:
3212 break;
3213 }
3214
3215 ret_code = set_session_output_from_descriptor(
3216 [new_session]() {
3217 session_get(new_session);
3218 new_session->lock();
3219 return ltt_session::make_locked_ref(*new_session);
3220 }(),
3221 descriptor);
3222 if (ret_code != LTTNG_OK) {
3223 goto end;
3224 }
3225 new_session->consumer->enabled = true;
3226 ret_code = LTTNG_OK;
3227 end:
3228 /* Release reference provided by the session_create function. */
3229 session_put(new_session);
3230 if (ret_code != LTTNG_OK && new_session) {
3231 /* Release the global reference on error. */
3232 session_destroy(new_session);
3233 }
3234
3235 return ret_code;
3236 }
3237
3238 enum lttng_error_code cmd_create_session(struct command_ctx *cmd_ctx,
3239 int sock,
3240 struct lttng_session_descriptor **return_descriptor)
3241 {
3242 int ret;
3243 size_t payload_size;
3244 struct lttng_dynamic_buffer payload;
3245 struct lttng_buffer_view home_dir_view;
3246 struct lttng_buffer_view session_descriptor_view;
3247 struct lttng_session_descriptor *session_descriptor = nullptr;
3248 enum lttng_error_code ret_code;
3249
3250 lttng_dynamic_buffer_init(&payload);
3251 if (cmd_ctx->lsm.u.create_session.home_dir_size >= LTTNG_PATH_MAX) {
3252 ret_code = LTTNG_ERR_INVALID;
3253 goto error;
3254 }
3255 if (cmd_ctx->lsm.u.create_session.session_descriptor_size >
3256 LTTNG_SESSION_DESCRIPTOR_MAX_LEN) {
3257 ret_code = LTTNG_ERR_INVALID;
3258 goto error;
3259 }
3260
3261 payload_size = cmd_ctx->lsm.u.create_session.home_dir_size +
3262 cmd_ctx->lsm.u.create_session.session_descriptor_size;
3263 ret = lttng_dynamic_buffer_set_size(&payload, payload_size);
3264 if (ret) {
3265 ret_code = LTTNG_ERR_NOMEM;
3266 goto error;
3267 }
3268
3269 ret = lttcomm_recv_unix_sock(sock, payload.data, payload.size);
3270 if (ret <= 0) {
3271 ERR("Reception of session descriptor failed, aborting.");
3272 ret_code = LTTNG_ERR_SESSION_FAIL;
3273 goto error;
3274 }
3275
3276 home_dir_view = lttng_buffer_view_from_dynamic_buffer(
3277 &payload, 0, cmd_ctx->lsm.u.create_session.home_dir_size);
3278 if (cmd_ctx->lsm.u.create_session.home_dir_size > 0 &&
3279 !lttng_buffer_view_is_valid(&home_dir_view)) {
3280 ERR("Invalid payload in \"create session\" command: buffer too short to contain home directory");
3281 ret_code = LTTNG_ERR_INVALID_PROTOCOL;
3282 goto error;
3283 }
3284
3285 session_descriptor_view = lttng_buffer_view_from_dynamic_buffer(
3286 &payload,
3287 cmd_ctx->lsm.u.create_session.home_dir_size,
3288 cmd_ctx->lsm.u.create_session.session_descriptor_size);
3289 if (!lttng_buffer_view_is_valid(&session_descriptor_view)) {
3290 ERR("Invalid payload in \"create session\" command: buffer too short to contain session descriptor");
3291 ret_code = LTTNG_ERR_INVALID_PROTOCOL;
3292 goto error;
3293 }
3294
3295 ret = lttng_session_descriptor_create_from_buffer(&session_descriptor_view,
3296 &session_descriptor);
3297 if (ret < 0) {
3298 ERR("Failed to create session descriptor from payload of \"create session\" command");
3299 ret_code = LTTNG_ERR_INVALID;
3300 goto error;
3301 }
3302
3303 /*
3304 * Sets the descriptor's auto-generated properties (name, output) if
3305 * needed.
3306 */
3307 ret_code = cmd_create_session_from_descriptor(session_descriptor,
3308 &cmd_ctx->creds,
3309 home_dir_view.size ? home_dir_view.data :
3310 nullptr);
3311 if (ret_code != LTTNG_OK) {
3312 goto error;
3313 }
3314
3315 ret_code = LTTNG_OK;
3316 *return_descriptor = session_descriptor;
3317 session_descriptor = nullptr;
3318 error:
3319 lttng_dynamic_buffer_reset(&payload);
3320 lttng_session_descriptor_destroy(session_descriptor);
3321 return ret_code;
3322 }
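/*
 * Client-side sketch of the descriptor-based creation path that drives the
 * command above; the session name and output path are illustrative.
 *
 *   struct lttng_session_descriptor *desc =
 *       lttng_session_descriptor_local_create("my-session", "/tmp/traces");
 *   enum lttng_error_code rc = lttng_create_session_ext(desc);
 *   // On success, the sessiond has filled in any auto-generated properties
 *   // (name, default output) of the descriptor.
 *   lttng_session_descriptor_destroy(desc);
 */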
3323
3324 static void cmd_destroy_session_reply(const ltt_session::locked_ref& session, void *_reply_context)
3325 {
3326 int ret;
3327 ssize_t comm_ret;
3328 const struct cmd_destroy_session_reply_context *reply_context =
3329 (cmd_destroy_session_reply_context *) _reply_context;
3330 struct lttng_dynamic_buffer payload;
3331 struct lttcomm_session_destroy_command_header cmd_header;
3332 struct lttng_trace_archive_location *location = nullptr;
3333 struct lttcomm_lttng_msg llm = {
3334 .cmd_type = LTTCOMM_SESSIOND_COMMAND_DESTROY_SESSION,
3335 .ret_code = reply_context->destruction_status,
3336 .pid = UINT32_MAX,
3337 .cmd_header_size = sizeof(struct lttcomm_session_destroy_command_header),
3338 .data_size = 0,
3339 .fd_count = 0,
3340 };
3341 size_t payload_size_before_location;
3342
3343 lttng_dynamic_buffer_init(&payload);
3344
3345 ret = lttng_dynamic_buffer_append(&payload, &llm, sizeof(llm));
3346 if (ret) {
3347 ERR("Failed to append session destruction message");
3348 goto error;
3349 }
3350
3351 cmd_header.rotation_state = (int32_t) (reply_context->implicit_rotation_on_destroy ?
3352 session->rotation_state :
3353 LTTNG_ROTATION_STATE_NO_ROTATION);
3354 ret = lttng_dynamic_buffer_append(&payload, &cmd_header, sizeof(cmd_header));
3355 if (ret) {
3356 ERR("Failed to append session destruction command header");
3357 goto error;
3358 }
3359
3360 if (!reply_context->implicit_rotation_on_destroy) {
3361 DBG("No implicit rotation performed during the destruction of session \"%s\", sending reply",
3362 session->name);
3363 goto send_reply;
3364 }
3365 if (session->rotation_state != LTTNG_ROTATION_STATE_COMPLETED) {
3366 DBG("Rotation state of session \"%s\" is not \"completed\", sending session destruction reply",
3367 session->name);
3368 goto send_reply;
3369 }
3370
3371 location = session_get_trace_archive_location(session);
3372 if (!location) {
3373 ERR("Failed to get the location of the trace archive produced during the destruction of session \"%s\"",
3374 session->name);
3375 goto error;
3376 }
3377
3378 payload_size_before_location = payload.size;
3379 comm_ret = lttng_trace_archive_location_serialize(location, &payload);
3380 lttng_trace_archive_location_put(location);
3381 if (comm_ret < 0) {
3382 ERR("Failed to serialize the location of the trace archive produced during the destruction of session \"%s\"",
3383 session->name);
3384 goto error;
3385 }
3386 /* Update the message to indicate the location's length. */
3387 ((struct lttcomm_lttng_msg *) payload.data)->data_size =
3388 payload.size - payload_size_before_location;
3389 send_reply:
3390 comm_ret = lttcomm_send_unix_sock(reply_context->reply_sock_fd, payload.data, payload.size);
3391 if (comm_ret != (ssize_t) payload.size) {
3392 ERR("Failed to send result of the destruction of session \"%s\" to client",
3393 session->name);
3394 }
3395 error:
3396 ret = close(reply_context->reply_sock_fd);
3397 if (ret) {
3398 PERROR("Failed to close client socket in deferred session destroy reply");
3399 }
3400 lttng_dynamic_buffer_reset(&payload);
3401 free(_reply_context);
3402 }
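/*
 * Wire framing of the deferred destroy reply assembled above, for reference:
 *
 *   [ lttcomm_lttng_msg ][ lttcomm_session_destroy_command_header ][ trace archive location (optional) ]
 *
 * llm.data_size starts at 0 and is backpatched with the serialized location's
 * length, mirroring the payload_size_before_location bookkeeping in the code.
 */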
3403
3404 /*
3405 * Command LTTNG_DESTROY_SESSION processed by the client thread.
3406 *
3407 * Called with session lock held.
3408 */
3409 int cmd_destroy_session(const ltt_session::locked_ref& session, int *sock_fd)
3410 {
3411 int ret;
3412 enum lttng_error_code destruction_last_error = LTTNG_OK;
3413 struct cmd_destroy_session_reply_context *reply_context = nullptr;
3414
3415 if (sock_fd) {
3416 reply_context = zmalloc<cmd_destroy_session_reply_context>();
3417 if (!reply_context) {
3418 ret = LTTNG_ERR_NOMEM;
3419 goto end;
3420 }
3421
3422 reply_context->reply_sock_fd = *sock_fd;
3423 }
3424
3425 DBG("Begin destroy session %s (id %" PRIu64 ")", session->name, session->id);
3426 if (session->active) {
3427 DBG("Session \"%s\" is active, attempting to stop it before destroying it",
3428 session->name);
3429 ret = cmd_stop_trace(session);
3430 if (ret != LTTNG_OK && ret != LTTNG_ERR_TRACE_ALREADY_STOPPED) {
3431 /* Carry on with the destruction of the session. */
3432 ERR("Failed to stop session \"%s\" as part of its destruction: %s",
3433 session->name,
3434 lttng_strerror(-ret));
3435 destruction_last_error = (lttng_error_code) ret;
3436 }
3437 }
3438
3439 if (session->rotation_schedule_timer_enabled) {
3440 if (timer_session_rotation_schedule_timer_stop(session)) {
3441 ERR("Failed to stop the \"rotation schedule\" timer of session %s",
3442 session->name);
3443 destruction_last_error = LTTNG_ERR_TIMER_STOP_ERROR;
3444 }
3445 }
3446
3447 if (session->rotate_size) {
3448 try {
3449 the_rotation_thread_handle->unsubscribe_session_consumed_size_rotation(
3450 *session);
3451 } catch (const std::exception& e) {
3452 /* Continue the destruction of the session anyway. */
3453 ERR("Failed to unsubscribe rotation thread notification channel from consumed size condition during session destruction: %s",
3454 e.what());
3455 }
3456
3457 session->rotate_size = 0;
3458 }
3459
3460 if (session->rotated && session->current_trace_chunk && session->output_traces) {
3461 /*
3462 * Perform a last rotation on destruction if rotations have
3463 * occurred during the session's lifetime.
3464 */
3465 ret = cmd_rotate_session(
3466 session, nullptr, false, LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
3467 if (ret != LTTNG_OK) {
3468 ERR("Failed to perform an implicit rotation as part of the destruction of session \"%s\": %s",
3469 session->name,
3470 lttng_strerror(-ret));
3471 destruction_last_error = (lttng_error_code) -ret;
3472 }
3473 if (reply_context) {
3474 reply_context->implicit_rotation_on_destroy = true;
3475 }
3476 } else if (session->has_been_started && session->current_trace_chunk) {
3477 /*
3478 * The user has not triggered a session rotation. However, to
3479 * ensure all data has been consumed, the session is rotated
3480 * to a 'null' trace chunk before it is destroyed.
3481 *
3482 * This is a "quiet" rotation meaning that no notification is
3483 * emitted and no renaming of the current trace chunk takes
3484 * place.
3485 */
3486 ret = cmd_rotate_session(
3487 session, nullptr, true, LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION);
3488 /*
3489 * Rotation operations may not be supported by the kernel
3490 * tracer. Hence, do not consider this implicit rotation as
3491 * a session destruction error. The library has already stopped
3492 * the session and waited for pending data; there is nothing
3493 * left to do but complete the destruction of the session.
3494 */
3495 if (ret != LTTNG_OK && ret != -LTTNG_ERR_ROTATION_NOT_AVAILABLE_KERNEL) {
3496 ERR("Failed to perform a quiet rotation as part of the destruction of session \"%s\": %s",
3497 session->name,
3498 lttng_strerror(ret));
3499 destruction_last_error = (lttng_error_code) -ret;
3500 }
3501 }
3502
3503 if (session->shm_path[0]) {
3504 /*
3505 * When a session is created with an explicit shm_path,
3506 * the consumer daemon will create its shared memory files
3507 * at that location and will *not* unlink them. This is normal
3508 * as the intention of that feature is to make it possible
3509 * to retrieve the content of those files should a crash occur.
3510 *
3511 * To ensure the content of those files can be used, the
3512 * sessiond daemon will replicate the content of the metadata
3513 * cache in a metadata file.
3514 *
3515 * On clean-up, it is expected that the consumer daemon will
3516 * unlink the shared memory files and that the session daemon
3517 * will unlink the metadata file. Then, the session's directory
3518 * in the shm path can be removed.
3519 *
3520 * Unfortunately, a flaw in the design of the sessiond's and
3521 * consumerd's tear down of channels makes it impossible to
3522 * determine when the sessiond _and_ the consumerd have both
3523 * destroyed their representation of a channel. For one, the
3524 * unlinking, close, and rmdir happen in deferred 'call_rcu'
3525 * callbacks in both daemons.
3526 *
3527 * However, it is also impossible for the sessiond to know when
3528 * the consumer daemon is done destroying its channel(s) since
3529 * it occurs as a reaction to the closing of the channel's file
3530 * descriptor. There is no resulting communication initiated
3531 * from the consumerd to the sessiond to confirm that the
3532 * operation is completed (and was successful).
3533 *
3534 * Until this is all fixed, the session daemon checks for the
3535 * removal of the session's shm path which makes it possible
3536 * to safely advertise a session as having been destroyed.
3537 *
3538 * Prior to this fix, it was not possible to reliably save
3539 * a session making use of the --shm-path option, destroy it,
3540 * and load it again. This is because the creation of the
3541 * session would fail upon seeing the session's shm path
3542 * already in existence.
3543 *
3544 * Note that none of the error paths in the check for the
3545 * directory's existence return an error. This is normal
3546 * as there isn't much that can be done. The session will
3547 * be destroyed properly, except that we can't offer the
3548 * guarantee that the same session can be re-created.
3549 */
3550 current_completion_handler = &destroy_completion_handler.handler;
3551 ret = lttng_strncpy(destroy_completion_handler.shm_path,
3552 session->shm_path,
3553 sizeof(destroy_completion_handler.shm_path));
3554 LTTNG_ASSERT(!ret);
3555 }
3556
3557 /*
3558 * The session is destroyed. However, note that the command context
3559 * still holds a reference to the session, thus delaying its destruction
3560 * _at least_ up to the point when that reference is released.
3561 */
3562 session_destroy(&session.get());
3563 if (reply_context) {
3564 reply_context->destruction_status = destruction_last_error;
3565 ret = session_add_destroy_notifier(
3566 session, cmd_destroy_session_reply, (void *) reply_context);
3567 if (ret) {
3568 ret = LTTNG_ERR_FATAL;
3569 goto end;
3570 } else {
3571 *sock_fd = -1;
3572 }
3573 }
3574 ret = LTTNG_OK;
3575 end:
3576 return ret;
3577 }
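/*
 * A minimal sketch of what the shm-path completion handling set up above
 * amounts to once the command completes: poll until the session's shm
 * directory has disappeared before replying to the client. The polling
 * helper and delay below are illustrative, not the actual implementation.
 *
 *   while (access(destroy_completion_handler.shm_path, F_OK) == 0) {
 *       usleep(check_delay_us); // hypothetical delay constant
 *   }
 *   // Only then is the "destroy session" reply sent, so re-creating a
 *   // session with the same --shm-path cannot race with the teardown.
 */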
3578
3579 /*
3580 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
3581 */
3582 int cmd_register_consumer(const ltt_session::locked_ref& session,
3583 enum lttng_domain_type domain,
3584 const char *sock_path,
3585 struct consumer_data *cdata)
3586 {
3587 int ret, sock;
3588 struct consumer_socket *socket = nullptr;
3589
3590 LTTNG_ASSERT(cdata);
3591 LTTNG_ASSERT(sock_path);
3592
3593 switch (domain) {
3594 case LTTNG_DOMAIN_KERNEL:
3595 {
3596 struct ltt_kernel_session *ksess = session->kernel_session;
3597
3598 LTTNG_ASSERT(ksess);
3599
3600 /* Can't register a consumer if there is already one */
3601 if (ksess->consumer_fds_sent != 0) {
3602 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
3603 goto error;
3604 }
3605
3606 sock = lttcomm_connect_unix_sock(sock_path);
3607 if (sock < 0) {
3608 ret = LTTNG_ERR_CONNECT_FAIL;
3609 goto error;
3610 }
3611 cdata->cmd_sock = sock;
3612
3613 socket = consumer_allocate_socket(&cdata->cmd_sock);
3614 if (socket == nullptr) {
3615 ret = close(sock);
3616 if (ret < 0) {
3617 PERROR("close register consumer");
3618 }
3619 cdata->cmd_sock = -1;
3620 ret = LTTNG_ERR_FATAL;
3621 goto error;
3622 }
3623
3624 socket->lock = zmalloc<pthread_mutex_t>();
3625 if (socket->lock == nullptr) {
3626 PERROR("zmalloc pthread mutex");
3627 ret = LTTNG_ERR_FATAL;
3628 goto error;
3629 }
3630
3631 pthread_mutex_init(socket->lock, nullptr);
3632 socket->registered = 1;
3633
3634 const lttng::urcu::read_lock_guard read_lock;
3635 consumer_add_socket(socket, ksess->consumer);
3636
3637 pthread_mutex_lock(&cdata->pid_mutex);
3638 cdata->pid = -1;
3639 pthread_mutex_unlock(&cdata->pid_mutex);
3640
3641 break;
3642 }
3643 default:
3644 /* TODO: Userspace tracing */
3645 ret = LTTNG_ERR_UND;
3646 goto error;
3647 }
3648
3649 return LTTNG_OK;
3650
3651 error:
3652 if (socket) {
3653 consumer_destroy_socket(socket);
3654 }
3655 return ret;
3656 }
3657
3658 /*
3659 * Command LTTNG_LIST_DOMAINS processed by the client thread.
3660 */
3661 ssize_t cmd_list_domains(const ltt_session::locked_ref& session, struct lttng_domain **domains)
3662 {
3663 int ret, index = 0;
3664 ssize_t nb_dom = 0;
3665 struct agent *agt;
3666 struct lttng_ht_iter iter;
3667
3668 if (session->kernel_session != nullptr) {
3669 DBG3("Listing domains found kernel domain");
3670 nb_dom++;
3671 }
3672
3673 if (session->ust_session != nullptr) {
3674 DBG3("Listing domains found UST global domain");
3675 nb_dom++;
3676
3677 const lttng::urcu::read_lock_guard read_lock;
3678
3679 cds_lfht_for_each_entry (
3680 session->ust_session->agents->ht, &iter.iter, agt, node.node) {
3681 if (agt->being_used) {
3682 nb_dom++;
3683 }
3684 }
3685 }
3686
3687 if (!nb_dom) {
3688 goto end;
3689 }
3690
3691 *domains = calloc<lttng_domain>(nb_dom);
3692 if (*domains == nullptr) {
3693 ret = LTTNG_ERR_FATAL;
3694 goto error;
3695 }
3696
3697 if (session->kernel_session != nullptr) {
3698 (*domains)[index].type = LTTNG_DOMAIN_KERNEL;
3699
3700 /* Kernel session buffer type is always GLOBAL */
3701 (*domains)[index].buf_type = LTTNG_BUFFER_GLOBAL;
3702
3703 index++;
3704 }
3705
3706 if (session->ust_session != nullptr) {
3707 (*domains)[index].type = LTTNG_DOMAIN_UST;
3708 (*domains)[index].buf_type = session->ust_session->buffer_type;
3709 index++;
3710
3711 {
3712 const lttng::urcu::read_lock_guard read_lock;
3713
3714 cds_lfht_for_each_entry (
3715 session->ust_session->agents->ht, &iter.iter, agt, node.node) {
3716 if (agt->being_used) {
3717 (*domains)[index].type = agt->domain;
3718 (*domains)[index].buf_type =
3719 session->ust_session->buffer_type;
3720 index++;
3721 }
3722 }
3723 }
3724 }
3725 end:
3726 return nb_dom;
3727
3728 error:
3729 /* Return negative value to differentiate return code */
3730 return -ret;
3731 }
3732
3733 /*
3734 * Command LTTNG_LIST_CHANNELS processed by the client thread.
3735 */
3736 enum lttng_error_code cmd_list_channels(enum lttng_domain_type domain,
3737 const ltt_session::locked_ref& session,
3738 struct lttng_payload *payload)
3739 {
3740 int ret = 0;
3741 unsigned int i = 0;
3742 struct lttcomm_list_command_header cmd_header = {};
3743 size_t cmd_header_offset;
3744 enum lttng_error_code ret_code;
3745
3746 LTTNG_ASSERT(payload);
3747
3748 DBG("Listing channels for session %s", session->name);
3749
3750 cmd_header_offset = payload->buffer.size;
3751
3752 /* Reserve space for command reply header. */
3753 ret = lttng_dynamic_buffer_set_size(&payload->buffer,
3754 cmd_header_offset + sizeof(cmd_header));
3755 if (ret) {
3756 ret_code = LTTNG_ERR_NOMEM;
3757 goto end;
3758 }
3759
3760 switch (domain) {
3761 case LTTNG_DOMAIN_KERNEL:
3762 {
3763 /* Kernel channels */
3764 struct ltt_kernel_channel *kchan;
3765 if (session->kernel_session != nullptr) {
3766 cds_list_for_each_entry (
3767 kchan, &session->kernel_session->channel_list.head, list) {
3768 uint64_t discarded_events, lost_packets;
3769 struct lttng_channel_extended *extended;
3770
3771 extended = (struct lttng_channel_extended *)
3772 kchan->channel->attr.extended.ptr;
3773
3774 ret = get_kernel_runtime_stats(
3775 session, kchan, &discarded_events, &lost_packets);
3776 if (ret < 0) {
3777 ret_code = LTTNG_ERR_UNK;
3778 goto end;
3779 }
3780
3781 /*
3782 * Update the discarded_events and lost_packets
3783	 * counts for the channel
3784 */
3785 extended->discarded_events = discarded_events;
3786 extended->lost_packets = lost_packets;
3787
3788 ret = lttng_channel_serialize(kchan->channel, &payload->buffer);
3789 if (ret) {
3790 ERR("Failed to serialize lttng_channel: channel name = '%s'",
3791 kchan->channel->name);
3792 ret_code = LTTNG_ERR_UNK;
3793 goto end;
3794 }
3795
3796 i++;
3797 }
3798 }
3799 break;
3800 }
3801 case LTTNG_DOMAIN_UST:
3802 {
3803 struct lttng_ht_iter iter;
3804 struct ltt_ust_channel *uchan;
3805
3806 {
3807 const lttng::urcu::read_lock_guard read_lock;
3808
3809 cds_lfht_for_each_entry (session->ust_session->domain_global.channels->ht,
3810 &iter.iter,
3811 uchan,
3812 node.node) {
3813 uint64_t discarded_events = 0, lost_packets = 0;
3814 struct lttng_channel *channel = nullptr;
3815 struct lttng_channel_extended *extended;
3816
3817 channel = trace_ust_channel_to_lttng_channel(uchan);
3818 if (!channel) {
3819 ret_code = LTTNG_ERR_NOMEM;
3820 goto end;
3821 }
3822
3823 extended = (struct lttng_channel_extended *)
3824 channel->attr.extended.ptr;
3825
3826 ret = get_ust_runtime_stats(
3827 session, uchan, &discarded_events, &lost_packets);
3828 if (ret < 0) {
3829 lttng_channel_destroy(channel);
3830 ret_code = LTTNG_ERR_UNK;
3831 goto end;
3832 }
3833
3834 extended->discarded_events = discarded_events;
3835 extended->lost_packets = lost_packets;
3836
3837 ret = lttng_channel_serialize(channel, &payload->buffer);
3838 if (ret) {
3839 ERR("Failed to serialize lttng_channel: channel name = '%s'",
3840 channel->name);
3841 lttng_channel_destroy(channel);
3842 ret_code = LTTNG_ERR_UNK;
3843 goto end;
3844 }
3845
3846 lttng_channel_destroy(channel);
3847 i++;
3848 }
3849 }
3850
3851 break;
3852 }
3853 default:
3854 break;
3855 }
3856
3857 if (i > UINT32_MAX) {
3858 ERR("Channel count would overflow the channel listing command's reply");
3859 ret_code = LTTNG_ERR_OVERFLOW;
3860 goto end;
3861 }
3862
3863 /* Update command reply header. */
3864 cmd_header.count = (uint32_t) i;
3865 memcpy(payload->buffer.data + cmd_header_offset, &cmd_header, sizeof(cmd_header));
3866 ret_code = LTTNG_OK;
3867
3868 end:
3869 return ret_code;
3870 }
3871
3872 /*
3873 * Command LTTNG_LIST_EVENTS processed by the client thread.
3874 */
3875 enum lttng_error_code cmd_list_events(enum lttng_domain_type domain,
3876 const ltt_session::locked_ref& session,
3877 char *channel_name,
3878 struct lttng_payload *reply_payload)
3879 {
3880 int buffer_resize_ret;
3881 enum lttng_error_code ret_code = LTTNG_OK;
3882 struct lttcomm_list_command_header reply_command_header = {};
3883 size_t reply_command_header_offset;
3884 unsigned int nb_events = 0;
3885
3886 assert(reply_payload);
3887
3888 /* Reserve space for command reply header. */
3889 reply_command_header_offset = reply_payload->buffer.size;
3890 buffer_resize_ret = lttng_dynamic_buffer_set_size(
3891 &reply_payload->buffer,
3892 reply_command_header_offset + sizeof(struct lttcomm_list_command_header));
3893 if (buffer_resize_ret) {
3894 ret_code = LTTNG_ERR_NOMEM;
3895 goto end;
3896 }
3897
3898 switch (domain) {
3899 case LTTNG_DOMAIN_KERNEL:
3900 if (session->kernel_session != nullptr) {
3901 ret_code = list_lttng_kernel_events(
3902 channel_name, session->kernel_session, reply_payload, &nb_events);
3903 }
3904
3905 break;
3906 case LTTNG_DOMAIN_UST:
3907 {
3908 if (session->ust_session != nullptr) {
3909 ret_code =
3910 list_lttng_ust_global_events(channel_name,
3911 &session->ust_session->domain_global,
3912 reply_payload,
3913 &nb_events);
3914 }
3915
3916 break;
3917 }
3918 case LTTNG_DOMAIN_LOG4J:
3919 case LTTNG_DOMAIN_JUL:
3920 case LTTNG_DOMAIN_PYTHON:
3921 if (session->ust_session) {
3922 struct lttng_ht_iter iter;
3923 struct agent *agt;
3924
3925 const lttng::urcu::read_lock_guard read_lock;
3926
3927 cds_lfht_for_each_entry (
3928 session->ust_session->agents->ht, &iter.iter, agt, node.node) {
3929 if (agt->domain == domain) {
3930 ret_code = list_lttng_agent_events(
3931 agt, reply_payload, &nb_events);
3932 break;
3933 }
3934 }
3935 }
3936 break;
3937 default:
3938 ret_code = LTTNG_ERR_UND;
3939 break;
3940 }
3941
3942 if (nb_events > UINT32_MAX) {
3943 ret_code = LTTNG_ERR_OVERFLOW;
3944 goto end;
3945 }
3946
3947 /* Update command reply header. */
3948 reply_command_header.count = (uint32_t) nb_events;
3949 memcpy(reply_payload->buffer.data + reply_command_header_offset,
3950 &reply_command_header,
3951 sizeof(reply_command_header));
3952
3953 end:
3954 return ret_code;
3955 }
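
/*
 * Editor's note -- illustrative sketch only, not part of the original source.
 * cmd_list_events() (like the channel listing above it) uses a common
 * serialization pattern: reserve room for a lttcomm_list_command_header at the
 * current end of the payload, append a variable number of entries, then patch
 * the final count back into the reserved header slot. A minimal, self-contained
 * version of that pattern with a plain std::vector is sketched below; the names
 * (Header, append_entry, serialize_list) are hypothetical.
 *
 *   #include <cstdint>
 *   #include <cstring>
 *   #include <vector>
 *
 *   struct Header { std::uint32_t count; };
 *
 *   static void append_entry(std::vector<char>& buf, std::uint32_t value)
 *   {
 *           const char *bytes = reinterpret_cast<const char *>(&value);
 *           buf.insert(buf.end(), bytes, bytes + sizeof(value));
 *   }
 *
 *   static void serialize_list(std::vector<char>& buf,
 *                              const std::vector<std::uint32_t>& entries)
 *   {
 *           // Remember where the header goes and reserve space for it.
 *           const std::size_t header_offset = buf.size();
 *           buf.resize(buf.size() + sizeof(Header));
 *
 *           std::uint32_t count = 0;
 *           for (const auto entry : entries) {
 *                   append_entry(buf, entry);
 *                   count++;
 *           }
 *
 *           // Patch the final count into the reserved slot, mirroring the
 *           // memcpy() against the recorded header offset in the code above.
 *           const Header header = { count };
 *           std::memcpy(buf.data() + header_offset, &header, sizeof(header));
 *   }
 */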
3956
3957 /*
3958 * Using the session list, fill an lttng_session array to send back to the
3959 * client for session listing.
3960 *
3961 * The session list lock MUST be acquired before calling this function.
3962 */
3963 void cmd_list_lttng_sessions(struct lttng_session *sessions,
3964 size_t session_count,
3965 uid_t uid,
3966 gid_t gid)
3967 {
3968 int ret;
3969 unsigned int i = 0;
3970 struct ltt_session *raw_session_ptr;
3971 struct ltt_session_list *list = session_get_list();
3972 struct lttng_session_extended *extended = (typeof(extended)) (&sessions[session_count]);
3973
3974 DBG("Getting all available session for UID %d GID %d", uid, gid);
3975 /*
3976 * Iterate over session list and append data after the control struct in
3977 * the buffer.
3978 */
3979 cds_list_for_each_entry (raw_session_ptr, &list->head, list) {
3980 auto session = [raw_session_ptr]() {
3981 session_get(raw_session_ptr);
3982 raw_session_ptr->lock();
3983 return ltt_session::make_locked_ref(*raw_session_ptr);
3984 }();
3985
3986 /*
3987 * Only list the sessions the user can control.
3988 */
3989 if (!session_access_ok(session, uid) || session->destroyed) {
3990 continue;
3991 }
3992
3993 struct ltt_kernel_session *ksess = session->kernel_session;
3994 struct ltt_ust_session *usess = session->ust_session;
3995
3996 if (session->consumer->type == CONSUMER_DST_NET ||
3997 (ksess && ksess->consumer->type == CONSUMER_DST_NET) ||
3998 (usess && usess->consumer->type == CONSUMER_DST_NET)) {
3999 ret = build_network_session_path(
4000 sessions[i].path, sizeof(sessions[i].path), session);
4001 } else {
4002 ret = snprintf(sessions[i].path,
4003 sizeof(sessions[i].path),
4004 "%s",
4005 session->consumer->dst.session_root_path);
4006 }
4007 if (ret < 0) {
4008 PERROR("snprintf session path");
4009 continue;
4010 }
4011
4012 strncpy(sessions[i].name, session->name, NAME_MAX);
4013 sessions[i].name[NAME_MAX - 1] = '\0';
4014 sessions[i].enabled = session->active;
4015 sessions[i].snapshot_mode = session->snapshot_mode;
4016 sessions[i].live_timer_interval = session->live_timer;
4017 extended[i].creation_time.value = (uint64_t) session->creation_time;
4018 extended[i].creation_time.is_set = 1;
4019 i++;
4020 }
4021 }
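
/*
 * Editor's note -- illustrative sketch only, not part of the original source.
 * The immediately-invoked lambda in cmd_list_lttng_sessions() bundles "take a
 * reference, take the lock, wrap both in an RAII handle" into a single
 * expression, so the rest of the loop only ever manipulates a locked reference.
 * A generic, self-contained version of the idiom (Resource and LockedRef are
 * hypothetical names) could look like this:
 *
 *   #include <mutex>
 *
 *   struct Resource {
 *           std::mutex lock;
 *           int value = 0;
 *   };
 *
 *   class LockedRef {
 *   public:
 *           explicit LockedRef(Resource& resource) :
 *                   _guard(resource.lock), _resource(resource)
 *           {
 *           }
 *
 *           Resource& operator*() const { return _resource; }
 *
 *   private:
 *           std::unique_lock<std::mutex> _guard;
 *           Resource& _resource;
 *   };
 *
 *   static int read_value(Resource& raw)
 *   {
 *           // Immediately-invoked lambda: acquire and wrap in one expression.
 *           auto locked = [&raw]() { return LockedRef(raw); }();
 *
 *           return (*locked).value;
 *   }
 */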
4022
4023 /*
4024 * Command LTTCOMM_SESSIOND_COMMAND_KERNEL_TRACER_STATUS
4025 */
4026 enum lttng_error_code cmd_kernel_tracer_status(enum lttng_kernel_tracer_status *status)
4027 {
4028 if (status == nullptr) {
4029 return LTTNG_ERR_INVALID;
4030 }
4031
4032 *status = get_kernel_tracer_status();
4033 return LTTNG_OK;
4034 }
4035
4036 /*
4037 * Command LTTNG_DATA_PENDING: returns 0 if the data is NOT pending, meaning it is
4038 * ready for trace analysis (or any kind of reader), or 1 if data is still pending.
4039 */
4040 int cmd_data_pending(const ltt_session::locked_ref& session)
4041 {
4042 int ret;
4043 struct ltt_kernel_session *ksess = session->kernel_session;
4044 struct ltt_ust_session *usess = session->ust_session;
4045
4046 DBG("Data pending for session %s", session->name);
4047
4048 /* Session MUST be stopped to ask for data availability. */
4049 if (session->active) {
4050 ret = LTTNG_ERR_SESSION_STARTED;
4051 goto error;
4052 } else {
4053 /*
4054 * If stopped, make sure the session has been started at least once;
4055 * otherwise the consumer would always report that data is pending.
4056 *
4057 * The consumer assumes that, when it receives the data pending command,
4058 * the trace has been started beforehand; otherwise no output data has
4059 * been written by the streams, which is a precondition for data to be
4060 * pending. It is therefore *VERY* important not to ask the consumer
4061 * before the trace has been started.
4062 */
4063 if (!session->has_been_started) {
4064 ret = 0;
4065 goto error;
4066 }
4067 }
4068
4069 /* A rotation is still pending, we have to wait. */
4070 if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
4071 DBG("Rotate still pending for session %s", session->name);
4072 ret = 1;
4073 goto error;
4074 }
4075
4076 if (ksess && ksess->consumer) {
4077 ret = consumer_is_data_pending(ksess->id, ksess->consumer);
4078 if (ret == 1) {
4079 /* Data is still being extracted for the kernel. */
4080 goto error;
4081 }
4082 }
4083
4084 if (usess && usess->consumer) {
4085 ret = consumer_is_data_pending(usess->id, usess->consumer);
4086 if (ret == 1) {
4087 /* Data is still being extracted for user space. */
4088 goto error;
4089 }
4090 }
4091
4092 /* Data is ready to be read by a viewer */
4093 ret = 0;
4094
4095 error:
4096 return ret;
4097 }
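
/*
 * Editor's note -- illustrative sketch only, not part of the original source.
 * On the client side, this command backs lttng_data_pending(). A typical user
 * of the API polls until no data is pending before reading the trace; a minimal
 * sketch follows (the 200 ms interval is an arbitrary choice, and the exact
 * prototype should be checked against <lttng/lttng.h>):
 *
 *   #include <lttng/lttng.h>
 *   #include <unistd.h>
 *
 *   static int wait_for_trace_data(const char *session_name)
 *   {
 *           for (;;) {
 *                   const int pending = lttng_data_pending(session_name);
 *
 *                   if (pending < 0) {
 *                           // Negative LTTng error code.
 *                           return pending;
 *                   }
 *                   if (pending == 0) {
 *                           // Data is ready to be read by a viewer.
 *                           return 0;
 *                   }
 *
 *                   usleep(200000);
 *           }
 *   }
 */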
4098
4099 /*
4100 * Command LTTNG_SNAPSHOT_ADD_OUTPUT from the lttng ctl library.
4101 *
4102 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4103 */
4104 int cmd_snapshot_add_output(const ltt_session::locked_ref& session,
4105 const struct lttng_snapshot_output *output,
4106 uint32_t *id)
4107 {
4108 int ret;
4109 struct snapshot_output *new_output;
4110
4111 LTTNG_ASSERT(output);
4112
4113 DBG("Cmd snapshot add output for session %s", session->name);
4114
4115 /*
4116 * Can't create an output if the session is not set in no-output mode.
4117 */
4118 if (session->output_traces) {
4119 ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
4120 goto error;
4121 }
4122
4123 if (session->has_non_mmap_channel) {
4124 ret = LTTNG_ERR_SNAPSHOT_UNSUPPORTED;
4125 goto error;
4126 }
4127
4128 /* Only one output is allowed until we have the "tee" feature. */
4129 if (session->snapshot.nb_output == 1) {
4130 ret = LTTNG_ERR_SNAPSHOT_OUTPUT_EXIST;
4131 goto error;
4132 }
4133
4134 new_output = snapshot_output_alloc();
4135 if (!new_output) {
4136 ret = LTTNG_ERR_NOMEM;
4137 goto error;
4138 }
4139
4140 ret = snapshot_output_init(session,
4141 output->max_size,
4142 output->name,
4143 output->ctrl_url,
4144 output->data_url,
4145 session->consumer,
4146 new_output,
4147 &session->snapshot);
4148 if (ret < 0) {
4149 if (ret == -ENOMEM) {
4150 ret = LTTNG_ERR_NOMEM;
4151 } else {
4152 ret = LTTNG_ERR_INVALID;
4153 }
4154 goto free_error;
4155 }
4156
4157 snapshot_add_output(&session->snapshot, new_output);
4158 if (id) {
4159 *id = new_output->id;
4160 }
4161
4162 return LTTNG_OK;
4163
4164 free_error:
4165 snapshot_output_destroy(new_output);
4166 error:
4167 return ret;
4168 }
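
/*
 * Editor's note -- illustrative sketch only, not part of the original source.
 * This command is driven by the lttng-ctl snapshot API. A minimal client-side
 * sequence that registers an output and records a snapshot might look like the
 * following; the URL and session name are placeholders, passing a null output
 * to lttng_snapshot_record() is assumed to use the registered output(s), and
 * the exact prototypes should be checked against <lttng/snapshot.h>:
 *
 *   #include <lttng/lttng.h>
 *
 *   static int add_and_record_snapshot(void)
 *   {
 *           int ret;
 *           struct lttng_snapshot_output *output = lttng_snapshot_output_create();
 *
 *           if (!output) {
 *                   return -LTTNG_ERR_NOMEM;
 *           }
 *
 *           ret = lttng_snapshot_output_set_ctrl_url("net://localhost", output);
 *           if (ret) {
 *                   goto end;
 *           }
 *
 *           ret = lttng_snapshot_add_output("my-session", output);
 *           if (ret) {
 *                   goto end;
 *           }
 *
 *           // The wait flag is ignored by the session daemon, which always
 *           // waits for the snapshot to complete (see cmd_snapshot_record()).
 *           ret = lttng_snapshot_record("my-session", nullptr, 0);
 *   end:
 *           lttng_snapshot_output_destroy(output);
 *           return ret;
 *   }
 */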
4169
4170 /*
4171 * Command LTTNG_SNAPSHOT_DEL_OUTPUT from lib lttng ctl.
4172 *
4173 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4174 */
4175 int cmd_snapshot_del_output(const ltt_session::locked_ref& session,
4176 const struct lttng_snapshot_output *output)
4177 {
4178 int ret;
4179 struct snapshot_output *sout = nullptr;
4180
4181 LTTNG_ASSERT(output);
4182
4183 const lttng::urcu::read_lock_guard read_lock;
4184
4185 /*
4186 * Permission denied to delete an output if the session is not
4187 * set in no-output mode.
4188 */
4189 if (session->output_traces) {
4190 ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
4191 goto error;
4192 }
4193
4194 if (output->id) {
4195 DBG("Cmd snapshot del output id %" PRIu32 " for session %s",
4196 output->id,
4197 session->name);
4198 sout = snapshot_find_output_by_id(output->id, &session->snapshot);
4199 } else if (*output->name != '\0') {
4200 DBG("Cmd snapshot del output name %s for session %s", output->name, session->name);
4201 sout = snapshot_find_output_by_name(output->name, &session->snapshot);
4202 }
4203 if (!sout) {
4204 ret = LTTNG_ERR_INVALID;
4205 goto error;
4206 }
4207
4208 snapshot_delete_output(&session->snapshot, sout);
4209 snapshot_output_destroy(sout);
4210 ret = LTTNG_OK;
4211
4212 error:
4213 return ret;
4214 }
4215
4216 /*
4217 * Command LTTNG_SNAPSHOT_LIST_OUTPUT from lib lttng ctl.
4218 *
4219 * If no output is available, outputs is untouched and 0 is returned.
4220 *
4221 * Return the number of newly allocated outputs or a negative LTTNG_ERR code.
4222 */
4223 ssize_t cmd_snapshot_list_outputs(const ltt_session::locked_ref& session,
4224 struct lttng_snapshot_output **outputs)
4225 {
4226 int ret, idx = 0;
4227 struct lttng_snapshot_output *list = nullptr;
4228 struct lttng_ht_iter iter;
4229 struct snapshot_output *output;
4230
4231 LTTNG_ASSERT(outputs);
4232
4233 DBG("Cmd snapshot list outputs for session %s", session->name);
4234
4235 /*
4236 * Permission denied to list the outputs if the session is not
4237 * set in no-output mode.
4238 */
4239 if (session->output_traces) {
4240 ret = -LTTNG_ERR_NOT_SNAPSHOT_SESSION;
4241 goto end;
4242 }
4243
4244 if (session->snapshot.nb_output == 0) {
4245 ret = 0;
4246 goto end;
4247 }
4248
4249 list = calloc<lttng_snapshot_output>(session->snapshot.nb_output);
4250 if (!list) {
4251 ret = -LTTNG_ERR_NOMEM;
4252 goto end;
4253 }
4254
4255 /* Copy list from session to the new list object. */
4256 {
4257 const lttng::urcu::read_lock_guard read_lock;
4258
4259 cds_lfht_for_each_entry (
4260 session->snapshot.output_ht->ht, &iter.iter, output, node.node) {
4261 LTTNG_ASSERT(output->consumer);
4262 list[idx].id = output->id;
4263 list[idx].max_size = output->max_size;
4264 if (lttng_strncpy(list[idx].name, output->name, sizeof(list[idx].name))) {
4265 ret = -LTTNG_ERR_INVALID;
4266 goto error;
4267 }
4268
4269 if (output->consumer->type == CONSUMER_DST_LOCAL) {
4270 if (lttng_strncpy(list[idx].ctrl_url,
4271 output->consumer->dst.session_root_path,
4272 sizeof(list[idx].ctrl_url))) {
4273 ret = -LTTNG_ERR_INVALID;
4274 goto error;
4275 }
4276 } else {
4277 /* Control URI. */
4278 ret = uri_to_str_url(&output->consumer->dst.net.control,
4279 list[idx].ctrl_url,
4280 sizeof(list[idx].ctrl_url));
4281 if (ret < 0) {
4282 ret = -LTTNG_ERR_NOMEM;
4283 goto error;
4284 }
4285
4286 /* Data URI. */
4287 ret = uri_to_str_url(&output->consumer->dst.net.data,
4288 list[idx].data_url,
4289 sizeof(list[idx].data_url));
4290 if (ret < 0) {
4291 ret = -LTTNG_ERR_NOMEM;
4292 goto error;
4293 }
4294 }
4295
4296 idx++;
4297 }
4298 }
4299
4300 *outputs = list;
4301 list = nullptr;
4302 ret = session->snapshot.nb_output;
4303 error:
4304 free(list);
4305 end:
4306 return ret;
4307 }
4308
4309 /*
4310 * Check if we can regenerate the metadata for this session.
4311 * Only kernel, UST per-uid and non-live sessions are supported.
4312 *
4313 * Return 0 if the metadata can be generated, a LTTNG_ERR code otherwise.
4314 */
4315 static int check_regenerate_metadata_support(const ltt_session::locked_ref& session)
4316 {
4317 int ret;
4318
4319 if (session->live_timer != 0) {
4320 ret = LTTNG_ERR_LIVE_SESSION;
4321 goto end;
4322 }
4323 if (!session->active) {
4324 ret = LTTNG_ERR_SESSION_NOT_STARTED;
4325 goto end;
4326 }
4327 if (session->ust_session) {
4328 switch (session->ust_session->buffer_type) {
4329 case LTTNG_BUFFER_PER_UID:
4330 break;
4331 case LTTNG_BUFFER_PER_PID:
4332 ret = LTTNG_ERR_PER_PID_SESSION;
4333 goto end;
4334 default:
4335 abort();
4336 ret = LTTNG_ERR_UNK;
4337 goto end;
4338 }
4339 }
4340 if (session->consumer->type == CONSUMER_DST_NET &&
4341 session->consumer->relay_minor_version < 8) {
4342 ret = LTTNG_ERR_RELAYD_VERSION_FAIL;
4343 goto end;
4344 }
4345 ret = 0;
4346
4347 end:
4348 return ret;
4349 }
4350
4351 /*
4352 * Command LTTNG_REGENERATE_METADATA from the lttng-ctl library.
4353 *
4354 * Ask the consumer to truncate the existing metadata file(s) and
4355 * then regenerate the metadata. Live and per-pid sessions are not
4356 * supported and return an error.
4357 *
4358 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4359 */
4360 int cmd_regenerate_metadata(const ltt_session::locked_ref& session)
4361 {
4362 int ret;
4363
4364 ret = check_regenerate_metadata_support(session);
4365 if (ret) {
4366 goto end;
4367 }
4368
4369 if (session->kernel_session) {
4370 ret = kernctl_session_regenerate_metadata(session->kernel_session->fd);
4371 if (ret < 0) {
4372 ERR("Failed to regenerate the kernel metadata");
4373 goto end;
4374 }
4375 }
4376
4377 if (session->ust_session) {
4378 ret = trace_ust_regenerate_metadata(session->ust_session);
4379 if (ret < 0) {
4380 ERR("Failed to regenerate the UST metadata");
4381 goto end;
4382 }
4383 }
4384 DBG("Cmd metadata regenerate for session %s", session->name);
4385 ret = LTTNG_OK;
4386
4387 end:
4388 return ret;
4389 }
4390
4391 /*
4392 * Command LTTNG_REGENERATE_STATEDUMP from the lttng-ctl library.
4393 *
4394 * Ask the tracer to regenerate a new statedump.
4395 *
4396 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4397 */
4398 int cmd_regenerate_statedump(const ltt_session::locked_ref& session)
4399 {
4400 int ret;
4401
4402 if (!session->active) {
4403 ret = LTTNG_ERR_SESSION_NOT_STARTED;
4404 goto end;
4405 }
4406
4407 if (session->kernel_session) {
4408 ret = kernctl_session_regenerate_statedump(session->kernel_session->fd);
4409 /*
4410 * Currently, the statedump in kernel can only fail if out
4411 * of memory.
4412 */
4413 if (ret < 0) {
4414 if (ret == -ENOMEM) {
4415 ret = LTTNG_ERR_REGEN_STATEDUMP_NOMEM;
4416 } else {
4417 ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
4418 }
4419 ERR("Failed to regenerate the kernel statedump");
4420 goto end;
4421 }
4422 }
4423
4424 if (session->ust_session) {
4425 ret = ust_app_regenerate_statedump_all(session->ust_session);
4426 /*
4427 * Currently, the statedump in UST always returns 0.
4428 */
4429 if (ret < 0) {
4430 ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
4431 ERR("Failed to regenerate the UST statedump");
4432 goto end;
4433 }
4434 }
4435 DBG("Cmd regenerate statedump for session %s", session->name);
4436 ret = LTTNG_OK;
4437
4438 end:
4439 return ret;
4440 }
4441
4442 static enum lttng_error_code
4443 synchronize_tracer_notifier_register(struct notification_thread_handle *notification_thread,
4444 struct lttng_trigger *trigger,
4445 const struct lttng_credentials *cmd_creds)
4446 {
4447 enum lttng_error_code ret_code;
4448 const struct lttng_condition *condition = lttng_trigger_get_const_condition(trigger);
4449 const char *trigger_name;
4450 uid_t trigger_owner;
4451 enum lttng_trigger_status trigger_status;
4452 const enum lttng_domain_type trigger_domain =
4453 lttng_trigger_get_underlying_domain_type_restriction(trigger);
4454
4455 trigger_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner);
4456 LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
4457
4458 LTTNG_ASSERT(condition);
4459 LTTNG_ASSERT(lttng_condition_get_type(condition) ==
4460 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
4461
4462 trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
4463 trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
4464
4465 const auto list_lock = lttng::sessiond::lock_session_list();
4466 switch (trigger_domain) {
4467 case LTTNG_DOMAIN_KERNEL:
4468 {
4469 ret_code = kernel_register_event_notifier(trigger, cmd_creds);
4470 if (ret_code != LTTNG_OK) {
4471 enum lttng_error_code notif_thread_unregister_ret;
4472
4473 notif_thread_unregister_ret =
4474 notification_thread_command_unregister_trigger(notification_thread,
4475 trigger);
4476
4477 if (notif_thread_unregister_ret != LTTNG_OK) {
4478 /* Return the original error code. */
4479 ERR("Failed to unregister trigger from notification thread during error recovery: trigger name = '%s', trigger owner uid = %d, error code = %d",
4480 trigger_name,
4481 (int) trigger_owner,
4482 ret_code);
4483 }
4484
4485 return ret_code;
4486 }
4487 break;
4488 }
4489 case LTTNG_DOMAIN_UST:
4490 ust_app_global_update_all_event_notifier_rules();
4491 break;
4492 case LTTNG_DOMAIN_JUL:
4493 case LTTNG_DOMAIN_LOG4J:
4494 case LTTNG_DOMAIN_PYTHON:
4495 {
4496 /* Agent domains. */
4497 struct agent *agt = agent_find_by_event_notifier_domain(trigger_domain);
4498
4499 if (!agt) {
4500 agt = agent_create(trigger_domain);
4501 if (!agt) {
4502 ret_code = LTTNG_ERR_NOMEM;
4503 return ret_code;
4504 }
4505
4506 agent_add(agt, the_trigger_agents_ht_by_domain);
4507 }
4508
4509 ret_code = (lttng_error_code) trigger_agent_enable(trigger, agt);
4510 if (ret_code != LTTNG_OK) {
4511 return ret_code;
4512 }
4513
4514 break;
4515 }
4516 case LTTNG_DOMAIN_NONE:
4517 default:
4518 abort();
4519 }
4520
4521 return LTTNG_OK;
4522 }
4523
4524 lttng::ctl::trigger cmd_register_trigger(const struct lttng_credentials *cmd_creds,
4525 struct lttng_trigger *trigger,
4526 bool is_trigger_anonymous,
4527 struct notification_thread_handle *notification_thread)
4528 {
4529 enum lttng_error_code ret_code;
4530 const char *trigger_name;
4531 uid_t trigger_owner;
4532 enum lttng_trigger_status trigger_status;
4533
4534 trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
4535 trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
4536
4537 trigger_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner);
4538 LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
4539
4540 DBG("Running register trigger command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4541 trigger_name,
4542 (int) trigger_owner,
4543 (int) lttng_credentials_get_uid(cmd_creds));
4544
4545 /*
4546 * Validate the trigger credentials against the command credentials.
4547 * Only the root user can register a trigger with non-matching
4548 * credentials.
4549 */
4550 if (!lttng_credentials_is_equal_uid(lttng_trigger_get_credentials(trigger), cmd_creds)) {
4551 if (lttng_credentials_get_uid(cmd_creds) != 0) {
4552 LTTNG_THROW_CTL(
4553 fmt::format(
4554 "Trigger credentials do not match the command credentials: trigger_name = `{}`, trigger_owner_uid={}, command_creds_uid={}",
4555 trigger_name,
4556 trigger_owner,
4557 lttng_credentials_get_uid(cmd_creds)),
4558 LTTNG_ERR_INVALID_TRIGGER);
4559 }
4560 }
4561
4562 /*
4563 * The bytecode generation also serves as a validation step for the
4564 * bytecode expressions.
4565 */
4566 ret_code = lttng_trigger_generate_bytecode(trigger, cmd_creds);
4567 if (ret_code != LTTNG_OK) {
4568 LTTNG_THROW_CTL(
4569 fmt::format(
4570 "Failed to generate bytecode of trigger: trigger_name=`{}`, trigger_owner_uid={}",
4571 trigger_name,
4572 trigger_owner),
4573 ret_code);
4574 }
4575
4576 /*
4577 * A reference to the trigger is acquired by the notification thread.
4578 * It is safe to return the same trigger to the caller since the
4579 * other user holds a reference.
4580 *
4581 * The trigger is modified during the execution of the
4582 * "register trigger" command. However, by the time the command returns,
4583 * it is safe to use without any locking as its properties are
4584 * immutable.
4585 */
4586 ret_code = notification_thread_command_register_trigger(
4587 notification_thread, trigger, is_trigger_anonymous);
4588 if (ret_code != LTTNG_OK) {
4589 LTTNG_THROW_CTL(
4590 fmt::format(
4591 "Failed to register trigger to notification thread: trigger_name=`{}`, trigger_owner_uid={}",
4592 trigger_name,
4593 trigger_owner),
4594 ret_code);
4595 }
4596
4597 trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
4598 trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
4599
4600 /*
4601 * Synchronize tracers if the trigger adds an event notifier.
4602 */
4603 if (lttng_trigger_needs_tracer_notifier(trigger)) {
4604 ret_code = synchronize_tracer_notifier_register(
4605 notification_thread, trigger, cmd_creds);
4606 if (ret_code != LTTNG_OK) {
4607 LTTNG_THROW_CTL("Failed to register tracer notifier", ret_code);
4608 }
4609 }
4610
4611 /*
4612 * Return an updated trigger to the client.
4613 *
4614 * Since a modified version of the same trigger is returned, acquire a
4615 * reference to the trigger so the caller doesn't have to care if those
4616 * are distinct instances or not.
4617 */
4618 LTTNG_ASSERT(ret_code == LTTNG_OK);
4619 lttng_trigger_get(trigger);
4620 return lttng::ctl::trigger(trigger);
4621 }
4622
4623 static enum lttng_error_code
4624 synchronize_tracer_notifier_unregister(const struct lttng_trigger *trigger)
4625 {
4626 enum lttng_error_code ret_code;
4627 const struct lttng_condition *condition = lttng_trigger_get_const_condition(trigger);
4628 const enum lttng_domain_type trigger_domain =
4629 lttng_trigger_get_underlying_domain_type_restriction(trigger);
4630
4631 LTTNG_ASSERT(condition);
4632 LTTNG_ASSERT(lttng_condition_get_type(condition) ==
4633 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
4634
4635 const auto list_lock = lttng::sessiond::lock_session_list();
4636 switch (trigger_domain) {
4637 case LTTNG_DOMAIN_KERNEL:
4638 ret_code = kernel_unregister_event_notifier(trigger);
4639 if (ret_code != LTTNG_OK) {
4640 return ret_code;
4641 }
4642
4643 break;
4644 case LTTNG_DOMAIN_UST:
4645 ust_app_global_update_all_event_notifier_rules();
4646 break;
4647 case LTTNG_DOMAIN_JUL:
4648 case LTTNG_DOMAIN_LOG4J:
4649 case LTTNG_DOMAIN_PYTHON:
4650 {
4651 /* Agent domains. */
4652 struct agent *agt = agent_find_by_event_notifier_domain(trigger_domain);
4653
4654 /*
4655 * This trigger was never registered in the first place. Calling
4656 * this function under those circumstances is an internal error.
4657 */
4658 LTTNG_ASSERT(agt);
4659 ret_code = (lttng_error_code) trigger_agent_disable(trigger, agt);
4660 if (ret_code != LTTNG_OK) {
4661 return ret_code;
4662 }
4663
4664 break;
4665 }
4666 case LTTNG_DOMAIN_NONE:
4667 default:
4668 abort();
4669 }
4670
4671 return LTTNG_OK;
4672 }
4673
4674 enum lttng_error_code cmd_unregister_trigger(const struct lttng_credentials *cmd_creds,
4675 const struct lttng_trigger *trigger,
4676 struct notification_thread_handle *notification_thread)
4677 {
4678 enum lttng_error_code ret_code;
4679 const char *trigger_name;
4680 uid_t trigger_owner;
4681 enum lttng_trigger_status trigger_status;
4682 struct lttng_trigger *sessiond_trigger = nullptr;
4683
4684 trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
4685 trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
4686 trigger_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner);
4687 LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
4688
4689 DBG("Running unregister trigger command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4690 trigger_name,
4691 (int) trigger_owner,
4692 (int) lttng_credentials_get_uid(cmd_creds));
4693
4694 /*
4695 * Validate the trigger credentials against the command credentials.
4696 * Only the root user can unregister a trigger with non-matching
4697 * credentials.
4698 */
4699 if (!lttng_credentials_is_equal_uid(lttng_trigger_get_credentials(trigger), cmd_creds)) {
4700 if (lttng_credentials_get_uid(cmd_creds) != 0) {
4701 ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4702 trigger_name,
4703 (int) trigger_owner,
4704 (int) lttng_credentials_get_uid(cmd_creds));
4705 ret_code = LTTNG_ERR_INVALID_TRIGGER;
4706 goto end;
4707 }
4708 }
4709
4710 /* Fetch the sessiond side trigger object. */
4711 ret_code = notification_thread_command_get_trigger(
4712 notification_thread, trigger, &sessiond_trigger);
4713 if (ret_code != LTTNG_OK) {
4714 DBG("Failed to get trigger from notification thread during unregister: trigger name = '%s', trigger owner uid = %d, error code = %d",
4715 trigger_name,
4716 (int) trigger_owner,
4717 ret_code);
4718 goto end;
4719 }
4720
4721 LTTNG_ASSERT(sessiond_trigger);
4722
4723 /*
4724 * From this point on, no matter what, consider the trigger
4725 * unregistered.
4726 *
4727 * We set the unregistered state of the sessiond side trigger object in
4728 * the client thread since we want to minimize the possibility of the
4729 * notification thread being stalled due to a long execution of an
4730 * action that required the trigger lock.
4731 */
4732 lttng_trigger_set_as_unregistered(sessiond_trigger);
4733
4734 ret_code = notification_thread_command_unregister_trigger(notification_thread, trigger);
4735 if (ret_code != LTTNG_OK) {
4736 DBG("Failed to unregister trigger from notification thread: trigger name = '%s', trigger owner uid = %d, error code = %d",
4737 trigger_name,
4738 (int) trigger_owner,
4739 ret_code);
4740 goto end;
4741 }
4742
4743 /*
4744 * Synchronize tracers if the trigger removes an event notifier.
4745 * Do this even if the trigger unregistration failed to at least stop
4746 * the tracers from producing notifications associated with this
4747 * event notifier.
4748 */
4749 if (lttng_trigger_needs_tracer_notifier(trigger)) {
4750 ret_code = synchronize_tracer_notifier_unregister(trigger);
4751 if (ret_code != LTTNG_OK) {
4752 ERR("Error unregistering trigger to tracer.");
4753 goto end;
4754 }
4755 }
4756
4757 end:
4758 lttng_trigger_put(sessiond_trigger);
4759 return ret_code;
4760 }
4761
4762 enum lttng_error_code cmd_list_triggers(struct command_ctx *cmd_ctx,
4763 struct notification_thread_handle *notification_thread,
4764 struct lttng_triggers **return_triggers)
4765 {
4766 int ret;
4767 enum lttng_error_code ret_code;
4768 struct lttng_triggers *triggers = nullptr;
4769
4770 /* Get the set of triggers from the notification thread. */
4771 ret_code = notification_thread_command_list_triggers(
4772 notification_thread, cmd_ctx->creds.uid, &triggers);
4773 if (ret_code != LTTNG_OK) {
4774 goto end;
4775 }
4776
4777 ret = lttng_triggers_remove_hidden_triggers(triggers);
4778 if (ret) {
4779 ret_code = LTTNG_ERR_UNK;
4780 goto end;
4781 }
4782
4783 *return_triggers = triggers;
4784 triggers = nullptr;
4785 ret_code = LTTNG_OK;
4786 end:
4787 lttng_triggers_destroy(triggers);
4788 return ret_code;
4789 }
4790
4791 enum lttng_error_code
4792 cmd_execute_error_query(const struct lttng_credentials *cmd_creds,
4793 const struct lttng_error_query *query,
4794 struct lttng_error_query_results **_results,
4795 struct notification_thread_handle *notification_thread)
4796 {
4797 enum lttng_error_code ret_code;
4798 const struct lttng_trigger *query_target_trigger;
4799 const struct lttng_action *query_target_action = nullptr;
4800 struct lttng_trigger *matching_trigger = nullptr;
4801 const char *trigger_name;
4802 uid_t trigger_owner;
4803 enum lttng_trigger_status trigger_status;
4804 struct lttng_error_query_results *results = nullptr;
4805
4806 switch (lttng_error_query_get_target_type(query)) {
4807 case LTTNG_ERROR_QUERY_TARGET_TYPE_TRIGGER:
4808 query_target_trigger = lttng_error_query_trigger_borrow_target(query);
4809 break;
4810 case LTTNG_ERROR_QUERY_TARGET_TYPE_CONDITION:
4811 query_target_trigger = lttng_error_query_condition_borrow_target(query);
4812 break;
4813 case LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION:
4814 query_target_trigger = lttng_error_query_action_borrow_trigger_target(query);
4815 break;
4816 default:
4817 abort();
4818 }
4819
4820 LTTNG_ASSERT(query_target_trigger);
4821
4822 ret_code = notification_thread_command_get_trigger(
4823 notification_thread, query_target_trigger, &matching_trigger);
4824 if (ret_code != LTTNG_OK) {
4825 goto end;
4826 }
4827
4828 /* No longer needed. */
4829 query_target_trigger = nullptr;
4830
4831 if (lttng_error_query_get_target_type(query) == LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION) {
4832 /* Get the sessiond-side version of the target action. */
4833 query_target_action =
4834 lttng_error_query_action_borrow_action_target(query, matching_trigger);
4835 }
4836
4837 trigger_status = lttng_trigger_get_name(matching_trigger, &trigger_name);
4838 trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
4839 trigger_status = lttng_trigger_get_owner_uid(matching_trigger, &trigger_owner);
4840 LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
4841
4842 results = lttng_error_query_results_create();
4843 if (!results) {
4844 ret_code = LTTNG_ERR_NOMEM;
4845 goto end;
4846 }
4847
4848 DBG("Running \"execute error query\" command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4849 trigger_name,
4850 (int) trigger_owner,
4851 (int) lttng_credentials_get_uid(cmd_creds));
4852
4853 /*
4854 * Validate the trigger credentials against the command credentials.
4855 * Only the root user can target a trigger with non-matching
4856 * credentials.
4857 */
4858 if (!lttng_credentials_is_equal_uid(lttng_trigger_get_credentials(matching_trigger),
4859 cmd_creds)) {
4860 if (lttng_credentials_get_uid(cmd_creds) != 0) {
4861 ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4862 trigger_name,
4863 (int) trigger_owner,
4864 (int) lttng_credentials_get_uid(cmd_creds));
4865 ret_code = LTTNG_ERR_INVALID_TRIGGER;
4866 goto end;
4867 }
4868 }
4869
4870 switch (lttng_error_query_get_target_type(query)) {
4871 case LTTNG_ERROR_QUERY_TARGET_TYPE_TRIGGER:
4872 trigger_status = lttng_trigger_add_error_results(matching_trigger, results);
4873
4874 switch (trigger_status) {
4875 case LTTNG_TRIGGER_STATUS_OK:
4876 break;
4877 default:
4878 ret_code = LTTNG_ERR_UNK;
4879 goto end;
4880 }
4881
4882 break;
4883 case LTTNG_ERROR_QUERY_TARGET_TYPE_CONDITION:
4884 {
4885 trigger_status =
4886 lttng_trigger_condition_add_error_results(matching_trigger, results);
4887
4888 switch (trigger_status) {
4889 case LTTNG_TRIGGER_STATUS_OK:
4890 break;
4891 default:
4892 ret_code = LTTNG_ERR_UNK;
4893 goto end;
4894 }
4895
4896 break;
4897 }
4898 case LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION:
4899 {
4900 const enum lttng_action_status action_status =
4901 lttng_action_add_error_query_results(query_target_action, results);
4902
4903 switch (action_status) {
4904 case LTTNG_ACTION_STATUS_OK:
4905 break;
4906 default:
4907 ret_code = LTTNG_ERR_UNK;
4908 goto end;
4909 }
4910
4911 break;
4912 }
4913 default:
4914 abort();
4915 break;
4916 }
4917
4918 *_results = results;
4919 results = nullptr;
4920 ret_code = LTTNG_OK;
4921 end:
4922 lttng_trigger_put(matching_trigger);
4923 lttng_error_query_results_destroy(results);
4924 return ret_code;
4925 }
4926
4927 /*
4928 * Send relayd sockets from snapshot output to consumer. Ignore request if the
4929 * snapshot output is *not* set with a remote destination.
4930 *
4931 * Return LTTNG_OK on success or a LTTNG_ERR code.
4932 */
4933 static enum lttng_error_code set_relayd_for_snapshot(struct consumer_output *output,
4934 const ltt_session::locked_ref& session)
4935 {
4936 enum lttng_error_code status = LTTNG_OK;
4937 struct lttng_ht_iter iter;
4938 struct consumer_socket *socket;
4939 LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
4940 const char *base_path;
4941
4942 LTTNG_ASSERT(output);
4943
4944 DBG2("Set relayd object from snapshot output");
4945
4946 if (session->current_trace_chunk) {
4947 const lttng_trace_chunk_status chunk_status = lttng_trace_chunk_get_id(
4948 session->current_trace_chunk, &current_chunk_id.value);
4949
4950 if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK) {
4951 current_chunk_id.is_set = true;
4952 } else {
4953 ERR("Failed to get current trace chunk id");
4954 status = LTTNG_ERR_UNK;
4955 goto error;
4956 }
4957 }
4958
4959 /* Ignore if snapshot consumer output is not network. */
4960 if (output->type != CONSUMER_DST_NET) {
4961 goto error;
4962 }
4963
4964 /*
4965 * The snapshot record URI base path overrides the session
4966 * base path.
4967 */
4968 if (output->dst.net.control.subdir[0] != '\0') {
4969 base_path = output->dst.net.control.subdir;
4970 } else {
4971 base_path = session->base_path;
4972 }
4973
4974 /*
4975 * For each consumer socket, create and send the relayd object of the
4976 * snapshot output.
4977 */
4978 {
4979 const lttng::urcu::read_lock_guard read_lock;
4980
4981 cds_lfht_for_each_entry (output->socks->ht, &iter.iter, socket, node.node) {
4982 pthread_mutex_lock(socket->lock);
4983 status = send_consumer_relayd_sockets(
4984 session->id,
4985 output,
4986 socket,
4987 session->name,
4988 session->hostname,
4989 base_path,
4990 session->live_timer,
4991 current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
4992 session->creation_time,
4993 session->name_contains_creation_time);
4994 pthread_mutex_unlock(socket->lock);
4995 if (status != LTTNG_OK) {
4996 goto error;
4997 }
4998 }
4999 }
5000
5001 error:
5002 return status;
5003 }
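
/*
 * Editor's note -- illustrative sketch only, not part of the original source.
 * The LTTNG_OPTIONAL(uint64_t) used above pairs a value with an is_set flag so
 * that "no current trace chunk" can be distinguished from "chunk id 0". The
 * same idea expressed with std::optional, purely for illustration
 * (lookup_chunk_id and chunk_id_or_null are hypothetical helpers):
 *
 *   #include <cstdint>
 *   #include <optional>
 *
 *   static std::optional<std::uint64_t> lookup_chunk_id(bool has_chunk,
 *                                                       std::uint64_t raw_id)
 *   {
 *           if (!has_chunk) {
 *                   // Equivalent to leaving LTTNG_OPTIONAL's is_set at false.
 *                   return std::nullopt;
 *           }
 *
 *           return raw_id;
 *   }
 *
 *   static const std::uint64_t *chunk_id_or_null(const std::optional<std::uint64_t>& id)
 *   {
 *           // Mirrors the "current_chunk_id.is_set ? &current_chunk_id.value : nullptr"
 *           // expression passed to send_consumer_relayd_sockets() above.
 *           return id.has_value() ? &id.value() : nullptr;
 *   }
 */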
5004
5005 /*
5006 * Record a kernel snapshot.
5007 *
5008 * Return LTTNG_OK on success or a LTTNG_ERR code.
5009 */
5010 static enum lttng_error_code record_kernel_snapshot(struct ltt_kernel_session *ksess,
5011 const struct consumer_output *output,
5012 uint64_t nb_packets_per_stream)
5013 {
5014 enum lttng_error_code status;
5015
5016 LTTNG_ASSERT(ksess);
5017 LTTNG_ASSERT(output);
5018
5019 status = kernel_snapshot_record(ksess, output, nb_packets_per_stream);
5020 return status;
5021 }
5022
5023 /*
5024 * Record a UST snapshot.
5025 *
5026 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
5027 */
5028 static enum lttng_error_code record_ust_snapshot(struct ltt_ust_session *usess,
5029 const struct consumer_output *output,
5030 uint64_t nb_packets_per_stream)
5031 {
5032 enum lttng_error_code status;
5033
5034 LTTNG_ASSERT(usess);
5035 LTTNG_ASSERT(output);
5036
5037 status = ust_app_snapshot_record(usess, output, nb_packets_per_stream);
5038 return status;
5039 }
5040
5041 static uint64_t get_session_size_one_more_packet_per_stream(const ltt_session::locked_ref& session,
5042 uint64_t cur_nr_packets)
5043 {
5044 uint64_t tot_size = 0;
5045
5046 if (session->kernel_session) {
5047 struct ltt_kernel_channel *chan;
5048 const struct ltt_kernel_session *ksess = session->kernel_session;
5049
5050 cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
5051 if (cur_nr_packets >= chan->channel->attr.num_subbuf) {
5052 /*
5053 * Don't take the channel into account if we
5054 * already grab all of its packets.
5055 */
5056 continue;
5057 }
5058 tot_size += chan->channel->attr.subbuf_size * chan->stream_count;
5059 }
5060 }
5061
5062 if (session->ust_session) {
5063 const struct ltt_ust_session *usess = session->ust_session;
5064
5065 tot_size += ust_app_get_size_one_more_packet_per_stream(usess, cur_nr_packets);
5066 }
5067
5068 return tot_size;
5069 }
5070
5071 /*
5072 * Calculate the number of packets we can grab from each stream that
5073 * fits within the overall snapshot max size.
5074 *
5075 * Returns -1 on error, 0 means infinite number of packets, else > 0 is
5076 * the number of packets per stream.
5077 *
5078 * TODO: this approach is not perfect: we consider the worst case
5079 * (packet filling the sub-buffers) as an upper bound, but we could do
5080 * better if we do this calculation while we actually grab the packet
5081 * content: we would know how much padding we don't actually store into
5082 * the file.
5083 *
5084 * This algorithm is currently bounded by the number of packets per
5085 * stream.
5086 *
5087 * Since we call this algorithm before actually grabbing the data, it's
5088 * an approximation: for instance, applications could appear/disappear
5089 * in between this call and actually grabbing data.
5090 */
5091 static int64_t get_session_nb_packets_per_stream(const ltt_session::locked_ref& session,
5092 uint64_t max_size)
5093 {
5094 int64_t size_left;
5095 uint64_t cur_nb_packets = 0;
5096
5097 if (!max_size) {
5098 return 0; /* Infinite */
5099 }
5100
5101 size_left = max_size;
5102 for (;;) {
5103 uint64_t one_more_packet_tot_size;
5104
5105 one_more_packet_tot_size =
5106 get_session_size_one_more_packet_per_stream(session, cur_nb_packets);
5107 if (!one_more_packet_tot_size) {
5108 /* We are already grabbing all packets. */
5109 break;
5110 }
5111 size_left -= one_more_packet_tot_size;
5112 if (size_left < 0) {
5113 break;
5114 }
5115 cur_nb_packets++;
5116 }
5117 if (!cur_nb_packets && size_left != max_size) {
5118 /* Not enough room to grab one packet of each stream, error. */
5119 return -1;
5120 }
5121 return cur_nb_packets;
5122 }
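
/*
 * Editor's note -- illustrative, self-contained sketch of the computation
 * above, not part of the original source. With two streams of 64 KiB
 * sub-buffers, each extra packet per stream costs 128 KiB, so a 300 KiB
 * snapshot budget yields 2 packets per stream (a third round would overshoot).
 * The toy helper below mirrors the loop for a single, fixed per-round cost;
 * its name and signature are hypothetical:
 *
 *   #include <cstdint>
 *
 *   static std::int64_t toy_nb_packets_per_stream(std::uint64_t max_size,
 *                                                 std::uint64_t cost_per_packet_round)
 *   {
 *           if (!max_size) {
 *                   return 0; // Infinite, as in the real implementation.
 *           }
 *           if (!cost_per_packet_round) {
 *                   return 0; // Nothing more to grab; treat as unbounded.
 *           }
 *
 *           std::int64_t size_left = static_cast<std::int64_t>(max_size);
 *           std::uint64_t nb_packets = 0;
 *
 *           for (;;) {
 *                   size_left -= static_cast<std::int64_t>(cost_per_packet_round);
 *                   if (size_left < 0) {
 *                           break;
 *                   }
 *                   nb_packets++;
 *           }
 *
 *           // Not even one packet per stream fits: error, as in the real code.
 *           return nb_packets == 0 ? -1 : static_cast<std::int64_t>(nb_packets);
 *   }
 *
 *   // toy_nb_packets_per_stream(300 * 1024, 128 * 1024) == 2
 */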
5123
5124 static enum lttng_error_code snapshot_record(const ltt_session::locked_ref& session,
5125 const struct snapshot_output *snapshot_output)
5126 {
5127 int64_t nb_packets_per_stream;
5128 char snapshot_chunk_name[LTTNG_NAME_MAX];
5129 int ret;
5130 enum lttng_error_code ret_code = LTTNG_OK;
5131 struct lttng_trace_chunk *snapshot_trace_chunk;
5132 struct consumer_output *original_ust_consumer_output = nullptr;
5133 struct consumer_output *original_kernel_consumer_output = nullptr;
5134 struct consumer_output *snapshot_ust_consumer_output = nullptr;
5135 struct consumer_output *snapshot_kernel_consumer_output = nullptr;
5136
5137 ret = snprintf(snapshot_chunk_name,
5138 sizeof(snapshot_chunk_name),
5139 "%s-%s-%" PRIu64,
5140 snapshot_output->name,
5141 snapshot_output->datetime,
5142 snapshot_output->nb_snapshot);
5143 if (ret < 0 || ret >= sizeof(snapshot_chunk_name)) {
5144 ERR("Failed to format snapshot name");
5145 ret_code = LTTNG_ERR_INVALID;
5146 goto error;
5147 }
5148 DBG("Recording snapshot \"%s\" for session \"%s\" with chunk name \"%s\"",
5149 snapshot_output->name,
5150 session->name,
5151 snapshot_chunk_name);
5152 if (!session->kernel_session && !session->ust_session) {
5153 ERR("Failed to record snapshot as no channels exist");
5154 ret_code = LTTNG_ERR_NO_CHANNEL;
5155 goto error;
5156 }
5157
5158 if (session->kernel_session) {
5159 original_kernel_consumer_output = session->kernel_session->consumer;
5160 snapshot_kernel_consumer_output = consumer_copy_output(snapshot_output->consumer);
5161 strcpy(snapshot_kernel_consumer_output->chunk_path, snapshot_chunk_name);
5162
5163 /* Copy the original domain subdir. */
5164 strcpy(snapshot_kernel_consumer_output->domain_subdir,
5165 original_kernel_consumer_output->domain_subdir);
5166
5167 ret = consumer_copy_sockets(snapshot_kernel_consumer_output,
5168 original_kernel_consumer_output);
5169 if (ret < 0) {
5170 ERR("Failed to copy consumer sockets from snapshot output configuration");
5171 ret_code = LTTNG_ERR_NOMEM;
5172 goto error;
5173 }
5174 ret_code = set_relayd_for_snapshot(snapshot_kernel_consumer_output, session);
5175 if (ret_code != LTTNG_OK) {
5176 ERR("Failed to setup relay daemon for kernel tracer snapshot");
5177 goto error;
5178 }
5179 session->kernel_session->consumer = snapshot_kernel_consumer_output;
5180 }
5181 if (session->ust_session) {
5182 original_ust_consumer_output = session->ust_session->consumer;
5183 snapshot_ust_consumer_output = consumer_copy_output(snapshot_output->consumer);
5184 strcpy(snapshot_ust_consumer_output->chunk_path, snapshot_chunk_name);
5185
5186 /* Copy the original domain subdir. */
5187 strcpy(snapshot_ust_consumer_output->domain_subdir,
5188 original_ust_consumer_output->domain_subdir);
5189
5190 ret = consumer_copy_sockets(snapshot_ust_consumer_output,
5191 original_ust_consumer_output);
5192 if (ret < 0) {
5193 ERR("Failed to copy consumer sockets from snapshot output configuration");
5194 ret_code = LTTNG_ERR_NOMEM;
5195 goto error;
5196 }
5197 ret_code = set_relayd_for_snapshot(snapshot_ust_consumer_output, session);
5198 if (ret_code != LTTNG_OK) {
5199 ERR("Failed to setup relay daemon for userspace tracer snapshot");
5200 goto error;
5201 }
5202 session->ust_session->consumer = snapshot_ust_consumer_output;
5203 }
5204
5205 snapshot_trace_chunk = session_create_new_trace_chunk(
5206 session,
5207 snapshot_kernel_consumer_output ?: snapshot_ust_consumer_output,
5208 consumer_output_get_base_path(snapshot_output->consumer),
5209 snapshot_chunk_name);
5210 if (!snapshot_trace_chunk) {
5211 ERR("Failed to create temporary trace chunk to record a snapshot of session \"%s\"",
5212 session->name);
5213 ret_code = LTTNG_ERR_CREATE_DIR_FAIL;
5214 goto error;
5215 }
5216 LTTNG_ASSERT(!session->current_trace_chunk);
5217 ret = session_set_trace_chunk(session, snapshot_trace_chunk, nullptr);
5218 lttng_trace_chunk_put(snapshot_trace_chunk);
5219 snapshot_trace_chunk = nullptr;
5220 if (ret) {
5221 ERR("Failed to set temporary trace chunk to record a snapshot of session \"%s\"",
5222 session->name);
5223 ret_code = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
5224 goto error;
5225 }
5226
5227 nb_packets_per_stream =
5228 get_session_nb_packets_per_stream(session, snapshot_output->max_size);
5229 if (nb_packets_per_stream < 0) {
5230 ret_code = LTTNG_ERR_MAX_SIZE_INVALID;
5231 goto error_close_trace_chunk;
5232 }
5233
5234 if (session->kernel_session) {
5235 ret_code = record_kernel_snapshot(session->kernel_session,
5236 snapshot_kernel_consumer_output,
5237 nb_packets_per_stream);
5238 if (ret_code != LTTNG_OK) {
5239 goto error_close_trace_chunk;
5240 }
5241 }
5242
5243 if (session->ust_session) {
5244 ret_code = record_ust_snapshot(
5245 session->ust_session, snapshot_ust_consumer_output, nb_packets_per_stream);
5246 if (ret_code != LTTNG_OK) {
5247 goto error_close_trace_chunk;
5248 }
5249 }
5250
5251 error_close_trace_chunk:
5252 if (session_set_trace_chunk(session, nullptr, &snapshot_trace_chunk)) {
5253 ERR("Failed to release the current trace chunk of session \"%s\"", session->name);
5254 ret_code = LTTNG_ERR_UNK;
5255 }
5256
5257 if (session_close_trace_chunk(session,
5258 snapshot_trace_chunk,
5259 LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION,
5260 nullptr)) {
5261 /*
5262 * Don't goto end; make sure the chunk is closed for the session
5263 * to allow future snapshots.
5264 */
5265 ERR("Failed to close snapshot trace chunk of session \"%s\"", session->name);
5266 ret_code = LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
5267 }
5268
5269 lttng_trace_chunk_put(snapshot_trace_chunk);
5270 snapshot_trace_chunk = nullptr;
5271 error:
5272 if (original_ust_consumer_output) {
5273 session->ust_session->consumer = original_ust_consumer_output;
5274 }
5275 if (original_kernel_consumer_output) {
5276 session->kernel_session->consumer = original_kernel_consumer_output;
5277 }
5278 consumer_output_put(snapshot_ust_consumer_output);
5279 consumer_output_put(snapshot_kernel_consumer_output);
5280 return ret_code;
5281 }
5282
5283 /*
5284 * Command LTTNG_SNAPSHOT_RECORD from lib lttng ctl.
5285 *
5286 * The wait parameter is ignored, so this call always waits for the snapshot to
5287 * complete before returning.
5288 *
5289 * Return LTTNG_OK on success or else a LTTNG_ERR code.
5290 */
5291 int cmd_snapshot_record(const ltt_session::locked_ref& session,
5292 const struct lttng_snapshot_output *output,
5293 int wait __attribute__((unused)))
5294 {
5295 enum lttng_error_code cmd_ret = LTTNG_OK;
5296 int ret;
5297 unsigned int snapshot_success = 0;
5298 char datetime[16];
5299 struct snapshot_output *tmp_output = nullptr;
5300
5301 LTTNG_ASSERT(output);
5302
5303 DBG("Cmd snapshot record for session %s", session->name);
5304
5305 /* Get the datetime for the snapshot output directory. */
5306 ret = utils_get_current_time_str("%Y%m%d-%H%M%S", datetime, sizeof(datetime));
5307 if (!ret) {
5308 cmd_ret = LTTNG_ERR_INVALID;
5309 goto error;
5310 }
5311
5312 /*
5313 * Permission denied to record a snapshot if the session is not
5314 * set in no-output mode.
5315 */
5316 if (session->output_traces) {
5317 cmd_ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
5318 goto error;
5319 }
5320
5321 /* The session needs to be started at least once. */
5322 if (!session->has_been_started) {
5323 cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
5324 goto error;
5325 }
5326
5327 /* Use temporary output for the session. */
5328 if (*output->ctrl_url != '\0') {
5329 tmp_output = snapshot_output_alloc();
5330 if (!tmp_output) {
5331 cmd_ret = LTTNG_ERR_NOMEM;
5332 goto error;
5333 }
5334
5335 ret = snapshot_output_init(session,
5336 output->max_size,
5337 output->name,
5338 output->ctrl_url,
5339 output->data_url,
5340 session->consumer,
5341 tmp_output,
5342 nullptr);
5343 if (ret < 0) {
5344 if (ret == -ENOMEM) {
5345 cmd_ret = LTTNG_ERR_NOMEM;
5346 } else {
5347 cmd_ret = LTTNG_ERR_INVALID;
5348 }
5349 goto error;
5350 }
5351 /* Use the session's snapshot count for the temporary output. */
5352 tmp_output->nb_snapshot = session->snapshot.nb_snapshot;
5353
5354 /* Use the global datetime */
5355 memcpy(tmp_output->datetime, datetime, sizeof(datetime));
5356 cmd_ret = snapshot_record(session, tmp_output);
5357 if (cmd_ret != LTTNG_OK) {
5358 goto error;
5359 }
5360 snapshot_success = 1;
5361 } else {
5362 struct snapshot_output *sout;
5363 struct lttng_ht_iter iter;
5364
5365 const lttng::urcu::read_lock_guard read_lock;
5366
5367 cds_lfht_for_each_entry (
5368 session->snapshot.output_ht->ht, &iter.iter, sout, node.node) {
5369 struct snapshot_output output_copy;
5370
5371 /*
5372 * Make a local copy of the output and override output
5373 * parameters with those provided as part of the
5374 * command.
5375 */
5376 memcpy(&output_copy, sout, sizeof(output_copy));
5377
5378 if (output->max_size != (uint64_t) -1ULL) {
5379 output_copy.max_size = output->max_size;
5380 }
5381
5382 output_copy.nb_snapshot = session->snapshot.nb_snapshot;
5383 memcpy(output_copy.datetime, datetime, sizeof(datetime));
5384
5385 /* Use temporary name. */
5386 if (*output->name != '\0') {
5387 if (lttng_strncpy(output_copy.name,
5388 output->name,
5389 sizeof(output_copy.name))) {
5390 cmd_ret = LTTNG_ERR_INVALID;
5391 goto error;
5392 }
5393 }
5394
5395 cmd_ret = snapshot_record(session, &output_copy);
5396 if (cmd_ret != LTTNG_OK) {
5397 goto error;
5398 }
5399
5400 snapshot_success = 1;
5401 }
5402 }
5403
5404 if (snapshot_success) {
5405 session->snapshot.nb_snapshot++;
5406 } else {
5407 cmd_ret = LTTNG_ERR_SNAPSHOT_FAIL;
5408 }
5409
5410 error:
5411 if (tmp_output) {
5412 snapshot_output_destroy(tmp_output);
5413 }
5414
5415 return cmd_ret;
5416 }
5417
5418 /*
5419 * Command LTTNG_SET_SESSION_SHM_PATH processed by the client thread.
5420 */
5421 int cmd_set_session_shm_path(const ltt_session::locked_ref& session, const char *shm_path)
5422 {
5423 /*
5424 * Can only set shm path before session is started.
5425 */
5426 if (session->has_been_started) {
5427 return LTTNG_ERR_SESSION_STARTED;
5428 }
5429
5430 strncpy(session->shm_path, shm_path, sizeof(session->shm_path));
5431 session->shm_path[sizeof(session->shm_path) - 1] = '\0';
5432
5433 return LTTNG_OK;
5434 }
5435
5436 /*
5437 * Command LTTNG_ROTATE_SESSION from the lttng-ctl library.
5438 *
5439 * Ask the consumer to rotate the session output directory.
5440 * The session lock must be held.
5441 *
5442 * Returns LTTNG_OK on success or else a negative LTTng error code.
5443 */
5444 int cmd_rotate_session(const ltt_session::locked_ref& session,
5445 struct lttng_rotate_session_return *rotate_return,
5446 bool quiet_rotation,
5447 enum lttng_trace_chunk_command_type command)
5448 {
5449 int ret;
5450 uint64_t ongoing_rotation_chunk_id;
5451 enum lttng_error_code cmd_ret = LTTNG_OK;
5452 struct lttng_trace_chunk *chunk_being_archived = nullptr;
5453 struct lttng_trace_chunk *new_trace_chunk = nullptr;
5454 enum lttng_trace_chunk_status chunk_status;
5455 bool failed_to_rotate = false;
5456 enum lttng_error_code rotation_fail_code = LTTNG_OK;
5457
5458 if (!session->has_been_started) {
5459 cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
5460 goto end;
5461 }
5462
5463 /*
5464 * Explicit rotation is not supported for live sessions.
5465 * However, live sessions can perform a quiet rotation on
5466 * destroy.
5467 * Rotation is not supported for snapshot traces (no output).
5468 */
5469 if ((!quiet_rotation && session->live_timer) || !session->output_traces) {
5470 cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
5471 goto end;
5472 }
5473
5474 /* Unsupported feature in lttng-relayd before 2.11. */
5475 if (!quiet_rotation && session->consumer->type == CONSUMER_DST_NET &&
5476 (session->consumer->relay_major_version == 2 &&
5477 session->consumer->relay_minor_version < 11)) {
5478 cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE_RELAY;
5479 goto end;
5480 }
5481
5482 /* Unsupported feature in lttng-modules before 2.8 (lack of sequence number). */
5483 if (session->kernel_session && !kernel_supports_ring_buffer_packet_sequence_number()) {
5484 cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE_KERNEL;
5485 goto end;
5486 }
5487
5488 if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
5489 DBG("Refusing to launch a rotation; a rotation is already in progress for session %s",
5490 session->name);
5491 cmd_ret = LTTNG_ERR_ROTATION_PENDING;
5492 goto end;
5493 }
5494
5495 /*
5496 * After a stop, we only allow one rotation to occur; the other ones are
5497 * useless until a new start.
5498 */
5499 if (session->rotated_after_last_stop) {
5500 DBG("Session \"%s\" was already rotated after stop, refusing rotation",
5501 session->name);
5502 cmd_ret = LTTNG_ERR_ROTATION_MULTIPLE_AFTER_STOP;
5503 goto end;
5504 }
5505
5506 /*
5507 * After a stop followed by a clear, disallow further rotations as they would
5508 * generate empty chunks.
5509 */
5510 if (session->cleared_after_last_stop) {
5511 DBG("Session \"%s\" was already cleared after stop, refusing rotation",
5512 session->name);
5513 cmd_ret = LTTNG_ERR_ROTATION_AFTER_STOP_CLEAR;
5514 goto end;
5515 }
5516
5517 if (session->active) {
5518 new_trace_chunk =
5519 session_create_new_trace_chunk(session, nullptr, nullptr, nullptr);
5520 if (!new_trace_chunk) {
5521 cmd_ret = LTTNG_ERR_CREATE_DIR_FAIL;
5522 goto error;
5523 }
5524 }
5525
5526 /*
5527 * The current trace chunk becomes the chunk being archived.
5528 *
5529 * After this point, "chunk_being_archived" must absolutely
5530 * be closed on the consumer(s), otherwise it will never be
5531 * cleaned-up, which will result in a leak.
5532 */
5533 ret = session_set_trace_chunk(session, new_trace_chunk, &chunk_being_archived);
5534 if (ret) {
5535 cmd_ret = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
5536 goto error;
5537 }
5538
5539 if (session->kernel_session) {
5540 cmd_ret = kernel_rotate_session(session);
5541 if (cmd_ret != LTTNG_OK) {
5542 failed_to_rotate = true;
5543 rotation_fail_code = cmd_ret;
5544 }
5545 }
5546 if (session->ust_session) {
5547 cmd_ret = ust_app_rotate_session(session);
5548 if (cmd_ret != LTTNG_OK) {
5549 failed_to_rotate = true;
5550 rotation_fail_code = cmd_ret;
5551 }
5552 }
5553
5554 if (!session->active) {
5555 session->rotated_after_last_stop = true;
5556 }
5557
5558 if (!chunk_being_archived) {
5559 DBG("Rotating session \"%s\" from a \"NULL\" trace chunk to a new trace chunk, skipping completion check",
5560 session->name);
5561 if (failed_to_rotate) {
5562 cmd_ret = rotation_fail_code;
5563 goto error;
5564 }
5565 cmd_ret = LTTNG_OK;
5566 goto end;
5567 }
5568
5569 session->rotation_state = LTTNG_ROTATION_STATE_ONGOING;
5570 chunk_status = lttng_trace_chunk_get_id(chunk_being_archived, &ongoing_rotation_chunk_id);
5571 LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
5572
5573 ret = session_close_trace_chunk(
5574 session, chunk_being_archived, command, session->last_chunk_path);
5575 if (ret) {
5576 cmd_ret = LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
5577 goto error;
5578 }
5579
5580 if (failed_to_rotate) {
5581 cmd_ret = rotation_fail_code;
5582 goto error;
5583 }
5584
5585 session->quiet_rotation = quiet_rotation;
5586 ret = timer_session_rotation_pending_check_start(session, DEFAULT_ROTATE_PENDING_TIMER);
5587 if (ret) {
5588 cmd_ret = LTTNG_ERR_UNK;
5589 goto error;
5590 }
5591
5592 if (rotate_return) {
5593 rotate_return->rotation_id = ongoing_rotation_chunk_id;
5594 }
5595
5596 session->chunk_being_archived = chunk_being_archived;
5597 chunk_being_archived = nullptr;
5598 if (!quiet_rotation) {
5599 ret = notification_thread_command_session_rotation_ongoing(
5600 the_notification_thread_handle, session->id, ongoing_rotation_chunk_id);
5601 if (ret != LTTNG_OK) {
5602 ERR("Failed to notify notification thread that a session rotation is ongoing for session %s",
5603 session->name);
5604 cmd_ret = (lttng_error_code) ret;
5605 }
5606 }
5607
5608 DBG("Cmd rotate session %s, archive_id %" PRIu64 " sent",
5609 session->name,
5610 ongoing_rotation_chunk_id);
5611 end:
5612 lttng_trace_chunk_put(new_trace_chunk);
5613 lttng_trace_chunk_put(chunk_being_archived);
5614 ret = (cmd_ret == LTTNG_OK) ? cmd_ret : -((int) cmd_ret);
5615 return ret;
5616 error:
5617 if (session_reset_rotation_state(session, LTTNG_ROTATION_STATE_ERROR)) {
5618 ERR("Failed to reset rotation state of session \"%s\"", session->name);
5619 }
5620 goto end;
5621 }
5622
5623 /*
5624 * Command LTTNG_ROTATION_GET_INFO from the lttng-ctl library.
5625 *
5626 * Check if the session has finished its rotation.
5627 *
5628 * Return LTTNG_OK on success or else an LTTNG_ERR code.
5629 */
5630 int cmd_rotate_get_info(const ltt_session::locked_ref& session,
5631 struct lttng_rotation_get_info_return *info_return,
5632 uint64_t rotation_id)
5633 {
5634 enum lttng_error_code cmd_ret = LTTNG_OK;
5635 enum lttng_rotation_state rotation_state;
5636
5637 DBG("Cmd rotate_get_info session %s, rotation id %" PRIu64,
5638 session->name,
5639 session->most_recent_chunk_id.value);
5640
5641 if (session->chunk_being_archived) {
5642 enum lttng_trace_chunk_status chunk_status;
5643 uint64_t chunk_id;
5644
5645 chunk_status = lttng_trace_chunk_get_id(session->chunk_being_archived, &chunk_id);
5646 LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
5647
5648 rotation_state = rotation_id == chunk_id ? LTTNG_ROTATION_STATE_ONGOING :
5649 LTTNG_ROTATION_STATE_EXPIRED;
5650 } else {
5651 if (session->last_archived_chunk_id.is_set &&
5652 rotation_id != session->last_archived_chunk_id.value) {
5653 rotation_state = LTTNG_ROTATION_STATE_EXPIRED;
5654 } else {
5655 rotation_state = session->rotation_state;
5656 }
5657 }
5658
5659 switch (rotation_state) {
5660 case LTTNG_ROTATION_STATE_NO_ROTATION:
5661 DBG("Reporting that no rotation has occurred within the lifetime of session \"%s\"",
5662 session->name);
5663 goto end;
5664 case LTTNG_ROTATION_STATE_EXPIRED:
5665 DBG("Reporting that the rotation state of rotation id %" PRIu64
5666 " of session \"%s\" has expired",
5667 rotation_id,
5668 session->name);
5669 break;
5670 case LTTNG_ROTATION_STATE_ONGOING:
5671 DBG("Reporting that rotation id %" PRIu64 " of session \"%s\" is still pending",
5672 rotation_id,
5673 session->name);
5674 break;
5675 case LTTNG_ROTATION_STATE_COMPLETED:
5676 {
5677 int fmt_ret;
5678 char *chunk_path;
5679 char *current_tracing_path_reply;
5680 size_t current_tracing_path_reply_len;
5681
5682 DBG("Reporting that rotation id %" PRIu64 " of session \"%s\" is completed",
5683 rotation_id,
5684 session->name);
5685
5686 switch (session_get_consumer_destination_type(session)) {
5687 case CONSUMER_DST_LOCAL:
5688 current_tracing_path_reply = info_return->location.local.absolute_path;
5689 current_tracing_path_reply_len =
5690 sizeof(info_return->location.local.absolute_path);
5691 info_return->location_type =
5692 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_LOCAL;
5693 fmt_ret = asprintf(&chunk_path,
5694 "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY "/%s",
5695 session_get_base_path(session),
5696 session->last_archived_chunk_name);
5697 if (fmt_ret == -1) {
5698 PERROR("Failed to format the path of the last archived trace chunk");
5699 info_return->status = LTTNG_ROTATION_STATUS_ERROR;
5700 cmd_ret = LTTNG_ERR_UNK;
5701 goto end;
5702 }
5703 break;
5704 case CONSUMER_DST_NET:
5705 {
5706 uint16_t ctrl_port, data_port;
5707
5708 current_tracing_path_reply = info_return->location.relay.relative_path;
5709 current_tracing_path_reply_len =
5710 sizeof(info_return->location.relay.relative_path);
5711 /* Currently the only supported relay protocol. */
5712 info_return->location.relay.protocol =
5713 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_RELAY_PROTOCOL_TYPE_TCP;
5714
5715 fmt_ret = lttng_strncpy(info_return->location.relay.host,
5716 session_get_net_consumer_hostname(session),
5717 sizeof(info_return->location.relay.host));
5718 if (fmt_ret) {
5719 ERR("Failed to copy host name to rotate_get_info reply");
5720 info_return->status = LTTNG_ROTATION_STATUS_ERROR;
5721 cmd_ret = LTTNG_ERR_SET_URL;
5722 goto end;
5723 }
5724
5725 session_get_net_consumer_ports(session, &ctrl_port, &data_port);
5726 info_return->location.relay.ports.control = ctrl_port;
5727 info_return->location.relay.ports.data = data_port;
5728 info_return->location_type =
5729 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_RELAY;
5730 chunk_path = strdup(session->last_chunk_path);
5731 if (!chunk_path) {
5732 ERR("Failed to allocate the path of the last archived trace chunk");
5733 info_return->status = LTTNG_ROTATION_STATUS_ERROR;
5734 cmd_ret = LTTNG_ERR_UNK;
5735 goto end;
5736 }
5737 break;
5738 }
5739 default:
5740 abort();
5741 }
5742
5743 fmt_ret = lttng_strncpy(
5744 current_tracing_path_reply, chunk_path, current_tracing_path_reply_len);
5745 free(chunk_path);
5746 if (fmt_ret) {
5747 ERR("Failed to copy path of the last archived trace chunk to rotate_get_info reply");
5748 info_return->status = LTTNG_ROTATION_STATUS_ERROR;
5749 cmd_ret = LTTNG_ERR_UNK;
5750 goto end;
5751 }
5752
5753 break;
5754 }
5755 case LTTNG_ROTATION_STATE_ERROR:
5756 DBG("Reporting that an error occurred during rotation %" PRIu64
5757 " of session \"%s\"",
5758 rotation_id,
5759 session->name);
5760 break;
5761 default:
5762 abort();
5763 }
5764
5765 cmd_ret = LTTNG_OK;
5766 end:
5767 info_return->status = (int32_t) rotation_state;
5768 return cmd_ret;
5769 }
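
/*
 * Editorial usage sketch (not part of this translation unit): the state and
 * archive location assembled above are what liblttng-ctl exposes through a
 * rotation handle. A client typically triggers a rotation, then polls the
 * handle until the state leaves LTTNG_ROTATION_STATE_ONGOING. This assumes
 * the public lttng/rotation.h API (lttng_rotate_session(),
 * lttng_rotation_handle_get_state(), lttng_rotation_handle_destroy()).
 */
#if 0
#include <lttng/lttng.h>

#include <stdio.h>
#include <unistd.h>

static int rotate_and_wait(const char *session_name)
{
	struct lttng_rotation_handle *handle = nullptr;
	enum lttng_rotation_state state = LTTNG_ROTATION_STATE_ONGOING;

	/* Start an immediate rotation; the handle wraps the rotation id. */
	if (lttng_rotate_session(session_name, nullptr, &handle) < 0) {
		return -1;
	}

	/* Each poll ends up in cmd_rotate_get_info() on the sessiond side. */
	while (state == LTTNG_ROTATION_STATE_ONGOING) {
		if (lttng_rotation_handle_get_state(handle, &state) != LTTNG_ROTATION_STATUS_OK) {
			break;
		}

		usleep(100000);
	}

	printf("Rotation of session \"%s\" ended in state %d\n", session_name, (int) state);
	lttng_rotation_handle_destroy(handle);
	return state == LTTNG_ROTATION_STATE_COMPLETED ? 0 : -1;
}
#endif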
5770
5771 /*
5772 * Command LTTNG_ROTATION_SET_SCHEDULE from the lttng-ctl library.
5773 *
5774 * Configure the automatic rotation parameters.
5775  * Setting 'activate' to true enables the rotation schedule of 'schedule_type' using 'new_value'.
5776  * Setting 'activate' to false disables the rotation schedule and validates that
5777  * 'new_value' matches the currently active value.
5778 *
5779 * Return LTTNG_OK on success or else a positive LTTNG_ERR code.
5780 */
5781 int cmd_rotation_set_schedule(const ltt_session::locked_ref& session,
5782 bool activate,
5783 enum lttng_rotation_schedule_type schedule_type,
5784 uint64_t new_value)
5785 {
5786 int ret;
5787 uint64_t *parameter_value;
5788
5789 DBG("Cmd rotate set schedule session %s", session->name);
5790
5791 if (session->live_timer || !session->output_traces) {
5792 DBG("Failing ROTATION_SET_SCHEDULE command as the rotation feature is not available for this session");
5793 ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
5794 goto end;
5795 }
5796
5797 switch (schedule_type) {
5798 case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
5799 parameter_value = &session->rotate_size;
5800 break;
5801 case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
5802 parameter_value = &session->rotate_timer_period;
5803 if (new_value >= UINT_MAX) {
5804 DBG("Failing ROTATION_SET_SCHEDULE command as the value requested for a periodic rotation schedule is invalid: %" PRIu64
5805 			    " >= %u (UINT_MAX)",
5806 new_value,
5807 UINT_MAX);
5808 ret = LTTNG_ERR_INVALID;
5809 goto end;
5810 }
5811 break;
5812 default:
5813 WARN("Failing ROTATION_SET_SCHEDULE command on unknown schedule type");
5814 ret = LTTNG_ERR_INVALID;
5815 goto end;
5816 }
5817
5818 /* Improper use of the API. */
5819 if (new_value == -1ULL) {
5820 WARN("Failing ROTATION_SET_SCHEDULE command as the value requested is -1");
5821 ret = LTTNG_ERR_INVALID;
5822 goto end;
5823 }
5824
5825 /*
5826 	 * As indicated in struct ltt_session's comments, a value of 0 means
5827 	 * this rotation schedule type is not in use.
5828 *
5829 * Reject the command if we were asked to activate a schedule that was
5830 * already active.
5831 */
5832 if (activate && *parameter_value != 0) {
5833 DBG("Failing ROTATION_SET_SCHEDULE (activate) command as the schedule is already active");
5834 ret = LTTNG_ERR_ROTATION_SCHEDULE_SET;
5835 goto end;
5836 }
5837
5838 /*
5839 * Reject the command if we were asked to deactivate a schedule that was
5840 * not active.
5841 */
5842 if (!activate && *parameter_value == 0) {
5843 DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as the schedule is already inactive");
5844 ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
5845 goto end;
5846 }
5847
5848 /*
5849 * Reject the command if we were asked to deactivate a schedule that
5850 * doesn't exist.
5851 */
5852 if (!activate && *parameter_value != new_value) {
5853 			DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as a nonexistent schedule was provided");
5854 ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
5855 goto end;
5856 }
5857
5858 *parameter_value = activate ? new_value : 0;
5859
5860 switch (schedule_type) {
5861 case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
5862 if (activate && session->active) {
5863 /*
5864 * Only start the timer if the session is active,
5865 * otherwise it will be started when the session starts.
5866 */
5867 ret = timer_session_rotation_schedule_timer_start(session, new_value);
5868 if (ret) {
5869 ERR("Failed to enable session rotation timer in ROTATION_SET_SCHEDULE command");
5870 ret = LTTNG_ERR_UNK;
5871 goto end;
5872 }
5873 } else {
5874 ret = timer_session_rotation_schedule_timer_stop(session);
5875 if (ret) {
5876 ERR("Failed to disable session rotation timer in ROTATION_SET_SCHEDULE command");
5877 ret = LTTNG_ERR_UNK;
5878 goto end;
5879 }
5880 }
5881 break;
5882 case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
5883 if (activate) {
5884 try {
5885 the_rotation_thread_handle->subscribe_session_consumed_size_rotation(
5886 *session, new_value);
5887 } catch (const std::exception& e) {
5888 ERR("Failed to enable consumed-size notification in ROTATION_SET_SCHEDULE command: %s",
5889 e.what());
5890 ret = LTTNG_ERR_UNK;
5891 goto end;
5892 }
5893 } else {
5894 try {
5895 the_rotation_thread_handle
5896 ->unsubscribe_session_consumed_size_rotation(*session);
5897 } catch (const std::exception& e) {
5898 ERR("Failed to disable consumed-size notification in ROTATION_SET_SCHEDULE command: %s",
5899 e.what());
5900 ret = LTTNG_ERR_UNK;
5901 goto end;
5902 }
5903 }
5904 break;
5905 default:
5906 /* Would have been caught before. */
5907 abort();
5908 }
5909
5910 ret = LTTNG_OK;
5911
5912 goto end;
5913
5914 end:
5915 return ret;
5916 }
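
/*
 * Editorial usage sketch (not part of this translation unit): the
 * activate/deactivate paths above are driven by liblttng-ctl's rotation
 * schedule API. The helper names below come from the public lttng/rotation.h
 * header (lttng_rotation_schedule_size_threshold_create(),
 * lttng_session_add_rotation_schedule(), ...) and are assumptions of this
 * sketch, not code used by the session daemon itself.
 */
#if 0
#include <lttng/lttng.h>

/* Rotate 'session_name' each time roughly 50 MiB of trace data is produced. */
static int enable_size_based_rotation(const char *session_name)
{
	int ret = -1;
	struct lttng_rotation_schedule *schedule =
		lttng_rotation_schedule_size_threshold_create();

	if (!schedule) {
		return -1;
	}

	if (lttng_rotation_schedule_size_threshold_set_threshold(schedule, 50ULL * 1024 * 1024) !=
	    LTTNG_ROTATION_STATUS_OK) {
		goto end;
	}

	/* Reaches cmd_rotation_set_schedule() with activate = true. */
	if (lttng_session_add_rotation_schedule(session_name, schedule) !=
	    LTTNG_ROTATION_STATUS_OK) {
		goto end;
	}

	ret = 0;
end:
	lttng_rotation_schedule_destroy(schedule);
	return ret;
}
#endif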
5917
5918 /* Wait for a given path to be removed before continuing. */
5919 static enum lttng_error_code wait_on_path(void *path_data)
5920 {
5921 const char *shm_path = (const char *) path_data;
5922
5923 DBG("Waiting for the shm path at %s to be removed before completing session destruction",
5924 shm_path);
5925 while (true) {
5926 int ret;
5927 struct stat st;
5928
5929 ret = stat(shm_path, &st);
5930 if (ret) {
5931 if (errno != ENOENT) {
5932 PERROR("stat() returned an error while checking for the existence of the shm path");
5933 } else {
5934 DBG("shm path no longer exists, completing the destruction of session");
5935 }
5936 break;
5937 } else {
5938 if (!S_ISDIR(st.st_mode)) {
5939 ERR("The type of shm path %s returned by stat() is not a directory; aborting the wait for shm path removal",
5940 shm_path);
5941 break;
5942 }
5943 }
5944 usleep(SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US);
5945 }
5946 return LTTNG_OK;
5947 }
5948
5949 /*
5950 * Returns a pointer to a handler to run on completion of a command.
5951 * Returns NULL if no handler has to be run for the last command executed.
5952 */
5953 const struct cmd_completion_handler *cmd_pop_completion_handler()
5954 {
5955 struct cmd_completion_handler *handler = current_completion_handler;
5956
5957 current_completion_handler = nullptr;
5958 return handler;
5959 }
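
/*
 * Editorial sketch (not part of this translation unit): how a caller is
 * expected to consume the popped handler once a command's reply has been
 * sent. It assumes cmd_completion_handler pairs a 'run' callback with a
 * 'data' pointer, which is consistent with the wait_on_path() signature
 * used by the session-destruction path above.
 */
#if 0
static void run_pending_completion_handler()
{
	const struct cmd_completion_handler *handler = cmd_pop_completion_handler();

	if (!handler) {
		return;
	}

	/* For a destroy command, this blocks until the session's shm path is removed. */
	const enum lttng_error_code completion_code = handler->run(handler->data);

	if (completion_code != LTTNG_OK) {
		/* Report the deferred failure through the command's reply context. */
	}
}
#endif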
5960
5961 /*
5962 * Init command subsystem.
5963 */
5964 void cmd_init()
5965 {
5966 /*
5967 * Set network sequence index to 1 for streams to match a relayd
5968 * socket on the consumer side.
5969 */
5970 pthread_mutex_lock(&relayd_net_seq_idx_lock);
5971 relayd_net_seq_idx = 1;
5972 pthread_mutex_unlock(&relayd_net_seq_idx_lock);
5973
5974 DBG("Command subsystem initialized");
5975 }