LTTNG_EVENT_FIELD_STRING = 4,
};
+enum lttng_event_flag {
+ LTTNG_EVENT_FLAG_SYSCALL_32 = (1U << 0),
+ LTTNG_EVENT_FLAG_SYSCALL_64 = (1U << 1),
+};
+
/*
* Perf counter attributes
 *
* The structures should be initialized to zero before use.
*/
-#define LTTNG_EVENT_PADDING1 14
+#define LTTNG_EVENT_PADDING1 10
#define LTTNG_EVENT_PADDING2 LTTNG_SYMBOL_NAME_LEN + 32
struct lttng_event {
enum lttng_event_type type;
unsigned char filter; /* filter enabled ? */
unsigned char exclusion; /* exclusions added ? */
+	/* Event flags, introduced in LTTng 2.6. */
+	enum lttng_event_flag flags;
+
char padding[LTTNG_EVENT_PADDING1];
/* Per event type configuration */
extern int lttng_list_tracepoint_fields(struct lttng_handle *handle,
struct lttng_event_field **fields);
+/*
+ * List the available kernel syscalls.
+ *
+ * Return the size (number of entries) of the allocated "lttng_event" array.
+ * All events in the list are of type LTTNG_EVENT_SYSCALL. The caller must
+ * free the events array. On error, a negative LTTng error code is returned.
+ */
+extern int lttng_list_syscalls(struct lttng_event **events);
+
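+/*
+ * A minimal caller-side sketch (illustrative only; error handling trimmed):
+ *
+ *	struct lttng_event *events = NULL;
+ *	int i, count;
+ *
+ *	count = lttng_list_syscalls(&events);
+ *	if (count < 0) {
+ *		fprintf(stderr, "%s\n", lttng_strerror(count));
+ *		return;
+ *	}
+ *	for (i = 0; i < count; i++) {
+ *		printf("%s\n", events[i].name);
+ *	}
+ *	free(events);
+ */
+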
/*
* Add context to event(s) for a specific channel (or for all).
*
snapshot.c snapshot.h \
agent.c agent.h \
save.h save.c \
- load-session-thread.h load-session-thread.c
+ load-session-thread.h load-session-thread.c \
+ syscall.h syscall.c
if HAVE_LIBLTTNG_UST_CTL
lttng_sessiond_SOURCES += trace-ust.c ust-registry.c ust-app.c \
#include "kernel-consumer.h"
#include "lttng-sessiond.h"
#include "utils.h"
+#include "syscall.h"
#include "cmd.h"
DBG("Listing events for channel %s", kchan->channel->name);
if (nb_event == 0) {
- goto end;
+ *events = NULL;
+ goto syscall;
}
*events = zmalloc(nb_event * sizeof(struct lttng_event));
i++;
}
-end:
+syscall:
+ if (syscall_table) {
+ ssize_t new_size;
+
+ new_size = syscall_list_channel(kchan, events, nb_event);
+ if (new_size < 0) {
+			/* syscall_list_channel frees the events array on error. */
+ ret = -new_size;
+ goto error;
+ }
+ nb_event = new_size;
+ }
+
return nb_event;
error:
return -ret;
}
+/*
+ * Command LTTNG_LIST_SYSCALLS processed by the client thread.
+ */
+ssize_t cmd_list_syscalls(struct lttng_event **events)
+{
+ return syscall_table_list(events);
+}
+
/*
* Command LTTNG_START_TRACE processed by the client thread.
*/
ssize_t cmd_list_tracepoints(int domain, struct lttng_event **events);
ssize_t cmd_snapshot_list_outputs(struct ltt_session *session,
struct lttng_snapshot_output **outputs);
+ssize_t cmd_list_syscalls(struct lttng_event **events);
int cmd_calibrate(int domain, struct lttng_calibrate *calibrate);
int cmd_data_pending(struct ltt_session *session);
#include "kernel.h"
#include "kernel-consumer.h"
#include "kern-modules.h"
+#include "utils.h"
/*
* Add context on a kernel channel.
rcu_read_unlock();
return ret;
}
+
+/*
+ * Get the syscall mask array from the kernel tracer.
+ *
+ * Return 0 on success, else a negative value. In both cases, the caller is
+ * responsible for freeing *syscall_mask.
+ */
+int kernel_syscall_mask(int chan_fd, char **syscall_mask, uint32_t *nr_bits)
+{
+ assert(syscall_mask);
+ assert(nr_bits);
+
+ return kernctl_syscall_mask(chan_fd, syscall_mask, nr_bits);
+}
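+
+/*
+ * Caller-side sketch, assuming a valid kernel channel fd (error handling
+ * trimmed); bit i of the mask indicates whether syscall i is enabled:
+ *
+ *	char *mask = NULL;
+ *	uint32_t nr_bits = 0;
+ *	unsigned char enabled;
+ *
+ *	if (!kernel_syscall_mask(chan_fd, &mask, &nr_bits) && nr_bits > 0) {
+ *		bitfield_read_be(mask, unsigned char, 0, 1, &enabled);
+ *	}
+ *	free(mask);
+ */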
void kernel_destroy_channel(struct ltt_kernel_channel *kchan);
int kernel_snapshot_record(struct ltt_kernel_session *ksess,
struct snapshot_output *output, int wait, uint64_t max_stream_size);
+int kernel_syscall_mask(int chan_fd, char **syscall_mask, uint32_t *nr_bits);
int init_kernel_workarounds(void);
#include "agent-thread.h"
#include "save.h"
#include "load-session-thread.h"
+#include "syscall.h"
#define CONSUMERD_FILE "lttng-consumerd"
}
DBG("Unloading kernel modules");
modprobe_remove_lttng_all();
+ free(syscall_table);
}
close_consumer_sockets();
case LTTNG_LIST_DOMAINS:
case LTTNG_LIST_CHANNELS:
case LTTNG_LIST_EVENTS:
+ case LTTNG_LIST_SYSCALLS:
break;
default:
/* Setup lttng message with no payload */
case LTTNG_CALIBRATE:
case LTTNG_LIST_SESSIONS:
case LTTNG_LIST_TRACEPOINTS:
+ case LTTNG_LIST_SYSCALLS:
case LTTNG_LIST_TRACEPOINT_FIELDS:
case LTTNG_SAVE_SESSION:
need_tracing_session = 0;
ret = LTTNG_OK;
break;
}
+ case LTTNG_LIST_SYSCALLS:
+ {
+ struct lttng_event *events;
+ ssize_t nb_events;
+
+ nb_events = cmd_list_syscalls(&events);
+ if (nb_events < 0) {
+ /* Return value is a negative lttng_error_code. */
+ ret = -nb_events;
+ goto error;
+ }
+
+ /*
+ * Setup lttng message with payload size set to the event list size in
+ * bytes and then copy list into the llm payload.
+ */
+ ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
+ if (ret < 0) {
+ free(events);
+ goto setup_error;
+ }
+
+ /* Copy event list into message payload */
+ memcpy(cmd_ctx->llm->payload, events,
+ sizeof(struct lttng_event) * nb_events);
+
+ free(events);
+
+ ret = LTTNG_OK;
+ break;
+ }
case LTTNG_SET_CONSUMER_URI:
{
size_t nb_uri, len;
/* Setup kernel tracer */
if (!opt_no_kernel) {
init_kernel_tracer();
+ ret = syscall_init_table();
+ if (ret < 0) {
+ ERR("Unable to populate syscall table. Syscall tracing won't"
+ " work for this session daemon.");
+ }
}
/* Set ulimit for open files */
--- /dev/null
+/*
+ * Copyright (C) 2014 - David Goulet <dgoulet@efficios.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License, version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#define _GNU_SOURCE
+#include <common/bitfield.h>
+#include <common/common.h>
+#include <common/kernel-ctl/kernel-ctl.h>
+
+#include "lttng-sessiond.h"
+#include "kernel.h"
+#include "syscall.h"
+#include "utils.h"
+
+/* Global syscall table. */
+struct syscall *syscall_table;
+
+/* Number of entries in the syscall table. */
+static size_t syscall_table_nb_entry;
+
+/*
+ * Populate the system call table using the kernel tracer.
+ *
+ * Return 0 on success with the syscall table allocated. On error, a negative
+ * value is returned and the syscall table is set to NULL.
+ */
+int syscall_init_table(void)
+{
+ int ret, fd, err;
+ size_t nbmem;
+ FILE *fp;
+ /* Syscall data from the kernel. */
+	size_t index = 0;
+	uint32_t bitness;
+	/* One extra byte for the NUL terminator written by fscanf. */
+	char name[SYSCALL_NAME_LEN + 1];
+
+ DBG3("Syscall init system call table");
+
+ fd = kernctl_syscall_list(kernel_tracer_fd);
+ if (fd < 0) {
+ ret = -errno;
+ PERROR("kernelctl syscall list");
+ goto error_ioctl;
+ }
+
+ fp = fdopen(fd, "r");
+ if (!fp) {
+ ret = -errno;
+ PERROR("syscall list fdopen");
+ goto error_fp;
+ }
+
+ nbmem = SYSCALL_TABLE_INIT_SIZE;
+ syscall_table = zmalloc(sizeof(struct syscall) * nbmem);
+ if (!syscall_table) {
+ ret = -errno;
+ PERROR("syscall list zmalloc");
+ goto error;
+ }
+
+ while (fscanf(fp,
+ "syscall { index = %lu; \
+ name = %" XSTR(SYSCALL_NAME_LEN) "[^;]; \
+ bitness = %u; };\n",
+ &index, name, &bitness) == 3) {
+		if (index >= nbmem) {
+ struct syscall *new_list;
+ size_t new_nbmem;
+
+			/* Grow to at least index + 1, at minimum doubling. */
+			new_nbmem = max(index + 1, nbmem << 1);
+
+ DBG("Reallocating syscall table from %zu to %zu entries", nbmem,
+ new_nbmem);
+ new_list = realloc(syscall_table, new_nbmem * sizeof(*new_list));
+ if (!new_list) {
+ ret = -errno;
+ PERROR("syscall list realloc");
+ goto error;
+ }
+
+ /* Zero out the new memory. */
+ memset(new_list + nbmem, 0,
+ (new_nbmem - nbmem) * sizeof(*new_list));
+ nbmem = new_nbmem;
+ syscall_table = new_list;
+ }
+ syscall_table[index].index = index;
+ syscall_table[index].bitness = bitness;
+		strncpy(syscall_table[index].name, name,
+			sizeof(syscall_table[index].name) - 1);
+		syscall_table[index].name[SYSCALL_NAME_LEN - 1] = '\0';
+ /*
+ DBG("Syscall name '%s' at index %" PRIu32 " of bitness %u",
+ syscall_table[index].name,
+ syscall_table[index].index,
+ syscall_table[index].bitness);
+ */
+		/*
+		 * Indexes come in increasing order; the entry count is the
+		 * highest index seen plus one.
+		 */
+		if (index + 1 > syscall_table_nb_entry) {
+			syscall_table_nb_entry = index + 1;
+		}
+	}
+
+ ret = 0;
+
+error:
+	err = fclose(fp);
+	if (err) {
+		PERROR("syscall list fclose");
+	}
+	if (ret < 0) {
+		/* Honor the contract above: the table is NULL on error. */
+		free(syscall_table);
+		syscall_table = NULL;
+	}
+	return ret;
+
+error_fp:
+ err = close(fd);
+ if (err) {
+ PERROR("syscall list close");
+ }
+
+error_ioctl:
+ return ret;
+}
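+
+/*
+ * For reference, each line parsed above has the following form, with
+ * illustrative values:
+ *
+ *	syscall { index = 0; name = read; bitness = 64; };
+ */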
+
+/*
+ * Helper for the list syscalls command that empties the temporary syscall
+ * hash table used to track duplicates between the 32 and 64-bit layers.
+ *
+ * This empties the hash table and then destroys it; afterwards, the pointer
+ * is unusable. The RCU read side lock MUST be acquired before calling this.
+ */
+static void destroy_syscall_ht(struct lttng_ht *ht)
+{
+ struct lttng_ht_iter iter;
+ struct syscall *ksyscall;
+
+ DBG3("Destroying syscall hash table.");
+
+ if (!ht) {
+ return;
+ }
+
+ cds_lfht_for_each_entry(ht->ht, &iter.iter, ksyscall, node.node) {
+ int ret;
+
+ ret = lttng_ht_del(ht, &iter);
+ assert(!ret);
+ free(ksyscall);
+ }
+ ht_cleanup_push(ht);
+}
+
+/*
+ * Allocate a hash table and set the given pointer to it.
+ *
+ * Return 0 on success, else a negative LTTng error code.
+ */
+static int init_syscall_ht(struct lttng_ht **ht)
+{
+ int ret;
+
+ *ht = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+ if (!*ht) {
+ ret = -LTTNG_ERR_NOMEM;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * Lookup a syscall in the given hash table by name.
+ *
+ * Return the syscall object if found, else NULL.
+ */
+static struct syscall *lookup_syscall(struct lttng_ht *ht, const char *name)
+{
+ struct lttng_ht_node_str *node;
+ struct lttng_ht_iter iter;
+ struct syscall *ksyscall = NULL;
+
+ assert(ht);
+ assert(name);
+
+ lttng_ht_lookup(ht, (void *) name, &iter);
+ node = lttng_ht_iter_get_node_str(&iter);
+ if (node) {
+ ksyscall = caa_container_of(node, struct syscall, node);
+ }
+
+ return ksyscall;
+}
+
+/*
+ * Update the event at syscall_index in the events array with the bitness
+ * flag of the syscall at index in the syscall table.
+ */
+static void update_event_syscall_bitness(struct lttng_event *events,
+ unsigned int index, unsigned int syscall_index)
+{
+ assert(events);
+
+ if (syscall_table[index].bitness == 32) {
+ events[syscall_index].flags |= LTTNG_EVENT_FLAG_SYSCALL_32;
+ } else {
+ events[syscall_index].flags |= LTTNG_EVENT_FLAG_SYSCALL_64;
+ }
+}
+
+/*
+ * Allocate and initialize a syscall object and add it to the given hash
+ * table.
+ *
+ * Return 0 on success, else -LTTNG_ERR_NOMEM.
+ */
+static int add_syscall_to_ht(struct lttng_ht *ht, unsigned int index,
+ unsigned int syscall_index)
+{
+ int ret;
+ struct syscall *ksyscall;
+
+ assert(ht);
+
+ ksyscall = zmalloc(sizeof(*ksyscall));
+ if (!ksyscall) {
+ ret = -LTTNG_ERR_NOMEM;
+ goto error;
+ }
+
+ strncpy(ksyscall->name, syscall_table[index].name,
+ sizeof(ksyscall->name));
+ ksyscall->bitness = syscall_table[index].bitness;
+ ksyscall->index = syscall_index;
+ lttng_ht_node_init_str(&ksyscall->node, ksyscall->name);
+ lttng_ht_add_unique_str(ht, &ksyscall->node);
+ ret = 0;
+
+error:
+ return ret;
+}
+
+/*
+ * List syscalls present in the kernel syscall global array; allocate and
+ * populate the events structure with them. Syscalls with an empty name are
+ * skipped.
+ *
+ * Return the number of entries in the array, or a negative value on error.
+ */
+ssize_t syscall_table_list(struct lttng_event **_events)
+{
+ int i, index = 0;
+ ssize_t ret;
+ struct lttng_event *events;
+	/* Hash table used to filter out duplicates. */
+ struct lttng_ht *syscalls_ht = NULL;
+
+ assert(_events);
+
+ DBG("Syscall table listing.");
+
+ rcu_read_lock();
+
+	/*
+	 * Allocate for the total number of syscalls even though some entries
+	 * might not be valid. The count below ensures that only the valid
+	 * entries are returned.
+	 */
+ events = zmalloc(syscall_table_nb_entry * sizeof(*events));
+ if (!events) {
+ PERROR("syscall table list zmalloc");
+ ret = -LTTNG_ERR_NOMEM;
+ goto error;
+ }
+
+ ret = init_syscall_ht(&syscalls_ht);
+ if (ret < 0) {
+ goto error;
+ }
+
+ for (i = 0; i < syscall_table_nb_entry; i++) {
+ struct syscall *ksyscall;
+
+ /* Skip empty syscalls. */
+ if (*syscall_table[i].name == '\0') {
+ continue;
+ }
+
+ ksyscall = lookup_syscall(syscalls_ht, syscall_table[i].name);
+ if (ksyscall) {
+ update_event_syscall_bitness(events, i, ksyscall->index);
+ continue;
+ }
+
+ ret = add_syscall_to_ht(syscalls_ht, i, index);
+ if (ret < 0) {
+ goto error;
+ }
+
+ /* Copy the event information in the event's array. */
+ strncpy(events[index].name, syscall_table[i].name,
+ sizeof(events[index].name));
+ update_event_syscall_bitness(events, i, index);
+ events[index].type = LTTNG_EVENT_SYSCALL;
+		/* A value of -1 keeps the enabled/disabled field from being printed. */
+ events[index].enabled = -1;
+ index++;
+ }
+
+ destroy_syscall_ht(syscalls_ht);
+ *_events = events;
+ rcu_read_unlock();
+ return index;
+
+error:
+ destroy_syscall_ht(syscalls_ht);
+ free(events);
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Append the syscalls enabled in the given kernel channel to the events
+ * list.
+ *
+ * Return the number of entries in the events array, which can differ from
+ * size if the array grew. On error, a negative value is returned, the events
+ * array is freed and *_events is set to NULL.
+ */
+ssize_t syscall_list_channel(struct ltt_kernel_channel *kchan,
+ struct lttng_event **_events, size_t size)
+{
+ int err, i;
+ size_t new_size;
+ ssize_t ret, count;
+ char *mask = NULL;
+ uint32_t len;
+ struct lttng_event *events = NULL;
+	/* Hash table used to filter out duplicates. */
+ struct lttng_ht *syscalls_ht = NULL;
+
+	assert(kchan);
+	assert(_events);
+
+	events = *_events;
+	count = new_size = size;
+
+	rcu_read_lock();
+
+	/* Get syscall mask from the kernel tracer. */
+	err = kernel_syscall_mask(kchan->fd, &mask, &len);
+	if (err < 0) {
+		ret = err;
+		goto error;
+	}
+
+ ret = init_syscall_ht(&syscalls_ht);
+ if (ret < 0) {
+ goto error;
+ }
+
+ for (i = 0; i < len; i++) {
+ unsigned char val;
+ struct syscall *ksyscall;
+
+ bitfield_read_be(mask, unsigned char, i, 1, &val);
+ if (!val) {
+ /* Syscall is disabled, continue the loop. */
+ continue;
+ }
+
+ /* Skip empty syscall. */
+ if (*syscall_table[i].name == '\0') {
+ continue;
+ }
+
+ /* Syscall is enabled thus add it to the events list. */
+
+ if (count >= new_size) {
+ struct lttng_event *new_events;
+
+ /* Get the maximum here since count can be 0. */
+ new_size = max(count << 1, 1);
+ DBG3("Listing syscall realloc events array from %zu to %zu", count,
+ new_size);
+ new_events = realloc(events, new_size * sizeof(*new_events));
+ if (!new_events) {
+ PERROR("realloc kernel events list");
+				ret = -LTTNG_ERR_NOMEM;
+ goto error;
+ }
+ memset(new_events + count, 0,
+ (new_size - count) * sizeof(*new_events));
+ events = new_events;
+ }
+
+ ksyscall = lookup_syscall(syscalls_ht, syscall_table[i].name);
+ if (ksyscall) {
+ update_event_syscall_bitness(events, i, ksyscall->index);
+ continue;
+ }
+
+ ret = add_syscall_to_ht(syscalls_ht, i, count);
+ if (ret < 0) {
+ goto error;
+ }
+
+ update_event_syscall_bitness(events, i, count);
+ strncpy(events[count].name, syscall_table[i].name,
+ sizeof(events[count].name));
+ events[count].enabled = 1;
+		events[count].type = LTTNG_EVENT_SYSCALL;
+ count++;
+ }
+
+	destroy_syscall_ht(syscalls_ht);
+	free(mask);
+	*_events = events;
+	rcu_read_unlock();
+
+	return count;
+
+error:
+	destroy_syscall_ht(syscalls_ht);
+	/*
+	 * The events array may have been reallocated; free it here and reset
+	 * the caller's pointer so it is not freed twice.
+	 */
+	free(events);
+	*_events = NULL;
+	free(mask);
+	rcu_read_unlock();
+	return ret;
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 - David Goulet <dgoulet@efficios.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License, version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef SYSCALL_H
+#define SYSCALL_H
+
+#include <common/hashtable/hashtable.h>
+#include <lttng/event.h>
+
+#include "trace-kernel.h"
+
+/*
+ * Default size of the kernel system call array. With this size, we usually
+ * need to reallocate twice, considering the 32-bit compat layer as well.
+ */
+#define SYSCALL_TABLE_INIT_SIZE 256
+
+/* Maximum length of a syscall name. */
+#define SYSCALL_NAME_LEN 255
+
+/*
+ * Represents a kernel syscall; used when populating the internal syscall
+ * list.
+ */
+struct syscall {
+ uint32_t index;
+ uint32_t bitness;
+ char name[SYSCALL_NAME_LEN];
+ /* Used by the list syscalls command. */
+ struct lttng_ht_node_str node;
+};
+
+/*
+ * Allocated once when the session daemon lists all syscalls at startup. This
+ * is an array indexed by the syscall index provided in the listing.
+ */
+extern struct syscall *syscall_table;
+
+/* Used to list kernel system calls. */
+int syscall_init_table(void);
+ssize_t syscall_table_list(struct lttng_event **events);
+ssize_t syscall_list_channel(struct ltt_kernel_channel *kchan,
+ struct lttng_event **_events, size_t size);
+
+#endif /* SYSCALL_H */
static char *opt_channel;
static int opt_domain;
static int opt_fields;
+static int opt_syscall;
#if 0
/* Not implemented yet */
static char *opt_cmd_name;
{"channel", 'c', POPT_ARG_STRING, &opt_channel, 0, 0, 0},
{"domain", 'd', POPT_ARG_VAL, &opt_domain, 1, 0, 0},
{"fields", 'f', POPT_ARG_VAL, &opt_fields, 1, 0, 0},
+ {"syscall", 'S', POPT_ARG_VAL, &opt_syscall, 1, 0, 0},
{"list-options", 0, POPT_ARG_NONE, NULL, OPT_LIST_OPTIONS, NULL, NULL},
{0, 0, 0, 0, 0, 0, 0}
};
fprintf(ofp, " -u, --userspace Select user-space domain.\n");
fprintf(ofp, " -j, --jul Apply for Java application using JUL\n");
fprintf(ofp, " -f, --fields List event fields.\n");
+ fprintf(ofp, " --syscall List available system calls.\n");
#if 0
fprintf(ofp, " -p, --pid PID List user-space events by PID\n");
#endif
}
}
+static const char *bitness_event(enum lttng_event_flag flags)
+{
+ if (flags & LTTNG_EVENT_FLAG_SYSCALL_32) {
+ if (flags & LTTNG_EVENT_FLAG_SYSCALL_64) {
+ return " [32/64-bit]";
+ } else {
+ return " [32-bit]";
+ }
+ } else if (flags & LTTNG_EVENT_FLAG_SYSCALL_64) {
+ return " [64-bit]";
+ } else {
+ return "";
+ }
+}
+
/*
* Pretty print single event.
*/
MSG("%ssymbol: \"%s\"", indent8, event->attr.ftrace.symbol_name);
break;
case LTTNG_EVENT_SYSCALL:
- MSG("%ssyscalls (type: syscall)%s%s", indent6,
+ MSG("%s%s%s%s%s", indent6, event->name,
+ (opt_syscall ? "" : " (type:syscall)"),
enabled_string(event->enabled),
- filter_string(event->filter));
+ bitness_event(event->flags));
break;
case LTTNG_EVENT_NOOP:
MSG("%s (type: noop)%s%s", indent6,
return ret;
}
+/*
+ * Machine interface
+ * Print a list of system calls.
+ */
+static int mi_list_syscalls(struct lttng_event *events, int count)
+{
+ int ret, i;
+
+ /* Open events */
+ ret = mi_lttng_events_open(writer);
+ if (ret) {
+ goto end;
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = mi_lttng_event(writer, &events[i], 0);
+ if (ret) {
+ goto end;
+ }
+ }
+
+ /* Close events. */
+ ret = mi_lttng_writer_close_element(writer);
+ if (ret) {
+ goto end;
+ }
+
+end:
+ return ret;
+}
+
+/*
+ * Ask for kernel system calls.
+ */
+static int list_syscalls(void)
+{
+ int i, size, ret = CMD_SUCCESS;
+ struct lttng_event *event_list;
+
+ DBG("Getting kernel system call events");
+
+ size = lttng_list_syscalls(&event_list);
+ if (size < 0) {
+ ERR("Unable to list system calls: %s", lttng_strerror(size));
+ ret = CMD_ERROR;
+ goto error;
+ }
+
+ if (lttng_opt_mi) {
+ /* Mi print */
+ ret = mi_list_syscalls(event_list, size);
+ if (ret) {
+ ret = CMD_ERROR;
+ goto end;
+ }
+ } else {
+ MSG("System calls:\n-------------");
+
+ for (i = 0; i < size; i++) {
+ print_events(&event_list[i]);
+ }
+
+ MSG("");
+ }
+
+end:
+ free(event_list);
+ return ret;
+
+error:
+ return ret;
+}
+
/*
* Machine Interface
* Print a list of agent events
domain.type = LTTNG_DOMAIN_LOG4J;
}
+ if (!opt_kernel && opt_syscall) {
+ WARN("--syscall will only work with the Kernel domain (-k)");
+ ret = CMD_ERROR;
+ goto end;
+ }
+
if (opt_kernel || opt_userspace || opt_jul || opt_log4j) {
handle = lttng_create_handle(session_name, &domain);
if (handle == NULL) {
}
}
if (opt_kernel) {
- ret = list_kernel_events();
- if (ret) {
- goto end;
+ if (opt_syscall) {
+ ret = list_syscalls();
+ if (ret) {
+ goto end;
+ }
+ } else {
+ ret = list_kernel_events();
+ if (ret) {
+ goto end;
+ }
}
}
if (opt_userspace) {
--- /dev/null
+#ifndef _LTTNG_BITFIELD_H
+#define _LTTNG_BITFIELD_H
+
+/*
+ * BabelTrace
+ *
+ * Bitfields read/write functions.
+ *
+ * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdint.h> /* C99 5.2.4.2 Numerical limits */
+#include <limits.h> /* C99 5.2.4.2 Numerical limits */
+
+#include <common/compat/endian.h> /* Non-standard BIG_ENDIAN, LITTLE_ENDIAN, BYTE_ORDER */
+
+/*
+ * We can't shift an int by 32 bits: >> 32 and << 32 on a 32-bit int are
+ * undefined.
+ */
+#define _piecewise_rshift(_v, _shift) \
+({ \
+ typeof(_v) ___v = (_v); \
+ typeof(_shift) ___shift = (_shift); \
+ unsigned long sb = (___shift) / (sizeof(___v) * CHAR_BIT - 1); \
+ unsigned long final = (___shift) % (sizeof(___v) * CHAR_BIT - 1); \
+ \
+ for (; sb; sb--) \
+ ___v >>= sizeof(___v) * CHAR_BIT - 1; \
+ ___v >>= final; \
+})
+
+#define _piecewise_lshift(_v, _shift) \
+({ \
+ typeof(_v) ___v = (_v); \
+ typeof(_shift) ___shift = (_shift); \
+ unsigned long sb = (___shift) / (sizeof(___v) * CHAR_BIT - 1); \
+ unsigned long final = (___shift) % (sizeof(___v) * CHAR_BIT - 1); \
+ \
+ for (; sb; sb--) \
+ ___v <<= sizeof(___v) * CHAR_BIT - 1; \
+ ___v <<= final; \
+})
+
+#define _is_signed_type(type) ((type) -1 < (type) 0)
+
+#define _unsigned_cast(type, v) \
+({ \
+ (sizeof(v) < sizeof(type)) ? \
+ ((type) (v)) & (~(~(type) 0 << (sizeof(v) * CHAR_BIT))) : \
+ (type) (v); \
+})
+
+/*
+ * bitfield_write - write integer to a bitfield in native endianness
+ *
+ * Save an integer to the bitfield, which starts at the "start" bit and spans
+ * "len" bits.
+ * The inside of a bitfield goes from high bits to low bits.
+ * Uses native endianness.
+ * For unsigned "v", pad the MSB with 0 if the bitfield is larger than v.
+ * For signed "v", sign-extend v if the bitfield is larger than v.
+ *
+ * On little endian, bytes are placed from the least significant to the most
+ * significant. Also, consecutive bitfields are placed from lower bits to
+ * higher bits.
+ *
+ * On big endian, bytes are placed from the most significant to the least
+ * significant. Also, consecutive bitfields are placed from higher to lower
+ * bits.
+ */
+
+#define _bitfield_write_le(_ptr, type, _start, _length, _v) \
+do { \
+ typeof(_v) __v = (_v); \
+ type *__ptr = (void *) (_ptr); \
+ unsigned long __start = (_start), __length = (_length); \
+ type mask, cmask; \
+ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
+ unsigned long start_unit, end_unit, this_unit; \
+ unsigned long end, cshift; /* cshift is "complement shift" */ \
+ \
+ if (!__length) \
+ break; \
+ \
+ end = __start + __length; \
+ start_unit = __start / ts; \
+ end_unit = (end + (ts - 1)) / ts; \
+ \
+ /* Trim v high bits */ \
+ if (__length < sizeof(__v) * CHAR_BIT) \
+ __v &= ~((~(typeof(__v)) 0) << __length); \
+ \
+ /* We can now append v with a simple "or", shift it piece-wise */ \
+ this_unit = start_unit; \
+ if (start_unit == end_unit - 1) { \
+ mask = ~((~(type) 0) << (__start % ts)); \
+ if (end % ts) \
+ mask |= (~(type) 0) << (end % ts); \
+ cmask = (type) __v << (__start % ts); \
+ cmask &= ~mask; \
+ __ptr[this_unit] &= mask; \
+ __ptr[this_unit] |= cmask; \
+ break; \
+ } \
+ if (__start % ts) { \
+ cshift = __start % ts; \
+ mask = ~((~(type) 0) << cshift); \
+ cmask = (type) __v << cshift; \
+ cmask &= ~mask; \
+ __ptr[this_unit] &= mask; \
+ __ptr[this_unit] |= cmask; \
+ __v = _piecewise_rshift(__v, ts - cshift); \
+ __start += ts - cshift; \
+ this_unit++; \
+ } \
+ for (; this_unit < end_unit - 1; this_unit++) { \
+ __ptr[this_unit] = (type) __v; \
+ __v = _piecewise_rshift(__v, ts); \
+ __start += ts; \
+ } \
+ if (end % ts) { \
+ mask = (~(type) 0) << (end % ts); \
+ cmask = (type) __v; \
+ cmask &= ~mask; \
+ __ptr[this_unit] &= mask; \
+ __ptr[this_unit] |= cmask; \
+ } else \
+ __ptr[this_unit] = (type) __v; \
+} while (0)
+
+#define _bitfield_write_be(_ptr, type, _start, _length, _v) \
+do { \
+ typeof(_v) __v = (_v); \
+ type *__ptr = (void *) (_ptr); \
+ unsigned long __start = (_start), __length = (_length); \
+ type mask, cmask; \
+ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
+ unsigned long start_unit, end_unit, this_unit; \
+ unsigned long end, cshift; /* cshift is "complement shift" */ \
+ \
+ if (!__length) \
+ break; \
+ \
+ end = __start + __length; \
+ start_unit = __start / ts; \
+ end_unit = (end + (ts - 1)) / ts; \
+ \
+ /* Trim v high bits */ \
+ if (__length < sizeof(__v) * CHAR_BIT) \
+ __v &= ~((~(typeof(__v)) 0) << __length); \
+ \
+ /* We can now append v with a simple "or", shift it piece-wise */ \
+ this_unit = end_unit - 1; \
+ if (start_unit == end_unit - 1) { \
+ mask = ~((~(type) 0) << ((ts - (end % ts)) % ts)); \
+ if (__start % ts) \
+ mask |= (~((type) 0)) << (ts - (__start % ts)); \
+ cmask = (type) __v << ((ts - (end % ts)) % ts); \
+ cmask &= ~mask; \
+ __ptr[this_unit] &= mask; \
+ __ptr[this_unit] |= cmask; \
+ break; \
+ } \
+ if (end % ts) { \
+ cshift = end % ts; \
+ mask = ~((~(type) 0) << (ts - cshift)); \
+ cmask = (type) __v << (ts - cshift); \
+ cmask &= ~mask; \
+ __ptr[this_unit] &= mask; \
+ __ptr[this_unit] |= cmask; \
+ __v = _piecewise_rshift(__v, cshift); \
+ end -= cshift; \
+ this_unit--; \
+ } \
+ for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
+ __ptr[this_unit] = (type) __v; \
+ __v = _piecewise_rshift(__v, ts); \
+ end -= ts; \
+ } \
+ if (__start % ts) { \
+ mask = (~(type) 0) << (ts - (__start % ts)); \
+ cmask = (type) __v; \
+ cmask &= ~mask; \
+ __ptr[this_unit] &= mask; \
+ __ptr[this_unit] |= cmask; \
+ } else \
+ __ptr[this_unit] = (type) __v; \
+} while (0)
+
+/*
+ * bitfield_write - write integer to a bitfield in native endianness
+ * bitfield_write_le - write integer to a bitfield in little endian
+ * bitfield_write_be - write integer to a bitfield in big endian
+ */
+
+#if (BYTE_ORDER == LITTLE_ENDIAN)
+
+#define bitfield_write(ptr, type, _start, _length, _v) \
+ _bitfield_write_le(ptr, type, _start, _length, _v)
+
+#define bitfield_write_le(ptr, type, _start, _length, _v) \
+ _bitfield_write_le(ptr, type, _start, _length, _v)
+
+#define bitfield_write_be(ptr, type, _start, _length, _v) \
+ _bitfield_write_be(ptr, unsigned char, _start, _length, _v)
+
+#elif (BYTE_ORDER == BIG_ENDIAN)
+
+#define bitfield_write(ptr, type, _start, _length, _v) \
+ _bitfield_write_be(ptr, type, _start, _length, _v)
+
+#define bitfield_write_le(ptr, type, _start, _length, _v) \
+ _bitfield_write_le(ptr, unsigned char, _start, _length, _v)
+
+#define bitfield_write_be(ptr, type, _start, _length, _v) \
+ _bitfield_write_be(ptr, type, _start, _length, _v)
+
+#else /* (BYTE_ORDER == PDP_ENDIAN) */
+
+#error "Byte order not supported"
+
+#endif
+
+#define _bitfield_read_le(_ptr, type, _start, _length, _vptr) \
+do { \
+ typeof(*(_vptr)) *__vptr = (_vptr); \
+ typeof(*__vptr) __v; \
+ type *__ptr = (void *) (_ptr); \
+ unsigned long __start = (_start), __length = (_length); \
+ type mask, cmask; \
+ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
+ unsigned long start_unit, end_unit, this_unit; \
+ unsigned long end, cshift; /* cshift is "complement shift" */ \
+ \
+ if (!__length) { \
+ *__vptr = 0; \
+ break; \
+ } \
+ \
+ end = __start + __length; \
+ start_unit = __start / ts; \
+ end_unit = (end + (ts - 1)) / ts; \
+ \
+ this_unit = end_unit - 1; \
+ if (_is_signed_type(typeof(__v)) \
+ && (__ptr[this_unit] & ((type) 1 << ((end % ts ? : ts) - 1)))) \
+ __v = ~(typeof(__v)) 0; \
+ else \
+ __v = 0; \
+ if (start_unit == end_unit - 1) { \
+ cmask = __ptr[this_unit]; \
+ cmask >>= (__start % ts); \
+ if ((end - __start) % ts) { \
+ mask = ~((~(type) 0) << (end - __start)); \
+ cmask &= mask; \
+ } \
+ __v = _piecewise_lshift(__v, end - __start); \
+ __v |= _unsigned_cast(typeof(__v), cmask); \
+ *__vptr = __v; \
+ break; \
+ } \
+ if (end % ts) { \
+ cshift = end % ts; \
+ mask = ~((~(type) 0) << cshift); \
+ cmask = __ptr[this_unit]; \
+ cmask &= mask; \
+ __v = _piecewise_lshift(__v, cshift); \
+ __v |= _unsigned_cast(typeof(__v), cmask); \
+ end -= cshift; \
+ this_unit--; \
+ } \
+ for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
+ __v = _piecewise_lshift(__v, ts); \
+ __v |= _unsigned_cast(typeof(__v), __ptr[this_unit]);\
+ end -= ts; \
+ } \
+ if (__start % ts) { \
+ mask = ~((~(type) 0) << (ts - (__start % ts))); \
+ cmask = __ptr[this_unit]; \
+ cmask >>= (__start % ts); \
+ cmask &= mask; \
+ __v = _piecewise_lshift(__v, ts - (__start % ts)); \
+ __v |= _unsigned_cast(typeof(__v), cmask); \
+ } else { \
+ __v = _piecewise_lshift(__v, ts); \
+ __v |= _unsigned_cast(typeof(__v), __ptr[this_unit]);\
+ } \
+ *__vptr = __v; \
+} while (0)
+
+#define _bitfield_read_be(_ptr, type, _start, _length, _vptr) \
+do { \
+ typeof(*(_vptr)) *__vptr = (_vptr); \
+ typeof(*__vptr) __v; \
+ type *__ptr = (void *) (_ptr); \
+ unsigned long __start = (_start), __length = (_length); \
+ type mask, cmask; \
+ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
+ unsigned long start_unit, end_unit, this_unit; \
+ unsigned long end, cshift; /* cshift is "complement shift" */ \
+ \
+ if (!__length) { \
+ *__vptr = 0; \
+ break; \
+ } \
+ \
+ end = __start + __length; \
+ start_unit = __start / ts; \
+ end_unit = (end + (ts - 1)) / ts; \
+ \
+ this_unit = start_unit; \
+ if (_is_signed_type(typeof(__v)) \
+ && (__ptr[this_unit] & ((type) 1 << (ts - (__start % ts) - 1)))) \
+ __v = ~(typeof(__v)) 0; \
+ else \
+ __v = 0; \
+ if (start_unit == end_unit - 1) { \
+ cmask = __ptr[this_unit]; \
+ cmask >>= (ts - (end % ts)) % ts; \
+ if ((end - __start) % ts) { \
+ mask = ~((~(type) 0) << (end - __start)); \
+ cmask &= mask; \
+ } \
+ __v = _piecewise_lshift(__v, end - __start); \
+ __v |= _unsigned_cast(typeof(__v), cmask); \
+ *__vptr = __v; \
+ break; \
+ } \
+ if (__start % ts) { \
+ cshift = __start % ts; \
+ mask = ~((~(type) 0) << (ts - cshift)); \
+ cmask = __ptr[this_unit]; \
+ cmask &= mask; \
+ __v = _piecewise_lshift(__v, ts - cshift); \
+ __v |= _unsigned_cast(typeof(__v), cmask); \
+ __start += ts - cshift; \
+ this_unit++; \
+ } \
+ for (; this_unit < end_unit - 1; this_unit++) { \
+ __v = _piecewise_lshift(__v, ts); \
+ __v |= _unsigned_cast(typeof(__v), __ptr[this_unit]);\
+ __start += ts; \
+ } \
+ if (end % ts) { \
+ mask = ~((~(type) 0) << (end % ts)); \
+ cmask = __ptr[this_unit]; \
+ cmask >>= ts - (end % ts); \
+ cmask &= mask; \
+ __v = _piecewise_lshift(__v, end % ts); \
+ __v |= _unsigned_cast(typeof(__v), cmask); \
+ } else { \
+ __v = _piecewise_lshift(__v, ts); \
+ __v |= _unsigned_cast(typeof(__v), __ptr[this_unit]);\
+ } \
+ *__vptr = __v; \
+} while (0)
+
+/*
+ * bitfield_read - read integer from a bitfield in native endianness
+ * bitfield_read_le - read integer from a bitfield in little endian
+ * bitfield_read_be - read integer from a bitfield in big endian
+ */
+
+#if (BYTE_ORDER == LITTLE_ENDIAN)
+
+#define bitfield_read(_ptr, type, _start, _length, _vptr) \
+ _bitfield_read_le(_ptr, type, _start, _length, _vptr)
+
+#define bitfield_read_le(_ptr, type, _start, _length, _vptr) \
+ _bitfield_read_le(_ptr, type, _start, _length, _vptr)
+
+#define bitfield_read_be(_ptr, type, _start, _length, _vptr) \
+ _bitfield_read_be(_ptr, unsigned char, _start, _length, _vptr)
+
+#elif (BYTE_ORDER == BIG_ENDIAN)
+
+#define bitfield_read(_ptr, type, _start, _length, _vptr) \
+ _bitfield_read_be(_ptr, type, _start, _length, _vptr)
+
+#define bitfield_read_le(_ptr, type, _start, _length, _vptr) \
+ _bitfield_read_le(_ptr, unsigned char, _start, _length, _vptr)
+
+#define bitfield_read_be(_ptr, type, _start, _length, _vptr) \
+ _bitfield_read_be(_ptr, type, _start, _length, _vptr)
+
+#else /* (BYTE_ORDER == PDP_ENDIAN) */
+
+#error "Byte order not supported"
+
+#endif
+
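+/*
+ * Usage sketch: pack a 12-bit value at bit offset 5 of a byte array and read
+ * it back in native endianness (the read/write pair round-trips by
+ * construction):
+ *
+ *	unsigned char buf[4] = { 0 };
+ *	unsigned int out = 0;
+ *
+ *	bitfield_write(buf, unsigned char, 5, 12, 0xabcU);
+ *	bitfield_read(buf, unsigned char, 5, 12, &out);
+ *	assert(out == 0xabcU);
+ */
+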
+#endif /* _LTTNG_BITFIELD_H */
}
array_alloc_len = ALIGN(kmask_len.len, 8) >> 3;
+
kmask = zmalloc(sizeof(*kmask) + array_alloc_len);
if (!kmask) {
ret = -1;
goto end;
}
- new_mask = realloc(syscall_mask, array_alloc_len);
+ new_mask = realloc(*syscall_mask, array_alloc_len);
if (!new_mask) {
ret = -1;
goto end;
LTTNG_CALIBRATE = 1,
LTTNG_DISABLE_CHANNEL = 2,
LTTNG_DISABLE_EVENT = 3,
- /* 4 */
+ LTTNG_LIST_SYSCALLS = 4,
LTTNG_ENABLE_CHANNEL = 5,
LTTNG_ENABLE_EVENT = 6,
/* 7 */
return ret / sizeof(struct lttng_event_field);
}
+/*
+ * Lists all available kernel system calls. Allocates and sets the contents of
+ * the events array.
+ *
+ * Returns the number of lttng_event entries in events; on error, returns a
+ * negative LTTng error code.
+ */
+int lttng_list_syscalls(struct lttng_event **events)
+{
+ int ret;
+ struct lttcomm_session_msg lsm;
+
+ if (!events) {
+ return -LTTNG_ERR_INVALID;
+ }
+
+ memset(&lsm, 0, sizeof(lsm));
+ lsm.cmd_type = LTTNG_LIST_SYSCALLS;
+ /* Force kernel domain for system calls. */
+ lsm.domain.type = LTTNG_DOMAIN_KERNEL;
+
+ ret = lttng_ctl_ask_sessiond(&lsm, (void **) events);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return ret / sizeof(struct lttng_event);
+}
+
/*
* Returns a human readable string describing
* the error code (a negative value).