enum lttng_kernel_calibrate_type type; /* type (input) */
} __attribute__((packed));
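+/*
+ * mask holds one bit per kernel syscall table entry: native syscalls
+ * first, followed by the compat syscalls. len is expressed in bits. If
+ * len is too small to hold the whole bitmask, the LTTNG_KERNEL_SYSCALL_MASK
+ * ioctl writes the required length back into len and copies no mask data.
+ */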
+struct lttng_kernel_syscall_mask {
+	uint32_t len;	/* length of the mask, in bits (input/output) */
+ char mask[];
+} __attribute__((packed));
+
enum lttng_kernel_context_type {
LTTNG_KERNEL_CONTEXT_PID = 0,
LTTNG_KERNEL_CONTEXT_PERF_COUNTER = 1,
#define LTTNG_KERNEL_STREAM _IO(0xF6, 0x62)
#define LTTNG_KERNEL_EVENT \
_IOW(0xF6, 0x63, struct lttng_kernel_event)
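+/* Query the per-syscall trace-state bitmask of a channel. */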
+#define LTTNG_KERNEL_SYSCALL_MASK \
+ _IOWR(0xF6, 0x64, struct lttng_kernel_syscall_mask)
/* Event and Channel FD ioctl */
#define LTTNG_KERNEL_CONTEXT \
const char *name);
int lttng_syscall_filter_disable(struct lttng_channel *chan,
const char *name);
+long lttng_channel_syscall_mask(struct lttng_channel *channel,
+ struct lttng_kernel_syscall_mask __user *usyscall_mask);
#else
static inline int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
{
return 0;
}
+static inline
int lttng_syscall_filter_enable(struct lttng_channel *chan,
const char *name)
{
return -ENOSYS;
}
+static inline
int lttng_syscall_filter_disable(struct lttng_channel *chan,
const char *name)
{
return -ENOSYS;
}
+
+static inline
+long lttng_channel_syscall_mask(struct lttng_channel *channel,
+ struct lttng_kernel_syscall_mask __user *usyscall_mask)
+{
+ return -ENOSYS;
+}
#endif
struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx);
#include <asm/ptrace.h>
#include <asm/syscall.h>
+#include "lib/bitfield.h"
#include "wrapper/tracepoint.h"
#include "lttng-events.h"
return syscall_nr;
}
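+/* Total number of entries in the native and compat syscall tables. */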
+static
+uint32_t get_sc_tables_len(void)
+{
+ return ARRAY_SIZE(sc_table) + ARRAY_SIZE(compat_sc_table);
+}
+
int lttng_syscall_filter_enable(struct lttng_channel *chan,
const char *name)
{
{
}
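+/*
+ * Map a syscall table entry back to the table (native or compat) that
+ * contains it, and report the bitness of that table. Returns -EINVAL if
+ * the entry belongs to neither table.
+ */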
+static
+int get_sc_table(const struct trace_syscall_entry *entry,
+ const struct trace_syscall_entry **table,
+ unsigned int *bitness)
+{
+ if (entry >= sc_table && entry < sc_table + ARRAY_SIZE(sc_table)) {
+ if (bitness)
+ *bitness = BITS_PER_LONG;
+ if (table)
+ *table = sc_table;
+ return 0;
+ }
+ if (!(entry >= compat_sc_table
+ && entry < compat_sc_table + ARRAY_SIZE(compat_sc_table))) {
+ return -EINVAL;
+ }
+ if (bitness)
+ *bitness = 32;
+ if (table)
+ *table = compat_sc_table;
+ return 0;
+}
+
static
int syscall_list_show(struct seq_file *m, void *p)
{
const struct trace_syscall_entry *table, *entry = p;
unsigned int bitness;
+ int ret;
- if (entry >= sc_table && entry < sc_table + ARRAY_SIZE(sc_table)) {
- bitness = BITS_PER_LONG;
- table = sc_table;
- } else {
- bitness = 32;
- table = compat_sc_table;
- WARN_ON_ONCE(!(entry >= compat_sc_table
- && entry < compat_sc_table + ARRAY_SIZE(compat_sc_table)));
- }
- seq_printf(m, "syscall { id = %u; name = %s; bitness = %u; };\n",
+ ret = get_sc_table(entry, &table, &bitness);
+ if (ret)
+ return ret;
+	seq_printf(m,	"syscall { index = %td; name = %s; bitness = %u; };\n",
entry - table,
entry->desc->name,
bitness);
.llseek = seq_lseek,
.release = seq_release,
};
+
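+/*
+ * Fill a user-supplied bitmask with the enable state of every syscall
+ * known to the tracer. If the supplied mask is too small, only the
+ * required length (in bits) is written back to usyscall_mask->len.
+ */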
+long lttng_channel_syscall_mask(struct lttng_channel *channel,
+ struct lttng_kernel_syscall_mask __user *usyscall_mask)
+{
+ uint32_t len, sc_tables_len, bitmask_len;
+ int ret = 0, bit;
+ char *tmp_mask;
+ struct lttng_syscall_filter *filter;
+
+ ret = get_user(len, &usyscall_mask->len);
+ if (ret)
+ return ret;
+ sc_tables_len = get_sc_tables_len();
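+	/* Round the number of bits up to a whole number of bytes. */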
+ bitmask_len = ALIGN(sc_tables_len, 8) >> 3;
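+	/* Buffer too small: tell the caller how many bits are needed. */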
+ if (len < sc_tables_len) {
+ return put_user(sc_tables_len, &usyscall_mask->len);
+ }
+ /* Array is large enough, we can copy array to user-space. */
+ tmp_mask = kzalloc(bitmask_len, GFP_KERNEL);
+ if (!tmp_mask)
+ return -ENOMEM;
+	filter = channel->sc_filter;
+
+	/*
+	 * A NULL sc_filter means no per-syscall filtering has been set up
+	 * on this channel, i.e. every syscall is traced: report the bit as
+	 * set instead of dereferencing a NULL pointer.
+	 */
+	/* Native syscalls occupy the first ARRAY_SIZE(sc_table) bits. */
+	for (bit = 0; bit < ARRAY_SIZE(sc_table); bit++) {
+		bt_bitfield_write_be(tmp_mask, char, bit, 1,
+			(filter ? test_bit(bit, filter->sc) : 1));
+	}
+	/* The compat syscalls follow. */
+	for (; bit < sc_tables_len; bit++) {
+		bt_bitfield_write_be(tmp_mask, char, bit, 1,
+			(filter ? test_bit(bit - ARRAY_SIZE(sc_table),
+				filter->sc_compat) : 1));
+	}
+ if (copy_to_user(usyscall_mask->mask, tmp_mask, bitmask_len))
+ ret = -EFAULT;
+ kfree(tmp_mask);
+ return ret;
+}