Cleanup: Move all source files to src/
author:    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Wed, 6 May 2020 18:08:22 +0000 (14:08 -0400)
committer: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Wed, 6 May 2020 19:08:56 +0000 (15:08 -0400)
This includes *.c, lib/*/*.c, probes/*.c, wrapper/*.c.

Adapt Makefile and Kbuild accordingly. Introduce src/Kbuild.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
256 files changed:
Makefile
lib/Kbuild [deleted file]
lib/prio_heap/lttng_prio_heap.c [deleted file]
lib/ringbuffer/ring_buffer_backend.c [deleted file]
lib/ringbuffer/ring_buffer_frontend.c [deleted file]
lib/ringbuffer/ring_buffer_iterator.c [deleted file]
lib/ringbuffer/ring_buffer_mmap.c [deleted file]
lib/ringbuffer/ring_buffer_splice.c [deleted file]
lib/ringbuffer/ring_buffer_vfs.c [deleted file]
lttng-abi.c [deleted file]
lttng-calibrate.c [deleted file]
lttng-clock.c [deleted file]
lttng-context-callstack-legacy-impl.h [deleted file]
lttng-context-callstack-stackwalk-impl.h [deleted file]
lttng-context-callstack.c [deleted file]
lttng-context-cgroup-ns.c [deleted file]
lttng-context-cpu-id.c [deleted file]
lttng-context-egid.c [deleted file]
lttng-context-euid.c [deleted file]
lttng-context-gid.c [deleted file]
lttng-context-hostname.c [deleted file]
lttng-context-interruptible.c [deleted file]
lttng-context-ipc-ns.c [deleted file]
lttng-context-migratable.c [deleted file]
lttng-context-mnt-ns.c [deleted file]
lttng-context-need-reschedule.c [deleted file]
lttng-context-net-ns.c [deleted file]
lttng-context-nice.c [deleted file]
lttng-context-perf-counters.c [deleted file]
lttng-context-pid-ns.c [deleted file]
lttng-context-pid.c [deleted file]
lttng-context-ppid.c [deleted file]
lttng-context-preemptible.c [deleted file]
lttng-context-prio.c [deleted file]
lttng-context-procname.c [deleted file]
lttng-context-sgid.c [deleted file]
lttng-context-suid.c [deleted file]
lttng-context-tid.c [deleted file]
lttng-context-uid.c [deleted file]
lttng-context-user-ns.c [deleted file]
lttng-context-uts-ns.c [deleted file]
lttng-context-vegid.c [deleted file]
lttng-context-veuid.c [deleted file]
lttng-context-vgid.c [deleted file]
lttng-context-vpid.c [deleted file]
lttng-context-vppid.c [deleted file]
lttng-context-vsgid.c [deleted file]
lttng-context-vsuid.c [deleted file]
lttng-context-vtid.c [deleted file]
lttng-context-vuid.c [deleted file]
lttng-context.c [deleted file]
lttng-events.c [deleted file]
lttng-filter-interpreter.c [deleted file]
lttng-filter-specialize.c [deleted file]
lttng-filter-validator.c [deleted file]
lttng-filter.c [deleted file]
lttng-probes.c [deleted file]
lttng-ring-buffer-client-discard.c [deleted file]
lttng-ring-buffer-client-mmap-discard.c [deleted file]
lttng-ring-buffer-client-mmap-overwrite.c [deleted file]
lttng-ring-buffer-client-overwrite.c [deleted file]
lttng-ring-buffer-client.h [deleted file]
lttng-ring-buffer-metadata-client.c [deleted file]
lttng-ring-buffer-metadata-client.h [deleted file]
lttng-ring-buffer-metadata-mmap-client.c [deleted file]
lttng-statedump-impl.c [deleted file]
lttng-string-utils.c [deleted file]
lttng-syscalls.c [deleted file]
lttng-tp-mempool.c [deleted file]
lttng-tracepoint.c [deleted file]
lttng-tracker-id.c [deleted file]
lttng-wrapper-impl.c [deleted file]
probes/Kbuild [deleted file]
probes/lttng-kprobes.c [deleted file]
probes/lttng-kretprobes.c [deleted file]
probes/lttng-probe-9p.c [deleted file]
probes/lttng-probe-asoc.c [deleted file]
probes/lttng-probe-block.c [deleted file]
probes/lttng-probe-btrfs.c [deleted file]
probes/lttng-probe-compaction.c [deleted file]
probes/lttng-probe-ext3.c [deleted file]
probes/lttng-probe-ext4.c [deleted file]
probes/lttng-probe-gpio.c [deleted file]
probes/lttng-probe-i2c.c [deleted file]
probes/lttng-probe-irq.c [deleted file]
probes/lttng-probe-jbd.c [deleted file]
probes/lttng-probe-jbd2.c [deleted file]
probes/lttng-probe-kmem.c [deleted file]
probes/lttng-probe-kvm-x86-mmu.c [deleted file]
probes/lttng-probe-kvm-x86.c [deleted file]
probes/lttng-probe-kvm.c [deleted file]
probes/lttng-probe-lock.c [deleted file]
probes/lttng-probe-module.c [deleted file]
probes/lttng-probe-napi.c [deleted file]
probes/lttng-probe-net.c [deleted file]
probes/lttng-probe-power.c [deleted file]
probes/lttng-probe-preemptirq.c [deleted file]
probes/lttng-probe-printk.c [deleted file]
probes/lttng-probe-random.c [deleted file]
probes/lttng-probe-rcu.c [deleted file]
probes/lttng-probe-regmap.c [deleted file]
probes/lttng-probe-regulator.c [deleted file]
probes/lttng-probe-rpm.c [deleted file]
probes/lttng-probe-sched.c [deleted file]
probes/lttng-probe-scsi.c [deleted file]
probes/lttng-probe-signal.c [deleted file]
probes/lttng-probe-skb.c [deleted file]
probes/lttng-probe-sock.c [deleted file]
probes/lttng-probe-statedump.c [deleted file]
probes/lttng-probe-sunrpc.c [deleted file]
probes/lttng-probe-timer.c [deleted file]
probes/lttng-probe-udp.c [deleted file]
probes/lttng-probe-user.c [deleted file]
probes/lttng-probe-v4l2.c [deleted file]
probes/lttng-probe-vmscan.c [deleted file]
probes/lttng-probe-workqueue.c [deleted file]
probes/lttng-probe-writeback.c [deleted file]
probes/lttng-probe-x86-exceptions.c [deleted file]
probes/lttng-probe-x86-irq-vectors.c [deleted file]
probes/lttng-uprobes.c [deleted file]
probes/lttng.c [deleted file]
src/Kbuild [new file with mode: 0644]
src/lib/Kbuild [new file with mode: 0644]
src/lib/prio_heap/lttng_prio_heap.c [new file with mode: 0644]
src/lib/ringbuffer/ring_buffer_backend.c [new file with mode: 0644]
src/lib/ringbuffer/ring_buffer_frontend.c [new file with mode: 0644]
src/lib/ringbuffer/ring_buffer_iterator.c [new file with mode: 0644]
src/lib/ringbuffer/ring_buffer_mmap.c [new file with mode: 0644]
src/lib/ringbuffer/ring_buffer_splice.c [new file with mode: 0644]
src/lib/ringbuffer/ring_buffer_vfs.c [new file with mode: 0644]
src/lttng-abi.c [new file with mode: 0644]
src/lttng-calibrate.c [new file with mode: 0644]
src/lttng-clock.c [new file with mode: 0644]
src/lttng-context-callstack-legacy-impl.h [new file with mode: 0644]
src/lttng-context-callstack-stackwalk-impl.h [new file with mode: 0644]
src/lttng-context-callstack.c [new file with mode: 0644]
src/lttng-context-cgroup-ns.c [new file with mode: 0644]
src/lttng-context-cpu-id.c [new file with mode: 0644]
src/lttng-context-egid.c [new file with mode: 0644]
src/lttng-context-euid.c [new file with mode: 0644]
src/lttng-context-gid.c [new file with mode: 0644]
src/lttng-context-hostname.c [new file with mode: 0644]
src/lttng-context-interruptible.c [new file with mode: 0644]
src/lttng-context-ipc-ns.c [new file with mode: 0644]
src/lttng-context-migratable.c [new file with mode: 0644]
src/lttng-context-mnt-ns.c [new file with mode: 0644]
src/lttng-context-need-reschedule.c [new file with mode: 0644]
src/lttng-context-net-ns.c [new file with mode: 0644]
src/lttng-context-nice.c [new file with mode: 0644]
src/lttng-context-perf-counters.c [new file with mode: 0644]
src/lttng-context-pid-ns.c [new file with mode: 0644]
src/lttng-context-pid.c [new file with mode: 0644]
src/lttng-context-ppid.c [new file with mode: 0644]
src/lttng-context-preemptible.c [new file with mode: 0644]
src/lttng-context-prio.c [new file with mode: 0644]
src/lttng-context-procname.c [new file with mode: 0644]
src/lttng-context-sgid.c [new file with mode: 0644]
src/lttng-context-suid.c [new file with mode: 0644]
src/lttng-context-tid.c [new file with mode: 0644]
src/lttng-context-uid.c [new file with mode: 0644]
src/lttng-context-user-ns.c [new file with mode: 0644]
src/lttng-context-uts-ns.c [new file with mode: 0644]
src/lttng-context-vegid.c [new file with mode: 0644]
src/lttng-context-veuid.c [new file with mode: 0644]
src/lttng-context-vgid.c [new file with mode: 0644]
src/lttng-context-vpid.c [new file with mode: 0644]
src/lttng-context-vppid.c [new file with mode: 0644]
src/lttng-context-vsgid.c [new file with mode: 0644]
src/lttng-context-vsuid.c [new file with mode: 0644]
src/lttng-context-vtid.c [new file with mode: 0644]
src/lttng-context-vuid.c [new file with mode: 0644]
src/lttng-context.c [new file with mode: 0644]
src/lttng-events.c [new file with mode: 0644]
src/lttng-filter-interpreter.c [new file with mode: 0644]
src/lttng-filter-specialize.c [new file with mode: 0644]
src/lttng-filter-validator.c [new file with mode: 0644]
src/lttng-filter.c [new file with mode: 0644]
src/lttng-probes.c [new file with mode: 0644]
src/lttng-ring-buffer-client-discard.c [new file with mode: 0644]
src/lttng-ring-buffer-client-mmap-discard.c [new file with mode: 0644]
src/lttng-ring-buffer-client-mmap-overwrite.c [new file with mode: 0644]
src/lttng-ring-buffer-client-overwrite.c [new file with mode: 0644]
src/lttng-ring-buffer-client.h [new file with mode: 0644]
src/lttng-ring-buffer-metadata-client.c [new file with mode: 0644]
src/lttng-ring-buffer-metadata-client.h [new file with mode: 0644]
src/lttng-ring-buffer-metadata-mmap-client.c [new file with mode: 0644]
src/lttng-statedump-impl.c [new file with mode: 0644]
src/lttng-string-utils.c [new file with mode: 0644]
src/lttng-syscalls.c [new file with mode: 0644]
src/lttng-tp-mempool.c [new file with mode: 0644]
src/lttng-tracepoint.c [new file with mode: 0644]
src/lttng-tracker-id.c [new file with mode: 0644]
src/lttng-wrapper-impl.c [new file with mode: 0644]
src/probes/Kbuild [new file with mode: 0644]
src/probes/lttng-kprobes.c [new file with mode: 0644]
src/probes/lttng-kretprobes.c [new file with mode: 0644]
src/probes/lttng-probe-9p.c [new file with mode: 0644]
src/probes/lttng-probe-asoc.c [new file with mode: 0644]
src/probes/lttng-probe-block.c [new file with mode: 0644]
src/probes/lttng-probe-btrfs.c [new file with mode: 0644]
src/probes/lttng-probe-compaction.c [new file with mode: 0644]
src/probes/lttng-probe-ext3.c [new file with mode: 0644]
src/probes/lttng-probe-ext4.c [new file with mode: 0644]
src/probes/lttng-probe-gpio.c [new file with mode: 0644]
src/probes/lttng-probe-i2c.c [new file with mode: 0644]
src/probes/lttng-probe-irq.c [new file with mode: 0644]
src/probes/lttng-probe-jbd.c [new file with mode: 0644]
src/probes/lttng-probe-jbd2.c [new file with mode: 0644]
src/probes/lttng-probe-kmem.c [new file with mode: 0644]
src/probes/lttng-probe-kvm-x86-mmu.c [new file with mode: 0644]
src/probes/lttng-probe-kvm-x86.c [new file with mode: 0644]
src/probes/lttng-probe-kvm.c [new file with mode: 0644]
src/probes/lttng-probe-lock.c [new file with mode: 0644]
src/probes/lttng-probe-module.c [new file with mode: 0644]
src/probes/lttng-probe-napi.c [new file with mode: 0644]
src/probes/lttng-probe-net.c [new file with mode: 0644]
src/probes/lttng-probe-power.c [new file with mode: 0644]
src/probes/lttng-probe-preemptirq.c [new file with mode: 0644]
src/probes/lttng-probe-printk.c [new file with mode: 0644]
src/probes/lttng-probe-random.c [new file with mode: 0644]
src/probes/lttng-probe-rcu.c [new file with mode: 0644]
src/probes/lttng-probe-regmap.c [new file with mode: 0644]
src/probes/lttng-probe-regulator.c [new file with mode: 0644]
src/probes/lttng-probe-rpm.c [new file with mode: 0644]
src/probes/lttng-probe-sched.c [new file with mode: 0644]
src/probes/lttng-probe-scsi.c [new file with mode: 0644]
src/probes/lttng-probe-signal.c [new file with mode: 0644]
src/probes/lttng-probe-skb.c [new file with mode: 0644]
src/probes/lttng-probe-sock.c [new file with mode: 0644]
src/probes/lttng-probe-statedump.c [new file with mode: 0644]
src/probes/lttng-probe-sunrpc.c [new file with mode: 0644]
src/probes/lttng-probe-timer.c [new file with mode: 0644]
src/probes/lttng-probe-udp.c [new file with mode: 0644]
src/probes/lttng-probe-user.c [new file with mode: 0644]
src/probes/lttng-probe-v4l2.c [new file with mode: 0644]
src/probes/lttng-probe-vmscan.c [new file with mode: 0644]
src/probes/lttng-probe-workqueue.c [new file with mode: 0644]
src/probes/lttng-probe-writeback.c [new file with mode: 0644]
src/probes/lttng-probe-x86-exceptions.c [new file with mode: 0644]
src/probes/lttng-probe-x86-irq-vectors.c [new file with mode: 0644]
src/probes/lttng-uprobes.c [new file with mode: 0644]
src/probes/lttng.c [new file with mode: 0644]
src/wrapper/fdtable.c [new file with mode: 0644]
src/wrapper/irqdesc.c [new file with mode: 0644]
src/wrapper/kallsyms.c [new file with mode: 0644]
src/wrapper/page_alloc.c [new file with mode: 0644]
src/wrapper/random.c [new file with mode: 0644]
src/wrapper/splice.c [new file with mode: 0644]
src/wrapper/trace-clock.c [new file with mode: 0644]
wrapper/fdtable.c [deleted file]
wrapper/irqdesc.c [deleted file]
wrapper/kallsyms.c [deleted file]
wrapper/page_alloc.c [deleted file]
wrapper/random.c [deleted file]
wrapper/splice.c [deleted file]
wrapper/trace-clock.c [deleted file]

index 7ee341e1c561a5940b60655ed3e635d1fc9c59a9..c586efedc8c52538308fc9f81d3373d34709dc2b 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -27,114 +27,7 @@ ifneq ($(KERNELRELEASE),)
     endif
   endif
 
-  include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common
-
-  ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)/include
-
-  obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-discard.o
-  obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-overwrite.o
-  obj-$(CONFIG_LTTNG) += lttng-ring-buffer-metadata-client.o
-  obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-discard.o
-  obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-overwrite.o
-  obj-$(CONFIG_LTTNG) += lttng-ring-buffer-metadata-mmap-client.o
-  obj-$(CONFIG_LTTNG) += lttng-clock.o
-
-  obj-$(CONFIG_LTTNG) += lttng-tracer.o
-
-  obj-$(CONFIG_LTTNG) += lttng-wrapper.o
-
-  lttng-tracer-objs := lttng-events.o lttng-abi.o lttng-string-utils.o \
-                       lttng-probes.o lttng-context.o \
-                       lttng-context-pid.o lttng-context-procname.o \
-                       lttng-context-prio.o lttng-context-nice.o \
-                       lttng-context-vpid.o lttng-context-tid.o \
-                       lttng-context-vtid.o lttng-context-ppid.o \
-                       lttng-context-vppid.o lttng-context-cpu-id.o \
-                       lttng-context-uid.o \
-                       lttng-context-euid.o \
-                       lttng-context-suid.o \
-                       lttng-context-gid.o \
-                       lttng-context-egid.o \
-                       lttng-context-sgid.o \
-                       lttng-context-vuid.o \
-                       lttng-context-veuid.o \
-                       lttng-context-vsuid.o \
-                       lttng-context-vgid.o \
-                       lttng-context-vegid.o \
-                       lttng-context-vsgid.o \
-                       lttng-context-interruptible.o \
-                       lttng-context-need-reschedule.o \
-                       lttng-context-callstack.o lttng-calibrate.o \
-                       lttng-context-hostname.o \
-                       probes/lttng.o \
-                       lttng-tracker-id.o \
-                       lttng-filter.o lttng-filter-interpreter.o \
-                       lttng-filter-specialize.o \
-                       lttng-filter-validator.o \
-                       probes/lttng-probe-user.o \
-                       lttng-tp-mempool.o \
-
-  lttng-wrapper-objs := wrapper/page_alloc.o \
-                        wrapper/random.o \
-                        wrapper/trace-clock.o \
-                        wrapper/kallsyms.o \
-                        wrapper/irqdesc.o \
-                        wrapper/fdtable.o \
-                        lttng-wrapper-impl.o
-
-  ifneq ($(CONFIG_HAVE_SYSCALL_TRACEPOINTS),)
-    lttng-tracer-objs += lttng-syscalls.o
-  endif # CONFIG_HAVE_SYSCALL_TRACEPOINTS
-
-  ifneq ($(CONFIG_PERF_EVENTS),)
-    lttng-tracer-objs += lttng-context-perf-counters.o
-  endif # CONFIG_PERF_EVENTS
-
-  ifneq ($(CONFIG_PREEMPT_RT_FULL),)
-    lttng-tracer-objs += lttng-context-migratable.o
-    lttng-tracer-objs += lttng-context-preemptible.o
-  endif # CONFIG_PREEMPT_RT_FULL
-
-  ifneq ($(CONFIG_PREEMPT),)
-    lttng-tracer-objs += lttng-context-preemptible.o
-  endif
-
-  lttng-tracer-objs += $(shell \
-    if [ $(VERSION) -ge 4 \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 15 \) ] ; then \
-      echo "lttng-tracepoint.o" ; fi;)
-
-  lttng-tracer-objs += lttng-context-cgroup-ns.o
-
-  ifneq ($(CONFIG_IPC_NS),)
-    lttng-tracer-objs += lttng-context-ipc-ns.o
-  endif
-
-  ifneq ($(wildcard $(mnt_ns_dep)),)
-     lttng-tracer-objs += lttng-context-mnt-ns.o
-  endif
-
-  ifneq ($(CONFIG_NET_NS),)
-    lttng-tracer-objs += lttng-context-net-ns.o
-  endif
-
-  ifneq ($(CONFIG_PID_NS),)
-    lttng-tracer-objs += lttng-context-pid-ns.o
-  endif
-
-  ifneq ($(CONFIG_USER_NS),)
-    lttng-tracer-objs += lttng-context-user-ns.o
-  endif
-
-  ifneq ($(CONFIG_UTS_NS),)
-    lttng-tracer-objs += lttng-context-uts-ns.o
-  endif
-
-  obj-$(CONFIG_LTTNG) += lttng-statedump.o
-  lttng-statedump-objs := lttng-statedump-impl.o
-
-  obj-$(CONFIG_LTTNG) += probes/
-  obj-$(CONFIG_LTTNG) += lib/
+  obj-$(CONFIG_LTTNG) += src/
   obj-$(CONFIG_LTTNG) += tests/
 
 else # KERNELRELEASE
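
[Editorial note: the content of the new src/Kbuild is not shown in this view.
Judging from the rules removed from the top-level Makefile above, it plausibly
looks roughly like the sketch below; nothing here beyond the file name is
confirmed by the diff.]

  # SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)

  # Sketch only -- the committed src/Kbuild may differ.
  TOP_LTTNG_MODULES_DIR := $(shell dirname $(lastword $(MAKEFILE_LIST)))/..

  include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common

  ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)/include

  obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-discard.o
  obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-overwrite.o
  obj-$(CONFIG_LTTNG) += lttng-ring-buffer-metadata-client.o
  obj-$(CONFIG_LTTNG) += lttng-clock.o
  obj-$(CONFIG_LTTNG) += lttng-tracer.o
  obj-$(CONFIG_LTTNG) += lttng-wrapper.o
  obj-$(CONFIG_LTTNG) += lttng-statedump.o

  # The lttng-tracer-objs / lttng-wrapper-objs lists carried over from the
  # old top-level Makefile would follow here, and the subdirectories are
  # now built from this file:
  obj-$(CONFIG_LTTNG) += probes/
  obj-$(CONFIG_LTTNG) += lib/
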
diff --git a/lib/Kbuild b/lib/Kbuild
deleted file mode 100644 (file)
index a2837b4..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
-
-TOP_LTTNG_MODULES_DIR := $(shell dirname $(lastword $(MAKEFILE_LIST)))/..
-
-include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common
-
-ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)/include
-
-obj-$(CONFIG_LTTNG) += lttng-lib-ring-buffer.o
-
-lttng-lib-ring-buffer-objs := \
-  ringbuffer/ring_buffer_backend.o \
-  ringbuffer/ring_buffer_frontend.o \
-  ringbuffer/ring_buffer_iterator.o \
-  ringbuffer/ring_buffer_vfs.o \
-  ringbuffer/ring_buffer_splice.o \
-  ringbuffer/ring_buffer_mmap.o \
-  prio_heap/lttng_prio_heap.o \
-  ../wrapper/splice.o
-
-# vim:syntax=make
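
[Editorial note: the replacement src/lib/Kbuild is a new file whose content is
likewise not shown. Assuming it mirrors the deleted lib/Kbuild with the
relative path to the module top adjusted by one directory level (../.. instead
of ..), it would read roughly as follows; this is a sketch, not the committed
file.]

  # SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)

  TOP_LTTNG_MODULES_DIR := $(shell dirname $(lastword $(MAKEFILE_LIST)))/../..

  include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common

  ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)/include

  obj-$(CONFIG_LTTNG) += lttng-lib-ring-buffer.o

  lttng-lib-ring-buffer-objs := \
    ringbuffer/ring_buffer_backend.o \
    ringbuffer/ring_buffer_frontend.o \
    ringbuffer/ring_buffer_iterator.o \
    ringbuffer/ring_buffer_vfs.o \
    ringbuffer/ring_buffer_splice.o \
    ringbuffer/ring_buffer_mmap.o \
    prio_heap/lttng_prio_heap.o \
    ../wrapper/splice.o
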
diff --git a/lib/prio_heap/lttng_prio_heap.c b/lib/prio_heap/lttng_prio_heap.c
deleted file mode 100644 (file)
index 0b85ddb..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * lttng_prio_heap.c
- *
- * Priority heap containing pointers. Based on CLRS, chapter 6.
- *
- * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/slab.h>
-#include <lttng/prio_heap.h>
-#include <wrapper/vmalloc.h>
-
-#ifdef DEBUG_HEAP
-void lttng_check_heap(const struct lttng_ptr_heap *heap)
-{
-       size_t i;
-
-       if (!heap->len)
-               return;
-
-       for (i = 1; i < heap->len; i++)
-               WARN_ON_ONCE(!heap->gt(heap->ptrs[i], heap->ptrs[0]));
-}
-#endif
-
-static
-size_t parent(size_t i)
-{
-       return (i - 1) >> 1;
-}
-
-static
-size_t left(size_t i)
-{
-       return (i << 1) + 1;
-}
-
-static
-size_t right(size_t i)
-{
-       return (i << 1) + 2;
-}
-
-/*
- * Copy of heap->ptrs pointer is invalid after heap_grow.
- */
-static
-int heap_grow(struct lttng_ptr_heap *heap, size_t new_len)
-{
-       void **new_ptrs;
-
-       if (heap->alloc_len >= new_len)
-               return 0;
-
-       heap->alloc_len = max_t(size_t, new_len, heap->alloc_len << 1);
-       new_ptrs = lttng_kvmalloc(heap->alloc_len * sizeof(void *), heap->gfpmask);
-       if (!new_ptrs)
-               return -ENOMEM;
-       if (heap->ptrs)
-               memcpy(new_ptrs, heap->ptrs, heap->len * sizeof(void *));
-       lttng_kvfree(heap->ptrs);
-       heap->ptrs = new_ptrs;
-       return 0;
-}
-
-static
-int heap_set_len(struct lttng_ptr_heap *heap, size_t new_len)
-{
-       int ret;
-
-       ret = heap_grow(heap, new_len);
-       if (ret)
-               return ret;
-       heap->len = new_len;
-       return 0;
-}
-
-int lttng_heap_init(struct lttng_ptr_heap *heap, size_t alloc_len,
-             gfp_t gfpmask, int gt(void *a, void *b))
-{
-       heap->ptrs = NULL;
-       heap->len = 0;
-       heap->alloc_len = 0;
-       heap->gt = gt;
-       heap->gfpmask = gfpmask;
-       /*
-        * Minimum size allocated is 1 entry to ensure memory allocation
-        * never fails within heap_replace_max.
-        */
-       return heap_grow(heap, max_t(size_t, 1, alloc_len));
-}
-
-void lttng_heap_free(struct lttng_ptr_heap *heap)
-{
-       lttng_kvfree(heap->ptrs);
-}
-
-static void heapify(struct lttng_ptr_heap *heap, size_t i)
-{
-       void **ptrs = heap->ptrs;
-       size_t l, r, largest;
-
-       for (;;) {
-               void *tmp;
-
-               l = left(i);
-               r = right(i);
-               if (l < heap->len && heap->gt(ptrs[l], ptrs[i]))
-                       largest = l;
-               else
-                       largest = i;
-               if (r < heap->len && heap->gt(ptrs[r], ptrs[largest]))
-                       largest = r;
-               if (largest == i)
-                       break;
-               tmp = ptrs[i];
-               ptrs[i] = ptrs[largest];
-               ptrs[largest] = tmp;
-               i = largest;
-       }
-       lttng_check_heap(heap);
-}
-
-void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p)
-{
-       void *res;
-
-       if (!heap->len) {
-               (void) heap_set_len(heap, 1);
-               heap->ptrs[0] = p;
-               lttng_check_heap(heap);
-               return NULL;
-       }
-
-       /* Replace the current max and heapify */
-       res = heap->ptrs[0];
-       heap->ptrs[0] = p;
-       heapify(heap, 0);
-       return res;
-}
-
-int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p)
-{
-       void **ptrs;
-       size_t pos;
-       int ret;
-
-       ret = heap_set_len(heap, heap->len + 1);
-       if (ret)
-               return ret;
-       ptrs = heap->ptrs;
-       pos = heap->len - 1;
-       while (pos > 0 && heap->gt(p, ptrs[parent(pos)])) {
-               /* Move parent down until we find the right spot */
-               ptrs[pos] = ptrs[parent(pos)];
-               pos = parent(pos);
-       }
-       ptrs[pos] = p;
-       lttng_check_heap(heap);
-       return 0;
-}
-
-void *lttng_heap_remove(struct lttng_ptr_heap *heap)
-{
-       switch (heap->len) {
-       case 0:
-               return NULL;
-       case 1:
-               (void) heap_set_len(heap, 0);
-               return heap->ptrs[0];
-       }
-       /* Shrink, replace the current max by previous last entry and heapify */
-       heap_set_len(heap, heap->len - 1);
-       /* len changed. previous last entry is at heap->len */
-       return lttng_heap_replace_max(heap, heap->ptrs[heap->len]);
-}
-
-void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p)
-{
-       size_t pos, len = heap->len;
-
-       for (pos = 0; pos < len; pos++)
-               if (heap->ptrs[pos] == p)
-                       goto found;
-       return NULL;
-found:
-       if (heap->len == 1) {
-               (void) heap_set_len(heap, 0);
-               lttng_check_heap(heap);
-               return heap->ptrs[0];
-       }
-       /* Replace p with previous last entry and heapify. */
-       heap_set_len(heap, heap->len - 1);
-       /* len changed. previous last entry is at heap->len */
-       heap->ptrs[pos] = heap->ptrs[heap->len];
-       heapify(heap, pos);
-       return p;
-}
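
[Editorial note: lttng_prio_heap.c, moved verbatim to src/lib/prio_heap/,
implements a max-heap of pointers ordered by a caller-supplied gt()
comparator. For context, a minimal, hypothetical kernel-side usage sketch of
the API shown above -- heap_demo() and u64_gt() are illustrative names, not
part of the commit.]

  #include <linux/types.h>
  #include <linux/gfp.h>
  #include <lttng/prio_heap.h>

  /* "Greater-than" comparator: order by the u64 value pointed to. */
  static int u64_gt(void *a, void *b)
  {
          return *(u64 *) a > *(u64 *) b;
  }

  static int heap_demo(void)
  {
          struct lttng_ptr_heap heap;
          u64 v1 = 1, v2 = 7, v3 = 3;
          void *p;
          int ret;

          /* Pre-sizes the ptrs array; GFP_KERNEL allocations may sleep. */
          ret = lttng_heap_init(&heap, 3, GFP_KERNEL, u64_gt);
          if (ret)
                  return ret;

          ret = lttng_heap_insert(&heap, &v1);
          if (!ret)
                  ret = lttng_heap_insert(&heap, &v2);
          if (!ret)
                  ret = lttng_heap_insert(&heap, &v3);

          /* Removal yields the max first: &v2 (7), then &v3 (3), then &v1. */
          while ((p = lttng_heap_remove(&heap)) != NULL)
                  ;

          lttng_heap_free(&heap);
          return ret;
  }
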
diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
deleted file mode 100644 (file)
index d6547d7..0000000
+++ /dev/null
@@ -1,1124 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * ring_buffer_backend.c
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/stddef.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/cpu.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-
-#include <wrapper/mm.h>
-#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
-#include <ringbuffer/config.h>
-#include <ringbuffer/backend.h>
-#include <ringbuffer/frontend.h>
-
-/**
- * lib_ring_buffer_backend_allocate - allocate a channel buffer
- * @config: ring buffer instance configuration
- * @buf: the buffer struct
- * @size: total size of the buffer
- * @num_subbuf: number of subbuffers
- * @extra_reader_sb: need extra subbuffer for reader
- */
-static
-int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
-                                    struct lib_ring_buffer_backend *bufb,
-                                    size_t size, size_t num_subbuf,
-                                    int extra_reader_sb)
-{
-       struct channel_backend *chanb = &bufb->chan->backend;
-       unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
-       unsigned long subbuf_size, mmap_offset = 0;
-       unsigned long num_subbuf_alloc;
-       struct page **pages;
-       unsigned long i;
-
-       num_pages = size >> PAGE_SHIFT;
-
-       /*
-        * Verify that there are enough free pages available on the system for
-        * the current allocation request.
-        * wrapper_check_enough_free_pages uses si_mem_available() if available
-        * and returns whether there should be enough free pages based on the
-        * current estimate.
-        */
-       if (!wrapper_check_enough_free_pages(num_pages))
-               goto not_enough_pages;
-
-       /*
-        * Set the current user thread as the first target of the OOM killer.
-        * If the estimate received by si_mem_available() was off, and we do
-        * end up running out of memory because of this buffer allocation, we
-        * want to kill the offending app first.
-        */
-       wrapper_set_current_oom_origin();
-
-       num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
-       subbuf_size = chanb->subbuf_size;
-       num_subbuf_alloc = num_subbuf;
-
-       if (extra_reader_sb) {
-               num_pages += num_pages_per_subbuf; /* Add pages for reader */
-               num_subbuf_alloc++;
-       }
-
-       pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
-                                  1 << INTERNODE_CACHE_SHIFT),
-                       cpu_to_node(max(bufb->cpu, 0)));
-       if (unlikely(!pages))
-               goto pages_error;
-
-       bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
-                                        * num_subbuf_alloc,
-                                 1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL | __GFP_NOWARN,
-                       cpu_to_node(max(bufb->cpu, 0)));
-       if (unlikely(!bufb->array))
-               goto array_error;
-
-       for (i = 0; i < num_pages; i++) {
-               pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
-                               GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
-               if (unlikely(!pages[i]))
-                       goto depopulate;
-       }
-       bufb->num_pages_per_subbuf = num_pages_per_subbuf;
-
-       /* Allocate backend pages array elements */
-       for (i = 0; i < num_subbuf_alloc; i++) {
-               bufb->array[i] =
-                       lttng_kvzalloc_node(ALIGN(
-                               sizeof(struct lib_ring_buffer_backend_pages) +
-                               sizeof(struct lib_ring_buffer_backend_page)
-                               * num_pages_per_subbuf,
-                               1 << INTERNODE_CACHE_SHIFT),
-                               GFP_KERNEL | __GFP_NOWARN,
-                               cpu_to_node(max(bufb->cpu, 0)));
-               if (!bufb->array[i])
-                       goto free_array;
-       }
-
-       /* Allocate write-side subbuffer table */
-       bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
-                               sizeof(struct lib_ring_buffer_backend_subbuffer)
-                               * num_subbuf,
-                               1 << INTERNODE_CACHE_SHIFT),
-                               GFP_KERNEL | __GFP_NOWARN,
-                               cpu_to_node(max(bufb->cpu, 0)));
-       if (unlikely(!bufb->buf_wsb))
-               goto free_array;
-
-       for (i = 0; i < num_subbuf; i++)
-               bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
-
-       /* Assign read-side subbuffer table */
-       if (extra_reader_sb)
-               bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
-                                               num_subbuf_alloc - 1);
-       else
-               bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
-
-       /* Allocate subbuffer packet counter table */
-       bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
-                               sizeof(struct lib_ring_buffer_backend_counts)
-                               * num_subbuf,
-                               1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL | __GFP_NOWARN,
-                       cpu_to_node(max(bufb->cpu, 0)));
-       if (unlikely(!bufb->buf_cnt))
-               goto free_wsb;
-
-       /* Assign pages to page index */
-       for (i = 0; i < num_subbuf_alloc; i++) {
-               for (j = 0; j < num_pages_per_subbuf; j++) {
-                       CHAN_WARN_ON(chanb, page_idx > num_pages);
-                       bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
-                       bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
-                       page_idx++;
-               }
-               if (config->output == RING_BUFFER_MMAP) {
-                       bufb->array[i]->mmap_offset = mmap_offset;
-                       mmap_offset += subbuf_size;
-               }
-       }
-
-       /*
-        * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
-        * will not fault.
-        */
-       wrapper_vmalloc_sync_mappings();
-       wrapper_clear_current_oom_origin();
-       vfree(pages);
-       return 0;
-
-free_wsb:
-       lttng_kvfree(bufb->buf_wsb);
-free_array:
-       for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
-               lttng_kvfree(bufb->array[i]);
-depopulate:
-       /* Free all allocated pages */
-       for (i = 0; (i < num_pages && pages[i]); i++)
-               __free_page(pages[i]);
-       lttng_kvfree(bufb->array);
-array_error:
-       vfree(pages);
-pages_error:
-       wrapper_clear_current_oom_origin();
-not_enough_pages:
-       return -ENOMEM;
-}
-
-int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
-                                  struct channel_backend *chanb, int cpu)
-{
-       const struct lib_ring_buffer_config *config = &chanb->config;
-
-       bufb->chan = container_of(chanb, struct channel, backend);
-       bufb->cpu = cpu;
-
-       return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
-                                               chanb->num_subbuf,
-                                               chanb->extra_reader_sb);
-}
-
-void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
-{
-       struct channel_backend *chanb = &bufb->chan->backend;
-       unsigned long i, j, num_subbuf_alloc;
-
-       num_subbuf_alloc = chanb->num_subbuf;
-       if (chanb->extra_reader_sb)
-               num_subbuf_alloc++;
-
-       lttng_kvfree(bufb->buf_wsb);
-       lttng_kvfree(bufb->buf_cnt);
-       for (i = 0; i < num_subbuf_alloc; i++) {
-               for (j = 0; j < bufb->num_pages_per_subbuf; j++)
-                       __free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
-               lttng_kvfree(bufb->array[i]);
-       }
-       lttng_kvfree(bufb->array);
-       bufb->allocated = 0;
-}
-
-void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
-{
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       unsigned long num_subbuf_alloc;
-       unsigned int i;
-
-       num_subbuf_alloc = chanb->num_subbuf;
-       if (chanb->extra_reader_sb)
-               num_subbuf_alloc++;
-
-       for (i = 0; i < chanb->num_subbuf; i++)
-               bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
-       if (chanb->extra_reader_sb)
-               bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
-                                               num_subbuf_alloc - 1);
-       else
-               bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
-
-       for (i = 0; i < num_subbuf_alloc; i++) {
-               /* Don't reset mmap_offset */
-               v_set(config, &bufb->array[i]->records_commit, 0);
-               v_set(config, &bufb->array[i]->records_unread, 0);
-               bufb->array[i]->data_size = 0;
-               /* Don't reset backend page and virt addresses */
-       }
-       /* Don't reset num_pages_per_subbuf, cpu, allocated */
-       v_set(config, &bufb->records_read, 0);
-}
-
-/*
- * The frontend is responsible for also calling ring_buffer_backend_reset for
- * each buffer when calling channel_backend_reset.
- */
-void channel_backend_reset(struct channel_backend *chanb)
-{
-       struct channel *chan = container_of(chanb, struct channel, backend);
-       const struct lib_ring_buffer_config *config = &chanb->config;
-
-       /*
-        * Don't reset buf_size, subbuf_size, subbuf_size_order,
-        * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
-        * priv, notifiers, config, cpumask and name.
-        */
-       chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-
-/*
- * No need to implement a "dead" callback to do a buffer switch here,
- * because it will happen when tracing is stopped, or will be done by
- * switch timer CPU DEAD callback.
- * We don't free buffers when CPUs go away, because it would make trace
- * data vanish, which is unwanted.
- */
-int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
-               struct lttng_cpuhp_node *node)
-{
-       struct channel_backend *chanb = container_of(node,
-                       struct channel_backend, cpuhp_prepare);
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       struct lib_ring_buffer *buf;
-       int ret;
-
-       CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
-       buf = per_cpu_ptr(chanb->buf, cpu);
-       ret = lib_ring_buffer_create(buf, chanb, cpu);
-       if (ret) {
-               printk(KERN_ERR
-                 "ring_buffer_cpu_hp_callback: cpu %d "
-                 "buffer creation failed\n", cpu);
-               return ret;
-       }
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
-
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/**
- *     lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
- *     @nb: notifier block
- *     @action: hotplug action to take
- *     @hcpu: CPU number
- *
- *     Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-static
-int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
-                                             unsigned long action,
-                                             void *hcpu)
-{
-       unsigned int cpu = (unsigned long)hcpu;
-       struct channel_backend *chanb = container_of(nb, struct channel_backend,
-                                                    cpu_hp_notifier);
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       struct lib_ring_buffer *buf;
-       int ret;
-
-       CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
-       switch (action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               buf = per_cpu_ptr(chanb->buf, cpu);
-               ret = lib_ring_buffer_create(buf, chanb, cpu);
-               if (ret) {
-                       printk(KERN_ERR
-                         "ring_buffer_cpu_hp_callback: cpu %d "
-                         "buffer creation failed\n", cpu);
-                       return NOTIFY_BAD;
-               }
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               /* No need to do a buffer switch here, because it will happen
-                * when tracing is stopped, or will be done by switch timer CPU
-                * DEAD callback. */
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-#endif
-
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-/**
- * channel_backend_init - initialize a channel backend
- * @chanb: channel backend
- * @name: channel name
- * @config: client ring buffer configuration
- * @priv: client private data
- * @parent: dentry of parent directory, %NULL for root directory
- * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
- * @num_subbuf: number of sub-buffers (power of 2)
- *
- * Returns channel pointer if successful, %NULL otherwise.
- *
- * Creates per-cpu channel buffers using the sizes and attributes
- * specified.  The created channel buffer files will be named
- * name_0...name_N-1.  File permissions will be %S_IRUSR.
- *
- * Called with CPU hotplug disabled.
- */
-int channel_backend_init(struct channel_backend *chanb,
-                        const char *name,
-                        const struct lib_ring_buffer_config *config,
-                        void *priv, size_t subbuf_size, size_t num_subbuf)
-{
-       struct channel *chan = container_of(chanb, struct channel, backend);
-       unsigned int i;
-       int ret;
-
-       if (!name)
-               return -EPERM;
-
-       /* Check that the subbuffer size is larger than a page. */
-       if (subbuf_size < PAGE_SIZE)
-               return -EINVAL;
-
-       /*
-        * Make sure the number of subbuffers and subbuffer size are
-        * power of 2 and nonzero.
-        */
-       if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
-               return -EINVAL;
-       if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
-               return -EINVAL;
-       /*
-        * Overwrite mode buffers require at least 2 subbuffers per
-        * buffer.
-        */
-       if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
-               return -EINVAL;
-
-       ret = subbuffer_id_check_index(config, num_subbuf);
-       if (ret)
-               return ret;
-
-       chanb->priv = priv;
-       chanb->buf_size = num_subbuf * subbuf_size;
-       chanb->subbuf_size = subbuf_size;
-       chanb->buf_size_order = get_count_order(chanb->buf_size);
-       chanb->subbuf_size_order = get_count_order(subbuf_size);
-       chanb->num_subbuf_order = get_count_order(num_subbuf);
-       chanb->extra_reader_sb =
-                       (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
-       chanb->num_subbuf = num_subbuf;
-       strlcpy(chanb->name, name, NAME_MAX);
-       memcpy(&chanb->config, config, sizeof(chanb->config));
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
-                       return -ENOMEM;
-       }
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               /* Allocating the buffer per-cpu structures */
-               chanb->buf = alloc_percpu(struct lib_ring_buffer);
-               if (!chanb->buf)
-                       goto free_cpumask;
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-               chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
-               ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
-                       &chanb->cpuhp_prepare.node);
-               if (ret)
-                       goto free_bufs;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-               {
-                       /*
-                        * On kernels without CPU hotplug support, a ring buffer
-                        * allocated in an early initcall will not be notified of
-                        * secondary CPUs. In that case, allocate for all possible
-                        * CPUs.
-                        */
-#ifdef CONFIG_HOTPLUG_CPU
-                       /*
-                        * buf->backend.allocated test takes care of concurrent CPU
-                        * hotplug.
-                        * Priority higher than frontend, so we create the ring buffer
-                        * before we start the timer.
-                        */
-                       chanb->cpu_hp_notifier.notifier_call =
-                                       lib_ring_buffer_cpu_hp_callback;
-                       chanb->cpu_hp_notifier.priority = 5;
-                       register_hotcpu_notifier(&chanb->cpu_hp_notifier);
-
-                       get_online_cpus();
-                       for_each_online_cpu(i) {
-                               ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
-                                                        chanb, i);
-                               if (ret)
-                                       goto free_bufs; /* cpu hotplug locked */
-                       }
-                       put_online_cpus();
-#else
-                       for_each_possible_cpu(i) {
-                               ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
-                                                        chanb, i);
-                               if (ret)
-                                       goto free_bufs;
-                       }
-#endif
-               }
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-       } else {
-               chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
-               if (!chanb->buf)
-                       goto free_cpumask;
-               ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
-               if (ret)
-                       goto free_bufs;
-       }
-       chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
-
-       return 0;
-
-free_bufs:
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-               /*
-                * Teardown of lttng_rb_hp_prepare instance
-                * on "add" error is handled within cpu hotplug,
-                * no teardown to do from the caller.
-                */
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-#ifdef CONFIG_HOTPLUG_CPU
-               put_online_cpus();
-               unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-               for_each_possible_cpu(i) {
-                       struct lib_ring_buffer *buf =
-                               per_cpu_ptr(chanb->buf, i);
-
-                       if (!buf->backend.allocated)
-                               continue;
-                       lib_ring_buffer_free(buf);
-               }
-               free_percpu(chanb->buf);
-       } else
-               kfree(chanb->buf);
-free_cpumask:
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               free_cpumask_var(chanb->cpumask);
-       return -ENOMEM;
-}
-
-/**
- * channel_backend_unregister_notifiers - unregister notifiers
- * @chan: the channel
- *
- * Holds CPU hotplug.
- */
-void channel_backend_unregister_notifiers(struct channel_backend *chanb)
-{
-       const struct lib_ring_buffer_config *config = &chanb->config;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-               int ret;
-
-               ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
-                               &chanb->cpuhp_prepare.node);
-               WARN_ON(ret);
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-               unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-       }
-}
-
-/**
- * channel_backend_free - destroy the channel
- * @chan: the channel
- *
- * Destroy all channel buffers and frees the channel.
- */
-void channel_backend_free(struct channel_backend *chanb)
-{
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       unsigned int i;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               for_each_possible_cpu(i) {
-                       struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
-
-                       if (!buf->backend.allocated)
-                               continue;
-                       lib_ring_buffer_free(buf);
-               }
-               free_cpumask_var(chanb->cpumask);
-               free_percpu(chanb->buf);
-       } else {
-               struct lib_ring_buffer *buf = chanb->buf;
-
-               CHAN_WARN_ON(chanb, !buf->backend.allocated);
-               lib_ring_buffer_free(buf);
-               kfree(buf);
-       }
-}
-
-/**
- * lib_ring_buffer_write - write data to a ring_buffer buffer.
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @src : source address
- * @len : length to write
- * @pagecpy : page size copied so far
- */
-void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
-                           const void *src, size_t len, size_t pagecpy)
-{
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       size_t sbidx, index;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
-
-       do {
-               len -= pagecpy;
-               src += pagecpy;
-               offset += pagecpy;
-               sbidx = offset >> chanb->subbuf_size_order;
-               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-
-               /*
-                * Underlying layer should never ask for writes across
-                * subbuffers.
-                */
-               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-
-               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-               id = bufb->buf_wsb[sbidx].id;
-               sb_bindex = subbuffer_id_get_index(config, id);
-               rpages = bufb->array[sb_bindex];
-               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-                            && subbuffer_id_is_noref(config, id));
-               lib_ring_buffer_do_copy(config,
-                                       rpages->p[index].virt
-                                               + (offset & ~PAGE_MASK),
-                                       src, pagecpy);
-       } while (unlikely(len != pagecpy));
-}
-EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
-
-
-/**
- * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @c : the byte to write
- * @len : length to write
- * @pagecpy : page size copied so far
- */
-void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
-                            size_t offset,
-                            int c, size_t len, size_t pagecpy)
-{
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       size_t sbidx, index;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
-
-       do {
-               len -= pagecpy;
-               offset += pagecpy;
-               sbidx = offset >> chanb->subbuf_size_order;
-               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-
-               /*
-                * Underlying layer should never ask for writes across
-                * subbuffers.
-                */
-               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-
-               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-               id = bufb->buf_wsb[sbidx].id;
-               sb_bindex = subbuffer_id_get_index(config, id);
-               rpages = bufb->array[sb_bindex];
-               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-                            && subbuffer_id_is_noref(config, id));
-               lib_ring_buffer_do_memset(rpages->p[index].virt
-                                         + (offset & ~PAGE_MASK),
-                                         c, pagecpy);
-       } while (unlikely(len != pagecpy));
-}
-EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
-
-/**
- * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @src : source address
- * @len : length to write
- * @pagecpy : page size copied so far
- * @pad : character to use for padding
- */
-void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
-                       size_t offset, const char *src, size_t len,
-                       size_t pagecpy, int pad)
-{
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       size_t sbidx, index;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
-       int src_terminated = 0;
-
-       CHAN_WARN_ON(chanb, !len);
-       offset += pagecpy;
-       do {
-               len -= pagecpy;
-               if (!src_terminated)
-                       src += pagecpy;
-               sbidx = offset >> chanb->subbuf_size_order;
-               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-
-               /*
-                * Underlying layer should never ask for writes across
-                * subbuffers.
-                */
-               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-
-               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-               id = bufb->buf_wsb[sbidx].id;
-               sb_bindex = subbuffer_id_get_index(config, id);
-               rpages = bufb->array[sb_bindex];
-               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-                            && subbuffer_id_is_noref(config, id));
-
-               if (likely(!src_terminated)) {
-                       size_t count, to_copy;
-
-                       to_copy = pagecpy;
-                       if (pagecpy == len)
-                               to_copy--;      /* Final '\0' */
-                       count = lib_ring_buffer_do_strcpy(config,
-                                       rpages->p[index].virt
-                                               + (offset & ~PAGE_MASK),
-                                       src, to_copy);
-                       offset += count;
-                       /* Padding */
-                       if (unlikely(count < to_copy)) {
-                               size_t pad_len = to_copy - count;
-
-                               /* Next pages will have padding */
-                               src_terminated = 1;
-                               lib_ring_buffer_do_memset(rpages->p[index].virt
-                                               + (offset & ~PAGE_MASK),
-                                       pad, pad_len);
-                               offset += pad_len;
-                       }
-               } else {
-                       size_t pad_len;
-
-                       pad_len = pagecpy;
-                       if (pagecpy == len)
-                               pad_len--;      /* Final '\0' */
-                       lib_ring_buffer_do_memset(rpages->p[index].virt
-                                       + (offset & ~PAGE_MASK),
-                               pad, pad_len);
-                       offset += pad_len;
-               }
-       } while (unlikely(len != pagecpy));
-       /* Ending '\0' */
-       lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
-                       '\0', 1);
-}
-EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
-
-/**
- * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @src : source address
- * @len : length to write
- * @pagecpy : page size copied so far
- *
- * This function deals with userspace pointers; it should never be called
- * directly without the src pointer having first been checked with
- * access_ok().
- */
-void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
-                                     size_t offset,
-                                     const void __user *src, size_t len,
-                                     size_t pagecpy)
-{
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       size_t sbidx, index;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
-       int ret;
-
-       do {
-               len -= pagecpy;
-               src += pagecpy;
-               offset += pagecpy;
-               sbidx = offset >> chanb->subbuf_size_order;
-               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-
-               /*
-                * Underlying layer should never ask for writes across
-                * subbuffers.
-                */
-               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-
-               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-               id = bufb->buf_wsb[sbidx].id;
-               sb_bindex = subbuffer_id_get_index(config, id);
-               rpages = bufb->array[sb_bindex];
-               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-                               && subbuffer_id_is_noref(config, id));
-               ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
-                                                       + (offset & ~PAGE_MASK),
-                                                       src, pagecpy) != 0;
-               if (ret > 0) {
-                       /* Copy failed. */
-                       _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
-                       break; /* stop copy */
-               }
-       } while (unlikely(len != pagecpy));
-}
-EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
-
-/**
- * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @src : source address
- * @len : length to write
- * @pagecpy : page size copied so far
- * @pad : character to use for padding
- *
- * This function deals with userspace pointers; it should never be called
- * directly without the src pointer having first been checked with
- * access_ok().
- */
-void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
-               size_t offset, const char __user *src, size_t len,
-               size_t pagecpy, int pad)
-{
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       size_t sbidx, index;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
-       int src_terminated = 0;
-
-       offset += pagecpy;
-       do {
-               len -= pagecpy;
-               if (!src_terminated)
-                       src += pagecpy;
-               sbidx = offset >> chanb->subbuf_size_order;
-               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-
-               /*
-                * Underlying layer should never ask for writes across
-                * subbuffers.
-                */
-               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-
-               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-               id = bufb->buf_wsb[sbidx].id;
-               sb_bindex = subbuffer_id_get_index(config, id);
-               rpages = bufb->array[sb_bindex];
-               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-                               && subbuffer_id_is_noref(config, id));
-
-               if (likely(!src_terminated)) {
-                       size_t count, to_copy;
-
-                       to_copy = pagecpy;
-                       if (pagecpy == len)
-                               to_copy--;      /* Final '\0' */
-                       count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
-                                       rpages->p[index].virt
-                                               + (offset & ~PAGE_MASK),
-                                       src, to_copy);
-                       offset += count;
-                       /* Padding */
-                       if (unlikely(count < to_copy)) {
-                               size_t pad_len = to_copy - count;
-
-                               /* Next pages will have padding */
-                               src_terminated = 1;
-                               lib_ring_buffer_do_memset(rpages->p[index].virt
-                                               + (offset & ~PAGE_MASK),
-                                       pad, pad_len);
-                               offset += pad_len;
-                       }
-               } else {
-                       size_t pad_len;
-
-                       pad_len = pagecpy;
-                       if (pagecpy == len)
-                               pad_len--;      /* Final '\0' */
-                       lib_ring_buffer_do_memset(rpages->p[index].virt
-                                       + (offset & ~PAGE_MASK),
-                               pad, pad_len);
-                       offset += pad_len;
-               }
-       } while (unlikely(len != pagecpy));
-       /* Ending '\0' */
-       lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
-                       '\0', 1);
-}
-EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
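/*
 * Contract sketch for the string copy above: the output field is always
 * exactly @len bytes, filled with the string's bytes, then @pad for any
 * shortfall, then a final '\0'. A plain C model of that layout, assuming
 * len >= 1 and no paging or userspace access (illustration only):
 */
#include <stddef.h>
#include <string.h>

static void strcpy_pad_model(char *dst, const char *src, size_t len, int pad)
{
	size_t n = strnlen(src, len - 1);	/* room kept for the final '\0' */

	memcpy(dst, src, n);			/* string bytes */
	memset(dst + n, pad, len - 1 - n);	/* padding for the shortfall */
	dst[len - 1] = '\0';			/* ending '\0' */
}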
-
-/**
- * lib_ring_buffer_read - read data from a ring_buffer buffer.
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @dest : destination address
- * @len : length to copy to destination
- *
- * Should be protected by get_subbuf/put_subbuf.
- * Returns the length copied.
- */
-size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
-                           void *dest, size_t len)
-{
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       size_t index, pagecpy, orig_len;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
-
-       orig_len = len;
-       offset &= chanb->buf_size - 1;
-       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-       if (unlikely(!len))
-               return 0;
-       for (;;) {
-               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-               id = bufb->buf_rsb.id;
-               sb_bindex = subbuffer_id_get_index(config, id);
-               rpages = bufb->array[sb_bindex];
-               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-                            && subbuffer_id_is_noref(config, id));
-               memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
-                      pagecpy);
-               len -= pagecpy;
-               if (likely(!len))
-                       break;
-               dest += pagecpy;
-               offset += pagecpy;
-               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-               /*
-                * Underlying layer should never ask for reads across
-                * subbuffers.
-                */
-               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-       }
-       return orig_len;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
-
-/**
- * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @dest : destination userspace address
- * @len : length to copy to destination
- *
- * Should be protected by get_subbuf/put_subbuf.
- * access_ok() must have been performed on dest addresses prior to calling
- * this function.
- * Returns -EFAULT on error, 0 if ok.
- */
-int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
-                                  size_t offset, void __user *dest, size_t len)
-{
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       size_t index;
-       ssize_t pagecpy;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
-
-       offset &= chanb->buf_size - 1;
-       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-       if (unlikely(!len))
-               return 0;
-       for (;;) {
-               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-               id = bufb->buf_rsb.id;
-               sb_bindex = subbuffer_id_get_index(config, id);
-               rpages = bufb->array[sb_bindex];
-               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-                            && subbuffer_id_is_noref(config, id));
-               if (__copy_to_user(dest,
-                              rpages->p[index].virt + (offset & ~PAGE_MASK),
-                              pagecpy))
-                       return -EFAULT;
-               len -= pagecpy;
-               if (likely(!len))
-                       break;
-               dest += pagecpy;
-               offset += pagecpy;
-               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-               /*
-                * Underlying layer should never ask for reads across
-                * subbuffers.
-                */
-               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-       }
-       return 0;
-}
-EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
-
-/**
- * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @dest : destination address
- * @len : destination's length
- *
- * Return string's length, or -EINVAL on error.
- * Should be protected by get_subbuf/put_subbuf.
- * Destination length should be at least 1 to hold '\0'.
- */
-int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
-                             void *dest, size_t len)
-{
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       size_t index;
-       ssize_t pagecpy, pagelen, strpagelen, orig_offset;
-       char *str;
-       struct lib_ring_buffer_backend_pages *rpages;
-       unsigned long sb_bindex, id;
-
-       offset &= chanb->buf_size - 1;
-       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-       orig_offset = offset;
-       if (unlikely(!len))
-               return -EINVAL;
-       for (;;) {
-               id = bufb->buf_rsb.id;
-               sb_bindex = subbuffer_id_get_index(config, id);
-               rpages = bufb->array[sb_bindex];
-               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-                            && subbuffer_id_is_noref(config, id));
-               str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
-               pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
-               strpagelen = strnlen(str, pagelen);
-               if (len) {
-                       pagecpy = min_t(size_t, len, strpagelen);
-                       if (dest) {
-                               memcpy(dest, str, pagecpy);
-                               dest += pagecpy;
-                       }
-                       len -= pagecpy;
-               }
-               offset += strpagelen;
-               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-               if (strpagelen < pagelen)
-                       break;
-               /*
-                * Underlying layer should never ask for reads across
-                * subbuffers.
-                */
-               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-       }
-       if (dest && len)
-               ((char *)dest)[0] = 0;
-       return offset - orig_offset;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
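/*
 * A minimal consumer-side sketch of reading a string record back with
 * lib_ring_buffer_read_cstr(); the record offset and the surrounding
 * get_subbuf()/put_subbuf() protocol are assumed to be handled by the
 * caller. Note the return value is the string's length in the buffer,
 * which can exceed the destination size.
 */
static int dump_string_record(struct lib_ring_buffer_backend *bufb,
			      size_t record_offset)
{
	char name[64];
	int str_len;

	str_len = lib_ring_buffer_read_cstr(bufb, record_offset,
					    name, sizeof(name));
	if (str_len < 0)
		return str_len;			/* -EINVAL */
	if ((size_t)str_len >= sizeof(name))
		name[sizeof(name) - 1] = '\0';	/* truncated: terminate */
	printk(KERN_DEBUG "record string: %s (len %d)\n", name, str_len);
	return 0;
}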
-
-/**
- * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @virt : pointer to page address (output)
- *
- * Should be protected by get_subbuf/put_subbuf.
- * Returns a pointer to the page frame number (an unsigned long).
- */
-unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
-                                           size_t offset, void ***virt)
-{
-       size_t index;
-       struct lib_ring_buffer_backend_pages *rpages;
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       unsigned long sb_bindex, id;
-
-       offset &= chanb->buf_size - 1;
-       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-       id = bufb->buf_rsb.id;
-       sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = bufb->array[sb_bindex];
-       CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-                    && subbuffer_id_is_noref(config, id));
-       *virt = &rpages->p[index].virt;
-       return &rpages->p[index].pfn;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
-
-/**
- * lib_ring_buffer_read_offset_address - get address of a buffer location
- * @bufb : buffer backend
- * @offset : offset within the buffer.
- *
- * Return the address where a given offset is located (for read).
- * Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to read/write directly
- * from/to this address, as long as the read/write is never bigger than a
- * page size.
- */
-void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
-                                         size_t offset)
-{
-       size_t index;
-       struct lib_ring_buffer_backend_pages *rpages;
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       unsigned long sb_bindex, id;
-
-       offset &= chanb->buf_size - 1;
-       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-       id = bufb->buf_rsb.id;
-       sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = bufb->array[sb_bindex];
-       CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-                    && subbuffer_id_is_noref(config, id));
-       return rpages->p[index].virt + (offset & ~PAGE_MASK);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
-
-/**
- * lib_ring_buffer_offset_address - get address of a location within the buffer
- * @bufb : buffer backend
- * @offset : offset within the buffer.
- *
- * Return the address where a given offset is located.
- * Should be used to get the current subbuffer header pointer. Given we know
- * it's always at the beginning of a page, it's safe to write directly to this
- * address, as long as the write is never bigger than a page size.
- */
-void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
-                                    size_t offset)
-{
-       size_t sbidx, index;
-       struct lib_ring_buffer_backend_pages *rpages;
-       struct channel_backend *chanb = &bufb->chan->backend;
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       unsigned long sb_bindex, id;
-
-       offset &= chanb->buf_size - 1;
-       sbidx = offset >> chanb->subbuf_size_order;
-       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-       id = bufb->buf_wsb[sbidx].id;
-       sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = bufb->array[sb_bindex];
-       CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-                    && subbuffer_id_is_noref(config, id));
-       return rpages->p[index].virt + (offset & ~PAGE_MASK);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);
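/*
 * Offset decomposition used throughout the backend, as a worked example:
 * assuming 16384-byte sub-buffers (subbuf_size_order = 14) and 4096-byte
 * pages, offset 20856 yields sbidx = 20856 >> 14 = 1 (second sub-buffer),
 * index = (20856 & 16383) >> PAGE_SHIFT = 4472 >> 12 = 1 (its second page),
 * and in-page offset 20856 & ~PAGE_MASK = 376.
 */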
diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c
deleted file mode 100644 (file)
index fca37fb..0000000
+++ /dev/null
@@ -1,2387 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * ring_buffer_frontend.c
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
- * recorder (overwrite) modes. See thesis:
- *
- * Desnoyers, Mathieu (2009), "Low-Impact Operating System Tracing", Ph.D.
- * dissertation, Ecole Polytechnique de Montreal.
- * http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf
- *
- * - Algorithm presentation in Chapter 5:
- *     "Lockless Multi-Core High-Throughput Buffering".
- * - Algorithm formal verification in Section 8.6:
- *     "Formal verification of LTTng"
- *
- * Author:
- *     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Inspired from LTT and RelayFS:
- *  Karim Yaghmour <karim@opersys.com>
- *  Tom Zanussi <zanussi@us.ibm.com>
- *  Bob Wisniewski <bob@watson.ibm.com>
- * And from K42:
- *  Bob Wisniewski <bob@watson.ibm.com>
- *
- * Buffer reader semantics:
- *
- * - get_subbuf_size
- * while buffer is not finalized and empty
- *   - get_subbuf
- *     - if return value != 0, continue
- *   - splice one subbuffer worth of data to a pipe
- *   - splice the data from pipe to disk/network
- *   - put_subbuf
- */
-
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/percpu.h>
-#include <asm/cacheflush.h>
-
-#include <ringbuffer/config.h>
-#include <ringbuffer/backend.h>
-#include <ringbuffer/frontend.h>
-#include <ringbuffer/iterator.h>
-#include <ringbuffer/nohz.h>
-#include <wrapper/atomic.h>
-#include <wrapper/kref.h>
-#include <wrapper/percpu-defs.h>
-#include <wrapper/timer.h>
-#include <wrapper/vmalloc.h>
-
-/*
- * Internal structure representing offsets to use at a sub-buffer switch.
- */
-struct switch_offsets {
-       unsigned long begin, end, old;
-       size_t pre_header_padding, size;
-       unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
-                    switch_old_end:1;
-};
-
-#ifdef CONFIG_NO_HZ
-enum tick_nohz_val {
-       TICK_NOHZ_STOP,
-       TICK_NOHZ_FLUSH,
-       TICK_NOHZ_RESTART,
-};
-
-static ATOMIC_NOTIFIER_HEAD(tick_nohz_notifier);
-#endif /* CONFIG_NO_HZ */
-
-static DEFINE_PER_CPU(spinlock_t, ring_buffer_nohz_lock);
-
-DEFINE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
-EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);
-
-static
-void lib_ring_buffer_print_errors(struct channel *chan,
-                                 struct lib_ring_buffer *buf, int cpu);
-static
-void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
-               enum switch_mode mode);
-
-static
-int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
-                                struct lib_ring_buffer *buf,
-                                struct channel *chan)
-{
-       unsigned long consumed_old, consumed_idx, commit_count, write_offset;
-
-       consumed_old = atomic_long_read(&buf->consumed);
-       consumed_idx = subbuf_index(consumed_old, chan);
-       commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
-       /*
-        * No memory barrier here, since we are only interested
-	 * in a statistically correct polling result. The next poll will
-	 * get the data if we are racing. The mb() that ensures correct
-	 * memory order is in get_subbuf.
-        */
-       write_offset = v_read(config, &buf->offset);
-
-       /*
-	 * Check that the subbuffer we are trying to consume has already
-	 * been fully committed.
-        */
-
-       if (((commit_count - chan->backend.subbuf_size)
-            & chan->commit_count_mask)
-           - (buf_trunc(consumed_old, chan)
-              >> chan->backend.num_subbuf_order)
-           != 0)
-               return 0;
-
-       /*
-        * Check that we are not about to read the same subbuffer in
-        * which the writer head is.
-        */
-       if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
-           == 0)
-               return 0;
-
-       return 1;
-}
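/*
 * Worked example of the fully-committed check above, assuming 4 sub-buffers
 * of 4096 bytes (buf_size = 16384, commit_count_mask covering the remaining
 * bits): with consumed_old = 20480 (second pass over sub-buffer index 1),
 * buf_trunc(20480) >> num_subbuf_order = 16384 >> 2 = 4096, and a fully
 * committed second pass gives cc_sb = 2 * 4096 = 8192, so
 * (8192 - 4096) - 4096 == 0 and the sub-buffer is deliverable. Any record
 * still in flight leaves cc_sb short of that value, and the poll reports
 * no data.
 */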
-
-/*
- * Must be called under cpu hotplug protection.
- */
-void lib_ring_buffer_free(struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-
-       lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
-       lttng_kvfree(buf->commit_hot);
-       lttng_kvfree(buf->commit_cold);
-       lttng_kvfree(buf->ts_end);
-
-       lib_ring_buffer_backend_free(&buf->backend);
-}
-
-/**
- * lib_ring_buffer_reset - Reset ring buffer to initial values.
- * @buf: Ring buffer.
- *
- * Effectively empty the ring buffer. Should be called when the buffer is not
- * used for writing. The ring buffer can be opened for reading, but the reader
- * should not be using the iterator concurrently with reset. The previous
- * current iterator record is reset.
- */
-void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned int i;
-
-       /*
-        * Reset iterator first. It will put the subbuffer if it currently holds
-        * it.
-        */
-       lib_ring_buffer_iterator_reset(buf);
-       v_set(config, &buf->offset, 0);
-       for (i = 0; i < chan->backend.num_subbuf; i++) {
-               v_set(config, &buf->commit_hot[i].cc, 0);
-               v_set(config, &buf->commit_hot[i].seq, 0);
-               v_set(config, &buf->commit_cold[i].cc_sb, 0);
-               buf->ts_end[i] = 0;
-       }
-       atomic_long_set(&buf->consumed, 0);
-       atomic_set(&buf->record_disabled, 0);
-       v_set(config, &buf->last_tsc, 0);
-       lib_ring_buffer_backend_reset(&buf->backend);
-       /* Don't reset number of active readers */
-       v_set(config, &buf->records_lost_full, 0);
-       v_set(config, &buf->records_lost_wrap, 0);
-       v_set(config, &buf->records_lost_big, 0);
-       v_set(config, &buf->records_count, 0);
-       v_set(config, &buf->records_overrun, 0);
-       buf->finalized = 0;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_reset);
-
-/**
- * channel_reset - Reset channel to initial values.
- * @chan: Channel.
- *
- * Effectively empty the channel. Should be called when the channel is not used
- * for writing. The channel can be opened for reading, but the reader should not
- * be using the iterator concurrently with reset. The previous current iterator
- * record is reset.
- */
-void channel_reset(struct channel *chan)
-{
-       /*
-        * Reset iterators first. Will put the subbuffer if held for reading.
-        */
-       channel_iterator_reset(chan);
-       atomic_set(&chan->record_disabled, 0);
-       /* Don't reset commit_count_mask, still valid */
-       channel_backend_reset(&chan->backend);
-       /* Don't reset switch/read timer interval */
-       /* Don't reset notifiers and notifier enable bits */
-       /* Don't reset reader reference count */
-}
-EXPORT_SYMBOL_GPL(channel_reset);
-
-/*
- * Must be called under cpu hotplug protection.
- */
-int lib_ring_buffer_create(struct lib_ring_buffer *buf,
-                          struct channel_backend *chanb, int cpu)
-{
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       struct channel *chan = container_of(chanb, struct channel, backend);
-       void *priv = chanb->priv;
-       size_t subbuf_header_size;
-       u64 tsc;
-       int ret;
-
-       /* Test for cpu hotplug */
-       if (buf->backend.allocated)
-               return 0;
-
-       /*
-        * Paranoia: per cpu dynamic allocation is not officially documented as
-        * zeroing the memory, so let's do it here too, just in case.
-        */
-       memset(buf, 0, sizeof(*buf));
-
-       ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend, cpu);
-       if (ret)
-               return ret;
-
-       buf->commit_hot =
-               lttng_kvzalloc_node(ALIGN(sizeof(*buf->commit_hot)
-                                  * chan->backend.num_subbuf,
-                                  1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL | __GFP_NOWARN,
-                       cpu_to_node(max(cpu, 0)));
-       if (!buf->commit_hot) {
-               ret = -ENOMEM;
-               goto free_chanbuf;
-       }
-
-       buf->commit_cold =
-               lttng_kvzalloc_node(ALIGN(sizeof(*buf->commit_cold)
-                                  * chan->backend.num_subbuf,
-                                  1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL | __GFP_NOWARN,
-                       cpu_to_node(max(cpu, 0)));
-       if (!buf->commit_cold) {
-               ret = -ENOMEM;
-               goto free_commit;
-       }
-
-       buf->ts_end =
-               lttng_kvzalloc_node(ALIGN(sizeof(*buf->ts_end)
-                                  * chan->backend.num_subbuf,
-                                  1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL | __GFP_NOWARN,
-                       cpu_to_node(max(cpu, 0)));
-       if (!buf->ts_end) {
-               ret = -ENOMEM;
-               goto free_commit_cold;
-       }
-
-       init_waitqueue_head(&buf->read_wait);
-       init_waitqueue_head(&buf->write_wait);
-       raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
-
-       /*
-        * Write the subbuffer header for first subbuffer so we know the total
-        * duration of data gathering.
-        */
-       subbuf_header_size = config->cb.subbuffer_header_size();
-       v_set(config, &buf->offset, subbuf_header_size);
-       subbuffer_id_clear_noref(config, &buf->backend.buf_wsb[0].id);
-       tsc = config->cb.ring_buffer_clock_read(buf->backend.chan);
-       config->cb.buffer_begin(buf, tsc, 0);
-       v_add(config, subbuf_header_size, &buf->commit_hot[0].cc);
-
-       if (config->cb.buffer_create) {
-               ret = config->cb.buffer_create(buf, priv, cpu, chanb->name);
-               if (ret)
-                       goto free_init;
-       }
-
-       /*
-        * Ensure the buffer is ready before setting it to allocated and setting
-        * the cpumask.
-        * Used for cpu hotplug vs cpumask iteration.
-        */
-       smp_wmb();
-       buf->backend.allocated = 1;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               CHAN_WARN_ON(chan, cpumask_test_cpu(cpu,
-                            chan->backend.cpumask));
-               cpumask_set_cpu(cpu, chan->backend.cpumask);
-       }
-
-       return 0;
-
-       /* Error handling */
-free_init:
-       lttng_kvfree(buf->ts_end);
-free_commit_cold:
-       lttng_kvfree(buf->commit_cold);
-free_commit:
-       lttng_kvfree(buf->commit_hot);
-free_chanbuf:
-       lib_ring_buffer_backend_free(&buf->backend);
-       return ret;
-}
-
-static void switch_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
-{
-       struct lib_ring_buffer *buf = lttng_from_timer(buf, t, switch_timer);
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       /*
-        * Only flush buffers periodically if readers are active.
-        */
-       if (atomic_long_read(&buf->active_readers))
-               lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               lttng_mod_timer_pinned(&buf->switch_timer,
-                                jiffies + chan->switch_timer_interval);
-       else
-               mod_timer(&buf->switch_timer,
-                         jiffies + chan->switch_timer_interval);
-}
-
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned int flags = 0;
-
-       if (!chan->switch_timer_interval || buf->switch_timer_enabled)
-               return;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               flags = LTTNG_TIMER_PINNED;
-
-       lttng_timer_setup(&buf->switch_timer, switch_buffer_timer, flags, buf);
-       buf->switch_timer.expires = jiffies + chan->switch_timer_interval;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               add_timer_on(&buf->switch_timer, buf->backend.cpu);
-       else
-               add_timer(&buf->switch_timer);
-
-       buf->switch_timer_enabled = 1;
-}
-
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-
-       if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
-               return;
-
-       del_timer_sync(&buf->switch_timer);
-       buf->switch_timer_enabled = 0;
-}
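/*
 * The switch timer above goes through LTTng wrappers; a rough sketch of the
 * same periodic, optionally CPU-pinned pattern with the vanilla timer API
 * (all names here are illustrative, not part of this file):
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list flush_timer;

static void flush_timer_cb(struct timer_list *t)
{
	/* ... do the periodic flush, then re-arm for the next period. */
	mod_timer(&flush_timer, jiffies + HZ);
}

static void flush_timer_start(int cpu)
{
	timer_setup(&flush_timer, flush_timer_cb, cpu >= 0 ? TIMER_PINNED : 0);
	flush_timer.expires = jiffies + HZ;
	if (cpu >= 0)
		add_timer_on(&flush_timer, cpu);	/* keep it on one CPU */
	else
		add_timer(&flush_timer);
}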
-
-/*
- * Polling timer to check the channels for data.
- */
-static void read_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
-{
-       struct lib_ring_buffer *buf = lttng_from_timer(buf, t, read_timer);
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       CHAN_WARN_ON(chan, !buf->backend.allocated);
-
-       if (atomic_long_read(&buf->active_readers)
-           && lib_ring_buffer_poll_deliver(config, buf, chan)) {
-               wake_up_interruptible(&buf->read_wait);
-               wake_up_interruptible(&chan->read_wait);
-       }
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               lttng_mod_timer_pinned(&buf->read_timer,
-                                jiffies + chan->read_timer_interval);
-       else
-               mod_timer(&buf->read_timer,
-                         jiffies + chan->read_timer_interval);
-}
-
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned int flags = 0;
-
-       if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
-           || !chan->read_timer_interval
-           || buf->read_timer_enabled)
-               return;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               flags = LTTNG_TIMER_PINNED;
-
-       lttng_timer_setup(&buf->read_timer, read_buffer_timer, flags, buf);
-       buf->read_timer.expires = jiffies + chan->read_timer_interval;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               add_timer_on(&buf->read_timer, buf->backend.cpu);
-       else
-               add_timer(&buf->read_timer);
-
-       buf->read_timer_enabled = 1;
-}
-
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
-           || !chan->read_timer_interval
-           || !buf->read_timer_enabled)
-               return;
-
-       del_timer_sync(&buf->read_timer);
-       /*
-        * do one more check to catch data that has been written in the last
-        * timer period.
-        */
-       if (lib_ring_buffer_poll_deliver(config, buf, chan)) {
-               wake_up_interruptible(&buf->read_wait);
-               wake_up_interruptible(&chan->read_wait);
-       }
-       buf->read_timer_enabled = 0;
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-
-enum cpuhp_state lttng_rb_hp_prepare;
-enum cpuhp_state lttng_rb_hp_online;
-
-void lttng_rb_set_hp_prepare(enum cpuhp_state val)
-{
-       lttng_rb_hp_prepare = val;
-}
-EXPORT_SYMBOL_GPL(lttng_rb_set_hp_prepare);
-
-void lttng_rb_set_hp_online(enum cpuhp_state val)
-{
-       lttng_rb_hp_online = val;
-}
-EXPORT_SYMBOL_GPL(lttng_rb_set_hp_online);
-
-int lttng_cpuhp_rb_frontend_dead(unsigned int cpu,
-               struct lttng_cpuhp_node *node)
-{
-       struct channel *chan = container_of(node, struct channel,
-                                           cpuhp_prepare);
-       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
-       /*
-        * Performing a buffer switch on a remote CPU. Performed by
-        * the CPU responsible for doing the hotunplug after the target
-        * CPU stopped running completely. Ensures that all data
-        * from that remote CPU is flushed.
-        */
-       lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_dead);
-
-int lttng_cpuhp_rb_frontend_online(unsigned int cpu,
-               struct lttng_cpuhp_node *node)
-{
-       struct channel *chan = container_of(node, struct channel,
-                                           cpuhp_online);
-       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
-       wake_up_interruptible(&chan->hp_wait);
-       lib_ring_buffer_start_switch_timer(buf);
-       lib_ring_buffer_start_read_timer(buf);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_online);
-
-int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
-               struct lttng_cpuhp_node *node)
-{
-       struct channel *chan = container_of(node, struct channel,
-                                           cpuhp_online);
-       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
-       lib_ring_buffer_stop_switch_timer(buf);
-       lib_ring_buffer_stop_read_timer(buf);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_offline);
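/*
 * The hotplug states used above are allocated elsewhere and handed in
 * through lttng_rb_set_hp_prepare()/lttng_rb_set_hp_online(). A rough
 * sketch of how an owner module might obtain such a state with the
 * multi-instance cpuhp API (callback names are illustrative; the real
 * wiring lives outside this file):
 */
#include <linux/cpuhotplug.h>

static int my_rb_online(unsigned int cpu, struct hlist_node *node)
{
	/* start per-cpu timers for @cpu, cf. lttng_cpuhp_rb_frontend_online() */
	return 0;
}

static int my_rb_offline(unsigned int cpu, struct hlist_node *node)
{
	/* stop per-cpu timers for @cpu, cf. lttng_cpuhp_rb_frontend_offline() */
	return 0;
}

static int my_rb_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "my:rb:online",
				      my_rb_online, my_rb_offline);
	if (ret < 0)
		return ret;
	lttng_rb_set_hp_online(ret);	/* publish the dynamic state */
	return 0;
}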
-
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/**
- *     lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
- *     @nb: notifier block
- *     @action: hotplug action to take
- *     @hcpu: CPU number
- *
- *     Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-static
-int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
-                                             unsigned long action,
-                                             void *hcpu)
-{
-       unsigned int cpu = (unsigned long)hcpu;
-       struct channel *chan = container_of(nb, struct channel,
-                                           cpu_hp_notifier);
-       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (!chan->cpu_hp_enable)
-               return NOTIFY_DONE;
-
-       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
-       switch (action) {
-       case CPU_DOWN_FAILED:
-       case CPU_DOWN_FAILED_FROZEN:
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               wake_up_interruptible(&chan->hp_wait);
-               lib_ring_buffer_start_switch_timer(buf);
-               lib_ring_buffer_start_read_timer(buf);
-               return NOTIFY_OK;
-
-       case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
-               lib_ring_buffer_stop_switch_timer(buf);
-               lib_ring_buffer_stop_read_timer(buf);
-               return NOTIFY_OK;
-
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               /*
-                * Performing a buffer switch on a remote CPU. Performed by
-                * the CPU responsible for doing the hotunplug after the target
-                * CPU stopped running completely. Ensures that all data
-                * from that remote CPU is flushed.
-                */
-               lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-               return NOTIFY_OK;
-
-       default:
-               return NOTIFY_DONE;
-       }
-}
-
-#endif
-
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
-/*
- * For per-cpu buffers, call the reader wakeups before switching the buffer, so
- * that wake-up-tracing generated events are flushed before going idle (in
- * tick_nohz). We test if the spinlock is locked to deal with the race where
- * readers try to sample the ring buffer before we perform the switch. We let
- * the readers retry in that case. If there is data in the buffer, the wake up
- * will prevent the CPU running the reader thread from going idle.
- */
-static int notrace ring_buffer_tick_nohz_callback(struct notifier_block *nb,
-                                                 unsigned long val,
-                                                 void *data)
-{
-       struct channel *chan = container_of(nb, struct channel,
-                                           tick_nohz_notifier);
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
-       int cpu = smp_processor_id();
-
-       if (config->alloc != RING_BUFFER_ALLOC_PER_CPU) {
-               /*
-                * We don't support keeping the system idle with global buffers
-                * and streaming active. In order to do so, we would need to
-                * sample a non-nohz-cpumask racelessly with the nohz updates
-                * without adding synchronization overhead to nohz. Leave this
-                * use-case out for now.
-                */
-               return 0;
-       }
-
-       buf = channel_get_ring_buffer(config, chan, cpu);
-       switch (val) {
-       case TICK_NOHZ_FLUSH:
-               raw_spin_lock(&buf->raw_tick_nohz_spinlock);
-               if (config->wakeup == RING_BUFFER_WAKEUP_BY_TIMER
-                   && chan->read_timer_interval
-                   && atomic_long_read(&buf->active_readers)
-                   && (lib_ring_buffer_poll_deliver(config, buf, chan)
-                       || lib_ring_buffer_pending_data(config, buf, chan))) {
-                       wake_up_interruptible(&buf->read_wait);
-                       wake_up_interruptible(&chan->read_wait);
-               }
-               if (chan->switch_timer_interval)
-                       lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-               raw_spin_unlock(&buf->raw_tick_nohz_spinlock);
-               break;
-       case TICK_NOHZ_STOP:
-               spin_lock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
-               lib_ring_buffer_stop_switch_timer(buf);
-               lib_ring_buffer_stop_read_timer(buf);
-               spin_unlock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
-               break;
-       case TICK_NOHZ_RESTART:
-               spin_lock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
-               lib_ring_buffer_start_read_timer(buf);
-               lib_ring_buffer_start_switch_timer(buf);
-               spin_unlock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
-               break;
-       }
-
-       return 0;
-}
-
-void notrace lib_ring_buffer_tick_nohz_flush(void)
-{
-       atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_FLUSH,
-                                  NULL);
-}
-
-void notrace lib_ring_buffer_tick_nohz_stop(void)
-{
-       atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_STOP,
-                                  NULL);
-}
-
-void notrace lib_ring_buffer_tick_nohz_restart(void)
-{
-       atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_RESTART,
-                                  NULL);
-}
-#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
-
-/*
- * Holds CPU hotplug.
- */
-static void channel_unregister_notifiers(struct channel *chan)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       channel_iterator_unregister_notifiers(chan);
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#ifdef CONFIG_NO_HZ
-               /*
-                * Remove the nohz notifier first, so we are certain we stop
-                * the timers.
-                */
-               atomic_notifier_chain_unregister(&tick_nohz_notifier,
-                                                &chan->tick_nohz_notifier);
-               /*
-                * ring_buffer_nohz_lock will not be needed below, because
-                * we just removed the notifiers, which were the only source of
-                * concurrency.
-                */
-#endif /* CONFIG_NO_HZ */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-               {
-                       int ret;
-
-                       ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
-                               &chan->cpuhp_online.node);
-                       WARN_ON(ret);
-                       ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare,
-                               &chan->cpuhp_prepare.node);
-                       WARN_ON(ret);
-               }
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-               {
-                       int cpu;
-
-#ifdef CONFIG_HOTPLUG_CPU
-                       get_online_cpus();
-                       chan->cpu_hp_enable = 0;
-                       for_each_online_cpu(cpu) {
-                               struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                                     cpu);
-                               lib_ring_buffer_stop_switch_timer(buf);
-                               lib_ring_buffer_stop_read_timer(buf);
-                       }
-                       put_online_cpus();
-                       unregister_cpu_notifier(&chan->cpu_hp_notifier);
-#else
-                       for_each_possible_cpu(cpu) {
-                               struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                                     cpu);
-                               lib_ring_buffer_stop_switch_timer(buf);
-                               lib_ring_buffer_stop_read_timer(buf);
-                       }
-#endif
-               }
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-       } else {
-               struct lib_ring_buffer *buf = chan->backend.buf;
-
-               lib_ring_buffer_stop_switch_timer(buf);
-               lib_ring_buffer_stop_read_timer(buf);
-       }
-       channel_backend_unregister_notifiers(&chan->backend);
-}
-
-static void lib_ring_buffer_set_quiescent(struct lib_ring_buffer *buf)
-{
-       if (!buf->quiescent) {
-               buf->quiescent = true;
-               _lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
-       }
-}
-
-static void lib_ring_buffer_clear_quiescent(struct lib_ring_buffer *buf)
-{
-       buf->quiescent = false;
-}
-
-void lib_ring_buffer_set_quiescent_channel(struct channel *chan)
-{
-       int cpu;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               get_online_cpus();
-               for_each_channel_cpu(cpu, chan) {
-                       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                             cpu);
-
-                       lib_ring_buffer_set_quiescent(buf);
-               }
-               put_online_cpus();
-       } else {
-               struct lib_ring_buffer *buf = chan->backend.buf;
-
-               lib_ring_buffer_set_quiescent(buf);
-       }
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_set_quiescent_channel);
-
-void lib_ring_buffer_clear_quiescent_channel(struct channel *chan)
-{
-       int cpu;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               get_online_cpus();
-               for_each_channel_cpu(cpu, chan) {
-                       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                             cpu);
-
-                       lib_ring_buffer_clear_quiescent(buf);
-               }
-               put_online_cpus();
-       } else {
-               struct lib_ring_buffer *buf = chan->backend.buf;
-
-               lib_ring_buffer_clear_quiescent(buf);
-       }
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_clear_quiescent_channel);
-
-static void channel_free(struct channel *chan)
-{
-       if (chan->backend.release_priv_ops) {
-               chan->backend.release_priv_ops(chan->backend.priv_ops);
-       }
-       channel_iterator_free(chan);
-       channel_backend_free(&chan->backend);
-       kfree(chan);
-}
-
-/**
- * channel_create - Create channel.
- * @config: ring buffer instance configuration
- * @name: name of the channel
- * @priv: ring buffer client private data
- * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
- *            address mapping. It is used only by the RING_BUFFER_STATIC
- *            configuration. It can be set to NULL for other backends.
- * @subbuf_size: subbuffer size
- * @num_subbuf: number of subbuffers
- * @switch_timer_interval: Time interval (in us) to fill sub-buffers with
- *                         padding to let readers get those sub-buffers.
- *                         Used for live streaming.
- * @read_timer_interval: Time interval (in us) to wake up pending readers.
- *
- * Holds cpu hotplug.
- * Returns NULL on failure.
- */
-struct channel *channel_create(const struct lib_ring_buffer_config *config,
-                  const char *name, void *priv, void *buf_addr,
-                  size_t subbuf_size,
-                  size_t num_subbuf, unsigned int switch_timer_interval,
-                  unsigned int read_timer_interval)
-{
-       int ret;
-       struct channel *chan;
-
-       if (lib_ring_buffer_check_config(config, switch_timer_interval,
-                                        read_timer_interval))
-               return NULL;
-
-       chan = kzalloc(sizeof(struct channel), GFP_KERNEL);
-       if (!chan)
-               return NULL;
-
-       ret = channel_backend_init(&chan->backend, name, config, priv,
-                                  subbuf_size, num_subbuf);
-       if (ret)
-               goto error;
-
-       ret = channel_iterator_init(chan);
-       if (ret)
-               goto error_free_backend;
-
-       chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
-       chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
-       chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
-       kref_init(&chan->ref);
-       init_waitqueue_head(&chan->read_wait);
-       init_waitqueue_head(&chan->hp_wait);
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-               chan->cpuhp_prepare.component = LTTNG_RING_BUFFER_FRONTEND;
-               ret = cpuhp_state_add_instance_nocalls(lttng_rb_hp_prepare,
-                       &chan->cpuhp_prepare.node);
-               if (ret)
-                       goto cpuhp_prepare_error;
-
-               chan->cpuhp_online.component = LTTNG_RING_BUFFER_FRONTEND;
-               ret = cpuhp_state_add_instance(lttng_rb_hp_online,
-                       &chan->cpuhp_online.node);
-               if (ret)
-                       goto cpuhp_online_error;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-               {
-                       int cpu;
-                       /*
-			 * If CPU hotplug is not available and the ring buffer
-			 * is allocated in an early initcall, it will not be
-			 * notified of secondary CPUs. In that case, we need to
-			 * handle all possible CPUs.
-                        */
-#ifdef CONFIG_HOTPLUG_CPU
-                       chan->cpu_hp_notifier.notifier_call =
-                                       lib_ring_buffer_cpu_hp_callback;
-                       chan->cpu_hp_notifier.priority = 6;
-                       register_cpu_notifier(&chan->cpu_hp_notifier);
-
-                       get_online_cpus();
-                       for_each_online_cpu(cpu) {
-                               struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                                      cpu);
-                               spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
-                               lib_ring_buffer_start_switch_timer(buf);
-                               lib_ring_buffer_start_read_timer(buf);
-                               spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
-                       }
-                       chan->cpu_hp_enable = 1;
-                       put_online_cpus();
-#else
-                       for_each_possible_cpu(cpu) {
-                               struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                                     cpu);
-                               spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
-                               lib_ring_buffer_start_switch_timer(buf);
-                               lib_ring_buffer_start_read_timer(buf);
-                               spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
-                       }
-#endif
-               }
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
-               /* Only benefit from NO_HZ idle with per-cpu buffers for now. */
-               chan->tick_nohz_notifier.notifier_call =
-                       ring_buffer_tick_nohz_callback;
-               chan->tick_nohz_notifier.priority = ~0U;
-               atomic_notifier_chain_register(&tick_nohz_notifier,
-                                      &chan->tick_nohz_notifier);
-#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
-
-       } else {
-               struct lib_ring_buffer *buf = chan->backend.buf;
-
-               lib_ring_buffer_start_switch_timer(buf);
-               lib_ring_buffer_start_read_timer(buf);
-       }
-
-       return chan;
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-cpuhp_online_error:
-       ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare,
-                       &chan->cpuhp_prepare.node);
-       WARN_ON(ret);
-cpuhp_prepare_error:
-#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-error_free_backend:
-       channel_backend_free(&chan->backend);
-error:
-       kfree(chan);
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(channel_create);
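/*
 * A minimal sketch of a client creating and tearing down a channel with the
 * API above; the config, private data and sizes are placeholders (sub-buffer
 * size and count must satisfy lib_ring_buffer_check_config()):
 */
static void *my_client_setup(const struct lib_ring_buffer_config *config,
			     void *priv)
{
	struct channel *chan;

	chan = channel_create(config, "my_chan", priv,
			      NULL,	/* buf_addr: only for static backends */
			      4096,	/* subbuf_size */
			      8,	/* num_subbuf */
			      1000,	/* switch timer interval, us */
			      100);	/* read timer interval, us */
	if (!chan)
		return NULL;
	/* ... trace ... */
	return channel_destroy(chan);	/* returns the private data */
}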
-
-static
-void channel_release(struct kref *kref)
-{
-       struct channel *chan = container_of(kref, struct channel, ref);
-       channel_free(chan);
-}
-
-/**
- * channel_destroy - Finalize, wait for q.s. and destroy channel.
- * @chan: channel to destroy
- *
- * Holds cpu hotplug.
- * Call "destroy" callback, finalize channels, and then decrement the
- * channel reference count.  Note that when readers have completed data
- * consumption of finalized channels, get_subbuf() will return -ENODATA.
- * They should release their handle at that point.  Returns the private
- * data pointer.
- */
-void *channel_destroy(struct channel *chan)
-{
-       int cpu;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       void *priv;
-
-       channel_unregister_notifiers(chan);
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               /*
-                * No need to hold cpu hotplug, because all notifiers have been
-                * unregistered.
-                */
-               for_each_channel_cpu(cpu, chan) {
-                       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                             cpu);
-
-                       if (config->cb.buffer_finalize)
-                               config->cb.buffer_finalize(buf,
-                                                          chan->backend.priv,
-                                                          cpu);
-                       /*
-                        * Perform flush before writing to finalized.
-                        */
-                       smp_wmb();
-                       WRITE_ONCE(buf->finalized, 1);
-                       wake_up_interruptible(&buf->read_wait);
-               }
-       } else {
-               struct lib_ring_buffer *buf = chan->backend.buf;
-
-               if (config->cb.buffer_finalize)
-                       config->cb.buffer_finalize(buf, chan->backend.priv, -1);
-               /*
-                * Perform flush before writing to finalized.
-                */
-               smp_wmb();
-               WRITE_ONCE(buf->finalized, 1);
-               wake_up_interruptible(&buf->read_wait);
-       }
-       WRITE_ONCE(chan->finalized, 1);
-       wake_up_interruptible(&chan->hp_wait);
-       wake_up_interruptible(&chan->read_wait);
-       priv = chan->backend.priv;
-       kref_put(&chan->ref, channel_release);
-       return priv;
-}
-EXPORT_SYMBOL_GPL(channel_destroy);
-
-struct lib_ring_buffer *channel_get_ring_buffer(
-                                       const struct lib_ring_buffer_config *config,
-                                       struct channel *chan, int cpu)
-{
-       if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
-               return chan->backend.buf;
-       else
-               return per_cpu_ptr(chan->backend.buf, cpu);
-}
-EXPORT_SYMBOL_GPL(channel_get_ring_buffer);
-
-int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-
-       if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
-               return -EBUSY;
-       if (!lttng_kref_get(&chan->ref)) {
-               atomic_long_dec(&buf->active_readers);
-               return -EOVERFLOW;
-       }
-       lttng_smp_mb__after_atomic();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_open_read);
-
-void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-
-       CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
-       lttng_smp_mb__before_atomic();
-       atomic_long_dec(&buf->active_readers);
-       kref_put(&chan->ref, channel_release);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_release_read);
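/*
 * Sketch of the reader reference protocol implied above: a single reader
 * takes the buffer with lib_ring_buffer_open_read() and must pair it with
 * lib_ring_buffer_release_read() once done (illustrative wrapper):
 */
static int with_reader(struct lib_ring_buffer *buf)
{
	int ret;

	ret = lib_ring_buffer_open_read(buf);	/* -EBUSY if already taken */
	if (ret)
		return ret;
	/* ... consume data: snapshot/get_subbuf/put_subbuf ... */
	lib_ring_buffer_release_read(buf);
	return 0;
}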
-
-/*
- * Promote compiler barrier to a smp_mb().
- * For the specific ring buffer case, this IPI call should be removed if the
- * architecture does not reorder writes.  This should eventually be provided by
- * a separate architecture-specific infrastructure.
- */
-static void remote_mb(void *info)
-{
-       smp_mb();
-}
-
-/**
- * lib_ring_buffer_snapshot - save subbuffer position snapshot (for read)
- * @buf: ring buffer
- * @consumed: consumed count indicating the position where to read
- * @produced: produced count, indicates position when to stop reading
- *
- * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
- * data to read at consumed position, or 0 if the get operation succeeds.
- * Busy-loop trying to get data if the tick_nohz sequence lock is held.
- */
-
-int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
-                            unsigned long *consumed, unsigned long *produced)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long consumed_cur, write_offset;
-       int finalized;
-
-retry:
-       finalized = READ_ONCE(buf->finalized);
-       /*
-        * Read finalized before counters.
-        */
-       smp_rmb();
-       consumed_cur = atomic_long_read(&buf->consumed);
-       /*
-        * No need to issue a memory barrier between consumed count read and
-        * write offset read, because consumed count can only change
-        * concurrently in overwrite mode, and we keep a sequence counter
-        * identifier derived from the write offset to check we are getting
-        * the same sub-buffer we are expecting (the sub-buffers are atomically
-        * "tagged" upon writes, tags are checked upon read).
-        */
-       write_offset = v_read(config, &buf->offset);
-
-       /*
-        * Check that we are not about to read the same subbuffer in
-        * which the writer head is.
-        */
-       if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
-           == 0)
-               goto nodata;
-
-       *consumed = consumed_cur;
-       *produced = subbuf_trunc(write_offset, chan);
-
-       return 0;
-
-nodata:
-       /*
-        * The memory barriers __wait_event()/wake_up_interruptible() take care
-        * of "raw_spin_is_locked" memory ordering.
-        */
-       if (finalized)
-               return -ENODATA;
-       else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
-               goto retry;
-       else
-               return -EAGAIN;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_snapshot);
-
-/**
- * lib_ring_buffer_snapshot_sample_positions - sample producer/consumer positions
- * @buf: ring buffer
- * @consumed: consumed byte count indicating the last position read
- * @produced: produced byte count indicating the last position written
- *
- * Performs the same function as lib_ring_buffer_snapshot(), but the positions
- * are saved regardless of whether the consumed and produced positions are
- * in the same subbuffer.
- * This function is meant to provide information on the exact producer and
- * consumer positions without regard for the "snapshot" feature.
- */
-int lib_ring_buffer_snapshot_sample_positions(struct lib_ring_buffer *buf,
-               unsigned long *consumed, unsigned long *produced)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       smp_rmb();
-       *consumed = atomic_long_read(&buf->consumed);
-       /*
-        * No need to issue a memory barrier between consumed count read and
-        * write offset read, because consumed count can only change
-        * concurrently in overwrite mode, and we keep a sequence counter
-        * identifier derived from the write offset to check we are getting
-        * the same sub-buffer we are expecting (the sub-buffers are atomically
-        * "tagged" upon writes, tags are checked upon read).
-        */
-       *produced = v_read(config, &buf->offset);
-       return 0;
-}
-
-/**
- * lib_ring_buffer_move_consumer - move consumed counter forward
- * @buf: ring buffer
- * @consumed_new: new consumed count value
- *
- * Should only be called from consumer context.
- */
-void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
-                                  unsigned long consumed_new)
-{
-       struct lib_ring_buffer_backend *bufb = &buf->backend;
-       struct channel *chan = bufb->chan;
-       unsigned long consumed;
-
-       CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
-
-       /*
-        * Only push the consumed value forward.
-        * If the consumed cmpxchg fails, this is because we have been pushed by
-        * the writer in flight recorder mode.
-        */
-       consumed = atomic_long_read(&buf->consumed);
-       while ((long) consumed - (long) consumed_new < 0)
-               consumed = atomic_long_cmpxchg(&buf->consumed, consumed,
-                                              consumed_new);
-       /* Wake-up the metadata producer */
-       wake_up_interruptible(&buf->write_wait);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_move_consumer);
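/*
 * Putting the pieces together: a hedged sketch of the consumer loop built
 * from snapshot/move_consumer above and the get_subbuf()/put_subbuf() pair
 * referenced by the read helpers (put_subbuf is the assumed counterpart of
 * get_subbuf; error handling trimmed):
 */
static void drain_once(struct lib_ring_buffer *buf, struct channel *chan)
{
	unsigned long consumed, produced;

	if (lib_ring_buffer_snapshot(buf, &consumed, &produced))
		return;				/* -EAGAIN or -ENODATA */
	while ((long)(produced - consumed) > 0) {
		if (lib_ring_buffer_get_subbuf(buf, consumed))
			break;
		/* ... copy out with lib_ring_buffer_read()/copy to user ... */
		lib_ring_buffer_put_subbuf(buf);
		consumed += chan->backend.subbuf_size;
		lib_ring_buffer_move_consumer(buf, consumed);
	}
}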
-
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-static void lib_ring_buffer_flush_read_subbuf_dcache(
-               const struct lib_ring_buffer_config *config,
-               struct channel *chan,
-               struct lib_ring_buffer *buf)
-{
-       struct lib_ring_buffer_backend_pages *pages;
-       unsigned long sb_bindex, id, i, nr_pages;
-
-       if (config->output != RING_BUFFER_MMAP)
-               return;
-
-       /*
-        * Architectures with caches aliased on virtual addresses may
-        * use different cache lines for the linear mapping vs
-        * user-space memory mapping. Given that the ring buffer is
-        * based on the kernel linear mapping, aligning it with the
-        * user-space mapping is not straightforward, and would require
-        * extra TLB entries. Therefore, simply flush the dcache for the
-        * entire sub-buffer before reading it.
-        */
-       id = buf->backend.buf_rsb.id;
-       sb_bindex = subbuffer_id_get_index(config, id);
-       pages = buf->backend.array[sb_bindex];
-       nr_pages = buf->backend.num_pages_per_subbuf;
-       for (i = 0; i < nr_pages; i++) {
-               struct lib_ring_buffer_backend_page *backend_page;
-
-               backend_page = &pages->p[i];
-               flush_dcache_page(pfn_to_page(backend_page->pfn));
-       }
-}
-#else
-static void lib_ring_buffer_flush_read_subbuf_dcache(
-               const struct lib_ring_buffer_config *config,
-               struct channel *chan,
-               struct lib_ring_buffer *buf)
-{
-}
-#endif
-
-/**
- * lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
- * @buf: ring buffer
- * @consumed: consumed count indicating the position where to read
- *
- * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
- * data to read at consumed position, or 0 if the get operation succeeds.
- * Busy-loop trying to get data if the tick_nohz sequence lock is held.
- */
-int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
-                              unsigned long consumed)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
-       int ret;
-       int finalized;
-
-       if (buf->get_subbuf) {
-               /*
-                * Reader is trying to get a subbuffer twice.
-                */
-               CHAN_WARN_ON(chan, 1);
-               return -EBUSY;
-       }
-retry:
-       finalized = READ_ONCE(buf->finalized);
-       /*
-        * Read finalized before counters.
-        */
-       smp_rmb();
-       consumed_cur = atomic_long_read(&buf->consumed);
-       consumed_idx = subbuf_index(consumed, chan);
-       commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
-       /*
-        * Make sure we read the commit count before reading the buffer
-        * data and the write offset. Correct consumed offset ordering
-        * wrt commit count is ensured by the use of cmpxchg to update
-        * the consumed offset.
-        * smp_call_function_single can fail if the remote CPU is offline,
-        * this is OK because then there is no wmb to execute there.
-        * If our thread is executing on the same CPU as the one the buffer
-        * belongs to, we don't have to synchronize it at all. If we are
-        * migrated, the scheduler will take care of the memory barriers.
-        * Normally, smp_call_function_single() should ensure program order when
-        * executing the remote function, which implies that it surrounds the
-        * function execution with:
-        * smp_mb()
-        * send IPI
-        * csd_lock_wait
-        *                recv IPI
-        *                smp_mb()
-        *                exec. function
-        *                smp_mb()
-        *                csd unlock
-        * smp_mb()
-        *
-        * However, smp_call_function_single() does not seem to clearly execute
-        * such barriers. It depends on spinlock semantic to provide the barrier
-        * before executing the IPI and, when busy-looping, csd_lock_wait only
-        * executes smp_mb() when it has to wait for the other CPU.
-        *
-        * I don't trust this code. Therefore, let's add the smp_mb() sequence
-        * required ourselves, even if duplicated. It has no performance impact
-        * anyway.
-        *
-        * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
-        * read and write vs write. They do not ensure core synchronization. We
-        * really have to ensure total order between the 3 barriers running on
-        * the 2 CPUs.
-        */
-       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-               if (config->sync == RING_BUFFER_SYNC_PER_CPU
-                   && config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-                       if (raw_smp_processor_id() != buf->backend.cpu) {
-                               /* Total order with IPI handler smp_mb() */
-                               smp_mb();
-                               smp_call_function_single(buf->backend.cpu,
-                                                        remote_mb, NULL, 1);
-                               /* Total order with IPI handler smp_mb() */
-                               smp_mb();
-                       }
-               } else {
-                       /* Total order with IPI handler smp_mb() */
-                       smp_mb();
-                       smp_call_function(remote_mb, NULL, 1);
-                       /* Total order with IPI handler smp_mb() */
-                       smp_mb();
-               }
-       } else {
-               /*
-                * Local rmb to match the remote wmb to read the commit count
-                * before the buffer data and the write offset.
-                */
-               smp_rmb();
-       }
-
-       write_offset = v_read(config, &buf->offset);
-
-       /*
-        * Check that the buffer we are getting is after or at consumed_cur
-        * position.
-        */
-       if ((long) subbuf_trunc(consumed, chan)
-           - (long) subbuf_trunc(consumed_cur, chan) < 0)
-               goto nodata;
-
-       /*
-        * Check that the subbuffer we are trying to consume has been
-        * already fully committed.
-        */
-       if (((commit_count - chan->backend.subbuf_size)
-            & chan->commit_count_mask)
-           - (buf_trunc(consumed, chan)
-              >> chan->backend.num_subbuf_order)
-           != 0)
-               goto nodata;
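-       /*
-        * Worked example (illustrative, not in the original file): with
-        * subbuf_size = 4096 and num_subbuf = 4 (num_subbuf_order = 2,
-        * buf_size = 16384), a consumed position of 20480 sits in
-        * sub-buffer 1 on the buffer's second lap. buf_trunc(20480) >> 2
-        * yields 4096, and a fully committed sub-buffer at that lap has
-        * cc_sb = 8192, so (8192 - 4096) & commit_count_mask == 4096 and
-        * the check above passes.
-        */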
-
-       /*
-        * Check that we are not about to read the same subbuffer in
-        * which the writer head is.
-        */
-       if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed, chan)
-           == 0)
-               goto nodata;
-
-       /*
-        * Failure to get the subbuffer causes a busy-loop retry without going
-        * to a wait queue. Such failures are caused by short-lived race
-        * windows where the writer is concurrently taking over the subbuffer
-        * we were trying to get access to. update_read_sb_index() also checks
-        * that the "consumed" buffer count we are looking for matches the one
-        * contained in the subbuffer id.
-        */
-       ret = update_read_sb_index(config, &buf->backend, &chan->backend,
-                                  consumed_idx, buf_trunc_val(consumed, chan));
-       if (ret)
-               goto retry;
-       subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
-
-       buf->get_subbuf_consumed = consumed;
-       buf->get_subbuf = 1;
-
-       lib_ring_buffer_flush_read_subbuf_dcache(config, chan, buf);
-
-       return 0;
-
-nodata:
-       /*
-        * The memory barriers __wait_event()/wake_up_interruptible() take care
-        * of "raw_spin_is_locked" memory ordering.
-        */
-       if (finalized)
-               return -ENODATA;
-       else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
-               goto retry;
-       else
-               return -EAGAIN;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_get_subbuf);
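-
-/*
- * Illustrative consumer sketch (not part of the original file), assuming
- * "consumed" holds a previously sampled consumed position:
- *
- *   ret = lib_ring_buffer_get_subbuf(buf, consumed);
- *   if (!ret) {
- *           ... read the sub-buffer contents through the backend ...
- *           lib_ring_buffer_put_subbuf(buf);
- *           lib_ring_buffer_move_consumer(buf, subbuf_align(consumed, chan));
- *   }
- *
- * -EAGAIN means no data is currently available (retry or wait);
- * -ENODATA means the buffer is finalized and empty.
- */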
-
-/**
- * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
- * @buf: ring buffer
- */
-void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
-{
-       struct lib_ring_buffer_backend *bufb = &buf->backend;
-       struct channel *chan = bufb->chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long read_sb_bindex, consumed_idx, consumed;
-
-       CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
-
-       if (!buf->get_subbuf) {
-               /*
-                * Reader puts a subbuffer it did not get.
-                */
-               CHAN_WARN_ON(chan, 1);
-               return;
-       }
-       consumed = buf->get_subbuf_consumed;
-       buf->get_subbuf = 0;
-
-       /*
-        * Clear the records_unread counter (overruns counter). It can
-        * still be non-zero if a file reader simply grabbed the data
-        * without using iterators. It can be below zero if an iterator
-        * is used on a snapshot more than once.
-        */
-       read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
-       v_add(config, v_read(config,
-                            &bufb->array[read_sb_bindex]->records_unread),
-             &bufb->records_read);
-       v_set(config, &bufb->array[read_sb_bindex]->records_unread, 0);
-       CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
-                    && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
-       subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
-
-       /*
-        * Exchange the reader subbuffer with the one we put in its place in the
-        * writer subbuffer table. Expect the original consumed count. If
-        * update_read_sb_index fails, this is because the writer updated the
-        * subbuffer concurrently. We should therefore keep the subbuffer we
-        * currently have: it has become invalid to try reading this sub-buffer
-        * consumed count value anyway.
-        */
-       consumed_idx = subbuf_index(consumed, chan);
-       update_read_sb_index(config, &buf->backend, &chan->backend,
-                            consumed_idx, buf_trunc_val(consumed, chan));
-       /*
-        * update_read_sb_index return value ignored. Don't exchange sub-buffer
-        * if the writer concurrently updated it.
-        */
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_put_subbuf);
-
-/*
- * cons_offset is an iterator on all subbuffer offsets between the reader
- * position and the writer position (inclusive).
- */
-static
-void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
-                                           struct channel *chan,
-                                           unsigned long cons_offset,
-                                           int cpu)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long cons_idx, commit_count, commit_count_sb;
-
-       cons_idx = subbuf_index(cons_offset, chan);
-       commit_count = v_read(config, &buf->commit_hot[cons_idx].cc);
-       commit_count_sb = v_read(config, &buf->commit_cold[cons_idx].cc_sb);
-
-       if (subbuf_offset(commit_count, chan) != 0)
-               printk(KERN_WARNING
-                      "ring buffer %s, cpu %d: "
-                      "commit count in subbuffer %lu,\n"
-                      "expecting multiples of %lu bytes\n"
-                      "  [ %lu bytes committed, %lu bytes reader-visible ]\n",
-                      chan->backend.name, cpu, cons_idx,
-                      chan->backend.subbuf_size,
-                      commit_count, commit_count_sb);
-
-       printk(KERN_DEBUG "ring buffer: %s, cpu %d: %lu bytes committed\n",
-              chan->backend.name, cpu, commit_count);
-}
-
-static
-void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
-                                        struct channel *chan,
-                                        void *priv, int cpu)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long write_offset, cons_offset;
-
-       /*
-        * No need to order commit_count, write_offset and cons_offset reads
-        * because we execute at teardown, when no writer or reader
-        * references are left.
-        */
-       write_offset = v_read(config, &buf->offset);
-       cons_offset = atomic_long_read(&buf->consumed);
-       if (write_offset != cons_offset)
-               printk(KERN_DEBUG
-                      "ring buffer %s, cpu %d: "
-                      "non-consumed data\n"
-                      "  [ %lu bytes written, %lu bytes read ]\n",
-                      chan->backend.name, cpu, write_offset, cons_offset);
-
-       for (cons_offset = atomic_long_read(&buf->consumed);
-            (long) (subbuf_trunc((unsigned long) v_read(config, &buf->offset),
-                                 chan)
-                    - cons_offset) > 0;
-            cons_offset = subbuf_align(cons_offset, chan))
-               lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
-                                                      cpu);
-}
-
-#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
-static
-void lib_ring_buffer_print_records_count(struct channel *chan,
-                                        struct lib_ring_buffer *buf,
-                                        int cpu)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (!strcmp(chan->backend.name, "relay-metadata")) {
-               printk(KERN_DEBUG "ring buffer %s: %lu records written, "
-                       "%lu records overrun\n",
-                       chan->backend.name,
-                       v_read(config, &buf->records_count),
-                       v_read(config, &buf->records_overrun));
-       } else {
-               printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
-                       "%lu records overrun\n",
-                       chan->backend.name, cpu,
-                       v_read(config, &buf->records_count),
-                       v_read(config, &buf->records_overrun));
-       }
-}
-#else
-static
-void lib_ring_buffer_print_records_count(struct channel *chan,
-                                        struct lib_ring_buffer *buf,
-                                        int cpu)
-{
-}
-#endif
-
-static
-void lib_ring_buffer_print_errors(struct channel *chan,
-                                 struct lib_ring_buffer *buf, int cpu)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       void *priv = chan->backend.priv;
-
-       lib_ring_buffer_print_records_count(chan, buf, cpu);
-       if (strcmp(chan->backend.name, "relay-metadata")) {
-               if (v_read(config, &buf->records_lost_full)
-                   || v_read(config, &buf->records_lost_wrap)
-                   || v_read(config, &buf->records_lost_big))
-                       printk(KERN_WARNING
-                               "ring buffer %s, cpu %d: records were lost. Caused by:\n"
-                               "  [ %lu buffer full, %lu nest buffer wrap-around, "
-                               "%lu event too big ]\n",
-                               chan->backend.name, cpu,
-                               v_read(config, &buf->records_lost_full),
-                               v_read(config, &buf->records_lost_wrap),
-                               v_read(config, &buf->records_lost_big));
-       }
-       lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu);
-}
-
-/*
- * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
- *
- * Only executed when the buffer is finalized, in SWITCH_FLUSH.
- */
-static
-void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
-                                     struct channel *chan,
-                                     struct switch_offsets *offsets,
-                                     u64 tsc)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long oldidx = subbuf_index(offsets->old, chan);
-       unsigned long commit_count;
-       struct commit_counters_hot *cc_hot;
-
-       config->cb.buffer_begin(buf, tsc, oldidx);
-
-       /*
-        * Order all writes to buffer before the commit count update that will
-        * determine that the subbuffer is full.
-        */
-       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-               /*
-                * Must write slot data before incrementing commit count.  This
-                * compiler barrier is upgraded into a smp_mb() by the IPI sent
-                * by get_subbuf().
-                */
-               barrier();
-       } else
-               smp_wmb();
-       cc_hot = &buf->commit_hot[oldidx];
-       v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
-       commit_count = v_read(config, &cc_hot->cc);
-       /* Check if the written buffer has to be delivered */
-       lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
-                                     commit_count, oldidx, tsc);
-       lib_ring_buffer_write_commit_counter(config, buf, chan,
-                       offsets->old + config->cb.subbuffer_header_size(),
-                       commit_count, cc_hot);
-}
-
-/*
- * lib_ring_buffer_switch_old_end: switch old subbuffer
- *
- * Note: offset_old should never be 0 here. This is OK because we never perform
- * buffer switch on an empty subbuffer in SWITCH_ACTIVE mode. The caller
- * increments the offset_old value when doing a SWITCH_FLUSH on an empty
- * subbuffer.
- */
-static
-void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
-                                   struct channel *chan,
-                                   struct switch_offsets *offsets,
-                                   u64 tsc)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
-       unsigned long commit_count, padding_size, data_size;
-       struct commit_counters_hot *cc_hot;
-       u64 *ts_end;
-
-       data_size = subbuf_offset(offsets->old - 1, chan) + 1;
-       padding_size = chan->backend.subbuf_size - data_size;
-       subbuffer_set_data_size(config, &buf->backend, oldidx, data_size);
-
-       ts_end = &buf->ts_end[oldidx];
-       /*
-        * This is the last space reservation in that sub-buffer before
-        * it gets delivered. This provides exclusive access to write to
-        * this sub-buffer's ts_end. There are also no concurrent
-        * readers of that ts_end because delivery of that sub-buffer is
-        * postponed until the commit counter is incremented for the
-        * current space reservation.
-        */
-       *ts_end = tsc;
-
-       /*
-        * Order all writes to buffer and store to ts_end before the commit
-        * count update that will determine that the subbuffer is full.
-        */
-       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-               /*
-                * Must write slot data before incrementing commit count.  This
-                * compiler barrier is upgraded into a smp_mb() by the IPI sent
-                * by get_subbuf().
-                */
-               barrier();
-       } else
-               smp_wmb();
-       cc_hot = &buf->commit_hot[oldidx];
-       v_add(config, padding_size, &cc_hot->cc);
-       commit_count = v_read(config, &cc_hot->cc);
-       lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
-                                     commit_count, oldidx, tsc);
-       lib_ring_buffer_write_commit_counter(config, buf, chan,
-                       offsets->old + padding_size, commit_count,
-                       cc_hot);
-}
-
-/*
- * lib_ring_buffer_switch_new_start: Populate new subbuffer.
- *
- * This code can be executed unordered: writers may already have written to the
- * sub-buffer before this code gets executed, caution.  The commit makes sure
- * that this code is executed before the delivery of this sub-buffer.
- */
-static
-void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
-                                     struct channel *chan,
-                                     struct switch_offsets *offsets,
-                                     u64 tsc)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long beginidx = subbuf_index(offsets->begin, chan);
-       unsigned long commit_count;
-       struct commit_counters_hot *cc_hot;
-
-       config->cb.buffer_begin(buf, tsc, beginidx);
-
-       /*
-        * Order all writes to buffer before the commit count update that will
-        * determine that the subbuffer is full.
-        */
-       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-               /*
-                * Must write slot data before incrementing commit count.  This
-                * compiler barrier is upgraded into a smp_mb() by the IPI sent
-                * by get_subbuf().
-                */
-               barrier();
-       } else
-               smp_wmb();
-       cc_hot = &buf->commit_hot[beginidx];
-       v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
-       commit_count = v_read(config, &cc_hot->cc);
-       /* Check if the written buffer has to be delivered */
-       lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
-                                     commit_count, beginidx, tsc);
-       lib_ring_buffer_write_commit_counter(config, buf, chan,
-                       offsets->begin + config->cb.subbuffer_header_size(),
-                       commit_count, cc_hot);
-}
-
-/*
- * lib_ring_buffer_switch_new_end: finish switching current subbuffer
- *
- * Calls subbuffer_set_data_size() to set the data size of the current
- * sub-buffer. We do not need to perform check_deliver nor commit here,
- * since this task will be done by the "commit" of the event for which
- * we are currently doing the space reservation.
- */
-static
-void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
-                                           struct channel *chan,
-                                           struct switch_offsets *offsets,
-                                           u64 tsc)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long endidx, data_size;
-       u64 *ts_end;
-
-       endidx = subbuf_index(offsets->end - 1, chan);
-       data_size = subbuf_offset(offsets->end - 1, chan) + 1;
-       subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
-       ts_end = &buf->ts_end[endidx];
-       /*
-        * This is the last space reservation in that sub-buffer before
-        * it gets delivered. This provides exclusive access to write to
-        * this sub-buffer's ts_end. There are also no concurrent
-        * readers of that ts_end because delivery of that sub-buffer is
-        * postponed until the commit counter is incremented for the
-        * current space reservation.
-        */
-       *ts_end = tsc;
-}
-
-/*
- * Returns:
- * 0 if OK
- * !0 if execution must be aborted.
- */
-static
-int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
-                                   struct lib_ring_buffer *buf,
-                                   struct channel *chan,
-                                   struct switch_offsets *offsets,
-                                   u64 *tsc)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long off, reserve_commit_diff;
-
-       offsets->begin = v_read(config, &buf->offset);
-       offsets->old = offsets->begin;
-       offsets->switch_old_start = 0;
-       off = subbuf_offset(offsets->begin, chan);
-
-       *tsc = config->cb.ring_buffer_clock_read(chan);
-
-       /*
-        * Ensure we flush the header of an empty subbuffer when doing the
-        * finalize (SWITCH_FLUSH). This ensures that we end up knowing the
-        * total data gathering duration even if there were no records saved
-        * after the last buffer switch.
-        * In SWITCH_ACTIVE mode, switch the buffer when it contains events.
-        * SWITCH_ACTIVE only flushes the current subbuffer, dealing with end of
-        * subbuffer header as appropriate.
-        * The next record that reserves space will be responsible for
-        * populating the following subbuffer header. We choose not to populate
-        * the next subbuffer header here because we want to be able to use
-        * SWITCH_ACTIVE for periodical buffer flush and CPU tick_nohz stop
-        * buffer flush, which must guarantee that all the buffer content
-        * (records and header timestamps) are visible to the reader. This is
-        * required for quiescence guarantees for the fusion merge.
-        */
-       if (mode != SWITCH_FLUSH && !off)
-               return -1;      /* we do not have to switch: buffer is empty */
-
-       if (unlikely(off == 0)) {
-               unsigned long sb_index, commit_count;
-
-               /*
-                * We are performing a SWITCH_FLUSH. At this stage, there are no
-                * concurrent writes into the buffer.
-                *
-                * The client does not save any header information.  Don't
-                * switch empty subbuffer on finalize, because it is invalid to
-                * deliver a completely empty subbuffer.
-                */
-               if (!config->cb.subbuffer_header_size())
-                       return -1;
-
-               /* Test new buffer integrity */
-               sb_index = subbuf_index(offsets->begin, chan);
-               commit_count = v_read(config,
-                               &buf->commit_cold[sb_index].cc_sb);
-               reserve_commit_diff =
-                 (buf_trunc(offsets->begin, chan)
-                  >> chan->backend.num_subbuf_order)
-                 - (commit_count & chan->commit_count_mask);
-               if (likely(reserve_commit_diff == 0)) {
-                       /* Next subbuffer not being written to. */
-                       if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
-                               subbuf_trunc(offsets->begin, chan)
-                                - subbuf_trunc((unsigned long)
-                                    atomic_long_read(&buf->consumed), chan)
-                               >= chan->backend.buf_size)) {
-                               /*
-                                * We do not overwrite non-consumed buffers
-                                * and we are full: don't switch.
-                                */
-                               return -1;
-                       } else {
-                               /*
-                                * Next subbuffer not being written to, and we
-                                * are either in overwrite mode or the buffer is
-                                * not full. It's safe to write in this new
-                                * subbuffer.
-                                */
-                       }
-               } else {
-                       /*
-                        * Next subbuffer reserve offset does not match the
-                        * commit offset. Don't perform switch in
-                        * producer-consumer and overwrite mode.  Caused by
-                        * either a writer OOPS or too many nested writes over a
-                        * reserve/commit pair.
-                        */
-                       return -1;
-               }
-
-               /*
-                * Need to write the subbuffer start header on finalize.
-                */
-               offsets->switch_old_start = 1;
-       }
-       offsets->begin = subbuf_align(offsets->begin, chan);
-       /* Note: old points to the next subbuf at offset 0 */
-       offsets->end = offsets->begin;
-       return 0;
-}
-
-/*
- * Force a sub-buffer switch. This operation is completely reentrant: it can be
- * called while tracing is active with absolutely no lock held.
- *
- * Note, however, that as a v_cmpxchg is used for some atomic
- * operations, this function must be called from the CPU which owns the buffer
- * for an ACTIVE flush.
- */
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct switch_offsets offsets;
-       unsigned long oldidx;
-       u64 tsc;
-
-       offsets.size = 0;
-
-       /*
-        * Perform retryable operations.
-        */
-       do {
-               if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
-                                                   &tsc))
-                       return; /* Switch not needed */
-       } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
-                != offsets.old);
-
-       /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * records, never the opposite (missing a full TSC record when it would
-        * be needed).
-        */
-       save_last_tsc(config, buf, tsc);
-
-       /*
-        * Push the reader if necessary
-        */
-       lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
-
-       oldidx = subbuf_index(offsets.old, chan);
-       lib_ring_buffer_clear_noref(config, &buf->backend, oldidx);
-
-       /*
-        * May need to populate header start on SWITCH_FLUSH.
-        */
-       if (offsets.switch_old_start) {
-               lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc);
-               offsets.old += config->cb.subbuffer_header_size();
-       }
-
-       /*
-        * Switch old subbuffer.
-        */
-       lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
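-
-/*
- * Illustrative note (not part of the original file): periodic flush
- * timers would use
- *
- *   lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
- *
- * which is a no-op on an empty sub-buffer, whereas finalize uses
- * SWITCH_FLUSH to force out the sub-buffer header even when no record
- * was written since the last switch.
- */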
-
-struct switch_param {
-       struct lib_ring_buffer *buf;
-       enum switch_mode mode;
-};
-
-static void remote_switch(void *info)
-{
-       struct switch_param *param = info;
-       struct lib_ring_buffer *buf = param->buf;
-
-       lib_ring_buffer_switch_slow(buf, param->mode);
-}
-
-static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
-               enum switch_mode mode)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       int ret;
-       struct switch_param param;
-
-       /*
-        * With global synchronization we don't need to use the IPI scheme.
-        */
-       if (config->sync == RING_BUFFER_SYNC_GLOBAL) {
-               lib_ring_buffer_switch_slow(buf, mode);
-               return;
-       }
-
-       /*
-        * Disabling preemption ensures two things: first, that the
-        * target cpu is not taken concurrently offline while we are within
-        * smp_call_function_single(). Second, if it happens that the
-        * CPU is not online, our own call to lib_ring_buffer_switch_slow()
-        * needs to be protected from CPU hotplug handlers, which can
-        * also perform a remote subbuffer switch.
-        */
-       preempt_disable();
-       param.buf = buf;
-       param.mode = mode;
-       ret = smp_call_function_single(buf->backend.cpu,
-                                remote_switch, &param, 1);
-       if (ret) {
-               /* Remote CPU is offline, do it ourselves. */
-               lib_ring_buffer_switch_slow(buf, mode);
-       }
-       preempt_enable();
-}
-
-/* Switch sub-buffer if current sub-buffer is non-empty. */
-void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
-{
-       _lib_ring_buffer_switch_remote(buf, SWITCH_ACTIVE);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote);
-
-/* Switch sub-buffer even if current sub-buffer is empty. */
-void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf)
-{
-       _lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote_empty);
-
-void lib_ring_buffer_clear(struct lib_ring_buffer *buf)
-{
-       struct lib_ring_buffer_backend *bufb = &buf->backend;
-       struct channel *chan = bufb->chan;
-
-       lib_ring_buffer_switch_remote(buf);
-       lib_ring_buffer_clear_reader(buf, chan);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_clear);
-
-/*
- * Returns:
- * 0 if OK
- * -ENOSPC if event size is too large for packet.
- * -ENOBUFS if there is currently not enough space in buffer for the event.
- * -EIO if data cannot be written into the buffer for any other reason.
- */
-static
-int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
-                                    struct channel *chan,
-                                    struct switch_offsets *offsets,
-                                    struct lib_ring_buffer_ctx *ctx,
-                                    void *client_ctx)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long reserve_commit_diff, offset_cmp;
-
-retry:
-       offsets->begin = offset_cmp = v_read(config, &buf->offset);
-       offsets->old = offsets->begin;
-       offsets->switch_new_start = 0;
-       offsets->switch_new_end = 0;
-       offsets->switch_old_end = 0;
-       offsets->pre_header_padding = 0;
-
-       ctx->tsc = config->cb.ring_buffer_clock_read(chan);
-       if ((int64_t) ctx->tsc == -EIO)
-               return -EIO;
-
-       if (last_tsc_overflow(config, buf, ctx->tsc))
-               ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
-
-       if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
-               offsets->switch_new_start = 1;          /* For offsets->begin */
-       } else {
-               offsets->size = config->cb.record_header_size(config, chan,
-                                               offsets->begin,
-                                               &offsets->pre_header_padding,
-                                               ctx, client_ctx);
-               offsets->size +=
-                       lib_ring_buffer_align(offsets->begin + offsets->size,
-                                             ctx->largest_align)
-                       + ctx->data_size;
-               if (unlikely(subbuf_offset(offsets->begin, chan) +
-                            offsets->size > chan->backend.subbuf_size)) {
-                       offsets->switch_old_end = 1;    /* For offsets->old */
-                       offsets->switch_new_start = 1;  /* For offsets->begin */
-               }
-       }
-       if (unlikely(offsets->switch_new_start)) {
-               unsigned long sb_index, commit_count;
-
-               /*
-                * We are typically not filling the previous buffer completely.
-                */
-               if (likely(offsets->switch_old_end))
-                       offsets->begin = subbuf_align(offsets->begin, chan);
-               offsets->begin = offsets->begin
-                                + config->cb.subbuffer_header_size();
-               /* Test new buffer integrity */
-               sb_index = subbuf_index(offsets->begin, chan);
-               /*
-                * Read buf->offset before buf->commit_cold[sb_index].cc_sb.
-                * lib_ring_buffer_check_deliver() has the matching
-                * memory barriers required around commit_cold cc_sb
-                * updates to ensure reserve and commit counter updates
-                * are not seen reordered when updated by another CPU.
-                */
-               smp_rmb();
-               commit_count = v_read(config,
-                               &buf->commit_cold[sb_index].cc_sb);
-               /* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
-               smp_rmb();
-               if (unlikely(offset_cmp != v_read(config, &buf->offset))) {
-                       /*
-                        * The reserve counter has been concurrently updated
-                        * while we read the commit counter. This means the
-                        * commit counter we read might not match buf->offset
-                        * due to concurrent update. We therefore need to retry.
-                        */
-                       goto retry;
-               }
-               reserve_commit_diff =
-                 (buf_trunc(offsets->begin, chan)
-                  >> chan->backend.num_subbuf_order)
-                 - (commit_count & chan->commit_count_mask);
-               if (likely(reserve_commit_diff == 0)) {
-                       /* Next subbuffer not being written to. */
-                       if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
-                               subbuf_trunc(offsets->begin, chan)
-                                - subbuf_trunc((unsigned long)
-                                    atomic_long_read(&buf->consumed), chan)
-                               >= chan->backend.buf_size)) {
-                               /*
-                                * We do not overwrite non-consumed buffers
-                                * and we are full: record is lost.
-                                */
-                               v_inc(config, &buf->records_lost_full);
-                               return -ENOBUFS;
-                       } else {
-                               /*
-                                * Next subbuffer not being written to, and we
-                                * are either in overwrite mode or the buffer is
-                                * not full. It's safe to write in this new
-                                * subbuffer.
-                                */
-                       }
-               } else {
-                       /*
-                        * Next subbuffer reserve offset does not match the
-                        * commit offset, and this did not involve update to the
-                        * reserve counter. Drop record in producer-consumer and
-                        * overwrite mode.  Caused by either a writer OOPS or
-                        * too many nested writes over a reserve/commit pair.
-                        */
-                       v_inc(config, &buf->records_lost_wrap);
-                       return -EIO;
-               }
-               offsets->size =
-                       config->cb.record_header_size(config, chan,
-                                               offsets->begin,
-                                               &offsets->pre_header_padding,
-                                               ctx, client_ctx);
-               offsets->size +=
-                       lib_ring_buffer_align(offsets->begin + offsets->size,
-                                             ctx->largest_align)
-                       + ctx->data_size;
-               if (unlikely(subbuf_offset(offsets->begin, chan)
-                            + offsets->size > chan->backend.subbuf_size)) {
-                       /*
-                        * Record too big for subbuffers, report error, don't
-                        * complete the sub-buffer switch.
-                        */
-                       v_inc(config, &buf->records_lost_big);
-                       return -ENOSPC;
-               } else {
-                       /*
-                        * We just made a successful buffer switch and the
-                        * record fits in the new subbuffer. Let's write.
-                        */
-               }
-       } else {
-               /*
-                * Record fits in the current buffer and we are not on a switch
-                * boundary. It's safe to write.
-                */
-       }
-       offsets->end = offsets->begin + offsets->size;
-
-       if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
-               /*
-                * The offset_end will fall at the very beginning of the next
-                * subbuffer.
-                */
-               offsets->switch_new_end = 1;    /* For offsets->begin */
-       }
-       return 0;
-}
-
-static struct lib_ring_buffer *get_current_buf(struct channel *chan, int cpu)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               return per_cpu_ptr(chan->backend.buf, cpu);
-       else
-               return chan->backend.buf;
-}
-
-void lib_ring_buffer_lost_event_too_big(struct channel *chan)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf = get_current_buf(chan, smp_processor_id());
-
-       v_inc(config, &buf->records_lost_big);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_lost_event_too_big);
-
-/**
- * lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
- * @ctx: ring buffer context.
- *
- * Return: -ENOBUFS if not enough space, -ENOSPC if event size too large,
- * -EIO for other errors, else returns 0.
- * It will take care of sub-buffer switching.
- */
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
-               void *client_ctx)
-{
-       struct channel *chan = ctx->chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
-       struct switch_offsets offsets;
-       int ret;
-
-       ctx->buf = buf = get_current_buf(chan, ctx->cpu);
-       offsets.size = 0;
-
-       do {
-               ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
-                                                      ctx, client_ctx);
-               if (unlikely(ret))
-                       return ret;
-       } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
-                                   offsets.end)
-                         != offsets.old));
-
-       /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * records, never the opposite (missing a full TSC record when it would
-        * be needed).
-        */
-       save_last_tsc(config, buf, ctx->tsc);
-
-       /*
-        * Push the reader if necessary
-        */
-       lib_ring_buffer_reserve_push_reader(buf, chan, offsets.end - 1);
-
-       /*
-        * Clear noref flag for this subbuffer.
-        */
-       lib_ring_buffer_clear_noref(config, &buf->backend,
-                                   subbuf_index(offsets.end - 1, chan));
-
-       /*
-        * Switch old subbuffer if needed.
-        */
-       if (unlikely(offsets.switch_old_end)) {
-               lib_ring_buffer_clear_noref(config, &buf->backend,
-                                           subbuf_index(offsets.old - 1, chan));
-               lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
-       }
-
-       /*
-        * Populate new subbuffer.
-        */
-       if (unlikely(offsets.switch_new_start))
-               lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
-
-       if (unlikely(offsets.switch_new_end))
-               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
-
-       ctx->slot_size = offsets.size;
-       ctx->pre_offset = offsets.begin;
-       ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
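-
-/*
- * Illustrative producer sketch (not part of the original file), assuming
- * the fast-path helpers from ringbuffer/frontend_api.h:
- *
- *   lib_ring_buffer_ctx_init(&ctx, chan, priv, size, align, cpu);
- *   ret = lib_ring_buffer_reserve(config, &ctx);
- *   if (!ret) {
- *           lib_ring_buffer_write(config, &ctx, src, size);
- *           lib_ring_buffer_commit(config, &ctx);
- *   }
- *
- * The fast path is expected to fall back to
- * lib_ring_buffer_reserve_slow() only when the record crosses a
- * sub-buffer boundary or a full timestamp must be recorded.
- */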
-
-static
-void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
-                                         struct lib_ring_buffer *buf,
-                                         unsigned long commit_count,
-                                         unsigned long idx)
-{
-       if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
-               v_set(config, &buf->commit_hot[idx].seq, commit_count);
-}
-
-/*
- * The ring buffer can count events recorded and overwritten per buffer,
- * but this counting is disabled by default due to its performance overhead.
- */
-#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
-static
-void deliver_count_events(const struct lib_ring_buffer_config *config,
-               struct lib_ring_buffer *buf,
-               unsigned long idx)
-{
-       v_add(config, subbuffer_get_records_count(config,
-                       &buf->backend, idx),
-               &buf->records_count);
-       v_add(config, subbuffer_count_records_overrun(config,
-                       &buf->backend, idx),
-               &buf->records_overrun);
-}
-#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
-static
-void deliver_count_events(const struct lib_ring_buffer_config *config,
-               struct lib_ring_buffer *buf,
-               unsigned long idx)
-{
-}
-#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
-
-
-void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
-                                  struct lib_ring_buffer *buf,
-                                  struct channel *chan,
-                                  unsigned long offset,
-                                  unsigned long commit_count,
-                                  unsigned long idx,
-                                  u64 tsc)
-{
-       unsigned long old_commit_count = commit_count
-                                        - chan->backend.subbuf_size;
-
-       /*
-        * If we succeeded at updating cc_sb below, we are the subbuffer
-        * writer delivering the subbuffer. Deals with concurrent
-        * updates of the "cc" value without adding an add_return atomic
-        * operation to the fast path.
-        *
-        * We are doing the delivery in two steps:
-        * - First, we cmpxchg() cc_sb to the new value
-        *   old_commit_count + 1. This ensures that we are the only
-        *   subbuffer user successfully filling the subbuffer, but we
-        *   do _not_ set the cc_sb value to "commit_count" yet.
-        *   Therefore, other writers that would wrap around the ring
-        *   buffer and try to start writing to our subbuffer would
-        *   have to drop records, because it would appear as
-        *   non-filled.
-        *   We therefore have exclusive access to the subbuffer control
-        *   structures.  This mutual exclusion with other writers is
-        *   crucially important to perform record overruns count in
-        *   flight recorder mode locklessly.
-        * - When we are ready to release the subbuffer (either for
-        *   reading or for overrun by other writers), we simply set the
-        *   cc_sb value to "commit_count" and perform delivery.
-        *
-        * The subbuffer size is at least 2 bytes (minimum size: 1 page).
-        * This guarantees that old_commit_count + 1 != commit_count.
-        */
-
-       /*
-        * Order prior updates to reserve count prior to the
-        * commit_cold cc_sb update.
-        */
-       smp_wmb();
-       if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
-                                old_commit_count, old_commit_count + 1)
-                  == old_commit_count)) {
-               u64 *ts_end;
-
-               /*
-                * Start of exclusive subbuffer access. We are
-                * guaranteed to be the last writer in this subbuffer
-                * and any other writer trying to access this subbuffer
-                * in this state is required to drop records.
-                *
-                * We can read the ts_end for the current sub-buffer
-                * which has been saved by the very last space
-                * reservation for the current sub-buffer.
-                *
-                * Order increment of commit counter before reading ts_end.
-                */
-               smp_mb();
-               ts_end = &buf->ts_end[idx];
-               deliver_count_events(config, buf, idx);
-               config->cb.buffer_end(buf, *ts_end, idx,
-                                     lib_ring_buffer_get_data_size(config,
-                                                               buf,
-                                                               idx));
-
-               /*
-                * Increment the packet counter while we have exclusive
-                * access.
-                */
-               subbuffer_inc_packet_count(config, &buf->backend, idx);
-
-               /*
-                * Set noref flag and offset for this subbuffer id.
-                * Contains a memory barrier that ensures counter stores
-                * are ordered before set noref and offset.
-                */
-               lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
-                                                buf_trunc_val(offset, chan));
-
-               /*
-                * Order set_noref and record counter updates before the
-                * end of subbuffer exclusive access. Orders with
-                * respect to writers coming into the subbuffer after
-                * wrap around, and also order wrt concurrent readers.
-                */
-               smp_mb();
-               /* End of exclusive subbuffer access */
-               v_set(config, &buf->commit_cold[idx].cc_sb,
-                     commit_count);
-               /*
-                * Order later updates to reserve count after
-                * the commit_cold cc_sb update.
-                */
-               smp_wmb();
-               lib_ring_buffer_vmcore_check_deliver(config, buf,
-                                                commit_count, idx);
-
-               /*
-                * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
-                */
-               if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
-                   && atomic_long_read(&buf->active_readers)
-                   && lib_ring_buffer_poll_deliver(config, buf, chan)) {
-                       wake_up_interruptible(&buf->read_wait);
-                       wake_up_interruptible(&chan->read_wait);
-               }
-
-       }
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_check_deliver_slow);
-
-int __init init_lib_ring_buffer_frontend(void)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu)
-               spin_lock_init(&per_cpu(ring_buffer_nohz_lock, cpu));
-       return 0;
-}
-
-module_init(init_lib_ring_buffer_frontend);
-
-void __exit exit_lib_ring_buffer_frontend(void)
-{
-}
-
-module_exit(exit_lib_ring_buffer_frontend);
diff --git a/lib/ringbuffer/ring_buffer_iterator.c b/lib/ringbuffer/ring_buffer_iterator.c
deleted file mode 100644 (file)
index 15d7c75..0000000
+++ /dev/null
@@ -1,841 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * ring_buffer_iterator.c
- *
- * Ring buffer and channel iterators. Get each event of a channel in order. Uses
- * a prio heap for per-cpu buffers, giving an O(log(NR_CPUS)) algorithmic
- * complexity for the "get next event" operation.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <ringbuffer/iterator.h>
-#include <wrapper/file.h>
-#include <wrapper/uaccess.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-
-/*
- * Safety factor taking into account internal kernel interrupt latency.
- * Assuming 250ms worst-case latency.
- */
-#define MAX_SYSTEM_LATENCY     250
-
-/*
- * Maximum delta expected between trace clocks. At most 1 jiffy delta.
- */
-#define MAX_CLOCK_DELTA                (jiffies_to_usecs(1) * 1000)
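-
-/*
- * For example (illustrative): with HZ = 250, jiffies_to_usecs(1) is
- * 4000, so MAX_CLOCK_DELTA is 4,000,000, i.e. one 4 ms tick expressed
- * in nanoseconds (assuming a nanosecond trace clock).
- */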
-
-/**
- * lib_ring_buffer_get_next_record - Get the next record in a buffer.
- * @chan: channel
- * @buf: buffer
- *
- * Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
- * buffer is empty and finalized. The buffer must already be opened for reading.
- */
-ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
-                                       struct lib_ring_buffer *buf)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer_iter *iter = &buf->iter;
-       int ret;
-
-restart:
-       switch (iter->state) {
-       case ITER_GET_SUBBUF:
-               ret = lib_ring_buffer_get_next_subbuf(buf);
-               if (ret && !READ_ONCE(buf->finalized)
-                   && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
-                       /*
-                        * Use "pull" scheme for global buffers. The reader
-                        * itself flushes the buffer to "pull" data not visible
-                        * to readers yet. Flush current subbuffer and re-try.
-                        *
-                        * Per-CPU buffers rather use a "push" scheme because
-                        * the IPI needed to flush all CPUs' buffers is too
-                        * costly. In the "push" scheme, the reader waits for
-                        * the writer periodic timer to flush the
-                        * buffers (keeping track of a quiescent state
-                        * timestamp). Therefore, the writer "pushes" data out
-                        * of the buffers rather than letting the reader "pull"
-                        * data from the buffer.
-                        */
-                       lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-                       ret = lib_ring_buffer_get_next_subbuf(buf);
-               }
-               if (ret)
-                       return ret;
-               iter->consumed = buf->cons_snapshot;
-               iter->data_size = lib_ring_buffer_get_read_data_size(config, buf);
-               iter->read_offset = iter->consumed;
-               /* skip header */
-               iter->read_offset += config->cb.subbuffer_header_size();
-               iter->state = ITER_TEST_RECORD;
-               goto restart;
-       case ITER_TEST_RECORD:
-               if (iter->read_offset - iter->consumed >= iter->data_size) {
-                       iter->state = ITER_PUT_SUBBUF;
-               } else {
-                       CHAN_WARN_ON(chan, !config->cb.record_get);
-                       config->cb.record_get(config, chan, buf,
-                                             iter->read_offset,
-                                             &iter->header_len,
-                                             &iter->payload_len,
-                                             &iter->timestamp);
-                       iter->read_offset += iter->header_len;
-                       subbuffer_consume_record(config, &buf->backend);
-                       iter->state = ITER_NEXT_RECORD;
-                       return iter->payload_len;
-               }
-               goto restart;
-       case ITER_NEXT_RECORD:
-               iter->read_offset += iter->payload_len;
-               iter->state = ITER_TEST_RECORD;
-               goto restart;
-       case ITER_PUT_SUBBUF:
-               lib_ring_buffer_put_next_subbuf(buf);
-               iter->state = ITER_GET_SUBBUF;
-               goto restart;
-       default:
-               CHAN_WARN_ON(chan, 1);  /* Should not happen */
-               return -EPERM;
-       }
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
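-
-/*
- * Illustrative sketch (not part of the original file): draining every
- * record currently readable from one buffer,
- *
- *   while ((len = lib_ring_buffer_get_next_record(chan, buf)) >= 0) {
- *           ... payload of length len is at buf->iter.read_offset ...
- *   }
- *
- * where a final -EAGAIN means "empty for now" and -ENODATA means the
- * buffer is finalized.
- */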
-
-static int buf_is_higher(void *a, void *b)
-{
-       struct lib_ring_buffer *bufa = a;
-       struct lib_ring_buffer *bufb = b;
-
-       /* Consider lowest timestamps to be at the top of the heap */
-       return (bufa->iter.timestamp < bufb->iter.timestamp);
-}
-
-static
-void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
-                                          struct channel *chan)
-{
-       struct lttng_ptr_heap *heap = &chan->iter.heap;
-       struct lib_ring_buffer *buf, *tmp;
-       ssize_t len;
-
-       list_for_each_entry_safe(buf, tmp, &chan->iter.empty_head,
-                                iter.empty_node) {
-               len = lib_ring_buffer_get_next_record(chan, buf);
-
-               /*
-                * Deal with -EAGAIN and -ENODATA.
-                * len >= 0 means record contains data.
-                * -EBUSY should never happen, because we support only one
-                * reader.
-                */
-               switch (len) {
-               case -EAGAIN:
-                       /* Keep node in empty list */
-                       break;
-               case -ENODATA:
-                       /*
-                        * Buffer is finalized. Don't add to list of empty
-                        * buffer, because it has no more data to provide, ever.
-                        */
-                       list_del(&buf->iter.empty_node);
-                       break;
-               case -EBUSY:
-                       CHAN_WARN_ON(chan, 1);
-                       break;
-               default:
-                       /*
-                        * Insert buffer into the heap, remove from empty buffer
-                        * list.
-                        */
-                       CHAN_WARN_ON(chan, len < 0);
-                       list_del(&buf->iter.empty_node);
-                       CHAN_WARN_ON(chan, lttng_heap_insert(heap, buf));
-               }
-       }
-}
-
-static
-void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
-                                struct channel *chan)
-{
-       u64 timestamp_qs;
-       unsigned long wait_msecs;
-
-       /*
-        * No need to wait if no empty buffers are present.
-        */
-       if (list_empty(&chan->iter.empty_head))
-               return;
-
-       timestamp_qs = config->cb.ring_buffer_clock_read(chan);
-       /*
-        * We need to consider previously empty buffers.
-        * Do a get next buf record on each of them. Add them to
-        * the heap if they have data. If at least one of them
-                        * doesn't have data, we need to wait for
-                        * switch_timer_interval + MAX_SYSTEM_LATENCY (so we are
-                        * sure the buffers have been switched either by the
-                        * timer or by idle entry) and check them again, adding
-                        * them if they have data.
-        */
-       lib_ring_buffer_get_empty_buf_records(config, chan);
-
-       /*
-        * No need to wait if no empty buffers are present.
-        */
-       if (list_empty(&chan->iter.empty_head))
-               return;
-
-       /*
-        * We need to wait for the buffer switch timer to run. If the
-        * CPU is idle, idle entry performed the switch.
-        * TODO: we could optimize further by skipping the sleep if all
-        * empty buffers belong to idle or offline cpus.
-        */
-       wait_msecs = jiffies_to_msecs(chan->switch_timer_interval);
-       wait_msecs += MAX_SYSTEM_LATENCY;
-       msleep(wait_msecs);
-       lib_ring_buffer_get_empty_buf_records(config, chan);
-       /*
-        * Any buffer still in the empty list here cannot possibly
-        * contain an event with a timestamp prior to "timestamp_qs".
-        * The new quiescent state timestamp is the one we grabbed
-        * before waiting for buffer data.  It is therefore safe to
-        * ignore empty buffers up to the last_qs timestamp for the
-        * fusion merge.
-        */
-       chan->iter.last_qs = timestamp_qs;
-}
-
-/**
- * channel_get_next_record - Get the next record in a channel.
- * @chan: channel
- * @ret_buf: the buffer in which the event is located (output)
- *
- * Returns the size of the new current event, -EAGAIN if all buffers are empty,
- * -ENODATA if all buffers are empty and finalized. The channel must already be
- * opened for reading.
- */
-
-ssize_t channel_get_next_record(struct channel *chan,
-                               struct lib_ring_buffer **ret_buf)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
-       struct lttng_ptr_heap *heap;
-       ssize_t len;
-
-       if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
-               *ret_buf = channel_get_ring_buffer(config, chan, 0);
-               return lib_ring_buffer_get_next_record(chan, *ret_buf);
-       }
-
-       heap = &chan->iter.heap;
-
-       /*
-        * get next record for topmost buffer.
-        */
-       buf = lttng_heap_maximum(heap);
-       if (buf) {
-               len = lib_ring_buffer_get_next_record(chan, buf);
-               /*
-                * Deal with -EAGAIN and -ENODATA.
-                * len >= 0 means record contains data.
-                */
-               switch (len) {
-               case -EAGAIN:
-                       buf->iter.timestamp = 0;
-                       list_add(&buf->iter.empty_node, &chan->iter.empty_head);
-                       /* Remove topmost buffer from the heap */
-                       CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
-                       break;
-               case -ENODATA:
-                       /*
-                        * Buffer is finalized. Remove buffer from heap and
-                        * don't add to the list of empty buffers, because it
-                        * has no more data to provide, ever.
-                        */
-                       CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
-                       break;
-               case -EBUSY:
-                       CHAN_WARN_ON(chan, 1);
-                       break;
-               default:
-                       /*
-                        * Reinsert buffer into the heap. Note that heap can be
-                        * partially empty, so we need to use
-                        * lttng_heap_replace_max().
-                        */
-                       CHAN_WARN_ON(chan, len < 0);
-                       CHAN_WARN_ON(chan, lttng_heap_replace_max(heap, buf) != buf);
-                       break;
-               }
-       }
-
-       buf = lttng_heap_maximum(heap);
-       if (!buf || buf->iter.timestamp > chan->iter.last_qs) {
-               /*
-                * Deal with buffers previously showing no data.
-                * Add buffers containing data to the heap, update
-                * last_qs.
-                */
-               lib_ring_buffer_wait_for_qs(config, chan);
-       }
-
-       *ret_buf = buf = lttng_heap_maximum(heap);
-       if (buf) {
-               /*
-                * If this warning triggers, you probably need to check your
-                * system interrupt latency. Typical cause: too much printk()
-                * output going to a serial console with interrupts off.
-                * Allow for MAX_CLOCK_DELTA ns timestamp delta going backward.
-                * Observed on SMP KVM setups with trace_clock().
-                */
-               if (chan->iter.last_timestamp
-                   > (buf->iter.timestamp + MAX_CLOCK_DELTA)) {
-                       printk(KERN_WARNING "ring_buffer: timestamps going "
-                              "backward. Last time %llu ns, cpu %d, "
-                              "current time %llu ns, cpu %d, "
-                              "delta %llu ns.\n",
-                              chan->iter.last_timestamp, chan->iter.last_cpu,
-                              buf->iter.timestamp, buf->backend.cpu,
-                              chan->iter.last_timestamp - buf->iter.timestamp);
-                       CHAN_WARN_ON(chan, 1);
-               }
-               chan->iter.last_timestamp = buf->iter.timestamp;
-               chan->iter.last_cpu = buf->backend.cpu;
-               return buf->iter.payload_len;
-       } else {
-               /* Heap is empty */
-               if (list_empty(&chan->iter.empty_head))
-                       return -ENODATA;        /* All buffers finalized */
-               else
-                       return -EAGAIN;         /* Temporarily empty */
-       }
-}
-EXPORT_SYMBOL_GPL(channel_get_next_record);
-
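A hedged sketch of a kernel-side consumer draining a per-CPU channel in timestamp order through channel_get_next_record(); 'chan' is an opened channel, and the msleep() back-off on -EAGAIN is an assumption, not part of this API:

	struct lib_ring_buffer *buf;
	ssize_t len;

	for (;;) {
		len = channel_get_next_record(chan, &buf);
		if (len == -ENODATA)
			break;		/* all buffers finalized: end of trace */
		if (len == -EAGAIN) {
			msleep(10);	/* arbitrary back-off (assumption) */
			continue;
		}
		/* 'len' payload bytes start at buf->iter.read_offset in 'buf'. */
	}
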
-static
-void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer *buf)
-{
-       if (buf->iter.allocated)
-               return;
-
-       buf->iter.allocated = 1;
-       if (chan->iter.read_open && !buf->iter.read_open) {
-               CHAN_WARN_ON(chan, lib_ring_buffer_open_read(buf) != 0);
-               buf->iter.read_open = 1;
-       }
-
-       /* Add to list of buffers without any current record */
-       if (chan->backend.config.alloc == RING_BUFFER_ALLOC_PER_CPU)
-               list_add(&buf->iter.empty_node, &chan->iter.empty_head);
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-
-int lttng_cpuhp_rb_iter_online(unsigned int cpu,
-               struct lttng_cpuhp_node *node)
-{
-       struct channel *chan = container_of(node, struct channel,
-                                           cpuhp_iter_online);
-       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
-       lib_ring_buffer_iterator_init(chan, buf);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online);
-
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-#ifdef CONFIG_HOTPLUG_CPU
-static
-int channel_iterator_cpu_hotplug(struct notifier_block *nb,
-                                          unsigned long action,
-                                          void *hcpu)
-{
-       unsigned int cpu = (unsigned long)hcpu;
-       struct channel *chan = container_of(nb, struct channel,
-                                           hp_iter_notifier);
-       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (!chan->hp_iter_enable)
-               return NOTIFY_DONE;
-
-       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
-       switch (action) {
-       case CPU_DOWN_FAILED:
-       case CPU_DOWN_FAILED_FROZEN:
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               lib_ring_buffer_iterator_init(chan, buf);
-               return NOTIFY_OK;
-       default:
-               return NOTIFY_DONE;
-       }
-}
-#endif
-
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-int channel_iterator_init(struct channel *chan)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               int ret;
-
-               INIT_LIST_HEAD(&chan->iter.empty_head);
-               ret = lttng_heap_init(&chan->iter.heap,
-                               num_possible_cpus(),
-                               GFP_KERNEL, buf_is_higher);
-               if (ret)
-                       return ret;
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-               chan->cpuhp_iter_online.component = LTTNG_RING_BUFFER_ITER;
-               ret = cpuhp_state_add_instance(lttng_rb_hp_online,
-                       &chan->cpuhp_iter_online.node);
-               if (ret)
-                       return ret;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-               {
-                       int cpu;
-
-                       /*
-                        * In the non-hotplug case, if the ring buffer is
-                        * allocated in an early initcall, it will not be
-                        * notified of secondary CPUs. In that case, we need to
-                        * allocate for all possible CPUs.
-                        */
-#ifdef CONFIG_HOTPLUG_CPU
-                       chan->hp_iter_notifier.notifier_call =
-                               channel_iterator_cpu_hotplug;
-                       chan->hp_iter_notifier.priority = 10;
-                       register_cpu_notifier(&chan->hp_iter_notifier);
-
-                       get_online_cpus();
-                       for_each_online_cpu(cpu) {
-                               buf = per_cpu_ptr(chan->backend.buf, cpu);
-                               lib_ring_buffer_iterator_init(chan, buf);
-                       }
-                       chan->hp_iter_enable = 1;
-                       put_online_cpus();
-#else
-                       for_each_possible_cpu(cpu) {
-                               buf = per_cpu_ptr(chan->backend.buf, cpu);
-                               lib_ring_buffer_iterator_init(chan, buf);
-                       }
-#endif
-               }
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-       } else {
-               buf = channel_get_ring_buffer(config, chan, 0);
-               lib_ring_buffer_iterator_init(chan, buf);
-       }
-       return 0;
-}
-
-void channel_iterator_unregister_notifiers(struct channel *chan)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-               {
-                       int ret;
-
-                       ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
-                               &chan->cpuhp_iter_online.node);
-                       WARN_ON(ret);
-               }
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-               chan->hp_iter_enable = 0;
-               unregister_cpu_notifier(&chan->hp_iter_notifier);
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-       }
-}
-
-void channel_iterator_free(struct channel *chan)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               lttng_heap_free(&chan->iter.heap);
-}
-
-int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
-       return lib_ring_buffer_open_read(buf);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_open);
-
-/*
- * Note: Iterators must not be mixed with other types of outputs, because an
- * iterator can leave the buffer in "GET" state, which is not consistent with
- * other types of output (mmap, splice, raw data read).
- */
-void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
-{
-       lib_ring_buffer_release_read(buf);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
-
-int channel_iterator_open(struct channel *chan)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
-       int ret = 0, cpu;
-
-       CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               get_online_cpus();
-               /* Allow CPU hotplug to keep track of opened reader */
-               chan->iter.read_open = 1;
-               for_each_channel_cpu(cpu, chan) {
-                       buf = channel_get_ring_buffer(config, chan, cpu);
-                       ret = lib_ring_buffer_iterator_open(buf);
-                       if (ret)
-                               goto error;
-                       buf->iter.read_open = 1;
-               }
-               put_online_cpus();
-       } else {
-               buf = channel_get_ring_buffer(config, chan, 0);
-               ret = lib_ring_buffer_iterator_open(buf);
-       }
-       return ret;
-error:
-       /* Error should always happen on CPU 0, hence no close is required. */
-       CHAN_WARN_ON(chan, cpu != 0);
-       put_online_cpus();
-       return ret;
-}
-EXPORT_SYMBOL_GPL(channel_iterator_open);
-
-void channel_iterator_release(struct channel *chan)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
-       int cpu;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               get_online_cpus();
-               for_each_channel_cpu(cpu, chan) {
-                       buf = channel_get_ring_buffer(config, chan, cpu);
-                       if (buf->iter.read_open) {
-                               lib_ring_buffer_iterator_release(buf);
-                               buf->iter.read_open = 0;
-                       }
-               }
-               chan->iter.read_open = 0;
-               put_online_cpus();
-       } else {
-               buf = channel_get_ring_buffer(config, chan, 0);
-               lib_ring_buffer_iterator_release(buf);
-       }
-}
-EXPORT_SYMBOL_GPL(channel_iterator_release);
-
-void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-
-       if (buf->iter.state != ITER_GET_SUBBUF)
-               lib_ring_buffer_put_next_subbuf(buf);
-       buf->iter.state = ITER_GET_SUBBUF;
-       /* Remove from heap (if present). */
-       if (lttng_heap_cherrypick(&chan->iter.heap, buf))
-               list_add(&buf->iter.empty_node, &chan->iter.empty_head);
-       buf->iter.timestamp = 0;
-       buf->iter.header_len = 0;
-       buf->iter.payload_len = 0;
-       buf->iter.consumed = 0;
-       buf->iter.read_offset = 0;
-       buf->iter.data_size = 0;
-       /* Don't reset allocated and read_open */
-}
-
-void channel_iterator_reset(struct channel *chan)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
-       int cpu;
-
-       /* Empty heap, put into empty_head */
-       while ((buf = lttng_heap_remove(&chan->iter.heap)) != NULL)
-               list_add(&buf->iter.empty_node, &chan->iter.empty_head);
-
-       for_each_channel_cpu(cpu, chan) {
-               buf = channel_get_ring_buffer(config, chan, cpu);
-               lib_ring_buffer_iterator_reset(buf);
-       }
-       /* Don't reset read_open */
-       chan->iter.last_qs = 0;
-       chan->iter.last_timestamp = 0;
-       chan->iter.last_cpu = 0;
-       chan->iter.len_left = 0;
-}
-
-/*
- * Ring buffer payload extraction read() implementation.
- */
-static
-ssize_t channel_ring_buffer_file_read(struct file *filp,
-                                     char __user *user_buf,
-                                     size_t count,
-                                     loff_t *ppos,
-                                     struct channel *chan,
-                                     struct lib_ring_buffer *buf,
-                                     int fusionmerge)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       size_t read_count = 0, read_offset;
-       ssize_t len;
-
-       might_sleep();
-       if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
-               return -EFAULT;
-
-       /* Finish copy of previous record */
-       if (*ppos != 0) {
-               if (read_count < count) {
-                       len = chan->iter.len_left;
-                       read_offset = *ppos;
-                       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU
-                           && fusionmerge)
-                               buf = lttng_heap_maximum(&chan->iter.heap);
-                       CHAN_WARN_ON(chan, !buf);
-                       goto skip_get_next;
-               }
-       }
-
-       while (read_count < count) {
-               size_t copy_len, space_left;
-
-               if (fusionmerge)
-                       len = channel_get_next_record(chan, &buf);
-               else
-                       len = lib_ring_buffer_get_next_record(chan, buf);
-len_test:
-               if (len < 0) {
-                       /*
-                        * Check if buffer is finalized (end of file).
-                        */
-                       if (len == -ENODATA) {
-                               /* A 0 read_count indicates end of file */
-                               goto nodata;
-                       }
-                       if (filp->f_flags & O_NONBLOCK) {
-                               if (!read_count)
-                                       read_count = -EAGAIN;
-                               goto nodata;
-                       } else {
-                               int error;
-
-                               /*
-                                * No data available at the moment, return what
-                                * we got.
-                                */
-                               if (read_count)
-                                       goto nodata;
-
-                               /*
-                                * Wait for returned len to be >= 0 or -ENODATA.
-                                */
-                               if (fusionmerge)
-                                       error = wait_event_interruptible(
-                                         chan->read_wait,
-                                         ((len = channel_get_next_record(chan,
-                                               &buf)), len != -EAGAIN));
-                               else
-                                       error = wait_event_interruptible(
-                                         buf->read_wait,
-                                         ((len = lib_ring_buffer_get_next_record(
-                                                 chan, buf)), len != -EAGAIN));
-                               CHAN_WARN_ON(chan, len == -EBUSY);
-                               if (error) {
-                                       read_count = error;
-                                       goto nodata;
-                               }
-                               CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
-                               goto len_test;
-                       }
-               }
-               read_offset = buf->iter.read_offset;
-skip_get_next:
-               space_left = count - read_count;
-               if (len <= space_left) {
-                       copy_len = len;
-                       chan->iter.len_left = 0;
-                       *ppos = 0;
-               } else {
-                       copy_len = space_left;
-                       chan->iter.len_left = len - copy_len;
-                       *ppos = read_offset + copy_len;
-               }
-               if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
-                                              &user_buf[read_count],
-                                              copy_len)) {
-                       /*
-                        * Leave the len_left and ppos values at their current
-                        * state, as we currently have a valid event to read.
-                        */
-                       return -EFAULT;
-               }
-               read_count += copy_len;
-       }
-       return read_count;
-
-nodata:
-       *ppos = 0;
-       chan->iter.len_left = 0;
-       return read_count;
-}
-
-/**
- * lib_ring_buffer_file_read - Read buffer record payload.
- * @filp: file structure pointer.
- * @user_buf: user buffer to read data into.
- * @count: number of bytes to read.
- * @ppos: file read position.
- *
- * Returns a negative value on error, or the number of bytes read on success.
- * ppos is used to save the position _within the current record_ between calls
- * to read().
- */
-static
-ssize_t lib_ring_buffer_file_read(struct file *filp,
-                                 char __user *user_buf,
-                                 size_t count,
-                                 loff_t *ppos)
-{
-       struct inode *inode = filp->lttng_f_dentry->d_inode;
-       struct lib_ring_buffer *buf = inode->i_private;
-       struct channel *chan = buf->backend.chan;
-
-       return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
-                                            chan, buf, 0);
-}
-
-/**
- * channel_file_read - Read channel record payload.
- * @filp: file structure pointer.
- * @user_buf: user buffer to read data into.
- * @count: number of bytes to read.
- * @ppos: file read position.
- *
- * Returns a negative value on error, or the number of bytes read on success.
- * ppos is used to save the position _within the current record_ between calls
- * to read().
- */
-static
-ssize_t channel_file_read(struct file *filp,
-                         char __user *user_buf,
-                         size_t count,
-                         loff_t *ppos)
-{
-       struct inode *inode = filp->lttng_f_dentry->d_inode;
-       struct channel *chan = inode->i_private;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               return channel_ring_buffer_file_read(filp, user_buf, count,
-                                                    ppos, chan, NULL, 1);
-       else {
-               struct lib_ring_buffer *buf =
-                       channel_get_ring_buffer(config, chan, 0);
-               return channel_ring_buffer_file_read(filp, user_buf, count,
-                                                    ppos, chan, buf, 0);
-       }
-}
-
-static
-int lib_ring_buffer_file_open(struct inode *inode, struct file *file)
-{
-       struct lib_ring_buffer *buf = inode->i_private;
-       int ret;
-
-       ret = lib_ring_buffer_iterator_open(buf);
-       if (ret)
-               return ret;
-
-       file->private_data = buf;
-       ret = nonseekable_open(inode, file);
-       if (ret)
-               goto release_iter;
-       return 0;
-
-release_iter:
-       lib_ring_buffer_iterator_release(buf);
-       return ret;
-}
-
-static
-int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
-{
-       struct lib_ring_buffer *buf = inode->i_private;
-
-       lib_ring_buffer_iterator_release(buf);
-       return 0;
-}
-
-static
-int channel_file_open(struct inode *inode, struct file *file)
-{
-       struct channel *chan = inode->i_private;
-       int ret;
-
-       ret = channel_iterator_open(chan);
-       if (ret)
-               return ret;
-
-       file->private_data = chan;
-       ret = nonseekable_open(inode, file);
-       if (ret)
-               goto release_iter;
-       return 0;
-
-release_iter:
-       channel_iterator_release(chan);
-       return ret;
-}
-
-static
-int channel_file_release(struct inode *inode, struct file *file)
-{
-       struct channel *chan = inode->i_private;
-
-       channel_iterator_release(chan);
-       return 0;
-}
-
-const struct file_operations channel_payload_file_operations = {
-       .owner = THIS_MODULE,
-       .open = channel_file_open,
-       .release = channel_file_release,
-       .read = channel_file_read,
-       .llseek = vfs_lib_ring_buffer_no_llseek,
-};
-EXPORT_SYMBOL_GPL(channel_payload_file_operations);
-
-const struct file_operations lib_ring_buffer_payload_file_operations = {
-       .owner = THIS_MODULE,
-       .open = lib_ring_buffer_file_open,
-       .release = lib_ring_buffer_file_release,
-       .read = lib_ring_buffer_file_read,
-       .llseek = vfs_lib_ring_buffer_no_llseek,
-};
-EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations);
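For reference, a minimal userspace sketch of consuming record payloads through these file operations; 'fd' is assumed to be an open descriptor on one of the payload files above:

	#include <errno.h>
	#include <unistd.h>

	char data[4096];
	ssize_t ret;

	for (;;) {
		ret = read(fd, data, sizeof(data));
		if (ret == 0)
			break;			/* all buffers finalized (EOF) */
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;	/* O_NONBLOCK, no data yet */
			break;			/* real error */
		}
		/* 'ret' bytes of record payload are now in 'data'. */
	}
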
diff --git a/lib/ringbuffer/ring_buffer_mmap.c b/lib/ringbuffer/ring_buffer_mmap.c
deleted file mode 100644 (file)
index 34174a5..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only
- *
- * ring_buffer_mmap.c
- *
- * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
- * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
- * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Re-using code from kernel/relay.c, hence the GPL-2.0-only license for this
- * file.
- */
-
-#include <linux/module.h>
-#include <linux/mm.h>
-
-#include <ringbuffer/backend.h>
-#include <ringbuffer/frontend.h>
-#include <ringbuffer/vfs.h>
-
-/*
- * fault() vm_op implementation for ring buffer file mapping.
- */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
-static vm_fault_t lib_ring_buffer_fault_compat(struct vm_area_struct *vma, struct vm_fault *vmf)
-#else
-static int lib_ring_buffer_fault_compat(struct vm_area_struct *vma, struct vm_fault *vmf)
-#endif
-{
-       struct lib_ring_buffer *buf = vma->vm_private_data;
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       pgoff_t pgoff = vmf->pgoff;
-       unsigned long *pfnp;
-       void **virt;
-       unsigned long offset, sb_bindex;
-
-       /*
-        * Verify that faults are only done on the range of pages owned by the
-        * reader.
-        */
-       offset = pgoff << PAGE_SHIFT;
-       sb_bindex = subbuffer_id_get_index(config, buf->backend.buf_rsb.id);
-       if (!(offset >= buf->backend.array[sb_bindex]->mmap_offset
-             && offset < buf->backend.array[sb_bindex]->mmap_offset +
-                         buf->backend.chan->backend.subbuf_size))
-               return VM_FAULT_SIGBUS;
-       /*
-        * ring_buffer_read_get_pfn() gets the page frame number for the
-        * current reader's pages.
-        */
-       pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, offset, &virt);
-       if (!*pfnp)
-               return VM_FAULT_SIGBUS;
-       get_page(pfn_to_page(*pfnp));
-       vmf->page = pfn_to_page(*pfnp);
-
-       return 0;
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
-static vm_fault_t lib_ring_buffer_fault(struct vm_fault *vmf)
-{
-       struct vm_area_struct *vma = vmf->vma;
-       return lib_ring_buffer_fault_compat(vma, vmf);
-}
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
-static int lib_ring_buffer_fault(struct vm_fault *vmf)
-{
-       struct vm_area_struct *vma = vmf->vma;
-       return lib_ring_buffer_fault_compat(vma, vmf);
-}
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
-static int lib_ring_buffer_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-       return lib_ring_buffer_fault_compat(vma, vmf);
-}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
-
-/*
- * vm_ops for ring buffer file mappings.
- */
-static const struct vm_operations_struct lib_ring_buffer_mmap_ops = {
-       .fault = lib_ring_buffer_fault,
-};
-
-/**
- *     lib_ring_buffer_mmap_buf - mmap channel buffer to process address space
- *     @buf: ring buffer to map
- *     @vma: vm_area_struct describing memory to be mapped
- *
- *     Returns 0 if ok, negative on error
- *
- *     Caller should already have grabbed mmap_sem.
- */
-static int lib_ring_buffer_mmap_buf(struct lib_ring_buffer *buf,
-                                   struct vm_area_struct *vma)
-{
-       unsigned long length = vma->vm_end - vma->vm_start;
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long mmap_buf_len;
-
-       if (config->output != RING_BUFFER_MMAP)
-               return -EINVAL;
-
-       mmap_buf_len = chan->backend.buf_size;
-       if (chan->backend.extra_reader_sb)
-               mmap_buf_len += chan->backend.subbuf_size;
-
-       if (length != mmap_buf_len)
-               return -EINVAL;
-
-       vma->vm_ops = &lib_ring_buffer_mmap_ops;
-       vma->vm_flags |= VM_DONTEXPAND;
-       vma->vm_private_data = buf;
-
-       return 0;
-}
-
-int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma,
-               struct lib_ring_buffer *buf)
-{
-       return lib_ring_buffer_mmap_buf(buf, vma);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_mmap);
-
-/**
- *     vfs_lib_ring_buffer_mmap - mmap file op
- *     @filp: the file
- *     @vma: the vma describing what to map
- *
- *     Calls upon lib_ring_buffer_mmap_buf() to map the file into user space.
- */
-int vfs_lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-       struct lib_ring_buffer *buf = filp->private_data;
-       return lib_ring_buffer_mmap(filp, vma, buf);
-}
-EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_mmap);
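A minimal userspace sketch of the mapping sequence implied above, assuming the RING_BUFFER_* ioctl request macros are visible to userspace and that a RING_BUFFER_GET_SUBBUF ioctl has been issued before the pages are touched (otherwise the fault handler above returns SIGBUS):

	#include <sys/ioctl.h>
	#include <sys/mman.h>

	unsigned long map_len, read_off;
	void *base;

	if (ioctl(fd, RING_BUFFER_GET_MMAP_LEN, &map_len) < 0)
		return -1;
	base = mmap(NULL, map_len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (base == MAP_FAILED)
		return -1;
	/* After a successful RING_BUFFER_GET_SUBBUF: */
	if (ioctl(fd, RING_BUFFER_GET_MMAP_READ_OFFSET, &read_off) < 0)
		return -1;
	/* The reader sub-buffer contents start at (char *)base + read_off. */
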
diff --git a/lib/ringbuffer/ring_buffer_splice.c b/lib/ringbuffer/ring_buffer_splice.c
deleted file mode 100644 (file)
index cd803a7..0000000
+++ /dev/null
@@ -1,222 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only
- *
- * ring_buffer_splice.c
- *
- * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
- * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
- * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Re-using code from kernel/relay.c, hence the GPL-2.0-only license for this
- * file.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/version.h>
-
-#include <wrapper/splice.h>
-#include <ringbuffer/backend.h>
-#include <ringbuffer/frontend.h>
-#include <ringbuffer/vfs.h>
-
-#if 0
-#define printk_dbg(fmt, args...) printk(fmt, args)
-#else
-#define printk_dbg(fmt, args...)
-#endif
-
-loff_t vfs_lib_ring_buffer_no_llseek(struct file *file, loff_t offset,
-               int origin)
-{
-       return -ESPIPE;
-}
-EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_no_llseek);
-
-/*
- * Release pages from the buffer so splice pipe_to_file can move them.
- * Called after the pipe has been populated with buffer pages.
- */
-static void lib_ring_buffer_pipe_buf_release(struct pipe_inode_info *pipe,
-                                            struct pipe_buffer *pbuf)
-{
-       __free_page(pbuf->page);
-}
-
-static const struct pipe_buf_operations ring_buffer_pipe_buf_ops = {
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0))
-       .can_merge = 0,
-#endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
-#endif
-       .confirm = generic_pipe_buf_confirm,
-       .release = lib_ring_buffer_pipe_buf_release,
-       .steal = generic_pipe_buf_steal,
-       .get = generic_pipe_buf_get,
-};
-
-/*
- * Page release operation after splice pipe_to_file ends.
- */
-static void lib_ring_buffer_page_release(struct splice_pipe_desc *spd,
-                                        unsigned int i)
-{
-       __free_page(spd->pages[i]);
-}
-
-/*
- *     subbuf_splice_actor - splice up to one subbuf's worth of data
- */
-static int subbuf_splice_actor(struct file *in,
-                              loff_t *ppos,
-                              struct pipe_inode_info *pipe,
-                              size_t len,
-                              unsigned int flags,
-                              struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned int poff, subbuf_pages, nr_pages;
-       struct page *pages[PIPE_DEF_BUFFERS];
-       struct partial_page partial[PIPE_DEF_BUFFERS];
-       struct splice_pipe_desc spd = {
-               .pages = pages,
-               .nr_pages = 0,
-               .partial = partial,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
-               .flags = flags,
-#endif
-               .ops = &ring_buffer_pipe_buf_ops,
-               .spd_release = lib_ring_buffer_page_release,
-       };
-       unsigned long consumed_old, roffset;
-       unsigned long bytes_avail;
-
-       /*
-        * Check that a GET_SUBBUF ioctl has been done before.
-        */
-       WARN_ON(atomic_long_read(&buf->active_readers) != 1);
-       consumed_old = lib_ring_buffer_get_consumed(config, buf);
-       consumed_old += *ppos;
-
-       /*
-        * Adjust read len, if longer than what is available.
-        * Max read size is 1 subbuffer due to get_subbuf/put_subbuf for
-        * protection.
-        */
-       bytes_avail = chan->backend.subbuf_size;
-       WARN_ON(bytes_avail > chan->backend.buf_size);
-       len = min_t(size_t, len, bytes_avail);
-       subbuf_pages = bytes_avail >> PAGE_SHIFT;
-       nr_pages = min_t(unsigned int, subbuf_pages, PIPE_DEF_BUFFERS);
-       roffset = consumed_old & PAGE_MASK;
-       poff = consumed_old & ~PAGE_MASK;
-       printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
-                  len, (ssize_t)*ppos, lib_ring_buffer_get_offset(config, buf));
-
-       for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
-               unsigned int this_len;
-               unsigned long *pfnp, new_pfn;
-               struct page *new_page;
-               void **virt;
-
-               if (!len)
-                       break;
-               printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
-                          len, roffset);
-
-               /*
-                * We have to replace the page we are moving into the splice
-                * pipe.
-                */
-               new_page = alloc_pages_node(cpu_to_node(max(buf->backend.cpu,
-                                                           0)),
-                                           GFP_KERNEL | __GFP_ZERO, 0);
-               if (!new_page)
-                       break;
-               new_pfn = page_to_pfn(new_page);
-               this_len = PAGE_SIZE - poff;
-               pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, roffset, &virt);
-               spd.pages[spd.nr_pages] = pfn_to_page(*pfnp);
-               *pfnp = new_pfn;
-               *virt = page_address(new_page);
-               spd.partial[spd.nr_pages].offset = poff;
-               spd.partial[spd.nr_pages].len = this_len;
-
-               poff = 0;
-               roffset += PAGE_SIZE;
-               len -= this_len;
-       }
-
-       if (!spd.nr_pages)
-               return 0;
-
-       return wrapper_splice_to_pipe(pipe, &spd);
-}
-
-ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
-                                   struct pipe_inode_info *pipe, size_t len,
-                                   unsigned int flags,
-                                   struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       ssize_t spliced;
-       int ret;
-
-       if (config->output != RING_BUFFER_SPLICE)
-               return -EINVAL;
-
-       /*
-        * We require ppos and length to be page-aligned for performance reasons
-        * (no page copy). Size is known using the ioctl
-        * RING_BUFFER_GET_PADDED_SUBBUF_SIZE, which is page-size padded.
-        * We fail when the ppos or len passed is not page-aligned, because splice
-        * is not allowed to copy more than the length passed as parameter (so
-        * the ABI does not let us silently copy more than requested to include
-        * padding).
-        */
-       if (*ppos != PAGE_ALIGN(*ppos) || len != PAGE_ALIGN(len))
-               return -EINVAL;
-
-       ret = 0;
-       spliced = 0;
-
-       printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n", len,
-                  (ssize_t)*ppos);
-       while (len && !spliced) {
-               ret = subbuf_splice_actor(in, ppos, pipe, len, flags, buf);
-               printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
-               if (ret < 0)
-                       break;
-               else if (!ret) {
-                       if (flags & SPLICE_F_NONBLOCK)
-                               ret = -EAGAIN;
-                       break;
-               }
-
-               *ppos += ret;
-               if (ret > len)
-                       len = 0;
-               else
-                       len -= ret;
-               spliced += ret;
-       }
-
-       if (spliced)
-               return spliced;
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_splice_read);
-
-ssize_t vfs_lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
-                                   struct pipe_inode_info *pipe, size_t len,
-                                   unsigned int flags)
-{
-       struct lib_ring_buffer *buf = in->private_data;
-
-       return lib_ring_buffer_splice_read(in, ppos, pipe, len, flags, buf);
-}
-EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_splice_read);
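A hedged userspace sketch of the splice path: since ppos and len must be page-aligned, the length typically comes from the RING_BUFFER_GET_PADDED_SUBBUF_SIZE ioctl once a sub-buffer is held; 'buf_fd' and the error handling are illustrative:

	#define _GNU_SOURCE	/* for splice() */
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	unsigned long padded_len;
	loff_t pos = 0;
	int pfd[2];
	ssize_t n;

	if (pipe(pfd) < 0)
		return -1;
	if (ioctl(buf_fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE, &padded_len) < 0)
		return -1;
	n = splice(buf_fd, &pos, pfd[1], NULL, padded_len, 0);
	/* Then splice pfd[0] toward a file or socket to complete the copy. */
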
diff --git a/lib/ringbuffer/ring_buffer_vfs.c b/lib/ringbuffer/ring_buffer_vfs.c
deleted file mode 100644 (file)
index e5d6a70..0000000
+++ /dev/null
@@ -1,460 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * ring_buffer_vfs.c
- *
- * Ring Buffer VFS file operations.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/compat.h>
-
-#include <ringbuffer/backend.h>
-#include <ringbuffer/frontend.h>
-#include <ringbuffer/vfs.h>
-#include <wrapper/poll.h>
-#include <lttng/tracer.h>
-
-static int put_ulong(unsigned long val, unsigned long arg)
-{
-       return put_user(val, (unsigned long __user *)arg);
-}
-
-#ifdef CONFIG_COMPAT
-static int compat_put_ulong(compat_ulong_t val, unsigned long arg)
-{
-       return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
-}
-#endif
-
-/*
- * This is not used by anonymous file descriptors. This code is kept
- * around in case we ever want to implement an inode with an open()
- * operation.
- */
-int lib_ring_buffer_open(struct inode *inode, struct file *file,
-               struct lib_ring_buffer *buf)
-{
-       int ret;
-
-       if (!buf)
-               return -EINVAL;
-
-       ret = lib_ring_buffer_open_read(buf);
-       if (ret)
-               return ret;
-
-       ret = nonseekable_open(inode, file);
-       if (ret)
-               goto release_read;
-       return 0;
-
-release_read:
-       lib_ring_buffer_release_read(buf);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_open);
-
-/**
- *     vfs_lib_ring_buffer_open - ring buffer open file operation
- *     @inode: opened inode
- *     @file: opened file
- *
- *     Open implementation. Makes sure only one read-side instance of a
- *     buffer is open at any given moment.
- */
-static
-int vfs_lib_ring_buffer_open(struct inode *inode, struct file *file)
-{
-       struct lib_ring_buffer *buf = inode->i_private;
-
-       file->private_data = buf;
-       return lib_ring_buffer_open(inode, file, buf);
-}
-
-int lib_ring_buffer_release(struct inode *inode, struct file *file,
-               struct lib_ring_buffer *buf)
-{
-       lib_ring_buffer_release_read(buf);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_release);
-
-/**
- *     vfs_lib_ring_buffer_release - ring buffer release file operation
- *     @inode: opened inode
- *     @file: opened file
- *
- *     Release implementation.
- */
-static
-int vfs_lib_ring_buffer_release(struct inode *inode, struct file *file)
-{
-       struct lib_ring_buffer *buf = file->private_data;
-
-       return lib_ring_buffer_release(inode, file, buf);
-}
-
-unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
-               struct lib_ring_buffer *buf)
-{
-       unsigned int mask = 0;
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       int finalized, disabled;
-
-       if (filp->f_mode & FMODE_READ) {
-               poll_wait_set_exclusive(wait);
-               poll_wait(filp, &buf->read_wait, wait);
-
-               finalized = lib_ring_buffer_is_finalized(config, buf);
-               disabled = lib_ring_buffer_channel_is_disabled(chan);
-
-               /*
-                * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
-                * finalized load before offsets loads.
-                */
-               WARN_ON(atomic_long_read(&buf->active_readers) != 1);
-retry:
-               if (disabled)
-                       return POLLERR;
-
-               if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
-                 - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
-                 == 0) {
-                       if (finalized)
-                               return POLLHUP;
-                       else {
-                               /*
-                                * The memory barriers
-                                * __wait_event()/wake_up_interruptible() take
-                                * care of "raw_spin_is_locked" memory ordering.
-                                */
-                               if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
-                                       goto retry;
-                               else
-                                       return 0;
-                       }
-               } else {
-                       if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
-                                        chan)
-                         - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
-                                        chan)
-                         >= chan->backend.buf_size)
-                               return POLLPRI | POLLRDBAND;
-                       else
-                               return POLLIN | POLLRDNORM;
-               }
-       }
-       return mask;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_poll);
-
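These return values map onto a conventional poll() loop in userspace; a minimal sketch (the descriptor and the blocking timeout are assumptions):

	#include <poll.h>

	struct pollfd p = { .fd = fd, .events = POLLIN | POLLPRI };

	if (poll(&p, 1, -1) > 0) {
		if (p.revents & POLLERR) {
			/* channel disabled */
		} else if (p.revents & POLLHUP) {
			/* buffer finalized and empty: stop reading */
		} else if (p.revents & POLLPRI) {
			/* at least a full buffer of data is ready */
		} else if (p.revents & POLLIN) {
			/* some data is available for consumption */
		}
	}
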
-/**
- *     vfs_lib_ring_buffer_poll - ring buffer poll file operation
- *     @filp: the file
- *     @wait: poll table
- *
- *     Poll implementation.
- */
-static
-unsigned int vfs_lib_ring_buffer_poll(struct file *filp, poll_table *wait)
-{
-       struct lib_ring_buffer *buf = filp->private_data;
-
-       return lib_ring_buffer_poll(filp, wait, buf);
-}
-
-long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
-               unsigned long arg, struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (lib_ring_buffer_channel_is_disabled(chan))
-               return -EIO;
-
-       switch (cmd) {
-       case RING_BUFFER_SNAPSHOT:
-               return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
-                                           &buf->prod_snapshot);
-       case RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS:
-               return lib_ring_buffer_snapshot_sample_positions(buf,
-                               &buf->cons_snapshot, &buf->prod_snapshot);
-       case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
-               return put_ulong(buf->cons_snapshot, arg);
-       case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
-               return put_ulong(buf->prod_snapshot, arg);
-       case RING_BUFFER_GET_SUBBUF:
-       {
-               unsigned long uconsume;
-               long ret;
-
-               ret = get_user(uconsume, (unsigned long __user *) arg);
-               if (ret)
-                       return ret; /* will return -EFAULT */
-               ret = lib_ring_buffer_get_subbuf(buf, uconsume);
-               if (!ret) {
-                       /* Set file position to zero at each successful "get" */
-                       filp->f_pos = 0;
-               }
-               return ret;
-       }
-       case RING_BUFFER_PUT_SUBBUF:
-               lib_ring_buffer_put_subbuf(buf);
-               return 0;
-
-       case RING_BUFFER_GET_NEXT_SUBBUF:
-       {
-               long ret;
-
-               ret = lib_ring_buffer_get_next_subbuf(buf);
-               if (!ret) {
-                       /* Set file position to zero at each successful "get" */
-                       filp->f_pos = 0;
-               }
-               return ret;
-       }
-       case RING_BUFFER_PUT_NEXT_SUBBUF:
-               lib_ring_buffer_put_next_subbuf(buf);
-               return 0;
-       case RING_BUFFER_GET_SUBBUF_SIZE:
-               return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
-                                arg);
-       case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
-       {
-               unsigned long size;
-
-               size = lib_ring_buffer_get_read_data_size(config, buf);
-               size = PAGE_ALIGN(size);
-               return put_ulong(size, arg);
-       }
-       case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
-               return put_ulong(chan->backend.subbuf_size, arg);
-       case RING_BUFFER_GET_MMAP_LEN:
-       {
-               unsigned long mmap_buf_len;
-
-               if (config->output != RING_BUFFER_MMAP)
-                       return -EINVAL;
-               mmap_buf_len = chan->backend.buf_size;
-               if (chan->backend.extra_reader_sb)
-                       mmap_buf_len += chan->backend.subbuf_size;
-               if (mmap_buf_len > INT_MAX)
-                       return -EFBIG;
-               return put_ulong(mmap_buf_len, arg);
-       }
-       case RING_BUFFER_GET_MMAP_READ_OFFSET:
-       {
-               unsigned long sb_bindex;
-
-               if (config->output != RING_BUFFER_MMAP)
-                       return -EINVAL;
-               sb_bindex = subbuffer_id_get_index(config,
-                                                  buf->backend.buf_rsb.id);
-               return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
-                                arg);
-       }
-       case RING_BUFFER_FLUSH:
-               lib_ring_buffer_switch_remote(buf);
-               return 0;
-       case RING_BUFFER_FLUSH_EMPTY:
-               lib_ring_buffer_switch_remote_empty(buf);
-               return 0;
-       case RING_BUFFER_CLEAR:
-               lib_ring_buffer_clear(buf);
-               return 0;
-       default:
-               return -ENOIOCTLCMD;
-       }
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_ioctl);
-
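Putting the non-compat commands together, a hedged userspace sketch of one read cycle (assuming the ioctl request macros are exported to userspace):

	#include <sys/ioctl.h>

	unsigned long padded_size;

	if (ioctl(fd, RING_BUFFER_GET_NEXT_SUBBUF) == 0) {
		if (ioctl(fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE, &padded_size) == 0) {
			/* consume 'padded_size' bytes via read(), splice() or mmap */
		}
		ioctl(fd, RING_BUFFER_PUT_NEXT_SUBBUF);
	}
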
-/**
- *     vfs_lib_ring_buffer_ioctl - control ring buffer reader synchronization
- *
- *     @filp: the file
- *     @cmd: the command
- *     @arg: command arg
- *
- *     This ioctl implements commands necessary for producer/consumer
- *     and flight recorder reader interaction:
- *     RING_BUFFER_GET_NEXT_SUBBUF
- *             Get the next sub-buffer that can be read. It never blocks.
- *     RING_BUFFER_PUT_NEXT_SUBBUF
- *             Release the currently read sub-buffer.
- *     RING_BUFFER_GET_SUBBUF_SIZE
- *             Returns the size of the current sub-buffer.
- *     RING_BUFFER_GET_MAX_SUBBUF_SIZE
- *             Returns the maximum size for sub-buffers.
- *     RING_BUFFER_GET_NUM_SUBBUF
- *             Returns the number of reader-visible sub-buffers in the per-CPU
- *             channel (for mmap).
- *     RING_BUFFER_GET_MMAP_READ_OFFSET
- *             Returns the offset of the sub-buffer belonging to the reader.
- *             Should only be used by mmap clients.
- */
-static
-long vfs_lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       struct lib_ring_buffer *buf = filp->private_data;
-
-       return lib_ring_buffer_ioctl(filp, cmd, arg, buf);
-}
-
-#ifdef CONFIG_COMPAT
-long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
-               unsigned long arg, struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-
-       if (lib_ring_buffer_channel_is_disabled(chan))
-               return -EIO;
-
-       switch (cmd) {
-       case RING_BUFFER_COMPAT_SNAPSHOT:
-               return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
-                                               &buf->prod_snapshot);
-       case RING_BUFFER_COMPAT_SNAPSHOT_SAMPLE_POSITIONS:
-               return lib_ring_buffer_snapshot_sample_positions(buf,
-                               &buf->cons_snapshot, &buf->prod_snapshot);
-       case RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED:
-               return compat_put_ulong(buf->cons_snapshot, arg);
-       case RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED:
-               return compat_put_ulong(buf->prod_snapshot, arg);
-       case RING_BUFFER_COMPAT_GET_SUBBUF:
-       {
-               __u32 uconsume;
-               unsigned long consume;
-               long ret;
-
-               ret = get_user(uconsume, (__u32 __user *) arg);
-               if (ret)
-                       return ret; /* will return -EFAULT */
-               consume = buf->cons_snapshot;
-               consume &= ~0xFFFFFFFFL;
-               consume |= uconsume;
-               ret = lib_ring_buffer_get_subbuf(buf, consume);
-               if (!ret) {
-                       /* Set file position to zero at each successful "get" */
-                       filp->f_pos = 0;
-               }
-               return ret;
-       }
-       case RING_BUFFER_COMPAT_PUT_SUBBUF:
-               lib_ring_buffer_put_subbuf(buf);
-               return 0;
-
-       case RING_BUFFER_COMPAT_GET_NEXT_SUBBUF:
-       {
-               long ret;
-
-               ret = lib_ring_buffer_get_next_subbuf(buf);
-               if (!ret) {
-                       /* Set file position to zero at each successful "get" */
-                       filp->f_pos = 0;
-               }
-               return ret;
-       }
-       case RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF:
-               lib_ring_buffer_put_next_subbuf(buf);
-               return 0;
-       case RING_BUFFER_COMPAT_GET_SUBBUF_SIZE:
-       {
-               unsigned long data_size;
-
-               data_size = lib_ring_buffer_get_read_data_size(config, buf);
-               if (data_size > UINT_MAX)
-                       return -EFBIG;
-               return compat_put_ulong(data_size, arg);
-       }
-       case RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE:
-       {
-               unsigned long size;
-
-               size = lib_ring_buffer_get_read_data_size(config, buf);
-               size = PAGE_ALIGN(size);
-               if (size > UINT_MAX)
-                       return -EFBIG;
-               return compat_put_ulong(size, arg);
-       }
-       case RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE:
-               if (chan->backend.subbuf_size > UINT_MAX)
-                       return -EFBIG;
-               return compat_put_ulong(chan->backend.subbuf_size, arg);
-       case RING_BUFFER_COMPAT_GET_MMAP_LEN:
-       {
-               unsigned long mmap_buf_len;
-
-               if (config->output != RING_BUFFER_MMAP)
-                       return -EINVAL;
-               mmap_buf_len = chan->backend.buf_size;
-               if (chan->backend.extra_reader_sb)
-                       mmap_buf_len += chan->backend.subbuf_size;
-               if (mmap_buf_len > UINT_MAX)
-                       return -EFBIG;
-               return compat_put_ulong(mmap_buf_len, arg);
-       }
-       case RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET:
-       {
-               unsigned long sb_bindex, read_offset;
-
-               if (config->output != RING_BUFFER_MMAP)
-                       return -EINVAL;
-               sb_bindex = subbuffer_id_get_index(config,
-                                                  buf->backend.buf_rsb.id);
-               read_offset = buf->backend.array[sb_bindex]->mmap_offset;
-               if (read_offset > UINT_MAX)
-                       return -EINVAL;
-               return compat_put_ulong(read_offset, arg);
-       }
-       case RING_BUFFER_COMPAT_FLUSH:
-               lib_ring_buffer_switch_remote(buf);
-               return 0;
-       case RING_BUFFER_COMPAT_FLUSH_EMPTY:
-               lib_ring_buffer_switch_remote_empty(buf);
-               return 0;
-       case RING_BUFFER_COMPAT_CLEAR:
-               lib_ring_buffer_clear(buf);
-               return 0;
-       default:
-               return -ENOIOCTLCMD;
-       }
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_compat_ioctl);
-
-static
-long vfs_lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
-                                 unsigned long arg)
-{
-       struct lib_ring_buffer *buf = filp->private_data;
-
-       return lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
-}
-#endif
-
-const struct file_operations lib_ring_buffer_file_operations = {
-       .owner = THIS_MODULE,
-       .open = vfs_lib_ring_buffer_open,
-       .release = vfs_lib_ring_buffer_release,
-       .poll = vfs_lib_ring_buffer_poll,
-       .splice_read = vfs_lib_ring_buffer_splice_read,
-       .mmap = vfs_lib_ring_buffer_mmap,
-       .unlocked_ioctl = vfs_lib_ring_buffer_ioctl,
-       .llseek = vfs_lib_ring_buffer_no_llseek,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = vfs_lib_ring_buffer_compat_ioctl,
-#endif
-};
-EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations);
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng ring buffer library");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/lttng-abi.c b/lttng-abi.c
deleted file mode 100644 (file)
index 01eb4d5..0000000
+++ /dev/null
@@ -1,1973 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lttng-abi.c
- *
- * LTTng ABI
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Mimic system calls for:
- * - session creation, returns a file descriptor or failure.
- *   - channel creation, returns a file descriptor or failure.
- *     - Operates on a session file descriptor
- *     - Takes all channel options as parameters.
- *   - stream get, returns a file descriptor or failure.
- *     - Operates on a channel file descriptor.
- *   - stream notifier get, returns a file descriptor or failure.
- *     - Operates on a channel file descriptor.
- *   - event creation, returns a file descriptor or failure.
- *     - Operates on a channel file descriptor
- *     - Takes an event name as parameter
- *     - Takes an instrumentation source as parameter
- *       - e.g. tracepoints, dynamic_probes...
- *     - Takes instrumentation source specific arguments.
- */
-
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/anon_inodes.h>
-#include <linux/file.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
-#include <ringbuffer/vfs.h>
-#include <ringbuffer/backend.h>
-#include <ringbuffer/frontend.h>
-#include <wrapper/poll.h>
-#include <wrapper/file.h>
-#include <wrapper/kref.h>
-#include <lttng/string-utils.h>
-#include <lttng/abi.h>
-#include <lttng/abi-old.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <lttng/tp-mempool.h>
-#include <ringbuffer/frontend_types.h>
-
-/*
- * This is LTTng's own personal way to create a system call as an external
- * module. We use ioctl() on /proc/lttng.
- */
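-/*
- * Illustrative user-space sketch (not part of this module; error handling
- * omitted; the LTTNG_KERNEL_* request numbers come from lttng/abi.h):
- *
- *   int lttng = open("/proc/lttng", O_RDONLY);
- *   int session_fd = ioctl(lttng, LTTNG_KERNEL_SESSION);
- *
- * The session file descriptor owns the session: closing it tears the
- * session down.
- */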
-
-static struct proc_dir_entry *lttng_proc_dentry;
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
-static const struct proc_ops lttng_proc_ops;
-#else
-static const struct file_operations lttng_proc_ops;
-#endif
-
-static const struct file_operations lttng_session_fops;
-static const struct file_operations lttng_channel_fops;
-static const struct file_operations lttng_metadata_fops;
-static const struct file_operations lttng_event_fops;
-static struct file_operations lttng_stream_ring_buffer_file_operations;
-
-static int put_u64(uint64_t val, unsigned long arg);
-
-/*
- * Teardown management: opened file descriptors keep a refcount on the module,
- * so the module can only be unloaded once all file descriptors are closed.
- */
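-/*
- * Concretely, this works because every file_operations below sets
- * .owner = THIS_MODULE, so the VFS pins the module while any of these
- * file descriptors remains open.
- */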
-
-static
-int lttng_abi_create_session(void)
-{
-       struct lttng_session *session;
-       struct file *session_file;
-       int session_fd, ret;
-
-       session = lttng_session_create();
-       if (!session)
-               return -ENOMEM;
-       session_fd = lttng_get_unused_fd();
-       if (session_fd < 0) {
-               ret = session_fd;
-               goto fd_error;
-       }
-       session_file = anon_inode_getfile("[lttng_session]",
-                                         &lttng_session_fops,
-                                         session, O_RDWR);
-       if (IS_ERR(session_file)) {
-               ret = PTR_ERR(session_file);
-               goto file_error;
-       }
-       session->file = session_file;
-       fd_install(session_fd, session_file);
-       return session_fd;
-
-file_error:
-       put_unused_fd(session_fd);
-fd_error:
-       lttng_session_destroy(session);
-       return ret;
-}
-
-static
-int lttng_abi_tracepoint_list(void)
-{
-       struct file *tracepoint_list_file;
-       int file_fd, ret;
-
-       file_fd = lttng_get_unused_fd();
-       if (file_fd < 0) {
-               ret = file_fd;
-               goto fd_error;
-       }
-
-       tracepoint_list_file = anon_inode_getfile("[lttng_tracepoint_list]",
-                                         &lttng_tracepoint_list_fops,
-                                         NULL, O_RDWR);
-       if (IS_ERR(tracepoint_list_file)) {
-               ret = PTR_ERR(tracepoint_list_file);
-               goto file_error;
-       }
-       ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
-       if (ret < 0)
-               goto open_error;
-       fd_install(file_fd, tracepoint_list_file);
-       return file_fd;
-
-open_error:
-       fput(tracepoint_list_file);
-file_error:
-       put_unused_fd(file_fd);
-fd_error:
-       return ret;
-}
-
-#ifndef CONFIG_HAVE_SYSCALL_TRACEPOINTS
-static inline
-int lttng_abi_syscall_list(void)
-{
-       return -ENOSYS;
-}
-#else
-static
-int lttng_abi_syscall_list(void)
-{
-       struct file *syscall_list_file;
-       int file_fd, ret;
-
-       file_fd = lttng_get_unused_fd();
-       if (file_fd < 0) {
-               ret = file_fd;
-               goto fd_error;
-       }
-
-       syscall_list_file = anon_inode_getfile("[lttng_syscall_list]",
-                                         &lttng_syscall_list_fops,
-                                         NULL, O_RDWR);
-       if (IS_ERR(syscall_list_file)) {
-               ret = PTR_ERR(syscall_list_file);
-               goto file_error;
-       }
-       ret = lttng_syscall_list_fops.open(NULL, syscall_list_file);
-       if (ret < 0)
-               goto open_error;
-       fd_install(file_fd, syscall_list_file);
-       return file_fd;
-
-open_error:
-       fput(syscall_list_file);
-file_error:
-       put_unused_fd(file_fd);
-fd_error:
-       return ret;
-}
-#endif
-
-static
-void lttng_abi_tracer_version(struct lttng_kernel_tracer_version *v)
-{
-       v->major = LTTNG_MODULES_MAJOR_VERSION;
-       v->minor = LTTNG_MODULES_MINOR_VERSION;
-       v->patchlevel = LTTNG_MODULES_PATCHLEVEL_VERSION;
-}
-
-static
-void lttng_abi_tracer_abi_version(struct lttng_kernel_tracer_abi_version *v)
-{
-       v->major = LTTNG_MODULES_ABI_MAJOR_VERSION;
-       v->minor = LTTNG_MODULES_ABI_MINOR_VERSION;
-}
-
-static
-long lttng_abi_add_context(struct file *file,
-       struct lttng_kernel_context *context_param,
-       struct lttng_ctx **ctx, struct lttng_session *session)
-{
-       if (session->been_active)
-               return -EPERM;
-
-       switch (context_param->ctx) {
-       case LTTNG_KERNEL_CONTEXT_PID:
-               return lttng_add_pid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_PRIO:
-               return lttng_add_prio_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_NICE:
-               return lttng_add_nice_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_VPID:
-               return lttng_add_vpid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_TID:
-               return lttng_add_tid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_VTID:
-               return lttng_add_vtid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_PPID:
-               return lttng_add_ppid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_VPPID:
-               return lttng_add_vppid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
-               context_param->u.perf_counter.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-               return lttng_add_perf_counter_to_ctx(context_param->u.perf_counter.type,
-                               context_param->u.perf_counter.config,
-                               context_param->u.perf_counter.name,
-                               ctx);
-       case LTTNG_KERNEL_CONTEXT_PROCNAME:
-               return lttng_add_procname_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_HOSTNAME:
-               return lttng_add_hostname_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_CPU_ID:
-               return lttng_add_cpu_id_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_INTERRUPTIBLE:
-               return lttng_add_interruptible_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_NEED_RESCHEDULE:
-               return lttng_add_need_reschedule_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_PREEMPTIBLE:
-               return lttng_add_preemptible_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_MIGRATABLE:
-               return lttng_add_migratable_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
-       case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
-               return lttng_add_callstack_to_ctx(ctx, context_param->ctx);
-       case LTTNG_KERNEL_CONTEXT_CGROUP_NS:
-               return lttng_add_cgroup_ns_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_IPC_NS:
-               return lttng_add_ipc_ns_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_MNT_NS:
-               return lttng_add_mnt_ns_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_NET_NS:
-               return lttng_add_net_ns_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_PID_NS:
-               return lttng_add_pid_ns_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_USER_NS:
-               return lttng_add_user_ns_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_UTS_NS:
-               return lttng_add_uts_ns_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_UID:
-               return lttng_add_uid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_EUID:
-               return lttng_add_euid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_SUID:
-               return lttng_add_suid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_GID:
-               return lttng_add_gid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_EGID:
-               return lttng_add_egid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_SGID:
-               return lttng_add_sgid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_VUID:
-               return lttng_add_vuid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_VEUID:
-               return lttng_add_veuid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_VSUID:
-               return lttng_add_vsuid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_VGID:
-               return lttng_add_vgid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_VEGID:
-               return lttng_add_vegid_to_ctx(ctx);
-       case LTTNG_KERNEL_CONTEXT_VSGID:
-               return lttng_add_vsgid_to_ctx(ctx);
-       default:
-               return -EINVAL;
-       }
-}
-
-/**
- *     lttng_ioctl - lttng syscall through ioctl
- *
- *     @file: the file
- *     @cmd: the command
- *     @arg: command arg
- *
- *     This ioctl implements lttng commands:
- *     LTTNG_KERNEL_SESSION
- *             Returns an LTTng trace session file descriptor
- *     LTTNG_KERNEL_TRACER_VERSION
- *             Returns the LTTng kernel tracer version
- *     LTTNG_KERNEL_TRACEPOINT_LIST
- *             Returns a file descriptor listing available tracepoints
- *     LTTNG_KERNEL_SYSCALL_LIST
- *             Returns a file descriptor listing available system calls
- *     LTTNG_KERNEL_WAIT_QUIESCENT
- *             Returns after all previously running probes have completed
- *     LTTNG_KERNEL_TRACER_ABI_VERSION
- *             Returns the LTTng kernel tracer ABI version
- *
- * The returned session will be deleted when its file descriptor is closed.
- */
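-/*
- * Sketch of a version query from user space (illustrative only, error
- * handling omitted):
- *
- *   struct lttng_kernel_tracer_version v;
- *
- *   if (ioctl(lttng_fd, LTTNG_KERNEL_TRACER_VERSION, &v) == 0)
- *           printf("%u.%u.%u\n", v.major, v.minor, v.patchlevel);
- */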
-static
-long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       switch (cmd) {
-       case LTTNG_KERNEL_OLD_SESSION:
-       case LTTNG_KERNEL_SESSION:
-               return lttng_abi_create_session();
-       case LTTNG_KERNEL_OLD_TRACER_VERSION:
-       {
-               struct lttng_kernel_tracer_version v;
-               struct lttng_kernel_old_tracer_version oldv;
-               struct lttng_kernel_old_tracer_version __user *uversion =
-                       (struct lttng_kernel_old_tracer_version __user *) arg;
-
-               lttng_abi_tracer_version(&v);
-               oldv.major = v.major;
-               oldv.minor = v.minor;
-               oldv.patchlevel = v.patchlevel;
-
-               if (copy_to_user(uversion, &oldv, sizeof(oldv)))
-                       return -EFAULT;
-               return 0;
-       }
-       case LTTNG_KERNEL_TRACER_VERSION:
-       {
-               struct lttng_kernel_tracer_version version;
-               struct lttng_kernel_tracer_version __user *uversion =
-                       (struct lttng_kernel_tracer_version __user *) arg;
-
-               lttng_abi_tracer_version(&version);
-
-               if (copy_to_user(uversion, &version, sizeof(version)))
-                       return -EFAULT;
-               return 0;
-       }
-       case LTTNG_KERNEL_TRACER_ABI_VERSION:
-       {
-               struct lttng_kernel_tracer_abi_version version;
-               struct lttng_kernel_tracer_abi_version __user *uversion =
-                       (struct lttng_kernel_tracer_abi_version __user *) arg;
-
-               lttng_abi_tracer_abi_version(&version);
-
-               if (copy_to_user(uversion, &version, sizeof(version)))
-                       return -EFAULT;
-               return 0;
-       }
-       case LTTNG_KERNEL_OLD_TRACEPOINT_LIST:
-       case LTTNG_KERNEL_TRACEPOINT_LIST:
-               return lttng_abi_tracepoint_list();
-       case LTTNG_KERNEL_SYSCALL_LIST:
-               return lttng_abi_syscall_list();
-       case LTTNG_KERNEL_OLD_WAIT_QUIESCENT:
-       case LTTNG_KERNEL_WAIT_QUIESCENT:
-               synchronize_trace();
-               return 0;
-       case LTTNG_KERNEL_OLD_CALIBRATE:
-       {
-               struct lttng_kernel_old_calibrate __user *ucalibrate =
-                       (struct lttng_kernel_old_calibrate __user *) arg;
-               struct lttng_kernel_old_calibrate old_calibrate;
-               struct lttng_kernel_calibrate calibrate;
-               int ret;
-
-               if (copy_from_user(&old_calibrate, ucalibrate, sizeof(old_calibrate)))
-                       return -EFAULT;
-               calibrate.type = old_calibrate.type;
-               ret = lttng_calibrate(&calibrate);
-               if (copy_to_user(ucalibrate, &old_calibrate, sizeof(old_calibrate)))
-                       return -EFAULT;
-               return ret;
-       }
-       case LTTNG_KERNEL_CALIBRATE:
-       {
-               struct lttng_kernel_calibrate __user *ucalibrate =
-                       (struct lttng_kernel_calibrate __user *) arg;
-               struct lttng_kernel_calibrate calibrate;
-               int ret;
-
-               if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
-                       return -EFAULT;
-               ret = lttng_calibrate(&calibrate);
-               if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
-                       return -EFAULT;
-               return ret;
-       }
-       default:
-               return -ENOIOCTLCMD;
-       }
-}
-
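-/*
- * Linux 5.6 moved /proc file callbacks from struct file_operations to
- * the dedicated struct proc_ops, hence the version check below.
- */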
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
-static const struct proc_ops lttng_proc_ops = {
-       .proc_ioctl = lttng_ioctl,
-#ifdef CONFIG_COMPAT
-       .proc_compat_ioctl = lttng_ioctl,
-#endif /* CONFIG_COMPAT */
-};
-#else
-static const struct file_operations lttng_proc_ops = {
-       .owner = THIS_MODULE,
-       .unlocked_ioctl = lttng_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = lttng_ioctl,
-#endif /* CONFIG_COMPAT */
-};
-#endif
-
-static
-int lttng_abi_create_channel(struct file *session_file,
-                            struct lttng_kernel_channel *chan_param,
-                            enum channel_type channel_type)
-{
-       struct lttng_session *session = session_file->private_data;
-       const struct file_operations *fops = NULL;
-       const char *transport_name;
-       struct lttng_channel *chan;
-       struct file *chan_file;
-       int chan_fd;
-       int ret = 0;
-
-       chan_fd = lttng_get_unused_fd();
-       if (chan_fd < 0) {
-               ret = chan_fd;
-               goto fd_error;
-       }
-       switch (channel_type) {
-       case PER_CPU_CHANNEL:
-               fops = &lttng_channel_fops;
-               break;
-       case METADATA_CHANNEL:
-               fops = &lttng_metadata_fops;
-               break;
-       }
-
-       chan_file = anon_inode_getfile("[lttng_channel]",
-                                      fops,
-                                      NULL, O_RDWR);
-       if (IS_ERR(chan_file)) {
-               ret = PTR_ERR(chan_file);
-               goto file_error;
-       }
-       switch (channel_type) {
-       case PER_CPU_CHANNEL:
-               if (chan_param->output == LTTNG_KERNEL_SPLICE) {
-                       transport_name = chan_param->overwrite ?
-                               "relay-overwrite" : "relay-discard";
-               } else if (chan_param->output == LTTNG_KERNEL_MMAP) {
-                       transport_name = chan_param->overwrite ?
-                               "relay-overwrite-mmap" : "relay-discard-mmap";
-               } else {
-                       /* Unknown output type: release the fd and file. */
-                       ret = -EINVAL;
-                       goto refcount_error;
-               }
-               break;
-       case METADATA_CHANNEL:
-               if (chan_param->output == LTTNG_KERNEL_SPLICE) {
-                       transport_name = "relay-metadata";
-               } else if (chan_param->output == LTTNG_KERNEL_MMAP) {
-                       transport_name = "relay-metadata-mmap";
-               } else {
-                       ret = -EINVAL;
-                       goto refcount_error;
-               }
-               break;
-       default:
-               transport_name = "<unknown>";
-               break;
-       }
-       if (!atomic_long_add_unless(&session_file->f_count, 1, LONG_MAX)) {
-               ret = -EOVERFLOW;
-               goto refcount_error;
-       }
-       /*
-        * No failure path is tolerated after channel creation: the channel
-        * stays invariant for the rest of the session.
-        */
-       chan = lttng_channel_create(session, transport_name, NULL,
-                                 chan_param->subbuf_size,
-                                 chan_param->num_subbuf,
-                                 chan_param->switch_timer_interval,
-                                 chan_param->read_timer_interval,
-                                 channel_type);
-       if (!chan) {
-               ret = -EINVAL;
-               goto chan_error;
-       }
-       chan->file = chan_file;
-       chan_file->private_data = chan;
-       fd_install(chan_fd, chan_file);
-
-       return chan_fd;
-
-chan_error:
-       atomic_long_dec(&session_file->f_count);
-refcount_error:
-       fput(chan_file);
-file_error:
-       put_unused_fd(chan_fd);
-fd_error:
-       return ret;
-}
-
-static
-int lttng_abi_session_set_name(struct lttng_session *session,
-               struct lttng_kernel_session_name *name)
-{
-       size_t len;
-
-       len = strnlen(name->name, LTTNG_KERNEL_SESSION_NAME_LEN);
-
-       if (len == LTTNG_KERNEL_SESSION_NAME_LEN) {
-               /* Name is too long/malformed */
-               return -EINVAL;
-       }
-
-       strcpy(session->name, name->name);
-       return 0;
-}
-
-static
-int lttng_abi_session_set_creation_time(struct lttng_session *session,
-               struct lttng_kernel_session_creation_time *time)
-{
-       size_t len;
-
-       len = strnlen(time->iso8601, LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN);
-
-       if (len == LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN) {
-               /* Time is too long/malformed */
-               return -EINVAL;
-       }
-
-       strcpy(session->creation_time, time->iso8601);
-       return 0;
-}
-
-static
-enum tracker_type get_tracker_type(struct lttng_kernel_tracker_args *tracker)
-{
-       switch (tracker->type) {
-       case LTTNG_KERNEL_TRACKER_PID:
-               return TRACKER_PID;
-       case LTTNG_KERNEL_TRACKER_VPID:
-               return TRACKER_VPID;
-       case LTTNG_KERNEL_TRACKER_UID:
-               return TRACKER_UID;
-       case LTTNG_KERNEL_TRACKER_VUID:
-               return TRACKER_VUID;
-       case LTTNG_KERNEL_TRACKER_GID:
-               return TRACKER_GID;
-       case LTTNG_KERNEL_TRACKER_VGID:
-               return TRACKER_VGID;
-       default:
-               return TRACKER_UNKNOWN;
-       }
-}
-
-/**
- *     lttng_session_ioctl - lttng session fd ioctl
- *
- *     @file: the file
- *     @cmd: the command
- *     @arg: command arg
- *
- *     This ioctl implements lttng commands:
- *     LTTNG_KERNEL_CHANNEL
- *             Returns an LTTng channel file descriptor
- *     LTTNG_KERNEL_ENABLE
- *             Enables tracing for a session (weak enable)
- *     LTTNG_KERNEL_DISABLE
- *             Disables tracing for a session (strong disable)
- *     LTTNG_KERNEL_METADATA
- *             Returns a LTTng metadata file descriptor
- *     LTTNG_KERNEL_SESSION_TRACK_PID
- *             Add PID to session PID tracker
- *     LTTNG_KERNEL_SESSION_UNTRACK_PID
- *             Remove PID from session PID tracker
- *     LTTNG_KERNEL_SESSION_TRACK_ID
- *             Add ID to tracker
- *     LTTNG_KERNEL_SESSION_UNTRACK_ID
- *             Remove ID from tracker
- *
- * The returned channel will be deleted when its file descriptor is closed.
- */
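-/*
- * Typical sequence on a session file descriptor (user-space sketch,
- * error handling omitted; chan_param filled in by the caller):
- *
- *   int chan_fd = ioctl(session_fd, LTTNG_KERNEL_CHANNEL, &chan_param);
- *   int metadata_fd = ioctl(session_fd, LTTNG_KERNEL_METADATA, &chan_param);
- *   ioctl(session_fd, LTTNG_KERNEL_SESSION_START, 0);
- */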
-static
-long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct lttng_session *session = file->private_data;
-       struct lttng_kernel_channel chan_param;
-       struct lttng_kernel_old_channel old_chan_param;
-
-       switch (cmd) {
-       case LTTNG_KERNEL_OLD_CHANNEL:
-       {
-               if (copy_from_user(&old_chan_param,
-                               (struct lttng_kernel_old_channel __user *) arg,
-                               sizeof(struct lttng_kernel_old_channel)))
-                       return -EFAULT;
-               chan_param.overwrite = old_chan_param.overwrite;
-               chan_param.subbuf_size = old_chan_param.subbuf_size;
-               chan_param.num_subbuf = old_chan_param.num_subbuf;
-               chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
-               chan_param.read_timer_interval = old_chan_param.read_timer_interval;
-               chan_param.output = old_chan_param.output;
-
-               return lttng_abi_create_channel(file, &chan_param,
-                               PER_CPU_CHANNEL);
-       }
-       case LTTNG_KERNEL_CHANNEL:
-       {
-               if (copy_from_user(&chan_param,
-                               (struct lttng_kernel_channel __user *) arg,
-                               sizeof(struct lttng_kernel_channel)))
-                       return -EFAULT;
-               return lttng_abi_create_channel(file, &chan_param,
-                               PER_CPU_CHANNEL);
-       }
-       case LTTNG_KERNEL_OLD_SESSION_START:
-       case LTTNG_KERNEL_OLD_ENABLE:
-       case LTTNG_KERNEL_SESSION_START:
-       case LTTNG_KERNEL_ENABLE:
-               return lttng_session_enable(session);
-       case LTTNG_KERNEL_OLD_SESSION_STOP:
-       case LTTNG_KERNEL_OLD_DISABLE:
-       case LTTNG_KERNEL_SESSION_STOP:
-       case LTTNG_KERNEL_DISABLE:
-               return lttng_session_disable(session);
-       case LTTNG_KERNEL_OLD_METADATA:
-       {
-               if (copy_from_user(&old_chan_param,
-                               (struct lttng_kernel_old_channel __user *) arg,
-                               sizeof(struct lttng_kernel_old_channel)))
-                       return -EFAULT;
-               chan_param.overwrite = old_chan_param.overwrite;
-               chan_param.subbuf_size = old_chan_param.subbuf_size;
-               chan_param.num_subbuf = old_chan_param.num_subbuf;
-               chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
-               chan_param.read_timer_interval = old_chan_param.read_timer_interval;
-               chan_param.output = old_chan_param.output;
-
-               return lttng_abi_create_channel(file, &chan_param,
-                               METADATA_CHANNEL);
-       }
-       case LTTNG_KERNEL_METADATA:
-       {
-               if (copy_from_user(&chan_param,
-                                       (struct lttng_kernel_channel __user *) arg,
-                                       sizeof(struct lttng_kernel_channel)))
-                       return -EFAULT;
-               return lttng_abi_create_channel(file, &chan_param,
-                               METADATA_CHANNEL);
-       }
-       case LTTNG_KERNEL_SESSION_TRACK_PID:
-               return lttng_session_track_id(session, TRACKER_PID, (int) arg);
-       case LTTNG_KERNEL_SESSION_UNTRACK_PID:
-               return lttng_session_untrack_id(session, TRACKER_PID, (int) arg);
-       case LTTNG_KERNEL_SESSION_TRACK_ID:
-       {
-               struct lttng_kernel_tracker_args tracker;
-               enum tracker_type tracker_type;
-
-               if (copy_from_user(&tracker,
-                               (struct lttng_kernel_tracker_args __user *) arg,
-                               sizeof(struct lttng_kernel_tracker_args)))
-                       return -EFAULT;
-               tracker_type = get_tracker_type(&tracker);
-               if (tracker_type == TRACKER_UNKNOWN)
-                       return -EINVAL;
-               return lttng_session_track_id(session, tracker_type, tracker.id);
-       }
-       case LTTNG_KERNEL_SESSION_UNTRACK_ID:
-       {
-               struct lttng_kernel_tracker_args tracker;
-               enum tracker_type tracker_type;
-
-               if (copy_from_user(&tracker,
-                               (struct lttng_kernel_tracker_args __user *) arg,
-                               sizeof(struct lttng_kernel_tracker_args)))
-                       return -EFAULT;
-               tracker_type = get_tracker_type(&tracker);
-               if (tracker_type == TRACKER_UNKNOWN)
-                       return -EINVAL;
-               return lttng_session_untrack_id(session, tracker_type,
-                               tracker.id);
-       }
-       case LTTNG_KERNEL_SESSION_LIST_TRACKER_PIDS:
-               return lttng_session_list_tracker_ids(session, TRACKER_PID);
-       case LTTNG_KERNEL_SESSION_LIST_TRACKER_IDS:
-       {
-               struct lttng_kernel_tracker_args tracker;
-               enum tracker_type tracker_type;
-
-               if (copy_from_user(&tracker,
-                               (struct lttng_kernel_tracker_args __user *) arg,
-                               sizeof(struct lttng_kernel_tracker_args)))
-                       return -EFAULT;
-               tracker_type = get_tracker_type(&tracker);
-               if (tracker_type == TRACKER_UNKNOWN)
-                       return -EINVAL;
-               return lttng_session_list_tracker_ids(session, tracker_type);
-       }
-       case LTTNG_KERNEL_SESSION_METADATA_REGEN:
-               return lttng_session_metadata_regenerate(session);
-       case LTTNG_KERNEL_SESSION_STATEDUMP:
-               return lttng_session_statedump(session);
-       case LTTNG_KERNEL_SESSION_SET_NAME:
-       {
-               struct lttng_kernel_session_name name;
-
-               if (copy_from_user(&name,
-                               (struct lttng_kernel_session_name __user *) arg,
-                               sizeof(struct lttng_kernel_session_name)))
-                       return -EFAULT;
-               return lttng_abi_session_set_name(session, &name);
-       }
-       case LTTNG_KERNEL_SESSION_SET_CREATION_TIME:
-       {
-               struct lttng_kernel_session_creation_time time;
-
-               if (copy_from_user(&time,
-                               (struct lttng_kernel_session_creation_time __user *) arg,
-                               sizeof(struct lttng_kernel_session_creation_time)))
-                       return -EFAULT;
-               return lttng_abi_session_set_creation_time(session, &time);
-       }
-       default:
-               return -ENOIOCTLCMD;
-       }
-}
-
-/*
- * Called when the last file reference is dropped.
- *
- * Big fat note: channels and events are invariant for the whole session after
- * their creation. So this session destruction also destroys all channel and
- * event structures specific to this session (they are not destroyed when their
- * individual file is released).
- */
-static
-int lttng_session_release(struct inode *inode, struct file *file)
-{
-       struct lttng_session *session = file->private_data;
-
-       if (session)
-               lttng_session_destroy(session);
-       return 0;
-}
-
-static const struct file_operations lttng_session_fops = {
-       .owner = THIS_MODULE,
-       .release = lttng_session_release,
-       .unlocked_ioctl = lttng_session_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = lttng_session_ioctl,
-#endif
-};
-
-/**
- *     lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
- *     @filp: the file
- *     @wait: poll table
- *
- *     Handles the poll operations for the metadata channels.
- */
-static
-unsigned int lttng_metadata_ring_buffer_poll(struct file *filp,
-               poll_table *wait)
-{
-       struct lttng_metadata_stream *stream = filp->private_data;
-       struct lib_ring_buffer *buf = stream->priv;
-       int finalized;
-       unsigned int mask = 0;
-
-       if (filp->f_mode & FMODE_READ) {
-               poll_wait_set_exclusive(wait);
-               poll_wait(filp, &stream->read_wait, wait);
-
-               finalized = stream->finalized;
-
-               /*
-                * lib_ring_buffer_is_finalized() contains a smp_rmb()
-                * ordering the load of "finalized" before the offsets loads.
-                */
-               WARN_ON(atomic_long_read(&buf->active_readers) != 1);
-
-               if (finalized)
-                       mask |= POLLHUP;
-
-               mutex_lock(&stream->metadata_cache->lock);
-               if (stream->metadata_cache->metadata_written >
-                               stream->metadata_out)
-                       mask |= POLLIN;
-               mutex_unlock(&stream->metadata_cache->lock);
-       }
-
-       return mask;
-}
-
-static
-void lttng_metadata_ring_buffer_ioctl_put_next_subbuf(struct file *filp,
-               unsigned int cmd, unsigned long arg)
-{
-       struct lttng_metadata_stream *stream = filp->private_data;
-
-       stream->metadata_out = stream->metadata_in;
-}
-
-/*
- * Reset the counter of how much metadata has been consumed to 0. That way,
- * the consumer receives the content of the metadata cache unchanged. This
- * differs from metadata regeneration, where the offset from epoch is
- * resampled; here we want the exact same content as the last time the
- * metadata was generated. This command is only allowed once all the metadata
- * written to the cache has been output to the metadata stream, to avoid
- * corrupting the metadata file.
- *
- * Return 0 on success, a negative value on error.
- */
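-/*
- * Consumer-side sketch (stream_fd is a metadata stream file descriptor;
- * the call fails with EBUSY until the whole cache has been consumed):
- *
- *   ioctl(stream_fd, RING_BUFFER_METADATA_CACHE_DUMP, 0);
- */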
-static
-int lttng_metadata_cache_dump(struct lttng_metadata_stream *stream)
-{
-       int ret;
-       struct lttng_metadata_cache *cache = stream->metadata_cache;
-
-       mutex_lock(&cache->lock);
-       if (stream->metadata_out != cache->metadata_written) {
-               ret = -EBUSY;
-               goto end;
-       }
-       stream->metadata_out = 0;
-       stream->metadata_in = 0;
-       wake_up_interruptible(&stream->read_wait);
-       ret = 0;
-
-end:
-       mutex_unlock(&cache->lock);
-       return ret;
-}
-
-static
-long lttng_metadata_ring_buffer_ioctl(struct file *filp,
-               unsigned int cmd, unsigned long arg)
-{
-       int ret;
-       struct lttng_metadata_stream *stream = filp->private_data;
-       struct lib_ring_buffer *buf = stream->priv;
-
-       switch (cmd) {
-       case RING_BUFFER_GET_NEXT_SUBBUF:
-       {
-               struct channel *chan = buf->backend.chan;
-
-               ret = lttng_metadata_output_channel(stream, chan);
-               if (ret > 0) {
-                       lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-                       ret = 0;
-               } else if (ret < 0) {
-                       goto err;
-               }
-               break;
-       }
-       case RING_BUFFER_GET_SUBBUF:
-       {
-               /*
-                * Random access is not allowed for metadata channel.
-                */
-               return -ENOSYS;
-       }
-       case RING_BUFFER_FLUSH_EMPTY:   /* Fall-through. */
-       case RING_BUFFER_FLUSH:
-       {
-               struct channel *chan = buf->backend.chan;
-
-               /*
-                * Before doing the actual ring buffer flush, write up to one
-                * packet of metadata in the ring buffer.
-                */
-               ret = lttng_metadata_output_channel(stream, chan);
-               if (ret < 0)
-                       goto err;
-               break;
-       }
-       case RING_BUFFER_GET_METADATA_VERSION:
-               return put_u64(stream->version, arg);
-       case RING_BUFFER_METADATA_CACHE_DUMP:
-               return lttng_metadata_cache_dump(stream);
-       default:
-               break;
-       }
-       /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
-
-       /* Performing lib ring buffer ioctl after our own. */
-       ret = lib_ring_buffer_ioctl(filp, cmd, arg, buf);
-       if (ret < 0)
-               goto err;
-
-       switch (cmd) {
-       case RING_BUFFER_PUT_NEXT_SUBBUF:
-       {
-               lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
-                               cmd, arg);
-               break;
-       }
-       default:
-               break;
-       }
-err:
-       return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static
-long lttng_metadata_ring_buffer_compat_ioctl(struct file *filp,
-               unsigned int cmd, unsigned long arg)
-{
-       int ret;
-       struct lttng_metadata_stream *stream = filp->private_data;
-       struct lib_ring_buffer *buf = stream->priv;
-
-       switch (cmd) {
-       case RING_BUFFER_GET_NEXT_SUBBUF:
-       {
-               struct channel *chan = buf->backend.chan;
-
-               ret = lttng_metadata_output_channel(stream, chan);
-               if (ret > 0) {
-                       lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-                       ret = 0;
-               } else if (ret < 0) {
-                       goto err;
-               }
-               break;
-       }
-       case RING_BUFFER_GET_SUBBUF:
-       {
-               /*
-                * Random access is not allowed for metadata channel.
-                */
-               return -ENOSYS;
-       }
-       case RING_BUFFER_FLUSH_EMPTY:   /* Fall-through. */
-       case RING_BUFFER_FLUSH:
-       {
-               struct channel *chan = buf->backend.chan;
-
-               /*
-                * Before doing the actual ring buffer flush, write up to one
-                * packet of metadata in the ring buffer.
-                */
-               ret = lttng_metadata_output_channel(stream, chan);
-               if (ret < 0)
-                       goto err;
-               break;
-       }
-       case RING_BUFFER_GET_METADATA_VERSION:
-               return put_u64(stream->version, arg);
-       case RING_BUFFER_METADATA_CACHE_DUMP:
-               return lttng_metadata_cache_dump(stream);
-       default:
-               break;
-       }
-       /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
-
-       /* Performing lib ring buffer ioctl after our own. */
-       ret = lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
-       if (ret < 0)
-               goto err;
-
-       switch (cmd) {
-       case RING_BUFFER_PUT_NEXT_SUBBUF:
-       {
-               lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
-                               cmd, arg);
-               break;
-       }
-       default:
-               break;
-       }
-err:
-       return ret;
-}
-#endif
-
-/*
- * This is not used by anonymous file descriptors. This code is left
- * there if we ever want to implement an inode with open() operation.
- */
-static
-int lttng_metadata_ring_buffer_open(struct inode *inode, struct file *file)
-{
-       struct lttng_metadata_stream *stream = inode->i_private;
-       struct lib_ring_buffer *buf = stream->priv;
-
-       file->private_data = buf;
-       /*
-        * Since the lifetime of the metadata cache differs from that of the
-        * session, we need to keep our own reference on the transport.
-        */
-       if (!try_module_get(stream->transport->owner)) {
-               printk(KERN_WARNING "LTT : Can't lock transport module.\n");
-               return -EBUSY;
-       }
-       return lib_ring_buffer_open(inode, file, buf);
-}
-
-static
-int lttng_metadata_ring_buffer_release(struct inode *inode, struct file *file)
-{
-       struct lttng_metadata_stream *stream = file->private_data;
-       struct lib_ring_buffer *buf = stream->priv;
-
-       kref_put(&stream->metadata_cache->refcount, metadata_cache_destroy);
-       module_put(stream->transport->owner);
-       return lib_ring_buffer_release(inode, file, buf);
-}
-
-static
-ssize_t lttng_metadata_ring_buffer_splice_read(struct file *in, loff_t *ppos,
-               struct pipe_inode_info *pipe, size_t len,
-               unsigned int flags)
-{
-       struct lttng_metadata_stream *stream = in->private_data;
-       struct lib_ring_buffer *buf = stream->priv;
-
-       return lib_ring_buffer_splice_read(in, ppos, pipe, len,
-                       flags, buf);
-}
-
-static
-int lttng_metadata_ring_buffer_mmap(struct file *filp,
-               struct vm_area_struct *vma)
-{
-       struct lttng_metadata_stream *stream = filp->private_data;
-       struct lib_ring_buffer *buf = stream->priv;
-
-       return lib_ring_buffer_mmap(filp, vma, buf);
-}
-
-static
-const struct file_operations lttng_metadata_ring_buffer_file_operations = {
-       .owner = THIS_MODULE,
-       .open = lttng_metadata_ring_buffer_open,
-       .release = lttng_metadata_ring_buffer_release,
-       .poll = lttng_metadata_ring_buffer_poll,
-       .splice_read = lttng_metadata_ring_buffer_splice_read,
-       .mmap = lttng_metadata_ring_buffer_mmap,
-       .unlocked_ioctl = lttng_metadata_ring_buffer_ioctl,
-       .llseek = vfs_lib_ring_buffer_no_llseek,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = lttng_metadata_ring_buffer_compat_ioctl,
-#endif
-};
-
-static
-int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
-               const struct file_operations *fops)
-{
-       int stream_fd, ret;
-       struct file *stream_file;
-
-       stream_fd = lttng_get_unused_fd();
-       if (stream_fd < 0) {
-               ret = stream_fd;
-               goto fd_error;
-       }
-       stream_file = anon_inode_getfile("[lttng_stream]", fops,
-                       stream_priv, O_RDWR);
-       if (IS_ERR(stream_file)) {
-               ret = PTR_ERR(stream_file);
-               goto file_error;
-       }
-       /*
-        * OPEN_FMODE, called within anon_inode_getfile/alloc_file, doesn't
-        * honor FMODE_LSEEK, FMODE_PREAD or FMODE_PWRITE. We need to read from
-        * this file descriptor, so we set FMODE_PREAD here.
-        */
-       stream_file->f_mode |= FMODE_PREAD;
-       fd_install(stream_fd, stream_file);
-       /*
-        * The stream holds a reference to the channel within the generic ring
-        * buffer library, so no need to hold a refcount on the channel and
-        * session files here.
-        */
-       return stream_fd;
-
-file_error:
-       put_unused_fd(stream_fd);
-fd_error:
-       return ret;
-}
-
-static
-int lttng_abi_open_stream(struct file *channel_file)
-{
-       struct lttng_channel *channel = channel_file->private_data;
-       struct lib_ring_buffer *buf;
-       int ret;
-       void *stream_priv;
-
-       buf = channel->ops->buffer_read_open(channel->chan);
-       if (!buf)
-               return -ENOENT;
-
-       stream_priv = buf;
-       ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
-                       &lttng_stream_ring_buffer_file_operations);
-       if (ret < 0)
-               goto fd_error;
-
-       return ret;
-
-fd_error:
-       channel->ops->buffer_read_close(buf);
-       return ret;
-}
-
-static
-int lttng_abi_open_metadata_stream(struct file *channel_file)
-{
-       struct lttng_channel *channel = channel_file->private_data;
-       struct lttng_session *session = channel->session;
-       struct lib_ring_buffer *buf;
-       int ret;
-       struct lttng_metadata_stream *metadata_stream;
-       void *stream_priv;
-
-       buf = channel->ops->buffer_read_open(channel->chan);
-       if (!buf)
-               return -ENOENT;
-
-       metadata_stream = kzalloc(sizeof(struct lttng_metadata_stream),
-                       GFP_KERNEL);
-       if (!metadata_stream) {
-               ret = -ENOMEM;
-               goto nomem;
-       }
-       metadata_stream->metadata_cache = session->metadata_cache;
-       init_waitqueue_head(&metadata_stream->read_wait);
-       metadata_stream->priv = buf;
-       stream_priv = metadata_stream;
-       metadata_stream->transport = channel->transport;
-
-       /*
-        * Since the lifetime of the metadata cache differs from that of the
-        * session, we need to keep our own reference on the transport.
-        */
-       if (!try_module_get(metadata_stream->transport->owner)) {
-               printk(KERN_WARNING "LTT : Can't lock transport module.\n");
-               ret = -EINVAL;
-               goto notransport;
-       }
-
-       if (!lttng_kref_get(&session->metadata_cache->refcount)) {
-               ret = -EOVERFLOW;
-               goto kref_error;
-       }
-
-       ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
-                       &lttng_metadata_ring_buffer_file_operations);
-       if (ret < 0)
-               goto fd_error;
-
-       list_add(&metadata_stream->list,
-               &session->metadata_cache->metadata_stream);
-       return ret;
-
-fd_error:
-       kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
-kref_error:
-       module_put(metadata_stream->transport->owner);
-notransport:
-       kfree(metadata_stream);
-nomem:
-       channel->ops->buffer_read_close(buf);
-       return ret;
-}
-
-static
-int lttng_abi_create_event(struct file *channel_file,
-                          struct lttng_kernel_event *event_param)
-{
-       struct lttng_channel *channel = channel_file->private_data;
-       int event_fd, ret;
-       struct file *event_file;
-       void *priv;
-
-       event_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-       switch (event_param->instrumentation) {
-       case LTTNG_KERNEL_KRETPROBE:
-               event_param->u.kretprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-               break;
-       case LTTNG_KERNEL_KPROBE:
-               event_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-               break;
-       case LTTNG_KERNEL_FUNCTION:
-               WARN_ON_ONCE(1);
-               /* Not implemented. */
-               break;
-       default:
-               break;
-       }
-       event_fd = lttng_get_unused_fd();
-       if (event_fd < 0) {
-               ret = event_fd;
-               goto fd_error;
-       }
-       event_file = anon_inode_getfile("[lttng_event]",
-                                       &lttng_event_fops,
-                                       NULL, O_RDWR);
-       if (IS_ERR(event_file)) {
-               ret = PTR_ERR(event_file);
-               goto file_error;
-       }
-       /* The event holds a reference on the channel */
-       if (!atomic_long_add_unless(&channel_file->f_count, 1, LONG_MAX)) {
-               ret = -EOVERFLOW;
-               goto refcount_error;
-       }
-       if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
-                       || event_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
-               struct lttng_enabler *enabler;
-
-               if (strutils_is_star_glob_pattern(event_param->name)) {
-                       /*
-                        * If the event name is a star globbing pattern,
-                        * we create the special star globbing enabler.
-                        */
-                       enabler = lttng_enabler_create(LTTNG_ENABLER_STAR_GLOB,
-                               event_param, channel);
-               } else {
-                       enabler = lttng_enabler_create(LTTNG_ENABLER_NAME,
-                               event_param, channel);
-               }
-               priv = enabler;
-       } else {
-               struct lttng_event *event;
-
-               /*
-                * No failure path is tolerated after event creation: the
-                * event stays invariant for the rest of the session.
-                */
-               event = lttng_event_create(channel, event_param,
-                               NULL, NULL,
-                               event_param->instrumentation);
-               WARN_ON_ONCE(!event);
-               if (IS_ERR(event)) {
-                       ret = PTR_ERR(event);
-                       goto event_error;
-               }
-               priv = event;
-       }
-       event_file->private_data = priv;
-       fd_install(event_fd, event_file);
-       return event_fd;
-
-event_error:
-       atomic_long_dec(&channel_file->f_count);
-refcount_error:
-       fput(event_file);
-file_error:
-       put_unused_fd(event_fd);
-fd_error:
-       return ret;
-}
-
-/**
- *     lttng_channel_ioctl - lttng syscall through ioctl
- *
- *     @file: the file
- *     @cmd: the command
- *     @arg: command arg
- *
- *     This ioctl implements lttng commands:
- *      LTTNG_KERNEL_STREAM
- *              Returns an event stream file descriptor or failure.
- *              (typically, one event stream records events from one CPU)
- *     LTTNG_KERNEL_EVENT
- *             Returns an event file descriptor or failure.
- *     LTTNG_KERNEL_CONTEXT
- *             Prepend a context field to each event in the channel
- *     LTTNG_KERNEL_ENABLE
- *             Enable recording for events in this channel (weak enable)
- *     LTTNG_KERNEL_DISABLE
- *             Disable recording for events in this channel (strong disable)
- *
- * Channel and event file descriptors also hold a reference on the session.
- */
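-/*
- * Sketch of event creation on a channel file descriptor (user space,
- * error handling omitted; "sched_switch" is just an example name):
- *
- *   struct lttng_kernel_event ev;
- *
- *   memset(&ev, 0, sizeof(ev));
- *   strncpy(ev.name, "sched_switch", LTTNG_KERNEL_SYM_NAME_LEN - 1);
- *   ev.instrumentation = LTTNG_KERNEL_TRACEPOINT;
- *   int event_fd = ioctl(chan_fd, LTTNG_KERNEL_EVENT, &ev);
- */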
-static
-long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct lttng_channel *channel = file->private_data;
-
-       switch (cmd) {
-       case LTTNG_KERNEL_OLD_STREAM:
-       case LTTNG_KERNEL_STREAM:
-               return lttng_abi_open_stream(file);
-       case LTTNG_KERNEL_OLD_EVENT:
-       {
-               struct lttng_kernel_event *uevent_param;
-               struct lttng_kernel_old_event *old_uevent_param;
-               int ret;
-
-               uevent_param = kmalloc(sizeof(struct lttng_kernel_event),
-                               GFP_KERNEL);
-               if (!uevent_param) {
-                       ret = -ENOMEM;
-                       goto old_event_end;
-               }
-               old_uevent_param = kmalloc(
-                               sizeof(struct lttng_kernel_old_event),
-                               GFP_KERNEL);
-               if (!old_uevent_param) {
-                       ret = -ENOMEM;
-                       goto old_event_error_free_param;
-               }
-               if (copy_from_user(old_uevent_param,
-                               (struct lttng_kernel_old_event __user *) arg,
-                               sizeof(struct lttng_kernel_old_event))) {
-                       ret = -EFAULT;
-                       goto old_event_error_free_old_param;
-               }
-
-               memcpy(uevent_param->name, old_uevent_param->name,
-                               sizeof(uevent_param->name));
-               uevent_param->instrumentation =
-                       old_uevent_param->instrumentation;
-
-               switch (old_uevent_param->instrumentation) {
-               case LTTNG_KERNEL_KPROBE:
-                       uevent_param->u.kprobe.addr =
-                               old_uevent_param->u.kprobe.addr;
-                       uevent_param->u.kprobe.offset =
-                               old_uevent_param->u.kprobe.offset;
-                       memcpy(uevent_param->u.kprobe.symbol_name,
-                               old_uevent_param->u.kprobe.symbol_name,
-                               sizeof(uevent_param->u.kprobe.symbol_name));
-                       break;
-               case LTTNG_KERNEL_KRETPROBE:
-                       uevent_param->u.kretprobe.addr =
-                               old_uevent_param->u.kretprobe.addr;
-                       uevent_param->u.kretprobe.offset =
-                               old_uevent_param->u.kretprobe.offset;
-                       memcpy(uevent_param->u.kretprobe.symbol_name,
-                               old_uevent_param->u.kretprobe.symbol_name,
-                               sizeof(uevent_param->u.kretprobe.symbol_name));
-                       break;
-               case LTTNG_KERNEL_FUNCTION:
-                       WARN_ON_ONCE(1);
-                       /* Not implemented. */
-                       break;
-               default:
-                       break;
-               }
-               ret = lttng_abi_create_event(file, uevent_param);
-
-old_event_error_free_old_param:
-               kfree(old_uevent_param);
-old_event_error_free_param:
-               kfree(uevent_param);
-old_event_end:
-               return ret;
-       }
-       case LTTNG_KERNEL_EVENT:
-       {
-               struct lttng_kernel_event uevent_param;
-
-               if (copy_from_user(&uevent_param,
-                               (struct lttng_kernel_event __user *) arg,
-                               sizeof(uevent_param)))
-                       return -EFAULT;
-               return lttng_abi_create_event(file, &uevent_param);
-       }
-       case LTTNG_KERNEL_OLD_CONTEXT:
-       {
-               struct lttng_kernel_context *ucontext_param;
-               struct lttng_kernel_old_context *old_ucontext_param;
-               int ret;
-
-               ucontext_param = kmalloc(sizeof(struct lttng_kernel_context),
-                               GFP_KERNEL);
-               if (!ucontext_param) {
-                       ret = -ENOMEM;
-                       goto old_ctx_end;
-               }
-               old_ucontext_param = kmalloc(sizeof(struct lttng_kernel_old_context),
-                               GFP_KERNEL);
-               if (!old_ucontext_param) {
-                       ret = -ENOMEM;
-                       goto old_ctx_error_free_param;
-               }
-
-               if (copy_from_user(old_ucontext_param,
-                               (struct lttng_kernel_old_context __user *) arg,
-                               sizeof(struct lttng_kernel_old_context))) {
-                       ret = -EFAULT;
-                       goto old_ctx_error_free_old_param;
-               }
-               ucontext_param->ctx = old_ucontext_param->ctx;
-               memcpy(ucontext_param->padding, old_ucontext_param->padding,
-                               sizeof(ucontext_param->padding));
-               /* only type that uses the union */
-               if (old_ucontext_param->ctx == LTTNG_KERNEL_CONTEXT_PERF_COUNTER) {
-                       ucontext_param->u.perf_counter.type =
-                               old_ucontext_param->u.perf_counter.type;
-                       ucontext_param->u.perf_counter.config =
-                               old_ucontext_param->u.perf_counter.config;
-                       memcpy(ucontext_param->u.perf_counter.name,
-                               old_ucontext_param->u.perf_counter.name,
-                               sizeof(ucontext_param->u.perf_counter.name));
-               }
-
-               ret = lttng_abi_add_context(file,
-                               ucontext_param,
-                               &channel->ctx, channel->session);
-
-old_ctx_error_free_old_param:
-               kfree(old_ucontext_param);
-old_ctx_error_free_param:
-               kfree(ucontext_param);
-old_ctx_end:
-               return ret;
-       }
-       case LTTNG_KERNEL_CONTEXT:
-       {
-               struct lttng_kernel_context ucontext_param;
-
-               if (copy_from_user(&ucontext_param,
-                               (struct lttng_kernel_context __user *) arg,
-                               sizeof(ucontext_param)))
-                       return -EFAULT;
-               return lttng_abi_add_context(file,
-                               &ucontext_param,
-                               &channel->ctx, channel->session);
-       }
-       case LTTNG_KERNEL_OLD_ENABLE:
-       case LTTNG_KERNEL_ENABLE:
-               return lttng_channel_enable(channel);
-       case LTTNG_KERNEL_OLD_DISABLE:
-       case LTTNG_KERNEL_DISABLE:
-               return lttng_channel_disable(channel);
-       case LTTNG_KERNEL_SYSCALL_MASK:
-               return lttng_channel_syscall_mask(channel,
-                       (struct lttng_kernel_syscall_mask __user *) arg);
-       default:
-               return -ENOIOCTLCMD;
-       }
-}
-
-/**
- *     lttng_metadata_ioctl - lttng syscall through ioctl
- *
- *     @file: the file
- *     @cmd: the command
- *     @arg: command arg
- *
- *     This ioctl implements lttng commands:
- *      LTTNG_KERNEL_STREAM
- *              Returns an event stream file descriptor or failure.
- *
- * Channel and event file descriptors also hold a reference on the session.
- */
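-/*
- * Sketch (user space): the metadata stream file descriptor obtained here
- * is then consumed like any stream, e.g. RING_BUFFER_GET_NEXT_SUBBUF
- * followed by splice(2) or mmap(2) reads:
- *
- *   int stream_fd = ioctl(metadata_fd, LTTNG_KERNEL_STREAM, 0);
- */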
-static
-long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       switch (cmd) {
-       case LTTNG_KERNEL_OLD_STREAM:
-       case LTTNG_KERNEL_STREAM:
-               return lttng_abi_open_metadata_stream(file);
-       default:
-               return -ENOIOCTLCMD;
-       }
-}
-
-/**
- *     lttng_channel_poll - lttng stream addition/removal monitoring
- *
- *     @file: the file
- *     @wait: poll table
- */
-static
-unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
-{
-       struct lttng_channel *channel = file->private_data;
-       unsigned int mask = 0;
-
-       if (file->f_mode & FMODE_READ) {
-               poll_wait_set_exclusive(wait);
-               poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
-                         wait);
-
-               if (channel->ops->is_disabled(channel->chan))
-                       return POLLERR;
-               if (channel->ops->is_finalized(channel->chan))
-                       return POLLHUP;
-               if (channel->ops->buffer_has_read_closed_stream(channel->chan))
-                       return POLLIN | POLLRDNORM;
-               return 0;
-       }
-       return mask;
-}
-
-static
-int lttng_channel_release(struct inode *inode, struct file *file)
-{
-       struct lttng_channel *channel = file->private_data;
-
-       if (channel)
-               fput(channel->session->file);
-       return 0;
-}
-
-static
-int lttng_metadata_channel_release(struct inode *inode, struct file *file)
-{
-       struct lttng_channel *channel = file->private_data;
-
-       if (channel) {
-               fput(channel->session->file);
-               lttng_metadata_channel_destroy(channel);
-       }
-
-       return 0;
-}
-
-static const struct file_operations lttng_channel_fops = {
-       .owner = THIS_MODULE,
-       .release = lttng_channel_release,
-       .poll = lttng_channel_poll,
-       .unlocked_ioctl = lttng_channel_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = lttng_channel_ioctl,
-#endif
-};
-
-static const struct file_operations lttng_metadata_fops = {
-       .owner = THIS_MODULE,
-       .release = lttng_metadata_channel_release,
-       .unlocked_ioctl = lttng_metadata_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = lttng_metadata_ioctl,
-#endif
-};
-
-/**
- *     lttng_event_ioctl - lttng syscall through ioctl
- *
- *     @file: the file
- *     @cmd: the command
- *     @arg: command arg
- *
- *     This ioctl implements lttng commands:
- *     LTTNG_KERNEL_CONTEXT
- *             Prepend a context field to each record of this event
- *     LTTNG_KERNEL_ENABLE
- *             Enable recording for this event (weak enable)
- *     LTTNG_KERNEL_DISABLE
- *             Disable recording for this event (strong disable)
- */
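-/*
- * Sketch (user space, error handling omitted):
- *
- *   ioctl(event_fd, LTTNG_KERNEL_DISABLE, 0);
- *   ioctl(event_fd, LTTNG_KERNEL_ENABLE, 0);
- */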
-static
-long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct lttng_event *event;
-       struct lttng_enabler *enabler;
-       enum lttng_event_type *evtype = file->private_data;
-
-       switch (cmd) {
-       case LTTNG_KERNEL_OLD_CONTEXT:
-       {
-               /* Not implemented */
-               return -ENOSYS;
-       }
-       case LTTNG_KERNEL_CONTEXT:
-       {
-               /* Not implemented */
-               return -ENOSYS;
-       }
-       case LTTNG_KERNEL_OLD_ENABLE:
-       case LTTNG_KERNEL_ENABLE:
-               switch (*evtype) {
-               case LTTNG_TYPE_EVENT:
-                       event = file->private_data;
-                       return lttng_event_enable(event);
-               case LTTNG_TYPE_ENABLER:
-                       enabler = file->private_data;
-                       return lttng_enabler_enable(enabler);
-               default:
-                       WARN_ON_ONCE(1);
-                       return -ENOSYS;
-               }
-       case LTTNG_KERNEL_OLD_DISABLE:
-       case LTTNG_KERNEL_DISABLE:
-               switch (*evtype) {
-               case LTTNG_TYPE_EVENT:
-                       event = file->private_data;
-                       return lttng_event_disable(event);
-               case LTTNG_TYPE_ENABLER:
-                       enabler = file->private_data;
-                       return lttng_enabler_disable(enabler);
-               default:
-                       WARN_ON_ONCE(1);
-                       return -ENOSYS;
-               }
-       case LTTNG_KERNEL_FILTER:
-               switch (*evtype) {
-               case LTTNG_TYPE_EVENT:
-                       return -EINVAL;
-               case LTTNG_TYPE_ENABLER:
-               {
-                       enabler = file->private_data;
-                       return lttng_enabler_attach_bytecode(enabler,
-                               (struct lttng_kernel_filter_bytecode __user *) arg);
-               }
-               default:
-                       WARN_ON_ONCE(1);
-                       return -ENOSYS;
-               }
-       case LTTNG_KERNEL_ADD_CALLSITE:
-               switch (*evtype) {
-               case LTTNG_TYPE_EVENT:
-                       event = file->private_data;
-                       return lttng_event_add_callsite(event,
-                               (struct lttng_kernel_event_callsite __user *) arg);
-               case LTTNG_TYPE_ENABLER:
-                       return -EINVAL;
-               default:
-                       WARN_ON_ONCE(1);
-                       return -ENOSYS;
-               }
-       default:
-               return -ENOIOCTLCMD;
-       }
-}
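
From userspace these commands are plain ioctls on the event or enabler file descriptor; enable and disable take no argument, matching the switch above. A sketch, assuming the LTTNG_KERNEL_ENABLE and LTTNG_KERNEL_DISABLE command numbers are available from the lttng-abi UAPI header:

#include <sys/ioctl.h>

/* Sketch: weak-enable or strong-disable one event (or enabler) fd. */
static int toggle_event(int event_fd, int enable)
{
	return ioctl(event_fd,
		     enable ? LTTNG_KERNEL_ENABLE : LTTNG_KERNEL_DISABLE);
}
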
-
-static
-int lttng_event_release(struct inode *inode, struct file *file)
-{
-       struct lttng_event *event;
-       struct lttng_enabler *enabler;
-       enum lttng_event_type *evtype = file->private_data;
-
-       if (!evtype)
-               return 0;
-
-       switch (*evtype) {
-       case LTTNG_TYPE_EVENT:
-               event = file->private_data;
-               if (event)
-                       fput(event->chan->file);
-               break;
-       case LTTNG_TYPE_ENABLER:
-               enabler = file->private_data;
-               if (enabler)
-                       fput(enabler->chan->file);
-               break;
-       default:
-               WARN_ON_ONCE(1);
-               break;
-       }
-
-       return 0;
-}
-
-/* TODO: filter control ioctl */
-static const struct file_operations lttng_event_fops = {
-       .owner = THIS_MODULE,
-       .release = lttng_event_release,
-       .unlocked_ioctl = lttng_event_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = lttng_event_ioctl,
-#endif
-};
-
-static int put_u64(uint64_t val, unsigned long arg)
-{
-       return put_user(val, (uint64_t __user *) arg);
-}
-
-static long lttng_stream_ring_buffer_ioctl(struct file *filp,
-               unsigned int cmd, unsigned long arg)
-{
-       struct lib_ring_buffer *buf = filp->private_data;
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       const struct lttng_channel_ops *ops = chan->backend.priv_ops;
-       int ret;
-
-       if (atomic_read(&chan->record_disabled))
-               return -EIO;
-
-       switch (cmd) {
-       case LTTNG_RING_BUFFER_GET_TIMESTAMP_BEGIN:
-       {
-               uint64_t ts;
-
-               ret = ops->timestamp_begin(config, buf, &ts);
-               if (ret < 0)
-                       goto error;
-               return put_u64(ts, arg);
-       }
-       case LTTNG_RING_BUFFER_GET_TIMESTAMP_END:
-       {
-               uint64_t ts;
-
-               ret = ops->timestamp_end(config, buf, &ts);
-               if (ret < 0)
-                       goto error;
-               return put_u64(ts, arg);
-       }
-       case LTTNG_RING_BUFFER_GET_EVENTS_DISCARDED:
-       {
-               uint64_t ed;
-
-               ret = ops->events_discarded(config, buf, &ed);
-               if (ret < 0)
-                       goto error;
-               return put_u64(ed, arg);
-       }
-       case LTTNG_RING_BUFFER_GET_CONTENT_SIZE:
-       {
-               uint64_t cs;
-
-               ret = ops->content_size(config, buf, &cs);
-               if (ret < 0)
-                       goto error;
-               return put_u64(cs, arg);
-       }
-       case LTTNG_RING_BUFFER_GET_PACKET_SIZE:
-       {
-               uint64_t ps;
-
-               ret = ops->packet_size(config, buf, &ps);
-               if (ret < 0)
-                       goto error;
-               return put_u64(ps, arg);
-       }
-       case LTTNG_RING_BUFFER_GET_STREAM_ID:
-       {
-               uint64_t si;
-
-               ret = ops->stream_id(config, buf, &si);
-               if (ret < 0)
-                       goto error;
-               return put_u64(si, arg);
-       }
-       case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
-       {
-               uint64_t ts;
-
-               ret = ops->current_timestamp(config, buf, &ts);
-               if (ret < 0)
-                       goto error;
-               return put_u64(ts, arg);
-       }
-       case LTTNG_RING_BUFFER_GET_SEQ_NUM:
-       {
-               uint64_t seq;
-
-               ret = ops->sequence_number(config, buf, &seq);
-               if (ret < 0)
-                       goto error;
-               return put_u64(seq, arg);
-       }
-       case LTTNG_RING_BUFFER_INSTANCE_ID:
-       {
-               uint64_t id;
-
-               ret = ops->instance_id(config, buf, &id);
-               if (ret < 0)
-                       goto error;
-               return put_u64(id, arg);
-       }
-       default:
-               return lib_ring_buffer_file_operations.unlocked_ioctl(filp,
-                               cmd, arg);
-       }
-
-error:
-       return -ENOSYS;
-}
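
Every GET_* command above copies a single u64 back through put_u64(), so the userspace side passes a pointer to a uint64_t as the ioctl argument. A sketch, assuming the LTTNG_RING_BUFFER_GET_* constants come from the ring-buffer ABI header:

#include <stdint.h>
#include <sys/ioctl.h>

/* Sketch: query packet begin/end timestamps on a stream fd. */
static int read_packet_timestamps(int stream_fd,
				  uint64_t *ts_begin, uint64_t *ts_end)
{
	if (ioctl(stream_fd, LTTNG_RING_BUFFER_GET_TIMESTAMP_BEGIN,
		  ts_begin) < 0)
		return -1;
	return ioctl(stream_fd, LTTNG_RING_BUFFER_GET_TIMESTAMP_END, ts_end);
}
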
-
-#ifdef CONFIG_COMPAT
-static long lttng_stream_ring_buffer_compat_ioctl(struct file *filp,
-               unsigned int cmd, unsigned long arg)
-{
-       struct lib_ring_buffer *buf = filp->private_data;
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       const struct lttng_channel_ops *ops = chan->backend.priv_ops;
-       int ret;
-
-       if (atomic_read(&chan->record_disabled))
-               return -EIO;
-
-       switch (cmd) {
-       case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_BEGIN:
-       {
-               uint64_t ts;
-
-               ret = ops->timestamp_begin(config, buf, &ts);
-               if (ret < 0)
-                       goto error;
-               return put_u64(ts, arg);
-       }
-       case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_END:
-       {
-               uint64_t ts;
-
-               ret = ops->timestamp_end(config, buf, &ts);
-               if (ret < 0)
-                       goto error;
-               return put_u64(ts, arg);
-       }
-       case LTTNG_RING_BUFFER_COMPAT_GET_EVENTS_DISCARDED:
-       {
-               uint64_t ed;
-
-               ret = ops->events_discarded(config, buf, &ed);
-               if (ret < 0)
-                       goto error;
-               return put_u64(ed, arg);
-       }
-       case LTTNG_RING_BUFFER_COMPAT_GET_CONTENT_SIZE:
-       {
-               uint64_t cs;
-
-               ret = ops->content_size(config, buf, &cs);
-               if (ret < 0)
-                       goto error;
-               return put_u64(cs, arg);
-       }
-       case LTTNG_RING_BUFFER_COMPAT_GET_PACKET_SIZE:
-       {
-               uint64_t ps;
-
-               ret = ops->packet_size(config, buf, &ps);
-               if (ret < 0)
-                       goto error;
-               return put_u64(ps, arg);
-       }
-       case LTTNG_RING_BUFFER_COMPAT_GET_STREAM_ID:
-       {
-               uint64_t si;
-
-               ret = ops->stream_id(config, buf, &si);
-               if (ret < 0)
-                       goto error;
-               return put_u64(si, arg);
-       }
-       case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
-       {
-               uint64_t ts;
-
-               ret = ops->current_timestamp(config, buf, &ts);
-               if (ret < 0)
-                       goto error;
-               return put_u64(ts, arg);
-       }
-       case LTTNG_RING_BUFFER_COMPAT_GET_SEQ_NUM:
-       {
-               uint64_t seq;
-
-               ret = ops->sequence_number(config, buf, &seq);
-               if (ret < 0)
-                       goto error;
-               return put_u64(seq, arg);
-       }
-       case LTTNG_RING_BUFFER_COMPAT_INSTANCE_ID:
-       {
-               uint64_t id;
-
-               ret = ops->instance_id(config, buf, &id);
-               if (ret < 0)
-                       goto error;
-               return put_u64(id, arg);
-       }
-       default:
-               return lib_ring_buffer_file_operations.compat_ioctl(filp,
-                               cmd, arg);
-       }
-
-error:
-       return -ENOSYS;
-}
-#endif /* CONFIG_COMPAT */
-
-static void lttng_stream_override_ring_buffer_fops(void)
-{
-       lttng_stream_ring_buffer_file_operations.owner = THIS_MODULE;
-       lttng_stream_ring_buffer_file_operations.open =
-               lib_ring_buffer_file_operations.open;
-       lttng_stream_ring_buffer_file_operations.release =
-               lib_ring_buffer_file_operations.release;
-       lttng_stream_ring_buffer_file_operations.poll =
-               lib_ring_buffer_file_operations.poll;
-       lttng_stream_ring_buffer_file_operations.splice_read =
-               lib_ring_buffer_file_operations.splice_read;
-       lttng_stream_ring_buffer_file_operations.mmap =
-               lib_ring_buffer_file_operations.mmap;
-       lttng_stream_ring_buffer_file_operations.unlocked_ioctl =
-               lttng_stream_ring_buffer_ioctl;
-       lttng_stream_ring_buffer_file_operations.llseek =
-               lib_ring_buffer_file_operations.llseek;
-#ifdef CONFIG_COMPAT
-       lttng_stream_ring_buffer_file_operations.compat_ioctl =
-               lttng_stream_ring_buffer_compat_ioctl;
-#endif
-}
-
-int __init lttng_abi_init(void)
-{
-       int ret = 0;
-
-       wrapper_vmalloc_sync_mappings();
-       lttng_clock_ref();
-
-       ret = lttng_tp_mempool_init();
-       if (ret) {
-               goto error;
-       }
-
-       lttng_proc_dentry = proc_create_data("lttng", S_IRUSR | S_IWUSR, NULL,
-                                       &lttng_proc_ops, NULL);
-
-       if (!lttng_proc_dentry) {
-               printk(KERN_ERR "Error creating LTTng control file\n");
-               ret = -ENOMEM;
-               goto error;
-       }
-       lttng_stream_override_ring_buffer_fops();
-       return 0;
-
-error:
-       lttng_tp_mempool_destroy();
-       lttng_clock_unref();
-       return ret;
-}
-
-/* No __exit annotation because this is also used by the init error path. */
-void lttng_abi_exit(void)
-{
-       lttng_tp_mempool_destroy();
-       lttng_clock_unref();
-       if (lttng_proc_dentry)
-               remove_proc_entry("lttng", NULL);
-}
diff --git a/lttng-calibrate.c b/lttng-calibrate.c
deleted file mode 100644 (file)
index 3886319..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-calibrate.c
- *
- * LTTng probe calibration.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <lttng/abi.h>
-#include <lttng/events.h>
-
-noinline
-void lttng_calibrate_kretprobe(void)
-{
-       asm volatile ("");
-}
-
-int lttng_calibrate(struct lttng_kernel_calibrate *calibrate)
-{
-       switch (calibrate->type) {
-       case LTTNG_KERNEL_CALIBRATE_KRETPROBE:
-               lttng_calibrate_kretprobe();
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
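
The empty noinline function above exists only so that a kretprobe can be installed on it to measure probe overhead. Kernel-side usage is direct, using only the API shown here:

struct lttng_kernel_calibrate calibrate = {
	.type = LTTNG_KERNEL_CALIBRATE_KRETPROBE,
};
int ret;

/* Triggers one call to lttng_calibrate_kretprobe(); returns 0,
 * or -EINVAL for an unknown calibrate type. */
ret = lttng_calibrate(&calibrate);
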
diff --git a/lttng-clock.c b/lttng-clock.c
deleted file mode 100644 (file)
index 7512a3f..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-clock.c
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/mutex.h>
-
-#include <wrapper/trace-clock.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-
-struct lttng_trace_clock *lttng_trace_clock;
-EXPORT_SYMBOL_GPL(lttng_trace_clock);
-
-static DEFINE_MUTEX(clock_mutex);
-static struct module *lttng_trace_clock_mod;   /* plugin */
-static int clock_used;                         /* refcount */
-
-int lttng_clock_register_plugin(struct lttng_trace_clock *ltc,
-               struct module *mod)
-{
-       int ret = 0;
-
-       mutex_lock(&clock_mutex);
-       if (clock_used) {
-               ret = -EBUSY;
-               goto end;
-       }
-       if (lttng_trace_clock_mod) {
-               ret = -EEXIST;
-               goto end;
-       }
-       /* set clock */
-       WRITE_ONCE(lttng_trace_clock, ltc);
-       lttng_trace_clock_mod = mod;
-end:
-       mutex_unlock(&clock_mutex);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(lttng_clock_register_plugin);
-
-void lttng_clock_unregister_plugin(struct lttng_trace_clock *ltc,
-               struct module *mod)
-{
-       mutex_lock(&clock_mutex);
-       WARN_ON_ONCE(clock_used);
-       if (!lttng_trace_clock_mod) {
-               goto end;
-       }
-       WARN_ON_ONCE(lttng_trace_clock_mod != mod);
-
-       WRITE_ONCE(lttng_trace_clock, NULL);
-       lttng_trace_clock_mod = NULL;
-end:
-       mutex_unlock(&clock_mutex);
-}
-EXPORT_SYMBOL_GPL(lttng_clock_unregister_plugin);
-
-void lttng_clock_ref(void)
-{
-       mutex_lock(&clock_mutex);
-       clock_used++;
-       if (lttng_trace_clock_mod) {
-               int ret;
-
-               ret = try_module_get(lttng_trace_clock_mod);
-               if (!ret) {
-                       printk(KERN_ERR "LTTng-clock cannot get clock plugin module\n");
-                       WRITE_ONCE(lttng_trace_clock, NULL);
-                       lttng_trace_clock_mod = NULL;
-               }
-       }
-       mutex_unlock(&clock_mutex);
-}
-EXPORT_SYMBOL_GPL(lttng_clock_ref);
-
-void lttng_clock_unref(void)
-{
-       mutex_lock(&clock_mutex);
-       clock_used--;
-       if (lttng_trace_clock_mod)
-               module_put(lttng_trace_clock_mod);
-       mutex_unlock(&clock_mutex);
-}
-EXPORT_SYMBOL_GPL(lttng_clock_unref);
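
A clock plugin is a separate module that registers its lttng_trace_clock on load and unregisters on unload; registration fails with -EBUSY while tracing holds a clock reference and -EEXIST if another plugin is already installed. A minimal sketch, assuming the callback layout of struct lttng_trace_clock (read64/freq/uuid/name/description) and the <lttng/clock.h> header path:

#include <linux/module.h>
#include <lttng/clock.h>	/* assumed header exposing the plugin API */

static u64 my_read64(void)
{
	return 0;	/* read the custom hardware clock here */
}

static u64 my_freq(void)
{
	return 1000000000ULL;	/* 1 GHz, for illustration */
}

static struct lttng_trace_clock my_trace_clock = {
	.read64 = my_read64,
	.freq = my_freq,
	/* .uuid, .name and .description callbacks are also expected. */
};

static int __init my_clock_plugin_init(void)
{
	return lttng_clock_register_plugin(&my_trace_clock, THIS_MODULE);
}
module_init(my_clock_plugin_init);

static void __exit my_clock_plugin_exit(void)
{
	lttng_clock_unregister_plugin(&my_trace_clock, THIS_MODULE);
}
module_exit(my_clock_plugin_exit);

MODULE_LICENSE("GPL");
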
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng Clock");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/lttng-context-callstack-legacy-impl.h b/lttng-context-callstack-legacy-impl.h
deleted file mode 100644 (file)
index 8d78fb9..0000000
+++ /dev/null
@@ -1,244 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-callstack-legacy-impl.h
- *
- * LTTng callstack event context, legacy implementation. Targets
- * kernels and architectures not yet using the stacktrace common
- * infrastructure introduced in the upstream Linux kernel by commit
- * 214d8ca6ee "stacktrace: Provide common infrastructure" (merged in
- * Linux 5.2, then gradually introduced within architectures).
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
- */
-
-#define MAX_ENTRIES 128
-
-enum lttng_cs_ctx_modes {
-       CALLSTACK_KERNEL = 0,
-       CALLSTACK_USER = 1,
-       NR_CALLSTACK_MODES,
-};
-
-struct lttng_cs_dispatch {
-       struct stack_trace stack_trace;
-       unsigned long entries[MAX_ENTRIES];
-};
-
-struct lttng_cs {
-       struct lttng_cs_dispatch dispatch[RING_BUFFER_MAX_NESTING];
-};
-
-struct field_data {
-       struct lttng_cs __percpu *cs_percpu;
-       enum lttng_cs_ctx_modes mode;
-};
-
-struct lttng_cs_type {
-       const char *name;
-       const char *length_name;
-       const char *save_func_name;
-       void (*save_func)(struct stack_trace *trace);
-};
-
-static struct lttng_cs_type cs_types[] = {
-       {
-               .name           = "callstack_kernel",
-               .length_name    = "_callstack_kernel_length",
-               .save_func_name = "save_stack_trace",
-               .save_func      = NULL,
-       },
-       {
-               .name           = "callstack_user",
-               .length_name    = "_callstack_user_length",
-               .save_func_name = "save_stack_trace_user",
-               .save_func      = NULL,
-       },
-};
-
-static
-const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
-{
-       return cs_types[mode].name;
-}
-
-static
-const char *lttng_cs_ctx_mode_length_name(enum lttng_cs_ctx_modes mode)
-{
-       return cs_types[mode].length_name;
-}
-
-static
-int init_type(enum lttng_cs_ctx_modes mode)
-{
-       unsigned long func;
-
-       if (cs_types[mode].save_func)
-               return 0;
-       func = kallsyms_lookup_funcptr(cs_types[mode].save_func_name);
-       if (!func) {
-               printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
-                               cs_types[mode].save_func_name);
-               return -EINVAL;
-       }
-       cs_types[mode].save_func = (void *) func;
-       return 0;
-}
-
-static
-void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
-{
-       int cpu, i;
-
-       for_each_possible_cpu(cpu) {
-               struct lttng_cs *cs;
-
-               cs = per_cpu_ptr(cs_set, cpu);
-               for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
-                       struct lttng_cs_dispatch *dispatch;
-
-                       dispatch = &cs->dispatch[i];
-                       dispatch->stack_trace.entries = dispatch->entries;
-                       dispatch->stack_trace.max_entries = MAX_ENTRIES;
-               }
-       }
-}
-
-/* Keep track of nesting inside userspace callstack context code */
-DEFINE_PER_CPU(int, callstack_user_nesting);
-
-static
-struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
-                                       struct lib_ring_buffer_ctx *ctx)
-{
-       int buffer_nesting, cs_user_nesting;
-       struct lttng_cs *cs;
-       struct field_data *fdata = field->priv;
-
-       /*
-        * Do not gather the userspace callstack context when the event was
-        * triggered by the userspace callstack context saving mechanism.
-        */
-       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
-
-       if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
-               return NULL;
-
-       /*
-        * get_cpu() is not required, preemption is already
-        * disabled while event is written.
-        *
-        * max nesting is checked in lib_ring_buffer_get_cpu().
-        * Check it again as a safety net.
-        */
-       cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
-       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
-       if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
-               return NULL;
-
-       return &cs->dispatch[buffer_nesting].stack_trace;
-}
-
-static
-size_t lttng_callstack_length_get_size(size_t offset, struct lttng_ctx_field *field,
-                               struct lib_ring_buffer_ctx *ctx,
-                               struct lttng_channel *chan)
-{
-       size_t orig_offset = offset;
-
-       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       offset += sizeof(unsigned int);
-       return offset - orig_offset;
-}
-
-/*
- * In order to reserve the correct size, the callstack is computed. The
- * resulting callstack is saved to be accessed in the record step.
- */
-static
-size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_ctx_field *field,
-                                       struct lib_ring_buffer_ctx *ctx,
-                                       struct lttng_channel *chan)
-{
-       struct stack_trace *trace;
-       struct field_data *fdata = field->priv;
-       size_t orig_offset = offset;
-
-       /* do not write data if no space is available */
-       trace = stack_trace_context(field, ctx);
-       if (unlikely(!trace)) {
-               offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
-               return offset - orig_offset;
-       }
-
-       /* reset stack trace, no need to clear memory */
-       trace->nr_entries = 0;
-
-       if (fdata->mode == CALLSTACK_USER)
-               ++per_cpu(callstack_user_nesting, ctx->cpu);
-
-       /* do the real work and reserve space */
-       cs_types[fdata->mode].save_func(trace);
-
-       if (fdata->mode == CALLSTACK_USER)
-               per_cpu(callstack_user_nesting, ctx->cpu)--;
-
-       /*
-        * Remove the final ULONG_MAX delimiter appended by the save
-        * function. If it cannot be found, the trace filled all
-        * max_entries slots, and our own ULONG_MAX marker is added
-        * below instead, to show that the stack is incomplete. This
-        * is more compact for a trace.
-        */
-       if (trace->nr_entries > 0
-                       && trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
-               trace->nr_entries--;
-       }
-       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
-       offset += sizeof(unsigned long) * trace->nr_entries;
-       /* Add our own ULONG_MAX delimiter to show incomplete stack. */
-       if (trace->nr_entries == trace->max_entries)
-               offset += sizeof(unsigned long);
-       return offset - orig_offset;
-}
-
-static
-void lttng_callstack_length_record(struct lttng_ctx_field *field,
-                       struct lib_ring_buffer_ctx *ctx,
-                       struct lttng_channel *chan)
-{
-       struct stack_trace *trace = stack_trace_context(field, ctx);
-       unsigned int nr_seq_entries;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
-       if (unlikely(!trace)) {
-               nr_seq_entries = 0;
-       } else {
-               nr_seq_entries = trace->nr_entries;
-               if (trace->nr_entries == trace->max_entries)
-                       nr_seq_entries++;
-       }
-       chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
-}
-static
-void lttng_callstack_sequence_record(struct lttng_ctx_field *field,
-                       struct lib_ring_buffer_ctx *ctx,
-                       struct lttng_channel *chan)
-{
-       struct stack_trace *trace = stack_trace_context(field, ctx);
-       unsigned int nr_seq_entries;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
-       if (unlikely(!trace)) {
-               return;
-       }
-       nr_seq_entries = trace->nr_entries;
-       if (trace->nr_entries == trace->max_entries)
-               nr_seq_entries++;
-       chan->ops->event_write(ctx, trace->entries,
-                       sizeof(unsigned long) * trace->nr_entries);
-       /* Add our own ULONG_MAX delimiter to show incomplete stack. */
-       if (trace->nr_entries == trace->max_entries) {
-               unsigned long delim = ULONG_MAX;
-
-               chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
-       }
-}
diff --git a/lttng-context-callstack-stackwalk-impl.h b/lttng-context-callstack-stackwalk-impl.h
deleted file mode 100644 (file)
index 42f4273..0000000
+++ /dev/null
@@ -1,264 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-callstack-stackwalk-impl.h
- *
- * LTTng callstack event context, stackwalk implementation. Targets
- * kernels and architectures using the stacktrace common infrastructure
- * introduced in the upstream Linux kernel by commit 214d8ca6ee
- * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
- * then gradually introduced within architectures).
- *
- * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
- */
-
-#define MAX_ENTRIES 128
-
-enum lttng_cs_ctx_modes {
-       CALLSTACK_KERNEL = 0,
-       CALLSTACK_USER = 1,
-       NR_CALLSTACK_MODES,
-};
-
-struct lttng_stack_trace {
-       unsigned long entries[MAX_ENTRIES];
-       unsigned int nr_entries;
-};
-
-struct lttng_cs {
-       struct lttng_stack_trace stack_trace[RING_BUFFER_MAX_NESTING];
-};
-
-struct field_data {
-       struct lttng_cs __percpu *cs_percpu;
-       enum lttng_cs_ctx_modes mode;
-};
-
-static
-unsigned int (*save_func_kernel)(unsigned long *store, unsigned int size,
-                               unsigned int skipnr);
-static
-unsigned int (*save_func_user)(unsigned long *store, unsigned int size);
-
-static
-const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
-{
-       switch (mode) {
-       case CALLSTACK_KERNEL:
-               return "callstack_kernel";
-       case CALLSTACK_USER:
-               return "callstack_user";
-       default:
-               return NULL;
-       }
-}
-
-static
-const char *lttng_cs_ctx_mode_length_name(enum lttng_cs_ctx_modes mode)
-{
-       switch (mode) {
-       case CALLSTACK_KERNEL:
-               return "_callstack_kernel_length";
-       case CALLSTACK_USER:
-               return "_callstack_user_length";
-       default:
-               return NULL;
-       }
-}
-
-static
-int init_type_callstack_kernel(void)
-{
-       unsigned long func;
-       const char *func_name = "stack_trace_save";
-
-       if (save_func_kernel)
-               return 0;
-       func = kallsyms_lookup_funcptr(func_name);
-       if (!func) {
-               printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
-                               func_name);
-               return -EINVAL;
-       }
-       save_func_kernel = (void *) func;
-       return 0;
-}
-
-static
-int init_type_callstack_user(void)
-{
-       unsigned long func;
-       const char *func_name = "stack_trace_save_user";
-
-       if (save_func_user)
-               return 0;
-       func = kallsyms_lookup_funcptr(func_name);
-       if (!func) {
-               printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
-                               func_name);
-               return -EINVAL;
-       }
-       save_func_user = (void *) func;
-       return 0;
-}
-
-static
-int init_type(enum lttng_cs_ctx_modes mode)
-{
-       switch (mode) {
-       case CALLSTACK_KERNEL:
-               return init_type_callstack_kernel();
-       case CALLSTACK_USER:
-               return init_type_callstack_user();
-       default:
-               return -EINVAL;
-       }
-}
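
For reference, the common stacktrace infrastructure resolved here has the following upstream prototypes (linux/stacktrace.h, Linux 5.2+); they are looked up through kallsyms rather than linked directly because stack_trace_save_user(), in particular, is not exported to modules:

unsigned int stack_trace_save(unsigned long *store, unsigned int size,
			      unsigned int skipnr);
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);

These match the save_func_kernel and save_func_user pointer types declared above.
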
-
-static
-void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
-{
-}
-
-/* Keep track of nesting inside userspace callstack context code */
-DEFINE_PER_CPU(int, callstack_user_nesting);
-
-static
-struct lttng_stack_trace *stack_trace_context(struct lttng_ctx_field *field,
-                                       struct lib_ring_buffer_ctx *ctx)
-{
-       int buffer_nesting, cs_user_nesting;
-       struct lttng_cs *cs;
-       struct field_data *fdata = field->priv;
-
-       /*
-        * Do not gather the userspace callstack context when the event was
-        * triggered by the userspace callstack context saving mechanism.
-        */
-       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
-
-       if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
-               return NULL;
-
-       /*
-        * get_cpu() is not required, preemption is already
-        * disabled while event is written.
-        *
-        * max nesting is checked in lib_ring_buffer_get_cpu().
-        * Check it again as a safety net.
-        */
-       cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
-       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
-       if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
-               return NULL;
-
-       return &cs->stack_trace[buffer_nesting];
-}
-
-static
-size_t lttng_callstack_length_get_size(size_t offset, struct lttng_ctx_field *field,
-                               struct lib_ring_buffer_ctx *ctx,
-                               struct lttng_channel *chan)
-{
-       size_t orig_offset = offset;
-
-       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       offset += sizeof(unsigned int);
-       return offset - orig_offset;
-}
-
-/*
- * In order to reserve the correct size, the callstack is computed. The
- * resulting callstack is saved to be accessed in the record step.
- */
-static
-size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_ctx_field *field,
-                                       struct lib_ring_buffer_ctx *ctx,
-                                       struct lttng_channel *chan)
-{
-       struct lttng_stack_trace *trace;
-       struct field_data *fdata = field->priv;
-       size_t orig_offset = offset;
-
-       /* do not write data if no space is available */
-       trace = stack_trace_context(field, ctx);
-       if (unlikely(!trace)) {
-               offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
-               return offset - orig_offset;
-       }
-
-       /* reset stack trace, no need to clear memory */
-       trace->nr_entries = 0;
-
-       switch (fdata->mode) {
-       case CALLSTACK_KERNEL:
-               /* do the real work and reserve space */
-               trace->nr_entries = save_func_kernel(trace->entries,
-                                               MAX_ENTRIES, 0);
-               break;
-       case CALLSTACK_USER:
-               ++per_cpu(callstack_user_nesting, ctx->cpu);
-               /* do the real work and reserve space */
-               trace->nr_entries = save_func_user(trace->entries,
-                                               MAX_ENTRIES);
-               per_cpu(callstack_user_nesting, ctx->cpu)--;
-               break;
-       default:
-               WARN_ON_ONCE(1);
-       }
-
-       /*
-        * If the array is filled, add our own marker to show that the
-        * stack is incomplete.
-        */
-       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
-       offset += sizeof(unsigned long) * trace->nr_entries;
-       /* Add our own ULONG_MAX delimiter to show incomplete stack. */
-       if (trace->nr_entries == MAX_ENTRIES)
-               offset += sizeof(unsigned long);
-       return offset - orig_offset;
-}
-
-static
-void lttng_callstack_length_record(struct lttng_ctx_field *field,
-                       struct lib_ring_buffer_ctx *ctx,
-                       struct lttng_channel *chan)
-{
-       struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
-       unsigned int nr_seq_entries;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
-       if (unlikely(!trace)) {
-               nr_seq_entries = 0;
-       } else {
-               nr_seq_entries = trace->nr_entries;
-               if (trace->nr_entries == MAX_ENTRIES)
-                       nr_seq_entries++;
-       }
-       chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
-}
-
-static
-void lttng_callstack_sequence_record(struct lttng_ctx_field *field,
-                       struct lib_ring_buffer_ctx *ctx,
-                       struct lttng_channel *chan)
-{
-       struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
-       unsigned int nr_seq_entries;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
-       if (unlikely(!trace)) {
-               return;
-       }
-       nr_seq_entries = trace->nr_entries;
-       if (trace->nr_entries == MAX_ENTRIES)
-               nr_seq_entries++;
-       chan->ops->event_write(ctx, trace->entries,
-                       sizeof(unsigned long) * trace->nr_entries);
-       /* Add our own ULONG_MAX delimiter to show incomplete stack. */
-       if (trace->nr_entries == MAX_ENTRIES) {
-               unsigned long delim = ULONG_MAX;
-
-               chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
-       }
-}
diff --git a/lttng-context-callstack.c b/lttng-context-callstack.c
deleted file mode 100644 (file)
index 7b9e651..0000000
+++ /dev/null
@@ -1,193 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-callstack.c
- *
- * LTTng callstack event context.
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
- *
- * The callstack context can be added to any kernel event. It records
- * either the kernel or the userspace callstack, up to a max depth. The
- * context is a CTF sequence, such that it uses only the space required
- * for the number of callstack entries.
- *
- * It allocates per-CPU callstack buffers for up to 4 levels of
- * interrupt nesting. This nesting limit is the same as the one defined
- * in the ring buffer. It therefore uses a fixed amount of memory,
- * proportional to the number of CPUs:
- *
- *   size = cpus * nest * depth * sizeof(unsigned long)
- *
- * With 4 nesting levels, a depth of 128 and 8-byte entries, this
- * amounts to 4 * 128 * 8 = 4096 bytes per CPU on a 64-bit host.
- * The allocation is done at initialization to avoid memory
- * allocation overhead while tracing, using a shallow stack.
- *
- * The kernel callstack is recovered using save_stack_trace(), and the
- * userspace callstack uses save_stack_trace_user(). They rely on frame
- * pointers. These are usually available for the kernel, but the
- * -fomit-frame-pointer compiler option, frequently used in popular
- * Linux distributions, may make the userspace callstack unreliable;
- * this is a known limitation of the approach. If frame pointers are
- * not available, no error is produced, but the callstack will be
- * empty. The feature is still provided because it works well for
- * runtime environments that keep frame pointers. In the future,
- * unwind support and/or last branch record may provide a solution to
- * this problem.
- *
- * The symbol name resolution is left to the trace reader.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/utsname.h>
-#include <linux/stacktrace.h>
-#include <linux/spinlock.h>
-#include <ringbuffer/backend.h>
-#include <ringbuffer/frontend.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <lttng/endian.h>
-#include "wrapper/vmalloc.h"
-
-#ifdef CONFIG_ARCH_STACKWALK
-#include "lttng-context-callstack-stackwalk-impl.h"
-#else
-#include "lttng-context-callstack-legacy-impl.h"
-#endif
-
-static
-void field_data_free(struct field_data *fdata)
-{
-       if (!fdata)
-               return;
-       free_percpu(fdata->cs_percpu);
-       kfree(fdata);
-}
-
-static
-struct field_data __percpu *field_data_create(enum lttng_cs_ctx_modes mode)
-{
-       struct lttng_cs __percpu *cs_set;
-       struct field_data *fdata;
-
-       fdata = kzalloc(sizeof(*fdata), GFP_KERNEL);
-       if (!fdata)
-               return NULL;
-       cs_set = alloc_percpu(struct lttng_cs);
-       if (!cs_set)
-               goto error_alloc;
-       lttng_cs_set_init(cs_set);
-       fdata->cs_percpu = cs_set;
-       fdata->mode = mode;
-       return fdata;
-
-error_alloc:
-       field_data_free(fdata);
-       return NULL;
-}
-
-static
-void lttng_callstack_sequence_destroy(struct lttng_ctx_field *field)
-{
-       struct field_data *fdata = field->priv;
-
-       field_data_free(fdata);
-}
-
-static const struct lttng_type sequence_elem_type =
-       __type_integer(unsigned long, 0, 0, -1, __BYTE_ORDER, 16, none);
-
-static
-int __lttng_add_callstack_generic(struct lttng_ctx **ctx,
-               enum lttng_cs_ctx_modes mode)
-{
-       const char *ctx_name = lttng_cs_ctx_mode_name(mode);
-       const char *ctx_length_name = lttng_cs_ctx_mode_length_name(mode);
-       struct lttng_ctx_field *length_field, *sequence_field;
-       struct lttng_event_field *field;
-       struct field_data *fdata;
-       int ret;
-
-       ret = init_type(mode);
-       if (ret)
-               return ret;
-       length_field = lttng_append_context(ctx);
-       if (!length_field)
-               return -ENOMEM;
-       sequence_field = lttng_append_context(ctx);
-       if (!sequence_field) {
-               lttng_remove_context_field(ctx, length_field);
-               return -ENOMEM;
-       }
-       if (lttng_find_context(*ctx, ctx_name)) {
-               ret = -EEXIST;
-               goto error_find;
-       }
-       fdata = field_data_create(mode);
-       if (!fdata) {
-               ret = -ENOMEM;
-               goto error_create;
-       }
-
-       field = &length_field->event_field;
-       field->name = ctx_length_name;
-       field->type.atype = atype_integer;
-       field->type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
-       field->type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
-       field->type.u.integer.signedness = lttng_is_signed_type(unsigned int);
-       field->type.u.integer.reverse_byte_order = 0;
-       field->type.u.integer.base = 10;
-       field->type.u.integer.encoding = lttng_encode_none;
-       length_field->get_size_arg = lttng_callstack_length_get_size;
-       length_field->record = lttng_callstack_length_record;
-
-       field = &sequence_field->event_field;
-       field->name = ctx_name;
-       field->type.atype = atype_sequence_nestable;
-       field->type.u.sequence_nestable.elem_type = &sequence_elem_type;
-       field->type.u.sequence_nestable.alignment = 0;
-       sequence_field->get_size_arg = lttng_callstack_sequence_get_size;
-       sequence_field->record = lttng_callstack_sequence_record;
-       sequence_field->priv = fdata;
-       sequence_field->destroy = lttng_callstack_sequence_destroy;
-
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-
-error_create:
-       field_data_free(fdata);
-error_find:
-       lttng_remove_context_field(ctx, sequence_field);
-       lttng_remove_context_field(ctx, length_field);
-       return ret;
-}
-
-/**
- *     lttng_add_callstack_to_ctx - add callstack event context
- *
- *     @ctx: the lttng_ctx pointer to initialize
- *     @type: the context type
- *
- *     Supported callstack types:
- *     LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL
- *             Records the callstack of the kernel
- *     LTTNG_KERNEL_CONTEXT_CALLSTACK_USER
- *             Records the callstack of the userspace program (from the kernel)
- *
- * Return 0 for success, or error code.
- */
-int lttng_add_callstack_to_ctx(struct lttng_ctx **ctx, int type)
-{
-       switch (type) {
-       case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
-               return __lttng_add_callstack_generic(ctx, CALLSTACK_KERNEL);
-#ifdef CONFIG_X86
-       case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
-               return __lttng_add_callstack_generic(ctx, CALLSTACK_USER);
-#endif
-       default:
-               return -EINVAL;
-       }
-}
-EXPORT_SYMBOL_GPL(lttng_add_callstack_to_ctx);
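
Callers attach the context with a single call on a context chain; the userspace variant is only available on x86, per the #ifdef above. A sketch using only this API:

static int attach_kernel_callstack(struct lttng_ctx **ctx)
{
	int ret;

	ret = lttng_add_callstack_to_ctx(ctx,
			LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL);
	if (ret == -EEXIST)
		pr_debug("kernel callstack context already attached\n");
	return ret;
}
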
diff --git a/lttng-context-cgroup-ns.c b/lttng-context-cgroup-ns.c
deleted file mode 100644 (file)
index 27f00f6..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-cgroup-ns.c
- *
- * LTTng cgroup namespace context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/cgroup.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/namespace.h>
-#include <lttng/tracer.h>
-
-#if defined(CONFIG_CGROUPS) && \
-       ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) || \
-        LTTNG_UBUNTU_KERNEL_RANGE(4,4,0,0, 4,5,0,0))
-
-static
-size_t cgroup_ns_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       size += sizeof(unsigned int);
-       return size;
-}
-
-static
-void cgroup_ns_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       unsigned int cgroup_ns_inum = 0;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        *
-        * As documented in the namespace access rules of 'linux/nsproxy.h',
-        * no precautions are needed when accessing the current task's
-        * namespaces: just dereference the pointers.
-        */
-       if (current->nsproxy)
-               cgroup_ns_inum = current->nsproxy->cgroup_ns->lttng_ns_inum;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(cgroup_ns_inum));
-       chan->ops->event_write(ctx, &cgroup_ns_inum, sizeof(cgroup_ns_inum));
-}
-
-static
-void cgroup_ns_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       unsigned int cgroup_ns_inum = 0;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        *
-        * As documented in the namespace access rules of 'linux/nsproxy.h',
-        * no precautions are needed when accessing the current task's
-        * namespaces: just dereference the pointers.
-        */
-       if (current->nsproxy)
-               cgroup_ns_inum = current->nsproxy->cgroup_ns->lttng_ns_inum;
-
-       value->s64 = cgroup_ns_inum;
-}
-
-int lttng_add_cgroup_ns_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "cgroup_ns")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "cgroup_ns";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = cgroup_ns_get_size;
-       field->record = cgroup_ns_record;
-       field->get_value = cgroup_ns_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_cgroup_ns_to_ctx);
-
-#endif
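
The same three-callback shape (get_size for space reservation, record for writing into the ring buffer, get_value for filter evaluation) repeats in every simple integer context that follows; only the field name, C type and value source change. A condensed skeleton of the pattern, with "foo" and the foo_* callbacks as hypothetical placeholders:

static int lttng_add_foo_to_ctx(struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;

	field = lttng_append_context(ctx);
	if (!field)
		return -ENOMEM;
	if (lttng_find_context(*ctx, "foo")) {
		lttng_remove_context_field(ctx, field);
		return -EEXIST;
	}
	field->event_field.name = "foo";
	/* ...fill in the CTF integer type description as above... */
	field->get_size = foo_get_size;		/* size to reserve */
	field->record = foo_record;		/* write the value */
	field->get_value = foo_get_value;	/* read for filtering */
	lttng_context_update(*ctx);
	wrapper_vmalloc_sync_mappings();
	return 0;
}
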
diff --git a/lttng-context-cpu-id.c b/lttng-context-cpu-id.c
deleted file mode 100644 (file)
index 498dfcf..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-cpu-id.c
- *
- * LTTng CPU id context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-static
-size_t cpu_id_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(int));
-       size += sizeof(int);
-       return size;
-}
-
-static
-void cpu_id_record(struct lttng_ctx_field *field,
-               struct lib_ring_buffer_ctx *ctx,
-               struct lttng_channel *chan)
-{
-       int cpu;
-
-       cpu = ctx->cpu;
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(cpu));
-       chan->ops->event_write(ctx, &cpu, sizeof(cpu));
-}
-
-static
-void cpu_id_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = smp_processor_id();
-}
-
-int lttng_add_cpu_id_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "cpu_id")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "cpu_id";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(int) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(int) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(int);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = cpu_id_get_size;
-       field->record = cpu_id_record;
-       field->get_value = cpu_id_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_cpu_id_to_ctx);
diff --git a/lttng-context-egid.c b/lttng-context-egid.c
deleted file mode 100644 (file)
index e649fec..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-egid.c
- *
- * LTTng effective group ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t egid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
-       size += sizeof(gid_t);
-       return size;
-}
-
-static
-void egid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       gid_t egid;
-
-       egid = lttng_current_egid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(egid));
-       chan->ops->event_write(ctx, &egid, sizeof(egid));
-}
-
-static
-void egid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_egid();
-}
-
-int lttng_add_egid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "egid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "egid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = egid_get_size;
-       field->record = egid_record;
-       field->get_value = egid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_egid_to_ctx);
diff --git a/lttng-context-euid.c b/lttng-context-euid.c
deleted file mode 100644 (file)
index 79faf3a..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-euid.c
- *
- * LTTng effective user ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t euid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
-       size += sizeof(uid_t);
-       return size;
-}
-
-static
-void euid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       uid_t euid;
-
-       euid = lttng_current_euid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(euid));
-       chan->ops->event_write(ctx, &euid, sizeof(euid));
-}
-
-static
-void euid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_euid();
-}
-
-int lttng_add_euid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "euid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "euid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = euid_get_size;
-       field->record = euid_record;
-       field->get_value = euid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_euid_to_ctx);
diff --git a/lttng-context-gid.c b/lttng-context-gid.c
deleted file mode 100644 (file)
index 5620469..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-gid.c
- *
- * LTTng real group ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t gid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
-       size += sizeof(gid_t);
-       return size;
-}
-
-static
-void gid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       gid_t gid;
-
-       gid = lttng_current_gid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(gid));
-       chan->ops->event_write(ctx, &gid, sizeof(gid));
-}
-
-static
-void gid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_gid();
-}
-
-int lttng_add_gid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "gid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "gid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = gid_get_size;
-       field->record = gid_record;
-       field->get_value = gid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_gid_to_ctx);
diff --git a/lttng-context-hostname.c b/lttng-context-hostname.c
deleted file mode 100644 (file)
index 86c5d02..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-hostname.c
- *
- * LTTng hostname context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/utsname.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-#define LTTNG_HOSTNAME_CTX_LEN (__NEW_UTS_LEN + 1)
-
-static
-size_t hostname_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += LTTNG_HOSTNAME_CTX_LEN;
-       return size;
-}
-
-static
-void hostname_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       struct nsproxy *nsproxy;
-       struct uts_namespace *ns;
-       char *hostname;
-
-       /*
-        * No need to take the RCU read-side lock to read current
-        * nsproxy. (documented in nsproxy.h)
-        */
-       nsproxy = current->nsproxy;
-       if (nsproxy) {
-               ns = nsproxy->uts_ns;
-               hostname = ns->name.nodename;
-               chan->ops->event_write(ctx, hostname,
-                               LTTNG_HOSTNAME_CTX_LEN);
-       } else {
-               chan->ops->event_memset(ctx, 0,
-                               LTTNG_HOSTNAME_CTX_LEN);
-       }
-}
-
-static
-void hostname_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       struct nsproxy *nsproxy;
-       struct uts_namespace *ns;
-       char *hostname;
-
-       /*
-        * No need to take the RCU read-side lock to read current
-        * nsproxy. (documented in nsproxy.h)
-        */
-       nsproxy = current->nsproxy;
-       if (nsproxy) {
-               ns = nsproxy->uts_ns;
-               hostname = ns->name.nodename;
-       } else {
-               hostname = "";
-       }
-       value->str = hostname;
-}
-
-static const struct lttng_type hostname_array_elem_type =
-       __type_integer(char, 0, 0, -1, __BYTE_ORDER, 10, UTF8);
-
-int lttng_add_hostname_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "hostname")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "hostname";
-       field->event_field.type.atype = atype_array_nestable;
-       field->event_field.type.u.array_nestable.elem_type =
-               &hostname_array_elem_type;
-       field->event_field.type.u.array_nestable.length = LTTNG_HOSTNAME_CTX_LEN;
-       field->event_field.type.u.array_nestable.alignment = 0;
-
-       field->get_size = hostname_get_size;
-       field->record = hostname_record;
-       field->get_value = hostname_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_hostname_to_ctx);
diff --git a/lttng-context-interruptible.c b/lttng-context-interruptible.c
deleted file mode 100644 (file)
index 9fbf266..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-interruptible.c
- *
- * LTTng interruptible context.
- *
- * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/irqflags.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-/*
- * An interruptible value of -1 means "unknown".
- */
-
-static
-size_t interruptible_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(int8_t));
-       size += sizeof(int8_t);
-       return size;
-}
-
-static
-void interruptible_record(struct lttng_ctx_field *field,
-               struct lib_ring_buffer_ctx *ctx,
-               struct lttng_channel *chan)
-{
-       struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
-       int8_t interruptible = lttng_probe_ctx->interruptible;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(interruptible));
-       chan->ops->event_write(ctx, &interruptible, sizeof(interruptible));
-}
-
-static
-void interruptible_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       int8_t interruptible = lttng_probe_ctx->interruptible;
-
-       value->s64 = interruptible;
-}
-
-int lttng_add_interruptible_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "interruptible")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "interruptible";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(int8_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(int8_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(int8_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = interruptible_get_size;
-       field->record = interruptible_record;
-       field->get_value = interruptible_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_interruptible_to_ctx);
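
Every context field in these files computes its size the same way: pad
the current payload offset up to the field's natural alignment, then add
sizeof(field). A userspace sketch of that arithmetic, where align_pad()
is a hypothetical stand-in for lib_ring_buffer_align():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Padding needed to round offset up to a power-of-two alignment. */
static size_t align_pad(size_t offset, size_t align)
{
        return -offset & (align - 1);
}

static size_t field_size(size_t offset, size_t align, size_t len)
{
        return align_pad(offset, align) + len;
}

int main(void)
{
        size_t offset = 13;     /* arbitrary current payload offset */

        /* int8_t interruptible: byte-aligned, never padded */
        printf("int8_t:   %zu byte(s)\n",
               field_size(offset, _Alignof(int8_t), sizeof(int8_t)));
        /* uint64_t field: 3 padding bytes from offset 13 plus 8 bytes
         * of payload on a typical LP64 ABI */
        printf("uint64_t: %zu byte(s)\n",
               field_size(offset, _Alignof(uint64_t), sizeof(uint64_t)));
        return 0;
}
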
diff --git a/lttng-context-ipc-ns.c b/lttng-context-ipc-ns.c
deleted file mode 100644 (file)
index a112922..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-ipc-ns.c
- *
- * LTTng ipc namespace context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/ipc_namespace.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/namespace.h>
-#include <lttng/tracer.h>
-
-#if defined(CONFIG_IPC_NS) && \
-       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
-
-static
-size_t ipc_ns_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       size += sizeof(unsigned int);
-       return size;
-}
-
-static
-void ipc_ns_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       unsigned int ipc_ns_inum = 0;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        *
-        * As documented in 'linux/nsproxy.h' namespaces access rules, no
-        * precautions should be taken when accessing the current task's
-        * namespaces, just dereference the pointers.
-        */
-       if (current->nsproxy)
-               ipc_ns_inum = current->nsproxy->ipc_ns->lttng_ns_inum;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(ipc_ns_inum));
-       chan->ops->event_write(ctx, &ipc_ns_inum, sizeof(ipc_ns_inum));
-}
-
-static
-void ipc_ns_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       unsigned int ipc_ns_inum = 0;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        *
-        * As documented in 'linux/nsproxy.h' namespaces access rules, no
-        * precautions should be taken when accessing the current task's
-        * namespaces, just dereference the pointers.
-        */
-       if (current->nsproxy)
-               ipc_ns_inum = current->nsproxy->ipc_ns->lttng_ns_inum;
-
-       value->s64 = ipc_ns_inum;
-}
-
-int lttng_add_ipc_ns_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "ipc_ns")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "ipc_ns";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = ipc_ns_get_size;
-       field->record = ipc_ns_record;
-       field->get_value = ipc_ns_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_ipc_ns_to_ctx);
-
-#endif
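
The ipc_ns value recorded above is the ipc namespace inode number, the
same inode userspace can observe on /proc/self/ns/ipc. A small
cross-check, assuming a mounted /proc:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        struct stat st;

        if (stat("/proc/self/ns/ipc", &st) != 0) {
                perror("stat");
                return 1;
        }
        printf("ipc_ns inum: %lu\n", (unsigned long) st.st_ino);
        return 0;
}
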
diff --git a/lttng-context-migratable.c b/lttng-context-migratable.c
deleted file mode 100644 (file)
index 207e02f..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-migratable.c
- *
- * LTTng migratable context.
- *
- * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/irqflags.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-static
-size_t migratable_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(uint8_t));
-       size += sizeof(uint8_t);
-       return size;
-}
-
-static
-void migratable_record(struct lttng_ctx_field *field,
-               struct lib_ring_buffer_ctx *ctx,
-               struct lttng_channel *chan)
-{
-       uint8_t migratable = !current->migrate_disable;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(migratable));
-       chan->ops->event_write(ctx, &migratable, sizeof(migratable));
-}
-
-static
-void migratable_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = !current->migrate_disable;
-}
-
-int lttng_add_migratable_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "migratable")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "migratable";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(uint8_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(uint8_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uint8_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = migratable_get_size;
-       field->record = migratable_record;
-       field->get_value = migratable_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_migratable_to_ctx);
diff --git a/lttng-context-mnt-ns.c b/lttng-context-mnt-ns.c
deleted file mode 100644 (file)
index 7fce5dd..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-mnt-ns.c
- *
- * LTTng mount namespace context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <linux/nsproxy.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/namespace.h>
-#include <lttng/tracer.h>
-
-#if !defined(LTTNG_MNT_NS_MISSING_HEADER) && \
-       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
-
-#include <../fs/mount.h>
-
-static
-size_t mnt_ns_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       size += sizeof(unsigned int);
-       return size;
-}
-
-static
-void mnt_ns_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       unsigned int mnt_ns_inum = 0;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        *
-        * As documented in 'linux/nsproxy.h' namespaces access rules, no
-        * precautions should be taken when accessing the current task's
-        * namespaces, just dereference the pointers.
-        */
-       if (current->nsproxy)
-               mnt_ns_inum = current->nsproxy->mnt_ns->lttng_ns_inum;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(mnt_ns_inum));
-       chan->ops->event_write(ctx, &mnt_ns_inum, sizeof(mnt_ns_inum));
-}
-
-static
-void mnt_ns_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       unsigned int mnt_ns_inum = 0;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        *
-        * As documented in 'linux/nsproxy.h' namespaces access rules, no
-        * precautions should be taken when accessing the current task's
-        * namespaces, just dereference the pointers.
-        */
-       if (current->nsproxy)
-               mnt_ns_inum = current->nsproxy->mnt_ns->lttng_ns_inum;
-
-       value->s64 = mnt_ns_inum;
-}
-
-int lttng_add_mnt_ns_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "mnt_ns")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "mnt_ns";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = mnt_ns_get_size;
-       field->record = mnt_ns_record;
-       field->get_value = mnt_ns_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_mnt_ns_to_ctx);
-
-#endif
diff --git a/lttng-context-need-reschedule.c b/lttng-context-need-reschedule.c
deleted file mode 100644 (file)
index 7f8deec..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-need-reschedule.c
- *
- * LTTng need_reschedule context.
- *
- * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/irqflags.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-static
-size_t need_reschedule_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(uint8_t));
-       size += sizeof(uint8_t);
-       return size;
-}
-
-static
-void need_reschedule_record(struct lttng_ctx_field *field,
-               struct lib_ring_buffer_ctx *ctx,
-               struct lttng_channel *chan)
-{
-       uint8_t need_reschedule = test_tsk_need_resched(current);
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(need_reschedule));
-       chan->ops->event_write(ctx, &need_reschedule, sizeof(need_reschedule));
-}
-
-static
-void need_reschedule_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = test_tsk_need_resched(current);
-}
-
-int lttng_add_need_reschedule_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "need_reschedule")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "need_reschedule";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(uint8_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(uint8_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uint8_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = need_reschedule_get_size;
-       field->record = need_reschedule_record;
-       field->get_value = need_reschedule_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_need_reschedule_to_ctx);
diff --git a/lttng-context-net-ns.c b/lttng-context-net-ns.c
deleted file mode 100644 (file)
index 879a61b..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-net-ns.c
- *
- * LTTng net namespace context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/namespace.h>
-#include <lttng/tracer.h>
-
-#if defined(CONFIG_NET_NS) && \
-       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
-
-static
-size_t net_ns_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       size += sizeof(unsigned int);
-       return size;
-}
-
-static
-void net_ns_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       unsigned int net_ns_inum = 0;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        *
-        * As documented in 'linux/nsproxy.h' namespaces access rules, no
-        * precautions should be taken when accessing the current task's
-        * namespaces, just dereference the pointers.
-        */
-       if (current->nsproxy)
-               net_ns_inum = current->nsproxy->net_ns->lttng_ns_inum;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(net_ns_inum));
-       chan->ops->event_write(ctx, &net_ns_inum, sizeof(net_ns_inum));
-}
-
-static
-void net_ns_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       unsigned int net_ns_inum = 0;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        *
-        * As documented in 'linux/nsproxy.h' namespaces access rules, no
-        * precautions should be taken when accessing the current task's
-        * namespaces, just dereference the pointers.
-        */
-       if (current->nsproxy)
-               net_ns_inum = current->nsproxy->net_ns->lttng_ns_inum;
-
-       value->s64 = net_ns_inum;
-}
-
-int lttng_add_net_ns_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "net_ns")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "net_ns";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = net_ns_get_size;
-       field->record = net_ns_record;
-       field->get_value = net_ns_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_net_ns_to_ctx);
-
-#endif
diff --git a/lttng-context-nice.c b/lttng-context-nice.c
deleted file mode 100644 (file)
index aaa3643..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-nice.c
- *
- * LTTng nice context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-static
-size_t nice_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(int));
-       size += sizeof(int);
-       return size;
-}
-
-static
-void nice_record(struct lttng_ctx_field *field,
-               struct lib_ring_buffer_ctx *ctx,
-               struct lttng_channel *chan)
-{
-       int nice;
-
-       nice = task_nice(current);
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(nice));
-       chan->ops->event_write(ctx, &nice, sizeof(nice));
-}
-
-static
-void nice_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = task_nice(current);
-}
-
-int lttng_add_nice_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "nice")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "nice";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(int) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(int) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(int);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = nice_get_size;
-       field->record = nice_record;
-       field->get_value = nice_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_nice_to_ctx);
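
The value recorded by task_nice(current) above is the one a process can
read for itself with getpriority(2). Userspace sketch:

#include <errno.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        int nice_val;

        errno = 0;      /* -1 is a valid nice value, so test errno */
        nice_val = getpriority(PRIO_PROCESS, 0);
        if (nice_val == -1 && errno != 0) {
                perror("getpriority");
                return 1;
        }
        printf("nice: %d\n", nice_val);
        return 0;
}
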
diff --git a/lttng-context-perf-counters.c b/lttng-context-perf-counters.c
deleted file mode 100644 (file)
index 5784f75..0000000
+++ /dev/null
@@ -1,364 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-perf-counters.c
- *
- * LTTng performance monitoring counters (perf-counters) integration module.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/perf_event.h>
-#include <linux/list.h>
-#include <linux/string.h>
-#include <linux/cpu.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/perf.h>
-#include <lttng/tracer.h>
-
-static
-size_t perf_counter_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
-       size += sizeof(uint64_t);
-       return size;
-}
-
-static
-void perf_counter_record(struct lttng_ctx_field *field,
-                        struct lib_ring_buffer_ctx *ctx,
-                        struct lttng_channel *chan)
-{
-       struct perf_event *event;
-       uint64_t value;
-
-       event = field->u.perf_counter->e[ctx->cpu];
-       if (likely(event)) {
-               if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
-                       value = 0;
-               } else {
-                       event->pmu->read(event);
-                       value = local64_read(&event->count);
-               }
-       } else {
-               /*
-                * Perf chooses not to be clever and not to support enabling a
-                * perf counter before the cpu is brought up. Therefore, we
-                * need to handle events (e.g. scheduler events) that arrive
-                * before the counter is set up. Write an arbitrary 0 in this
-                * case.
-                */
-               value = 0;
-       }
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
-       chan->ops->event_write(ctx, &value, sizeof(value));
-}
-
-#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
-static
-void overflow_callback(struct perf_event *event,
-                      struct perf_sample_data *data,
-                      struct pt_regs *regs)
-{
-}
-#else
-static
-void overflow_callback(struct perf_event *event, int nmi,
-                      struct perf_sample_data *data,
-                      struct pt_regs *regs)
-{
-}
-#endif
-
-static
-void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
-{
-       struct perf_event **events = field->u.perf_counter->e;
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-       {
-               int ret;
-
-               ret = cpuhp_state_remove_instance(lttng_hp_online,
-                       &field->u.perf_counter->cpuhp_online.node);
-               WARN_ON(ret);
-               ret = cpuhp_state_remove_instance(lttng_hp_prepare,
-                       &field->u.perf_counter->cpuhp_prepare.node);
-               WARN_ON(ret);
-       }
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-       {
-               int cpu;
-
-               get_online_cpus();
-               for_each_online_cpu(cpu)
-                       perf_event_release_kernel(events[cpu]);
-               put_online_cpus();
-#ifdef CONFIG_HOTPLUG_CPU
-               unregister_cpu_notifier(&field->u.perf_counter->nb);
-#endif
-       }
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-       kfree(field->event_field.name);
-       kfree(field->u.perf_counter->attr);
-       lttng_kvfree(events);
-       kfree(field->u.perf_counter);
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-
-int lttng_cpuhp_perf_counter_online(unsigned int cpu,
-               struct lttng_cpuhp_node *node)
-{
-       struct lttng_perf_counter_field *perf_field =
-               container_of(node, struct lttng_perf_counter_field,
-                               cpuhp_online);
-       struct perf_event **events = perf_field->e;
-       struct perf_event_attr *attr = perf_field->attr;
-       struct perf_event *pevent;
-
-       pevent = wrapper_perf_event_create_kernel_counter(attr,
-                       cpu, NULL, overflow_callback);
-       if (!pevent || IS_ERR(pevent))
-               return -EINVAL;
-       if (pevent->state == PERF_EVENT_STATE_ERROR) {
-               perf_event_release_kernel(pevent);
-               return -EINVAL;
-       }
-       barrier();      /* Create perf counter before setting event */
-       events[cpu] = pevent;
-       return 0;
-}
-
-int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
-               struct lttng_cpuhp_node *node)
-{
-       struct lttng_perf_counter_field *perf_field =
-               container_of(node, struct lttng_perf_counter_field,
-                               cpuhp_prepare);
-       struct perf_event **events = perf_field->e;
-       struct perf_event *pevent;
-
-       pevent = events[cpu];
-       events[cpu] = NULL;
-       barrier();      /* NULLify event before perf counter teardown */
-       perf_event_release_kernel(pevent);
-       return 0;
-}
-
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/**
- *     lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
- *     @nb: notifier block
- *     @action: hotplug action to take
- *     @hcpu: CPU number
- *
- *     Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- *
- * We can set up perf counters when the cpu is online (up prepare seems to
- * be too soon).
- */
-static
-int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
-                                                unsigned long action,
-                                                void *hcpu)
-{
-       unsigned int cpu = (unsigned long) hcpu;
-       struct lttng_perf_counter_field *perf_field =
-               container_of(nb, struct lttng_perf_counter_field, nb);
-       struct perf_event **events = perf_field->e;
-       struct perf_event_attr *attr = perf_field->attr;
-       struct perf_event *pevent;
-
-       if (!perf_field->hp_enable)
-               return NOTIFY_OK;
-
-       switch (action) {
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               pevent = wrapper_perf_event_create_kernel_counter(attr,
-                               cpu, NULL, overflow_callback);
-               if (!pevent || IS_ERR(pevent))
-                       return NOTIFY_BAD;
-               if (pevent->state == PERF_EVENT_STATE_ERROR) {
-                       perf_event_release_kernel(pevent);
-                       return NOTIFY_BAD;
-               }
-               barrier();      /* Create perf counter before setting event */
-               events[cpu] = pevent;
-               break;
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               pevent = events[cpu];
-               events[cpu] = NULL;
-               barrier();      /* NULLify event before perf counter teardown */
-               perf_event_release_kernel(pevent);
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-#endif
-
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-int lttng_add_perf_counter_to_ctx(uint32_t type,
-                                 uint64_t config,
-                                 const char *name,
-                                 struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-       struct lttng_perf_counter_field *perf_field;
-       struct perf_event **events;
-       struct perf_event_attr *attr;
-       int ret;
-       char *name_alloc;
-
-       events = lttng_kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
-       if (!events)
-               return -ENOMEM;
-
-       attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
-       if (!attr) {
-               ret = -ENOMEM;
-               goto error_attr;
-       }
-
-       attr->type = type;
-       attr->config = config;
-       attr->size = sizeof(struct perf_event_attr);
-       attr->pinned = 1;
-       attr->disabled = 0;
-
-       perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
-       if (!perf_field) {
-               ret = -ENOMEM;
-               goto error_alloc_perf_field;
-       }
-       perf_field->e = events;
-       perf_field->attr = attr;
-
-       name_alloc = kstrdup(name, GFP_KERNEL);
-       if (!name_alloc) {
-               ret = -ENOMEM;
-               goto name_alloc_error;
-       }
-
-       field = lttng_append_context(ctx);
-       if (!field) {
-               ret = -ENOMEM;
-               goto append_context_error;
-       }
-       if (lttng_find_context(*ctx, name_alloc)) {
-               ret = -EEXIST;
-               goto find_error;
-       }
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-
-       perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
-       ret = cpuhp_state_add_instance(lttng_hp_prepare,
-               &perf_field->cpuhp_prepare.node);
-       if (ret)
-               goto cpuhp_prepare_error;
-
-       perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
-       ret = cpuhp_state_add_instance(lttng_hp_online,
-               &perf_field->cpuhp_online.node);
-       if (ret)
-               goto cpuhp_online_error;
-
-#else  /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-       {
-               int cpu;
-
-#ifdef CONFIG_HOTPLUG_CPU
-               perf_field->nb.notifier_call =
-                       lttng_perf_counter_cpu_hp_callback;
-               perf_field->nb.priority = 0;
-               register_cpu_notifier(&perf_field->nb);
-#endif
-               get_online_cpus();
-               for_each_online_cpu(cpu) {
-                       events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
-                                               cpu, NULL, overflow_callback);
-                       if (!events[cpu] || IS_ERR(events[cpu])) {
-                               ret = -EINVAL;
-                               goto counter_error;
-                       }
-                       if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
-                               ret = -EBUSY;
-                               goto counter_busy;
-                       }
-               }
-               put_online_cpus();
-               perf_field->hp_enable = 1;
-       }
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-       field->destroy = lttng_destroy_perf_counter_field;
-
-       field->event_field.name = name_alloc;
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(uint64_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uint64_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = perf_counter_get_size;
-       field->record = perf_counter_record;
-       field->u.perf_counter = perf_field;
-       lttng_context_update(*ctx);
-
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-cpuhp_online_error:
-       {
-               int remove_ret;
-
-               remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
-                               &perf_field->cpuhp_prepare.node);
-               WARN_ON(remove_ret);
-       }
-cpuhp_prepare_error:
-#else  /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-counter_busy:
-counter_error:
-       {
-               int cpu;
-
-               for_each_online_cpu(cpu) {
-                       if (events[cpu] && !IS_ERR(events[cpu]))
-                               perf_event_release_kernel(events[cpu]);
-               }
-               put_online_cpus();
-#ifdef CONFIG_HOTPLUG_CPU
-               unregister_cpu_notifier(&perf_field->nb);
-#endif
-       }
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-find_error:
-       lttng_remove_context_field(ctx, field);
-append_context_error:
-       kfree(name_alloc);
-name_alloc_error:
-       kfree(perf_field);
-error_alloc_perf_field:
-       kfree(attr);
-error_attr:
-       lttng_kvfree(events);
-       return ret;
-}
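
The per-CPU kernel counters created above use the same perf_event_attr
setup (type, config, pinned) as a userspace perf_event_open(2) call. A
sketch opening and reading one pinned hardware counter, here attached to
the calling thread rather than to a CPU as the kernel code does; error
handling is minimal:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.pinned = 1;        /* same flag the code above sets */

        /* perf_event_open(2) has no glibc wrapper */
        fd = syscall(__NR_perf_event_open, &attr, 0 /* this thread */,
                     -1 /* any cpu */, -1 /* no group */, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("cycles: %llu\n", (unsigned long long) count);
        close(fd);
        return 0;
}
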
diff --git a/lttng-context-pid-ns.c b/lttng-context-pid-ns.c
deleted file mode 100644 (file)
index 721485d..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-pid-ns.c
- *
- * LTTng pid namespace context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/pid_namespace.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/namespace.h>
-#include <lttng/tracer.h>
-
-#if defined(CONFIG_PID_NS) && \
-       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
-
-static
-size_t pid_ns_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       size += sizeof(unsigned int);
-       return size;
-}
-
-static
-void pid_ns_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       struct pid_namespace *ns;
-       unsigned int pid_ns_inum = 0;
-
-       /*
-        * The pid namespace is an exception -- it's accessed using
-        * task_active_pid_ns. The pid namespace in nsproxy is the
-        * namespace that children will use.
-        */
-       ns = task_active_pid_ns(current);
-
-       if (ns)
-               pid_ns_inum = ns->lttng_ns_inum;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(pid_ns_inum));
-       chan->ops->event_write(ctx, &pid_ns_inum, sizeof(pid_ns_inum));
-}
-
-static
-void pid_ns_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       struct pid_namespace *ns;
-       unsigned int pid_ns_inum = 0;
-
-       /*
-        * The pid namespace is an exception -- it's accessed using
-        * task_active_pid_ns. The pid namespace in nsproxy is the
-        * namespace that children will use.
-        */
-       ns = task_active_pid_ns(current);
-
-       if (ns)
-               pid_ns_inum = ns->lttng_ns_inum;
-
-       value->s64 = pid_ns_inum;
-}
-
-int lttng_add_pid_ns_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "pid_ns")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "pid_ns";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = pid_ns_get_size;
-       field->record = pid_ns_record;
-       field->get_value = pid_ns_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_pid_ns_to_ctx);
-
-#endif
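
The distinction drawn in the comments above is visible from userspace:
/proc/self/ns/pid reflects task_active_pid_ns(), while
/proc/self/ns/pid_for_children (Linux 4.12+) reflects the nsproxy pid
namespace that future children will use. Sketch, assuming a mounted
/proc:

#include <stdio.h>
#include <sys/stat.h>

static void show(const char *path)
{
        struct stat st;

        if (stat(path, &st) == 0)
                printf("%s: %lu\n", path, (unsigned long) st.st_ino);
        else
                perror(path);
}

int main(void)
{
        show("/proc/self/ns/pid");
        show("/proc/self/ns/pid_for_children");
        return 0;
}
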
diff --git a/lttng-context-pid.c b/lttng-context-pid.c
deleted file mode 100644 (file)
index f3e4aef..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-pid.c
- *
- * LTTng PID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-static
-size_t pid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
-       size += sizeof(pid_t);
-       return size;
-}
-
-static
-void pid_record(struct lttng_ctx_field *field,
-               struct lib_ring_buffer_ctx *ctx,
-               struct lttng_channel *chan)
-{
-       pid_t pid;
-
-       pid = task_tgid_nr(current);
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(pid));
-       chan->ops->event_write(ctx, &pid, sizeof(pid));
-}
-
-static
-void pid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = task_tgid_nr(current);
-}
-
-int lttng_add_pid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "pid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "pid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = pid_get_size;
-       field->record = pid_record;
-       field->get_value = pid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_pid_to_ctx);
diff --git a/lttng-context-ppid.c b/lttng-context-ppid.c
deleted file mode 100644 (file)
index 854c515..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-ppid.c
- *
- * LTTng PPID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/syscalls.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-static
-size_t ppid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
-       size += sizeof(pid_t);
-       return size;
-}
-
-static
-void ppid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       pid_t ppid;
-
-       /*
-        * TODO: when we eventually add RCU subsystem instrumentation,
-        * taking the rcu read lock here will trigger RCU tracing
-        * recursively. We should modify the kernel synchronization so
-        * it synchronizes both for RCU and RCU sched, and rely on
-        * rcu_read_lock_sched_notrace.
-        */
-       rcu_read_lock();
-       ppid = task_tgid_nr(current->real_parent);
-       rcu_read_unlock();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(ppid));
-       chan->ops->event_write(ctx, &ppid, sizeof(ppid));
-}
-
-static
-void ppid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       pid_t ppid;
-
-       /*
-        * TODO: when we eventually add RCU subsystem instrumentation,
-        * taking the rcu read lock here will trigger RCU tracing
-        * recursively. We should modify the kernel synchronization so
-        * it synchronizes both for RCU and RCU sched, and rely on
-        * rcu_read_lock_sched_notrace.
-        */
-       rcu_read_lock();
-       ppid = task_tgid_nr(current->real_parent);
-       rcu_read_unlock();
-       value->s64 = ppid;
-}
-
-int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "ppid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "ppid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = ppid_get_size;
-       field->record = ppid_record;
-       field->get_value = ppid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_ppid_to_ctx);
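
Userspace analogue of the value above: task_tgid_nr(current->real_parent)
is what getppid(2) returns, subject to the same reparenting races the
RCU read-side section guards against. Sketch:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        printf("ppid: %d\n", (int) getppid());
        return 0;
}
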
diff --git a/lttng-context-preemptible.c b/lttng-context-preemptible.c
deleted file mode 100644 (file)
index 6130a1a..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-preemptible.c
- *
- * LTTng preemptible context.
- *
- * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/irqflags.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-/*
- * We nest twice in preempt disabling within LTTng: one nesting is done
- * by the instrumentation (tracepoint, kprobes, kretprobes, syscall
- * tracepoint), and the second is within the lib ring buffer
- * lib_ring_buffer_get_cpu().
- */
-#define LTTNG_PREEMPT_DISABLE_NESTING  2
-
-static
-size_t preemptible_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(uint8_t));
-       size += sizeof(uint8_t);
-       return size;
-}
-
-static
-void preemptible_record(struct lttng_ctx_field *field,
-               struct lib_ring_buffer_ctx *ctx,
-               struct lttng_channel *chan)
-{
-       int pc = preempt_count();
-       uint8_t preemptible = 0;
-
-       WARN_ON_ONCE(pc < LTTNG_PREEMPT_DISABLE_NESTING);
-       if (pc == LTTNG_PREEMPT_DISABLE_NESTING)
-               preemptible = 1;
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(preemptible));
-       chan->ops->event_write(ctx, &preemptible, sizeof(preemptible));
-}
-
-static
-void preemptible_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       int pc = preempt_count();
-
-       WARN_ON_ONCE(pc < LTTNG_PREEMPT_DISABLE_NESTING);
-       if (pc == LTTNG_PREEMPT_DISABLE_NESTING)
-               value->s64 = 1;
-       else
-               value->s64 = 0;
-}
-
-int lttng_add_preemptible_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "preemptible")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "preemptible";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(uint8_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(uint8_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uint8_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = preemptible_get_size;
-       field->record = preemptible_record;
-       field->get_value = preemptible_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_preemptible_to_ctx);
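
The check above relies on a fixed baseline: by the time the record
callback runs, LTTng itself holds exactly LTTNG_PREEMPT_DISABLE_NESTING
preempt-disable levels, so the traced code was preemptible exactly when
the count equals that baseline. A plain C sketch of the predicate:

#include <stdio.h>

#define LTTNG_PREEMPT_DISABLE_NESTING   2

/* 1 if the traced code was preemptible when the probe fired */
static int was_preemptible(int preempt_count_at_probe)
{
        return preempt_count_at_probe == LTTNG_PREEMPT_DISABLE_NESTING;
}

int main(void)
{
        printf("%d\n", was_preemptible(2));     /* 1: only LTTng nesting */
        printf("%d\n", was_preemptible(3));     /* 0: caller disabled preemption */
        return 0;
}
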
diff --git a/lttng-context-prio.c b/lttng-context-prio.c
deleted file mode 100644 (file)
index d300445..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-prio.c
- *
- * LTTng priority context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/kallsyms.h>
-#include <lttng/tracer.h>
-
-static
-int (*wrapper_task_prio_sym)(struct task_struct *t);
-
-int wrapper_task_prio_init(void)
-{
-       wrapper_task_prio_sym = (void *) kallsyms_lookup_funcptr("task_prio");
-       if (!wrapper_task_prio_sym) {
-               printk(KERN_WARNING "LTTng: task_prio symbol lookup failed.\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static
-size_t prio_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(int));
-       size += sizeof(int);
-       return size;
-}
-
-static
-void prio_record(struct lttng_ctx_field *field,
-               struct lib_ring_buffer_ctx *ctx,
-               struct lttng_channel *chan)
-{
-       int prio;
-
-       prio = wrapper_task_prio_sym(current);
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(prio));
-       chan->ops->event_write(ctx, &prio, sizeof(prio));
-}
-
-static
-void prio_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = wrapper_task_prio_sym(current);
-}
-
-int lttng_add_prio_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-       int ret;
-
-       if (!wrapper_task_prio_sym) {
-               ret = wrapper_task_prio_init();
-               if (ret)
-                       return ret;
-       }
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "prio")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "prio";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(int) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(int) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(int);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = prio_get_size;
-       field->record = prio_record;
-       field->get_value = prio_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_prio_to_ctx);
diff --git a/lttng-context-procname.c b/lttng-context-procname.c
deleted file mode 100644 (file)
index fb5c36b..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-procname.c
- *
- * LTTng procname context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-#include <lttng/endian.h>
-
-static
-size_t procname_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += sizeof(current->comm);
-       return size;
-}
-
-/*
- * Racy read of procname: we simply copy the whole comm array.
- * This only races with writes to /proc/<task>/comm.
- * Taking a mutex for each event would be cumbersome, could crash in
- * IRQ context, and could deadlock the lockdep tracer.
- */
-static
-void procname_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       chan->ops->event_write(ctx, current->comm, sizeof(current->comm));
-}
-
-static
-void procname_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->str = current->comm;
-}
-
-static const struct lttng_type procname_array_elem_type =
-       __type_integer(char, 0, 0, -1, __BYTE_ORDER, 10, UTF8);
-
-int lttng_add_procname_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "procname")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "procname";
-       field->event_field.type.atype = atype_array_nestable;
-       field->event_field.type.u.array_nestable.elem_type = &procname_array_elem_type;
-       field->event_field.type.u.array_nestable.length = sizeof(current->comm);
-       field->event_field.type.u.array_nestable.alignment = 0;
-
-       field->get_size = procname_get_size;
-       field->record = procname_record;
-       field->get_value = procname_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_procname_to_ctx);
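
The recorded array is current->comm (TASK_COMM_LEN, 16 bytes); a process
can read its own copy with prctl(PR_GET_NAME). Userspace sketch:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        char comm[16] = "";     /* TASK_COMM_LEN */

        if (prctl(PR_GET_NAME, comm, 0, 0, 0) != 0) {
                perror("prctl");
                return 1;
        }
        printf("procname: %s\n", comm);
        return 0;
}
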
diff --git a/lttng-context-sgid.c b/lttng-context-sgid.c
deleted file mode 100644 (file)
index 18f1b83..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-sgid.c
- *
- * LTTng saved set-group-ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t sgid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
-       size += sizeof(gid_t);
-       return size;
-}
-
-static
-void sgid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       gid_t sgid;
-
-       sgid = lttng_current_sgid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(sgid));
-       chan->ops->event_write(ctx, &sgid, sizeof(sgid));
-}
-
-static
-void sgid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_sgid();
-}
-
-int lttng_add_sgid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "sgid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "sgid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = sgid_get_size;
-       field->record = sgid_record;
-       field->get_value = sgid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_sgid_to_ctx);
diff --git a/lttng-context-suid.c b/lttng-context-suid.c
deleted file mode 100644 (file)
index 1aa52dc..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-suid.c
- *
- * LTTng saved set-user-ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t suid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
-       size += sizeof(uid_t);
-       return size;
-}
-
-static
-void suid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       uid_t suid;
-
-       suid = lttng_current_suid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(suid));
-       chan->ops->event_write(ctx, &suid, sizeof(suid));
-}
-
-static
-void suid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_suid();
-}
-
-int lttng_add_suid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "suid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "suid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = suid_get_size;
-       field->record = suid_record;
-       field->get_value = suid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_suid_to_ctx);
diff --git a/lttng-context-tid.c b/lttng-context-tid.c
deleted file mode 100644 (file)
index 3116130..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-tid.c
- *
- * LTTng TID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-static
-size_t tid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
-       size += sizeof(pid_t);
-       return size;
-}
-
-static
-void tid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       pid_t tid;
-
-       tid = task_pid_nr(current);
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(tid));
-       chan->ops->event_write(ctx, &tid, sizeof(tid));
-}
-
-static
-void tid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       pid_t tid;
-
-       tid = task_pid_nr(current);
-       value->s64 = tid;
-}
-
-int lttng_add_tid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "tid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "tid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = tid_get_size;
-       field->record = tid_record;
-       field->get_value = tid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_tid_to_ctx);
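[annotation] A note on semantics: task_pid_nr() used above returns the TID as seen from the initial PID namespace, while the vtid context further down in this diff (lttng-context-vtid.c) records the namespace-relative value. Both helpers are standard kernel API; the contrast in one place:

	pid_t tid  = task_pid_nr(current);   /* global TID, initial PID namespace */
	pid_t vtid = task_pid_vnr(current);  /* TID inside current's PID namespace */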
diff --git a/lttng-context-uid.c b/lttng-context-uid.c
deleted file mode 100644 (file)
index c48bd0a..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-uid.c
- *
- * LTTng real user ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t uid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
-       size += sizeof(uid_t);
-       return size;
-}
-
-static
-void uid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       uid_t uid;
-
-       uid = lttng_current_uid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uid));
-       chan->ops->event_write(ctx, &uid, sizeof(uid));
-}
-
-static
-void uid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_uid();
-}
-
-int lttng_add_uid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "uid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "uid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = uid_get_size;
-       field->record = uid_record;
-       field->get_value = uid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_uid_to_ctx);
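[annotation] The credential contexts (uid, euid, suid and friends) read through the lttng_current_*() wrappers from wrapper/user_namespace.h rather than poking current_cred() directly. That wrapper is not part of this diff; presumably it translates the kernel-internal kuid_t through the initial user namespace, along these lines (a sketch under that assumption, hypothetical name):

	#include <linux/cred.h>
	#include <linux/uidgid.h>

	/* Sketch only: the real definition lives in wrapper/user_namespace.h. */
	static inline uid_t lttng_current_uid_sketch(void)
	{
		return from_kuid_munged(&init_user_ns, current_uid());
	}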
diff --git a/lttng-context-user-ns.c b/lttng-context-user-ns.c
deleted file mode 100644 (file)
index b2c1189..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-user-ns.c
- *
- * LTTng user namespace context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/user_namespace.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/namespace.h>
-#include <lttng/tracer.h>
-
-#if defined(CONFIG_USER_NS) && \
-       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
-
-static
-size_t user_ns_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       size += sizeof(unsigned int);
-       return size;
-}
-
-static
-void user_ns_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       unsigned int user_ns_inum = 0;
-
-       if (current_user_ns())
-               user_ns_inum = current_user_ns()->lttng_ns_inum;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(user_ns_inum));
-       chan->ops->event_write(ctx, &user_ns_inum, sizeof(user_ns_inum));
-}
-
-static
-void user_ns_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       unsigned int user_ns_inum = 0;
-
-       if (current_user_ns())
-               user_ns_inum = current_user_ns()->lttng_ns_inum;
-
-       value->s64 = user_ns_inum;
-}
-
-int lttng_add_user_ns_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "user_ns")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "user_ns";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = user_ns_get_size;
-       field->record = user_ns_record;
-       field->get_value = user_ns_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_user_ns_to_ctx);
-
-#endif
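[annotation] When CONFIG_USER_NS is disabled or the kernel predates 3.8, the guard above compiles the whole implementation out. Since lttng_context_init() (see lttng-context.c later in this diff) treats -ENOSYS from the namespace contexts as non-fatal, the declaration presumably falls back to a stub of this shape (an assumption; the actual stub lives in a header outside this diff):

	#if !defined(CONFIG_USER_NS) || \
		(LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
	/* Hypothetical fallback matching the -ENOSYS handling in lttng_context_init(). */
	static inline
	int lttng_add_user_ns_to_ctx(struct lttng_ctx **ctx)
	{
		return -ENOSYS;
	}
	#endif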
diff --git a/lttng-context-uts-ns.c b/lttng-context-uts-ns.c
deleted file mode 100644 (file)
index b4284a5..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-uts-ns.c
- *
- * LTTng UTS namespace context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/utsname.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/namespace.h>
-#include <lttng/tracer.h>
-
-#if defined(CONFIG_UTS_NS) && \
-       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
-
-static
-size_t uts_ns_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       size += sizeof(unsigned int);
-       return size;
-}
-
-static
-void uts_ns_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       unsigned int uts_ns_inum = 0;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        *
-        * As documented in the namespace access rules in 'linux/nsproxy.h',
-        * no precautions are needed when accessing the current task's
-        * namespaces; just dereference the pointers.
-        */
-       if (current->nsproxy)
-               uts_ns_inum = current->nsproxy->uts_ns->lttng_ns_inum;
-
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uts_ns_inum));
-       chan->ops->event_write(ctx, &uts_ns_inum, sizeof(uts_ns_inum));
-}
-
-static
-void uts_ns_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       unsigned int uts_ns_inum = 0;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        *
-        * As documented in the namespace access rules in 'linux/nsproxy.h',
-        * no precautions are needed when accessing the current task's
-        * namespaces; just dereference the pointers.
-        */
-       if (current->nsproxy)
-               uts_ns_inum = current->nsproxy->uts_ns->lttng_ns_inum;
-
-       value->s64 = uts_ns_inum;
-}
-
-int lttng_add_uts_ns_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "uts_ns")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "uts_ns";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = uts_ns_get_size;
-       field->record = uts_ns_record;
-       field->get_value = uts_ns_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_uts_ns_to_ctx);
-
-#endif
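[annotation] The lttng_ns_inum member dereferenced above is not a stock kernel field; it comes from wrapper/namespace.h (not in this diff). On kernels that have struct ns_common it presumably maps straight onto the namespace inode number, roughly as follows (an assumption, shown for orientation only):

	/* Hypothetical sketch of the wrapper/namespace.h compatibility mapping. */
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
	#define lttng_ns_inum ns.inum		/* struct ns_common::inum */
	#else
	#define lttng_ns_inum proc_inum		/* pre-ns_common field name */
	#endif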
diff --git a/lttng-context-vegid.c b/lttng-context-vegid.c
deleted file mode 100644 (file)
index 6207e61..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-vegid.c
- *
- * LTTng namespaced effective group ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t vegid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
-       size += sizeof(gid_t);
-       return size;
-}
-
-static
-void vegid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       gid_t vegid;
-
-       vegid = lttng_current_vegid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vegid));
-       chan->ops->event_write(ctx, &vegid, sizeof(vegid));
-}
-
-static
-void vegid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_vegid();
-}
-
-int lttng_add_vegid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "vegid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "vegid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = vegid_get_size;
-       field->record = vegid_record;
-       field->get_value = vegid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_vegid_to_ctx);
diff --git a/lttng-context-veuid.c b/lttng-context-veuid.c
deleted file mode 100644 (file)
index a249820..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-veuid.c
- *
- * LTTng namespaced effective user ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t veuid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
-       size += sizeof(uid_t);
-       return size;
-}
-
-static
-void veuid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       uid_t veuid;
-
-       veuid = lttng_current_veuid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(veuid));
-       chan->ops->event_write(ctx, &veuid, sizeof(veuid));
-}
-
-static
-void veuid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_veuid();
-}
-
-int lttng_add_veuid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "veuid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "veuid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = veuid_get_size;
-       field->record = veuid_record;
-       field->get_value = veuid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_veuid_to_ctx);
diff --git a/lttng-context-vgid.c b/lttng-context-vgid.c
deleted file mode 100644 (file)
index a833915..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-vgid.c
- *
- * LTTng namespaced real group ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t vgid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
-       size += sizeof(gid_t);
-       return size;
-}
-
-static
-void vgid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       gid_t vgid;
-
-       vgid = lttng_current_vgid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vgid));
-       chan->ops->event_write(ctx, &vgid, sizeof(vgid));
-}
-
-static
-void vgid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_vgid();
-}
-
-int lttng_add_vgid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "vgid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "vgid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = vgid_get_size;
-       field->record = vgid_record;
-       field->get_value = vgid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_vgid_to_ctx);
diff --git a/lttng-context-vpid.c b/lttng-context-vpid.c
deleted file mode 100644 (file)
index 28178b9..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-vpid.c
- *
- * LTTng vPID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-static
-size_t vpid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
-       size += sizeof(pid_t);
-       return size;
-}
-
-static
-void vpid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       pid_t vpid;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        */
-       if (!current->nsproxy)
-               vpid = 0;
-       else
-               vpid = task_tgid_vnr(current);
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vpid));
-       chan->ops->event_write(ctx, &vpid, sizeof(vpid));
-}
-
-static
-void vpid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       pid_t vpid;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        */
-       if (!current->nsproxy)
-               vpid = 0;
-       else
-               vpid = task_tgid_vnr(current);
-       value->s64 = vpid;
-}
-
-int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "vpid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "vpid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = vpid_get_size;
-       field->record = vpid_record;
-       field->get_value = vpid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_vpid_to_ctx);
diff --git a/lttng-context-vppid.c b/lttng-context-vppid.c
deleted file mode 100644 (file)
index 8757eb2..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-vppid.c
- *
- * LTTng vPPID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/syscalls.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-static
-size_t vppid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
-       size += sizeof(pid_t);
-       return size;
-}
-
-static
-void vppid_record(struct lttng_ctx_field *field,
-                 struct lib_ring_buffer_ctx *ctx,
-                 struct lttng_channel *chan)
-{
-       struct task_struct *parent;
-       pid_t vppid;
-
-       /*
-        * current->nsproxy can be NULL when the task is scheduled out during
-        * exit. pid_vnr uses the current thread's nsproxy for the lookup.
-        */
-
-       /*
-        * TODO: when we eventually add RCU subsystem instrumentation,
-        * taking the rcu read lock here will trigger RCU tracing
-        * recursively. We should modify the kernel synchronization so
-        * it synchronizes both for RCU and RCU sched, and rely on
-        * rcu_read_lock_sched_notrace.
-        */
-
-       rcu_read_lock();
-       parent = rcu_dereference(current->real_parent);
-       if (!current->nsproxy)
-               vppid = 0;
-       else
-               vppid = task_tgid_vnr(parent);
-       rcu_read_unlock();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vppid));
-       chan->ops->event_write(ctx, &vppid, sizeof(vppid));
-}
-
-static
-void vppid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       struct task_struct *parent;
-       pid_t vppid;
-
-       /*
-        * current->nsproxy can be NULL when the task is scheduled out during
-        * exit. pid_vnr uses the current thread's nsproxy for the lookup.
-        */
-
-       /*
-        * TODO: when we eventually add RCU subsystem instrumentation,
-        * taking the rcu read lock here will trigger RCU tracing
-        * recursively. We should modify the kernel synchronization so
-        * it synchronizes both for RCU and RCU sched, and rely on
-        * rcu_read_lock_sched_notrace.
-        */
-
-       rcu_read_lock();
-       parent = rcu_dereference(current->real_parent);
-       if (!current->nsproxy)
-               vppid = 0;
-       else
-               vppid = task_tgid_vnr(parent);
-       rcu_read_unlock();
-       value->s64 = vppid;
-}
-
-int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "vppid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "vppid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = vppid_get_size;
-       field->record = vppid_record;
-       field->get_value = vppid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_vppid_to_ctx);
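[annotation] record() and get_value() above duplicate the parent lookup; factored out, the pattern is an RCU-protected dereference of current->real_parent with the same nsproxy guard used by the other virtualized contexts (hypothetical helper, shown only to make the shared logic explicit):

	static pid_t current_vppid(void)
	{
		struct task_struct *parent;
		pid_t vppid = 0;

		rcu_read_lock();
		parent = rcu_dereference(current->real_parent);
		if (current->nsproxy)
			vppid = task_tgid_vnr(parent);
		rcu_read_unlock();
		return vppid;
	}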
diff --git a/lttng-context-vsgid.c b/lttng-context-vsgid.c
deleted file mode 100644 (file)
index c6a6ea7..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-vsgid.c
- *
- * LTTng namespaced saved set-group-ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t vsgid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
-       size += sizeof(gid_t);
-       return size;
-}
-
-static
-void vsgid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       gid_t vsgid;
-
-       vsgid = lttng_current_vsgid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vsgid));
-       chan->ops->event_write(ctx, &vsgid, sizeof(vsgid));
-}
-
-static
-void vsgid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_vsgid();
-}
-
-int lttng_add_vsgid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "vsgid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "vsgid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = vsgid_get_size;
-       field->record = vsgid_record;
-       field->get_value = vsgid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_vsgid_to_ctx);
diff --git a/lttng-context-vsuid.c b/lttng-context-vsuid.c
deleted file mode 100644 (file)
index c22d430..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-vsuid.c
- *
- * LTTng namespaced saved set-user-ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t vsuid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
-       size += sizeof(uid_t);
-       return size;
-}
-
-static
-void vsuid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       uid_t vsuid;
-
-       vsuid = lttng_current_vsuid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vsuid));
-       chan->ops->event_write(ctx, &vsuid, sizeof(vsuid));
-}
-
-static
-void vsuid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_vsuid();
-}
-
-int lttng_add_vsuid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "vsuid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "vsuid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = vsuid_get_size;
-       field->record = vsuid_record;
-       field->get_value = vsuid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_vsuid_to_ctx);
diff --git a/lttng-context-vtid.c b/lttng-context-vtid.c
deleted file mode 100644 (file)
index 3b0cadc..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-vtid.c
- *
- * LTTng vTID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/tracer.h>
-
-static
-size_t vtid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
-       size += sizeof(pid_t);
-       return size;
-}
-
-static
-void vtid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       pid_t vtid;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        */
-       if (!current->nsproxy)
-               vtid = 0;
-       else
-               vtid = task_pid_vnr(current);
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vtid));
-       chan->ops->event_write(ctx, &vtid, sizeof(vtid));
-}
-
-static
-void vtid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       pid_t vtid;
-
-       /*
-        * nsproxy can be NULL when the task is scheduled out during exit.
-        */
-       if (!current->nsproxy)
-               vtid = 0;
-       else
-               vtid = task_pid_vnr(current);
-       value->s64 = vtid;
-}
-
-int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "vtid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "vtid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = vtid_get_size;
-       field->record = vtid_record;
-       field->get_value = vtid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_vtid_to_ctx);
diff --git a/lttng-context-vuid.c b/lttng-context-vuid.c
deleted file mode 100644 (file)
index e83f898..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context-vuid.c
- *
- * LTTng namespaced real user ID context.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/user_namespace.h>
-
-static
-size_t vuid_get_size(size_t offset)
-{
-       size_t size = 0;
-
-       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
-       size += sizeof(uid_t);
-       return size;
-}
-
-static
-void vuid_record(struct lttng_ctx_field *field,
-                struct lib_ring_buffer_ctx *ctx,
-                struct lttng_channel *chan)
-{
-       uid_t vuid;
-
-       vuid = lttng_current_vuid();
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vuid));
-       chan->ops->event_write(ctx, &vuid, sizeof(vuid));
-}
-
-static
-void vuid_get_value(struct lttng_ctx_field *field,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               union lttng_ctx_value *value)
-{
-       value->s64 = lttng_current_vuid();
-}
-
-int lttng_add_vuid_to_ctx(struct lttng_ctx **ctx)
-{
-       struct lttng_ctx_field *field;
-
-       field = lttng_append_context(ctx);
-       if (!field)
-               return -ENOMEM;
-       if (lttng_find_context(*ctx, "vuid")) {
-               lttng_remove_context_field(ctx, field);
-               return -EEXIST;
-       }
-       field->event_field.name = "vuid";
-       field->event_field.type.atype = atype_integer;
-       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
-       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
-       field->event_field.type.u.integer.reverse_byte_order = 0;
-       field->event_field.type.u.integer.base = 10;
-       field->event_field.type.u.integer.encoding = lttng_encode_none;
-       field->get_size = vuid_get_size;
-       field->record = vuid_record;
-       field->get_value = vuid_get_value;
-       lttng_context_update(*ctx);
-       wrapper_vmalloc_sync_mappings();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_add_vuid_to_ctx);
diff --git a/lttng-context.c b/lttng-context.c
deleted file mode 100644 (file)
index eb5e5d1..0000000
+++ /dev/null
@@ -1,312 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-context.c
- *
- * LTTng trace/channel/event context management.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-
-/*
- * The filter implementation requires that two consecutive "get" operations
- * on the same context, performed by the same thread, return the same result.
- */
-
-/*
- * Static array of contexts, for $ctx filters.
- */
-struct lttng_ctx *lttng_static_ctx;
-
-int lttng_find_context(struct lttng_ctx *ctx, const char *name)
-{
-       unsigned int i;
-
-       for (i = 0; i < ctx->nr_fields; i++) {
-               /* Skip allocated (but non-initialized) contexts */
-               if (!ctx->fields[i].event_field.name)
-                       continue;
-               if (!strcmp(ctx->fields[i].event_field.name, name))
-                       return 1;
-       }
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_find_context);
-
-int lttng_get_context_index(struct lttng_ctx *ctx, const char *name)
-{
-       unsigned int i;
-       const char *subname;
-
-       if (!ctx)
-               return -1;
-       if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
-               subname = name + strlen("$ctx.");
-       } else {
-               subname = name;
-       }
-       for (i = 0; i < ctx->nr_fields; i++) {
-               /* Skip allocated (but non-initialized) contexts */
-               if (!ctx->fields[i].event_field.name)
-                       continue;
-               if (!strcmp(ctx->fields[i].event_field.name, subname))
-                       return i;
-       }
-       return -1;
-}
-EXPORT_SYMBOL_GPL(lttng_get_context_index);
-
-/*
- * Note: as we append context information, the pointer location may change.
- */
-struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx_p)
-{
-       struct lttng_ctx_field *field;
-       struct lttng_ctx *ctx;
-
-       if (!*ctx_p) {
-               *ctx_p = kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
-               if (!*ctx_p)
-                       return NULL;
-               (*ctx_p)->largest_align = 1;
-       }
-       ctx = *ctx_p;
-       if (ctx->nr_fields + 1 > ctx->allocated_fields) {
-               struct lttng_ctx_field *new_fields;
-
-               ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
-               new_fields = lttng_kvzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
-               if (!new_fields)
-                       return NULL;
-               if (ctx->fields)
-                       memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
-               lttng_kvfree(ctx->fields);
-               ctx->fields = new_fields;
-       }
-       field = &ctx->fields[ctx->nr_fields];
-       ctx->nr_fields++;
-       return field;
-}
-EXPORT_SYMBOL_GPL(lttng_append_context);
-
-/*
- * lttng_context_update() should be called at least once between context
- * modification and trace start.
- */
-void lttng_context_update(struct lttng_ctx *ctx)
-{
-       int i;
-       size_t largest_align = 8;       /* in bits */
-
-       for (i = 0; i < ctx->nr_fields; i++) {
-               struct lttng_type *type;
-               size_t field_align = 8;
-
-               type = &ctx->fields[i].event_field.type;
-               switch (type->atype) {
-               case atype_integer:
-                       field_align = type->u.integer.alignment;
-                       break;
-               case atype_array_nestable:
-               {
-                       const struct lttng_type *nested_type;
-
-                       nested_type = type->u.array_nestable.elem_type;
-                       switch (nested_type->atype) {
-                       case atype_integer:
-                               field_align = nested_type->u.integer.alignment;
-                               break;
-                       case atype_string:
-                               break;
-
-                       case atype_array_nestable:
-                       case atype_sequence_nestable:
-                       case atype_struct_nestable:
-                       case atype_variant_nestable:
-                       default:
-                               WARN_ON_ONCE(1);
-                               break;
-                       }
-                       field_align = max_t(size_t, field_align,
-                                       type->u.array_nestable.alignment);
-                       break;
-               }
-               case atype_sequence_nestable:
-               {
-                       const struct lttng_type *nested_type;
-
-                       nested_type = type->u.sequence_nestable.elem_type;
-                       switch (nested_type->atype) {
-                       case atype_integer:
-                               field_align = nested_type->u.integer.alignment;
-                               break;
-
-                       case atype_string:
-                               break;
-
-                       case atype_array_nestable:
-                       case atype_sequence_nestable:
-                       case atype_struct_nestable:
-                       case atype_variant_nestable:
-                       default:
-                               WARN_ON_ONCE(1);
-                               break;
-                       }
-                       field_align = max_t(size_t, field_align,
-                                       type->u.sequence_nestable.alignment);
-                       break;
-               }
-               case atype_string:
-                       break;
-
-               case atype_struct_nestable:
-               case atype_variant_nestable:
-                       break;
-
-               case atype_enum_nestable:
-               default:
-                       WARN_ON_ONCE(1);
-                       break;
-               }
-               largest_align = max_t(size_t, largest_align, field_align);
-       }
-       ctx->largest_align = largest_align >> 3;        /* bits to bytes */
-}
-
-/*
- * Remove last context field.
- */
-void lttng_remove_context_field(struct lttng_ctx **ctx_p,
-                               struct lttng_ctx_field *field)
-{
-       struct lttng_ctx *ctx;
-
-       ctx = *ctx_p;
-       ctx->nr_fields--;
-       WARN_ON_ONCE(&ctx->fields[ctx->nr_fields] != field);
-       memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
-}
-EXPORT_SYMBOL_GPL(lttng_remove_context_field);
-
-void lttng_destroy_context(struct lttng_ctx *ctx)
-{
-       int i;
-
-       if (!ctx)
-               return;
-       for (i = 0; i < ctx->nr_fields; i++) {
-               if (ctx->fields[i].destroy)
-                       ctx->fields[i].destroy(&ctx->fields[i]);
-       }
-       lttng_kvfree(ctx->fields);
-       kfree(ctx);
-}
-
-int lttng_context_init(void)
-{
-       int ret;
-
-       ret = lttng_add_hostname_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_hostname_to_ctx\n");
-       }
-       ret = lttng_add_nice_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_nice_to_ctx\n");
-       }
-       ret = lttng_add_pid_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_pid_to_ctx\n");
-       }
-       ret = lttng_add_ppid_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_ppid_to_ctx\n");
-       }
-       ret = lttng_add_prio_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_prio_to_ctx\n");
-       }
-       ret = lttng_add_procname_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_procname_to_ctx\n");
-       }
-       ret = lttng_add_tid_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_tid_to_ctx\n");
-       }
-       ret = lttng_add_vppid_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_vppid_to_ctx\n");
-       }
-       ret = lttng_add_vtid_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_vtid_to_ctx\n");
-       }
-       ret = lttng_add_vpid_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_vpid_to_ctx\n");
-       }
-       ret = lttng_add_cpu_id_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_cpu_id_to_ctx\n");
-       }
-       ret = lttng_add_interruptible_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_interruptible_to_ctx\n");
-       }
-       ret = lttng_add_need_reschedule_to_ctx(&lttng_static_ctx);
-       if (ret) {
-               printk(KERN_WARNING "Cannot add context lttng_add_need_reschedule_to_ctx\n");
-       }
-       ret = lttng_add_preemptible_to_ctx(&lttng_static_ctx);
-       if (ret && ret != -ENOSYS) {
-               printk(KERN_WARNING "Cannot add context lttng_add_preemptible_to_ctx\n");
-       }
-       ret = lttng_add_migratable_to_ctx(&lttng_static_ctx);
-       if (ret && ret != -ENOSYS) {
-               printk(KERN_WARNING "Cannot add context lttng_add_migratable_to_ctx\n");
-       }
-       ret = lttng_add_cgroup_ns_to_ctx(&lttng_static_ctx);
-       if (ret && ret != -ENOSYS) {
-               printk(KERN_WARNING "Cannot add context lttng_add_cgroup_ns_to_ctx\n");
-       }
-       ret = lttng_add_ipc_ns_to_ctx(&lttng_static_ctx);
-       if (ret && ret != -ENOSYS) {
-               printk(KERN_WARNING "Cannot add context lttng_add_ipc_ns_to_ctx\n");
-       }
-       ret = lttng_add_mnt_ns_to_ctx(&lttng_static_ctx);
-       if (ret && ret != -ENOSYS) {
-               printk(KERN_WARNING "Cannot add context lttng_add_mnt_ns_to_ctx\n");
-       }
-       ret = lttng_add_net_ns_to_ctx(&lttng_static_ctx);
-       if (ret && ret != -ENOSYS) {
-               printk(KERN_WARNING "Cannot add context lttng_add_net_ns_to_ctx\n");
-       }
-       ret = lttng_add_pid_ns_to_ctx(&lttng_static_ctx);
-       if (ret && ret != -ENOSYS) {
-               printk(KERN_WARNING "Cannot add context lttng_add_pid_ns_to_ctx\n");
-       }
-       ret = lttng_add_user_ns_to_ctx(&lttng_static_ctx);
-       if (ret && ret != -ENOSYS) {
-               printk(KERN_WARNING "Cannot add context lttng_add_user_ns_to_ctx\n");
-       }
-       ret = lttng_add_uts_ns_to_ctx(&lttng_static_ctx);
-       if (ret && ret != -ENOSYS) {
-               printk(KERN_WARNING "Cannot add context lttng_add_uts_ns_to_ctx\n");
-       }
-       /* TODO: perf counters for filtering */
-       return 0;
-}
-
-void lttng_context_exit(void)
-{
-       lttng_destroy_context(lttng_static_ctx);
-       lttng_static_ctx = NULL;
-}
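[annotation] Two details of lttng-context.c are worth calling out. First, lttng_append_context() grows the field array geometrically (max_t(size_t, 1, 2 * allocated_fields)), so repeated appends cost amortized O(1) reallocations. Second, lttng_get_context_index() strips an optional "$ctx." prefix, so filter expressions and bare names resolve identically; for instance (illustration only, using the "pid" field registered by lttng_context_init()):

	/* Illustration only: both spellings resolve to the same field index. */
	static int resolve_pid_field(void)
	{
		int a = lttng_get_context_index(lttng_static_ctx, "pid");
		int b = lttng_get_context_index(lttng_static_ctx, "$ctx.pid");

		WARN_ON_ONCE(a != b);
		return a;	/* -1 if the pid context failed to register */
	}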
diff --git a/lttng-events.c b/lttng-events.c
deleted file mode 100644 (file)
index a853609..0000000
+++ /dev/null
@@ -1,3087 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-events.c
- *
- * Holds LTTng per-session event registry.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-/*
- * This page_alloc.h wrapper needs to be included before gfpflags.h because it
- * overrides a function with a define.
- */
-#include "wrapper/page_alloc.h"
-
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/utsname.h>
-#include <linux/err.h>
-#include <linux/seq_file.h>
-#include <linux/file.h>
-#include <linux/anon_inodes.h>
-#include <wrapper/file.h>
-#include <linux/jhash.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/dmi.h>
-
-#include <wrapper/uuid.h>
-#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
-#include <wrapper/random.h>
-#include <wrapper/tracepoint.h>
-#include <wrapper/list.h>
-#include <wrapper/types.h>
-#include <lttng/kernel-version.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <lttng/abi-old.h>
-#include <lttng/endian.h>
-#include <lttng/string-utils.h>
-#include <ringbuffer/backend.h>
-#include <ringbuffer/frontend.h>
-#include <wrapper/time.h>
-
-#define METADATA_CACHE_DEFAULT_SIZE 4096
-
-static LIST_HEAD(sessions);
-static LIST_HEAD(lttng_transport_list);
-/*
- * Protect the sessions and metadata caches.
- */
-static DEFINE_MUTEX(sessions_mutex);
-static struct kmem_cache *event_cache;
-
-static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
-static void lttng_session_sync_enablers(struct lttng_session *session);
-static void lttng_enabler_destroy(struct lttng_enabler *enabler);
-
-static void _lttng_event_destroy(struct lttng_event *event);
-static void _lttng_channel_destroy(struct lttng_channel *chan);
-static int _lttng_event_unregister(struct lttng_event *event);
-static
-int _lttng_event_metadata_statedump(struct lttng_session *session,
-                                 struct lttng_channel *chan,
-                                 struct lttng_event *event);
-static
-int _lttng_session_metadata_statedump(struct lttng_session *session);
-static
-void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
-static
-int _lttng_type_statedump(struct lttng_session *session,
-               const struct lttng_type *type,
-               size_t nesting);
-static
-int _lttng_field_statedump(struct lttng_session *session,
-               const struct lttng_event_field *field,
-               size_t nesting);
-
-void synchronize_trace(void)
-{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
-       synchronize_rcu();
-#else
-       synchronize_sched();
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-#ifdef CONFIG_PREEMPT_RT_FULL
-       synchronize_rcu();
-#endif
-#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
-#ifdef CONFIG_PREEMPT_RT
-       synchronize_rcu();
-#endif
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
-}
-
-void lttng_lock_sessions(void)
-{
-       mutex_lock(&sessions_mutex);
-}
-
-void lttng_unlock_sessions(void)
-{
-       mutex_unlock(&sessions_mutex);
-}
-
-/*
- * Called with sessions lock held.
- */
-int lttng_session_active(void)
-{
-       struct lttng_session *iter;
-
-       list_for_each_entry(iter, &sessions, list) {
-               if (iter->active)
-                       return 1;
-       }
-       return 0;
-}
-
-struct lttng_session *lttng_session_create(void)
-{
-       struct lttng_session *session;
-       struct lttng_metadata_cache *metadata_cache;
-       int i;
-
-       mutex_lock(&sessions_mutex);
-       session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
-       if (!session)
-               goto err;
-       INIT_LIST_HEAD(&session->chan);
-       INIT_LIST_HEAD(&session->events);
-       lttng_guid_gen(&session->uuid);
-
-       metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
-                       GFP_KERNEL);
-       if (!metadata_cache)
-               goto err_free_session;
-       metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
-       if (!metadata_cache->data)
-               goto err_free_cache;
-       metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
-       kref_init(&metadata_cache->refcount);
-       mutex_init(&metadata_cache->lock);
-       session->metadata_cache = metadata_cache;
-       INIT_LIST_HEAD(&metadata_cache->metadata_stream);
-       memcpy(&metadata_cache->uuid, &session->uuid,
-               sizeof(metadata_cache->uuid));
-       INIT_LIST_HEAD(&session->enablers_head);
-       for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
-               INIT_HLIST_HEAD(&session->events_ht.table[i]);
-       list_add(&session->list, &sessions);
-       session->pid_tracker.session = session;
-       session->pid_tracker.tracker_type = TRACKER_PID;
-       session->vpid_tracker.session = session;
-       session->vpid_tracker.tracker_type = TRACKER_VPID;
-       session->uid_tracker.session = session;
-       session->uid_tracker.tracker_type = TRACKER_UID;
-       session->vuid_tracker.session = session;
-       session->vuid_tracker.tracker_type = TRACKER_VUID;
-       session->gid_tracker.session = session;
-       session->gid_tracker.tracker_type = TRACKER_GID;
-       session->vgid_tracker.session = session;
-       session->vgid_tracker.tracker_type = TRACKER_VGID;
-       mutex_unlock(&sessions_mutex);
-       return session;
-
-err_free_cache:
-       kfree(metadata_cache);
-err_free_session:
-       lttng_kvfree(session);
-err:
-       mutex_unlock(&sessions_mutex);
-       return NULL;
-}
-
-void metadata_cache_destroy(struct kref *kref)
-{
-       struct lttng_metadata_cache *cache =
-               container_of(kref, struct lttng_metadata_cache, refcount);
-       vfree(cache->data);
-       kfree(cache);
-}
-
-void lttng_session_destroy(struct lttng_session *session)
-{
-       struct lttng_channel *chan, *tmpchan;
-       struct lttng_event *event, *tmpevent;
-       struct lttng_metadata_stream *metadata_stream;
-       struct lttng_enabler *enabler, *tmpenabler;
-       int ret;
-
-       mutex_lock(&sessions_mutex);
-       WRITE_ONCE(session->active, 0);
-       list_for_each_entry(chan, &session->chan, list) {
-               ret = lttng_syscalls_unregister(chan);
-               WARN_ON(ret);
-       }
-       list_for_each_entry(event, &session->events, list) {
-               ret = _lttng_event_unregister(event);
-               WARN_ON(ret);
-       }
-       synchronize_trace();    /* Wait for in-flight events to complete */
-       list_for_each_entry_safe(enabler, tmpenabler,
-                       &session->enablers_head, node)
-               lttng_enabler_destroy(enabler);
-       list_for_each_entry_safe(event, tmpevent, &session->events, list)
-               _lttng_event_destroy(event);
-       list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
-               BUG_ON(chan->channel_type == METADATA_CHANNEL);
-               _lttng_channel_destroy(chan);
-       }
-       list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
-               _lttng_metadata_channel_hangup(metadata_stream);
-       lttng_id_tracker_destroy(&session->pid_tracker, false);
-       lttng_id_tracker_destroy(&session->vpid_tracker, false);
-       lttng_id_tracker_destroy(&session->uid_tracker, false);
-       lttng_id_tracker_destroy(&session->vuid_tracker, false);
-       lttng_id_tracker_destroy(&session->gid_tracker, false);
-       lttng_id_tracker_destroy(&session->vgid_tracker, false);
-       kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
-       list_del(&session->list);
-       mutex_unlock(&sessions_mutex);
-       lttng_kvfree(session);
-}
-
-int lttng_session_statedump(struct lttng_session *session)
-{
-       int ret;
-
-       mutex_lock(&sessions_mutex);
-       ret = lttng_statedump_start(session);
-       mutex_unlock(&sessions_mutex);
-       return ret;
-}
-
-int lttng_session_enable(struct lttng_session *session)
-{
-       int ret = 0;
-       struct lttng_channel *chan;
-
-       mutex_lock(&sessions_mutex);
-       if (session->active) {
-               ret = -EBUSY;
-               goto end;
-       }
-
-       /* Set transient enabler state to "enabled" */
-       session->tstate = 1;
-
-       /* We need to sync enablers with session before activation. */
-       lttng_session_sync_enablers(session);
-
-       /*
-        * Snapshot the number of events per channel to know the type of header
-        * we need to use.
-        */
-       list_for_each_entry(chan, &session->chan, list) {
-               if (chan->header_type)
-                       continue;               /* don't change it across session stop/restart */
-               if (chan->free_event_id < 31)
-                       chan->header_type = 1;  /* compact */
-               else
-                       chan->header_type = 2;  /* large */
-       }
-
-       /* Clear each stream's quiescent state. */
-       list_for_each_entry(chan, &session->chan, list) {
-               if (chan->channel_type != METADATA_CHANNEL)
-                       lib_ring_buffer_clear_quiescent_channel(chan->chan);
-       }
-
-       WRITE_ONCE(session->active, 1);
-       WRITE_ONCE(session->been_active, 1);
-       ret = _lttng_session_metadata_statedump(session);
-       if (ret) {
-               WRITE_ONCE(session->active, 0);
-               goto end;
-       }
-       ret = lttng_statedump_start(session);
-       if (ret)
-               WRITE_ONCE(session->active, 0);
-end:
-       mutex_unlock(&sessions_mutex);
-       return ret;
-}
-
-int lttng_session_disable(struct lttng_session *session)
-{
-       int ret = 0;
-       struct lttng_channel *chan;
-
-       mutex_lock(&sessions_mutex);
-       if (!session->active) {
-               ret = -EBUSY;
-               goto end;
-       }
-       WRITE_ONCE(session->active, 0);
-
-       /* Set transient enabler state to "disabled" */
-       session->tstate = 0;
-       lttng_session_sync_enablers(session);
-
-       /* Set each stream's quiescent state. */
-       list_for_each_entry(chan, &session->chan, list) {
-               if (chan->channel_type != METADATA_CHANNEL)
-                       lib_ring_buffer_set_quiescent_channel(chan->chan);
-       }
-end:
-       mutex_unlock(&sessions_mutex);
-       return ret;
-}
-
-int lttng_session_metadata_regenerate(struct lttng_session *session)
-{
-       int ret = 0;
-       struct lttng_channel *chan;
-       struct lttng_event *event;
-       struct lttng_metadata_cache *cache = session->metadata_cache;
-       struct lttng_metadata_stream *stream;
-
-       mutex_lock(&sessions_mutex);
-       if (!session->active) {
-               ret = -EBUSY;
-               goto end;
-       }
-
-       mutex_lock(&cache->lock);
-       memset(cache->data, 0, cache->cache_alloc);
-       cache->metadata_written = 0;
-       cache->version++;
-       list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
-               stream->metadata_out = 0;
-               stream->metadata_in = 0;
-       }
-       mutex_unlock(&cache->lock);
-
-       session->metadata_dumped = 0;
-       list_for_each_entry(chan, &session->chan, list) {
-               chan->metadata_dumped = 0;
-       }
-
-       list_for_each_entry(event, &session->events, list) {
-               event->metadata_dumped = 0;
-       }
-
-       ret = _lttng_session_metadata_statedump(session);
-
-end:
-       mutex_unlock(&sessions_mutex);
-       return ret;
-}
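-
-/*
- * Sketch: how a regenerate is observed downstream. Bumping
- * cache->version above is what lttng_metadata_output_channel()
- * compares against stream->version, so the next output pass picks up
- * the reset stream offsets and re-emits the whole cache under the new
- * version.
- */
-static inline bool lttng_example_metadata_regenerated(
-               struct lttng_metadata_stream *stream)
-{
-       return stream->metadata_cache->version != stream->version;
-}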
-
-int lttng_channel_enable(struct lttng_channel *channel)
-{
-       int ret = 0;
-
-       mutex_lock(&sessions_mutex);
-       if (channel->channel_type == METADATA_CHANNEL) {
-               ret = -EPERM;
-               goto end;
-       }
-       if (channel->enabled) {
-               ret = -EEXIST;
-               goto end;
-       }
-       /* Set transient enabler state to "enabled" */
-       channel->tstate = 1;
-       lttng_session_sync_enablers(channel->session);
-       /* Atomically set the state to "enabled" */
-       WRITE_ONCE(channel->enabled, 1);
-end:
-       mutex_unlock(&sessions_mutex);
-       return ret;
-}
-
-int lttng_channel_disable(struct lttng_channel *channel)
-{
-       int ret = 0;
-
-       mutex_lock(&sessions_mutex);
-       if (channel->channel_type == METADATA_CHANNEL) {
-               ret = -EPERM;
-               goto end;
-       }
-       if (!channel->enabled) {
-               ret = -EEXIST;
-               goto end;
-       }
-       /* Atomically set the state to "disabled" */
-       WRITE_ONCE(channel->enabled, 0);
-       /* Set transient enabler state to "disabled" */
-       channel->tstate = 0;
-       lttng_session_sync_enablers(channel->session);
-end:
-       mutex_unlock(&sessions_mutex);
-       return ret;
-}
-
-int lttng_event_enable(struct lttng_event *event)
-{
-       int ret = 0;
-
-       mutex_lock(&sessions_mutex);
-       if (event->chan->channel_type == METADATA_CHANNEL) {
-               ret = -EPERM;
-               goto end;
-       }
-       if (event->enabled) {
-               ret = -EEXIST;
-               goto end;
-       }
-       switch (event->instrumentation) {
-       case LTTNG_KERNEL_TRACEPOINT:
-       case LTTNG_KERNEL_SYSCALL:
-               ret = -EINVAL;
-               break;
-       case LTTNG_KERNEL_KPROBE:
-       case LTTNG_KERNEL_UPROBE:
-       case LTTNG_KERNEL_NOOP:
-               WRITE_ONCE(event->enabled, 1);
-               break;
-       case LTTNG_KERNEL_KRETPROBE:
-               ret = lttng_kretprobes_event_enable_state(event, 1);
-               break;
-       case LTTNG_KERNEL_FUNCTION:     /* Fall-through. */
-       default:
-               WARN_ON_ONCE(1);
-               ret = -EINVAL;
-       }
-end:
-       mutex_unlock(&sessions_mutex);
-       return ret;
-}
-
-int lttng_event_disable(struct lttng_event *event)
-{
-       int ret = 0;
-
-       mutex_lock(&sessions_mutex);
-       if (event->chan->channel_type == METADATA_CHANNEL) {
-               ret = -EPERM;
-               goto end;
-       }
-       if (!event->enabled) {
-               ret = -EEXIST;
-               goto end;
-       }
-       switch (event->instrumentation) {
-       case LTTNG_KERNEL_TRACEPOINT:
-       case LTTNG_KERNEL_SYSCALL:
-               ret = -EINVAL;
-               break;
-       case LTTNG_KERNEL_KPROBE:
-       case LTTNG_KERNEL_UPROBE:
-       case LTTNG_KERNEL_NOOP:
-               WRITE_ONCE(event->enabled, 0);
-               break;
-       case LTTNG_KERNEL_KRETPROBE:
-               ret = lttng_kretprobes_event_enable_state(event, 0);
-               break;
-       case LTTNG_KERNEL_FUNCTION:     /* Fall-through. */
-       default:
-               WARN_ON_ONCE(1);
-               ret = -EINVAL;
-       }
-end:
-       mutex_unlock(&sessions_mutex);
-       return ret;
-}
-
-static struct lttng_transport *lttng_transport_find(const char *name)
-{
-       struct lttng_transport *transport;
-
-       list_for_each_entry(transport, &lttng_transport_list, node) {
-               if (!strcmp(transport->name, name))
-                       return transport;
-       }
-       return NULL;
-}
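-
-/*
- * Sketch: a ring-buffer client resolves to a channel transport by name
- * through lttng_transport_find(). A minimal declaration could look as
- * follows; the transport name and the elided .ops wiring are
- * assumptions, not taken from this file.
- */
-static struct lttng_transport lttng_example_transport = {
-       .name = "relay-discard",
-       .owner = THIS_MODULE,
-       /* .ops = <ring-buffer client channel operations>, */
-};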
-
-struct lttng_channel *lttng_channel_create(struct lttng_session *session,
-                                      const char *transport_name,
-                                      void *buf_addr,
-                                      size_t subbuf_size, size_t num_subbuf,
-                                      unsigned int switch_timer_interval,
-                                      unsigned int read_timer_interval,
-                                      enum channel_type channel_type)
-{
-       struct lttng_channel *chan;
-       struct lttng_transport *transport = NULL;
-
-       mutex_lock(&sessions_mutex);
-       if (session->been_active && channel_type != METADATA_CHANNEL)
-               goto active;    /* Refuse to add channel to active session */
-       transport = lttng_transport_find(transport_name);
-       if (!transport) {
-               printk(KERN_WARNING "LTTng transport %s not found\n",
-                      transport_name);
-               goto notransport;
-       }
-       if (!try_module_get(transport->owner)) {
-               printk(KERN_WARNING "LTT : Can't lock transport module.\n");
-               goto notransport;
-       }
-       chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
-       if (!chan)
-               goto nomem;
-       chan->session = session;
-       chan->id = session->free_chan_id++;
-       chan->ops = &transport->ops;
-       /*
-        * Note: the channel creation op already writes into the packet
-        * headers. Therefore the "chan" information used as input
-        * must already be accessible.
-        */
-       chan->chan = transport->ops.channel_create(transport_name,
-                       chan, buf_addr, subbuf_size, num_subbuf,
-                       switch_timer_interval, read_timer_interval);
-       if (!chan->chan)
-               goto create_error;
-       chan->tstate = 1;
-       chan->enabled = 1;
-       chan->transport = transport;
-       chan->channel_type = channel_type;
-       list_add(&chan->list, &session->chan);
-       mutex_unlock(&sessions_mutex);
-       return chan;
-
-create_error:
-       kfree(chan);
-nomem:
-       if (transport)
-               module_put(transport->owner);
-notransport:
-active:
-       mutex_unlock(&sessions_mutex);
-       return NULL;
-}
-
-/*
- * Only used internally at session destruction for per-cpu channels, and
- * when the metadata channel is released.
- * Needs to be called with sessions mutex held.
- */
-static
-void _lttng_channel_destroy(struct lttng_channel *chan)
-{
-       chan->ops->channel_destroy(chan->chan);
-       module_put(chan->transport->owner);
-       list_del(&chan->list);
-       lttng_destroy_context(chan->ctx);
-       kfree(chan);
-}
-
-void lttng_metadata_channel_destroy(struct lttng_channel *chan)
-{
-       BUG_ON(chan->channel_type != METADATA_CHANNEL);
-
-       /* Protect the metadata cache with the sessions_mutex. */
-       mutex_lock(&sessions_mutex);
-       _lttng_channel_destroy(chan);
-       mutex_unlock(&sessions_mutex);
-}
-EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
-
-static
-void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
-{
-       stream->finalized = 1;
-       wake_up_interruptible(&stream->read_wait);
-}
-
-/*
- * Supports event creation while the tracing session is active.
- * Needs to be called with sessions mutex held.
- */
-struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
-                               struct lttng_kernel_event *event_param,
-                               void *filter,
-                               const struct lttng_event_desc *event_desc,
-                               enum lttng_kernel_instrumentation itype)
-{
-       struct lttng_session *session = chan->session;
-       struct lttng_event *event;
-       const char *event_name;
-       struct hlist_head *head;
-       size_t name_len;
-       uint32_t hash;
-       int ret;
-
-       if (chan->free_event_id == -1U) {
-               ret = -EMFILE;
-               goto full;
-       }
-
-       switch (itype) {
-       case LTTNG_KERNEL_TRACEPOINT:
-               event_name = event_desc->name;
-               break;
-       case LTTNG_KERNEL_KPROBE:
-       case LTTNG_KERNEL_UPROBE:
-       case LTTNG_KERNEL_KRETPROBE:
-       case LTTNG_KERNEL_NOOP:
-       case LTTNG_KERNEL_SYSCALL:
-               event_name = event_param->name;
-               break;
-       case LTTNG_KERNEL_FUNCTION:     /* Fall-through. */
-       default:
-               WARN_ON_ONCE(1);
-               ret = -EINVAL;
-               goto type_error;
-       }
-       name_len = strlen(event_name);
-       hash = jhash(event_name, name_len, 0);
-       head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
-       lttng_hlist_for_each_entry(event, head, hlist) {
-               WARN_ON_ONCE(!event->desc);
-               if (!strncmp(event->desc->name, event_name,
-                                       LTTNG_KERNEL_SYM_NAME_LEN - 1)
-                               && chan == event->chan) {
-                       ret = -EEXIST;
-                       goto exist;
-               }
-       }
-
-       event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
-       if (!event) {
-               ret = -ENOMEM;
-               goto cache_error;
-       }
-       event->chan = chan;
-       event->filter = filter;
-       event->id = chan->free_event_id++;
-       event->instrumentation = itype;
-       event->evtype = LTTNG_TYPE_EVENT;
-       INIT_LIST_HEAD(&event->bytecode_runtime_head);
-       INIT_LIST_HEAD(&event->enablers_ref_head);
-
-       switch (itype) {
-       case LTTNG_KERNEL_TRACEPOINT:
-               /* Event will be enabled by enabler sync. */
-               event->enabled = 0;
-               event->registered = 0;
-               event->desc = lttng_event_get(event_name);
-               if (!event->desc) {
-                       ret = -ENOENT;
-                       goto register_error;
-               }
-               /* Populate lttng_event structure before event registration. */
-               smp_wmb();
-               break;
-       case LTTNG_KERNEL_KPROBE:
-               /*
-                * Needs to be explicitly enabled after creation, since
-                * we may want to apply filters.
-                */
-               event->enabled = 0;
-               event->registered = 1;
-               /*
-                * Populate lttng_event structure before event
-                * registration.
-                */
-               smp_wmb();
-               ret = lttng_kprobes_register(event_name,
-                               event_param->u.kprobe.symbol_name,
-                               event_param->u.kprobe.offset,
-                               event_param->u.kprobe.addr,
-                               event);
-               if (ret) {
-                       ret = -EINVAL;
-                       goto register_error;
-               }
-               ret = try_module_get(event->desc->owner);
-               WARN_ON_ONCE(!ret);
-               break;
-       case LTTNG_KERNEL_KRETPROBE:
-       {
-               struct lttng_event *event_return;
-
-               /*
-                * A kretprobe defines 2 events: entry and return. Both
-                * need to be explicitly enabled after creation, since
-                * we may want to apply filters.
-                */
-               event->enabled = 0;
-               event->registered = 1;
-               event_return =
-                       kmem_cache_zalloc(event_cache, GFP_KERNEL);
-               if (!event_return) {
-                       ret = -ENOMEM;
-                       goto register_error;
-               }
-               event_return->chan = chan;
-               event_return->filter = filter;
-               event_return->id = chan->free_event_id++;
-               event_return->enabled = 0;
-               event_return->registered = 1;
-               event_return->instrumentation = itype;
-               /*
-                * Populate lttng_event structure before kretprobe registration.
-                */
-               smp_wmb();
-               ret = lttng_kretprobes_register(event_name,
-                               event_param->u.kretprobe.symbol_name,
-                               event_param->u.kretprobe.offset,
-                               event_param->u.kretprobe.addr,
-                               event, event_return);
-               if (ret) {
-                       kmem_cache_free(event_cache, event_return);
-                       ret = -EINVAL;
-                       goto register_error;
-               }
-               /* Take 2 refs on the module: one per event. */
-               ret = try_module_get(event->desc->owner);
-               WARN_ON_ONCE(!ret);
-               ret = try_module_get(event->desc->owner);
-               WARN_ON_ONCE(!ret);
-               ret = _lttng_event_metadata_statedump(chan->session, chan,
-                                                   event_return);
-               WARN_ON_ONCE(ret > 0);
-               if (ret) {
-                       kmem_cache_free(event_cache, event_return);
-                       module_put(event->desc->owner);
-                       module_put(event->desc->owner);
-                       goto statedump_error;
-               }
-               list_add(&event_return->list, &chan->session->events);
-               break;
-       }
-       case LTTNG_KERNEL_NOOP:
-       case LTTNG_KERNEL_SYSCALL:
-               /*
-                * Needs to be explicitly enabled after creation, since
-                * we may want to apply filters.
-                */
-               event->enabled = 0;
-               event->registered = 0;
-               event->desc = event_desc;
-               if (!event->desc) {
-                       ret = -EINVAL;
-                       goto register_error;
-               }
-               break;
-       case LTTNG_KERNEL_UPROBE:
-               /*
-                * Needs to be explicitly enabled after creation, since
-                * we may want to apply filters.
-                */
-               event->enabled = 0;
-               event->registered = 1;
-
-               /*
-                * Populate lttng_event structure before event
-                * registration.
-                */
-               smp_wmb();
-
-               ret = lttng_uprobes_register(event_param->name,
-                               event_param->u.uprobe.fd,
-                               event);
-               if (ret)
-                       goto register_error;
-               ret = try_module_get(event->desc->owner);
-               WARN_ON_ONCE(!ret);
-               break;
-       case LTTNG_KERNEL_FUNCTION:     /* Fall-through */
-       default:
-               WARN_ON_ONCE(1);
-               ret = -EINVAL;
-               goto register_error;
-       }
-       ret = _lttng_event_metadata_statedump(chan->session, chan, event);
-       WARN_ON_ONCE(ret > 0);
-       if (ret) {
-               goto statedump_error;
-       }
-       hlist_add_head(&event->hlist, head);
-       list_add(&event->list, &chan->session->events);
-       return event;
-
-statedump_error:
-       /* If a statedump error occurs, events will not be readable. */
-register_error:
-       kmem_cache_free(event_cache, event);
-cache_error:
-exist:
-type_error:
-full:
-       return ERR_PTR(ret);
-}
-
-struct lttng_event *lttng_event_create(struct lttng_channel *chan,
-                               struct lttng_kernel_event *event_param,
-                               void *filter,
-                               const struct lttng_event_desc *event_desc,
-                               enum lttng_kernel_instrumentation itype)
-{
-       struct lttng_event *event;
-
-       mutex_lock(&sessions_mutex);
-       event = _lttng_event_create(chan, event_param, filter, event_desc,
-                               itype);
-       mutex_unlock(&sessions_mutex);
-       return event;
-}
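-
-/*
- * Sketch (hypothetical event and symbol names): creating a kprobe
- * event on an existing channel. The parameter fields mirror the uses
- * of event_param in _lttng_event_create() above; the event still needs
- * an explicit lttng_event_enable() afterwards, since kprobe events are
- * created disabled.
- */
-static struct lttng_event *lttng_example_create_kprobe(
-               struct lttng_channel *chan)
-{
-       struct lttng_kernel_event param;
-
-       memset(&param, 0, sizeof(param));
-       param.instrumentation = LTTNG_KERNEL_KPROBE;
-       strscpy(param.name, "example_kprobe", sizeof(param.name));
-       strscpy(param.u.kprobe.symbol_name, "do_sys_open",
-               sizeof(param.u.kprobe.symbol_name));
-       return lttng_event_create(chan, &param, NULL, NULL,
-                       LTTNG_KERNEL_KPROBE);
-}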
-
-/* Only used for tracepoints and syscalls for now. */
-static
-void register_event(struct lttng_event *event)
-{
-       const struct lttng_event_desc *desc;
-       int ret = -EINVAL;
-
-       if (event->registered)
-               return;
-
-       desc = event->desc;
-       switch (event->instrumentation) {
-       case LTTNG_KERNEL_TRACEPOINT:
-               ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
-                                                 desc->probe_callback,
-                                                 event);
-               break;
-       case LTTNG_KERNEL_SYSCALL:
-               ret = lttng_syscall_filter_enable(event->chan,
-                       desc->name);
-               break;
-       case LTTNG_KERNEL_KPROBE:
-       case LTTNG_KERNEL_UPROBE:
-       case LTTNG_KERNEL_KRETPROBE:
-       case LTTNG_KERNEL_NOOP:
-               ret = 0;
-               break;
-       case LTTNG_KERNEL_FUNCTION:     /* Fall-through */
-       default:
-               WARN_ON_ONCE(1);
-       }
-       if (!ret)
-               event->registered = 1;
-}
-
-/*
- * Only used internally at session destruction.
- */
-int _lttng_event_unregister(struct lttng_event *event)
-{
-       const struct lttng_event_desc *desc;
-       int ret = -EINVAL;
-
-       if (!event->registered)
-               return 0;
-
-       desc = event->desc;
-       switch (event->instrumentation) {
-       case LTTNG_KERNEL_TRACEPOINT:
-               ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
-                                                 event->desc->probe_callback,
-                                                 event);
-               break;
-       case LTTNG_KERNEL_KPROBE:
-               lttng_kprobes_unregister(event);
-               ret = 0;
-               break;
-       case LTTNG_KERNEL_KRETPROBE:
-               lttng_kretprobes_unregister(event);
-               ret = 0;
-               break;
-       case LTTNG_KERNEL_SYSCALL:
-               ret = lttng_syscall_filter_disable(event->chan,
-                       desc->name);
-               break;
-       case LTTNG_KERNEL_NOOP:
-               ret = 0;
-               break;
-       case LTTNG_KERNEL_UPROBE:
-               lttng_uprobes_unregister(event);
-               ret = 0;
-               break;
-       case LTTNG_KERNEL_FUNCTION:     /* Fall-through */
-       default:
-               WARN_ON_ONCE(1);
-       }
-       if (!ret)
-               event->registered = 0;
-       return ret;
-}
-
-/*
- * Only used internally at session destruction.
- */
-static
-void _lttng_event_destroy(struct lttng_event *event)
-{
-       switch (event->instrumentation) {
-       case LTTNG_KERNEL_TRACEPOINT:
-               lttng_event_put(event->desc);
-               break;
-       case LTTNG_KERNEL_KPROBE:
-               module_put(event->desc->owner);
-               lttng_kprobes_destroy_private(event);
-               break;
-       case LTTNG_KERNEL_KRETPROBE:
-               module_put(event->desc->owner);
-               lttng_kretprobes_destroy_private(event);
-               break;
-       case LTTNG_KERNEL_NOOP:
-       case LTTNG_KERNEL_SYSCALL:
-               break;
-       case LTTNG_KERNEL_UPROBE:
-               module_put(event->desc->owner);
-               lttng_uprobes_destroy_private(event);
-               break;
-       case LTTNG_KERNEL_FUNCTION:     /* Fall-through */
-       default:
-               WARN_ON_ONCE(1);
-       }
-       list_del(&event->list);
-       lttng_destroy_context(event->ctx);
-       kmem_cache_free(event_cache, event);
-}
-
-struct lttng_id_tracker *get_tracker(struct lttng_session *session,
-               enum tracker_type tracker_type)
-{
-       switch (tracker_type) {
-       case TRACKER_PID:
-               return &session->pid_tracker;
-       case TRACKER_VPID:
-               return &session->vpid_tracker;
-       case TRACKER_UID:
-               return &session->uid_tracker;
-       case TRACKER_VUID:
-               return &session->vuid_tracker;
-       case TRACKER_GID:
-               return &session->gid_tracker;
-       case TRACKER_VGID:
-               return &session->vgid_tracker;
-       default:
-               WARN_ON_ONCE(1);
-               return NULL;
-       }
-}
-
-int lttng_session_track_id(struct lttng_session *session,
-               enum tracker_type tracker_type, int id)
-{
-       struct lttng_id_tracker *tracker;
-       int ret;
-
-       tracker = get_tracker(session, tracker_type);
-       if (!tracker)
-               return -EINVAL;
-       if (id < -1)
-               return -EINVAL;
-       mutex_lock(&sessions_mutex);
-       if (id == -1) {
-               /* track all ids: destroy tracker. */
-               lttng_id_tracker_destroy(tracker, true);
-               ret = 0;
-       } else {
-               ret = lttng_id_tracker_add(tracker, id);
-       }
-       mutex_unlock(&sessions_mutex);
-       return ret;
-}
-
-int lttng_session_untrack_id(struct lttng_session *session,
-               enum tracker_type tracker_type, int id)
-{
-       struct lttng_id_tracker *tracker;
-       int ret;
-
-       tracker = get_tracker(session, tracker_type);
-       if (!tracker)
-               return -EINVAL;
-       if (id < -1)
-               return -EINVAL;
-       mutex_lock(&sessions_mutex);
-       if (id == -1) {
-               /* untrack all ids: replace with an empty tracker. */
-               ret = lttng_id_tracker_empty_set(tracker);
-       } else {
-               ret = lttng_id_tracker_del(tracker, id);
-       }
-       mutex_unlock(&sessions_mutex);
-       return ret;
-}
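-
-/*
- * Sketch of the conventions above: tracking id -1 destroys the tracker
- * (track everything), while untracking id -1 installs an empty set
- * (track nothing). Restricting tracing to a single PID is therefore a
- * two-step sequence:
- */
-static int lttng_example_track_only_pid(struct lttng_session *session,
-               int pid)
-{
-       int ret;
-
-       ret = lttng_session_untrack_id(session, TRACKER_PID, -1);
-       if (ret)
-               return ret;
-       return lttng_session_track_id(session, TRACKER_PID, pid);
-}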
-
-static
-void *id_list_start(struct seq_file *m, loff_t *pos)
-{
-       struct lttng_id_tracker *id_tracker = m->private;
-       struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
-       struct lttng_id_hash_node *e;
-       int iter = 0, i;
-
-       mutex_lock(&sessions_mutex);
-       if (id_tracker_p) {
-               for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
-                       struct hlist_head *head = &id_tracker_p->id_hash[i];
-
-                       lttng_hlist_for_each_entry(e, head, hlist) {
-                               if (iter++ >= *pos)
-                                       return e;
-                       }
-               }
-       } else {
-               /* ID tracker disabled. */
-               if (iter >= *pos && iter == 0) {
-                       return id_tracker_p;    /* empty tracker */
-               }
-               iter++;
-       }
-       /* End of list */
-       return NULL;
-}
-
-/* Called with sessions_mutex held. */
-static
-void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
-{
-       struct lttng_id_tracker *id_tracker = m->private;
-       struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
-       struct lttng_id_hash_node *e;
-       int iter = 0, i;
-
-       (*ppos)++;
-       if (id_tracker_p) {
-               for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
-                       struct hlist_head *head = &id_tracker_p->id_hash[i];
-
-                       lttng_hlist_for_each_entry(e, head, hlist) {
-                               if (iter++ >= *ppos)
-                                       return e;
-                       }
-               }
-       } else {
-               /* ID tracker disabled. */
-               if (iter >= *ppos && iter == 0)
-                       return p;       /* empty tracker */
-               iter++;
-       }
-
-       /* End of list */
-       return NULL;
-}
-
-static
-void id_list_stop(struct seq_file *m, void *p)
-{
-       mutex_unlock(&sessions_mutex);
-}
-
-static
-int id_list_show(struct seq_file *m, void *p)
-{
-       struct lttng_id_tracker *id_tracker = m->private;
-       struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
-       int id;
-
-       if (p == id_tracker_p) {
-               /* Tracker disabled. */
-               id = -1;
-       } else {
-               const struct lttng_id_hash_node *e = p;
-
-               id = lttng_id_tracker_get_node_id(e);
-       }
-       switch (id_tracker->tracker_type) {
-       case TRACKER_PID:
-               seq_printf(m,   "process { pid = %d; };\n", id);
-               break;
-       case TRACKER_VPID:
-               seq_printf(m,   "process { vpid = %d; };\n", id);
-               break;
-       case TRACKER_UID:
-               seq_printf(m,   "user { uid = %d; };\n", id);
-               break;
-       case TRACKER_VUID:
-               seq_printf(m,   "user { vuid = %d; };\n", id);
-               break;
-       case TRACKER_GID:
-               seq_printf(m,   "group { gid = %d; };\n", id);
-               break;
-       case TRACKER_VGID:
-               seq_printf(m,   "group { vgid = %d; };\n", id);
-               break;
-       default:
-               seq_printf(m,   "UNKNOWN { field = %d };\n", id);
-       }
-       return 0;
-}
-
-static
-const struct seq_operations lttng_tracker_ids_list_seq_ops = {
-       .start = id_list_start,
-       .next = id_list_next,
-       .stop = id_list_stop,
-       .show = id_list_show,
-};
-
-static
-int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &lttng_tracker_ids_list_seq_ops);
-}
-
-static
-int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
-{
-       struct seq_file *m = file->private_data;
-       struct lttng_id_tracker *id_tracker = m->private;
-       int ret;
-
-       WARN_ON_ONCE(!id_tracker);
-       ret = seq_release(inode, file);
-       if (!ret)
-               fput(id_tracker->session->file);
-       return ret;
-}
-
-const struct file_operations lttng_tracker_ids_list_fops = {
-       .owner = THIS_MODULE,
-       .open = lttng_tracker_ids_list_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = lttng_tracker_ids_list_release,
-};
-
-int lttng_session_list_tracker_ids(struct lttng_session *session,
-               enum tracker_type tracker_type)
-{
-       struct file *tracker_ids_list_file;
-       struct seq_file *m;
-       int file_fd, ret;
-
-       file_fd = lttng_get_unused_fd();
-       if (file_fd < 0) {
-               ret = file_fd;
-               goto fd_error;
-       }
-
-       tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
-                                         &lttng_tracker_ids_list_fops,
-                                         NULL, O_RDWR);
-       if (IS_ERR(tracker_ids_list_file)) {
-               ret = PTR_ERR(tracker_ids_list_file);
-               goto file_error;
-       }
-       if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
-               ret = -EOVERFLOW;
-               goto refcount_error;
-       }
-       ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
-       if (ret < 0)
-               goto open_error;
-       m = tracker_ids_list_file->private_data;
-
-       m->private = get_tracker(session, tracker_type);
-       BUG_ON(!m->private);
-       fd_install(file_fd, tracker_ids_list_file);
-
-       return file_fd;
-
-open_error:
-       atomic_long_dec(&session->file->f_count);
-refcount_error:
-       fput(tracker_ids_list_file);
-file_error:
-       put_unused_fd(file_fd);
-fd_error:
-       return ret;
-}
-
-/*
- * Enabler management.
- */
-static
-int lttng_match_enabler_star_glob(const char *desc_name,
-               const char *pattern)
-{
-       if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
-                       desc_name, LTTNG_SIZE_MAX))
-               return 0;
-       return 1;
-}
-
-static
-int lttng_match_enabler_name(const char *desc_name,
-               const char *name)
-{
-       if (strcmp(desc_name, name))
-               return 0;
-       return 1;
-}
-
-static
-int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
-               struct lttng_enabler *enabler)
-{
-       const char *desc_name, *enabler_name;
-
-       enabler_name = enabler->event_param.name;
-       switch (enabler->event_param.instrumentation) {
-       case LTTNG_KERNEL_TRACEPOINT:
-               desc_name = desc->name;
-               break;
-       case LTTNG_KERNEL_SYSCALL:
-               desc_name = desc->name;
-               if (!strncmp(desc_name, "compat_", strlen("compat_")))
-                       desc_name += strlen("compat_");
-               if (!strncmp(desc_name, "syscall_exit_",
-                               strlen("syscall_exit_"))) {
-                       desc_name += strlen("syscall_exit_");
-               } else if (!strncmp(desc_name, "syscall_entry_",
-                               strlen("syscall_entry_"))) {
-                       desc_name += strlen("syscall_entry_");
-               } else {
-                       WARN_ON_ONCE(1);
-                       return -EINVAL;
-               }
-               break;
-       default:
-               WARN_ON_ONCE(1);
-               return -EINVAL;
-       }
-       switch (enabler->type) {
-       case LTTNG_ENABLER_STAR_GLOB:
-               return lttng_match_enabler_star_glob(desc_name, enabler_name);
-       case LTTNG_ENABLER_NAME:
-               return lttng_match_enabler_name(desc_name, enabler_name);
-       default:
-               return -EINVAL;
-       }
-}
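-
-/*
- * Sketch: what the matchers above accept. A star-glob enabler named
- * "sched_*" matches the tracepoint description "sched_switch"; a
- * syscall enabler named "openat" matches both "syscall_entry_openat"
- * and "syscall_exit_openat" once the prefixes are stripped.
- */
-static inline int lttng_example_glob_match(void)
-{
-       /* Returns nonzero (match). */
-       return lttng_match_enabler_star_glob("sched_switch", "sched_*");
-}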
-
-static
-int lttng_event_match_enabler(struct lttng_event *event,
-               struct lttng_enabler *enabler)
-{
-       if (enabler->event_param.instrumentation != event->instrumentation)
-               return 0;
-       if (lttng_desc_match_enabler(event->desc, enabler)
-                       && event->chan == enabler->chan)
-               return 1;
-       else
-               return 0;
-}
-
-static
-struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
-               struct lttng_enabler *enabler)
-{
-       struct lttng_enabler_ref *enabler_ref;
-
-       list_for_each_entry(enabler_ref,
-                       &event->enablers_ref_head, node) {
-               if (enabler_ref->ref == enabler)
-                       return enabler_ref;
-       }
-       return NULL;
-}
-
-static
-void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
-{
-       struct lttng_session *session = enabler->chan->session;
-       struct lttng_probe_desc *probe_desc;
-       const struct lttng_event_desc *desc;
-       int i;
-       struct list_head *probe_list;
-
-       probe_list = lttng_get_probe_list_head();
-       /*
-        * For each probe event, if we find that a probe event matches
-        * our enabler, create an associated lttng_event if not
-        * already present.
-        */
-       list_for_each_entry(probe_desc, probe_list, head) {
-               for (i = 0; i < probe_desc->nr_events; i++) {
-                       int found = 0;
-                       struct hlist_head *head;
-                       const char *event_name;
-                       size_t name_len;
-                       uint32_t hash;
-                       struct lttng_event *event;
-
-                       desc = probe_desc->event_desc[i];
-                       if (!lttng_desc_match_enabler(desc, enabler))
-                               continue;
-                       event_name = desc->name;
-                       name_len = strlen(event_name);
-
-                       /*
-                        * Check if already created.
-                        */
-                       hash = jhash(event_name, name_len, 0);
-                       head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
-                       lttng_hlist_for_each_entry(event, head, hlist) {
-                               if (event->desc == desc
-                                               && event->chan == enabler->chan)
-                                       found = 1;
-                       }
-                       if (found)
-                               continue;
-
-                       /*
-                        * We need to create an event for this
-                        * event probe.
-                        */
-                       event = _lttng_event_create(enabler->chan,
-                                       NULL, NULL, desc,
-                                       LTTNG_KERNEL_TRACEPOINT);
-                       if (IS_ERR(event)) {
-                               printk(KERN_INFO "Unable to create event %s\n",
-                                       probe_desc->event_desc[i]->name);
-                       }
-               }
-       }
-}
-
-static
-void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
-{
-       int ret;
-
-       ret = lttng_syscalls_register(enabler->chan, NULL);
-       WARN_ON_ONCE(ret);
-}
-
-/*
- * Create a struct lttng_event if it is missing and its instrumentation
- * point is present in the list of registered probes.
- * Should be called with sessions mutex held.
- */
-static
-void lttng_create_event_if_missing(struct lttng_enabler *enabler)
-{
-       switch (enabler->event_param.instrumentation) {
-       case LTTNG_KERNEL_TRACEPOINT:
-               lttng_create_tracepoint_if_missing(enabler);
-               break;
-       case LTTNG_KERNEL_SYSCALL:
-               lttng_create_syscall_if_missing(enabler);
-               break;
-       default:
-               WARN_ON_ONCE(1);
-               break;
-       }
-}
-
-/*
- * Create events associated with an enabler (if not already present),
- * and add backward reference from the event to the enabler.
- * Should be called with sessions mutex held.
- */
-static
-int lttng_enabler_ref_events(struct lttng_enabler *enabler)
-{
-       struct lttng_session *session = enabler->chan->session;
-       struct lttng_event *event;
-
-       /* First ensure that probe events are created for this enabler. */
-       lttng_create_event_if_missing(enabler);
-
-       /* For each event matching enabler in session event list. */
-       list_for_each_entry(event, &session->events, list) {
-               struct lttng_enabler_ref *enabler_ref;
-
-               if (!lttng_event_match_enabler(event, enabler))
-                       continue;
-               enabler_ref = lttng_event_enabler_ref(event, enabler);
-               if (!enabler_ref) {
-                       /*
-                        * If no backward ref, create it.
-                        * Add backward ref from event to enabler.
-                        */
-                       enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
-                       if (!enabler_ref)
-                               return -ENOMEM;
-                       enabler_ref->ref = enabler;
-                       list_add(&enabler_ref->node,
-                               &event->enablers_ref_head);
-               }
-
-               /*
-                * Link filter bytecodes if not linked yet.
-                */
-               lttng_enabler_event_link_bytecode(event, enabler);
-
-               /* TODO: merge event context. */
-       }
-       return 0;
-}
-
-/*
- * Called at module load: connect the probe on all enablers matching
- * this event.
- * Called with sessions lock held.
- */
-int lttng_fix_pending_events(void)
-{
-       struct lttng_session *session;
-
-       list_for_each_entry(session, &sessions, list)
-               lttng_session_lazy_sync_enablers(session);
-       return 0;
-}
-
-struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
-               struct lttng_kernel_event *event_param,
-               struct lttng_channel *chan)
-{
-       struct lttng_enabler *enabler;
-
-       enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
-       if (!enabler)
-               return NULL;
-       enabler->type = type;
-       INIT_LIST_HEAD(&enabler->filter_bytecode_head);
-       memcpy(&enabler->event_param, event_param,
-               sizeof(enabler->event_param));
-       enabler->chan = chan;
-       /* ctx left NULL */
-       enabler->enabled = 0;
-       enabler->evtype = LTTNG_TYPE_ENABLER;
-       mutex_lock(&sessions_mutex);
-       list_add(&enabler->node, &enabler->chan->session->enablers_head);
-       lttng_session_lazy_sync_enablers(enabler->chan->session);
-       mutex_unlock(&sessions_mutex);
-       return enabler;
-}
-
-int lttng_enabler_enable(struct lttng_enabler *enabler)
-{
-       mutex_lock(&sessions_mutex);
-       enabler->enabled = 1;
-       lttng_session_lazy_sync_enablers(enabler->chan->session);
-       mutex_unlock(&sessions_mutex);
-       return 0;
-}
-
-int lttng_enabler_disable(struct lttng_enabler *enabler)
-{
-       mutex_lock(&sessions_mutex);
-       enabler->enabled = 0;
-       lttng_session_lazy_sync_enablers(enabler->chan->session);
-       mutex_unlock(&sessions_mutex);
-       return 0;
-}
-
-int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
-               struct lttng_kernel_filter_bytecode __user *bytecode)
-{
-       struct lttng_filter_bytecode_node *bytecode_node;
-       uint32_t bytecode_len;
-       int ret;
-
-       ret = get_user(bytecode_len, &bytecode->len);
-       if (ret)
-               return ret;
-       bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
-                       GFP_KERNEL);
-       if (!bytecode_node)
-               return -ENOMEM;
-       ret = copy_from_user(&bytecode_node->bc, bytecode,
-               sizeof(*bytecode) + bytecode_len);
-       if (ret)
-               goto error_free;
-       bytecode_node->enabler = enabler;
-       /* Enforce length based on allocated size */
-       bytecode_node->bc.len = bytecode_len;
-       list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
-       lttng_session_lazy_sync_enablers(enabler->chan->session);
-       return 0;
-
-error_free:
-       kfree(bytecode_node);
-       return ret;
-}
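-
-/*
- * Sketch of the userspace layout assumed by the copy above: a fixed
- * header whose ->len field gives the payload size, immediately
- * followed by len bytes of bytecode. That is why a single
- * copy_from_user() of sizeof(*bytecode) + bytecode_len bytes fetches
- * both the header and the program.
- */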
-
-int lttng_event_add_callsite(struct lttng_event *event,
-               struct lttng_kernel_event_callsite __user *callsite)
-{
-       switch (event->instrumentation) {
-       case LTTNG_KERNEL_UPROBE:
-               return lttng_uprobes_add_callsite(event, callsite);
-       default:
-               return -EINVAL;
-       }
-}
-
-int lttng_enabler_attach_context(struct lttng_enabler *enabler,
-               struct lttng_kernel_context *context_param)
-{
-       return -ENOSYS;
-}
-
-static
-void lttng_enabler_destroy(struct lttng_enabler *enabler)
-{
-       struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
-
-       /* Destroy filter bytecode */
-       list_for_each_entry_safe(filter_node, tmp_filter_node,
-                       &enabler->filter_bytecode_head, node) {
-               kfree(filter_node);
-       }
-
-       /* Destroy contexts */
-       lttng_destroy_context(enabler->ctx);
-
-       list_del(&enabler->node);
-       kfree(enabler);
-}
-
-/*
- * lttng_session_sync_enablers should be called just before starting a
- * session.
- * Should be called with sessions mutex held.
- */
-static
-void lttng_session_sync_enablers(struct lttng_session *session)
-{
-       struct lttng_enabler *enabler;
-       struct lttng_event *event;
-
-       list_for_each_entry(enabler, &session->enablers_head, node)
-               lttng_enabler_ref_events(enabler);
-       /*
-        * For each event, if at least one of its enablers is enabled,
-        * and its channel and session transient states are enabled, we
-        * enable the event, else we disable it.
-        */
-       list_for_each_entry(event, &session->events, list) {
-               struct lttng_enabler_ref *enabler_ref;
-               struct lttng_bytecode_runtime *runtime;
-               int enabled = 0, has_enablers_without_bytecode = 0;
-
-               switch (event->instrumentation) {
-               case LTTNG_KERNEL_TRACEPOINT:
-               case LTTNG_KERNEL_SYSCALL:
-                       /* Enable events */
-                       list_for_each_entry(enabler_ref,
-                                       &event->enablers_ref_head, node) {
-                               if (enabler_ref->ref->enabled) {
-                                       enabled = 1;
-                                       break;
-                               }
-                       }
-                       break;
-               default:
-                       /* Not handled with lazy sync. */
-                       continue;
-               }
-               /*
-                * Enabled state is based on the union of enablers, with the
-                * intersection of session and channel transient enable
-                * states.
-                */
-               enabled = enabled && session->tstate && event->chan->tstate;
-
-               WRITE_ONCE(event->enabled, enabled);
-               /*
-                * Sync tracepoint registration with event enabled
-                * state.
-                */
-               if (enabled) {
-                       register_event(event);
-               } else {
-                       _lttng_event_unregister(event);
-               }
-
-               /* Check whether the event has an enabled enabler without filter bytecode. */
-               list_for_each_entry(enabler_ref,
-                               &event->enablers_ref_head, node) {
-                       if (enabler_ref->ref->enabled
-                                       && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
-                               has_enablers_without_bytecode = 1;
-                               break;
-                       }
-               }
-               event->has_enablers_without_bytecode =
-                       has_enablers_without_bytecode;
-
-               /* Enable filters */
-               list_for_each_entry(runtime,
-                               &event->bytecode_runtime_head, node)
-                       lttng_filter_sync_state(runtime);
-       }
-}
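-
-/*
- * Worked example of the enabled computation above:
- *
- *   enabled = (at least one referencing enabler is enabled)
- *             && session->tstate && event->chan->tstate
- *
- * An event with an enabled enabler inside a disabled channel
- * (chan->tstate == 0) thus ends up disabled and unregistered; it is
- * re-registered by the next sync once the channel is enabled again.
- */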
-
-/*
- * Apply enablers to session events, adding events to session if need
- * be. It is required after each modification applied to an active
- * session, and right before session "start".
- * "lazy" sync means we only sync if required.
- * Should be called with sessions mutex held.
- */
-static
-void lttng_session_lazy_sync_enablers(struct lttng_session *session)
-{
-       /* We can skip if session is not active */
-       if (!session->active)
-               return;
-       lttng_session_sync_enablers(session);
-}
-
-/*
- * Serialize at most one packet worth of metadata into a metadata
- * channel.
- * We grab the metadata cache mutex to get exclusive access to our metadata
- * buffer and to the metadata cache. Exclusive access to the metadata buffer
- * allows us to do racy operations such as looking for the remaining space
- * left in a packet and writing to it, since mutual exclusion protects us
- * from concurrent writes. Mutual exclusion on the metadata cache allows us
- * to read the cache content without racing against reallocation of the
- * cache by updates.
- * Returns the number of bytes written in the channel, 0 if no data
- * was written and a negative value on error.
- */
-int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
-               struct channel *chan)
-{
-       struct lib_ring_buffer_ctx ctx;
-       int ret = 0;
-       size_t len, reserve_len;
-
-       /*
- * Ensure we support multiple get_next / put sequences followed by
-        * put_next. The metadata cache lock protects reading the metadata
-        * cache. It can indeed be read concurrently by "get_next_subbuf" and
-        * "flush" operations on the buffer invoked by different processes.
-        * Moreover, since the metadata cache memory can be reallocated, we
-        * need to have exclusive access against updates even though we only
-        * read it.
-        */
-       mutex_lock(&stream->metadata_cache->lock);
-       WARN_ON(stream->metadata_in < stream->metadata_out);
-       if (stream->metadata_in != stream->metadata_out)
-               goto end;
-
-       /* Metadata regenerated, change the version. */
-       if (stream->metadata_cache->version != stream->version)
-               stream->version = stream->metadata_cache->version;
-
-       len = stream->metadata_cache->metadata_written -
-               stream->metadata_in;
-       if (!len)
-               goto end;
-       reserve_len = min_t(size_t,
-                       stream->transport->ops.packet_avail_size(chan),
-                       len);
-       lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
-                       sizeof(char), -1);
-       /*
-        * If reservation failed, return an error to the caller.
-        */
-       ret = stream->transport->ops.event_reserve(&ctx, 0);
-       if (ret != 0) {
-               printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
-               goto end;
-       }
-       stream->transport->ops.event_write(&ctx,
-                       stream->metadata_cache->data + stream->metadata_in,
-                       reserve_len);
-       stream->transport->ops.event_commit(&ctx);
-       stream->metadata_in += reserve_len;
-       ret = reserve_len;
-
-end:
-       mutex_unlock(&stream->metadata_cache->lock);
-       return ret;
-}
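-
-/*
- * Sketch: a consumer-side loop draining the metadata cache through the
- * function above, stopping once it reports 0 (nothing left to write)
- * or a negative error.
- */
-static int lttng_example_drain_metadata(struct lttng_metadata_stream *stream,
-               struct channel *chan)
-{
-       int ret;
-
-       do {
-               ret = lttng_metadata_output_channel(stream, chan);
-       } while (ret > 0);
-       return ret;
-}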
-
-/*
- * Write the metadata to the metadata cache.
- * Must be called with sessions_mutex held.
- * The metadata cache lock protects us from concurrent read access from
- * thread outputting metadata content to ring buffer.
- */
-int lttng_metadata_printf(struct lttng_session *session,
-                         const char *fmt, ...)
-{
-       char *str;
-       size_t len;
-       va_list ap;
-       struct lttng_metadata_stream *stream;
-
-       WARN_ON_ONCE(!READ_ONCE(session->active));
-
-       va_start(ap, fmt);
-       str = kvasprintf(GFP_KERNEL, fmt, ap);
-       va_end(ap);
-       if (!str)
-               return -ENOMEM;
-
-       len = strlen(str);
-       mutex_lock(&session->metadata_cache->lock);
-       if (session->metadata_cache->metadata_written + len >
-                       session->metadata_cache->cache_alloc) {
-               char *tmp_cache_realloc;
-               unsigned int tmp_cache_alloc_size;
-
-               tmp_cache_alloc_size = max_t(unsigned int,
-                               session->metadata_cache->cache_alloc + len,
-                               session->metadata_cache->cache_alloc << 1);
-               tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
-               if (!tmp_cache_realloc)
-                       goto err;
-               if (session->metadata_cache->data) {
-                       memcpy(tmp_cache_realloc,
-                               session->metadata_cache->data,
-                               session->metadata_cache->cache_alloc);
-                       vfree(session->metadata_cache->data);
-               }
-
-               session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
-               session->metadata_cache->data = tmp_cache_realloc;
-       }
-       memcpy(session->metadata_cache->data +
-                       session->metadata_cache->metadata_written,
-                       str, len);
-       session->metadata_cache->metadata_written += len;
-       mutex_unlock(&session->metadata_cache->lock);
-       kfree(str);
-
-       list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
-               wake_up_interruptible(&stream->read_wait);
-
-       return 0;
-
-err:
-       mutex_unlock(&session->metadata_cache->lock);
-       kfree(str);
-       return -ENOMEM;
-}
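-
-/*
- * Worked example of the growth policy above:
- *
- *   new_alloc = max(cache_alloc + len, cache_alloc * 2)
- *
- * Appending 100 bytes to a full 4096-byte cache therefore reallocates
- * to 8192 bytes, so repeated small appends cost amortized O(1) copies.
- */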
-
-static
-int print_tabs(struct lttng_session *session, size_t nesting)
-{
-       size_t i;
-
-       for (i = 0; i < nesting; i++) {
-               int ret;
-
-               ret = lttng_metadata_printf(session, "  ");
-               if (ret) {
-                       return ret;
-               }
-       }
-       return 0;
-}
-
-static
-int lttng_field_name_statedump(struct lttng_session *session,
-               const struct lttng_event_field *field,
-               size_t nesting)
-{
-       return lttng_metadata_printf(session, " _%s;\n", field->name);
-}
-
-static
-int _lttng_integer_type_statedump(struct lttng_session *session,
-               const struct lttng_type *type,
-               size_t nesting)
-{
-       int ret;
-
-       WARN_ON_ONCE(type->atype != atype_integer);
-       ret = print_tabs(session, nesting);
-       if (ret)
-               return ret;
-       ret = lttng_metadata_printf(session,
-               "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
-               type->u.integer.size,
-               type->u.integer.alignment,
-               type->u.integer.signedness,
-               (type->u.integer.encoding == lttng_encode_none)
-                       ? "none"
-                       : (type->u.integer.encoding == lttng_encode_UTF8)
-                               ? "UTF8"
-                               : "ASCII",
-               type->u.integer.base,
-#if __BYTE_ORDER == __BIG_ENDIAN
-               type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
-#else
-               type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
-#endif
-       );
-       return ret;
-}
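-
-/*
- * Sketch: for a native-endian, signed, 32-bit, byte-aligned, base-10
- * integer field, the statedump above emits (indentation aside):
- *
- *   integer { size = 32; align = 8; signed = 1; encoding = none; base = 10; }
- */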
-
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_struct_type_statedump(struct lttng_session *session,
-               const struct lttng_type *type,
-               size_t nesting)
-{
-       int ret;
-       uint32_t i, nr_fields;
-       unsigned int alignment;
-
-       WARN_ON_ONCE(type->atype != atype_struct_nestable);
-
-       ret = print_tabs(session, nesting);
-       if (ret)
-               return ret;
-       ret = lttng_metadata_printf(session,
-               "struct {\n");
-       if (ret)
-               return ret;
-       nr_fields = type->u.struct_nestable.nr_fields;
-       for (i = 0; i < nr_fields; i++) {
-               const struct lttng_event_field *iter_field;
-
-               iter_field = &type->u.struct_nestable.fields[i];
-               ret = _lttng_field_statedump(session, iter_field, nesting + 1);
-               if (ret)
-                       return ret;
-       }
-       ret = print_tabs(session, nesting);
-       if (ret)
-               return ret;
-       alignment = type->u.struct_nestable.alignment;
-       if (alignment) {
-               ret = lttng_metadata_printf(session,
-                       "} align(%u)",
-                       alignment);
-       } else {
-               ret = lttng_metadata_printf(session,
-                       "}");
-       }
-       return ret;
-}
-
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_struct_field_statedump(struct lttng_session *session,
-               const struct lttng_event_field *field,
-               size_t nesting)
-{
-       int ret;
-
-       ret = _lttng_struct_type_statedump(session,
-                       &field->type, nesting);
-       if (ret)
-               return ret;
-       return lttng_field_name_statedump(session, field, nesting);
-}
-
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_variant_type_statedump(struct lttng_session *session,
-               const struct lttng_type *type,
-               size_t nesting)
-{
-       int ret;
-       uint32_t i, nr_choices;
-
-       WARN_ON_ONCE(type->atype != atype_variant_nestable);
-       /*
-        * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
-        */
-       if (type->u.variant_nestable.alignment != 0)
-               return -EINVAL;
-       ret = print_tabs(session, nesting);
-       if (ret)
-               return ret;
-       ret = lttng_metadata_printf(session,
-               "variant <_%s> {\n",
-               type->u.variant_nestable.tag_name);
-       if (ret)
-               return ret;
-       nr_choices = type->u.variant_nestable.nr_choices;
-       for (i = 0; i < nr_choices; i++) {
-               const struct lttng_event_field *iter_field;
-
-               iter_field = &type->u.variant_nestable.choices[i];
-               ret = _lttng_field_statedump(session, iter_field, nesting + 1);
-               if (ret)
-                       return ret;
-       }
-       ret = print_tabs(session, nesting);
-       if (ret)
-               return ret;
-       ret = lttng_metadata_printf(session,
-               "}");
-       return ret;
-}
-
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_variant_field_statedump(struct lttng_session *session,
-               const struct lttng_event_field *field,
-               size_t nesting)
-{
-       int ret;
-
-       ret = _lttng_variant_type_statedump(session,
-                       &field->type, nesting);
-       if (ret)
-               return ret;
-       return lttng_field_name_statedump(session, field, nesting);
-}
-
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_array_field_statedump(struct lttng_session *session,
-               const struct lttng_event_field *field,
-               size_t nesting)
-{
-       int ret;
-       const struct lttng_type *elem_type;
-
-       WARN_ON_ONCE(field->type.atype != atype_array_nestable);
-
-       if (field->type.u.array_nestable.alignment) {
-               ret = print_tabs(session, nesting);
-               if (ret)
-                       return ret;
-               ret = lttng_metadata_printf(session,
-               "struct { } align(%u) _%s_padding;\n",
-                               field->type.u.array_nestable.alignment * CHAR_BIT,
-                               field->name);
-               if (ret)
-                       return ret;
-       }
-       /*
-        * Nested compound types: Only array of structures and variants are
-        * currently supported.
-        */
-       elem_type = field->type.u.array_nestable.elem_type;
-       switch (elem_type->atype) {
-       case atype_integer:
-       case atype_struct_nestable:
-       case atype_variant_nestable:
-               ret = _lttng_type_statedump(session, elem_type, nesting);
-               if (ret)
-                       return ret;
-               break;
-
-       default:
-               return -EINVAL;
-       }
-       ret = lttng_metadata_printf(session,
-               " _%s[%u];\n",
-               field->name,
-               field->type.u.array_nestable.length);
-       return ret;
-}
-
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_sequence_field_statedump(struct lttng_session *session,
-               const struct lttng_event_field *field,
-               size_t nesting)
-{
-       int ret;
-       const char *length_name;
-       const struct lttng_type *elem_type;
-
-       WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
-
-       length_name = field->type.u.sequence_nestable.length_name;
-
-       if (field->type.u.sequence_nestable.alignment) {
-               ret = print_tabs(session, nesting);
-               if (ret)
-                       return ret;
-               ret = lttng_metadata_printf(session,
-               "struct { } align(%u) _%s_padding;\n",
-                               field->type.u.sequence_nestable.alignment * CHAR_BIT,
-                               field->name);
-               if (ret)
-                       return ret;
-       }
-
-       /*
-        * Nested compound types: Only array of structures and variants are
-        * currently supported.
-        */
-       elem_type = field->type.u.sequence_nestable.elem_type;
-       switch (elem_type->atype) {
-       case atype_integer:
-       case atype_struct_nestable:
-       case atype_variant_nestable:
-               ret = _lttng_type_statedump(session, elem_type, nesting);
-               if (ret)
-                       return ret;
-               break;
-
-       default:
-               return -EINVAL;
-       }
-       ret = lttng_metadata_printf(session,
-               " _%s[ _%s ];\n",
-               field->name,
-               field->type.u.sequence_nestable.length_name);
-       return ret;
-}
-
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_enum_type_statedump(struct lttng_session *session,
-               const struct lttng_type *type,
-               size_t nesting)
-{
-       const struct lttng_enum_desc *enum_desc;
-       const struct lttng_type *container_type;
-       int ret;
-       unsigned int i, nr_entries;
-
-       container_type = type->u.enum_nestable.container_type;
-       if (container_type->atype != atype_integer) {
-               ret = -EINVAL;
-               goto end;
-       }
-       enum_desc = type->u.enum_nestable.desc;
-       nr_entries = enum_desc->nr_entries;
-
-       ret = print_tabs(session, nesting);
-       if (ret)
-               goto end;
-       ret = lttng_metadata_printf(session, "enum : ");
-       if (ret)
-               goto end;
-       ret = _lttng_integer_type_statedump(session, container_type, 0);
-       if (ret)
-               goto end;
-       ret = lttng_metadata_printf(session, " {\n");
-       if (ret)
-               goto end;
-       /* Dump all entries */
-       for (i = 0; i < nr_entries; i++) {
-               const struct lttng_enum_entry *entry = &enum_desc->entries[i];
-               int j, len;
-
-               ret = print_tabs(session, nesting + 1);
-               if (ret)
-                       goto end;
-               ret = lttng_metadata_printf(session,
-                               "\"");
-               if (ret)
-                       goto end;
-               len = strlen(entry->string);
-               /* Escape the characters '"' and '\\'. */
-               for (j = 0; j < len; j++) {
-                       char c = entry->string[j];
-
-                       switch (c) {
-                       case '"':
-                               ret = lttng_metadata_printf(session,
-                                               "\\\"");
-                               break;
-                       case '\\':
-                               ret = lttng_metadata_printf(session,
-                                               "\\\\");
-                               break;
-                       default:
-                               ret = lttng_metadata_printf(session,
-                                               "%c", c);
-                               break;
-                       }
-                       if (ret)
-                               goto end;
-               }
-               ret = lttng_metadata_printf(session, "\"");
-               if (ret)
-                       goto end;
-
-               if (entry->options.is_auto) {
-                       ret = lttng_metadata_printf(session, ",\n");
-                       if (ret)
-                               goto end;
-               } else {
-                       ret = lttng_metadata_printf(session,
-                                       " = ");
-                       if (ret)
-                               goto end;
-                       if (entry->start.signedness)
-                               ret = lttng_metadata_printf(session,
-                                       "%lld", (long long) entry->start.value);
-                       else
-                               ret = lttng_metadata_printf(session,
-                                       "%llu", entry->start.value);
-                       if (ret)
-                               goto end;
-                       if (entry->start.signedness == entry->end.signedness &&
-                                       entry->start.value
-                                               == entry->end.value) {
-                               ret = lttng_metadata_printf(session,
-                                       ",\n");
-                       } else {
-                               if (entry->end.signedness) {
-                                       ret = lttng_metadata_printf(session,
-                                               " ... %lld,\n",
-                                               (long long) entry->end.value);
-                               } else {
-                                       ret = lttng_metadata_printf(session,
-                                               " ... %llu,\n",
-                                               entry->end.value);
-                               }
-                       }
-                       if (ret)
-                               goto end;
-               }
-       }
-       ret = print_tabs(session, nesting);
-       if (ret)
-               goto end;
-       ret = lttng_metadata_printf(session, "}");
-end:
-       return ret;
-}
-
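-/*
- * For illustration, a dump of a hypothetical enumeration with an
- * auto-valued entry, a single-value entry and a range entry looks like:
- *
- *   enum : integer { size = 32; ... } {
- *           "AUTO",
- *           "ONE" = 1,
- *           "LOW" = 2 ... 5,
- *   };
- */
-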
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_enum_field_statedump(struct lttng_session *session,
-               const struct lttng_event_field *field,
-               size_t nesting)
-{
-       int ret;
-
-       ret = _lttng_enum_type_statedump(session, &field->type, nesting);
-       if (ret)
-               return ret;
-       return lttng_field_name_statedump(session, field, nesting);
-}
-
-static
-int _lttng_integer_field_statedump(struct lttng_session *session,
-               const struct lttng_event_field *field,
-               size_t nesting)
-{
-       int ret;
-
-       ret = _lttng_integer_type_statedump(session, &field->type, nesting);
-       if (ret)
-               return ret;
-       return lttng_field_name_statedump(session, field, nesting);
-}
-
-static
-int _lttng_string_type_statedump(struct lttng_session *session,
-               const struct lttng_type *type,
-               size_t nesting)
-{
-       int ret;
-
-       WARN_ON_ONCE(type->atype != atype_string);
-       /* Default encoding is UTF8 */
-       ret = print_tabs(session, nesting);
-       if (ret)
-               return ret;
-       ret = lttng_metadata_printf(session,
-               "string%s",
-               type->u.string.encoding == lttng_encode_ASCII ?
-                       " { encoding = ASCII; }" : "");
-       return ret;
-}
-
-static
-int _lttng_string_field_statedump(struct lttng_session *session,
-               const struct lttng_event_field *field,
-               size_t nesting)
-{
-       int ret;
-
-       WARN_ON_ONCE(field->type.atype != atype_string);
-       ret = _lttng_string_type_statedump(session, &field->type, nesting);
-       if (ret)
-               return ret;
-       return lttng_field_name_statedump(session, field, nesting);
-}
-
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_type_statedump(struct lttng_session *session,
-               const struct lttng_type *type,
-               size_t nesting)
-{
-       int ret = 0;
-
-       switch (type->atype) {
-       case atype_integer:
-               ret = _lttng_integer_type_statedump(session, type, nesting);
-               break;
-       case atype_enum_nestable:
-               ret = _lttng_enum_type_statedump(session, type, nesting);
-               break;
-       case atype_string:
-               ret = _lttng_string_type_statedump(session, type, nesting);
-               break;
-       case atype_struct_nestable:
-               ret = _lttng_struct_type_statedump(session, type, nesting);
-               break;
-       case atype_variant_nestable:
-               ret = _lttng_variant_type_statedump(session, type, nesting);
-               break;
-
-       /* Nested arrays and sequences are not supported yet. */
-       case atype_array_nestable:
-       case atype_sequence_nestable:
-       default:
-               WARN_ON_ONCE(1);
-               return -EINVAL;
-       }
-       return ret;
-}
-
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_field_statedump(struct lttng_session *session,
-               const struct lttng_event_field *field,
-               size_t nesting)
-{
-       int ret = 0;
-
-       switch (field->type.atype) {
-       case atype_integer:
-               ret = _lttng_integer_field_statedump(session, field, nesting);
-               break;
-       case atype_enum_nestable:
-               ret = _lttng_enum_field_statedump(session, field, nesting);
-               break;
-       case atype_string:
-               ret = _lttng_string_field_statedump(session, field, nesting);
-               break;
-       case atype_struct_nestable:
-               ret = _lttng_struct_field_statedump(session, field, nesting);
-               break;
-       case atype_array_nestable:
-               ret = _lttng_array_field_statedump(session, field, nesting);
-               break;
-       case atype_sequence_nestable:
-               ret = _lttng_sequence_field_statedump(session, field, nesting);
-               break;
-       case atype_variant_nestable:
-               ret = _lttng_variant_field_statedump(session, field, nesting);
-               break;
-
-       default:
-               WARN_ON_ONCE(1);
-               return -EINVAL;
-       }
-       return ret;
-}
-
-static
-int _lttng_context_metadata_statedump(struct lttng_session *session,
-                                   struct lttng_ctx *ctx)
-{
-       int ret = 0;
-       int i;
-
-       if (!ctx)
-               return 0;
-       for (i = 0; i < ctx->nr_fields; i++) {
-               const struct lttng_ctx_field *field = &ctx->fields[i];
-
-               ret = _lttng_field_statedump(session, &field->event_field, 2);
-               if (ret)
-                       return ret;
-       }
-       return ret;
-}
-
-static
-int _lttng_fields_metadata_statedump(struct lttng_session *session,
-                                  struct lttng_event *event)
-{
-       const struct lttng_event_desc *desc = event->desc;
-       int ret = 0;
-       int i;
-
-       for (i = 0; i < desc->nr_fields; i++) {
-               const struct lttng_event_field *field = &desc->fields[i];
-
-               ret = _lttng_field_statedump(session, field, 2);
-               if (ret)
-                       return ret;
-       }
-       return ret;
-}
-
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_event_metadata_statedump(struct lttng_session *session,
-                                 struct lttng_channel *chan,
-                                 struct lttng_event *event)
-{
-       int ret = 0;
-
-       if (event->metadata_dumped || !READ_ONCE(session->active))
-               return 0;
-       if (chan->channel_type == METADATA_CHANNEL)
-               return 0;
-
-       ret = lttng_metadata_printf(session,
-               "event {\n"
-               "       name = \"%s\";\n"
-               "       id = %u;\n"
-               "       stream_id = %u;\n",
-               event->desc->name,
-               event->id,
-               event->chan->id);
-       if (ret)
-               goto end;
-
-       if (event->ctx) {
-               ret = lttng_metadata_printf(session,
-                       "       context := struct {\n");
-               if (ret)
-                       goto end;
-       }
-       ret = _lttng_context_metadata_statedump(session, event->ctx);
-       if (ret)
-               goto end;
-       if (event->ctx) {
-               ret = lttng_metadata_printf(session,
-                       "       };\n");
-               if (ret)
-                       goto end;
-       }
-
-       ret = lttng_metadata_printf(session,
-               "       fields := struct {\n"
-               );
-       if (ret)
-               goto end;
-
-       ret = _lttng_fields_metadata_statedump(session, event);
-       if (ret)
-               goto end;
-
-       /*
-        * LTTng space reservation can only reserve multiples of the
-        * byte size.
-        */
-       ret = lttng_metadata_printf(session,
-               "       };\n"
-               "};\n\n");
-       if (ret)
-               goto end;
-
-       event->metadata_dumped = 1;
-end:
-       return ret;
-
-}
-
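-/*
- * For illustration, the event metadata emitted above for a hypothetical
- * event (names and ids invented) takes the form:
- *
- *   event {
- *           name = "example_event";
- *           id = 3;
- *           stream_id = 0;
- *           fields := struct {
- *                   ...
- *           };
- *   };
- */
-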
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_channel_metadata_statedump(struct lttng_session *session,
-                                   struct lttng_channel *chan)
-{
-       int ret = 0;
-
-       if (chan->metadata_dumped || !READ_ONCE(session->active))
-               return 0;
-
-       if (chan->channel_type == METADATA_CHANNEL)
-               return 0;
-
-       WARN_ON_ONCE(!chan->header_type);
-       ret = lttng_metadata_printf(session,
-               "stream {\n"
-               "       id = %u;\n"
-               "       event.header := %s;\n"
-               "       packet.context := struct packet_context;\n",
-               chan->id,
-               chan->header_type == 1 ? "struct event_header_compact" :
-                       "struct event_header_large");
-       if (ret)
-               goto end;
-
-       if (chan->ctx) {
-               ret = lttng_metadata_printf(session,
-                       "       event.context := struct {\n");
-               if (ret)
-                       goto end;
-       }
-       ret = _lttng_context_metadata_statedump(session, chan->ctx);
-       if (ret)
-               goto end;
-       if (chan->ctx) {
-               ret = lttng_metadata_printf(session,
-                       "       };\n");
-               if (ret)
-                       goto end;
-       }
-
-       ret = lttng_metadata_printf(session,
-               "};\n\n");
-
-       chan->metadata_dumped = 1;
-end:
-       return ret;
-}
-
-/*
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_stream_packet_context_declare(struct lttng_session *session)
-{
-       return lttng_metadata_printf(session,
-               "struct packet_context {\n"
-               "       uint64_clock_monotonic_t timestamp_begin;\n"
-               "       uint64_clock_monotonic_t timestamp_end;\n"
-               "       uint64_t content_size;\n"
-               "       uint64_t packet_size;\n"
-               "       uint64_t packet_seq_num;\n"
-               "       unsigned long events_discarded;\n"
-               "       uint32_t cpu_id;\n"
-               "};\n\n"
-               );
-}
-
-/*
- * Compact header:
- * id: range: 0 - 30.
- * id 31 is reserved to indicate an extended header.
- *
- * Large header:
- * id: range: 0 - 65534.
- * id 65535 is reserved to indicate an extended header.
- *
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_event_header_declare(struct lttng_session *session)
-{
-       return lttng_metadata_printf(session,
-       "struct event_header_compact {\n"
-       "       enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
-       "       variant <id> {\n"
-       "               struct {\n"
-       "                       uint27_clock_monotonic_t timestamp;\n"
-       "               } compact;\n"
-       "               struct {\n"
-       "                       uint32_t id;\n"
-       "                       uint64_clock_monotonic_t timestamp;\n"
-       "               } extended;\n"
-       "       } v;\n"
-       "} align(%u);\n"
-       "\n"
-       "struct event_header_large {\n"
-       "       enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
-       "       variant <id> {\n"
-       "               struct {\n"
-       "                       uint32_clock_monotonic_t timestamp;\n"
-       "               } compact;\n"
-       "               struct {\n"
-       "                       uint32_t id;\n"
-       "                       uint64_clock_monotonic_t timestamp;\n"
-       "               } extended;\n"
-       "       } v;\n"
-       "} align(%u);\n\n",
-       lttng_alignof(uint32_t) * CHAR_BIT,
-       lttng_alignof(uint16_t) * CHAR_BIT
-       );
-}
-
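-/*
- * A minimal sketch (not part of the tracer; helper name is illustrative)
- * of the id-range rule the declarations above encode: the compact header
- * stores ids 0..30 inline in the 5-bit id field and uses 31 as the escape
- * value for the extended form; the large header does the same with 16 bits
- * and the escape value 65535.
- */
-static inline
-int lttng_example_header_is_extended(unsigned int event_id, int compact)
-{
-       return compact ? event_id > 30 : event_id > 65534;
-}
-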
-/*
- * Approximation of the NTP time of day to clock monotonic correlation,
- * taken at the start of the trace.
- * Yes, this is only an approximation. Yes, we can (and will) do better
- * in future versions.
- * This function may return a negative offset. This can happen if the
- * system sets the REALTIME clock to 0 after boot.
- *
- * Use the 64-bit timespec on kernels that have it: this makes 32-bit
- * architectures y2038 compliant.
- */
-static
-int64_t measure_clock_offset(void)
-{
-       uint64_t monotonic_avg, monotonic[2], realtime;
-       uint64_t tcf = trace_clock_freq();
-       int64_t offset;
-       unsigned long flags;
-#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
-       struct timespec64 rts = { 0, 0 };
-#else
-       struct timespec rts = { 0, 0 };
-#endif
-
-       /* Disable interrupts to increase correlation precision. */
-       local_irq_save(flags);
-       monotonic[0] = trace_clock_read64();
-#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
-       ktime_get_real_ts64(&rts);
-#else
-       getnstimeofday(&rts);
-#endif
-       monotonic[1] = trace_clock_read64();
-       local_irq_restore(flags);
-
-       monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
-       realtime = (uint64_t) rts.tv_sec * tcf;
-       if (tcf == NSEC_PER_SEC) {
-               realtime += rts.tv_nsec;
-       } else {
-               uint64_t n = rts.tv_nsec * tcf;
-
-               do_div(n, NSEC_PER_SEC);
-               realtime += n;
-       }
-       offset = (int64_t) realtime - monotonic_avg;
-       return offset;
-}
-
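-/*
- * Hedged usage sketch (helper name is illustrative): given the offset
- * sampled once at trace start by measure_clock_offset(), and assuming the
- * common case where trace_clock_freq() == NSEC_PER_SEC, a raw clock sample
- * converts to approximate wall-clock nanoseconds as follows.
- */
-static inline
-int64_t lttng_example_approx_realtime_ns(uint64_t clock_sample, int64_t offset)
-{
-       return (int64_t) clock_sample + offset;
-}
-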
-static
-int print_escaped_ctf_string(struct lttng_session *session, const char *string)
-{
-       int ret = 0;
-       size_t i;
-       char cur;
-
-       i = 0;
-       cur = string[i];
-       while (cur != '\0') {
-               switch (cur) {
-               case '\n':
-                       ret = lttng_metadata_printf(session, "%s", "\\n");
-                       break;
-               case '\\':
-               case '"':
-                       ret = lttng_metadata_printf(session, "%c", '\\');
-                       if (ret)
-                               goto error;
-                       /* We still print the current char */
-                       /* Fallthrough */
-               default:
-                       ret = lttng_metadata_printf(session, "%c", cur);
-                       break;
-               }
-
-               if (ret)
-                       goto error;
-
-               cur = string[++i];
-       }
-error:
-       return ret;
-}
-
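-/*
- * Worked example of the escaping above: the input
- *   a "quoted" \ backslash
- * is emitted as
- *   a \"quoted\" \\ backslash
- * and each embedded newline becomes the two-character sequence \n.
- */
-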
-static
-int print_metadata_escaped_field(struct lttng_session *session, const char *field,
-               const char *field_value)
-{
-       int ret;
-
-       ret = lttng_metadata_printf(session, "  %s = \"", field);
-       if (ret)
-               goto error;
-
-       ret = print_escaped_ctf_string(session, field_value);
-       if (ret)
-               goto error;
-
-       ret = lttng_metadata_printf(session, "\";\n");
-
-error:
-       return ret;
-}
-
-/*
- * Output metadata into this session's metadata buffers.
- * Must be called with sessions_mutex held.
- */
-static
-int _lttng_session_metadata_statedump(struct lttng_session *session)
-{
-       unsigned char *uuid_c = session->uuid.b;
-       char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
-       const char *product_uuid;
-       struct lttng_channel *chan;
-       struct lttng_event *event;
-       int ret = 0;
-
-       if (!READ_ONCE(session->active))
-               return 0;
-       if (session->metadata_dumped)
-               goto skip_session;
-
-       snprintf(uuid_s, sizeof(uuid_s),
-               "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
-               uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
-               uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
-               uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
-               uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
-
-       ret = lttng_metadata_printf(session,
-               "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
-               "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
-               "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
-               "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
-               "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
-               "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
-               "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
-               "\n"
-               "trace {\n"
-               "       major = %u;\n"
-               "       minor = %u;\n"
-               "       uuid = \"%s\";\n"
-               "       byte_order = %s;\n"
-               "       packet.header := struct {\n"
-               "               uint32_t magic;\n"
-               "               uint8_t  uuid[16];\n"
-               "               uint32_t stream_id;\n"
-               "               uint64_t stream_instance_id;\n"
-               "       };\n"
-               "};\n\n",
-               lttng_alignof(uint8_t) * CHAR_BIT,
-               lttng_alignof(uint16_t) * CHAR_BIT,
-               lttng_alignof(uint32_t) * CHAR_BIT,
-               lttng_alignof(uint64_t) * CHAR_BIT,
-               sizeof(unsigned long) * CHAR_BIT,
-               lttng_alignof(unsigned long) * CHAR_BIT,
-               CTF_SPEC_MAJOR,
-               CTF_SPEC_MINOR,
-               uuid_s,
-#if __BYTE_ORDER == __BIG_ENDIAN
-               "be"
-#else
-               "le"
-#endif
-               );
-       if (ret)
-               goto end;
-
-       ret = lttng_metadata_printf(session,
-               "env {\n"
-               "       hostname = \"%s\";\n"
-               "       domain = \"kernel\";\n"
-               "       sysname = \"%s\";\n"
-               "       kernel_release = \"%s\";\n"
-               "       kernel_version = \"%s\";\n"
-               "       tracer_name = \"lttng-modules\";\n"
-               "       tracer_major = %d;\n"
-               "       tracer_minor = %d;\n"
-               "       tracer_patchlevel = %d;\n"
-               "       trace_buffering_scheme = \"global\";\n",
-               current->nsproxy->uts_ns->name.nodename,
-               utsname()->sysname,
-               utsname()->release,
-               utsname()->version,
-               LTTNG_MODULES_MAJOR_VERSION,
-               LTTNG_MODULES_MINOR_VERSION,
-               LTTNG_MODULES_PATCHLEVEL_VERSION
-               );
-       if (ret)
-               goto end;
-
-       ret = print_metadata_escaped_field(session, "trace_name", session->name);
-       if (ret)
-               goto end;
-       ret = print_metadata_escaped_field(session, "trace_creation_datetime",
-                       session->creation_time);
-       if (ret)
-               goto end;
-
-       /* Add the product UUID to the 'env' section */
-       product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
-       if (product_uuid) {
-               ret = lttng_metadata_printf(session,
-                               "       product_uuid = \"%s\";\n",
-                               product_uuid
-                               );
-               if (ret)
-                       goto end;
-       }
-
-       /* Close the 'env' section */
-       ret = lttng_metadata_printf(session, "};\n\n");
-       if (ret)
-               goto end;
-
-       ret = lttng_metadata_printf(session,
-               "clock {\n"
-               "       name = \"%s\";\n",
-               trace_clock_name()
-               );
-       if (ret)
-               goto end;
-
-       if (!trace_clock_uuid(clock_uuid_s)) {
-               ret = lttng_metadata_printf(session,
-                       "       uuid = \"%s\";\n",
-                       clock_uuid_s
-                       );
-               if (ret)
-                       goto end;
-       }
-
-       ret = lttng_metadata_printf(session,
-               "       description = \"%s\";\n"
-               "       freq = %llu; /* Frequency, in Hz */\n"
-               "       /* clock value offset from Epoch is: offset * (1/freq) */\n"
-               "       offset = %lld;\n"
-               "};\n\n",
-               trace_clock_description(),
-               (unsigned long long) trace_clock_freq(),
-               (long long) measure_clock_offset()
-               );
-       if (ret)
-               goto end;
-
-       ret = lttng_metadata_printf(session,
-               "typealias integer {\n"
-               "       size = 27; align = 1; signed = false;\n"
-               "       map = clock.%s.value;\n"
-               "} := uint27_clock_monotonic_t;\n"
-               "\n"
-               "typealias integer {\n"
-               "       size = 32; align = %u; signed = false;\n"
-               "       map = clock.%s.value;\n"
-               "} := uint32_clock_monotonic_t;\n"
-               "\n"
-               "typealias integer {\n"
-               "       size = 64; align = %u; signed = false;\n"
-               "       map = clock.%s.value;\n"
-               "} := uint64_clock_monotonic_t;\n\n",
-               trace_clock_name(),
-               lttng_alignof(uint32_t) * CHAR_BIT,
-               trace_clock_name(),
-               lttng_alignof(uint64_t) * CHAR_BIT,
-               trace_clock_name()
-               );
-       if (ret)
-               goto end;
-
-       ret = _lttng_stream_packet_context_declare(session);
-       if (ret)
-               goto end;
-
-       ret = _lttng_event_header_declare(session);
-       if (ret)
-               goto end;
-
-skip_session:
-       list_for_each_entry(chan, &session->chan, list) {
-               ret = _lttng_channel_metadata_statedump(session, chan);
-               if (ret)
-                       goto end;
-       }
-
-       list_for_each_entry(event, &session->events, list) {
-               ret = _lttng_event_metadata_statedump(session, event->chan, event);
-               if (ret)
-                       goto end;
-       }
-       session->metadata_dumped = 1;
-end:
-       return ret;
-}
-
-/**
- * lttng_transport_register - LTT transport registration
- * @transport: transport structure
- *
- * Registers a transport which can be used as output to extract the data out of
- * LTTng. The module calling this registration function must ensure that no
- * trap-inducing code will be executed by the transport functions. E.g.
- * vmalloc_sync_mappings() must be called between a vmalloc and the moment
- * the memory is made visible to the transport function. This registration
- * itself acts as a vmalloc_sync_mappings; therefore, a module only needs to
- * synchronize the TLBs itself if it allocates virtual memory after its
- * registration.
- */
-void lttng_transport_register(struct lttng_transport *transport)
-{
-       /*
-        * Make sure no page fault can be triggered by the module about to be
-        * registered. We deal with this here so we don't have to call
-        * vmalloc_sync_mappings() in each module's init.
-        */
-       wrapper_vmalloc_sync_mappings();
-
-       mutex_lock(&sessions_mutex);
-       list_add_tail(&transport->node, &lttng_transport_list);
-       mutex_unlock(&sessions_mutex);
-}
-EXPORT_SYMBOL_GPL(lttng_transport_register);
-
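-/*
- * A minimal sketch (identifiers illustrative) of how a ring-buffer client
- * module registers itself, assuming the usual name/owner/ops layout of
- * struct lttng_transport:
- *
- *     static struct lttng_transport example_transport = {
- *             .name = "relay-example",
- *             .owner = THIS_MODULE,
- *             .ops = { ... },
- *     };
- *
- *     lttng_transport_register(&example_transport);
- */
-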
-/**
- * lttng_transport_unregister - LTT transport unregistration
- * @transport: transport structure
- */
-void lttng_transport_unregister(struct lttng_transport *transport)
-{
-       mutex_lock(&sessions_mutex);
-       list_del(&transport->node);
-       mutex_unlock(&sessions_mutex);
-}
-EXPORT_SYMBOL_GPL(lttng_transport_unregister);
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-
-enum cpuhp_state lttng_hp_prepare;
-enum cpuhp_state lttng_hp_online;
-
-static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
-{
-       struct lttng_cpuhp_node *lttng_node;
-
-       lttng_node = container_of(node, struct lttng_cpuhp_node, node);
-       switch (lttng_node->component) {
-       case LTTNG_RING_BUFFER_FRONTEND:
-               return 0;
-       case LTTNG_RING_BUFFER_BACKEND:
-               return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
-       case LTTNG_RING_BUFFER_ITER:
-               return 0;
-       case LTTNG_CONTEXT_PERF_COUNTERS:
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
-{
-       struct lttng_cpuhp_node *lttng_node;
-
-       lttng_node = container_of(node, struct lttng_cpuhp_node, node);
-       switch (lttng_node->component) {
-       case LTTNG_RING_BUFFER_FRONTEND:
-               return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
-       case LTTNG_RING_BUFFER_BACKEND:
-               return 0;
-       case LTTNG_RING_BUFFER_ITER:
-               return 0;
-       case LTTNG_CONTEXT_PERF_COUNTERS:
-               return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
-       default:
-               return -EINVAL;
-       }
-}
-
-static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
-{
-       struct lttng_cpuhp_node *lttng_node;
-
-       lttng_node = container_of(node, struct lttng_cpuhp_node, node);
-       switch (lttng_node->component) {
-       case LTTNG_RING_BUFFER_FRONTEND:
-               return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
-       case LTTNG_RING_BUFFER_BACKEND:
-               return 0;
-       case LTTNG_RING_BUFFER_ITER:
-               return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
-       case LTTNG_CONTEXT_PERF_COUNTERS:
-               return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
-       default:
-               return -EINVAL;
-       }
-}
-
-static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
-{
-       struct lttng_cpuhp_node *lttng_node;
-
-       lttng_node = container_of(node, struct lttng_cpuhp_node, node);
-       switch (lttng_node->component) {
-       case LTTNG_RING_BUFFER_FRONTEND:
-               return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
-       case LTTNG_RING_BUFFER_BACKEND:
-               return 0;
-       case LTTNG_RING_BUFFER_ITER:
-               return 0;
-       case LTTNG_CONTEXT_PERF_COUNTERS:
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int __init lttng_init_cpu_hotplug(void)
-{
-       int ret;
-
-       ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
-                       lttng_hotplug_prepare,
-                       lttng_hotplug_dead);
-       if (ret < 0) {
-               return ret;
-       }
-       lttng_hp_prepare = ret;
-       lttng_rb_set_hp_prepare(ret);
-
-       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
-                       lttng_hotplug_online,
-                       lttng_hotplug_offline);
-       if (ret < 0) {
-               cpuhp_remove_multi_state(lttng_hp_prepare);
-               lttng_hp_prepare = 0;
-               return ret;
-       }
-       lttng_hp_online = ret;
-       lttng_rb_set_hp_online(ret);
-
-       return 0;
-}
-
-static void __exit lttng_exit_cpu_hotplug(void)
-{
-       lttng_rb_set_hp_online(0);
-       cpuhp_remove_multi_state(lttng_hp_online);
-       lttng_rb_set_hp_prepare(0);
-       cpuhp_remove_multi_state(lttng_hp_prepare);
-}
-
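-/*
- * Hedged sketch (helper name illustrative): per-object lttng_cpuhp_node
- * instances elsewhere in the tracer attach to the dynamic states set up
- * above through the standard cpuhp multi-instance API, roughly as follows.
- */
-static inline
-int lttng_example_attach_online_node(struct lttng_cpuhp_node *node)
-{
-       node->component = LTTNG_RING_BUFFER_FRONTEND;   /* illustrative */
-       return cpuhp_state_add_instance(lttng_hp_online, &node->node);
-}
-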
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-static int lttng_init_cpu_hotplug(void)
-{
-       return 0;
-}
-static void lttng_exit_cpu_hotplug(void)
-{
-}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-
-static int __init lttng_events_init(void)
-{
-       int ret;
-
-       ret = wrapper_lttng_fixup_sig(THIS_MODULE);
-       if (ret)
-               return ret;
-       ret = wrapper_get_pfnblock_flags_mask_init();
-       if (ret)
-               return ret;
-       ret = wrapper_get_pageblock_flags_mask_init();
-       if (ret)
-               return ret;
-       ret = lttng_probes_init();
-       if (ret)
-               return ret;
-       ret = lttng_context_init();
-       if (ret)
-               return ret;
-       ret = lttng_tracepoint_init();
-       if (ret)
-               goto error_tp;
-       event_cache = KMEM_CACHE(lttng_event, 0);
-       if (!event_cache) {
-               ret = -ENOMEM;
-               goto error_kmem;
-       }
-       ret = lttng_abi_init();
-       if (ret)
-               goto error_abi;
-       ret = lttng_logger_init();
-       if (ret)
-               goto error_logger;
-       ret = lttng_init_cpu_hotplug();
-       if (ret)
-               goto error_hotplug;
-       printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
-               __stringify(LTTNG_MODULES_MAJOR_VERSION),
-               __stringify(LTTNG_MODULES_MINOR_VERSION),
-               __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
-               LTTNG_MODULES_EXTRAVERSION,
-               LTTNG_VERSION_NAME,
-#ifdef LTTNG_EXTRA_VERSION_GIT
-               LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
-#else
-               "",
-#endif
-#ifdef LTTNG_EXTRA_VERSION_NAME
-               LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
-#else
-               "");
-#endif
-       return 0;
-
-error_hotplug:
-       lttng_logger_exit();
-error_logger:
-       lttng_abi_exit();
-error_abi:
-       kmem_cache_destroy(event_cache);
-error_kmem:
-       lttng_tracepoint_exit();
-error_tp:
-       lttng_context_exit();
-       printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
-               __stringify(LTTNG_MODULES_MAJOR_VERSION),
-               __stringify(LTTNG_MODULES_MINOR_VERSION),
-               __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
-               LTTNG_MODULES_EXTRAVERSION,
-               LTTNG_VERSION_NAME,
-#ifdef LTTNG_EXTRA_VERSION_GIT
-               LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
-#else
-               "",
-#endif
-#ifdef LTTNG_EXTRA_VERSION_NAME
-               LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
-#else
-               "");
-#endif
-       return ret;
-}
-
-module_init(lttng_events_init);
-
-static void __exit lttng_events_exit(void)
-{
-       struct lttng_session *session, *tmpsession;
-
-       lttng_exit_cpu_hotplug();
-       lttng_logger_exit();
-       lttng_abi_exit();
-       list_for_each_entry_safe(session, tmpsession, &sessions, list)
-               lttng_session_destroy(session);
-       kmem_cache_destroy(event_cache);
-       lttng_tracepoint_exit();
-       lttng_context_exit();
-       printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
-               __stringify(LTTNG_MODULES_MAJOR_VERSION),
-               __stringify(LTTNG_MODULES_MINOR_VERSION),
-               __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
-               LTTNG_MODULES_EXTRAVERSION,
-               LTTNG_VERSION_NAME,
-#ifdef LTTNG_EXTRA_VERSION_GIT
-               LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
-#else
-               "",
-#endif
-#ifdef LTTNG_EXTRA_VERSION_NAME
-               LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
-#else
-               "");
-#endif
-}
-
-module_exit(lttng_events_exit);
-
-#include <generated/patches.i>
-#ifdef LTTNG_EXTRA_VERSION_GIT
-MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
-#endif
-#ifdef LTTNG_EXTRA_VERSION_NAME
-MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
-#endif
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng tracer");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/lttng-filter-interpreter.c b/lttng-filter-interpreter.c
deleted file mode 100644 (file)
index 5ba7e23..0000000
--- a/lttng-filter-interpreter.c
+++ /dev/null
@@ -1,1579 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-interpreter.c
- *
- * LTTng modules filter interpreter.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <wrapper/uaccess.h>
-#include <wrapper/frame.h>
-#include <wrapper/types.h>
-#include <linux/swab.h>
-
-#include <lttng/filter.h>
-#include <lttng/string-utils.h>
-
-LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
-
-/*
- * get_char should be called with the page fault handler disabled if it is
- * expected to handle user-space reads.
- */
-static
-char get_char(struct estack_entry *reg, size_t offset)
-{
-       if (unlikely(offset >= reg->u.s.seq_len))
-               return '\0';
-       if (reg->u.s.user) {
-               char c;
-
-               /* Handle invalid access as end of string. */
-               if (unlikely(!lttng_access_ok(VERIFY_READ,
-                               reg->u.s.user_str + offset,
-                               sizeof(c))))
-                       return '\0';
-               /* Handle fault (nonzero return value) as end of string. */
-               if (unlikely(__copy_from_user_inatomic(&c,
-                               reg->u.s.user_str + offset,
-                               sizeof(c))))
-                       return '\0';
-               return c;
-       } else {
-               return reg->u.s.str[offset];
-       }
-}
-
-/*
- * -1: wildcard found.
- * -2: unknown escape char.
- * 0: normal char.
- */
-static
-int parse_char(struct estack_entry *reg, char *c, size_t *offset)
-{
-       switch (*c) {
-       case '\\':
-               (*offset)++;
-               *c = get_char(reg, *offset);
-               switch (*c) {
-               case '\\':
-               case '*':
-                       return 0;
-               default:
-                       return -2;
-               }
-       case '*':
-               return -1;
-       default:
-               return 0;
-       }
-}
-
-static
-char get_char_at_cb(size_t at, void *data)
-{
-       return get_char(data, at);
-}
-
-static
-int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
-{
-       bool has_user = false;
-       mm_segment_t old_fs;
-       int result;
-       struct estack_entry *pattern_reg;
-       struct estack_entry *candidate_reg;
-
-       if (estack_bx(stack, top)->u.s.user
-                       || estack_ax(stack, top)->u.s.user) {
-               has_user = true;
-               old_fs = get_fs();
-               set_fs(KERNEL_DS);
-               pagefault_disable();
-       }
-
-       /* Find out which side is the pattern vs. the candidate. */
-       if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
-               pattern_reg = estack_ax(stack, top);
-               candidate_reg = estack_bx(stack, top);
-       } else {
-               pattern_reg = estack_bx(stack, top);
-               candidate_reg = estack_ax(stack, top);
-       }
-
-       /* Perform the match operation. */
-       result = !strutils_star_glob_match_char_cb(get_char_at_cb,
-               pattern_reg, get_char_at_cb, candidate_reg);
-       if (has_user) {
-               pagefault_enable();
-               set_fs(old_fs);
-       }
-
-       return result;
-}
-
-static
-int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
-{
-       size_t offset_bx = 0, offset_ax = 0;
-       int diff, has_user = 0;
-       mm_segment_t old_fs;
-
-       if (estack_bx(stack, top)->u.s.user
-                       || estack_ax(stack, top)->u.s.user) {
-               has_user = 1;
-               old_fs = get_fs();
-               set_fs(KERNEL_DS);
-               pagefault_disable();
-       }
-
-       for (;;) {
-               int ret;
-               int escaped_r0 = 0;
-               char char_bx, char_ax;
-
-               char_bx = get_char(estack_bx(stack, top), offset_bx);
-               char_ax = get_char(estack_ax(stack, top), offset_ax);
-
-               if (unlikely(char_bx == '\0')) {
-                       if (char_ax == '\0') {
-                               diff = 0;
-                               break;
-                       } else {
-                               if (estack_ax(stack, top)->u.s.literal_type ==
-                                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
-                                       ret = parse_char(estack_ax(stack, top),
-                                               &char_ax, &offset_ax);
-                                       if (ret == -1) {
-                                               diff = 0;
-                                               break;
-                                       }
-                               }
-                               diff = -1;
-                               break;
-                       }
-               }
-               if (unlikely(char_ax == '\0')) {
-                       if (estack_bx(stack, top)->u.s.literal_type ==
-                                       ESTACK_STRING_LITERAL_TYPE_PLAIN) {
-                               ret = parse_char(estack_bx(stack, top),
-                                       &char_bx, &offset_bx);
-                               if (ret == -1) {
-                                       diff = 0;
-                                       break;
-                               }
-                       }
-                       diff = 1;
-                       break;
-               }
-               if (estack_bx(stack, top)->u.s.literal_type ==
-                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
-                       ret = parse_char(estack_bx(stack, top),
-                               &char_bx, &offset_bx);
-                       if (ret == -1) {
-                               diff = 0;
-                               break;
-                       } else if (ret == -2) {
-                               escaped_r0 = 1;
-                       }
-                       /* else compare both char */
-               }
-               if (estack_ax(stack, top)->u.s.literal_type ==
-                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
-                       ret = parse_char(estack_ax(stack, top),
-                               &char_ax, &offset_ax);
-                       if (ret == -1) {
-                               diff = 0;
-                               break;
-                       } else if (ret == -2) {
-                               if (!escaped_r0) {
-                                       diff = -1;
-                                       break;
-                               }
-                       } else {
-                               if (escaped_r0) {
-                                       diff = 1;
-                                       break;
-                               }
-                       }
-               } else {
-                       if (escaped_r0) {
-                               diff = 1;
-                               break;
-                       }
-               }
-               diff = char_bx - char_ax;
-               if (diff != 0)
-                       break;
-               offset_bx++;
-               offset_ax++;
-       }
-       if (has_user) {
-               pagefault_enable();
-               set_fs(old_fs);
-       }
-       return diff;
-}
-
-uint64_t lttng_filter_false(void *filter_data,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               const char *filter_stack_data)
-{
-       return 0;
-}
-
-#ifdef INTERPRETER_USE_SWITCH
-
-/*
- * Fallback for compilers that do not support taking address of labels.
- */
-
-#define START_OP                                                       \
-       start_pc = &bytecode->data[0];                                  \
-       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;    \
-                       pc = next_pc) {                                 \
-               dbg_printk("Executing op %s (%u)\n",                    \
-                       lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
-                       (unsigned int) *(filter_opcode_t *) pc);        \
-               switch (*(filter_opcode_t *) pc)        {
-
-#define OP(name)       case name
-
-#define PO             break
-
-#define END_OP         }                                               \
-       }
-
-#else
-
-/*
- * Dispatch-table based interpreter.
- */
-
-#define START_OP                                                       \
-       start_pc = &bytecode->code[0];                                  \
-       pc = next_pc = start_pc;                                        \
-       if (unlikely(pc - start_pc >= bytecode->len))                   \
-               goto end;                                               \
-       goto *dispatch[*(filter_opcode_t *) pc];
-
-#define OP(name)                                                       \
-LABEL_##name
-
-#define PO                                                             \
-               pc = next_pc;                                           \
-               goto *dispatch[*(filter_opcode_t *) pc];
-
-#define END_OP
-
-#endif
-
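-/*
- * Stand-alone sketch (not used by the interpreter) of the computed-goto
- * technique behind the dispatch-table macros above, reduced to two
- * opcodes: each handler jumps directly to the next opcode's label, so no
- * central dispatch loop is needed. Relies on GNU C labels-as-values.
- */
-static
-int lttng_example_mini_dispatch(const unsigned char *code)
-{
-       static void *dispatch[] = { &&LABEL_NOP, &&LABEL_RET };
-       const unsigned char *pc = code;
-
-       goto *dispatch[*pc];
-LABEL_NOP:
-       pc++;
-       goto *dispatch[*pc];
-LABEL_RET:
-       return 0;
-}
-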
-static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
-               struct load_ptr *ptr,
-               uint32_t idx)
-{
-
-       struct lttng_ctx_field *ctx_field;
-       struct lttng_event_field *field;
-       union lttng_ctx_value v;
-
-       ctx_field = &lttng_static_ctx->fields[idx];
-       field = &ctx_field->event_field;
-       ptr->type = LOAD_OBJECT;
-       /* field is only used for types nested within variants. */
-       ptr->field = NULL;
-
-       switch (field->type.atype) {
-       case atype_integer:
-               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-               if (field->type.u.integer.signedness) {
-                       ptr->object_type = OBJECT_TYPE_S64;
-                       ptr->u.s64 = v.s64;
-                       ptr->ptr = &ptr->u.s64;
-               } else {
-                       ptr->object_type = OBJECT_TYPE_U64;
-                       ptr->u.u64 = v.s64;     /* Cast. */
-                       ptr->ptr = &ptr->u.u64;
-               }
-               break;
-       case atype_enum_nestable:
-       {
-               const struct lttng_integer_type *itype =
-                       &field->type.u.enum_nestable.container_type->u.integer;
-
-               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-               if (itype->signedness) {
-                       ptr->object_type = OBJECT_TYPE_S64;
-                       ptr->u.s64 = v.s64;
-                       ptr->ptr = &ptr->u.s64;
-               } else {
-                       ptr->object_type = OBJECT_TYPE_U64;
-                       ptr->u.u64 = v.s64;     /* Cast. */
-                       ptr->ptr = &ptr->u.u64;
-               }
-               break;
-       }
-       case atype_array_nestable:
-               if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
-                       printk(KERN_WARNING "Array nesting only supports integer types.\n");
-                       return -EINVAL;
-               }
-               if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
-                       printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
-                       return -EINVAL;
-               }
-               ptr->object_type = OBJECT_TYPE_STRING;
-               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-               ptr->ptr = v.str;
-               break;
-       case atype_sequence_nestable:
-               if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
-                       printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
-                       return -EINVAL;
-               }
-               if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
-                       printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
-                       return -EINVAL;
-               }
-               ptr->object_type = OBJECT_TYPE_STRING;
-               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-               ptr->ptr = v.str;
-               break;
-       case atype_string:
-               ptr->object_type = OBJECT_TYPE_STRING;
-               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-               ptr->ptr = v.str;
-               break;
-       case atype_struct_nestable:
-               printk(KERN_WARNING "Structure type cannot be loaded.\n");
-               return -EINVAL;
-       case atype_variant_nestable:
-               printk(KERN_WARNING "Variant type cannot be loaded.\n");
-               return -EINVAL;
-       default:
-               printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
-               struct bytecode_runtime *runtime,
-               uint64_t index, struct estack_entry *stack_top)
-{
-       int ret;
-       const struct filter_get_index_data *gid;
-
-       /*
-        * Types nested within variants need to perform dynamic lookup
-        * based on the field descriptions. LTTng modules do not implement
-        * variants for now.
-        */
-       if (stack_top->u.ptr.field)
-               return -EINVAL;
-       gid = (const struct filter_get_index_data *) &runtime->data[index];
-       switch (stack_top->u.ptr.type) {
-       case LOAD_OBJECT:
-               switch (stack_top->u.ptr.object_type) {
-               case OBJECT_TYPE_ARRAY:
-               {
-                       const char *ptr;
-
-                       WARN_ON_ONCE(gid->offset >= gid->array_len);
-                       /* Skip count (unsigned long) */
-                       ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
-                       ptr = ptr + gid->offset;
-                       stack_top->u.ptr.ptr = ptr;
-                       stack_top->u.ptr.object_type = gid->elem.type;
-                       stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
-                       /* field is only used for types nested within variants. */
-                       stack_top->u.ptr.field = NULL;
-                       break;
-               }
-               case OBJECT_TYPE_SEQUENCE:
-               {
-                       const char *ptr;
-                       size_t ptr_seq_len;
-
-                       ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
-                       ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
-                       if (gid->offset >= gid->elem.len * ptr_seq_len) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       ptr = ptr + gid->offset;
-                       stack_top->u.ptr.ptr = ptr;
-                       stack_top->u.ptr.object_type = gid->elem.type;
-                       stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
-                       /* field is only used for types nested within variants. */
-                       stack_top->u.ptr.field = NULL;
-                       break;
-               }
-               case OBJECT_TYPE_STRUCT:
-                       printk(KERN_WARNING "Nested structures are not supported yet.\n");
-                       ret = -EINVAL;
-                       goto end;
-               case OBJECT_TYPE_VARIANT:
-               default:
-                       printk(KERN_WARNING "Unexpected get index type %d",
-                               (int) stack_top->u.ptr.object_type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-               break;
-       case LOAD_ROOT_CONTEXT:
-       case LOAD_ROOT_APP_CONTEXT:     /* Fall-through */
-       {
-               ret = context_get_index(lttng_probe_ctx,
-                               &stack_top->u.ptr,
-                               gid->ctx_index);
-               if (ret) {
-                       goto end;
-               }
-               break;
-       }
-       case LOAD_ROOT_PAYLOAD:
-               stack_top->u.ptr.ptr += gid->offset;
-               if (gid->elem.type == OBJECT_TYPE_STRING)
-                       stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
-               stack_top->u.ptr.object_type = gid->elem.type;
-               stack_top->u.ptr.type = LOAD_OBJECT;
-               /* field is only used for types nested within variants. */
-               stack_top->u.ptr.field = NULL;
-               break;
-       }
-       return 0;
-
-end:
-       return ret;
-}
-
-static int dynamic_load_field(struct estack_entry *stack_top)
-{
-       int ret;
-
-       switch (stack_top->u.ptr.type) {
-       case LOAD_OBJECT:
-               break;
-       case LOAD_ROOT_CONTEXT:
-       case LOAD_ROOT_APP_CONTEXT:
-       case LOAD_ROOT_PAYLOAD:
-       default:
-               dbg_printk("Filter warning: cannot load root, missing field name.\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       switch (stack_top->u.ptr.object_type) {
-       case OBJECT_TYPE_S8:
-               dbg_printk("op load field s8\n");
-               stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
-               break;
-       case OBJECT_TYPE_S16:
-       {
-               int16_t tmp;
-
-               dbg_printk("op load field s16\n");
-               tmp = *(int16_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab16s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_S32:
-       {
-               int32_t tmp;
-
-               dbg_printk("op load field s32\n");
-               tmp = *(int32_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab32s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_S64:
-       {
-               int64_t tmp;
-
-               dbg_printk("op load field s64\n");
-               tmp = *(int64_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab64s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_U8:
-               dbg_printk("op load field u8\n");
-               stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
-               break;
-       case OBJECT_TYPE_U16:
-       {
-               uint16_t tmp;
-
-               dbg_printk("op load field s16\n");
-               tmp = *(uint16_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab16s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_U32:
-       {
-               uint32_t tmp;
-
-               dbg_printk("op load field u32\n");
-               tmp = *(uint32_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab32s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_U64:
-       {
-               uint64_t tmp;
-
-               dbg_printk("op load field u64\n");
-               tmp = *(uint64_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab64s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_STRING:
-       {
-               const char *str;
-
-               dbg_printk("op load field string\n");
-               str = (const char *) stack_top->u.ptr.ptr;
-               stack_top->u.s.str = str;
-               if (unlikely(!stack_top->u.s.str)) {
-                       dbg_printk("Filter warning: loading a NULL string.\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
-               stack_top->u.s.literal_type =
-                       ESTACK_STRING_LITERAL_TYPE_NONE;
-               break;
-       }
-       case OBJECT_TYPE_STRING_SEQUENCE:
-       {
-               const char *ptr;
-
-               dbg_printk("op load field string sequence\n");
-               ptr = stack_top->u.ptr.ptr;
-               stack_top->u.s.seq_len = *(unsigned long *) ptr;
-               stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
-               if (unlikely(!stack_top->u.s.str)) {
-                       dbg_printk("Filter warning: loading a NULL sequence.\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               stack_top->u.s.literal_type =
-                       ESTACK_STRING_LITERAL_TYPE_NONE;
-               break;
-       }
-       case OBJECT_TYPE_DYNAMIC:
-               /*
-                * Dynamic types in context are looked up through the
-                * context get-index operation instead.
-                */
-               ret = -EINVAL;
-               goto end;
-       case OBJECT_TYPE_DOUBLE:
-               ret = -EINVAL;
-               goto end;
-       case OBJECT_TYPE_SEQUENCE:
-       case OBJECT_TYPE_ARRAY:
-       case OBJECT_TYPE_STRUCT:
-       case OBJECT_TYPE_VARIANT:
-               printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       return 0;
-
-end:
-       return ret;
-}
-
-/*
- * Return 0 (discard the event), or raise the 0x1 flag (record the
- * event).  The remaining flags are reserved for future extensions and
- * currently have no effect.
- */
-uint64_t lttng_filter_interpret_bytecode(void *filter_data,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               const char *filter_stack_data)
-{
-       struct bytecode_runtime *bytecode = filter_data;
-       void *pc, *next_pc, *start_pc;
-       int ret = -EINVAL;
-       uint64_t retval = 0;
-       struct estack _stack;
-       struct estack *stack = &_stack;
-       register int64_t ax = 0, bx = 0;
-       register int top = FILTER_STACK_EMPTY;
-#ifndef INTERPRETER_USE_SWITCH
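-       /*
-        * Table mapping each opcode to the address of its handler label
-        * (GNU C labels-as-values), used for computed-goto dispatch when
-        * the interpreter is not built with INTERPRETER_USE_SWITCH.
-        */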
-       static void *dispatch[NR_FILTER_OPS] = {
-               [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,
-
-               [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,
-
-               /* binary */
-               [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
-               [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
-               [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
-               [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
-               [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
-               [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
-               [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
-               [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
-               [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
-               [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
-
-               /* binary comparators */
-               [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
-               [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
-               [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
-               [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
-               [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
-               [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,
-
-               /* string binary comparator */
-               [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
-               [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
-               [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
-               [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
-               [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
-               [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,
-
-               /* globbing pattern binary comparator */
-               [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
-               [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,
-
-               /* s64 binary comparator */
-               [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
-               [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
-               [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
-               [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
-               [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
-               [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,
-
-               /* double binary comparator */
-               [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
-               [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
-               [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
-               [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
-               [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
-               [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,
-
-               /* Mixed S64-double binary comparators */
-               [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
-               [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
-               [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
-               [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
-               [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
-               [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,
-
-               [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
-               [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
-               [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
-               [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
-               [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
-               [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,
-
-               /* unary */
-               [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
-               [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
-               [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
-               [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
-               [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
-               [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
-               [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
-               [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
-               [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,
-
-               /* logical */
-               [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
-               [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,
-
-               /* load field ref */
-               [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
-               [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
-               [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
-               [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
-               [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,
-
-               /* load from immediate operand */
-               [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
-               [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
-               [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
-               [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,
-
-               /* cast */
-               [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
-               [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
-               [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,
-
-               /* get context ref */
-               [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
-               [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
-               [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
-               [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
-
-               /* load userspace field ref */
-               [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
-               [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
-
-               /* Instructions for recursive traversal through composed types. */
-               [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
-               [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
-               [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
-
-               [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
-               [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
-               [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
-               [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
-
-               [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
-               [ FILTER_OP_LOAD_FIELD_S8        ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
-               [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
-               [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
-               [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
-               [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
-               [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
-               [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
-               [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
-               [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
-               [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
-               [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
-
-               [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,
-
-               [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
-       };
-#endif /* #ifndef INTERPRETER_USE_SWITCH */
-
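-       /*
-        * START_OP, OP() and PO expand either to a switch-based loop or
-        * to computed-goto dispatch through the table above, depending on
-        * INTERPRETER_USE_SWITCH.  Each handler sets next_pc, and PO
-        * advances pc to next_pc and dispatches the opcode found there.
-        */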
-       START_OP
-
-               OP(FILTER_OP_UNKNOWN):
-               OP(FILTER_OP_LOAD_FIELD_REF):
-               OP(FILTER_OP_GET_CONTEXT_REF):
-#ifdef INTERPRETER_USE_SWITCH
-               default:
-#endif /* INTERPRETER_USE_SWITCH */
-                       printk(KERN_WARNING "unknown bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               OP(FILTER_OP_RETURN):
-               OP(FILTER_OP_RETURN_S64):
-                       /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
-                       retval = !!estack_ax_v;
-                       ret = 0;
-                       goto end;
-
-               /* binary */
-               OP(FILTER_OP_MUL):
-               OP(FILTER_OP_DIV):
-               OP(FILTER_OP_MOD):
-               OP(FILTER_OP_PLUS):
-               OP(FILTER_OP_MINUS):
-                       printk(KERN_WARNING "unsupported bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               OP(FILTER_OP_EQ):
-               OP(FILTER_OP_NE):
-               OP(FILTER_OP_GT):
-               OP(FILTER_OP_LT):
-               OP(FILTER_OP_GE):
-               OP(FILTER_OP_LE):
-                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               OP(FILTER_OP_EQ_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, "==") == 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_NE_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, "!=") != 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_GT_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, ">") > 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_LT_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, "<") < 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_GE_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, ">=") >= 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_LE_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, "<=") <= 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_EQ_STAR_GLOB_STRING):
-               {
-                       int res;
-
-                       res = (stack_star_glob_match(stack, top, "==") == 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_NE_STAR_GLOB_STRING):
-               {
-                       int res;
-
-                       res = (stack_star_glob_match(stack, top, "!=") != 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_EQ_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v == estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_NE_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v != estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_GT_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v > estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_LT_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v < estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_GE_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v >= estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_LE_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v <= estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_EQ_DOUBLE):
-               OP(FILTER_OP_NE_DOUBLE):
-               OP(FILTER_OP_GT_DOUBLE):
-               OP(FILTER_OP_LT_DOUBLE):
-               OP(FILTER_OP_GE_DOUBLE):
-               OP(FILTER_OP_LE_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               /* Mixed S64-double binary comparators */
-               OP(FILTER_OP_EQ_DOUBLE_S64):
-               OP(FILTER_OP_NE_DOUBLE_S64):
-               OP(FILTER_OP_GT_DOUBLE_S64):
-               OP(FILTER_OP_LT_DOUBLE_S64):
-               OP(FILTER_OP_GE_DOUBLE_S64):
-               OP(FILTER_OP_LE_DOUBLE_S64):
-               OP(FILTER_OP_EQ_S64_DOUBLE):
-               OP(FILTER_OP_NE_S64_DOUBLE):
-               OP(FILTER_OP_GT_S64_DOUBLE):
-               OP(FILTER_OP_LT_S64_DOUBLE):
-               OP(FILTER_OP_GE_S64_DOUBLE):
-               OP(FILTER_OP_LE_S64_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-               OP(FILTER_OP_BIT_RSHIFT):
-               {
-                       int64_t res;
-
-                       /* Catch undefined behavior. */
-                       if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_BIT_LSHIFT):
-               {
-                       int64_t res;
-
-                       /* Catch undefined behavior. */
-                       if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_BIT_AND):
-               {
-                       int64_t res;
-
-                       res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_BIT_OR):
-               {
-                       int64_t res;
-
-                       res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_BIT_XOR):
-               {
-                       int64_t res;
-
-                       res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-
-               /* unary */
-               OP(FILTER_OP_UNARY_PLUS):
-               OP(FILTER_OP_UNARY_MINUS):
-               OP(FILTER_OP_UNARY_NOT):
-                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               OP(FILTER_OP_UNARY_BIT_NOT):
-               {
-                       estack_ax_v = ~(uint64_t) estack_ax_v;
-                       next_pc += sizeof(struct unary_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_UNARY_PLUS_S64):
-               {
-                       next_pc += sizeof(struct unary_op);
-                       PO;
-               }
-               OP(FILTER_OP_UNARY_MINUS_S64):
-               {
-                       estack_ax_v = -estack_ax_v;
-                       next_pc += sizeof(struct unary_op);
-                       PO;
-               }
-               OP(FILTER_OP_UNARY_PLUS_DOUBLE):
-               OP(FILTER_OP_UNARY_MINUS_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-               OP(FILTER_OP_UNARY_NOT_S64):
-               {
-                       estack_ax_v = !estack_ax_v;
-                       next_pc += sizeof(struct unary_op);
-                       PO;
-               }
-               OP(FILTER_OP_UNARY_NOT_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               /* logical */
-               OP(FILTER_OP_AND):
-               {
-                       struct logical_op *insn = (struct logical_op *) pc;
-
-                       /* If AX is 0, skip and evaluate to 0 */
-                       if (unlikely(estack_ax_v == 0)) {
-                               dbg_printk("Jumping to bytecode offset %u\n",
-                                       (unsigned int) insn->skip_offset);
-                               next_pc = start_pc + insn->skip_offset;
-                       } else {
-                               /* Pop 1 when jump not taken */
-                               estack_pop(stack, top, ax, bx);
-                               next_pc += sizeof(struct logical_op);
-                       }
-                       PO;
-               }
-               OP(FILTER_OP_OR):
-               {
-                       struct logical_op *insn = (struct logical_op *) pc;
-
-                       /* If AX is nonzero, skip and evaluate to 1 */
-                       if (unlikely(estack_ax_v != 0)) {
-                               estack_ax_v = 1;
-                               dbg_printk("Jumping to bytecode offset %u\n",
-                                       (unsigned int) insn->skip_offset);
-                               next_pc = start_pc + insn->skip_offset;
-                       } else {
-                               /* Pop 1 when jump not taken */
-                               estack_pop(stack, top, ax, bx);
-                               next_pc += sizeof(struct logical_op);
-                       }
-                       PO;
-               }
-
-               /* load field ref */
-               OP(FILTER_OP_LOAD_FIELD_REF_STRING):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-
-                       dbg_printk("load field ref offset %u type string\n",
-                               ref->offset);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.str =
-                               *(const char * const *) &filter_stack_data[ref->offset];
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL string.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       estack_ax(stack, top)->u.s.user = 0;
-                       dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-
-                       dbg_printk("load field ref offset %u type sequence\n",
-                               ref->offset);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.seq_len =
-                               *(unsigned long *) &filter_stack_data[ref->offset];
-                       estack_ax(stack, top)->u.s.str =
-                               *(const char **) (&filter_stack_data[ref->offset
-                                                               + sizeof(unsigned long)]);
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL sequence.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       estack_ax(stack, top)->u.s.user = 0;
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_REF_S64):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-
-                       dbg_printk("load field ref offset %u type s64\n",
-                               ref->offset);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax_v =
-                               ((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
-                       dbg_printk("ref load s64 %lld\n",
-                               (long long) estack_ax_v);
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               /* load from immediate operand */
-               OP(FILTER_OP_LOAD_STRING):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       dbg_printk("load string %s\n", insn->data);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.str = insn->data;
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_PLAIN;
-                       estack_ax(stack, top)->u.s.user = 0;
-                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       dbg_printk("load globbing pattern %s\n", insn->data);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.str = insn->data;
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
-                       estack_ax(stack, top)->u.s.user = 0;
-                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_S64):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       estack_push(stack, top, ax, bx);
-                       estack_ax_v = ((struct literal_numeric *) insn->data)->v;
-                       dbg_printk("load s64 %lld\n",
-                               (long long) estack_ax_v);
-                       next_pc += sizeof(struct load_op)
-                                       + sizeof(struct literal_numeric);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               /* cast */
-               OP(FILTER_OP_CAST_TO_S64):
-                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               OP(FILTER_OP_CAST_DOUBLE_TO_S64):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               OP(FILTER_OP_CAST_NOP):
-               {
-                       next_pc += sizeof(struct cast_op);
-                       PO;
-               }
-
-               /* get context ref */
-               OP(FILTER_OP_GET_CONTEXT_REF_STRING):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-                       struct lttng_ctx_field *ctx_field;
-                       union lttng_ctx_value v;
-
-                       dbg_printk("get context ref offset %u type string\n",
-                               ref->offset);
-                       ctx_field = &lttng_static_ctx->fields[ref->offset];
-                       ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.str = v.str;
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL string.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       estack_ax(stack, top)->u.s.user = 0;
-                       dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_CONTEXT_REF_S64):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-                       struct lttng_ctx_field *ctx_field;
-                       union lttng_ctx_value v;
-
-                       dbg_printk("get context ref offset %u type s64\n",
-                               ref->offset);
-                       ctx_field = &lttng_static_ctx->fields[ref->offset];
-                       ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax_v = v.s64;
-                       dbg_printk("ref get context s64 %lld\n",
-                               (long long) estack_ax_v);
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               /* load userspace field ref */
-               OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-
-                       dbg_printk("load field ref offset %u type user string\n",
-                               ref->offset);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.user_str =
-                               *(const char * const *) &filter_stack_data[ref->offset];
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL string.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       estack_ax(stack, top)->u.s.user = 1;
-                       dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-
-                       dbg_printk("load field ref offset %u type user sequence\n",
-                               ref->offset);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.seq_len =
-                               *(unsigned long *) &filter_stack_data[ref->offset];
-                       estack_ax(stack, top)->u.s.user_str =
-                               *(const char **) (&filter_stack_data[ref->offset
-                                                               + sizeof(unsigned long)]);
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL sequence.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       estack_ax(stack, top)->u.s.user = 1;
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_CONTEXT_ROOT):
-               {
-                       dbg_printk("op get context root\n");
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
-                       /* "field" only needed for variants. */
-                       estack_ax(stack, top)->u.ptr.field = NULL;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_PAYLOAD_ROOT):
-               {
-                       dbg_printk("op get app payload root\n");
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
-                       estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
-                       /* "field" only needed for variants. */
-                       estack_ax(stack, top)->u.ptr.field = NULL;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_SYMBOL):
-               {
-                       dbg_printk("op get symbol\n");
-                       switch (estack_ax(stack, top)->u.ptr.type) {
-                       case LOAD_OBJECT:
-                               printk(KERN_WARNING "Nested fields not implemented yet.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case LOAD_ROOT_CONTEXT:
-                       case LOAD_ROOT_APP_CONTEXT:
-                       case LOAD_ROOT_PAYLOAD:
-                               /*
-                                * Symbol lookup is performed by the
-                                * specialization phase.
-                                */
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_SYMBOL_FIELD):
-               {
-                       /*
-                        * Used for first variant encountered in a
-                        * traversal. Variants are not implemented yet.
-                        */
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               OP(FILTER_OP_GET_INDEX_U16):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
-                       dbg_printk("op get index u16\n");
-                       ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
-                       if (ret)
-                               goto end;
-                       estack_ax_v = estack_ax(stack, top)->u.v;
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_INDEX_U64):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
-                       dbg_printk("op get index u64\n");
-                       ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
-                       if (ret)
-                               goto end;
-                       estack_ax_v = estack_ax(stack, top)->u.v;
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD):
-               {
-                       dbg_printk("op load field\n");
-                       ret = dynamic_load_field(estack_ax(stack, top));
-                       if (ret)
-                               goto end;
-                       estack_ax_v = estack_ax(stack, top)->u.v;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_S8):
-               {
-                       dbg_printk("op load field s8\n");
-
-                       estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_S16):
-               {
-                       dbg_printk("op load field s16\n");
-
-                       estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_S32):
-               {
-                       dbg_printk("op load field s32\n");
-
-                       estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_S64):
-               {
-                       dbg_printk("op load field s64\n");
-
-                       estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_U8):
-               {
-                       dbg_printk("op load field u8\n");
-
-                       estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_U16):
-               {
-                       dbg_printk("op load field u16\n");
-
-                       estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_U32):
-               {
-                       dbg_printk("op load field u32\n");
-
-                       estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_U64):
-               {
-                       dbg_printk("op load field u64\n");
-
-                       estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_DOUBLE):
-               {
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_STRING):
-               {
-                       const char *str;
-
-                       dbg_printk("op load field string\n");
-                       str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
-                       estack_ax(stack, top)->u.s.str = str;
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL string.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
-               {
-                       const char *ptr;
-
-                       dbg_printk("op load field string sequence\n");
-                       ptr = estack_ax(stack, top)->u.ptr.ptr;
-                       estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
-                       estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL sequence.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-
-       END_OP
-end:
-       /* return 0 (discard) on error */
-       if (ret)
-               return 0;
-       return retval;
-}
-
-#undef START_OP
-#undef OP
-#undef PO
-#undef END_OP
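
For readers unfamiliar with the dispatch technique used by the interpreter
removed above: when INTERPRETER_USE_SWITCH is not defined, the handlers are
reached through a table of label addresses using GNU C's labels-as-values
extension. The following minimal sketch, which is not part of the patch and
whose opcode set and run_vm() helper are invented for illustration, shows the
same pattern in isolation:

	#include <stdint.h>
	#include <stdio.h>

	enum { OP_INC, OP_DEC, OP_HALT, NR_OPS };

	static int64_t run_vm(const uint8_t *code)
	{
		/* Table of handler label addresses (GNU C extension). */
		static void *dispatch[NR_OPS] = {
			[OP_INC] = &&do_inc,
			[OP_DEC] = &&do_dec,
			[OP_HALT] = &&do_halt,
		};
		const uint8_t *pc = code;
		int64_t acc = 0;

		goto *dispatch[*pc];		/* dispatch the first opcode */
	do_inc:
		acc++;
		goto *dispatch[*++pc];		/* jump straight to the next handler */
	do_dec:
		acc--;
		goto *dispatch[*++pc];
	do_halt:
		return acc;
	}

	int main(void)
	{
		static const uint8_t prog[] = { OP_INC, OP_INC, OP_DEC, OP_HALT };

		printf("%lld\n", (long long) run_vm(prog));	/* prints 1 */
		return 0;
	}

Each handler jumps directly to the next opcode's handler instead of returning
to a central switch, which removes one branch per instruction and tends to
predict better on bytecode-heavy workloads.
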
diff --git a/lttng-filter-specialize.c b/lttng-filter-specialize.c
deleted file mode 100644 (file)
index ccc4583..0000000
+++ /dev/null
@@ -1,1215 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-specialize.c
- *
- * LTTng modules filter code specializer.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/slab.h>
-#include <lttng/filter.h>
-#include <lttng/align.h>
-
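-/*
- * Reserve @len bytes at @align alignment in the runtime data area,
- * growing the allocation (power-of-two sized) as needed.  Returns the
- * offset of the reserved region, or a negative error code.
- */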
-static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
-               size_t align, size_t len)
-{
-       ssize_t ret;
-       size_t padding = offset_align(runtime->data_len, align);
-       size_t new_len = runtime->data_len + padding + len;
-       size_t new_alloc_len = new_len;
-       size_t old_alloc_len = runtime->data_alloc_len;
-
-       if (new_len > FILTER_MAX_DATA_LEN)
-               return -EINVAL;
-
-       if (new_alloc_len > old_alloc_len) {
-               char *newptr;
-
-               new_alloc_len =
-                       max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
-               newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
-               if (!newptr)
-                       return -ENOMEM;
-               runtime->data = newptr;
-               /* Zero the newly allocated memory, from the end of the old allocation onward. */
-               memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
-               runtime->data_alloc_len = new_alloc_len;
-       }
-       runtime->data_len += padding;
-       ret = runtime->data_len;
-       runtime->data_len += len;
-       return ret;
-}
-
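-/*
- * Append @len bytes from @p to the runtime data area at @align
- * alignment.  Returns the offset at which the data was stored, or a
- * negative error code.
- */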
-static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
-               const void *p, size_t align, size_t len)
-{
-       ssize_t offset;
-
-       offset = bytecode_reserve_data(runtime, align, len);
-       if (offset < 0)
-               return offset;  /* propagate -EINVAL or -ENOMEM */
-       memcpy(&runtime->data[offset], p, len);
-       return offset;
-}
-
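-/*
- * Rewrite the generic FILTER_OP_LOAD_FIELD instruction into a
- * type-specialized load opcode, where the object type and byte order
- * allow, so the interpreter need not re-examine the field type at
- * runtime.
- */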
-static int specialize_load_field(struct vstack_entry *stack_top,
-               struct load_op *insn)
-{
-       int ret;
-
-       switch (stack_top->load.type) {
-       case LOAD_OBJECT:
-               break;
-       case LOAD_ROOT_CONTEXT:
-       case LOAD_ROOT_APP_CONTEXT:
-       case LOAD_ROOT_PAYLOAD:
-       default:
-               dbg_printk("Filter warning: cannot load root, missing field name.\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       switch (stack_top->load.object_type) {
-       case OBJECT_TYPE_S8:
-               dbg_printk("op load field s8\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_S8;
-               break;
-       case OBJECT_TYPE_S16:
-               dbg_printk("op load field s16\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_S16;
-               break;
-       case OBJECT_TYPE_S32:
-               dbg_printk("op load field s32\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_S32;
-               break;
-       case OBJECT_TYPE_S64:
-               dbg_printk("op load field s64\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_S64;
-               break;
-       case OBJECT_TYPE_U8:
-               dbg_printk("op load field u8\n");
-               stack_top->type = REG_S64;
-               insn->op = FILTER_OP_LOAD_FIELD_U8;
-               break;
-       case OBJECT_TYPE_U16:
-               dbg_printk("op load field u16\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_U16;
-               break;
-       case OBJECT_TYPE_U32:
-               dbg_printk("op load field u32\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_U32;
-               break;
-       case OBJECT_TYPE_U64:
-               dbg_printk("op load field u64\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_U64;
-               break;
-       case OBJECT_TYPE_DOUBLE:
-               printk(KERN_WARNING "Double type unsupported\n\n");
-               ret = -EINVAL;
-               goto end;
-       case OBJECT_TYPE_STRING:
-               dbg_printk("op load field string\n");
-               stack_top->type = REG_STRING;
-               insn->op = FILTER_OP_LOAD_FIELD_STRING;
-               break;
-       case OBJECT_TYPE_STRING_SEQUENCE:
-               dbg_printk("op load field string sequence\n");
-               stack_top->type = REG_STRING;
-               insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
-               break;
-       case OBJECT_TYPE_DYNAMIC:
-               ret = -EINVAL;
-               goto end;
-       case OBJECT_TYPE_SEQUENCE:
-       case OBJECT_TYPE_ARRAY:
-       case OBJECT_TYPE_STRUCT:
-       case OBJECT_TYPE_VARIANT:
-               printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       return 0;
-
-end:
-       return ret;
-}
-
-static int specialize_get_index_object_type(enum object_type *otype,
-               int signedness, uint32_t elem_len)
-{
-       switch (elem_len) {
-       case 8:
-               if (signedness)
-                       *otype = OBJECT_TYPE_S8;
-               else
-                       *otype = OBJECT_TYPE_U8;
-               break;
-       case 16:
-               if (signedness)
-                       *otype = OBJECT_TYPE_S16;
-               else
-                       *otype = OBJECT_TYPE_U16;
-               break;
-       case 32:
-               if (signedness)
-                       *otype = OBJECT_TYPE_S32;
-               else
-                       *otype = OBJECT_TYPE_U32;
-               break;
-       case 64:
-               if (signedness)
-                       *otype = OBJECT_TYPE_S64;
-               else
-                       *otype = OBJECT_TYPE_U64;
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
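-/*
- * Specialize an array/sequence index lookup: derive the element type
- * and byte offset, push a struct filter_get_index_data descriptor into
- * the runtime data area, and patch the instruction's index operand to
- * reference that descriptor.
- */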
-static int specialize_get_index(struct bytecode_runtime *runtime,
-               struct load_op *insn, uint64_t index,
-               struct vstack_entry *stack_top,
-               int idx_len)
-{
-       int ret;
-       struct filter_get_index_data gid;
-       ssize_t data_offset;
-
-       memset(&gid, 0, sizeof(gid));
-       switch (stack_top->load.type) {
-       case LOAD_OBJECT:
-               switch (stack_top->load.object_type) {
-               case OBJECT_TYPE_ARRAY:
-               {
-                       const struct lttng_integer_type *integer_type;
-                       const struct lttng_event_field *field;
-                       uint32_t elem_len, num_elems;
-                       int signedness;
-
-                       field = stack_top->load.field;
-                       if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       integer_type = &field->type.u.array_nestable.elem_type->u.integer;
-                       num_elems = field->type.u.array_nestable.length;
-                       elem_len = integer_type->size;
-                       signedness = integer_type->signedness;
-                       if (index >= num_elems) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       ret = specialize_get_index_object_type(&stack_top->load.object_type,
-                                       signedness, elem_len);
-                       if (ret)
-                               goto end;
-                       gid.offset = index * (elem_len / CHAR_BIT);
-                       gid.array_len = num_elems * (elem_len / CHAR_BIT);
-                       gid.elem.type = stack_top->load.object_type;
-                       gid.elem.len = elem_len;
-                       if (integer_type->reverse_byte_order)
-                               gid.elem.rev_bo = true;
-                       stack_top->load.rev_bo = gid.elem.rev_bo;
-                       break;
-               }
-               case OBJECT_TYPE_SEQUENCE:
-               {
-                       const struct lttng_integer_type *integer_type;
-                       const struct lttng_event_field *field;
-                       uint32_t elem_len;
-                       int signedness;
-
-                       field = stack_top->load.field;
-                       if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
-                       elem_len = integer_type->size;
-                       signedness = integer_type->signedness;
-                       ret = specialize_get_index_object_type(&stack_top->load.object_type,
-                                       signedness, elem_len);
-                       if (ret)
-                               goto end;
-                       gid.offset = index * (elem_len / CHAR_BIT);
-                       gid.elem.type = stack_top->load.object_type;
-                       gid.elem.len = elem_len;
-                       if (integer_type->reverse_byte_order)
-                               gid.elem.rev_bo = true;
-                       stack_top->load.rev_bo = gid.elem.rev_bo;
-                       break;
-               }
-               case OBJECT_TYPE_STRUCT:
-                       /* Only generated by the specialize phase. */
-               case OBJECT_TYPE_VARIANT:       /* Fall-through */
-               default:
-                       printk(KERN_WARNING "Unexpected get index type %d",
-                               (int) stack_top->load.object_type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-               break;
-       case LOAD_ROOT_CONTEXT:
-       case LOAD_ROOT_APP_CONTEXT:
-       case LOAD_ROOT_PAYLOAD:
-               printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       data_offset = bytecode_push_data(runtime, &gid,
-               __alignof__(gid), sizeof(gid));
-       if (data_offset < 0) {
-               ret = -EINVAL;
-               goto end;
-       }
-       switch (idx_len) {
-       case 2:
-               ((struct get_index_u16 *) insn->data)->index = data_offset;
-               break;
-       case 8:
-               ((struct get_index_u64 *) insn->data)->index = data_offset;
-               break;
-       default:
-               ret = -EINVAL;
-               goto end;
-       }
-
-       return 0;
-
-end:
-       return ret;
-}
-
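-/*
- * Resolve the context field name referenced by the instruction's
- * get_symbol operand to its index within lttng_static_ctx; returns a
- * negative value if the context field is not found.
- */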
-static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
-               struct load_op *insn)
-{
-       uint16_t offset;
-       const char *name;
-
-       offset = ((struct get_symbol *) insn->data)->offset;
-       name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
-       return lttng_get_context_index(lttng_static_ctx, name);
-}
-
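-/*
- * Map an event field type to the object type tracked on the
- * verification stack, keeping a reference to the field itself for
- * array and sequence loads that are resolved at specialization time.
- */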
-static int specialize_load_object(const struct lttng_event_field *field,
-               struct vstack_load *load, bool is_context)
-{
-       load->type = LOAD_OBJECT;
-       /*
-        * All integer fields are laid out as s64 on the stack for the filter.
-        */
-       switch (field->type.atype) {
-       case atype_integer:
-               if (field->type.u.integer.signedness)
-                       load->object_type = OBJECT_TYPE_S64;
-               else
-                       load->object_type = OBJECT_TYPE_U64;
-               load->rev_bo = false;
-               break;
-       case atype_enum_nestable:
-       {
-               const struct lttng_integer_type *itype =
-                       &field->type.u.enum_nestable.container_type->u.integer;
-
-               if (itype->signedness)
-                       load->object_type = OBJECT_TYPE_S64;
-               else
-                       load->object_type = OBJECT_TYPE_U64;
-               load->rev_bo = false;
-               break;
-       }
-       case atype_array_nestable:
-               if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
-                       printk(KERN_WARNING "Array nesting only supports integer types.\n");
-                       return -EINVAL;
-               }
-               if (is_context) {
-                       load->object_type = OBJECT_TYPE_STRING;
-               } else {
-                       if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
-                               load->object_type = OBJECT_TYPE_ARRAY;
-                               load->field = field;
-                       } else {
-                               load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
-                       }
-               }
-               break;
-       case atype_sequence_nestable:
-               if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
-                       printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
-                       return -EINVAL;
-               }
-               if (is_context) {
-                       load->object_type = OBJECT_TYPE_STRING;
-               } else {
-                       if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
-                               load->object_type = OBJECT_TYPE_SEQUENCE;
-                               load->field = field;
-                       } else {
-                               load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
-                       }
-               }
-               break;
-       case atype_string:
-               load->object_type = OBJECT_TYPE_STRING;
-               break;
-       case atype_struct_nestable:
-               printk(KERN_WARNING "Structure type cannot be loaded.\n");
-               return -EINVAL;
-       case atype_variant_nestable:
-               printk(KERN_WARNING "Variant type cannot be loaded.\n");
-               return -EINVAL;
-       default:
-               printk(KERN_WARNING "Unknown type: %d\n", (int) field->type.atype);
-               return -EINVAL;
-       }
-       return 0;
-}
-
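specialize_load_object() above collapses the event field's declared type into the small set of object types the interpreter can load: bytewise integers and enum containers become signed or unsigned 64-bit registers, text becomes a string, and aggregates are rejected. A condensed sketch of that mapping, with illustrative enums rather than the module's type system:

#include <stdio.h>

enum field_atype { ATYPE_INT_SIGNED, ATYPE_INT_UNSIGNED, ATYPE_STRING };
enum object_type { OBJ_S64, OBJ_U64, OBJ_STRING, OBJ_UNKNOWN };

static enum object_type load_object_type(enum field_atype t)
{
        switch (t) {
        case ATYPE_INT_SIGNED:
                return OBJ_S64;         /* signed integers load as s64 */
        case ATYPE_INT_UNSIGNED:
                return OBJ_U64;         /* unsigned integers load as u64 */
        case ATYPE_STRING:
                return OBJ_STRING;
        default:
                return OBJ_UNKNOWN;     /* struct/variant cannot be loaded */
        }
}

int main(void)
{
        printf("%d %d\n", load_object_type(ATYPE_INT_SIGNED),
                load_object_type(ATYPE_STRING));
        return 0;
}
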
-static int specialize_context_lookup(struct bytecode_runtime *runtime,
-               struct load_op *insn,
-               struct vstack_load *load)
-{
-       int idx, ret;
-       struct lttng_ctx_field *ctx_field;
-       struct lttng_event_field *field;
-       struct filter_get_index_data gid;
-       ssize_t data_offset;
-
-       idx = specialize_context_lookup_name(runtime, insn);
-       if (idx < 0) {
-               return -ENOENT;
-       }
-       ctx_field = &lttng_static_ctx->fields[idx];
-       field = &ctx_field->event_field;
-       ret = specialize_load_object(field, load, true);
-       if (ret)
-               return ret;
-       /* Specialize each get_symbol into a get_index. */
-       insn->op = FILTER_OP_GET_INDEX_U16;
-       memset(&gid, 0, sizeof(gid));
-       gid.ctx_index = idx;
-       gid.elem.type = load->object_type;
-       data_offset = bytecode_push_data(runtime, &gid,
-               __alignof__(gid), sizeof(gid));
-       if (data_offset < 0) {
-               return -EINVAL;
-       }
-       ((struct get_index_u16 *) insn->data)->index = data_offset;
-       return 0;
-}
-
-static int specialize_event_payload_lookup(struct lttng_event *event,
-               struct bytecode_runtime *runtime,
-               struct load_op *insn,
-               struct vstack_load *load)
-{
-       const char *name;
-       uint16_t offset;
-       const struct lttng_event_desc *desc = event->desc;
-       unsigned int i, nr_fields;
-       bool found = false;
-       uint32_t field_offset = 0;
-       const struct lttng_event_field *field;
-       int ret;
-       struct filter_get_index_data gid;
-       ssize_t data_offset;
-
-       nr_fields = desc->nr_fields;
-       offset = ((struct get_symbol *) insn->data)->offset;
-       name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
-       for (i = 0; i < nr_fields; i++) {
-               field = &desc->fields[i];
-               if (field->nofilter) {
-                       continue;
-               }
-               if (!strcmp(field->name, name)) {
-                       found = true;
-                       break;
-               }
-               /* compute field offset on stack */
-               switch (field->type.atype) {
-               case atype_integer:
-               case atype_enum_nestable:
-                       field_offset += sizeof(int64_t);
-                       break;
-               case atype_array_nestable:
-               case atype_sequence_nestable:
-                       field_offset += sizeof(unsigned long);
-                       field_offset += sizeof(void *);
-                       break;
-               case atype_string:
-                       field_offset += sizeof(void *);
-                       break;
-               default:
-                       ret = -EINVAL;
-                       goto end;
-               }
-       }
-       if (!found) {
-               ret = -EINVAL;
-               goto end;
-       }
-
-       ret = specialize_load_object(field, load, false);
-       if (ret)
-               goto end;
-
-       /* Specialize each get_symbol into a get_index. */
-       insn->op = FILTER_OP_GET_INDEX_U16;
-       memset(&gid, 0, sizeof(gid));
-       gid.offset = field_offset;
-       gid.elem.type = load->object_type;
-       data_offset = bytecode_push_data(runtime, &gid,
-               __alignof__(gid), sizeof(gid));
-       if (data_offset < 0) {
-               ret = -EINVAL;
-               goto end;
-       }
-       ((struct get_index_u16 *) insn->data)->index = data_offset;
-       ret = 0;
-end:
-       return ret;
-}
-
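The payload lookup above works because the interpreter stores each already-decoded field in a fixed-size stack slot: one s64 for integers and enums, a length plus a pointer for arrays and sequences, and a pointer for strings. The byte offset of the requested field is therefore the sum of the slot sizes of the fields preceding it. A user-space sketch of the same walk, with an illustrative descriptor type:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum ftype { F_INT, F_SEQ, F_STR };
struct fdesc { const char *name; enum ftype type; };

static long field_offset(const struct fdesc *fields, unsigned int n,
                const char *name)
{
        long off = 0;
        unsigned int i;

        for (i = 0; i < n; i++) {
                if (!strcmp(fields[i].name, name))
                        return off;
                switch (fields[i].type) {
                case F_INT:     /* integers and enums: one s64 slot */
                        off += sizeof(int64_t);
                        break;
                case F_SEQ:     /* sequences/arrays: length + pointer */
                        off += sizeof(unsigned long) + sizeof(void *);
                        break;
                case F_STR:     /* strings: one pointer */
                        off += sizeof(void *);
                        break;
                }
        }
        return -1;              /* not found */
}

int main(void)
{
        static const struct fdesc f[] = {
                { "tid", F_INT }, { "comm", F_STR }, { "buf", F_SEQ },
        };

        printf("offset of buf: %ld\n", field_offset(f, 3, "buf"));
        return 0;
}
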
-int lttng_filter_specialize_bytecode(struct lttng_event *event,
-               struct bytecode_runtime *bytecode)
-{
-       void *pc, *next_pc, *start_pc;
-       int ret = -EINVAL;
-       struct vstack _stack;
-       struct vstack *stack = &_stack;
-
-       vstack_init(stack);
-
-       start_pc = &bytecode->code[0];
-       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
-                       pc = next_pc) {
-               switch (*(filter_opcode_t *) pc) {
-               case FILTER_OP_UNKNOWN:
-               default:
-                       printk(KERN_WARNING "unknown bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               case FILTER_OP_RETURN:
-               case FILTER_OP_RETURN_S64:
-                       ret = 0;
-                       goto end;
-
-               /* binary */
-               case FILTER_OP_MUL:
-               case FILTER_OP_DIV:
-               case FILTER_OP_MOD:
-               case FILTER_OP_PLUS:
-               case FILTER_OP_MINUS:
-                       printk(KERN_WARNING "unsupported bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               case FILTER_OP_EQ:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch (vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STRING:
-                               if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
-                                       insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
-                               else
-                                       insn->op = FILTER_OP_EQ_STRING;
-                               break;
-                       case REG_STAR_GLOB_STRING:
-                               insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_EQ_S64;
-                               else
-                                       insn->op = FILTER_OP_EQ_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_EQ_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_EQ_DOUBLE;
-                               break;
-                       }
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               case FILTER_OP_NE:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch (vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STRING:
-                               if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
-                                       insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
-                               else
-                                       insn->op = FILTER_OP_NE_STRING;
-                               break;
-                       case REG_STAR_GLOB_STRING:
-                               insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_NE_S64;
-                               else
-                                       insn->op = FILTER_OP_NE_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_NE_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_NE_DOUBLE;
-                               break;
-                       }
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               case FILTER_OP_GT:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch (vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STAR_GLOB_STRING:
-                               printk(KERN_WARNING "invalid register type for > binary operator\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case REG_STRING:
-                               insn->op = FILTER_OP_GT_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_GT_S64;
-                               else
-                                       insn->op = FILTER_OP_GT_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_GT_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_GT_DOUBLE;
-                               break;
-                       }
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               case FILTER_OP_LT:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch (vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STAR_GLOB_STRING:
-                               printk(KERN_WARNING "invalid register type for < binary operator\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case REG_STRING:
-                               insn->op = FILTER_OP_LT_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_LT_S64;
-                               else
-                                       insn->op = FILTER_OP_LT_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_LT_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_LT_DOUBLE;
-                               break;
-                       }
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               case FILTER_OP_GE:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch (vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STAR_GLOB_STRING:
-                               printk(KERN_WARNING "invalid register type for >= binary operator\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case REG_STRING:
-                               insn->op = FILTER_OP_GE_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_GE_S64;
-                               else
-                                       insn->op = FILTER_OP_GE_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_GE_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_GE_DOUBLE;
-                               break;
-                       }
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-               case FILTER_OP_LE:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch (vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STAR_GLOB_STRING:
-                               printk(KERN_WARNING "invalid register type for <= binary operator\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case REG_STRING:
-                               insn->op = FILTER_OP_LE_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_LE_S64;
-                               else
-                                       insn->op = FILTER_OP_LE_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_LE_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_LE_DOUBLE;
-                               break;
-                       }
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               case FILTER_OP_EQ_STRING:
-               case FILTER_OP_NE_STRING:
-               case FILTER_OP_GT_STRING:
-               case FILTER_OP_LT_STRING:
-               case FILTER_OP_GE_STRING:
-               case FILTER_OP_LE_STRING:
-               case FILTER_OP_EQ_STAR_GLOB_STRING:
-               case FILTER_OP_NE_STAR_GLOB_STRING:
-               case FILTER_OP_EQ_S64:
-               case FILTER_OP_NE_S64:
-               case FILTER_OP_GT_S64:
-               case FILTER_OP_LT_S64:
-               case FILTER_OP_GE_S64:
-               case FILTER_OP_LE_S64:
-               case FILTER_OP_EQ_DOUBLE:
-               case FILTER_OP_NE_DOUBLE:
-               case FILTER_OP_GT_DOUBLE:
-               case FILTER_OP_LT_DOUBLE:
-               case FILTER_OP_GE_DOUBLE:
-               case FILTER_OP_LE_DOUBLE:
-               case FILTER_OP_EQ_DOUBLE_S64:
-               case FILTER_OP_NE_DOUBLE_S64:
-               case FILTER_OP_GT_DOUBLE_S64:
-               case FILTER_OP_LT_DOUBLE_S64:
-               case FILTER_OP_GE_DOUBLE_S64:
-               case FILTER_OP_LE_DOUBLE_S64:
-               case FILTER_OP_EQ_S64_DOUBLE:
-               case FILTER_OP_NE_S64_DOUBLE:
-               case FILTER_OP_GT_S64_DOUBLE:
-               case FILTER_OP_LT_S64_DOUBLE:
-               case FILTER_OP_GE_S64_DOUBLE:
-               case FILTER_OP_LE_S64_DOUBLE:
-               case FILTER_OP_BIT_RSHIFT:
-               case FILTER_OP_BIT_LSHIFT:
-               case FILTER_OP_BIT_AND:
-               case FILTER_OP_BIT_OR:
-               case FILTER_OP_BIT_XOR:
-               {
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               /* unary */
-               case FILTER_OP_UNARY_PLUS:
-               {
-                       struct unary_op *insn = (struct unary_op *) pc;
-
-                       switch (vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_S64:
-                               insn->op = FILTER_OP_UNARY_PLUS_S64;
-                               break;
-                       case REG_DOUBLE:
-                               insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
-                               break;
-                       }
-                       /* Pop 1, push 1 */
-                       next_pc += sizeof(struct unary_op);
-                       break;
-               }
-
-               case FILTER_OP_UNARY_MINUS:
-               {
-                       struct unary_op *insn = (struct unary_op *) pc;
-
-                       switch (vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_S64:
-                               insn->op = FILTER_OP_UNARY_MINUS_S64;
-                               break;
-                       case REG_DOUBLE:
-                               insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
-                               break;
-                       }
-                       /* Pop 1, push 1 */
-                       next_pc += sizeof(struct unary_op);
-                       break;
-               }
-
-               case FILTER_OP_UNARY_NOT:
-               {
-                       struct unary_op *insn = (struct unary_op *) pc;
-
-                       switch (vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_S64:
-                               insn->op = FILTER_OP_UNARY_NOT_S64;
-                               break;
-                       case REG_DOUBLE:
-                               insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
-                               break;
-                       }
-                       /* Pop 1, push 1 */
-                       next_pc += sizeof(struct unary_op);
-                       break;
-               }
-
-               case FILTER_OP_UNARY_BIT_NOT:
-               {
-                       /* Pop 1, push 1 */
-                       next_pc += sizeof(struct unary_op);
-                       break;
-               }
-
-               case FILTER_OP_UNARY_PLUS_S64:
-               case FILTER_OP_UNARY_MINUS_S64:
-               case FILTER_OP_UNARY_NOT_S64:
-               case FILTER_OP_UNARY_PLUS_DOUBLE:
-               case FILTER_OP_UNARY_MINUS_DOUBLE:
-               case FILTER_OP_UNARY_NOT_DOUBLE:
-               {
-                       /* Pop 1, push 1 */
-                       next_pc += sizeof(struct unary_op);
-                       break;
-               }
-
-               /* logical */
-               case FILTER_OP_AND:
-               case FILTER_OP_OR:
-               {
-                       /* Continue to next instruction */
-                       /* Pop 1 when jump not taken */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       next_pc += sizeof(struct logical_op);
-                       break;
-               }
-
-               /* load field ref */
-               case FILTER_OP_LOAD_FIELD_REF:
-               {
-                       printk(KERN_WARNING "Unknown field ref type\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               /* get context ref */
-               case FILTER_OP_GET_CONTEXT_REF:
-               {
-                       printk(KERN_WARNING "Unknown get context ref type\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               case FILTER_OP_LOAD_FIELD_REF_STRING:
-               case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
-               case FILTER_OP_GET_CONTEXT_REF_STRING:
-               case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
-               case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_STRING;
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       break;
-               }
-               case FILTER_OP_LOAD_FIELD_REF_S64:
-               case FILTER_OP_GET_CONTEXT_REF_S64:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       break;
-               }
-               case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
-               case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_DOUBLE;
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       break;
-               }
-
-               /* load from immediate operand */
-               case FILTER_OP_LOAD_STRING:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_STRING;
-                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-                       break;
-               }
-
-               case FILTER_OP_LOAD_STAR_GLOB_STRING:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
-                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-                       break;
-               }
-
-               case FILTER_OP_LOAD_S64:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct load_op)
-                                       + sizeof(struct literal_numeric);
-                       break;
-               }
-
-               case FILTER_OP_LOAD_DOUBLE:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_DOUBLE;
-                       next_pc += sizeof(struct load_op)
-                                       + sizeof(struct literal_double);
-                       break;
-               }
-
-               /* cast */
-               case FILTER_OP_CAST_TO_S64:
-               {
-                       struct cast_op *insn = (struct cast_op *) pc;
-
-                       switch (vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STRING:
-                       case REG_STAR_GLOB_STRING:
-                               printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case REG_S64:
-                               insn->op = FILTER_OP_CAST_NOP;
-                               break;
-                       case REG_DOUBLE:
-                               insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
-                               break;
-                       }
-                       /* Pop 1, push 1 */
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct cast_op);
-                       break;
-               }
-               case FILTER_OP_CAST_DOUBLE_TO_S64:
-               {
-                       /* Pop 1, push 1 */
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct cast_op);
-                       break;
-               }
-               case FILTER_OP_CAST_NOP:
-               {
-                       next_pc += sizeof(struct cast_op);
-                       break;
-               }
-
-               /*
-                * Instructions for recursive traversal through composed types.
-                */
-               case FILTER_OP_GET_CONTEXT_ROOT:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_PTR;
-                       vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-               case FILTER_OP_GET_APP_CONTEXT_ROOT:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_PTR;
-                       vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-               case FILTER_OP_GET_PAYLOAD_ROOT:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_PTR;
-                       vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-
-               case FILTER_OP_LOAD_FIELD:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
-                       /* Pop 1, push 1 */
-                       ret = specialize_load_field(vstack_ax(stack), insn);
-                       if (ret)
-                               goto end;
-
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-
-               case FILTER_OP_LOAD_FIELD_S8:
-               case FILTER_OP_LOAD_FIELD_S16:
-               case FILTER_OP_LOAD_FIELD_S32:
-               case FILTER_OP_LOAD_FIELD_S64:
-               case FILTER_OP_LOAD_FIELD_U8:
-               case FILTER_OP_LOAD_FIELD_U16:
-               case FILTER_OP_LOAD_FIELD_U32:
-               case FILTER_OP_LOAD_FIELD_U64:
-               {
-                       /* Pop 1, push 1 */
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-
-               case FILTER_OP_LOAD_FIELD_STRING:
-               case FILTER_OP_LOAD_FIELD_SEQUENCE:
-               {
-                       /* Pop 1, push 1 */
-                       vstack_ax(stack)->type = REG_STRING;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-
-               case FILTER_OP_LOAD_FIELD_DOUBLE:
-               {
-                       /* Pop 1, push 1 */
-                       vstack_ax(stack)->type = REG_DOUBLE;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-
-               case FILTER_OP_GET_SYMBOL:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       dbg_printk("op get symbol\n");
-                       switch (vstack_ax(stack)->load.type) {
-                       case LOAD_OBJECT:
-                               printk(KERN_WARNING "Nested fields not implemented yet.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case LOAD_ROOT_CONTEXT:
-                               /* Lookup context field. */
-                               ret = specialize_context_lookup(bytecode, insn,
-                                       &vstack_ax(stack)->load);
-                               if (ret)
-                                       goto end;
-                               break;
-                       case LOAD_ROOT_APP_CONTEXT:
-                               ret = -EINVAL;
-                               goto end;
-                       case LOAD_ROOT_PAYLOAD:
-                               /* Lookup event payload field. */
-                               ret = specialize_event_payload_lookup(event,
-                                       bytecode, insn,
-                                       &vstack_ax(stack)->load);
-                               if (ret)
-                                       goto end;
-                               break;
-                       }
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
-                       break;
-               }
-
-               case FILTER_OP_GET_SYMBOL_FIELD:
-               {
-                       /* Only generated by the specialize phase; unexpected in input bytecode. */
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               case FILTER_OP_GET_INDEX_U16:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
-                       dbg_printk("op get index u16\n");
-                       /* Pop 1, push 1 */
-                       ret = specialize_get_index(bytecode, insn, index->index,
-                                       vstack_ax(stack), sizeof(*index));
-                       if (ret)
-                               goto end;
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
-                       break;
-               }
-
-               case FILTER_OP_GET_INDEX_U64:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
-                       dbg_printk("op get index u64\n");
-                       /* Pop 1, push 1 */
-                       ret = specialize_get_index(bytecode, insn, index->index,
-                                       vstack_ax(stack), sizeof(*index));
-                       if (ret)
-                               goto end;
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
-                       break;
-               }
-
-               }
-       }
-end:
-       return ret;
-}
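
The specializer above, like the validator and the interpreter, is a single forward walk: pc points at the current opcode, each case rewrites the instruction in place and simulates its stack effect, and next_pc advances by exactly the size of that instruction's encoding, which is how variable-length instructions are handled. A toy walker with the same dispatch skeleton follows; the two opcodes and their encodings are made up for illustration, not LTTng's.

#include <stdint.h>
#include <stdio.h>

enum { OP_PUSH = 0, OP_ADD = 1 };

struct insn_push { uint8_t op; int64_t v; } __attribute__((packed));
struct insn_add { uint8_t op; };

static int run(const char *start_pc, size_t len)
{
        const char *pc, *next_pc;
        int64_t stack[16];              /* toy: no depth checking */
        int top = -1;

        for (pc = next_pc = start_pc; pc - start_pc < (long) len; pc = next_pc) {
                switch ((uint8_t) *pc) {
                case OP_PUSH:
                {
                        const struct insn_push *insn = (const void *) pc;

                        stack[++top] = insn->v;
                        next_pc += sizeof(*insn);       /* advance by this encoding's size */
                        break;
                }
                case OP_ADD:
                        stack[top - 1] += stack[top];   /* pop 2, push 1 */
                        top--;
                        next_pc += sizeof(struct insn_add);
                        break;
                default:
                        return -1;                      /* unknown opcode */
                }
        }
        printf("result: %lld\n", (long long) stack[top]);
        return 0;
}

int main(void)
{
        struct __attribute__((packed)) {
                struct insn_push a, b;
                struct insn_add c;
        } prog = { { OP_PUSH, 2 }, { OP_PUSH, 40 }, { OP_ADD } };

        return run((const char *) &prog, sizeof(prog));
}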
diff --git a/lttng-filter-validator.c b/lttng-filter-validator.c
deleted file mode 100644 (file)
index 38d6ed0..0000000
+++ /dev/null
@@ -1,1743 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-validator.c
- *
- * LTTng modules filter bytecode validator.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/types.h>
-#include <linux/jhash.h>
-#include <linux/slab.h>
-
-#include <wrapper/list.h>
-#include <lttng/filter.h>
-
-#define MERGE_POINT_TABLE_BITS         7
-#define MERGE_POINT_TABLE_SIZE         (1U << MERGE_POINT_TABLE_BITS)
-
-/* merge point table node */
-struct mp_node {
-       struct hlist_node node;
-
-       /* Context at merge point */
-       struct vstack stack;
-       unsigned long target_pc;
-};
-
-struct mp_table {
-       struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
-};
-
-static
-int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
-{
-       if (mp_node->target_pc == key_pc)
-               return 1;
-       else
-               return 0;
-}
-
-static
-int merge_points_compare(const struct vstack *stacka,
-                       const struct vstack *stackb)
-{
-       int i, len;
-
-       if (stacka->top != stackb->top)
-               return 1;
-       len = stacka->top + 1;
-       WARN_ON_ONCE(len < 0);
-       for (i = 0; i < len; i++) {
-               if (stacka->e[i].type != stackb->e[i].type)
-                       return 1;
-       }
-       return 0;
-}
-
-static
-int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
-               const struct vstack *stack)
-{
-       struct mp_node *mp_node;
-       unsigned long hash = jhash_1word(target_pc, 0);
-       struct hlist_head *head;
-       struct mp_node *lookup_node;
-       int found = 0;
-
-       dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
-                       target_pc, hash);
-       mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
-       if (!mp_node)
-               return -ENOMEM;
-       mp_node->target_pc = target_pc;
-       memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
-
-       head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
-       lttng_hlist_for_each_entry(lookup_node, head, node) {
-               if (lttng_hash_match(lookup_node, target_pc)) {
-                       found = 1;
-                       break;
-               }
-       }
-       if (found) {
-               /* Key already present */
-               dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
-                               target_pc, hash);
-               kfree(mp_node);
-               if (merge_points_compare(stack, &lookup_node->stack)) {
-                       printk(KERN_WARNING "Merge points differ for offset %lu\n",
-                               target_pc);
-                       return -EINVAL;
-               }
-       } else {
-               hlist_add_head(&mp_node->node, head);
-       }
-       return 0;
-}
-
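Merge points record the expected virtual-stack shape at each jump target; when validation reaches the same target again, the recorded shape must match, otherwise the bytecode is rejected. A simplified user-space sketch of merge_point_add_check() with a fixed-size chained hash table keyed by target pc; the single stack_top field stands in for the full vstack copy, and the hash function is a cheap stand-in for jhash:

#include <stdio.h>
#include <stdlib.h>

#define TABLE_BITS      7
#define TABLE_SIZE      (1U << TABLE_BITS)

struct mp {
        struct mp *next;
        unsigned long target_pc;
        int stack_top;          /* stands in for the recorded vstack */
};

static struct mp *table[TABLE_SIZE];

static unsigned int hash_pc(unsigned long pc)
{
        return (unsigned int) (pc * 2654435761UL) & (TABLE_SIZE - 1);
}

/* Add a merge point, or verify it matches the one already recorded. */
static int merge_point_add_check(unsigned long target_pc, int stack_top)
{
        struct mp **head = &table[hash_pc(target_pc)], *node;

        for (node = *head; node; node = node->next) {
                if (node->target_pc == target_pc)
                        return node->stack_top == stack_top ? 0 : -1;
        }
        node = calloc(1, sizeof(*node));
        if (!node)
                return -1;
        node->target_pc = target_pc;
        node->stack_top = stack_top;
        node->next = *head;
        *head = node;
        return 0;
}

int main(void)
{
        printf("%d\n", merge_point_add_check(24, 0));   /* first visit: 0 */
        printf("%d\n", merge_point_add_check(24, 0));   /* same shape: 0 */
        printf("%d\n", merge_point_add_check(24, 1));   /* mismatch: -1 */
        return 0;
}
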
-/*
- * Binary comparators use top of stack and top of stack -1.
- */
-static
-int bin_op_compare_check(struct vstack *stack, const filter_opcode_t opcode,
-               const char *str)
-{
-       if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
-               goto error_empty;
-
-       switch (vstack_ax(stack)->type) {
-       default:
-       case REG_DOUBLE:
-               goto error_type;
-
-       case REG_STRING:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-                       goto unknown;
-               case REG_STRING:
-                       break;
-               case REG_STAR_GLOB_STRING:
-                       if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
-                               goto error_mismatch;
-                       }
-                       break;
-               case REG_S64:
-                       goto error_mismatch;
-               }
-               break;
-       case REG_STAR_GLOB_STRING:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-                       goto unknown;
-               case REG_STRING:
-                       if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
-                               goto error_mismatch;
-                       }
-                       break;
-               case REG_STAR_GLOB_STRING:
-               case REG_S64:
-                       goto error_mismatch;
-               }
-               break;
-       case REG_S64:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-                       goto unknown;
-               case REG_STRING:
-               case REG_STAR_GLOB_STRING:
-                       goto error_mismatch;
-               case REG_S64:
-                       break;
-               }
-               break;
-       case REG_TYPE_UNKNOWN:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-               case REG_STRING:
-               case REG_STAR_GLOB_STRING:
-               case REG_S64:
-                       goto unknown;
-               }
-               break;
-       }
-       return 0;
-
-unknown:
-       return 1;
-
-error_empty:
-       printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
-       return -EINVAL;
-
-error_mismatch:
-       printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
-       return -EINVAL;
-
-error_type:
-       printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
-       return -EINVAL;
-}
-
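The comparator check above enforces a small compatibility matrix: string against string, a star-glob pattern against a plain string (equality operators only, and at most one glob side), s64 against s64, with unknown operand types deferred to the runtime. A condensed sketch of that matrix; REG_DOUBLE, unsupported in the kernel build, is omitted, and the enum names are illustrative:

#include <stdio.h>

enum reg { R_UNKNOWN, R_S64, R_STRING, R_GLOB };

/* 0: statically valid, 1: dynamic (recheck at runtime), -1: reject. */
static int compare_check(enum reg ax, enum reg bx, int is_eq_or_ne)
{
        if (ax == R_UNKNOWN || bx == R_UNKNOWN)
                return 1;
        if (ax == R_S64 && bx == R_S64)
                return 0;
        if (ax == R_STRING && bx == R_STRING)
                return 0;
        /* A glob pattern may only be compared to a string, with == or !=. */
        if ((ax == R_GLOB && bx == R_STRING) ||
                        (ax == R_STRING && bx == R_GLOB))
                return is_eq_or_ne ? 0 : -1;
        return -1;
}

int main(void)
{
        printf("%d\n", compare_check(R_STRING, R_GLOB, 1));     /* 0 */
        printf("%d\n", compare_check(R_STRING, R_GLOB, 0));     /* -1 */
        printf("%d\n", compare_check(R_GLOB, R_GLOB, 1));       /* -1 */
        return 0;
}
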
-/*
- * Binary bitwise operators use top of stack and top of stack -1.
- * Return 0 if typing is known to match, 1 if typing is dynamic
- * (unknown), negative error value on error.
- */
-static
-int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
-               const char *str)
-{
-       if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
-               goto error_empty;
-
-       switch (vstack_ax(stack)->type) {
-       default:
-       case REG_DOUBLE:
-               goto error_type;
-
-       case REG_TYPE_UNKNOWN:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-               case REG_STRING:
-               case REG_STAR_GLOB_STRING:
-               case REG_S64:
-                       goto unknown;
-               }
-               break;
-       case REG_S64:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-                       goto unknown;
-               case REG_S64:
-                       break;
-               }
-               break;
-       }
-       return 0;
-
-unknown:
-       return 1;
-
-error_empty:
-       printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
-       return -EINVAL;
-
-error_type:
-       printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
-       return -EINVAL;
-}
-
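Callers treat this three-way return the same way throughout the validator: negative aborts validation, 1 leaves the check to the interpreter, 0 proves the typing statically. A tiny runnable sketch of the bitwise variant and its tri-state handling (names illustrative):

#include <stdio.h>

enum reg { R_UNKNOWN, R_S64, R_STRING };

/* 0: statically s64/s64, 1: dynamic, -1: invalid for bitwise ops. */
static int bitwise_check(enum reg ax, enum reg bx)
{
        if (ax == R_UNKNOWN || bx == R_UNKNOWN)
                return 1;
        return (ax == R_S64 && bx == R_S64) ? 0 : -1;
}

int main(void)
{
        int ret = bitwise_check(R_S64, R_UNKNOWN);

        if (ret < 0)
                printf("reject\n");
        else if (ret == 1)
                printf("defer to runtime\n");   /* printed for this input */
        else
                printf("statically typed\n");
        return 0;
}
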
-static
-int validate_get_symbol(struct bytecode_runtime *bytecode,
-               const struct get_symbol *sym)
-{
-       const char *str, *str_limit;
-       size_t len_limit;
-
-       if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
-               return -EINVAL;
-
-       str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
-       str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
-       len_limit = str_limit - str;
-       if (strnlen(str, len_limit) == len_limit)
-               return -EINVAL;
-       return 0;
-}
-
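validate_get_symbol() above checks that the symbol's NUL terminator falls inside the blob: strnlen() returning the full limit means no terminator was found before the end of the buffer. The same bounded-string validation in a self-contained sketch:

#include <stdio.h>
#include <string.h>

/* Return 0 iff buf[offset..] holds a NUL-terminated string within len bytes. */
static int validate_string(const char *buf, size_t len, size_t offset)
{
        if (offset >= len)
                return -1;
        if (strnlen(buf + offset, len - offset) == len - offset)
                return -1;      /* no '\0' before the end of the buffer */
        return 0;
}

int main(void)
{
        char blob[8] = { 'p', 'i', 'd', '\0', 'x', 'y', 'z', 'w' };     /* "xyzw" unterminated */

        printf("%d %d\n", validate_string(blob, sizeof(blob), 0),
                validate_string(blob, sizeof(blob), 4));        /* 0 -1 */
        return 0;
}
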
-/*
- * Validate bytecode range overflow within the validation pass.
- * Called for each instruction encountered.
- */
-static
-int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
-               char *start_pc, char *pc)
-{
-       int ret = 0;
-
-       switch (*(filter_opcode_t *) pc) {
-       case FILTER_OP_UNKNOWN:
-       default:
-       {
-               printk(KERN_WARNING "unknown bytecode op %u\n",
-                       (unsigned int) *(filter_opcode_t *) pc);
-               ret = -EINVAL;
-               break;
-       }
-
-       case FILTER_OP_RETURN:
-       case FILTER_OP_RETURN_S64:
-       {
-               if (unlikely(pc + sizeof(struct return_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /* binary */
-       case FILTER_OP_MUL:
-       case FILTER_OP_DIV:
-       case FILTER_OP_MOD:
-       case FILTER_OP_PLUS:
-       case FILTER_OP_MINUS:
-       case FILTER_OP_EQ_DOUBLE:
-       case FILTER_OP_NE_DOUBLE:
-       case FILTER_OP_GT_DOUBLE:
-       case FILTER_OP_LT_DOUBLE:
-       case FILTER_OP_GE_DOUBLE:
-       case FILTER_OP_LE_DOUBLE:
-       /* Floating point */
-       case FILTER_OP_EQ_DOUBLE_S64:
-       case FILTER_OP_NE_DOUBLE_S64:
-       case FILTER_OP_GT_DOUBLE_S64:
-       case FILTER_OP_LT_DOUBLE_S64:
-       case FILTER_OP_GE_DOUBLE_S64:
-       case FILTER_OP_LE_DOUBLE_S64:
-       case FILTER_OP_EQ_S64_DOUBLE:
-       case FILTER_OP_NE_S64_DOUBLE:
-       case FILTER_OP_GT_S64_DOUBLE:
-       case FILTER_OP_LT_S64_DOUBLE:
-       case FILTER_OP_GE_S64_DOUBLE:
-       case FILTER_OP_LE_S64_DOUBLE:
-       case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
-       case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
-       case FILTER_OP_LOAD_DOUBLE:
-       case FILTER_OP_CAST_DOUBLE_TO_S64:
-       case FILTER_OP_UNARY_PLUS_DOUBLE:
-       case FILTER_OP_UNARY_MINUS_DOUBLE:
-       case FILTER_OP_UNARY_NOT_DOUBLE:
-       {
-               printk(KERN_WARNING "unsupported bytecode op %u\n",
-                       (unsigned int) *(filter_opcode_t *) pc);
-               ret = -EINVAL;
-               break;
-       }
-
-       case FILTER_OP_EQ:
-       case FILTER_OP_NE:
-       case FILTER_OP_GT:
-       case FILTER_OP_LT:
-       case FILTER_OP_GE:
-       case FILTER_OP_LE:
-       case FILTER_OP_EQ_STRING:
-       case FILTER_OP_NE_STRING:
-       case FILTER_OP_GT_STRING:
-       case FILTER_OP_LT_STRING:
-       case FILTER_OP_GE_STRING:
-       case FILTER_OP_LE_STRING:
-       case FILTER_OP_EQ_STAR_GLOB_STRING:
-       case FILTER_OP_NE_STAR_GLOB_STRING:
-       case FILTER_OP_EQ_S64:
-       case FILTER_OP_NE_S64:
-       case FILTER_OP_GT_S64:
-       case FILTER_OP_LT_S64:
-       case FILTER_OP_GE_S64:
-       case FILTER_OP_LE_S64:
-       case FILTER_OP_BIT_RSHIFT:
-       case FILTER_OP_BIT_LSHIFT:
-       case FILTER_OP_BIT_AND:
-       case FILTER_OP_BIT_OR:
-       case FILTER_OP_BIT_XOR:
-       {
-               if (unlikely(pc + sizeof(struct binary_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /* unary */
-       case FILTER_OP_UNARY_PLUS:
-       case FILTER_OP_UNARY_MINUS:
-       case FILTER_OP_UNARY_NOT:
-       case FILTER_OP_UNARY_PLUS_S64:
-       case FILTER_OP_UNARY_MINUS_S64:
-       case FILTER_OP_UNARY_NOT_S64:
-       case FILTER_OP_UNARY_BIT_NOT:
-       {
-               if (unlikely(pc + sizeof(struct unary_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /* logical */
-       case FILTER_OP_AND:
-       case FILTER_OP_OR:
-       {
-               if (unlikely(pc + sizeof(struct logical_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /* load field ref */
-       case FILTER_OP_LOAD_FIELD_REF:
-       {
-               printk(KERN_WARNING "Unknown field ref type\n");
-               ret = -EINVAL;
-               break;
-       }
-
-       /* get context ref */
-       case FILTER_OP_GET_CONTEXT_REF:
-       {
-               printk(KERN_WARNING "Unknown get context ref type\n");
-               ret = -EINVAL;
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_REF_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
-       case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
-       case FILTER_OP_LOAD_FIELD_REF_S64:
-       case FILTER_OP_GET_CONTEXT_REF_STRING:
-       case FILTER_OP_GET_CONTEXT_REF_S64:
-       {
-               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /* load from immediate operand */
-       case FILTER_OP_LOAD_STRING:
-       case FILTER_OP_LOAD_STAR_GLOB_STRING:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               uint32_t str_len, maxlen;
-
-               if (unlikely(pc + sizeof(struct load_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-                       break;
-               }
-
-               maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
-               str_len = strnlen(insn->data, maxlen);
-               if (unlikely(str_len >= maxlen)) {
-                       /* Final '\0' not found within range */
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       case FILTER_OP_LOAD_S64:
-       {
-               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       case FILTER_OP_CAST_TO_S64:
-       case FILTER_OP_CAST_NOP:
-       {
-               if (unlikely(pc + sizeof(struct cast_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /*
-        * Instructions for recursive traversal through composed types.
-        */
-       case FILTER_OP_GET_CONTEXT_ROOT:
-       case FILTER_OP_GET_APP_CONTEXT_ROOT:
-       case FILTER_OP_GET_PAYLOAD_ROOT:
-       case FILTER_OP_LOAD_FIELD:
-       case FILTER_OP_LOAD_FIELD_S8:
-       case FILTER_OP_LOAD_FIELD_S16:
-       case FILTER_OP_LOAD_FIELD_S32:
-       case FILTER_OP_LOAD_FIELD_S64:
-       case FILTER_OP_LOAD_FIELD_U8:
-       case FILTER_OP_LOAD_FIELD_U16:
-       case FILTER_OP_LOAD_FIELD_U32:
-       case FILTER_OP_LOAD_FIELD_U64:
-       case FILTER_OP_LOAD_FIELD_STRING:
-       case FILTER_OP_LOAD_FIELD_SEQUENCE:
-       case FILTER_OP_LOAD_FIELD_DOUBLE:
-               if (unlikely(pc + sizeof(struct load_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-
-       case FILTER_OP_GET_SYMBOL:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               struct get_symbol *sym = (struct get_symbol *) insn->data;
-
-               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-                       break;
-               }
-               ret = validate_get_symbol(bytecode, sym);
-               break;
-       }
-
-       case FILTER_OP_GET_SYMBOL_FIELD:
-               printk(KERN_WARNING "Unexpected get symbol field\n");
-               ret = -EINVAL;
-               break;
-
-       case FILTER_OP_GET_INDEX_U16:
-               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-
-       case FILTER_OP_GET_INDEX_U64:
-               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       return ret;
-}
-
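Every case above applies the same guard before the instruction is ever dereferenced: the full encoding, opcode plus operands, must fit between pc and the end of the bytecode. A sketch of that single comparison, with illustrative encodings:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct binary_op_enc { uint8_t op; };
struct load_s64_enc { uint8_t op; int64_t v; } __attribute__((packed));

/* Reject any instruction whose encoding would run past the bytecode end. */
static int check_overflow(const char *start_pc, size_t len,
                const char *pc, size_t insn_size)
{
        return (pc + insn_size > start_pc + len) ? -ERANGE : 0;
}

int main(void)
{
        char code[4] = { 0 };

        printf("%d\n", check_overflow(code, sizeof(code), code,
                        sizeof(struct binary_op_enc)));         /* 0 */
        printf("%d\n", check_overflow(code, sizeof(code), code + 2,
                        sizeof(struct load_s64_enc)));          /* -ERANGE (-34 on Linux) */
        return 0;
}
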
-static
-unsigned long delete_all_nodes(struct mp_table *mp_table)
-{
-       struct mp_node *mp_node;
-       struct hlist_node *tmp;
-       unsigned long nr_nodes = 0;
-       int i;
-
-       for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
-               struct hlist_head *head;
-
-               head = &mp_table->mp_head[i];
-               lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
-                       kfree(mp_node);
-                       nr_nodes++;
-               }
-       }
-       return nr_nodes;
-}
-
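delete_all_nodes() above walks every bucket with the _safe iterator so that nodes can be freed during traversal, and returns the count of freed merge points. The same grab-next-before-free pattern on a plain singly linked list, as a user-space sketch:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

/* Free a chain while walking it: fetch next before the node is freed. */
static unsigned long delete_all(struct node *head)
{
        unsigned long nr = 0;
        struct node *n, *tmp;

        for (n = head; n; n = tmp) {
                tmp = n->next;  /* the "safe" part of the iteration */
                free(n);
                nr++;
        }
        return nr;
}

int main(void)
{
        struct node *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        break;
                n->next = head;
                head = n;
        }
        printf("freed %lu nodes\n", delete_all(head));
        return 0;
}
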
-/*
- * Return value:
- * >=0: success
- * <0: error
- */
-static
-int validate_instruction_context(struct bytecode_runtime *bytecode,
-               struct vstack *stack,
-               char *start_pc,
-               char *pc)
-{
-       int ret = 0;
-       const filter_opcode_t opcode = *(filter_opcode_t *) pc;
-
-       switch (opcode) {
-       case FILTER_OP_UNKNOWN:
-       default:
-       {
-               printk(KERN_WARNING "unknown bytecode op %u\n",
-                       (unsigned int) *(filter_opcode_t *) pc);
-               ret = -EINVAL;
-               goto end;
-       }
-
-       case FILTER_OP_RETURN:
-       case FILTER_OP_RETURN_S64:
-       {
-               goto end;
-       }
-
-       /* binary */
-       case FILTER_OP_MUL:
-       case FILTER_OP_DIV:
-       case FILTER_OP_MOD:
-       case FILTER_OP_PLUS:
-       case FILTER_OP_MINUS:
-       /* Floating point */
-       case FILTER_OP_EQ_DOUBLE:
-       case FILTER_OP_NE_DOUBLE:
-       case FILTER_OP_GT_DOUBLE:
-       case FILTER_OP_LT_DOUBLE:
-       case FILTER_OP_GE_DOUBLE:
-       case FILTER_OP_LE_DOUBLE:
-       case FILTER_OP_EQ_DOUBLE_S64:
-       case FILTER_OP_NE_DOUBLE_S64:
-       case FILTER_OP_GT_DOUBLE_S64:
-       case FILTER_OP_LT_DOUBLE_S64:
-       case FILTER_OP_GE_DOUBLE_S64:
-       case FILTER_OP_LE_DOUBLE_S64:
-       case FILTER_OP_EQ_S64_DOUBLE:
-       case FILTER_OP_NE_S64_DOUBLE:
-       case FILTER_OP_GT_S64_DOUBLE:
-       case FILTER_OP_LT_S64_DOUBLE:
-       case FILTER_OP_GE_S64_DOUBLE:
-       case FILTER_OP_LE_S64_DOUBLE:
-       case FILTER_OP_UNARY_PLUS_DOUBLE:
-       case FILTER_OP_UNARY_MINUS_DOUBLE:
-       case FILTER_OP_UNARY_NOT_DOUBLE:
-       case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
-       case FILTER_OP_LOAD_DOUBLE:
-       case FILTER_OP_CAST_DOUBLE_TO_S64:
-       case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
-       {
-               printk(KERN_WARNING "unsupported bytecode op %u\n",
-                       (unsigned int) *(filter_opcode_t *) pc);
-               ret = -EINVAL;
-               goto end;
-       }
-
-       case FILTER_OP_EQ:
-       {
-               ret = bin_op_compare_check(stack, opcode, "==");
-               if (ret < 0)
-                       goto end;
-               break;
-       }
-       case FILTER_OP_NE:
-       {
-               ret = bin_op_compare_check(stack, opcode, "!=");
-               if (ret < 0)
-                       goto end;
-               break;
-       }
-       case FILTER_OP_GT:
-       {
-               ret = bin_op_compare_check(stack, opcode, ">");
-               if (ret < 0)
-                       goto end;
-               break;
-       }
-       case FILTER_OP_LT:
-       {
-               ret = bin_op_compare_check(stack, opcode, "<");
-               if (ret < 0)
-                       goto end;
-               break;
-       }
-       case FILTER_OP_GE:
-       {
-               ret = bin_op_compare_check(stack, opcode, ">=");
-               if (ret < 0)
-                       goto end;
-               break;
-       }
-       case FILTER_OP_LE:
-       {
-               ret = bin_op_compare_check(stack, opcode, "<=");
-               if (ret < 0)
-                       goto end;
-               break;
-       }
-
-       case FILTER_OP_EQ_STRING:
-       case FILTER_OP_NE_STRING:
-       case FILTER_OP_GT_STRING:
-       case FILTER_OP_LT_STRING:
-       case FILTER_OP_GE_STRING:
-       case FILTER_OP_LE_STRING:
-       {
-               if (!vstack_ax(stack) || !vstack_bx(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_STRING
-                               || vstack_bx(stack)->type != REG_STRING) {
-                       printk(KERN_WARNING "Unexpected register type for string comparator\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               break;
-       }
-
-       case FILTER_OP_EQ_STAR_GLOB_STRING:
-       case FILTER_OP_NE_STAR_GLOB_STRING:
-       {
-               if (!vstack_ax(stack) || !vstack_bx(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
-                               && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
-                       printk(KERN_WARNING "Unexpected register type for globbing pattern comparator\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               break;
-       }
-
-       case FILTER_OP_EQ_S64:
-       case FILTER_OP_NE_S64:
-       case FILTER_OP_GT_S64:
-       case FILTER_OP_LT_S64:
-       case FILTER_OP_GE_S64:
-       case FILTER_OP_LE_S64:
-       {
-               if (!vstack_ax(stack) || !vstack_bx(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_S64
-                               || vstack_bx(stack)->type != REG_S64) {
-                       printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               break;
-       }
-
-       case FILTER_OP_BIT_RSHIFT:
-               ret = bin_op_bitwise_check(stack, opcode, ">>");
-               if (ret < 0)
-                       goto end;
-               break;
-       case FILTER_OP_BIT_LSHIFT:
-               ret = bin_op_bitwise_check(stack, opcode, "<<");
-               if (ret < 0)
-                       goto end;
-               break;
-       case FILTER_OP_BIT_AND:
-               ret = bin_op_bitwise_check(stack, opcode, "&");
-               if (ret < 0)
-                       goto end;
-               break;
-       case FILTER_OP_BIT_OR:
-               ret = bin_op_bitwise_check(stack, opcode, "|");
-               if (ret < 0)
-                       goto end;
-               break;
-       case FILTER_OP_BIT_XOR:
-               ret = bin_op_bitwise_check(stack, opcode, "^");
-               if (ret < 0)
-                       goto end;
-               break;
-
-       /* unary */
-       case FILTER_OP_UNARY_PLUS:
-       case FILTER_OP_UNARY_MINUS:
-       case FILTER_OP_UNARY_NOT:
-       {
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       printk(KERN_WARNING "unknown register type\n");
-                       ret = -EINVAL;
-                       goto end;
-
-               case REG_STRING:
-               case REG_STAR_GLOB_STRING:
-                       printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
-                       ret = -EINVAL;
-                       goto end;
-               case REG_S64:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               }
-               break;
-       }
-       case FILTER_OP_UNARY_BIT_NOT:
-       {
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               default:
-                       printk(KERN_WARNING "unknown register type\n");
-                       ret = -EINVAL;
-                       goto end;
-
-               case REG_STRING:
-               case REG_STAR_GLOB_STRING:
-               case REG_DOUBLE:
-                       printk(KERN_WARNING "Unary bitwise op can only be applied to numeric registers\n");
-                       ret = -EINVAL;
-                       goto end;
-               case REG_S64:
-                       break;
-               case REG_TYPE_UNKNOWN:
-                       break;
-               }
-               break;
-       }
-
-       case FILTER_OP_UNARY_PLUS_S64:
-       case FILTER_OP_UNARY_MINUS_S64:
-       case FILTER_OP_UNARY_NOT_S64:
-       {
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_S64) {
-                       printk(KERN_WARNING "Invalid register type\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               break;
-       }
-
-       /* logical */
-       case FILTER_OP_AND:
-       case FILTER_OP_OR:
-       {
-               struct logical_op *insn = (struct logical_op *) pc;
-
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_S64) {
-                       printk(KERN_WARNING "Logical comparator expects S64 register\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               dbg_printk("Validate jumping to bytecode offset %u\n",
-                       (unsigned int) insn->skip_offset);
-               if (unlikely(start_pc + insn->skip_offset <= pc)) {
-                       printk(KERN_WARNING "Loops are not allowed in bytecode\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               break;
-       }
-
-       /* load field ref */
-       case FILTER_OP_LOAD_FIELD_REF:
-       {
-               printk(KERN_WARNING "Unknown field ref type\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       case FILTER_OP_LOAD_FIELD_REF_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
-       case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               struct field_ref *ref = (struct field_ref *) insn->data;
-
-               dbg_printk("Validate load field ref offset %u type string\n",
-                       ref->offset);
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_REF_S64:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               struct field_ref *ref = (struct field_ref *) insn->data;
-
-               dbg_printk("Validate load field ref offset %u type s64\n",
-                       ref->offset);
-               break;
-       }
-
-       /* load from immediate operand */
-       case FILTER_OP_LOAD_STRING:
-       case FILTER_OP_LOAD_STAR_GLOB_STRING:
-       {
-               break;
-       }
-
-       case FILTER_OP_LOAD_S64:
-       {
-               break;
-       }
-
-       case FILTER_OP_CAST_TO_S64:
-       {
-               struct cast_op *insn = (struct cast_op *) pc;
-
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       printk(KERN_WARNING "unknown register type\n");
-                       ret = -EINVAL;
-                       goto end;
-
-               case REG_STRING:
-               case REG_STAR_GLOB_STRING:
-                       printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
-                       ret = -EINVAL;
-                       goto end;
-               case REG_S64:
-                       break;
-               }
-               if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
-                       if (vstack_ax(stack)->type != REG_DOUBLE) {
-                               printk(KERN_WARNING "Cast expects double\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-               }
-               break;
-       }
-       case FILTER_OP_CAST_NOP:
-       {
-               break;
-       }
-
-       /* get context ref */
-       case FILTER_OP_GET_CONTEXT_REF:
-       {
-               printk(KERN_WARNING "Unknown get context ref type\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       case FILTER_OP_GET_CONTEXT_REF_STRING:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               struct field_ref *ref = (struct field_ref *) insn->data;
-
-               dbg_printk("Validate get context ref offset %u type string\n",
-                       ref->offset);
-               break;
-       }
-       case FILTER_OP_GET_CONTEXT_REF_S64:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               struct field_ref *ref = (struct field_ref *) insn->data;
-
-               dbg_printk("Validate get context ref offset %u type s64\n",
-                       ref->offset);
-               break;
-       }
-
-       /*
-        * Instructions for recursive traversal through composed types.
-        */
-       case FILTER_OP_GET_CONTEXT_ROOT:
-       {
-               dbg_printk("Validate get context root\n");
-               break;
-       }
-       case FILTER_OP_GET_APP_CONTEXT_ROOT:
-       {
-               dbg_printk("Validate get app context root\n");
-               break;
-       }
-       case FILTER_OP_GET_PAYLOAD_ROOT:
-       {
-               dbg_printk("Validate get payload root\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD:
-       {
-               /*
-                * We tolerate that field type is unknown at validation,
-                * because we are performing the load specialization in
-                * a phase after validation.
-                */
-               dbg_printk("Validate load field\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_S8:
-       {
-               dbg_printk("Validate load field s8\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_S16:
-       {
-               dbg_printk("Validate load field s16\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_S32:
-       {
-               dbg_printk("Validate load field s32\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_S64:
-       {
-               dbg_printk("Validate load field s64\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_U8:
-       {
-               dbg_printk("Validate load field u8\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_U16:
-       {
-               dbg_printk("Validate load field u16\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_U32:
-       {
-               dbg_printk("Validate load field u32\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_U64:
-       {
-               dbg_printk("Validate load field u64\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_STRING:
-       {
-               dbg_printk("Validate load field string\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_SEQUENCE:
-       {
-               dbg_printk("Validate load field sequence\n");
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_DOUBLE:
-       {
-               dbg_printk("Validate load field double\n");
-               break;
-       }
-
-       case FILTER_OP_GET_SYMBOL:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               struct get_symbol *sym = (struct get_symbol *) insn->data;
-
-               dbg_printk("Validate get symbol offset %u\n", sym->offset);
-               break;
-       }
-
-       case FILTER_OP_GET_SYMBOL_FIELD:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               struct get_symbol *sym = (struct get_symbol *) insn->data;
-
-               dbg_printk("Validate get symbol field offset %u\n", sym->offset);
-               break;
-       }
-
-       case FILTER_OP_GET_INDEX_U16:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
-
-               dbg_printk("Validate get index u16 index %u\n", get_index->index);
-               break;
-       }
-
-       case FILTER_OP_GET_INDEX_U64:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
-
-               dbg_printk("Validate get index u64 index %llu\n",
-                       (unsigned long long) get_index->index);
-               break;
-       }
-       }
-end:
-       return ret;
-}
-
-/*
- * Return value:
- * 0: success
- * <0: error
- */
-static
-int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
-               struct mp_table *mp_table,
-               struct vstack *stack,
-               char *start_pc,
-               char *pc)
-{
-       int ret, found = 0;
-       unsigned long target_pc = pc - start_pc;
-       unsigned long hash;
-       struct hlist_head *head;
-       struct mp_node *mp_node;
-
-       /* Validate the context resulting from the previous instruction */
-       ret = validate_instruction_context(bytecode, stack, start_pc, pc);
-       if (ret < 0)
-               return ret;
-
-       /* Validate merge points */
-       hash = jhash_1word(target_pc, 0);
-       head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
-       lttng_hlist_for_each_entry(mp_node, head, node) {
-               if (lttng_hash_match(mp_node, target_pc)) {
-                       found = 1;
-                       break;
-               }
-       }
-       if (found) {
-               dbg_printk("Filter: validate merge point at offset %lu\n",
-                               target_pc);
-               if (merge_points_compare(stack, &mp_node->stack)) {
-                       printk(KERN_WARNING "Merge points differ for offset %lu\n",
-                               target_pc);
-                       return -EINVAL;
-               }
-               /* Once validated, we can remove the merge point */
-               dbg_printk("Filter: remove merge point at offset %lu\n",
-                               target_pc);
-               hlist_del(&mp_node->node);
-       }
-       return 0;
-}
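
validate_instruction_all_contexts() treats every conditional-branch target as a merge point: the first execution path to reach a target records the virtual-stack state it expects there, and each later path must match that snapshot exactly, or validation fails. A minimal standalone model of that rule, with made-up types (this is not the LTTng API):

#include <stdio.h>

enum reg_type { REG_S64_T, REG_STRING_T };

struct snapshot {
        int depth;
        enum reg_type top;
        int used;
};

#define MAX_TARGETS 64
static struct snapshot mp[MAX_TARGETS];         /* indexed by target pc */

/* Return 0 if the stack state is consistent at the target, -1 if two
 * execution paths disagree. */
static int merge_point_check(unsigned int target, int depth, enum reg_type top)
{
        if (!mp[target].used) {                 /* first path: record the state */
                mp[target].depth = depth;
                mp[target].top = top;
                mp[target].used = 1;
                return 0;
        }
        if (mp[target].depth != depth || mp[target].top != top)
                return -1;                      /* merge points differ */
        return 0;
}

int main(void)
{
        printf("%d\n", merge_point_check(10, 1, REG_S64_T));    /* 0: recorded */
        printf("%d\n", merge_point_check(10, 1, REG_S64_T));    /* 0: matches */
        printf("%d\n", merge_point_check(10, 1, REG_STRING_T)); /* -1: differs */
        return 0;
}

In the real validator the snapshot is the full vstack and lookup goes through a jhash-based table, but the accept/reject logic is the same.
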
-
-/*
- * Return value:
- * >0: going to next insn.
- * 0: success, stop iteration.
- * <0: error
- */
-static
-int exec_insn(struct bytecode_runtime *bytecode,
-               struct mp_table *mp_table,
-               struct vstack *stack,
-               char **_next_pc,
-               char *pc)
-{
-       int ret = 1;
-       char *next_pc = *_next_pc;
-
-       switch (*(filter_opcode_t *) pc) {
-       case FILTER_OP_UNKNOWN:
-       default:
-       {
-               printk(KERN_WARNING "unknown bytecode op %u\n",
-                       (unsigned int) *(filter_opcode_t *) pc);
-               ret = -EINVAL;
-               goto end;
-       }
-
-       case FILTER_OP_RETURN:
-       {
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               ret = 0;
-               goto end;
-       }
-
-       case FILTER_OP_RETURN_S64:
-       {
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-                       break;
-               default:
-               case REG_TYPE_UNKNOWN:
-                       printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               ret = 0;
-               goto end;
-       }
-
-       /* binary */
-       case FILTER_OP_MUL:
-       case FILTER_OP_DIV:
-       case FILTER_OP_MOD:
-       case FILTER_OP_PLUS:
-       case FILTER_OP_MINUS:
-       /* Floating point */
-       case FILTER_OP_EQ_DOUBLE:
-       case FILTER_OP_NE_DOUBLE:
-       case FILTER_OP_GT_DOUBLE:
-       case FILTER_OP_LT_DOUBLE:
-       case FILTER_OP_GE_DOUBLE:
-       case FILTER_OP_LE_DOUBLE:
-       case FILTER_OP_EQ_DOUBLE_S64:
-       case FILTER_OP_NE_DOUBLE_S64:
-       case FILTER_OP_GT_DOUBLE_S64:
-       case FILTER_OP_LT_DOUBLE_S64:
-       case FILTER_OP_GE_DOUBLE_S64:
-       case FILTER_OP_LE_DOUBLE_S64:
-       case FILTER_OP_EQ_S64_DOUBLE:
-       case FILTER_OP_NE_S64_DOUBLE:
-       case FILTER_OP_GT_S64_DOUBLE:
-       case FILTER_OP_LT_S64_DOUBLE:
-       case FILTER_OP_GE_S64_DOUBLE:
-       case FILTER_OP_LE_S64_DOUBLE:
-       case FILTER_OP_UNARY_PLUS_DOUBLE:
-       case FILTER_OP_UNARY_MINUS_DOUBLE:
-       case FILTER_OP_UNARY_NOT_DOUBLE:
-       case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
-       case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
-       case FILTER_OP_LOAD_DOUBLE:
-       case FILTER_OP_CAST_DOUBLE_TO_S64:
-       {
-               printk(KERN_WARNING "unsupported bytecode op %u\n",
-                       (unsigned int) *(filter_opcode_t *) pc);
-               ret = -EINVAL;
-               goto end;
-       }
-
-       case FILTER_OP_EQ:
-       case FILTER_OP_NE:
-       case FILTER_OP_GT:
-       case FILTER_OP_LT:
-       case FILTER_OP_GE:
-       case FILTER_OP_LE:
-       case FILTER_OP_EQ_STRING:
-       case FILTER_OP_NE_STRING:
-       case FILTER_OP_GT_STRING:
-       case FILTER_OP_LT_STRING:
-       case FILTER_OP_GE_STRING:
-       case FILTER_OP_LE_STRING:
-       case FILTER_OP_EQ_STAR_GLOB_STRING:
-       case FILTER_OP_NE_STAR_GLOB_STRING:
-       case FILTER_OP_EQ_S64:
-       case FILTER_OP_NE_S64:
-       case FILTER_OP_GT_S64:
-       case FILTER_OP_LT_S64:
-       case FILTER_OP_GE_S64:
-       case FILTER_OP_LE_S64:
-       case FILTER_OP_BIT_RSHIFT:
-       case FILTER_OP_BIT_LSHIFT:
-       case FILTER_OP_BIT_AND:
-       case FILTER_OP_BIT_OR:
-       case FILTER_OP_BIT_XOR:
-       {
-               /* Pop 2, push 1 */
-               if (vstack_pop(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_DOUBLE:
-               case REG_STRING:
-               case REG_STAR_GLOB_STRING:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct binary_op);
-               break;
-       }
-
-       /* unary */
-       case FILTER_OP_UNARY_PLUS:
-       case FILTER_OP_UNARY_MINUS:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
-               next_pc += sizeof(struct unary_op);
-               break;
-       }
-
-       case FILTER_OP_UNARY_PLUS_S64:
-       case FILTER_OP_UNARY_MINUS_S64:
-       case FILTER_OP_UNARY_NOT_S64:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-                       break;
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct unary_op);
-               break;
-       }
-
-       case FILTER_OP_UNARY_NOT:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct unary_op);
-               break;
-       }
-
-       case FILTER_OP_UNARY_BIT_NOT:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               case REG_DOUBLE:
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct unary_op);
-               break;
-       }
-
-       /* logical */
-       case FILTER_OP_AND:
-       case FILTER_OP_OR:
-       {
-               struct logical_op *insn = (struct logical_op *) pc;
-               int merge_ret;
-
-               /* Add merge point to table */
-               merge_ret = merge_point_add_check(mp_table,
-                                       insn->skip_offset, stack);
-               if (merge_ret) {
-                       ret = merge_ret;
-                       goto end;
-               }
-
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               /* There is always a cast-to-s64 operation before an or/and op. */
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-                       break;
-               default:
-                       printk(KERN_WARNING "Incorrect register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               /* Continue to next instruction */
-               /* Pop 1 when jump not taken */
-               if (vstack_pop(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               next_pc += sizeof(struct logical_op);
-               break;
-       }
-
-       /* load field ref */
-       case FILTER_OP_LOAD_FIELD_REF:
-       {
-               printk(KERN_WARNING "Unknown field ref type\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       /* get context ref */
-       case FILTER_OP_GET_CONTEXT_REF:
-       {
-               printk(KERN_WARNING "Unknown get context ref type\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       case FILTER_OP_LOAD_FIELD_REF_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
-       case FILTER_OP_GET_CONTEXT_REF_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
-       {
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_STRING;
-               next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_REF_S64:
-       case FILTER_OP_GET_CONTEXT_REF_S64:
-       {
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-               break;
-       }
-
-       /* load from immediate operand */
-       case FILTER_OP_LOAD_STRING:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_STRING;
-               next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-               break;
-       }
-
-       case FILTER_OP_LOAD_STAR_GLOB_STRING:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
-               next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-               break;
-       }
-
-       case FILTER_OP_LOAD_S64:
-       {
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct load_op)
-                               + sizeof(struct literal_numeric);
-               break;
-       }
-
-       case FILTER_OP_CAST_TO_S64:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_DOUBLE:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               default:
-                       printk(KERN_WARNING "Incorrect register type %d for cast\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct cast_op);
-               break;
-       }
-       case FILTER_OP_CAST_NOP:
-       {
-               next_pc += sizeof(struct cast_op);
-               break;
-       }
-
-       /*
-        * Instructions for recursive traversal through composed types.
-        */
-       case FILTER_OP_GET_CONTEXT_ROOT:
-       case FILTER_OP_GET_APP_CONTEXT_ROOT:
-       case FILTER_OP_GET_PAYLOAD_ROOT:
-       {
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_PTR;
-               next_pc += sizeof(struct load_op);
-               break;
-       }
-
-       case FILTER_OP_LOAD_FIELD:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
-               next_pc += sizeof(struct load_op);
-               break;
-       }
-
-       case FILTER_OP_LOAD_FIELD_S8:
-       case FILTER_OP_LOAD_FIELD_S16:
-       case FILTER_OP_LOAD_FIELD_S32:
-       case FILTER_OP_LOAD_FIELD_S64:
-       case FILTER_OP_LOAD_FIELD_U8:
-       case FILTER_OP_LOAD_FIELD_U16:
-       case FILTER_OP_LOAD_FIELD_U32:
-       case FILTER_OP_LOAD_FIELD_U64:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct load_op);
-               break;
-       }
-
-       case FILTER_OP_LOAD_FIELD_STRING:
-       case FILTER_OP_LOAD_FIELD_SEQUENCE:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_STRING;
-               next_pc += sizeof(struct load_op);
-               break;
-       }
-
-       case FILTER_OP_LOAD_FIELD_DOUBLE:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_DOUBLE;
-               next_pc += sizeof(struct load_op);
-               break;
-       }
-
-       case FILTER_OP_GET_SYMBOL:
-       case FILTER_OP_GET_SYMBOL_FIELD:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
-               break;
-       }
-
-       case FILTER_OP_GET_INDEX_U16:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
-               break;
-       }
-
-       case FILTER_OP_GET_INDEX_U64:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
-               break;
-       }
-
-       }
-end:
-       *_next_pc = next_pc;
-       return ret;
-}
-
-/*
- * Never called concurrently (hash seed is shared).
- */
-int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
-{
-       struct mp_table *mp_table;
-       char *pc, *next_pc, *start_pc;
-       int ret = -EINVAL;
-       struct vstack stack;
-
-       vstack_init(&stack);
-
-       mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
-       if (!mp_table) {
-               printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
-               return -ENOMEM;
-       }
-       start_pc = &bytecode->code[0];
-       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
-                       pc = next_pc) {
-               ret = bytecode_validate_overflow(bytecode, start_pc, pc);
-               if (ret != 0) {
-                       if (ret == -ERANGE)
-                               printk(KERN_WARNING "filter bytecode overflow\n");
-                       goto end;
-               }
-               dbg_printk("Validating op %s (%u)\n",
-                       lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
-                       (unsigned int) *(filter_opcode_t *) pc);
-
-               /*
-                * For each instruction, validate the current context
-                * (traversal of entire execution flow), and validate
-                * all merge points targeting this instruction.
-                */
-               ret = validate_instruction_all_contexts(bytecode, mp_table,
-                                       &stack, start_pc, pc);
-               if (ret)
-                       goto end;
-               ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
-               if (ret <= 0)
-                       goto end;
-       }
-end:
-       if (delete_all_nodes(mp_table)) {
-               if (!ret) {
-                       printk(KERN_WARNING "Unexpected merge points\n");
-                       ret = -EINVAL;
-               }
-       }
-       kfree(mp_table);
-       return ret;
-}
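
Taken together, the validator is a forward abstract interpretation: it walks the bytecode once, tracks only the types of the values a program would leave on the stack, and rejects any program that could compare or dereference the wrong kind of operand at runtime. A toy model of such a pass over a hypothetical four-opcode bytecode (not the LTTng encoding):

#include <stdio.h>

enum op { OP_LOAD_S64, OP_LOAD_STRING, OP_EQ, OP_RETURN };
enum reg { R_S64, R_STRING };

int main(void)
{
        enum op code[] = { OP_LOAD_S64, OP_LOAD_S64, OP_EQ, OP_RETURN };
        enum reg stack[8];
        int sp = 0;

        for (size_t i = 0; i < sizeof(code) / sizeof(code[0]); i++) {
                switch (code[i]) {
                case OP_LOAD_S64:
                        stack[sp++] = R_S64;
                        break;
                case OP_LOAD_STRING:
                        stack[sp++] = R_STRING;
                        break;
                case OP_EQ:             /* pop 2, push one s64 (comparison result) */
                        if (sp < 2 || stack[sp - 1] != stack[sp - 2]) {
                                puts("type error");
                                return 1;
                        }
                        sp--;
                        stack[sp - 1] = R_S64;
                        break;
                case OP_RETURN:         /* result must be a single s64 */
                        if (sp != 1 || stack[0] != R_S64) {
                                puts("bad return type");
                                return 1;
                        }
                        puts("validated");
                        return 0;
                }
        }
        return 1;
}
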
diff --git a/lttng-filter.c b/lttng-filter.c
deleted file mode 100644 (file)
index 12c2264..0000000
+++ /dev/null
@@ -1,565 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter.c
- *
- * LTTng modules filter code.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <lttng/filter.h>
-
-static const char *opnames[] = {
-       [ FILTER_OP_UNKNOWN ] = "UNKNOWN",
-
-       [ FILTER_OP_RETURN ] = "RETURN",
-
-       /* binary */
-       [ FILTER_OP_MUL ] = "MUL",
-       [ FILTER_OP_DIV ] = "DIV",
-       [ FILTER_OP_MOD ] = "MOD",
-       [ FILTER_OP_PLUS ] = "PLUS",
-       [ FILTER_OP_MINUS ] = "MINUS",
-       [ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
-       [ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
-       [ FILTER_OP_BIT_AND ] = "BIT_AND",
-       [ FILTER_OP_BIT_OR ] = "BIT_OR",
-       [ FILTER_OP_BIT_XOR ] = "BIT_XOR",
-
-       /* binary comparators */
-       [ FILTER_OP_EQ ] = "EQ",
-       [ FILTER_OP_NE ] = "NE",
-       [ FILTER_OP_GT ] = "GT",
-       [ FILTER_OP_LT ] = "LT",
-       [ FILTER_OP_GE ] = "GE",
-       [ FILTER_OP_LE ] = "LE",
-
-       /* string binary comparators */
-       [ FILTER_OP_EQ_STRING ] = "EQ_STRING",
-       [ FILTER_OP_NE_STRING ] = "NE_STRING",
-       [ FILTER_OP_GT_STRING ] = "GT_STRING",
-       [ FILTER_OP_LT_STRING ] = "LT_STRING",
-       [ FILTER_OP_GE_STRING ] = "GE_STRING",
-       [ FILTER_OP_LE_STRING ] = "LE_STRING",
-
-       /* s64 binary comparators */
-       [ FILTER_OP_EQ_S64 ] = "EQ_S64",
-       [ FILTER_OP_NE_S64 ] = "NE_S64",
-       [ FILTER_OP_GT_S64 ] = "GT_S64",
-       [ FILTER_OP_LT_S64 ] = "LT_S64",
-       [ FILTER_OP_GE_S64 ] = "GE_S64",
-       [ FILTER_OP_LE_S64 ] = "LE_S64",
-
-       /* double binary comparators */
-       [ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
-       [ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
-       [ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
-       [ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
-       [ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
-       [ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",
-
-       /* Mixed S64-double binary comparators */
-       [ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
-       [ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
-       [ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
-       [ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
-       [ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
-       [ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
-
-       [ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
-       [ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
-       [ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
-       [ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
-       [ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
-       [ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
-
-       /* unary */
-       [ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
-       [ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
-       [ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
-       [ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
-       [ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
-       [ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
-       [ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
-       [ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
-       [ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
-
-       /* logical */
-       [ FILTER_OP_AND ] = "AND",
-       [ FILTER_OP_OR ] = "OR",
-
-       /* load field ref */
-       [ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
-       [ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
-       [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
-       [ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
-       [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
-
-       /* load from immediate operand */
-       [ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
-       [ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
-       [ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
-
-       /* cast */
-       [ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
-       [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
-       [ FILTER_OP_CAST_NOP ] = "CAST_NOP",
-
-       /* get context ref */
-       [ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
-       [ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
-       [ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
-       [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
-
-       /* load userspace field ref */
-       [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
-       [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
-
-       /*
-        * load star globbing pattern (literal string) from immediate operand.
-        */
-       [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
-
-       /* globbing pattern binary operator: apply to */
-       [ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
-       [ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
-
-       /*
-        * Instructions for recursive traversal through composed types.
-        */
-       [ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
-       [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
-       [ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
-
-       [ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
-       [ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
-       [ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
-       [ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
-
-       [ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
-       [ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
-       [ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
-       [ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
-       [ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
-       [ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
-       [ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
-       [ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
-       [ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
-       [ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
-       [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
-       [ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
-
-       [ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
-
-       [ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
-};
-
-const char *lttng_filter_print_op(enum filter_op op)
-{
-       if (op >= NR_FILTER_OPS)
-               return "UNKNOWN";
-       else
-               return opnames[op];
-}
-
-static
-int apply_field_reloc(struct lttng_event *event,
-               struct bytecode_runtime *runtime,
-               uint32_t runtime_len,
-               uint32_t reloc_offset,
-               const char *field_name,
-               enum filter_op filter_op)
-{
-       const struct lttng_event_desc *desc;
-       const struct lttng_event_field *fields, *field = NULL;
-       unsigned int nr_fields, i;
-       struct load_op *op;
-       uint32_t field_offset = 0;
-
-       dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);
-
-       /* Lookup event by name */
-       desc = event->desc;
-       if (!desc)
-               return -EINVAL;
-       fields = desc->fields;
-       if (!fields)
-               return -EINVAL;
-       nr_fields = desc->nr_fields;
-       for (i = 0; i < nr_fields; i++) {
-               if (fields[i].nofilter)
-                       continue;
-               if (!strcmp(fields[i].name, field_name)) {
-                       field = &fields[i];
-                       break;
-               }
-               /* compute field offset */
-               switch (fields[i].type.atype) {
-               case atype_integer:
-               case atype_enum_nestable:
-                       field_offset += sizeof(int64_t);
-                       break;
-               case atype_array_nestable:
-                       if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
-                               return -EINVAL;
-                       field_offset += sizeof(unsigned long);
-                       field_offset += sizeof(void *);
-                       break;
-               case atype_sequence_nestable:
-                       if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
-                               return -EINVAL;
-                       field_offset += sizeof(unsigned long);
-                       field_offset += sizeof(void *);
-                       break;
-               case atype_string:
-                       field_offset += sizeof(void *);
-                       break;
-               case atype_struct_nestable:     /* Unsupported. */
-               case atype_variant_nestable:    /* Unsupported. */
-               default:
-                       return -EINVAL;
-               }
-       }
-       if (!field)
-               return -EINVAL;
-
-       /* Check if field offset is too large for 16-bit offset */
-       if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
-               return -EINVAL;
-
-       /* set type */
-       op = (struct load_op *) &runtime->code[reloc_offset];
-
-       switch (filter_op) {
-       case FILTER_OP_LOAD_FIELD_REF:
-       {
-               struct field_ref *field_ref;
-
-               field_ref = (struct field_ref *) op->data;
-               switch (field->type.atype) {
-               case atype_integer:
-               case atype_enum_nestable:
-                       op->op = FILTER_OP_LOAD_FIELD_REF_S64;
-                       break;
-               case atype_array_nestable:
-               case atype_sequence_nestable:
-                       if (field->user)
-                               op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
-                       else
-                               op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
-                       break;
-               case atype_string:
-                       if (field->user)
-                               op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
-                       else
-                               op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
-                       break;
-               case atype_struct_nestable:     /* Unsupported. */
-               case atype_variant_nestable:    /* Unsupported. */
-               default:
-                       return -EINVAL;
-               }
-               /* set offset */
-               field_ref->offset = (uint16_t) field_offset;
-               break;
-       }
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
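
apply_field_reloc() computes a field's offset by walking the event's field list in declaration order and summing a fixed size per field kind: a 64-bit slot for integers and enums, a pointer for strings, a length word plus a pointer for arrays and sequences. A self-contained sketch of that walk over a made-up event layout (hypothetical descriptors, not struct lttng_event_field):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum ftype { F_INT, F_STRING, F_SEQUENCE };

struct field {
        const char *name;
        enum ftype type;
};

static long field_offset(const struct field *f, int n, const char *name)
{
        long off = 0;

        for (int i = 0; i < n; i++) {
                if (!strcmp(f[i].name, name))
                        return off;
                switch (f[i].type) {
                case F_INT:             /* integers/enums: one s64 slot */
                        off += sizeof(int64_t);
                        break;
                case F_STRING:          /* strings: one pointer */
                        off += sizeof(void *);
                        break;
                case F_SEQUENCE:        /* sequences: length + pointer */
                        off += sizeof(unsigned long) + sizeof(void *);
                        break;
                }
        }
        return -1;      /* not found */
}

int main(void)
{
        struct field ev[] = {
                { "pid", F_INT }, { "comm", F_STRING }, { "argv", F_SEQUENCE },
        };

        printf("%ld\n", field_offset(ev, 3, "argv"));   /* 8 + 8 = 16 on LP64 */
        return 0;
}
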
-
-static
-int apply_context_reloc(struct lttng_event *event,
-               struct bytecode_runtime *runtime,
-               uint32_t runtime_len,
-               uint32_t reloc_offset,
-               const char *context_name,
-               enum filter_op filter_op)
-{
-       struct load_op *op;
-       struct lttng_ctx_field *ctx_field;
-       int idx;
-
-       dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);
-
-       /* Get context index */
-       idx = lttng_get_context_index(lttng_static_ctx, context_name);
-       if (idx < 0)
-               return -ENOENT;
-
-       /* Check if idx is too large for 16-bit offset */
-       if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
-               return -EINVAL;
-
-       /* Get context return type */
-       ctx_field = &lttng_static_ctx->fields[idx];
-       op = (struct load_op *) &runtime->code[reloc_offset];
-
-       switch (filter_op) {
-       case FILTER_OP_GET_CONTEXT_REF:
-       {
-               struct field_ref *field_ref;
-
-               field_ref = (struct field_ref *) op->data;
-               switch (ctx_field->event_field.type.atype) {
-               case atype_integer:
-               case atype_enum_nestable:
-                       op->op = FILTER_OP_GET_CONTEXT_REF_S64;
-                       break;
-                       /* Sequence and array supported as string */
-               case atype_string:
-                       BUG_ON(ctx_field->event_field.user);
-                       op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
-                       break;
-               case atype_array_nestable:
-                       if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
-                               return -EINVAL;
-                       BUG_ON(ctx_field->event_field.user);
-                       op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
-                       break;
-               case atype_sequence_nestable:
-                       if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
-                               return -EINVAL;
-                       BUG_ON(ctx_field->event_field.user);
-                       op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
-                       break;
-               case atype_struct_nestable:     /* Unsupported. */
-               case atype_variant_nestable:    /* Unsupported. */
-               default:
-                       return -EINVAL;
-               }
-               /* set offset to context index within channel contexts */
-               field_ref->offset = (uint16_t) idx;
-               break;
-       }
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static
-int apply_reloc(struct lttng_event *event,
-               struct bytecode_runtime *runtime,
-               uint32_t runtime_len,
-               uint32_t reloc_offset,
-               const char *name)
-{
-       struct load_op *op;
-
-       dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);
-
-       /* Ensure that the reloc is within the code */
-       if (runtime_len - reloc_offset < sizeof(uint16_t))
-               return -EINVAL;
-
-       op = (struct load_op *) &runtime->code[reloc_offset];
-       switch (op->op) {
-       case FILTER_OP_LOAD_FIELD_REF:
-               return apply_field_reloc(event, runtime, runtime_len,
-                       reloc_offset, name, op->op);
-       case FILTER_OP_GET_CONTEXT_REF:
-               return apply_context_reloc(event, runtime, runtime_len,
-                       reloc_offset, name, op->op);
-       case FILTER_OP_GET_SYMBOL:
-       case FILTER_OP_GET_SYMBOL_FIELD:
-               /*
-                * Will be handled by load specialize phase or
-                * dynamically by interpreter.
-                */
-               return 0;
-       default:
-               printk(KERN_WARNING "Unknown reloc op type %u\n", op->op);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static
-int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
-               struct lttng_event *event)
-{
-       struct lttng_bytecode_runtime *bc_runtime;
-
-       list_for_each_entry(bc_runtime,
-                       &event->bytecode_runtime_head, node) {
-               if (bc_runtime->bc == filter_bytecode)
-                       return 1;
-       }
-       return 0;
-}
-
-/*
- * Take a bytecode with reloc table and link it to an event to create a
- * bytecode runtime.
- */
-static
-int _lttng_filter_event_link_bytecode(struct lttng_event *event,
-               struct lttng_filter_bytecode_node *filter_bytecode,
-               struct list_head *insert_loc)
-{
-       int ret, offset, next_offset;
-       struct bytecode_runtime *runtime = NULL;
-       size_t runtime_alloc_len;
-
-       if (!filter_bytecode)
-               return 0;
-       /* Bytecode already linked */
-       if (bytecode_is_linked(filter_bytecode, event))
-               return 0;
-
-       dbg_printk("Linking...\n");
-
-       /* We don't need the reloc table in the runtime */
-       runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
-       runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
-       if (!runtime) {
-               ret = -ENOMEM;
-               goto alloc_error;
-       }
-       runtime->p.bc = filter_bytecode;
-       runtime->p.event = event;
-       runtime->len = filter_bytecode->bc.reloc_offset;
-       /* copy original bytecode */
-       memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
-       /*
-        * apply relocs. Those are a uint16_t (offset in bytecode)
-        * followed by a string (field name).
-        */
-       for (offset = filter_bytecode->bc.reloc_offset;
-                       offset < filter_bytecode->bc.len;
-                       offset = next_offset) {
-               uint16_t reloc_offset =
-                       *(uint16_t *) &filter_bytecode->bc.data[offset];
-               const char *name =
-                       (const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];
-
-               ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
-               if (ret) {
-                       goto link_error;
-               }
-               next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
-       }
-       /* Validate bytecode */
-       ret = lttng_filter_validate_bytecode(runtime);
-       if (ret) {
-               goto link_error;
-       }
-       /* Specialize bytecode */
-       ret = lttng_filter_specialize_bytecode(event, runtime);
-       if (ret) {
-               goto link_error;
-       }
-       runtime->p.filter = lttng_filter_interpret_bytecode;
-       runtime->p.link_failed = 0;
-       list_add_rcu(&runtime->p.node, insert_loc);
-       dbg_printk("Linking successful.\n");
-       return 0;
-
-link_error:
-       runtime->p.filter = lttng_filter_false;
-       runtime->p.link_failed = 1;
-       list_add_rcu(&runtime->p.node, insert_loc);
-alloc_error:
-       dbg_printk("Linking failed.\n");
-       return ret;
-}
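
The reloc table consumed by the loop above lives after the code proper, from bc.reloc_offset to bc.len, as back-to-back entries of a 16-bit bytecode offset followed by a NUL-terminated field name. A standalone sketch that parses such a table from a made-up buffer (the kernel code reads the uint16_t in place in native byte order; this sketch decodes little-endian explicitly):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Two entries: offset 4 -> "pid", offset 12 -> "comm". */
        static const unsigned char table[] = {
                4, 0, 'p', 'i', 'd', '\0',
                12, 0, 'c', 'o', 'm', 'm', '\0',
        };
        size_t off = 0;

        while (off + sizeof(uint16_t) < sizeof(table)) {
                uint16_t reloc = (uint16_t) (table[off] | (table[off + 1] << 8));
                const char *name = (const char *) &table[off + sizeof(uint16_t)];

                printf("reloc offset %u -> field \"%s\"\n",
                        (unsigned int) reloc, name);
                off += sizeof(uint16_t) + strlen(name) + 1;
        }
        return 0;
}
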
-
-void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
-{
-       struct lttng_filter_bytecode_node *bc = runtime->bc;
-
-       if (!bc->enabler->enabled || runtime->link_failed)
-               runtime->filter = lttng_filter_false;
-       else
-               runtime->filter = lttng_filter_interpret_bytecode;
-}
-
-/*
- * Link bytecode for all enablers referenced by an event.
- */
-void lttng_enabler_event_link_bytecode(struct lttng_event *event,
-               struct lttng_enabler *enabler)
-{
-       struct lttng_filter_bytecode_node *bc;
-       struct lttng_bytecode_runtime *runtime;
-
-       /* Can only be called for events with desc attached */
-       WARN_ON_ONCE(!event->desc);
-
-       /* Link each bytecode. */
-       list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
-               int found = 0, ret;
-               struct list_head *insert_loc;
-
-               list_for_each_entry(runtime,
-                               &event->bytecode_runtime_head, node) {
-                       if (runtime->bc == bc) {
-                               found = 1;
-                               break;
-                       }
-               }
-               /* Skip bytecode already linked */
-               if (found)
-                       continue;
-
-               /*
-                * Insert at specified priority (seqnum) in increasing
-                * order. If there already is a bytecode of the same priority,
-                * insert the new bytecode right after it.
-                */
-               list_for_each_entry_reverse(runtime,
-                               &event->bytecode_runtime_head, node) {
-                       if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
-                               /* insert here */
-                               insert_loc = &runtime->node;
-                               goto add_within;
-                       }
-               }
-               /* Add at head of list */
-               insert_loc = &event->bytecode_runtime_head;
-       add_within:
-               dbg_printk("linking bytecode\n");
-               ret = _lttng_filter_event_link_bytecode(event, bc,
-                               insert_loc);
-               if (ret) {
-                       dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
-               }
-       }
-}
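-
-/*
- * Worked example of the seqnum ordering above (hypothetical seqnums):
- * with linked runtimes of seqnum 1 and 3, attaching another seqnum-3
- * bytecode walks the list in reverse, stops at the existing seqnum-3
- * entry (3 <= 3) and inserts right after it, yielding 1, 3, 3(new);
- * a seqnum-0 bytecode finds no entry with seqnum <= 0 and is added at
- * the head of the list.
- */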
-
-/*
- * We own the filter_bytecode if we return success.
- */
-int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
-               struct lttng_filter_bytecode_node *filter_bytecode)
-{
-       list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
-       return 0;
-}
-
-void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
-{
-       struct lttng_filter_bytecode_node *filter_bytecode, *tmp;
-
-       list_for_each_entry_safe(filter_bytecode, tmp,
-                       &enabler->filter_bytecode_head, node) {
-               kfree(filter_bytecode);
-       }
-}
-
-void lttng_free_event_filter_runtime(struct lttng_event *event)
-{
-       struct bytecode_runtime *runtime, *tmp;
-
-       list_for_each_entry_safe(runtime, tmp,
-                       &event->bytecode_runtime_head, p.node) {
-               kfree(runtime->data);
-               kfree(runtime);
-       }
-}
diff --git a/lttng-probes.c b/lttng-probes.c
deleted file mode 100644 (file)
index 4a2bb63..0000000
+++ /dev/null
@@ -1,324 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-probes.c
- *
- * Holds LTTng probes registry.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/seq_file.h>
-
-#include <lttng/events.h>
-
-/*
- * The probe list is protected by the sessions lock.
- */
-static LIST_HEAD(_probe_list);
-
-/*
- * List of probes registered but not yet processed.
- */
-static LIST_HEAD(lazy_probe_init);
-
-/*
- * lazy_nesting counter ensures we don't trigger lazy probe registration
- * fixup while we are performing the fixup. It is protected by the
- * sessions lock.
- */
-static int lazy_nesting;
-
-DEFINE_PER_CPU(struct lttng_dynamic_len_stack, lttng_dynamic_len_stack);
-
-EXPORT_PER_CPU_SYMBOL_GPL(lttng_dynamic_len_stack);
-
-/*
- * Called under sessions lock.
- */
-static
-int check_event_provider(struct lttng_probe_desc *desc)
-{
-       int i;
-       size_t provider_name_len;
-
-       provider_name_len = strnlen(desc->provider,
-                               LTTNG_KERNEL_SYM_NAME_LEN - 1);
-       for (i = 0; i < desc->nr_events; i++) {
-               if (strncmp(desc->event_desc[i]->name,
-                               desc->provider,
-                               provider_name_len))
-                       return 0;       /* provider mismatch */
-               /*
-                * The event needs to contain at least provider name + _ +
-                * one or more letters.
-                */
-               if (strlen(desc->event_desc[i]->name) <= provider_name_len + 1)
-                       return 0;       /* provider mismatch */
-               if (desc->event_desc[i]->name[provider_name_len] != '_')
-                       return 0;       /* provider mismatch */
-       }
-       return 1;
-}
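-
-/*
- * For example, a probe with provider "sched" passes this check only if
- * every event is named "sched_<suffix>" with a non-empty suffix:
- * "sched_switch" is accepted, while "sched" (no suffix), "schedx"
- * (missing '_' separator) and "timer_tick" (wrong prefix) are all
- * rejected.
- */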
-
-/*
- * Called under sessions lock.
- */
-static
-void lttng_lazy_probe_register(struct lttng_probe_desc *desc)
-{
-       struct lttng_probe_desc *iter;
-       struct list_head *probe_list;
-
-       /*
-        * Each provider enforces that every event name begins with the
-        * provider name. Check this with an assertion for extra
-        * safety. This ensures we cannot have duplicate event
-        * names across providers.
-        */
-       WARN_ON_ONCE(!check_event_provider(desc));
-
-       /*
-        * The provider ensures there are no duplicate event names.
-        * Duplicated TRACEPOINT_EVENT event names would generate a
-        * compile-time error due to duplicated symbol names.
-        */
-
-       /*
-        * We sort the providers by struct lttng_probe_desc pointer
-        * address.
-        */
-       probe_list = &_probe_list;
-       list_for_each_entry_reverse(iter, probe_list, head) {
-               BUG_ON(iter == desc); /* Should never be in the list twice */
-               if (iter < desc) {
-                       /* We belong to the location right after iter. */
-                       list_add(&desc->head, &iter->head);
-                       goto desc_added;
-               }
-       }
-       /* We should be added at the head of the list */
-       list_add(&desc->head, probe_list);
-desc_added:
-       pr_debug("LTTng: just registered probe %s containing %u events\n",
-               desc->provider, desc->nr_events);
-}
-
-/*
- * Called under sessions lock.
- */
-static
-void fixup_lazy_probes(void)
-{
-       struct lttng_probe_desc *iter, *tmp;
-       int ret;
-
-       lazy_nesting++;
-       list_for_each_entry_safe(iter, tmp,
-                       &lazy_probe_init, lazy_init_head) {
-               lttng_lazy_probe_register(iter);
-               iter->lazy = 0;
-               list_del(&iter->lazy_init_head);
-       }
-       ret = lttng_fix_pending_events();
-       WARN_ON_ONCE(ret);
-       lazy_nesting--;
-}
-
-/*
- * Called under sessions lock.
- */
-struct list_head *lttng_get_probe_list_head(void)
-{
-       if (!lazy_nesting && !list_empty(&lazy_probe_init))
-               fixup_lazy_probes();
-       return &_probe_list;
-}
-
-static
-const struct lttng_probe_desc *find_provider(const char *provider)
-{
-       struct lttng_probe_desc *iter;
-       struct list_head *probe_list;
-
-       probe_list = lttng_get_probe_list_head();
-       list_for_each_entry(iter, probe_list, head) {
-               if (!strcmp(iter->provider, provider))
-                       return iter;
-       }
-       return NULL;
-}
-
-int lttng_probe_register(struct lttng_probe_desc *desc)
-{
-       int ret = 0;
-
-       lttng_lock_sessions();
-
-       /*
-        * Check if the provider has already been registered.
-        */
-       if (find_provider(desc->provider)) {
-               ret = -EEXIST;
-               goto end;
-       }
-       list_add(&desc->lazy_init_head, &lazy_probe_init);
-       desc->lazy = 1;
-       pr_debug("LTTng: adding probe %s containing %u events to lazy registration list\n",
-               desc->provider, desc->nr_events);
-       /*
-        * If there is at least one active session, register the probe
-        * immediately: event registration cannot be delayed, as the
-        * events are needed as soon as the session starts.
-        */
-       if (lttng_session_active())
-               fixup_lazy_probes();
-end:
-       lttng_unlock_sessions();
-       return ret;
-}
-EXPORT_SYMBOL_GPL(lttng_probe_register);
-
-void lttng_probe_unregister(struct lttng_probe_desc *desc)
-{
-       lttng_lock_sessions();
-       if (!desc->lazy)
-               list_del(&desc->head);
-       else
-               list_del(&desc->lazy_init_head);
-       pr_debug("LTTng: just unregistered probe %s\n", desc->provider);
-       lttng_unlock_sessions();
-}
-EXPORT_SYMBOL_GPL(lttng_probe_unregister);
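-
-/*
- * A minimal sketch of the expected probe life cycle, using a
- * hypothetical descriptor my_probe_desc generated by the
- * TRACEPOINT_EVENT machinery:
- *
- *   static int __init my_probe_init(void)
- *   {
- *           return lttng_probe_register(&my_probe_desc);
- *   }
- *
- *   static void __exit my_probe_exit(void)
- *   {
- *           lttng_probe_unregister(&my_probe_desc);
- *   }
- *
- * Registration is only queued on the lazy list until a session needs
- * the probe; unregistration removes the descriptor from whichever list
- * it currently sits on.
- */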
-
-/*
- * TODO: this is O(nr_probes * nr_events); it could be faster.
- * Called with sessions lock held.
- */
-static
-const struct lttng_event_desc *find_event(const char *name)
-{
-       struct lttng_probe_desc *probe_desc;
-       int i;
-
-       list_for_each_entry(probe_desc, &_probe_list, head) {
-               for (i = 0; i < probe_desc->nr_events; i++) {
-                       if (!strcmp(probe_desc->event_desc[i]->name, name))
-                               return probe_desc->event_desc[i];
-               }
-       }
-       return NULL;
-}
-
-/*
- * Called with sessions lock held.
- */
-const struct lttng_event_desc *lttng_event_get(const char *name)
-{
-       const struct lttng_event_desc *event;
-       int ret;
-
-       event = find_event(name);
-       if (!event)
-               return NULL;
-       ret = try_module_get(event->owner);
-       WARN_ON_ONCE(!ret);
-       return event;
-}
-EXPORT_SYMBOL_GPL(lttng_event_get);
-
-/*
- * Called with sessions lock held.
- */
-void lttng_event_put(const struct lttng_event_desc *event)
-{
-       module_put(event->owner);
-}
-EXPORT_SYMBOL_GPL(lttng_event_put);
-
-static
-void *tp_list_start(struct seq_file *m, loff_t *pos)
-{
-       struct lttng_probe_desc *probe_desc;
-       struct list_head *probe_list;
-       int iter = 0, i;
-
-       lttng_lock_sessions();
-       probe_list = lttng_get_probe_list_head();
-       list_for_each_entry(probe_desc, probe_list, head) {
-               for (i = 0; i < probe_desc->nr_events; i++) {
-                       if (iter++ >= *pos)
-                               return (void *) probe_desc->event_desc[i];
-               }
-       }
-       /* End of list */
-       return NULL;
-}
-
-static
-void *tp_list_next(struct seq_file *m, void *p, loff_t *ppos)
-{
-       struct lttng_probe_desc *probe_desc;
-       struct list_head *probe_list;
-       int iter = 0, i;
-
-       (*ppos)++;
-       probe_list = lttng_get_probe_list_head();
-       list_for_each_entry(probe_desc, probe_list, head) {
-               for (i = 0; i < probe_desc->nr_events; i++) {
-                       if (iter++ >= *ppos)
-                               return (void *) probe_desc->event_desc[i];
-               }
-       }
-       /* End of list */
-       return NULL;
-}
-
-static
-void tp_list_stop(struct seq_file *m, void *p)
-{
-       lttng_unlock_sessions();
-}
-
-static
-int tp_list_show(struct seq_file *m, void *p)
-{
-       const struct lttng_event_desc *event_desc = p;
-
-       seq_printf(m,   "event { name = %s; };\n",
-                  event_desc->name);
-       return 0;
-}
-
-static
-const struct seq_operations lttng_tracepoint_list_seq_ops = {
-       .start = tp_list_start,
-       .next = tp_list_next,
-       .stop = tp_list_stop,
-       .show = tp_list_show,
-};
-
-static
-int lttng_tracepoint_list_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &lttng_tracepoint_list_seq_ops);
-}
-
-const struct file_operations lttng_tracepoint_list_fops = {
-       .owner = THIS_MODULE,
-       .open = lttng_tracepoint_list_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = seq_release,
-};
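-
-/*
- * Reading the file backed by these operations yields one line per
- * known event, e.g. (illustrative output):
- *
- *   event { name = sched_switch; };
- *   event { name = sched_wakeup; };
- *
- * The sessions lock is held from tp_list_start() to tp_list_stop(),
- * so the probe list cannot change while a read is in progress.
- */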
-
-int lttng_probes_init(void)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu)
-               per_cpu_ptr(&lttng_dynamic_len_stack, cpu)->offset = 0;
-       return 0;
-}
diff --git a/lttng-ring-buffer-client-discard.c b/lttng-ring-buffer-client-discard.c
deleted file mode 100644 (file)
index c9d617a..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-ring-buffer-client-discard.c
- *
- * LTTng lib ring buffer client (discard mode).
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_DISCARD
-#define RING_BUFFER_MODE_TEMPLATE_STRING       "discard"
-#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_SPLICE
-#include "lttng-ring-buffer-client.h"
diff --git a/lttng-ring-buffer-client-mmap-discard.c b/lttng-ring-buffer-client-mmap-discard.c
deleted file mode 100644 (file)
index c79ab66..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-ring-buffer-client-mmap-discard.c
- *
- * LTTng lib ring buffer client (discard mode, mmap output).
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_DISCARD
-#define RING_BUFFER_MODE_TEMPLATE_STRING       "discard-mmap"
-#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_MMAP
-#include "lttng-ring-buffer-client.h"
diff --git a/lttng-ring-buffer-client-mmap-overwrite.c b/lttng-ring-buffer-client-mmap-overwrite.c
deleted file mode 100644 (file)
index 1166fc7..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-ring-buffer-client-mmap-overwrite.c
- *
- * LTTng lib ring buffer client (overwrite mode, mmap output).
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_OVERWRITE
-#define RING_BUFFER_MODE_TEMPLATE_STRING       "overwrite-mmap"
-#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_MMAP
-#include "lttng-ring-buffer-client.h"
diff --git a/lttng-ring-buffer-client-overwrite.c b/lttng-ring-buffer-client-overwrite.c
deleted file mode 100644 (file)
index c4a7c5e..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-ring-buffer-client-overwrite.c
- *
- * LTTng lib ring buffer client (overwrite mode).
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_OVERWRITE
-#define RING_BUFFER_MODE_TEMPLATE_STRING       "overwrite"
-#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_SPLICE
-#include "lttng-ring-buffer-client.h"
diff --git a/lttng-ring-buffer-client.h b/lttng-ring-buffer-client.h
deleted file mode 100644 (file)
index aad7955..0000000
+++ /dev/null
@@ -1,790 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-ring-buffer-client.h
- *
- * LTTng lib ring buffer client template.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <lttng/bitfield.h>
-#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
-#include <wrapper/trace-clock.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <ringbuffer/frontend_types.h>
-
-#define LTTNG_COMPACT_EVENT_BITS       5
-#define LTTNG_COMPACT_TSC_BITS         27
-
-static struct lttng_transport lttng_relay_transport;
-
-/*
- * Keep the natural field alignment for _each field_ within this structure if
- * you ever add/remove a field from this header. Packed attribute is not used
- * because gcc generates poor code on at least powerpc and mips. Don't ever
- * let gcc add padding between the structure elements.
- *
- * The guarantee we have with timestamps is that all the events in a
- * packet are included (inclusive) within the begin/end timestamps of
- * the packet. Another guarantee we have is that the "timestamp begin",
- * as well as the event timestamps, are monotonically increasing (never
- * decrease) when moving forward in a stream (physically). But this
- * guarantee does not apply to "timestamp end", because it is sampled at
- * commit time, which is not ordered with respect to space reservation.
- */
-
-struct packet_header {
-       /* Trace packet header */
-       uint32_t magic;                 /*
-                                        * Trace magic number.
-                                        * Contains endianness information.
-                                        */
-       uint8_t uuid[16];
-       uint32_t stream_id;
-       uint64_t stream_instance_id;
-
-       struct {
-               /* Stream packet context */
-               uint64_t timestamp_begin;       /* Cycle count at subbuffer start */
-               uint64_t timestamp_end;         /* Cycle count at subbuffer end */
-               uint64_t content_size;          /* Size of data in subbuffer */
-               uint64_t packet_size;           /* Subbuffer size (include padding) */
-               uint64_t packet_seq_num;        /* Packet sequence number */
-               unsigned long events_discarded; /*
-                                                * Events lost in this subbuffer since
-                                                * the beginning of the trace.
-                                                * (may overflow)
-                                                */
-               uint32_t cpu_id;                /* CPU id associated with stream */
-               uint8_t header_end;             /* End of header */
-       } ctx;
-};
-
-struct lttng_client_ctx {
-       size_t packet_context_len;
-       size_t event_context_len;
-};
-
-static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
-{
-       return trace_clock_read64();
-}
-
-static inline
-size_t ctx_get_aligned_size(size_t offset, struct lttng_ctx *ctx,
-               size_t ctx_len)
-{
-       size_t orig_offset = offset;
-
-       if (likely(!ctx))
-               return 0;
-       offset += lib_ring_buffer_align(offset, ctx->largest_align);
-       offset += ctx_len;
-       return offset - orig_offset;
-}
-
-static inline
-void ctx_get_struct_size(struct lttng_ctx *ctx, size_t *ctx_len,
-               struct lttng_channel *chan, struct lib_ring_buffer_ctx *bufctx)
-{
-       int i;
-       size_t offset = 0;
-
-       if (likely(!ctx)) {
-               *ctx_len = 0;
-               return;
-       }
-       for (i = 0; i < ctx->nr_fields; i++) {
-               if (ctx->fields[i].get_size)
-                       offset += ctx->fields[i].get_size(offset);
-               if (ctx->fields[i].get_size_arg)
-                       offset += ctx->fields[i].get_size_arg(offset,
-                                       &ctx->fields[i], bufctx, chan);
-       }
-       *ctx_len = offset;
-}
-
-static inline
-void ctx_record(struct lib_ring_buffer_ctx *bufctx,
-               struct lttng_channel *chan,
-               struct lttng_ctx *ctx)
-{
-       int i;
-
-       if (likely(!ctx))
-               return;
-       lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
-       for (i = 0; i < ctx->nr_fields; i++)
-               ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
-}
-
-/*
- * record_header_size - Calculate the header size and padding necessary.
- * @config: ring buffer instance configuration
- * @chan: channel
- * @offset: offset in the write buffer
- * @pre_header_padding: padding to add before the header (output)
- * @ctx: reservation context
- *
- * Returns the event header size (including padding).
- *
- * The payload determines its own alignment from the biggest type it
- * contains.
- */
-static __inline__
-size_t record_header_size(const struct lib_ring_buffer_config *config,
-                                struct channel *chan, size_t offset,
-                                size_t *pre_header_padding,
-                                struct lib_ring_buffer_ctx *ctx,
-                                struct lttng_client_ctx *client_ctx)
-{
-       struct lttng_channel *lttng_chan = channel_get_private(chan);
-       struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
-       struct lttng_event *event = lttng_probe_ctx->event;
-       size_t orig_offset = offset;
-       size_t padding;
-
-       switch (lttng_chan->header_type) {
-       case 1: /* compact */
-               padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
-               offset += padding;
-               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-                       offset += sizeof(uint32_t);     /* id and timestamp */
-               } else {
-                       /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
-                       offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
-                       /* Align extended struct on largest member */
-                       offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
-                       offset += sizeof(uint32_t);     /* id */
-                       offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
-                       offset += sizeof(uint64_t);     /* timestamp */
-               }
-               break;
-       case 2: /* large */
-               padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
-               offset += padding;
-               offset += sizeof(uint16_t);
-               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-                       offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
-                       offset += sizeof(uint32_t);     /* timestamp */
-               } else {
-                       /* Align extended struct on largest member */
-                       offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
-                       offset += sizeof(uint32_t);     /* id */
-                       offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
-                       offset += sizeof(uint64_t);     /* timestamp */
-               }
-               break;
-       default:
-               padding = 0;
-               WARN_ON_ONCE(1);
-       }
-       offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
-                       client_ctx->packet_context_len);
-       offset += ctx_get_aligned_size(offset, event->ctx,
-                       client_ctx->event_context_len);
-
-       *pre_header_padding = padding;
-       return offset - orig_offset;
-}
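-
-/*
- * Worked size example for the compact case above (illustrative): at an
- * aligned offset with no rflags set, the header is one uint32_t packing
- * the event id (LTTNG_COMPACT_EVENT_BITS = 5 bits) and the low
- * LTTNG_COMPACT_TSC_BITS = 27 bits of the timestamp, i.e. 4 bytes.
- * With LTTNG_RFLAG_EXTENDED set, it grows to a 1-byte escape id,
- * padding up to 8-byte alignment, a uint32_t id and a uint64_t
- * timestamp.
- */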
-
-#include <ringbuffer/api.h>
-
-static
-void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
-                                struct lib_ring_buffer_ctx *ctx,
-                                uint32_t event_id);
-
-/*
- * lttng_write_event_header
- *
- * Writes the event header to the offset (already aligned on 32-bits).
- *
- * @config: ring buffer instance configuration
- * @ctx: reservation context
- * @event_id: event ID
- */
-static __inline__
-void lttng_write_event_header(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer_ctx *ctx,
-                           uint32_t event_id)
-{
-       struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
-       struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
-       struct lttng_event *event = lttng_probe_ctx->event;
-
-       if (unlikely(ctx->rflags))
-               goto slow_path;
-
-       switch (lttng_chan->header_type) {
-       case 1: /* compact */
-       {
-               uint32_t id_time = 0;
-
-               bt_bitfield_write(&id_time, uint32_t,
-                               0,
-                               LTTNG_COMPACT_EVENT_BITS,
-                               event_id);
-               bt_bitfield_write(&id_time, uint32_t,
-                               LTTNG_COMPACT_EVENT_BITS,
-                               LTTNG_COMPACT_TSC_BITS,
-                               ctx->tsc);
-               lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
-               break;
-       }
-       case 2: /* large */
-       {
-               uint32_t timestamp = (uint32_t) ctx->tsc;
-               uint16_t id = event_id;
-
-               lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-               lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
-               lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-               break;
-       }
-       default:
-               WARN_ON_ONCE(1);
-       }
-
-       ctx_record(ctx, lttng_chan, lttng_chan->ctx);
-       ctx_record(ctx, lttng_chan, event->ctx);
-       lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
-
-       return;
-
-slow_path:
-       lttng_write_event_header_slow(config, ctx, event_id);
-}
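-
-/*
- * Illustration of the compact fast path above: for event_id = 3 and
- * tsc = 0x1234 (hypothetical values), bt_bitfield_write() packs the id
- * into bits [0, 5) and the low 27 timestamp bits into bits [5, 32) of
- * a single uint32_t, so one 4-byte write emits both fields.
- */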
-
-static
-void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
-                                struct lib_ring_buffer_ctx *ctx,
-                                uint32_t event_id)
-{
-       struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
-       struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
-       struct lttng_event *event = lttng_probe_ctx->event;
-
-       switch (lttng_chan->header_type) {
-       case 1: /* compact */
-               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-                       uint32_t id_time = 0;
-
-                       bt_bitfield_write(&id_time, uint32_t,
-                                       0,
-                                       LTTNG_COMPACT_EVENT_BITS,
-                                       event_id);
-                       bt_bitfield_write(&id_time, uint32_t,
-                                       LTTNG_COMPACT_EVENT_BITS,
-                                       LTTNG_COMPACT_TSC_BITS, ctx->tsc);
-                       lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
-               } else {
-                       uint8_t id = 0;
-                       uint64_t timestamp = ctx->tsc;
-
-                       bt_bitfield_write(&id, uint8_t,
-                                       0,
-                                       LTTNG_COMPACT_EVENT_BITS,
-                                       31);
-                       lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-                       /* Align extended struct on largest member */
-                       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
-                       lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
-                       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
-                       lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-               }
-               break;
-       case 2: /* large */
-       {
-               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-                       uint32_t timestamp = (uint32_t) ctx->tsc;
-                       uint16_t id = event_id;
-
-                       lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-                       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
-                       lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-               } else {
-                       uint16_t id = 65535;
-                       uint64_t timestamp = ctx->tsc;
-
-                       lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-                       /* Align extended struct on largest member */
-                       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
-                       lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
-                       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
-                       lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-               }
-               break;
-       }
-       default:
-               WARN_ON_ONCE(1);
-       }
-       ctx_record(ctx, lttng_chan, lttng_chan->ctx);
-       ctx_record(ctx, lttng_chan, event->ctx);
-       lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
-}
-
-static const struct lib_ring_buffer_config client_config;
-
-static u64 client_ring_buffer_clock_read(struct channel *chan)
-{
-       return lib_ring_buffer_clock_read(chan);
-}
-
-static
-size_t client_record_header_size(const struct lib_ring_buffer_config *config,
-                                struct channel *chan, size_t offset,
-                                size_t *pre_header_padding,
-                                struct lib_ring_buffer_ctx *ctx,
-                                void *client_ctx)
-{
-       return record_header_size(config, chan, offset,
-                                 pre_header_padding, ctx, client_ctx);
-}
-
-/**
- * client_packet_header_size - called on buffer-switch to a new sub-buffer
- *
- * Return header size without padding after the structure. Don't use packed
- * structure because gcc generates inefficient code on some architectures
- * (powerpc, mips, ...).
- */
-static size_t client_packet_header_size(void)
-{
-       return offsetof(struct packet_header, ctx.header_end);
-}
-
-static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
-                               unsigned int subbuf_idx)
-{
-       struct channel *chan = buf->backend.chan;
-       struct packet_header *header =
-               (struct packet_header *)
-                       lib_ring_buffer_offset_address(&buf->backend,
-                               subbuf_idx * chan->backend.subbuf_size);
-       struct lttng_channel *lttng_chan = channel_get_private(chan);
-       struct lttng_session *session = lttng_chan->session;
-
-       header->magic = CTF_MAGIC_NUMBER;
-       memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
-       header->stream_id = lttng_chan->id;
-       header->stream_instance_id = buf->backend.cpu;
-       header->ctx.timestamp_begin = tsc;
-       header->ctx.timestamp_end = 0;
-       header->ctx.content_size = ~0ULL; /* for debugging */
-       header->ctx.packet_size = ~0ULL;
-       header->ctx.packet_seq_num = chan->backend.num_subbuf * \
-                                    buf->backend.buf_cnt[subbuf_idx].seq_cnt + \
-                                    subbuf_idx;
-       header->ctx.events_discarded = 0;
-       header->ctx.cpu_id = buf->backend.cpu;
-}
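-
-/*
- * Worked example of the packet_seq_num computation above (hypothetical
- * numbers): with num_subbuf = 4, the third use of sub-buffer index 2
- * (seq_cnt = 2) yields 4 * 2 + 2 = 10, so delivered packets carry a
- * strictly increasing per-buffer sequence number.
- */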
-
-/*
- * offset is assumed never to be 0 here: never deliver a completely empty
- * subbuffer. data_size is between 1 and subbuf_size.
- */
-static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
-                             unsigned int subbuf_idx, unsigned long data_size)
-{
-       struct channel *chan = buf->backend.chan;
-       struct packet_header *header =
-               (struct packet_header *)
-                       lib_ring_buffer_offset_address(&buf->backend,
-                               subbuf_idx * chan->backend.subbuf_size);
-       unsigned long records_lost = 0;
-
-       header->ctx.timestamp_end = tsc;
-       header->ctx.content_size =
-               (uint64_t) data_size * CHAR_BIT;                /* in bits */
-       header->ctx.packet_size =
-               (uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT;    /* in bits */
-       records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
-       records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
-       records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
-       header->ctx.events_discarded = records_lost;
-}
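-
-/*
- * Example (assuming 4 KiB pages): data_size = 100 bytes gives
- * content_size = 800 bits and packet_size = 32768 bits; the difference
- * is end-of-sub-buffer padding that a CTF reader skips over.
- */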
-
-static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
-                               int cpu, const char *name)
-{
-       return 0;
-}
-
-static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
-{
-}
-
-static struct packet_header *client_packet_header(
-               const struct lib_ring_buffer_config *config,
-               struct lib_ring_buffer *buf)
-{
-       return lib_ring_buffer_read_offset_address(&buf->backend, 0);
-}
-
-static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
-               struct lib_ring_buffer *buf,
-               uint64_t *timestamp_begin)
-{
-       struct packet_header *header = client_packet_header(config, buf);
-       *timestamp_begin = header->ctx.timestamp_begin;
-
-       return 0;
-}
-
-static int client_timestamp_end(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *buf,
-                       uint64_t *timestamp_end)
-{
-       struct packet_header *header = client_packet_header(config, buf);
-       *timestamp_end = header->ctx.timestamp_end;
-
-       return 0;
-}
-
-static int client_events_discarded(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *buf,
-                       uint64_t *events_discarded)
-{
-       struct packet_header *header = client_packet_header(config, buf);
-       *events_discarded = header->ctx.events_discarded;
-
-       return 0;
-}
-
-static int client_content_size(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *buf,
-                       uint64_t *content_size)
-{
-       struct packet_header *header = client_packet_header(config, buf);
-       *content_size = header->ctx.content_size;
-
-       return 0;
-}
-
-static int client_packet_size(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *buf,
-                       uint64_t *packet_size)
-{
-       struct packet_header *header = client_packet_header(config, buf);
-       *packet_size = header->ctx.packet_size;
-
-       return 0;
-}
-
-static int client_stream_id(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *buf,
-                       uint64_t *stream_id)
-{
-       struct channel *chan = buf->backend.chan;
-       struct lttng_channel *lttng_chan = channel_get_private(chan);
-
-       *stream_id = lttng_chan->id;
-       return 0;
-}
-
-static int client_current_timestamp(const struct lib_ring_buffer_config *config,
-               struct lib_ring_buffer *bufb,
-               uint64_t *ts)
-{
-       *ts = config->cb.ring_buffer_clock_read(bufb->backend.chan);
-
-       return 0;
-}
-
-static int client_sequence_number(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *buf,
-                       uint64_t *seq)
-{
-       struct packet_header *header = client_packet_header(config, buf);
-
-       *seq = header->ctx.packet_seq_num;
-
-       return 0;
-}
-
-static
-int client_instance_id(const struct lib_ring_buffer_config *config,
-               struct lib_ring_buffer *buf,
-               uint64_t *id)
-{
-       *id = buf->backend.cpu;
-
-       return 0;
-}
-
-static const struct lib_ring_buffer_config client_config = {
-       .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
-       .cb.record_header_size = client_record_header_size,
-       .cb.subbuffer_header_size = client_packet_header_size,
-       .cb.buffer_begin = client_buffer_begin,
-       .cb.buffer_end = client_buffer_end,
-       .cb.buffer_create = client_buffer_create,
-       .cb.buffer_finalize = client_buffer_finalize,
-
-       .tsc_bits = LTTNG_COMPACT_TSC_BITS,
-       .alloc = RING_BUFFER_ALLOC_PER_CPU,
-       .sync = RING_BUFFER_SYNC_PER_CPU,
-       .mode = RING_BUFFER_MODE_TEMPLATE,
-       .backend = RING_BUFFER_PAGE,
-       .output = RING_BUFFER_OUTPUT_TEMPLATE,
-       .oops = RING_BUFFER_OOPS_CONSISTENCY,
-       .ipi = RING_BUFFER_IPI_BARRIER,
-       .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
-};
-
-static
-void release_priv_ops(void *priv_ops)
-{
-       module_put(THIS_MODULE);
-}
-
-static
-void lttng_channel_destroy(struct channel *chan)
-{
-       channel_destroy(chan);
-}
-
-static
-struct channel *_channel_create(const char *name,
-                               struct lttng_channel *lttng_chan, void *buf_addr,
-                               size_t subbuf_size, size_t num_subbuf,
-                               unsigned int switch_timer_interval,
-                               unsigned int read_timer_interval)
-{
-       struct channel *chan;
-
-       chan = channel_create(&client_config, name, lttng_chan, buf_addr,
-                             subbuf_size, num_subbuf, switch_timer_interval,
-                             read_timer_interval);
-       if (chan) {
-               /*
-                * Ensure this module is not unloaded before we finish
-                * using lttng_relay_transport.ops.
-                */
-               if (!try_module_get(THIS_MODULE)) {
-                       printk(KERN_WARNING "LTT : Can't lock transport module.\n");
-                       goto error;
-               }
-               chan->backend.priv_ops = &lttng_relay_transport.ops;
-               chan->backend.release_priv_ops = release_priv_ops;
-       }
-       return chan;
-
-error:
-       lttng_channel_destroy(chan);
-       return NULL;
-}
-
-static
-struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
-{
-       struct lib_ring_buffer *buf;
-       int cpu;
-
-       for_each_channel_cpu(cpu, chan) {
-               buf = channel_get_ring_buffer(&client_config, chan, cpu);
-               if (!lib_ring_buffer_open_read(buf))
-                       return buf;
-       }
-       return NULL;
-}
-
-static
-int lttng_buffer_has_read_closed_stream(struct channel *chan)
-{
-       struct lib_ring_buffer *buf;
-       int cpu;
-
-       for_each_channel_cpu(cpu, chan) {
-               buf = channel_get_ring_buffer(&client_config, chan, cpu);
-               if (!atomic_long_read(&buf->active_readers))
-                       return 1;
-       }
-       return 0;
-}
-
-static
-void lttng_buffer_read_close(struct lib_ring_buffer *buf)
-{
-       lib_ring_buffer_release_read(buf);
-}
-
-static
-int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
-                     uint32_t event_id)
-{
-       struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
-       struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
-       struct lttng_event *event = lttng_probe_ctx->event;
-       struct lttng_client_ctx client_ctx;
-       int ret, cpu;
-
-       cpu = lib_ring_buffer_get_cpu(&client_config);
-       if (unlikely(cpu < 0))
-               return -EPERM;
-       ctx->cpu = cpu;
-
-       /* Compute internal size of context structures. */
-       ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len, lttng_chan, ctx);
-       ctx_get_struct_size(event->ctx, &client_ctx.event_context_len, lttng_chan, ctx);
-
-       switch (lttng_chan->header_type) {
-       case 1: /* compact */
-               if (event_id > 30)
-                       ctx->rflags |= LTTNG_RFLAG_EXTENDED;
-               break;
-       case 2: /* large */
-               if (event_id > 65534)
-                       ctx->rflags |= LTTNG_RFLAG_EXTENDED;
-               break;
-       default:
-               WARN_ON_ONCE(1);
-       }
-
-       ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
-       if (unlikely(ret))
-               goto put;
-       lib_ring_buffer_backend_get_pages(&client_config, ctx,
-                       &ctx->backend_pages);
-       lttng_write_event_header(&client_config, ctx, event_id);
-       return 0;
-put:
-       lib_ring_buffer_put_cpu(&client_config);
-       return ret;
-}
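-
-/*
- * The thresholds above follow from the header encodings: the 5-bit
- * compact id represents 0..30, with 31 reserved as the escape value
- * written by lttng_write_event_header_slow(); the 16-bit large id
- * represents 0..65534, with 65535 reserved as the escape. Larger event
- * ids therefore need the extended header via LTTNG_RFLAG_EXTENDED.
- */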
-
-static
-void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
-{
-       lib_ring_buffer_commit(&client_config, ctx);
-       lib_ring_buffer_put_cpu(&client_config);
-}
-
-static
-void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
-                    size_t len)
-{
-       lib_ring_buffer_write(&client_config, ctx, src, len);
-}
-
-static
-void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
-                              const void __user *src, size_t len)
-{
-       lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
-}
-
-static
-void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
-               int c, size_t len)
-{
-       lib_ring_buffer_memset(&client_config, ctx, c, len);
-}
-
-static
-void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
-               size_t len)
-{
-       lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
-}
-
-static
-void lttng_event_strcpy_from_user(struct lib_ring_buffer_ctx *ctx,
-               const char __user *src, size_t len)
-{
-       lib_ring_buffer_strcpy_from_user_inatomic(&client_config, ctx, src,
-                       len, '#');
-}
-
-static
-wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
-{
-       struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
-                                       chan, cpu);
-       return &buf->write_wait;
-}
-
-static
-wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
-{
-       return &chan->hp_wait;
-}
-
-static
-int lttng_is_finalized(struct channel *chan)
-{
-       return lib_ring_buffer_channel_is_finalized(chan);
-}
-
-static
-int lttng_is_disabled(struct channel *chan)
-{
-       return lib_ring_buffer_channel_is_disabled(chan);
-}
-
-static struct lttng_transport lttng_relay_transport = {
-       .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
-       .owner = THIS_MODULE,
-       .ops = {
-               .channel_create = _channel_create,
-               .channel_destroy = lttng_channel_destroy,
-               .buffer_read_open = lttng_buffer_read_open,
-               .buffer_has_read_closed_stream =
-                       lttng_buffer_has_read_closed_stream,
-               .buffer_read_close = lttng_buffer_read_close,
-               .event_reserve = lttng_event_reserve,
-               .event_commit = lttng_event_commit,
-               .event_write = lttng_event_write,
-               .event_write_from_user = lttng_event_write_from_user,
-               .event_memset = lttng_event_memset,
-               .event_strcpy = lttng_event_strcpy,
-               .event_strcpy_from_user = lttng_event_strcpy_from_user,
-               .packet_avail_size = NULL,      /* Would be racy anyway */
-               .get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
-               .get_hp_wait_queue = lttng_get_hp_wait_queue,
-               .is_finalized = lttng_is_finalized,
-               .is_disabled = lttng_is_disabled,
-               .timestamp_begin = client_timestamp_begin,
-               .timestamp_end = client_timestamp_end,
-               .events_discarded = client_events_discarded,
-               .content_size = client_content_size,
-               .packet_size = client_packet_size,
-               .stream_id = client_stream_id,
-               .current_timestamp = client_current_timestamp,
-               .sequence_number = client_sequence_number,
-               .instance_id = client_instance_id,
-       },
-};
-
-static int __init lttng_ring_buffer_client_init(void)
-{
-       /*
-        * This vmalloc sync-all also takes care of the lib ring buffer's
-        * vmalloc'd pages when the ring buffer is built as a module into LTTng.
-        */
-       wrapper_vmalloc_sync_mappings();
-       lttng_transport_register(&lttng_relay_transport);
-       return 0;
-}
-
-module_init(lttng_ring_buffer_client_init);
-
-static void __exit lttng_ring_buffer_client_exit(void)
-{
-       lttng_transport_unregister(&lttng_relay_transport);
-}
-
-module_exit(lttng_ring_buffer_client_exit);
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
-                  " client");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/lttng-ring-buffer-metadata-client.c b/lttng-ring-buffer-metadata-client.c
deleted file mode 100644 (file)
index 2d52492..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-ring-buffer-metadata-client.c
- *
- * LTTng lib ring buffer metadata client.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_DISCARD
-#define RING_BUFFER_MODE_TEMPLATE_STRING       "metadata"
-#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_SPLICE
-#include "lttng-ring-buffer-metadata-client.h"
diff --git a/lttng-ring-buffer-metadata-client.h b/lttng-ring-buffer-metadata-client.h
deleted file mode 100644 (file)
index 0f68b38..0000000
+++ /dev/null
@@ -1,451 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-ring-buffer-metadata-client.h
- *
- * LTTng lib ring buffer metadata client template.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-
-static struct lttng_transport lttng_relay_transport;
-
-struct metadata_packet_header {
-       uint32_t magic;                 /* 0x75D11D57 */
-       uint8_t  uuid[16];              /* Unique Universal Identifier */
-       uint32_t checksum;              /* 0 if unused */
-       uint32_t content_size;          /* in bits */
-       uint32_t packet_size;           /* in bits */
-       uint8_t  compression_scheme;    /* 0 if unused */
-       uint8_t  encryption_scheme;     /* 0 if unused */
-       uint8_t  checksum_scheme;       /* 0 if unused */
-       uint8_t  major;                 /* CTF spec major version number */
-       uint8_t  minor;                 /* CTF spec minor version number */
-       uint8_t  header_end[0];
-};
-
-struct metadata_record_header {
-       uint8_t header_end[0];          /* End of header */
-};
-
-static const struct lib_ring_buffer_config client_config;
-
-static inline
-u64 lib_ring_buffer_clock_read(struct channel *chan)
-{
-       return 0;
-}
-
-static inline
-size_t record_header_size(const struct lib_ring_buffer_config *config,
-                                struct channel *chan, size_t offset,
-                                size_t *pre_header_padding,
-                                struct lib_ring_buffer_ctx *ctx,
-                                void *client_ctx)
-{
-       return 0;
-}
-
-#include <ringbuffer/api.h>
-
-static u64 client_ring_buffer_clock_read(struct channel *chan)
-{
-       return 0;
-}
-
-static
-size_t client_record_header_size(const struct lib_ring_buffer_config *config,
-                                struct channel *chan, size_t offset,
-                                size_t *pre_header_padding,
-                                struct lib_ring_buffer_ctx *ctx,
-                                void *client_ctx)
-{
-       return 0;
-}
-
-/**
- * client_packet_header_size - called on buffer-switch to a new sub-buffer
- *
- * Return header size without padding after the structure. Don't use packed
- * structure because gcc generates inefficient code on some architectures
- * (powerpc, mips, ...).
- */
-static size_t client_packet_header_size(void)
-{
-       return offsetof(struct metadata_packet_header, header_end);
-}
-
-static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
-                               unsigned int subbuf_idx)
-{
-       struct channel *chan = buf->backend.chan;
-       struct metadata_packet_header *header =
-               (struct metadata_packet_header *)
-                       lib_ring_buffer_offset_address(&buf->backend,
-                               subbuf_idx * chan->backend.subbuf_size);
-       struct lttng_metadata_cache *metadata_cache =
-               channel_get_private(chan);
-
-       header->magic = TSDL_MAGIC_NUMBER;
-       memcpy(header->uuid, metadata_cache->uuid.b,
-               sizeof(metadata_cache->uuid));
-       header->checksum = 0;           /* 0 if unused */
-       header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
-       header->packet_size = 0xFFFFFFFF;  /* in bits, for debugging */
-       header->compression_scheme = 0; /* 0 if unused */
-       header->encryption_scheme = 0;  /* 0 if unused */
-       header->checksum_scheme = 0;    /* 0 if unused */
-       header->major = CTF_SPEC_MAJOR;
-       header->minor = CTF_SPEC_MINOR;
-}
-
-/*
- * offset is assumed never to be 0 here: never deliver a completely empty
- * subbuffer. data_size is between 1 and subbuf_size.
- */
-static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
-                             unsigned int subbuf_idx, unsigned long data_size)
-{
-       struct channel *chan = buf->backend.chan;
-       struct metadata_packet_header *header =
-               (struct metadata_packet_header *)
-                       lib_ring_buffer_offset_address(&buf->backend,
-                               subbuf_idx * chan->backend.subbuf_size);
-       unsigned long records_lost = 0;
-
-       header->content_size = data_size * CHAR_BIT;            /* in bits */
-       header->packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
-       /*
-        * We do not care about the records lost count, because the metadata
-        * channel waits and retries.
-        */
-       (void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
-       records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
-       records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
-       WARN_ON_ONCE(records_lost != 0);
-}
-
-static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
-                               int cpu, const char *name)
-{
-       return 0;
-}
-
-static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
-{
-}
-
-static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
-               struct lib_ring_buffer *buf, uint64_t *timestamp_begin)
-{
-       return -ENOSYS;
-}
-
-static int client_timestamp_end(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *bufb,
-                       uint64_t *timestamp_end)
-{
-       return -ENOSYS;
-}
-
-static int client_events_discarded(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *bufb,
-                       uint64_t *events_discarded)
-{
-       return -ENOSYS;
-}
-
-static int client_current_timestamp(const struct lib_ring_buffer_config *config,
-               struct lib_ring_buffer *bufb,
-               uint64_t *ts)
-{
-       return -ENOSYS;
-}
-
-static int client_content_size(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *bufb,
-                       uint64_t *content_size)
-{
-       return -ENOSYS;
-}
-
-static int client_packet_size(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *bufb,
-                       uint64_t *packet_size)
-{
-       return -ENOSYS;
-}
-
-static int client_stream_id(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *bufb,
-                       uint64_t *stream_id)
-{
-       return -ENOSYS;
-}
-
-static int client_sequence_number(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer *bufb,
-                       uint64_t *seq)
-{
-       return -ENOSYS;
-}
-
-static
-int client_instance_id(const struct lib_ring_buffer_config *config,
-               struct lib_ring_buffer *bufb,
-               uint64_t *id)
-{
-       return -ENOSYS;
-}
-
-static const struct lib_ring_buffer_config client_config = {
-       .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
-       .cb.record_header_size = client_record_header_size,
-       .cb.subbuffer_header_size = client_packet_header_size,
-       .cb.buffer_begin = client_buffer_begin,
-       .cb.buffer_end = client_buffer_end,
-       .cb.buffer_create = client_buffer_create,
-       .cb.buffer_finalize = client_buffer_finalize,
-
-       .tsc_bits = 0,
-       .alloc = RING_BUFFER_ALLOC_GLOBAL,
-       .sync = RING_BUFFER_SYNC_GLOBAL,
-       .mode = RING_BUFFER_MODE_TEMPLATE,
-       .backend = RING_BUFFER_PAGE,
-       .output = RING_BUFFER_OUTPUT_TEMPLATE,
-       .oops = RING_BUFFER_OOPS_CONSISTENCY,
-       .ipi = RING_BUFFER_IPI_BARRIER,
-       .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
-};
-
-static
-void release_priv_ops(void *priv_ops)
-{
-       module_put(THIS_MODULE);
-}
-
-static
-void lttng_channel_destroy(struct channel *chan)
-{
-       channel_destroy(chan);
-}
-
-static
-struct channel *_channel_create(const char *name,
-                               struct lttng_channel *lttng_chan, void *buf_addr,
-                               size_t subbuf_size, size_t num_subbuf,
-                               unsigned int switch_timer_interval,
-                               unsigned int read_timer_interval)
-{
-       struct channel *chan;
-
-       chan = channel_create(&client_config, name,
-                             lttng_chan->session->metadata_cache, buf_addr,
-                             subbuf_size, num_subbuf, switch_timer_interval,
-                             read_timer_interval);
-       if (chan) {
-               /*
-                * Ensure this module is not unloaded before we finish
-                * using lttng_relay_transport.ops.
-                */
-               if (!try_module_get(THIS_MODULE)) {
-                       printk(KERN_WARNING "LTT : Can't lock transport module.\n");
-                       goto error;
-               }
-               chan->backend.priv_ops = &lttng_relay_transport.ops;
-               chan->backend.release_priv_ops = release_priv_ops;
-       }
-       return chan;
-
-error:
-       lttng_channel_destroy(chan);
-       return NULL;
-}
-
-static
-struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
-{
-       struct lib_ring_buffer *buf;
-
-       buf = channel_get_ring_buffer(&client_config, chan, 0);
-       if (!lib_ring_buffer_open_read(buf))
-               return buf;
-       return NULL;
-}
-
-static
-int lttng_buffer_has_read_closed_stream(struct channel *chan)
-{
-       struct lib_ring_buffer *buf;
-       int cpu;
-
-       for_each_channel_cpu(cpu, chan) {
-               buf = channel_get_ring_buffer(&client_config, chan, cpu);
-               if (!atomic_long_read(&buf->active_readers))
-                       return 1;
-       }
-       return 0;
-}
-
-static
-void lttng_buffer_read_close(struct lib_ring_buffer *buf)
-{
-       lib_ring_buffer_release_read(buf);
-}
-
-static
-int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
-{
-       int ret;
-
-       ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
-       if (ret)
-               return ret;
-       lib_ring_buffer_backend_get_pages(&client_config, ctx,
-                       &ctx->backend_pages);
-       return 0;
-}
-
-static
-void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
-{
-       lib_ring_buffer_commit(&client_config, ctx);
-}
-
-static
-void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
-                    size_t len)
-{
-       lib_ring_buffer_write(&client_config, ctx, src, len);
-}
-
-static
-void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
-                              const void __user *src, size_t len)
-{
-       lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
-}
-
-static
-void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
-               int c, size_t len)
-{
-       lib_ring_buffer_memset(&client_config, ctx, c, len);
-}
-
-static
-void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
-               size_t len)
-{
-       lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
-}
-
-static
-size_t lttng_packet_avail_size(struct channel *chan)
-{
-       unsigned long o_begin;
-       struct lib_ring_buffer *buf;
-
-       buf = chan->backend.buf;        /* Only for global buffer! */
-       o_begin = v_read(&client_config, &buf->offset);
-       if (subbuf_offset(o_begin, chan) != 0) {
-               return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
-       } else {
-               return chan->backend.subbuf_size - subbuf_offset(o_begin, chan)
-                       - sizeof(struct metadata_packet_header);
-       }
-}
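lttng_packet_avail_size() leaves room for the metadata packet header only when the write offset sits at the very start of a sub-buffer, which is where that header will be written; anywhere else, the remainder of the sub-buffer is pure payload. A userspace sketch of the same arithmetic, with made-up sizes (the real subbuf_offset() masks the offset against the sub-buffer geometry rather than using a modulo):

#include <stddef.h>
#include <stdio.h>

#define SUBBUF_SIZE	4096u
#define HEADER_SIZE	64u	/* stand-in for sizeof(struct metadata_packet_header) */

/* Bytes still writable in the current sub-buffer at a given offset. */
static size_t avail(size_t offset)
{
	size_t subbuf_offset = offset % SUBBUF_SIZE;

	if (subbuf_offset != 0)
		return SUBBUF_SIZE - subbuf_offset;
	/* Start of a sub-buffer: the packet header claims its share first. */
	return SUBBUF_SIZE - subbuf_offset - HEADER_SIZE;
}

int main(void)
{
	printf("%zu\n", avail(0));	/* 4032: header space reserved */
	printf("%zu\n", avail(100));	/* 3996: payload only */
	return 0;
}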
-
-static
-wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
-{
-       struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
-                                       chan, cpu);
-       return &buf->write_wait;
-}
-
-static
-wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
-{
-       return &chan->hp_wait;
-}
-
-static
-int lttng_is_finalized(struct channel *chan)
-{
-       return lib_ring_buffer_channel_is_finalized(chan);
-}
-
-static
-int lttng_is_disabled(struct channel *chan)
-{
-       return lib_ring_buffer_channel_is_disabled(chan);
-}
-
-static struct lttng_transport lttng_relay_transport = {
-       .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
-       .owner = THIS_MODULE,
-       .ops = {
-               .channel_create = _channel_create,
-               .channel_destroy = lttng_channel_destroy,
-               .buffer_read_open = lttng_buffer_read_open,
-               .buffer_has_read_closed_stream =
-                       lttng_buffer_has_read_closed_stream,
-               .buffer_read_close = lttng_buffer_read_close,
-               .event_reserve = lttng_event_reserve,
-               .event_commit = lttng_event_commit,
-               .event_write_from_user = lttng_event_write_from_user,
-               .event_memset = lttng_event_memset,
-               .event_write = lttng_event_write,
-               .event_strcpy = lttng_event_strcpy,
-               .packet_avail_size = lttng_packet_avail_size,
-               .get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
-               .get_hp_wait_queue = lttng_get_hp_wait_queue,
-               .is_finalized = lttng_is_finalized,
-               .is_disabled = lttng_is_disabled,
-               .timestamp_begin = client_timestamp_begin,
-               .timestamp_end = client_timestamp_end,
-               .events_discarded = client_events_discarded,
-               .content_size = client_content_size,
-               .packet_size = client_packet_size,
-               .stream_id = client_stream_id,
-               .current_timestamp = client_current_timestamp,
-               .sequence_number = client_sequence_number,
-               .instance_id = client_instance_id,
-       },
-};
-
-static int __init lttng_ring_buffer_client_init(void)
-{
-       /*
-        * This vmalloc sync-all also takes care of the lib ring buffer's
-        * vmalloc'd pages when it is built into LTTng as a module.
-        */
-       wrapper_vmalloc_sync_mappings();
-       lttng_transport_register(&lttng_relay_transport);
-       return 0;
-}
-
-module_init(lttng_ring_buffer_client_init);
-
-static void __exit lttng_ring_buffer_client_exit(void)
-{
-       lttng_transport_unregister(&lttng_relay_transport);
-}
-
-module_exit(lttng_ring_buffer_client_exit);
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
-                  " client");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/lttng-ring-buffer-metadata-mmap-client.c b/lttng-ring-buffer-metadata-mmap-client.c
deleted file mode 100644 (file)
index 15975b4..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-ring-buffer-metadata-mmap-client.c
- *
- * LTTng lib ring buffer metadata mmap client.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_DISCARD
-#define RING_BUFFER_MODE_TEMPLATE_STRING       "metadata-mmap"
-#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_MMAP
-#include "lttng-ring-buffer-metadata-client.h"
diff --git a/lttng-statedump-impl.c b/lttng-statedump-impl.c
deleted file mode 100644 (file)
index 1a2a12b..0000000
+++ /dev/null
@@ -1,647 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-statedump-impl.c
- *
- * Linux Trace Toolkit Next Generation Kernel State Dump
- *
- * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
- * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Changes:
- *     Eric Clement:                   Add listing of network IP interface
- *     2006, 2007 Mathieu Desnoyers    Fix kernel threads
- *                                     Various updates
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netlink.h>
-#include <linux/inet.h>
-#include <linux/ip.h>
-#include <linux/kthread.h>
-#include <linux/proc_fs.h>
-#include <linux/file.h>
-#include <linux/interrupt.h>
-#include <linux/irqnr.h>
-#include <linux/cpu.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <wrapper/irqdesc.h>
-#include <wrapper/fdtable.h>
-#include <wrapper/namespace.h>
-#include <wrapper/irq.h>
-#include <wrapper/tracepoint.h>
-#include <wrapper/genhd.h>
-#include <wrapper/file.h>
-
-#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
-#include <linux/irq.h>
-#endif
-
-/* Define the tracepoints, but do not build the probes */
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-#define TRACE_INCLUDE_FILE lttng-statedump
-#define LTTNG_INSTRUMENTATION
-#include <instrumentation/events/lttng-statedump.h>
-
-DEFINE_TRACE(lttng_statedump_block_device);
-DEFINE_TRACE(lttng_statedump_end);
-DEFINE_TRACE(lttng_statedump_interrupt);
-DEFINE_TRACE(lttng_statedump_file_descriptor);
-DEFINE_TRACE(lttng_statedump_start);
-DEFINE_TRACE(lttng_statedump_process_state);
-DEFINE_TRACE(lttng_statedump_process_pid_ns);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
-DEFINE_TRACE(lttng_statedump_process_cgroup_ns);
-#endif
-DEFINE_TRACE(lttng_statedump_process_ipc_ns);
-#ifndef LTTNG_MNT_NS_MISSING_HEADER
-DEFINE_TRACE(lttng_statedump_process_mnt_ns);
-#endif
-DEFINE_TRACE(lttng_statedump_process_net_ns);
-DEFINE_TRACE(lttng_statedump_process_user_ns);
-DEFINE_TRACE(lttng_statedump_process_uts_ns);
-DEFINE_TRACE(lttng_statedump_network_interface);
-#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
-DEFINE_TRACE(lttng_statedump_cpu_topology);
-#endif
-
-struct lttng_fd_ctx {
-       char *page;
-       struct lttng_session *session;
-       struct files_struct *files;
-};
-
-/*
- * Protected by the trace lock.
- */
-static struct delayed_work cpu_work[NR_CPUS];
-static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
-static atomic_t kernel_threads_to_run;
-
-enum lttng_thread_type {
-       LTTNG_USER_THREAD = 0,
-       LTTNG_KERNEL_THREAD = 1,
-};
-
-enum lttng_execution_mode {
-       LTTNG_USER_MODE = 0,
-       LTTNG_SYSCALL = 1,
-       LTTNG_TRAP = 2,
-       LTTNG_IRQ = 3,
-       LTTNG_SOFTIRQ = 4,
-       LTTNG_MODE_UNKNOWN = 5,
-};
-
-enum lttng_execution_submode {
-       LTTNG_NONE = 0,
-       LTTNG_UNKNOWN = 1,
-};
-
-enum lttng_process_status {
-       LTTNG_UNNAMED = 0,
-       LTTNG_WAIT_FORK = 1,
-       LTTNG_WAIT_CPU = 2,
-       LTTNG_EXIT = 3,
-       LTTNG_ZOMBIE = 4,
-       LTTNG_WAIT = 5,
-       LTTNG_RUN = 6,
-       LTTNG_DEAD = 7,
-};
-
-static
-int lttng_enumerate_block_devices(struct lttng_session *session)
-{
-       struct class *ptr_block_class;
-       struct device_type *ptr_disk_type;
-       struct class_dev_iter iter;
-       struct device *dev;
-
-       ptr_block_class = wrapper_get_block_class();
-       if (!ptr_block_class)
-               return -ENOSYS;
-       ptr_disk_type = wrapper_get_disk_type();
-       if (!ptr_disk_type) {
-               return -ENOSYS;
-       }
-       class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
-       while ((dev = class_dev_iter_next(&iter))) {
-               struct disk_part_iter piter;
-               struct gendisk *disk = dev_to_disk(dev);
-               struct hd_struct *part;
-
-               /*
-                * Don't show empty devices or things that have been
-                * suppressed
-                */
-               if (get_capacity(disk) == 0 ||
-                   (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
-                       continue;
-
-               disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
-               while ((part = disk_part_iter_next(&piter))) {
-                       char name_buf[BDEVNAME_SIZE];
-                       char *p;
-
-                       p = wrapper_disk_name(disk, part->partno, name_buf);
-                       if (!p) {
-                               disk_part_iter_exit(&piter);
-                               class_dev_iter_exit(&iter);
-                               return -ENOSYS;
-                       }
-                       trace_lttng_statedump_block_device(session,
-                                       part_devt(part), name_buf);
-               }
-               disk_part_iter_exit(&piter);
-       }
-       class_dev_iter_exit(&iter);
-       return 0;
-}
-
-#ifdef CONFIG_INET
-
-static
-void lttng_enumerate_device(struct lttng_session *session,
-               struct net_device *dev)
-{
-       struct in_device *in_dev;
-       struct in_ifaddr *ifa;
-
-       if (dev->flags & IFF_UP) {
-               in_dev = in_dev_get(dev);
-               if (in_dev) {
-                       for (ifa = in_dev->ifa_list; ifa != NULL;
-                            ifa = ifa->ifa_next) {
-                               trace_lttng_statedump_network_interface(
-                                       session, dev, ifa);
-                       }
-                       in_dev_put(in_dev);
-               }
-       } else {
-               trace_lttng_statedump_network_interface(
-                       session, dev, NULL);
-       }
-}
-
-static
-int lttng_enumerate_network_ip_interface(struct lttng_session *session)
-{
-       struct net_device *dev;
-
-       read_lock(&dev_base_lock);
-       for_each_netdev(&init_net, dev)
-               lttng_enumerate_device(session, dev);
-       read_unlock(&dev_base_lock);
-
-       return 0;
-}
-#else /* CONFIG_INET */
-static inline
-int lttng_enumerate_network_ip_interface(struct lttng_session *session)
-{
-       return 0;
-}
-#endif /* CONFIG_INET */
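The #else branch above is the standard config-stub pattern: with CONFIG_INET disabled, the enumerator compiles down to an inline no-op, so the caller in do_lttng_statedump() needs no #ifdef of its own. A standalone illustration with a hypothetical HAVE_FEATURE switch:

#include <stdio.h>

/* #define HAVE_FEATURE 1  -- normally toggled by the build system */

#ifdef HAVE_FEATURE
static int enumerate_feature(void)
{
	/* real work would go here */
	return 1;
}
#else
static inline int enumerate_feature(void)
{
	return 0;	/* no-op stub */
}
#endif

int main(void)
{
	/* The caller never needs its own #ifdef. */
	printf("%d\n", enumerate_feature());
	return 0;
}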
-
-static
-int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
-{
-       const struct lttng_fd_ctx *ctx = p;
-       const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
-       unsigned int flags = file->f_flags;
-       struct fdtable *fdt;
-
-       /*
-        * We don't expose kernel internal flags, only userspace-visible
-        * flags.
-        */
-       flags &= ~FMODE_NONOTIFY;
-       fdt = files_fdtable(ctx->files);
-       /*
-        * We need to check here again whether fd is within the fdt
-        * max_fds range, because we might be seeing a different
-        * files_fdtable() than iterate_fd(), assuming only RCU is
-        * protecting the read. In reality, iterate_fd() holds
-        * file_lock, which should ensure the fdt does not change while
-        * the lock is taken, but it is unclear whether this is
-        * guaranteed, so play it safe.
-        */
-       if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
-               flags |= O_CLOEXEC;
-       if (IS_ERR(s)) {
-               struct dentry *dentry = file->f_path.dentry;
-
-               /* Make sure we give at least some info */
-               spin_lock(&dentry->d_lock);
-               trace_lttng_statedump_file_descriptor(ctx->session,
-                       ctx->files, fd, dentry->d_name.name, flags,
-                       file->f_mode);
-               spin_unlock(&dentry->d_lock);
-               goto end;
-       }
-       trace_lttng_statedump_file_descriptor(ctx->session,
-               ctx->files, fd, s, flags, file->f_mode);
-end:
-       return 0;
-}
-
-/* Called with task lock held. */
-static
-void lttng_enumerate_files(struct lttng_session *session,
-               struct files_struct *files,
-               char *tmp)
-{
-       struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };
-
-       lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
-}
-
-#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
-static
-int lttng_enumerate_cpu_topology(struct lttng_session *session)
-{
-       int cpu;
-       const cpumask_t *cpumask = cpu_possible_mask;
-
-       for (cpu = cpumask_first(cpumask); cpu < nr_cpu_ids;
-                       cpu = cpumask_next(cpu, cpumask)) {
-               trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));
-       }
-
-       return 0;
-}
-#else
-static
-int lttng_enumerate_cpu_topology(struct lttng_session *session)
-{
-       return 0;
-}
-#endif
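The explicit cpumask_first()/cpumask_next() loop above walks every set bit of cpu_possible_mask and stops at nr_cpu_ids; it is the open-coded form of the kernel's for_each_possible_cpu() helper. A sketch of the equivalent (kernel-style; cpu_data() is architecture-specific, which is why the original is gated on LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY):

#include <linux/cpumask.h>

/* Same walk, using the dedicated iterator helper. */
static int enumerate_cpu_topology_sketch(struct lttng_session *session)
{
	int cpu;

	for_each_possible_cpu(cpu)
		trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));
	return 0;
}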
-
-#if 0
-/*
- * FIXME: we cannot take the mmap_sem while in an RCU read-side critical
- * section (down_read() may sleep, hence "scheduling while atomic").
- * Normally, the tasklist lock protects this kind of iteration, but it is
- * not exported to modules.
- */
-static
-void lttng_enumerate_task_vm_maps(struct lttng_session *session,
-               struct task_struct *p)
-{
-       struct mm_struct *mm;
-       struct vm_area_struct *map;
-       unsigned long ino;
-
-       /* get_task_mm does a task_lock... */
-       mm = get_task_mm(p);
-       if (!mm)
-               return;
-
-       map = mm->mmap;
-       if (map) {
-               down_read(&mm->mmap_sem);
-               while (map) {
-                       if (map->vm_file)
-                               ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
-                       else
-                               ino = 0;
-                       trace_lttng_statedump_vm_map(session, p, map, ino);
-                       map = map->vm_next;
-               }
-               up_read(&mm->mmap_sem);
-       }
-       mmput(mm);
-}
-
-static
-int lttng_enumerate_vm_maps(struct lttng_session *session)
-{
-       struct task_struct *p;
-
-       rcu_read_lock();
-       for_each_process(p)
-               lttng_enumerate_task_vm_maps(session, p);
-       rcu_read_unlock();
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
-
-static
-int lttng_list_interrupts(struct lttng_session *session)
-{
-       unsigned int irq;
-       unsigned long flags = 0;
-       struct irq_desc *desc;
-
-#define irq_to_desc    wrapper_irq_to_desc
-       /* needs irq_desc */
-       for_each_irq_desc(irq, desc) {
-               struct irqaction *action;
-               const char *irq_chip_name =
-                       irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";
-
-               local_irq_save(flags);
-               raw_spin_lock(&desc->lock);
-               for (action = desc->action; action; action = action->next) {
-                       trace_lttng_statedump_interrupt(session,
-                               irq, irq_chip_name, action);
-               }
-               raw_spin_unlock(&desc->lock);
-               local_irq_restore(flags);
-       }
-       return 0;
-#undef irq_to_desc
-}
-#else
-static inline
-int lttng_list_interrupts(struct lttng_session *session)
-{
-       return 0;
-}
-#endif
-
-/*
- * Statedump the task's namespaces using the proc filesystem inode number as
- * the unique identifier. The user and pid ns are nested and will be dumped
- * recursively.
- *
- * Called with task lock held.
- */
-static
-void lttng_statedump_process_ns(struct lttng_session *session,
-               struct task_struct *p,
-               enum lttng_thread_type type,
-               enum lttng_execution_mode mode,
-               enum lttng_execution_submode submode,
-               enum lttng_process_status status)
-{
-       struct nsproxy *proxy;
-       struct pid_namespace *pid_ns;
-       struct user_namespace *user_ns;
-
-       /*
-        * The pid and user namespaces are special, they are nested and
-        * accessed with specific functions instead of the nsproxy struct
-        * like the other namespaces.
-        */
-       pid_ns = task_active_pid_ns(p);
-       do {
-               trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
-               pid_ns = pid_ns ? pid_ns->parent : NULL;
-       } while (pid_ns);
-
-       user_ns = task_cred_xxx(p, user_ns);
-       do {
-               trace_lttng_statedump_process_user_ns(session, p, user_ns);
-               /*
-                * trace_lttng_statedump_process_user_ns() internally
-                * checks whether user_ns is NULL. While this does not
-                * appear to be a possible return value for
-                * task_cred_xxx(), err on the safe side and check
-                * for NULL here as well to be consistent with the
-                * paranoid behavior of
-                * trace_lttng_statedump_process_user_ns().
-                */
-               user_ns = user_ns ? user_ns->lttng_user_ns_parent : NULL;
-       } while (user_ns);
-
-       /*
-        * Back and forth on locking strategy within Linux upstream for nsproxy.
-        * See Linux upstream commit 728dba3a39c66b3d8ac889ddbe38b5b1c264aec3
-        * "namespaces: Use task_lock and not rcu to protect nsproxy"
-        * for details.
-        */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
-               LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
-               LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
-               LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
-       proxy = p->nsproxy;
-#else
-       rcu_read_lock();
-       proxy = task_nsproxy(p);
-#endif
-       if (proxy) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
-               trace_lttng_statedump_process_cgroup_ns(session, p, proxy->cgroup_ns);
-#endif
-               trace_lttng_statedump_process_ipc_ns(session, p, proxy->ipc_ns);
-#ifndef LTTNG_MNT_NS_MISSING_HEADER
-               trace_lttng_statedump_process_mnt_ns(session, p, proxy->mnt_ns);
-#endif
-               trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
-               trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
-       }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
-               LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
-               LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
-               LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
-       /* (nothing) */
-#else
-       rcu_read_unlock();
-#endif
-}
-
-static
-int lttng_enumerate_process_states(struct lttng_session *session)
-{
-       struct task_struct *g, *p;
-       char *tmp;
-
-       tmp = (char *) __get_free_page(GFP_KERNEL);
-       if (!tmp)
-               return -ENOMEM;
-
-       rcu_read_lock();
-       for_each_process(g) {
-               struct files_struct *prev_files = NULL;
-
-               p = g;
-               do {
-                       enum lttng_execution_mode mode =
-                               LTTNG_MODE_UNKNOWN;
-                       enum lttng_execution_submode submode =
-                               LTTNG_UNKNOWN;
-                       enum lttng_process_status status;
-                       enum lttng_thread_type type;
-                       struct files_struct *files;
-
-                       task_lock(p);
-                       if (p->exit_state == EXIT_ZOMBIE)
-                               status = LTTNG_ZOMBIE;
-                       else if (p->exit_state == EXIT_DEAD)
-                               status = LTTNG_DEAD;
-                       else if (p->state == TASK_RUNNING) {
-                               /* Is this a forked child that has not run yet? */
-                               if (list_empty(&p->rt.run_list))
-                                       status = LTTNG_WAIT_FORK;
-                               else
-                                       /*
-                                        * All tasks are considered as wait_cpu;
-                                        * the viewer will sort out if the task
-                                        * was really running at this time.
-                                        */
-                                       status = LTTNG_WAIT_CPU;
-                       } else if (p->state &
-                               (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
-                               /* Task is waiting for something to complete */
-                               status = LTTNG_WAIT;
-                       } else
-                               status = LTTNG_UNNAMED;
-                       submode = LTTNG_NONE;
-
-                       /*
-                        * Checking p->mm filters out kernel threads;
-                        * the viewer will further determine whether a
-                        * user-space thread was in syscall mode.
-                        */
-                       if (p->mm)
-                               type = LTTNG_USER_THREAD;
-                       else
-                               type = LTTNG_KERNEL_THREAD;
-                       files = p->files;
-
-                       trace_lttng_statedump_process_state(session,
-                               p, type, mode, submode, status, files);
-                       lttng_statedump_process_ns(session,
-                               p, type, mode, submode, status);
-                       /*
-                        * As an optimisation for the common case, do not
-                        * repeat information for the same files_struct in
-                        * two consecutive threads. This is the common case
-                        * for threads sharing the same fd table. RCU guarantees
-                        * that the same files_struct pointer is not re-used
-                        * throughout processes/threads iteration.
-                        */
-                       if (files && files != prev_files) {
-                               lttng_enumerate_files(session, files, tmp);
-                               prev_files = files;
-                       }
-                       task_unlock(p);
-               } while_each_thread(g, p);
-       }
-       rcu_read_unlock();
-
-       free_page((unsigned long) tmp);
-
-       return 0;
-}
-
-static
-void lttng_statedump_work_func(struct work_struct *work)
-{
-       if (atomic_dec_and_test(&kernel_threads_to_run))
-               /* If we are the last thread, wake up do_lttng_statedump */
-               wake_up(&statedump_wq);
-}
-
-static
-int do_lttng_statedump(struct lttng_session *session)
-{
-       int cpu, ret;
-
-       trace_lttng_statedump_start(session);
-       ret = lttng_enumerate_process_states(session);
-       if (ret)
-               return ret;
-       /*
-        * FIXME
-        * ret = lttng_enumerate_vm_maps(session);
-        * if (ret)
-        *      return ret;
-        */
-       ret = lttng_list_interrupts(session);
-       if (ret)
-               return ret;
-       ret = lttng_enumerate_network_ip_interface(session);
-       if (ret)
-               return ret;
-       ret = lttng_enumerate_block_devices(session);
-       switch (ret) {
-       case 0:
-               break;
-       case -ENOSYS:
-               printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
-               break;
-       default:
-               return ret;
-       }
-       ret = lttng_enumerate_cpu_topology(session);
-       if (ret)
-               return ret;
-
-       /* TODO lttng_dump_idt_table(session); */
-       /* TODO lttng_dump_softirq_vec(session); */
-       /* TODO lttng_list_modules(session); */
-       /* TODO lttng_dump_swap_files(session); */
-
-       /*
-        * Fire off a work item on each CPU. Their sole purpose in life
-        * is to guarantee that each CPU has been in a state where it was
-        * in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
-        */
-       get_online_cpus();
-       atomic_set(&kernel_threads_to_run, num_online_cpus());
-       for_each_online_cpu(cpu) {
-               INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
-               schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
-       }
-       /* Wait for all threads to run */
-       __wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
-       put_online_cpus();
-       /* Our work is done */
-       trace_lttng_statedump_end(session);
-       return 0;
-}
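The tail of do_lttng_statedump() is a small barrier protocol: one delayed work item per online CPU, a shared countdown, and a wait queue. Since a work item only runs once its CPU has left trap/IRQ/soft-IRQ context, the final wake-up implies every online CPU has passed through a quiescent state. The mechanism isolated as a sketch, using the same kernel APIs as the code above (CPU hotplug is held off for the duration):

#include <linux/atomic.h>
#include <linux/cpu.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

static atomic_t remaining;
static DECLARE_WAIT_QUEUE_HEAD(done_wq);
static struct delayed_work barrier_work[NR_CPUS];

static void barrier_fn(struct work_struct *work)
{
	if (atomic_dec_and_test(&remaining))
		wake_up(&done_wq);	/* last CPU reports in */
}

static void wait_all_cpus_quiescent(void)
{
	int cpu;

	get_online_cpus();		/* freeze the online CPU set */
	atomic_set(&remaining, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&barrier_work[cpu], barrier_fn);
		schedule_delayed_work_on(cpu, &barrier_work[cpu], 0);
	}
	__wait_event(done_wq, atomic_read(&remaining) == 0);
	put_online_cpus();
}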
-
-/*
- * Called with session mutex held.
- */
-int lttng_statedump_start(struct lttng_session *session)
-{
-       return do_lttng_statedump(session);
-}
-EXPORT_SYMBOL_GPL(lttng_statedump_start);
-
-static
-int __init lttng_statedump_init(void)
-{
-       /*
-        * Allow the module to load even if the fixup cannot be done. This
-        * allows a seamless transition when the fix for the underlying
-        * issue is merged into the Linux kernel, and when tracepoint.c
-        * "tracepoint_module_notify" is turned into a static function.
-        */
-       (void) wrapper_lttng_fixup_sig(THIS_MODULE);
-       return 0;
-}
-
-module_init(lttng_statedump_init);
-
-static
-void __exit lttng_statedump_exit(void)
-{
-}
-
-module_exit(lttng_statedump_exit);
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Jean-Hugues Deschenes");
-MODULE_DESCRIPTION("LTTng statedump provider");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/lttng-string-utils.c b/lttng-string-utils.c
deleted file mode 100644 (file)
index d944790..0000000
+++ /dev/null
@@ -1,356 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
- */
-
-#include <linux/types.h>
-
-#include <lttng/string-utils.h>
-
-enum star_glob_pattern_type_flags {
-       STAR_GLOB_PATTERN_TYPE_FLAG_NONE        = 0,
-       STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN     = (1U << 0),
-       STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY    = (1U << 1),
-};
-
-static
-enum star_glob_pattern_type_flags strutils_test_glob_pattern(const char *pattern)
-{
-       enum star_glob_pattern_type_flags ret =
-               STAR_GLOB_PATTERN_TYPE_FLAG_NONE;
-       const char *p;
-
-       for (p = pattern; *p != '\0'; p++) {
-               switch (*p) {
-               case '*':
-                       ret = STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
-
-                       if (p[1] == '\0') {
-                               ret |= STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
-                       }
-                       goto end;
-               case '\\':
-                       p++;
-
-                       if (*p == '\0') {
-                               goto end;
-                       }
-                       break;
-               default:
-                       break;
-               }
-       }
-
-end:
-       return ret;
-}
-
-/*
- * Returns true if `pattern` is a star-only globbing pattern, that is,
- * it contains at least one non-escaped `*`.
- */
-bool strutils_is_star_glob_pattern(const char *pattern)
-{
-       return strutils_test_glob_pattern(pattern) &
-               STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
-}
-
-/*
- * Returns true if `pattern` is a globbing pattern whose only non-escaped
- * star is at its very end.
- */
-bool strutils_is_star_at_the_end_only_glob_pattern(const char *pattern)
-{
-       return strutils_test_glob_pattern(pattern) &
-               STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
-}
-
-struct string_with_len {
-       const char *str;
-       size_t len;
-};
-
-static
-char string_get_char_at_cb(size_t at, void *data)
-{
-       struct string_with_len *string_with_len = data;
-
-       if (at >= string_with_len->len) {
-               return '\0';
-       }
-
-       return string_with_len->str[at];
-}
-
-/*
- * Globbing matching function with the star feature only (`?` and
- * character sets are not supported). This matches `candidate` (plain
- * string) against `pattern`. A literal star can be escaped with `\` in
- * `pattern`.
- *
- * `pattern_len` or `candidate_len` can be greater than the actual
- * string length of `pattern` or `candidate` if the string is
- * null-terminated.
- */
-bool strutils_star_glob_match(const char *pattern, size_t pattern_len,
-               const char *candidate, size_t candidate_len)
-{
-       struct string_with_len pattern_with_len = {
-               pattern, pattern_len
-       };
-       struct string_with_len candidate_with_len = {
-               candidate, candidate_len
-       };
-
-       return strutils_star_glob_match_char_cb(string_get_char_at_cb,
-               &pattern_with_len, string_get_char_at_cb,
-               &candidate_with_len);
-}
-
-bool strutils_star_glob_match_char_cb(
-               strutils_get_char_at_cb pattern_get_char_at_cb,
-               void *pattern_get_char_at_cb_data,
-               strutils_get_char_at_cb candidate_get_char_at_cb,
-               void *candidate_get_char_at_cb_data)
-{
-       size_t retry_p_at = 0, retry_c_at = 0, c_at, p_at;
-       char c, p, prev_p;
-       bool got_a_star = false;
-
-retry:
-       c_at = retry_c_at;
-       c = candidate_get_char_at_cb(c_at, candidate_get_char_at_cb_data);
-       p_at = retry_p_at;
-       p = pattern_get_char_at_cb(p_at, pattern_get_char_at_cb_data);
-
-       /*
-        * The concept here is to retry a match in the specific case
-        * where we already got a star. The retry position for the
-        * pattern is just after the most recent star, and the retry
-        * position for the candidate is the character following the
-        * last try's first character.
-        *
-        * Example:
-        *
-        *     candidate: hi ev every onyx one
-        *                ^
-        *     pattern:   hi*every*one
-        *                ^
-        *
-        *     candidate: hi ev every onyx one
-        *                 ^
-        *     pattern:   hi*every*one
-        *                 ^
-        *
-        *     candidate: hi ev every onyx one
-        *                  ^
-        *     pattern:   hi*every*one
-        *                  ^
-        *
-        *     candidate: hi ev every onyx one
-        *                  ^
-        *     pattern:   hi*every*one
-        *                   ^ MISMATCH
-        *
-        *     candidate: hi ev every onyx one
-        *                   ^
-        *     pattern:   hi*every*one
-        *                   ^
-        *
-        *     candidate: hi ev every onyx one
-        *                   ^^
-        *     pattern:   hi*every*one
-        *                   ^^
-        *
-        *     candidate: hi ev every onyx one
-        *                   ^ ^
-        *     pattern:   hi*every*one
-        *                   ^ ^ MISMATCH
-        *
-        *     candidate: hi ev every onyx one
-        *                    ^
-        *     pattern:   hi*every*one
-        *                   ^ MISMATCH
-        *
-        *     candidate: hi ev every onyx one
-        *                     ^
-        *     pattern:   hi*every*one
-        *                   ^ MISMATCH
-        *
-        *     candidate: hi ev every onyx one
-        *                      ^
-        *     pattern:   hi*every*one
-        *                   ^
-        *
-        *     candidate: hi ev every onyx one
-        *                      ^^
-        *     pattern:   hi*every*one
-        *                   ^^
-        *
-        *     candidate: hi ev every onyx one
-        *                      ^ ^
-        *     pattern:   hi*every*one
-        *                   ^ ^
-        *
-        *     candidate: hi ev every onyx one
-        *                      ^  ^
-        *     pattern:   hi*every*one
-        *                   ^  ^
-        *
-        *     candidate: hi ev every onyx one
-        *                      ^   ^
-        *     pattern:   hi*every*one
-        *                   ^   ^
-        *
-        *     candidate: hi ev every onyx one
-        *                           ^
-        *     pattern:   hi*every*one
-        *                        ^
-        *
-        *     candidate: hi ev every onyx one
-        *                           ^
-        *     pattern:   hi*every*one
-        *                         ^ MISMATCH
-        *
-        *     candidate: hi ev every onyx one
-        *                            ^
-        *     pattern:   hi*every*one
-        *                         ^
-        *
-        *     candidate: hi ev every onyx one
-        *                            ^^
-        *     pattern:   hi*every*one
-        *                         ^^
-        *
-        *     candidate: hi ev every onyx one
-        *                            ^ ^
-        *     pattern:   hi*every*one
-        *                         ^ ^ MISMATCH
-        *
-        *     candidate: hi ev every onyx one
-        *                             ^
-        *     pattern:   hi*every*one
-        *                         ^ MISMATCH
-        *
-        *     candidate: hi ev every onyx one
-        *                              ^
-        *     pattern:   hi*every*one
-        *                         ^ MISMATCH
-        *
-        *     candidate: hi ev every onyx one
-        *                               ^
-        *     pattern:   hi*every*one
-        *                         ^ MISMATCH
-        *
-        *     candidate: hi ev every onyx one
-        *                                ^
-        *     pattern:   hi*every*one
-        *                         ^ MISMATCH
-        *
-        *     candidate: hi ev every onyx one
-        *                                 ^
-        *     pattern:   hi*every*one
-        *                         ^
-        *
-        *     candidate: hi ev every onyx one
-        *                                 ^^
-        *     pattern:   hi*every*one
-        *                         ^^
-        *
-        *     candidate: hi ev every onyx one
-        *                                 ^ ^
-        *     pattern:   hi*every*one
-        *                         ^ ^
-        *
-        *     candidate: hi ev every onyx one
-        *                                 ^  ^
-        *     pattern:   hi*every*one
-        *                         ^  ^ SUCCESS
-        */
-       while (c != '\0') {
-               if (p == '\0') {
-                       goto end_of_pattern;
-               }
-
-               switch (p) {
-               case '*':
-               {
-                       char retry_p;
-
-                       got_a_star = true;
-
-                       /*
-                        * Our first try starts at the current candidate
-                        * character and after the star in the pattern.
-                        */
-                       retry_c_at = c_at;
-                       retry_p_at = p_at + 1;
-                       retry_p = pattern_get_char_at_cb(retry_p_at,
-                               pattern_get_char_at_cb_data);
-
-                       if (retry_p == '\0') {
-                               /*
-                                * Star at the end of the pattern at
-                                * this point: automatic match.
-                                */
-                               return true;
-                       }
-
-                       goto retry;
-               }
-               case '\\':
-                       /* Go to escaped character. */
-                       p_at++;
-                       p = pattern_get_char_at_cb(p_at,
-                               pattern_get_char_at_cb_data);
-
-                       /* Fall-through. */
-               default:
-                       /*
-                        * Compare the current (possibly escaped)
-                        * character.
-                        */
-                       if (p == '\0' || c != p) {
-end_of_pattern:
-                               /* Character mismatch OR end of pattern. */
-                               if (!got_a_star) {
-                                       /*
-                                        * We didn't get any star yet,
-                                        * so this first mismatch
-                                        * automatically makes the whole
-                                        * test fail.
-                                        */
-                                       return false;
-                               }
-
-                               /*
-                                * Next try: next candidate character,
-                                * original pattern character (following
-                                * the most recent star).
-                                */
-                               retry_c_at++;
-                               goto retry;
-                       }
-                       break;
-               }
-
-               /* Next pattern and candidate characters. */
-               c_at++;
-               c = candidate_get_char_at_cb(c_at,
-                       candidate_get_char_at_cb_data);
-               p_at++;
-               p = pattern_get_char_at_cb(p_at, pattern_get_char_at_cb_data);
-       }
-
-       /*
-        * We checked every candidate character and we're still in a
-        * success state: the only pattern character allowed to remain
-        * is a star.
-        */
-       if (p == '\0') {
-               return true;
-       }
-
-       prev_p = p;
-       p_at++;
-       p = pattern_get_char_at_cb(p_at, pattern_get_char_at_cb_data);
-       return prev_p == '*' && p == '\0';
-}
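For reference, the long trace in the comment above ends in a successful match. A small userspace harness, assuming the in-tree <lttng/string-utils.h> header and linking against this file, would behave as follows:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <lttng/string-utils.h>

static void check(const char *pattern, const char *candidate)
{
	bool ok = strutils_star_glob_match(pattern, strlen(pattern),
			candidate, strlen(candidate));

	printf("%-16s vs %-24s -> %s\n", pattern, candidate,
			ok ? "match" : "no match");
}

int main(void)
{
	check("hi*every*one", "hi ev every onyx one");	/* match (traced above) */
	check("hi*every*one", "hi every");		/* no match */
	check("a\\*b", "a*b");				/* escaped star: match */
	check("a\\*b", "axb");				/* no match */
	return 0;
}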
diff --git a/lttng-syscalls.c b/lttng-syscalls.c
deleted file mode 100644 (file)
index a5b5f40..0000000
+++ /dev/null
@@ -1,1325 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-syscalls.c
- *
- * LTTng syscall probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/compat.h>
-#include <linux/err.h>
-#include <linux/bitmap.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/seq_file.h>
-#include <linux/stringify.h>
-#include <linux/file.h>
-#include <linux/anon_inodes.h>
-#include <linux/fcntl.h>
-#include <asm/ptrace.h>
-#include <asm/syscall.h>
-
-#include <lttng/bitfield.h>
-#include <wrapper/tracepoint.h>
-#include <wrapper/file.h>
-#include <wrapper/rcu.h>
-#include <wrapper/syscall.h>
-#include <lttng/events.h>
-
-#ifndef CONFIG_COMPAT
-# ifndef is_compat_task
-#  define is_compat_task()     (0)
-# endif
-#endif
-
-/* in_compat_syscall appears in kernel 4.6. */
-#ifndef in_compat_syscall
- #define in_compat_syscall()   is_compat_task()
-#endif
-
-enum sc_type {
-       SC_TYPE_ENTRY,
-       SC_TYPE_EXIT,
-       SC_TYPE_COMPAT_ENTRY,
-       SC_TYPE_COMPAT_EXIT,
-};
-
-#define SYSCALL_ENTRY_TOK              syscall_entry_
-#define COMPAT_SYSCALL_ENTRY_TOK       compat_syscall_entry_
-#define SYSCALL_EXIT_TOK               syscall_exit_
-#define COMPAT_SYSCALL_EXIT_TOK                compat_syscall_exit_
-
-#define SYSCALL_ENTRY_STR              __stringify(SYSCALL_ENTRY_TOK)
-#define COMPAT_SYSCALL_ENTRY_STR       __stringify(COMPAT_SYSCALL_ENTRY_TOK)
-#define SYSCALL_EXIT_STR               __stringify(SYSCALL_EXIT_TOK)
-#define COMPAT_SYSCALL_EXIT_STR                __stringify(COMPAT_SYSCALL_EXIT_TOK)
-
-static
-void syscall_entry_probe(void *__data, struct pt_regs *regs, long id);
-static
-void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret);
-
-/*
- * Forward declarations for old kernels.
- */
-struct mmsghdr;
-struct rlimit64;
-struct oldold_utsname;
-struct old_utsname;
-struct sel_arg_struct;
-struct mmap_arg_struct;
-struct file_handle;
-struct user_msghdr;
-
-/*
- * Forward declarations for kernels >= 5.6.
- */
-struct timex;
-struct timeval;
-struct itimerval;
-struct itimerspec;
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
-typedef __kernel_old_time_t time_t;
-#endif
-
-#ifdef IA32_NR_syscalls
-#define NR_compat_syscalls IA32_NR_syscalls
-#else
-#define NR_compat_syscalls NR_syscalls
-#endif
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TP_MODULE_NOINIT
-#define TRACE_INCLUDE_PATH instrumentation/syscalls/headers
-
-#define PARAMS(args...)        args
-
-/* Handle unknown syscalls */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM syscalls_unknown
-#include <instrumentation/syscalls/headers/syscalls_unknown.h>
-#undef TRACE_SYSTEM
-
-#define SC_ENTER
-
-#undef sc_exit
-#define sc_exit(...)
-#undef sc_in
-#define sc_in(...)     __VA_ARGS__
-#undef sc_out
-#define sc_out(...)
-#undef sc_inout
-#define sc_inout(...)  __VA_ARGS__
-
-/* Hijack probe callback for system call enter */
-#undef TP_PROBE_CB
-#define TP_PROBE_CB(_template)         &syscall_entry_probe
-#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
-       LTTNG_TRACEPOINT_EVENT(syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
-               PARAMS(_fields))
-#define SC_LTTNG_TRACEPOINT_EVENT_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
-       LTTNG_TRACEPOINT_EVENT_CODE(syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
-               PARAMS(_locvar), PARAMS(_code_pre),                             \
-               PARAMS(_fields), PARAMS(_code_post))
-#define SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
-       LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(syscall_entry_##_name, PARAMS(_fields))
-#define SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(_template, _name)            \
-       LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(syscall_entry_##_template, syscall_entry_##_name)
-/* Enumerations only defined at first inclusion. */
-#define SC_LTTNG_TRACEPOINT_ENUM(_name, _values) \
-       LTTNG_TRACEPOINT_ENUM(_name, PARAMS(_values))
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM syscall_entry_integers
-#define TRACE_INCLUDE_FILE syscalls_integers
-#include <instrumentation/syscalls/headers/syscalls_integers.h>
-#undef TRACE_INCLUDE_FILE
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM syscall_entry_pointers
-#define TRACE_INCLUDE_FILE syscalls_pointers
-#include <instrumentation/syscalls/headers/syscalls_pointers.h>
-#undef TRACE_INCLUDE_FILE
-#undef TRACE_SYSTEM
-#undef SC_LTTNG_TRACEPOINT_ENUM
-#undef SC_LTTNG_TRACEPOINT_EVENT_CODE
-#undef SC_LTTNG_TRACEPOINT_EVENT
-#undef SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
-#undef SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
-#undef TP_PROBE_CB
-#undef _TRACE_SYSCALLS_INTEGERS_H
-#undef _TRACE_SYSCALLS_POINTERS_H
-
-/* Hijack probe callback for compat system call enter */
-#define TP_PROBE_CB(_template)         &syscall_entry_probe
-#define LTTNG_SC_COMPAT
-#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
-       LTTNG_TRACEPOINT_EVENT(compat_syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
-               PARAMS(_fields))
-#define SC_LTTNG_TRACEPOINT_EVENT_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
-       LTTNG_TRACEPOINT_EVENT_CODE(compat_syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
-               PARAMS(_locvar), PARAMS(_code_pre), PARAMS(_fields), PARAMS(_code_post))
-#define SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
-       LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(compat_syscall_entry_##_name, PARAMS(_fields))
-#define SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(_template, _name)            \
-       LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(compat_syscall_entry_##_template, \
-               compat_syscall_entry_##_name)
-/* Enumerations only defined at initial inclusion (not here). */
-#define SC_LTTNG_TRACEPOINT_ENUM(_name, _values)
-#define TRACE_SYSTEM compat_syscall_entry_integers
-#define TRACE_INCLUDE_FILE compat_syscalls_integers
-#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
-#undef TRACE_INCLUDE_FILE
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM compat_syscall_entry_pointers
-#define TRACE_INCLUDE_FILE compat_syscalls_pointers
-#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
-#undef TRACE_INCLUDE_FILE
-#undef TRACE_SYSTEM
-#undef SC_LTTNG_TRACEPOINT_ENUM
-#undef SC_LTTNG_TRACEPOINT_EVENT_CODE
-#undef SC_LTTNG_TRACEPOINT_EVENT
-#undef SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
-#undef SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
-#undef TP_PROBE_CB
-#undef _TRACE_SYSCALLS_INTEGERS_H
-#undef _TRACE_SYSCALLS_POINTERS_H
-#undef LTTNG_SC_COMPAT
-
-#undef SC_ENTER
-
-#define SC_EXIT
-
-#undef sc_exit
-#define sc_exit(...)           __VA_ARGS__
-#undef sc_in
-#define sc_in(...)
-#undef sc_out
-#define sc_out(...)            __VA_ARGS__
-#undef sc_inout
-#define sc_inout(...)          __VA_ARGS__
-
-/* Hijack probe callback for system call exit */
-#define TP_PROBE_CB(_template)         &syscall_exit_probe
-#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
-       LTTNG_TRACEPOINT_EVENT(syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
-               PARAMS(_fields))
-#define SC_LTTNG_TRACEPOINT_EVENT_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
-       LTTNG_TRACEPOINT_EVENT_CODE(syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
-               PARAMS(_locvar), PARAMS(_code_pre), PARAMS(_fields), PARAMS(_code_post))
-#define SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
-       LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(syscall_exit_##_name, PARAMS(_fields))
-#define SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(_template, _name)            \
-       LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(syscall_exit_##_template,        \
-               syscall_exit_##_name)
-/* Enumerations only defined at initial inclusion (not here). */
-#define SC_LTTNG_TRACEPOINT_ENUM(_name, _values)
-#define TRACE_SYSTEM syscall_exit_integers
-#define TRACE_INCLUDE_FILE syscalls_integers
-#include <instrumentation/syscalls/headers/syscalls_integers.h>
-#undef TRACE_INCLUDE_FILE
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM syscall_exit_pointers
-#define TRACE_INCLUDE_FILE syscalls_pointers
-#include <instrumentation/syscalls/headers/syscalls_pointers.h>
-#undef TRACE_INCLUDE_FILE
-#undef TRACE_SYSTEM
-#undef SC_LTTNG_TRACEPOINT_ENUM
-#undef SC_LTTNG_TRACEPOINT_EVENT_CODE
-#undef SC_LTTNG_TRACEPOINT_EVENT
-#undef SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
-#undef SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
-#undef TP_PROBE_CB
-#undef _TRACE_SYSCALLS_INTEGERS_H
-#undef _TRACE_SYSCALLS_POINTERS_H
-
-
-/* Hijack probe callback for compat system call exit */
-#define TP_PROBE_CB(_template)         &syscall_exit_probe
-#define LTTNG_SC_COMPAT
-#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
-       LTTNG_TRACEPOINT_EVENT(compat_syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
-               PARAMS(_fields))
-#define SC_LTTNG_TRACEPOINT_EVENT_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
-       LTTNG_TRACEPOINT_EVENT_CODE(compat_syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
-               PARAMS(_locvar), PARAMS(_code_pre), PARAMS(_fields), PARAMS(_code_post))
-#define SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
-       LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(compat_syscall_exit_##_name, PARAMS(_fields))
-#define SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(_template, _name)            \
-       LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(compat_syscall_exit_##_template, \
-               compat_syscall_exit_##_name)
-/* Enumerations only defined at initial inclusion (not here). */
-#define SC_LTTNG_TRACEPOINT_ENUM(_name, _values)
-#define TRACE_SYSTEM compat_syscall_exit_integers
-#define TRACE_INCLUDE_FILE compat_syscalls_integers
-#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
-#undef TRACE_INCLUDE_FILE
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM compat_syscall_exit_pointers
-#define TRACE_INCLUDE_FILE compat_syscalls_pointers
-#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
-#undef TRACE_INCLUDE_FILE
-#undef TRACE_SYSTEM
-#undef SC_LTTNG_TRACEPOINT_ENUM
-#undef SC_LTTNG_TRACEPOINT_EVENT_CODE
-#undef SC_LTTNG_TRACEPOINT_EVENT
-#undef SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
-#undef SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
-#undef TP_PROBE_CB
-#undef _TRACE_SYSCALLS_INTEGERS_H
-#undef _TRACE_SYSCALLS_POINTERS_H
-#undef LTTNG_SC_COMPAT
-
-#undef SC_EXIT
-
-#undef TP_MODULE_NOINIT
-#undef LTTNG_PACKAGE_BUILD
-#undef CREATE_TRACE_POINTS
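The block that just ended expands the same instrumentation headers several times, redefining TP_PROBE_CB and the SC_LTTNG_TRACEPOINT_* macros (and undefining the headers' include guards) between passes, so one set of syscall descriptions generates entry, exit, compat-entry and compat-exit probes. The underlying multiple-inclusion trick, reduced to a hypothetical two-file colors.h sketch:

/* colors.h: the data, described once, with no include guard. */
COLOR(red)
COLOR(green)
COLOR(blue)

/* colors.c: each pass redefines COLOR() to generate a different view. */
#define COLOR(name) color_##name,
enum color {
#include "colors.h"
	NR_COLORS
};
#undef COLOR

#define COLOR(name) #name,
static const char *color_names[] = {
#include "colors.h"
};
#undef COLOR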
-
-struct trace_syscall_entry {
-       void *func;
-       const struct lttng_event_desc *desc;
-       const struct lttng_event_field *fields;
-       unsigned int nrargs;
-};
-
-#define CREATE_SYSCALL_TABLE
-
-#define SC_ENTER
-
-#undef sc_exit
-#define sc_exit(...)
-
-#undef TRACE_SYSCALL_TABLE
-#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
-       [ _nr ] = {                                             \
-               .func = __event_probe__syscall_entry_##_template, \
-               .nrargs = (_nrargs),                            \
-               .fields = __event_fields___syscall_entry_##_template, \
-               .desc = &__event_desc___syscall_entry_##_name,  \
-       },
-
-/* Syscall enter tracing table */
-static const struct trace_syscall_entry sc_table[] = {
-#include <instrumentation/syscalls/headers/syscalls_integers.h>
-#include <instrumentation/syscalls/headers/syscalls_pointers.h>
-};
-
-#undef TRACE_SYSCALL_TABLE
-#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
-       [ _nr ] = {                                             \
-               .func = __event_probe__compat_syscall_entry_##_template, \
-               .nrargs = (_nrargs),                            \
-               .fields = __event_fields___compat_syscall_entry_##_template, \
-               .desc = &__event_desc___compat_syscall_entry_##_name, \
-       },
-
-/* Compat syscall enter table */
-const struct trace_syscall_entry compat_sc_table[] = {
-#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
-#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
-};
-
-#undef SC_ENTER
-
-#define SC_EXIT
-
-#undef sc_exit
-#define sc_exit(...)           __VA_ARGS__
-
-#undef TRACE_SYSCALL_TABLE
-#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
-       [ _nr ] = {                                             \
-               .func = __event_probe__syscall_exit_##_template, \
-               .nrargs = (_nrargs),                            \
-               .fields = __event_fields___syscall_exit_##_template, \
-               .desc = &__event_desc___syscall_exit_##_name, \
-       },
-
-/* Syscall exit table */
-static const struct trace_syscall_entry sc_exit_table[] = {
-#include <instrumentation/syscalls/headers/syscalls_integers.h>
-#include <instrumentation/syscalls/headers/syscalls_pointers.h>
-};
-
-#undef TRACE_SYSCALL_TABLE
-#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
-       [ _nr ] = {                                             \
-               .func = __event_probe__compat_syscall_exit_##_template, \
-               .nrargs = (_nrargs),                            \
-               .fields = __event_fields___compat_syscall_exit_##_template, \
-               .desc = &__event_desc___compat_syscall_exit_##_name, \
-       },
-
-/* Compat syscall exit table */
-const struct trace_syscall_entry compat_sc_exit_table[] = {
-#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
-#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
-};
-
-#undef SC_EXIT
-
-#undef CREATE_SYSCALL_TABLE
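TRACE_SYSCALL_TABLE expands into designated initializers ([ _nr ] = { ... }), so each table is automatically sized by the highest syscall number it mentions and every unlisted slot is zero-filled; the probes below treat such empty slots as unknown syscalls. A standalone illustration (the syscall numbers here are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct entry {
	const char *name;
	unsigned int nrargs;
};

/* Sparse table: gaps stay zero-initialized, just like sc_table[]. */
static const struct entry table[] = {
	[0] = { "read",  3 },
	[1] = { "write", 3 },
	[5] = { "open",  3 },	/* hypothetical numbering */
};

int main(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(table); i++)	/* ARRAY_SIZE(table) == 6 */
		printf("%zu: %s\n", i,
			table[i].name ? table[i].name : "(unknown)");
	return 0;
}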
-
-struct lttng_syscall_filter {
-       DECLARE_BITMAP(sc, NR_syscalls);
-       DECLARE_BITMAP(sc_compat, NR_compat_syscalls);
-};
-
-static void syscall_entry_unknown(struct lttng_event *event,
-       struct pt_regs *regs, unsigned int id)
-{
-       unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-       lttng_syscall_get_arguments(current, regs, args);
-       if (unlikely(in_compat_syscall()))
-               __event_probe__compat_syscall_entry_unknown(event, id, args);
-       else
-               __event_probe__syscall_entry_unknown(event, id, args);
-}
-
-void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
-{
-       struct lttng_channel *chan = __data;
-       struct lttng_event *event, *unknown_event;
-       const struct trace_syscall_entry *table, *entry;
-       size_t table_len;
-
-       if (unlikely(in_compat_syscall())) {
-               struct lttng_syscall_filter *filter;
-
-               filter = lttng_rcu_dereference(chan->sc_filter);
-               if (filter) {
-                       if (id < 0 || id >= NR_compat_syscalls
-                               || !test_bit(id, filter->sc_compat)) {
-                               /* System call filtered out. */
-                               return;
-                       }
-               }
-               table = compat_sc_table;
-               table_len = ARRAY_SIZE(compat_sc_table);
-               unknown_event = chan->sc_compat_unknown;
-       } else {
-               struct lttng_syscall_filter *filter;
-
-               filter = lttng_rcu_dereference(chan->sc_filter);
-               if (filter) {
-                       if (id < 0 || id >= NR_syscalls
-                               || !test_bit(id, filter->sc)) {
-                               /* System call filtered out. */
-                               return;
-                       }
-               }
-               table = sc_table;
-               table_len = ARRAY_SIZE(sc_table);
-               unknown_event = chan->sc_unknown;
-       }
-       if (unlikely(id < 0 || id >= table_len)) {
-               syscall_entry_unknown(unknown_event, regs, id);
-               return;
-       }
-       if (unlikely(in_compat_syscall()))
-               event = chan->compat_sc_table[id];
-       else
-               event = chan->sc_table[id];
-       if (unlikely(!event)) {
-               syscall_entry_unknown(unknown_event, regs, id);
-               return;
-       }
-       entry = &table[id];
-       WARN_ON_ONCE(!entry);
-
-       switch (entry->nrargs) {
-       case 0:
-       {
-               void (*fptr)(void *__data) = entry->func;
-
-               fptr(event);
-               break;
-       }
-       case 1:
-       {
-               void (*fptr)(void *__data, unsigned long arg0) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0]);
-               break;
-       }
-       case 2:
-       {
-               void (*fptr)(void *__data,
-                       unsigned long arg0,
-                       unsigned long arg1) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0], args[1]);
-               break;
-       }
-       case 3:
-       {
-               void (*fptr)(void *__data,
-                       unsigned long arg0,
-                       unsigned long arg1,
-                       unsigned long arg2) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0], args[1], args[2]);
-               break;
-       }
-       case 4:
-       {
-               void (*fptr)(void *__data,
-                       unsigned long arg0,
-                       unsigned long arg1,
-                       unsigned long arg2,
-                       unsigned long arg3) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0], args[1], args[2], args[3]);
-               break;
-       }
-       case 5:
-       {
-               void (*fptr)(void *__data,
-                       unsigned long arg0,
-                       unsigned long arg1,
-                       unsigned long arg2,
-                       unsigned long arg3,
-                       unsigned long arg4) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0], args[1], args[2], args[3], args[4]);
-               break;
-       }
-       case 6:
-       {
-               void (*fptr)(void *__data,
-                       unsigned long arg0,
-                       unsigned long arg1,
-                       unsigned long arg2,
-                       unsigned long arg3,
-                       unsigned long arg4,
-                       unsigned long arg5) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0], args[1], args[2],
-                       args[3], args[4], args[5]);
-               break;
-       }
-       default:
-               break;
-       }
-}
-
-static void syscall_exit_unknown(struct lttng_event *event,
-       struct pt_regs *regs, int id, long ret)
-{
-       unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-       lttng_syscall_get_arguments(current, regs, args);
-       if (unlikely(in_compat_syscall()))
-               __event_probe__compat_syscall_exit_unknown(event, id, ret,
-                       args);
-       else
-               __event_probe__syscall_exit_unknown(event, id, ret, args);
-}
-
-void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
-{
-       struct lttng_channel *chan = __data;
-       struct lttng_event *event, *unknown_event;
-       const struct trace_syscall_entry *table, *entry;
-       size_t table_len;
-       long id;
-
-       id = syscall_get_nr(current, regs);
-       if (unlikely(in_compat_syscall())) {
-               struct lttng_syscall_filter *filter;
-
-               filter = lttng_rcu_dereference(chan->sc_filter);
-               if (filter) {
-                       if (id < 0 || id >= NR_compat_syscalls
-                               || !test_bit(id, filter->sc_compat)) {
-                               /* System call filtered out. */
-                               return;
-                       }
-               }
-               table = compat_sc_exit_table;
-               table_len = ARRAY_SIZE(compat_sc_exit_table);
-               unknown_event = chan->compat_sc_exit_unknown;
-       } else {
-               struct lttng_syscall_filter *filter;
-
-               filter = lttng_rcu_dereference(chan->sc_filter);
-               if (filter) {
-                       if (id < 0 || id >= NR_syscalls
-                               || !test_bit(id, filter->sc)) {
-                               /* System call filtered out. */
-                               return;
-                       }
-               }
-               table = sc_exit_table;
-               table_len = ARRAY_SIZE(sc_exit_table);
-               unknown_event = chan->sc_exit_unknown;
-       }
-       if (unlikely(id < 0 || id >= table_len)) {
-               syscall_exit_unknown(unknown_event, regs, id, ret);
-               return;
-       }
-       if (unlikely(in_compat_syscall()))
-               event = chan->compat_sc_exit_table[id];
-       else
-               event = chan->sc_exit_table[id];
-       if (unlikely(!event)) {
-               syscall_exit_unknown(unknown_event, regs, id, ret);
-               return;
-       }
-       entry = &table[id];
-       WARN_ON_ONCE(!entry);
-
-       switch (entry->nrargs) {
-       case 0:
-       {
-               void (*fptr)(void *__data, long ret) = entry->func;
-
-               fptr(event, ret);
-               break;
-       }
-       case 1:
-       {
-               void (*fptr)(void *__data,
-                       long ret,
-                       unsigned long arg0) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, ret, args[0]);
-               break;
-       }
-       case 2:
-       {
-               void (*fptr)(void *__data,
-                       long ret,
-                       unsigned long arg0,
-                       unsigned long arg1) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, ret, args[0], args[1]);
-               break;
-       }
-       case 3:
-       {
-               void (*fptr)(void *__data,
-                       long ret,
-                       unsigned long arg0,
-                       unsigned long arg1,
-                       unsigned long arg2) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, ret, args[0], args[1], args[2]);
-               break;
-       }
-       case 4:
-       {
-               void (*fptr)(void *__data,
-                       long ret,
-                       unsigned long arg0,
-                       unsigned long arg1,
-                       unsigned long arg2,
-                       unsigned long arg3) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, ret, args[0], args[1], args[2], args[3]);
-               break;
-       }
-       case 5:
-       {
-               void (*fptr)(void *__data,
-                       long ret,
-                       unsigned long arg0,
-                       unsigned long arg1,
-                       unsigned long arg2,
-                       unsigned long arg3,
-                       unsigned long arg4) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, ret, args[0], args[1], args[2], args[3], args[4]);
-               break;
-       }
-       case 6:
-       {
-               void (*fptr)(void *__data,
-                       long ret,
-                       unsigned long arg0,
-                       unsigned long arg1,
-                       unsigned long arg2,
-                       unsigned long arg3,
-                       unsigned long arg4,
-                       unsigned long arg5) = entry->func;
-               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-               lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, ret, args[0], args[1], args[2],
-                       args[3], args[4], args[5]);
-               break;
-       }
-       default:
-               break;
-       }
-}
-
-/*
- * noinline to diminish caller stack size.
- * Should be called with sessions lock held.
- */
-static
-int fill_table(const struct trace_syscall_entry *table, size_t table_len,
-       struct lttng_event **chan_table, struct lttng_channel *chan,
-       void *filter, enum sc_type type)
-{
-       const struct lttng_event_desc *desc;
-       unsigned int i;
-
-       /* Allocate events for each syscall, insert into table */
-       for (i = 0; i < table_len; i++) {
-               struct lttng_kernel_event ev;
-               desc = table[i].desc;
-
-               if (!desc) {
-                       /* Unknown syscall */
-                       continue;
-               }
-               /*
-                * Skip entries already populated by a previous failed
-                * registration attempt for this channel.
-                */
-               if (chan_table[i])
-                       continue;
-               memset(&ev, 0, sizeof(ev));
-               switch (type) {
-               case SC_TYPE_ENTRY:
-                       strncpy(ev.name, SYSCALL_ENTRY_STR,
-                               LTTNG_KERNEL_SYM_NAME_LEN);
-                       break;
-               case SC_TYPE_EXIT:
-                       strncpy(ev.name, SYSCALL_EXIT_STR,
-                               LTTNG_KERNEL_SYM_NAME_LEN);
-                       break;
-               case SC_TYPE_COMPAT_ENTRY:
-                       strncpy(ev.name, COMPAT_SYSCALL_ENTRY_STR,
-                               LTTNG_KERNEL_SYM_NAME_LEN);
-                       break;
-               case SC_TYPE_COMPAT_EXIT:
-                       strncpy(ev.name, COMPAT_SYSCALL_EXIT_STR,
-                               LTTNG_KERNEL_SYM_NAME_LEN);
-                       break;
-               default:
-                       BUG();
-                       break;
-               }
-               strncat(ev.name, desc->name,
-                       LTTNG_KERNEL_SYM_NAME_LEN - strlen(ev.name) - 1);
-               ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-               ev.instrumentation = LTTNG_KERNEL_SYSCALL;
-               chan_table[i] = _lttng_event_create(chan, &ev, filter,
-                                               desc, ev.instrumentation);
-               WARN_ON_ONCE(!chan_table[i]);
-               if (IS_ERR(chan_table[i])) {
-                       /*
-                        * If event registration fails after the first
-                        * event, we have no choice but to leave the
-                        * previously created events in place until they
-                        * are deleted by session teardown.
-                        */
-                       return PTR_ERR(chan_table[i]);
-               }
-       }
-       return 0;
-}
-
-/*
- * Should be called with sessions lock held.
- */
-int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
-{
-       struct lttng_kernel_event ev;
-       int ret;
-
-       wrapper_vmalloc_sync_mappings();
-
-       if (!chan->sc_table) {
-               /* create syscall table mapping syscall to events */
-               chan->sc_table = kzalloc(sizeof(struct lttng_event *)
-                                       * ARRAY_SIZE(sc_table), GFP_KERNEL);
-               if (!chan->sc_table)
-                       return -ENOMEM;
-       }
-       if (!chan->sc_exit_table) {
-               /* create syscall table mapping syscall exits to events */
-               chan->sc_exit_table = kzalloc(sizeof(struct lttng_event *)
-                                       * ARRAY_SIZE(sc_exit_table), GFP_KERNEL);
-               if (!chan->sc_exit_table)
-                       return -ENOMEM;
-       }
-
-#ifdef CONFIG_COMPAT
-       if (!chan->compat_sc_table) {
-               /* create syscall table mapping compat syscall to events */
-               chan->compat_sc_table = kzalloc(sizeof(struct lttng_event *)
-                                       * ARRAY_SIZE(compat_sc_table), GFP_KERNEL);
-               if (!chan->compat_sc_table)
-                       return -ENOMEM;
-       }
-
-       if (!chan->compat_sc_exit_table) {
-               /* create syscall table mapping compat syscall exits to events */
-               chan->compat_sc_exit_table = kzalloc(sizeof(struct lttng_event *)
-                                       * ARRAY_SIZE(compat_sc_exit_table), GFP_KERNEL);
-               if (!chan->compat_sc_exit_table)
-                       return -ENOMEM;
-       }
-#endif
-       if (!chan->sc_unknown) {
-               const struct lttng_event_desc *desc =
-                       &__event_desc___syscall_entry_unknown;
-
-               memset(&ev, 0, sizeof(ev));
-               strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
-               ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-               ev.instrumentation = LTTNG_KERNEL_SYSCALL;
-               chan->sc_unknown = _lttng_event_create(chan, &ev, filter,
-                                               desc,
-                                               ev.instrumentation);
-               WARN_ON_ONCE(!chan->sc_unknown);
-               if (IS_ERR(chan->sc_unknown)) {
-                       return PTR_ERR(chan->sc_unknown);
-               }
-       }
-
-       if (!chan->sc_compat_unknown) {
-               const struct lttng_event_desc *desc =
-                       &__event_desc___compat_syscall_entry_unknown;
-
-               memset(&ev, 0, sizeof(ev));
-               strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
-               ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-               ev.instrumentation = LTTNG_KERNEL_SYSCALL;
-               chan->sc_compat_unknown = _lttng_event_create(chan, &ev, filter,
-                                               desc,
-                                               ev.instrumentation);
-               WARN_ON_ONCE(!chan->sc_compat_unknown);
-               if (IS_ERR(chan->sc_compat_unknown)) {
-                       return PTR_ERR(chan->sc_compat_unknown);
-               }
-       }
-
-       if (!chan->compat_sc_exit_unknown) {
-               const struct lttng_event_desc *desc =
-                       &__event_desc___compat_syscall_exit_unknown;
-
-               memset(&ev, 0, sizeof(ev));
-               strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
-               ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-               ev.instrumentation = LTTNG_KERNEL_SYSCALL;
-               chan->compat_sc_exit_unknown = _lttng_event_create(chan, &ev,
-                                               filter, desc,
-                                               ev.instrumentation);
-               WARN_ON_ONCE(!chan->compat_sc_exit_unknown);
-               if (IS_ERR(chan->compat_sc_exit_unknown)) {
-                       return PTR_ERR(chan->compat_sc_exit_unknown);
-               }
-       }
-
-       if (!chan->sc_exit_unknown) {
-               const struct lttng_event_desc *desc =
-                       &__event_desc___syscall_exit_unknown;
-
-               memset(&ev, 0, sizeof(ev));
-               strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
-               ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-               ev.instrumentation = LTTNG_KERNEL_SYSCALL;
-               chan->sc_exit_unknown = _lttng_event_create(chan, &ev, filter,
-                                               desc, ev.instrumentation);
-               WARN_ON_ONCE(!chan->sc_exit_unknown);
-               if (IS_ERR(chan->sc_exit_unknown)) {
-                       return PTR_ERR(chan->sc_exit_unknown);
-               }
-       }
-
-       ret = fill_table(sc_table, ARRAY_SIZE(sc_table),
-                       chan->sc_table, chan, filter, SC_TYPE_ENTRY);
-       if (ret)
-               return ret;
-       ret = fill_table(sc_exit_table, ARRAY_SIZE(sc_exit_table),
-                       chan->sc_exit_table, chan, filter, SC_TYPE_EXIT);
-       if (ret)
-               return ret;
-
-#ifdef CONFIG_COMPAT
-       ret = fill_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
-                       chan->compat_sc_table, chan, filter,
-                       SC_TYPE_COMPAT_ENTRY);
-       if (ret)
-               return ret;
-       ret = fill_table(compat_sc_exit_table, ARRAY_SIZE(compat_sc_exit_table),
-                       chan->compat_sc_exit_table, chan, filter,
-                       SC_TYPE_COMPAT_EXIT);
-       if (ret)
-               return ret;
-#endif
-       if (!chan->sys_enter_registered) {
-               ret = lttng_wrapper_tracepoint_probe_register("sys_enter",
-                               (void *) syscall_entry_probe, chan);
-               if (ret)
-                       return ret;
-               chan->sys_enter_registered = 1;
-       }
-       /*
-        * We change the name of the sys_exit tracepoint due to a
-        * namespace conflict with the sys_exit system call entry.
-        */
-       if (!chan->sys_exit_registered) {
-               ret = lttng_wrapper_tracepoint_probe_register("sys_exit",
-                               (void *) syscall_exit_probe, chan);
-               if (ret) {
-                       WARN_ON_ONCE(lttng_wrapper_tracepoint_probe_unregister("sys_enter",
-                               (void *) syscall_entry_probe, chan));
-                       return ret;
-               }
-               chan->sys_exit_registered = 1;
-       }
-       return ret;
-}
-
-/*
- * Only called at session destruction.
- */
-int lttng_syscalls_unregister(struct lttng_channel *chan)
-{
-       int ret;
-
-       if (!chan->sc_table)
-               return 0;
-       if (chan->sys_enter_registered) {
-               ret = lttng_wrapper_tracepoint_probe_unregister("sys_enter",
-                               (void *) syscall_entry_probe, chan);
-               if (ret)
-                       return ret;
-               chan->sys_enter_registered = 0;
-       }
-       if (chan->sys_exit_registered) {
-               ret = lttng_wrapper_tracepoint_probe_unregister("sys_exit",
-                               (void *) syscall_exit_probe, chan);
-               if (ret)
-                       return ret;
-               chan->sys_exit_registered = 0;
-       }
-       /* lttng_event destroy will be performed by lttng_session_destroy() */
-       kfree(chan->sc_table);
-       kfree(chan->sc_exit_table);
-#ifdef CONFIG_COMPAT
-       kfree(chan->compat_sc_table);
-       kfree(chan->compat_sc_exit_table);
-#endif
-       kfree(chan->sc_filter);
-       return 0;
-}
-
-static
-int get_syscall_nr(const char *syscall_name)
-{
-       int syscall_nr = -1;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(sc_table); i++) {
-               const struct trace_syscall_entry *entry;
-               const char *it_name;
-
-               entry = &sc_table[i];
-               if (!entry->desc)
-                       continue;
-               it_name = entry->desc->name;
-               it_name += strlen(SYSCALL_ENTRY_STR);
-               if (!strcmp(syscall_name, it_name)) {
-                       syscall_nr = i;
-                       break;
-               }
-       }
-       return syscall_nr;
-}
-
-static
-int get_compat_syscall_nr(const char *syscall_name)
-{
-       int syscall_nr = -1;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(compat_sc_table); i++) {
-               const struct trace_syscall_entry *entry;
-               const char *it_name;
-
-               entry = &compat_sc_table[i];
-               if (!entry->desc)
-                       continue;
-               it_name = entry->desc->name;
-               it_name += strlen(COMPAT_SYSCALL_ENTRY_STR);
-               if (!strcmp(syscall_name, it_name)) {
-                       syscall_nr = i;
-                       break;
-               }
-       }
-       return syscall_nr;
-}
-
-static
-uint32_t get_sc_tables_len(void)
-{
-       return ARRAY_SIZE(sc_table) + ARRAY_SIZE(compat_sc_table);
-}
-
-int lttng_syscall_filter_enable(struct lttng_channel *chan,
-               const char *name)
-{
-       int syscall_nr, compat_syscall_nr, ret;
-       struct lttng_syscall_filter *filter;
-
-       WARN_ON_ONCE(!chan->sc_table);
-
-       if (!name) {
-               /* Enable all system calls by removing filter */
-               if (chan->sc_filter) {
-                       filter = chan->sc_filter;
-                       rcu_assign_pointer(chan->sc_filter, NULL);
-                       synchronize_trace();
-                       kfree(filter);
-               }
-               chan->syscall_all = 1;
-               return 0;
-       }
-
-       if (!chan->sc_filter) {
-               if (chan->syscall_all) {
-                       /*
-                        * All syscalls are already enabled.
-                        */
-                       return -EEXIST;
-               }
-               filter = kzalloc(sizeof(struct lttng_syscall_filter),
-                               GFP_KERNEL);
-               if (!filter)
-                       return -ENOMEM;
-       } else {
-               filter = chan->sc_filter;
-       }
-       syscall_nr = get_syscall_nr(name);
-       compat_syscall_nr = get_compat_syscall_nr(name);
-       if (syscall_nr < 0 && compat_syscall_nr < 0) {
-               ret = -ENOENT;
-               goto error;
-       }
-       if (syscall_nr >= 0) {
-               if (test_bit(syscall_nr, filter->sc)) {
-                       ret = -EEXIST;
-                       goto error;
-               }
-               bitmap_set(filter->sc, syscall_nr, 1);
-       }
-       if (compat_syscall_nr >= 0) {
-               if (test_bit(compat_syscall_nr, filter->sc_compat)) {
-                       ret = -EEXIST;
-                       goto error;
-               }
-               bitmap_set(filter->sc_compat, compat_syscall_nr, 1);
-       }
-       if (!chan->sc_filter)
-               rcu_assign_pointer(chan->sc_filter, filter);
-       return 0;
-
-error:
-       if (!chan->sc_filter)
-               kfree(filter);
-       return ret;
-}
-
-int lttng_syscall_filter_disable(struct lttng_channel *chan,
-               const char *name)
-{
-       int syscall_nr, compat_syscall_nr, ret;
-       struct lttng_syscall_filter *filter;
-
-       WARN_ON_ONCE(!chan->sc_table);
-
-       if (!chan->sc_filter) {
-               if (!chan->syscall_all)
-                       return -EEXIST;
-               filter = kzalloc(sizeof(struct lttng_syscall_filter),
-                               GFP_KERNEL);
-               if (!filter)
-                       return -ENOMEM;
-               /* Trace all system calls, then apply disable. */
-               bitmap_set(filter->sc, 0, NR_syscalls);
-               bitmap_set(filter->sc_compat, 0, NR_compat_syscalls);
-       } else {
-               filter = chan->sc_filter;
-       }
-
-       if (!name) {
-               /* Fail if all syscalls are already disabled. */
-               if (bitmap_empty(filter->sc, NR_syscalls)
-                       && bitmap_empty(filter->sc_compat,
-                               NR_compat_syscalls)) {
-                       ret = -EEXIST;
-                       goto error;
-               }
-
-               /* Disable all system calls */
-               bitmap_clear(filter->sc, 0, NR_syscalls);
-               bitmap_clear(filter->sc_compat, 0, NR_compat_syscalls);
-               goto apply_filter;
-       }
-       syscall_nr = get_syscall_nr(name);
-       compat_syscall_nr = get_compat_syscall_nr(name);
-       if (syscall_nr < 0 && compat_syscall_nr < 0) {
-               ret = -ENOENT;
-               goto error;
-       }
-       if (syscall_nr >= 0) {
-               if (!test_bit(syscall_nr, filter->sc)) {
-                       ret = -EEXIST;
-                       goto error;
-               }
-               bitmap_clear(filter->sc, syscall_nr, 1);
-       }
-       if (compat_syscall_nr >= 0) {
-               if (!test_bit(compat_syscall_nr, filter->sc_compat)) {
-                       ret = -EEXIST;
-                       goto error;
-               }
-               bitmap_clear(filter->sc_compat, compat_syscall_nr, 1);
-       }
-apply_filter:
-       if (!chan->sc_filter)
-               rcu_assign_pointer(chan->sc_filter, filter);
-       chan->syscall_all = 0;
-       return 0;
-
-error:
-       if (!chan->sc_filter)
-               kfree(filter);
-       return ret;
-}
-
-static
-const struct trace_syscall_entry *syscall_list_get_entry(loff_t *pos)
-{
-       const struct trace_syscall_entry *entry;
-       int iter = 0;
-
-       for (entry = sc_table;
-                       entry < sc_table + ARRAY_SIZE(sc_table);
-                        entry++) {
-               if (iter++ >= *pos)
-                       return entry;
-       }
-       for (entry = compat_sc_table;
-                       entry < compat_sc_table + ARRAY_SIZE(compat_sc_table);
-                        entry++) {
-               if (iter++ >= *pos)
-                       return entry;
-       }
-       /* End of list */
-       return NULL;
-}
-
-static
-void *syscall_list_start(struct seq_file *m, loff_t *pos)
-{
-       return (void *) syscall_list_get_entry(pos);
-}
-
-static
-void *syscall_list_next(struct seq_file *m, void *p, loff_t *ppos)
-{
-       (*ppos)++;
-       return (void *) syscall_list_get_entry(ppos);
-}
-
-static
-void syscall_list_stop(struct seq_file *m, void *p)
-{
-}
-
-static
-int get_sc_table(const struct trace_syscall_entry *entry,
-               const struct trace_syscall_entry **table,
-               unsigned int *bitness)
-{
-       if (entry >= sc_table && entry < sc_table + ARRAY_SIZE(sc_table)) {
-               if (bitness)
-                       *bitness = BITS_PER_LONG;
-               if (table)
-                       *table = sc_table;
-               return 0;
-       }
-       if (!(entry >= compat_sc_table
-                       && entry < compat_sc_table + ARRAY_SIZE(compat_sc_table))) {
-               return -EINVAL;
-       }
-       if (bitness)
-               *bitness = 32;
-       if (table)
-               *table = compat_sc_table;
-       return 0;
-}
-
-static
-int syscall_list_show(struct seq_file *m, void *p)
-{
-       const struct trace_syscall_entry *table, *entry = p;
-       unsigned int bitness;
-       unsigned long index;
-       int ret;
-       const char *name;
-
-       ret = get_sc_table(entry, &table, &bitness);
-       if (ret)
-               return ret;
-       if (!entry->desc)
-               return 0;
-       if (table == sc_table) {
-               index = entry - table;
-               name = &entry->desc->name[strlen(SYSCALL_ENTRY_STR)];
-       } else {
-               index = (entry - table) + ARRAY_SIZE(sc_table);
-               name = &entry->desc->name[strlen(COMPAT_SYSCALL_ENTRY_STR)];
-       }
-       seq_printf(m,   "syscall { index = %lu; name = %s; bitness = %u; };\n",
-               index, name, bitness);
-       return 0;
-}
-
-static
-const struct seq_operations lttng_syscall_list_seq_ops = {
-       .start = syscall_list_start,
-       .next = syscall_list_next,
-       .stop = syscall_list_stop,
-       .show = syscall_list_show,
-};
-
-static
-int lttng_syscall_list_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &lttng_syscall_list_seq_ops);
-}
-
-const struct file_operations lttng_syscall_list_fops = {
-       .owner = THIS_MODULE,
-       .open = lttng_syscall_list_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = seq_release,
-};
-
-long lttng_channel_syscall_mask(struct lttng_channel *channel,
-               struct lttng_kernel_syscall_mask __user *usyscall_mask)
-{
-       uint32_t len, sc_tables_len, bitmask_len;
-       int ret = 0, bit;
-       char *tmp_mask;
-       struct lttng_syscall_filter *filter;
-
-       ret = get_user(len, &usyscall_mask->len);
-       if (ret)
-               return ret;
-       sc_tables_len = get_sc_tables_len();
-       bitmask_len = ALIGN(sc_tables_len, 8) >> 3;
-       if (len < sc_tables_len) {
-               return put_user(sc_tables_len, &usyscall_mask->len);
-       }
-       /* Array is large enough, we can copy array to user-space. */
-       tmp_mask = kzalloc(bitmask_len, GFP_KERNEL);
-       if (!tmp_mask)
-               return -ENOMEM;
-       filter = channel->sc_filter;
-
-       for (bit = 0; bit < ARRAY_SIZE(sc_table); bit++) {
-               char state;
-
-               if (channel->sc_table) {
-                       if (filter)
-                               state = test_bit(bit, filter->sc);
-                       else
-                               state = 1;
-               } else {
-                       state = 0;
-               }
-               bt_bitfield_write_be(tmp_mask, char, bit, 1, state);
-       }
-       for (; bit < sc_tables_len; bit++) {
-               char state;
-
-               if (channel->compat_sc_table) {
-                       if (filter)
-                               state = test_bit(bit - ARRAY_SIZE(sc_table),
-                                               filter->sc_compat);
-                       else
-                               state = 1;
-               } else {
-                       state = 0;
-               }
-               bt_bitfield_write_be(tmp_mask, char, bit, 1, state);
-       }
-       if (copy_to_user(usyscall_mask->mask, tmp_mask, bitmask_len))
-               ret = -EFAULT;
-       kfree(tmp_mask);
-       return ret;
-}
-
-int lttng_abi_syscall_list(void)
-{
-       struct file *syscall_list_file;
-       int file_fd, ret;
-
-       file_fd = lttng_get_unused_fd();
-       if (file_fd < 0) {
-               ret = file_fd;
-               goto fd_error;
-       }
-
-       syscall_list_file = anon_inode_getfile("[lttng_syscall_list]",
-                                         &lttng_syscall_list_fops,
-                                         NULL, O_RDWR);
-       if (IS_ERR(syscall_list_file)) {
-               ret = PTR_ERR(syscall_list_file);
-               goto file_error;
-       }
-       ret = lttng_syscall_list_fops.open(NULL, syscall_list_file);
-       if (ret < 0)
-               goto open_error;
-       fd_install(file_fd, syscall_list_file);
-       return file_fd;
-
-open_error:
-       fput(syscall_list_file);
-file_error:
-       put_unused_fd(file_fd);
-fd_error:
-       return ret;
-}
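
The filter paths above never copy-on-update: lttng_syscall_filter_enable() builds (or modifies in place) a bitmap and publishes it once with rcu_assign_pointer(), while the entry/exit probes read it through lttng_rcu_dereference() and test_bit(). A minimal self-contained sketch of that publish/lookup pattern — hypothetical names, plain RCU instead of the module's synchronize_trace() wrapper — under those assumptions:

#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#define NR_IDS 512

struct id_filter {
	DECLARE_BITMAP(ids, NR_IDS);
};

struct chan {
	struct id_filter __rcu *filter;	/* NULL means "no filtering". */
};

/* Update side (serialized by the caller): build, then publish once. */
static int chan_filter_enable(struct chan *c, int id)
{
	struct id_filter *f = rcu_dereference_protected(c->filter, 1);
	bool publish = false;

	if (!f) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			return -ENOMEM;
		publish = true;
	}
	set_bit(id, f->ids);	/* In-place bit update, as the code above does. */
	if (publish)
		rcu_assign_pointer(c->filter, f);
	return 0;
}

/* Read side, from a probe within an RCU read-side critical section. */
static bool chan_filter_match(struct chan *c, int id)
{
	struct id_filter *f = rcu_dereference(c->filter);

	/* No filter published yet: every ID passes. */
	return !f || (id >= 0 && id < NR_IDS && test_bit(id, f->ids));
}
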
diff --git a/lttng-tp-mempool.c b/lttng-tp-mempool.c
deleted file mode 100644 (file)
index 70ee5cc..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-tp-mempool.c
- *
- * Copyright (C) 2018 Julien Desfossez <jdesfossez@efficios.com>
- */
-
-#include <linux/slab.h>
-#include <linux/percpu.h>
-
-#include <lttng/tp-mempool.h>
-
-struct lttng_tp_buf_entry {
-       int cpu; /* To make sure we return the entry to the right pool. */
-       char buf[LTTNG_TP_MEMPOOL_BUF_SIZE];
-       struct list_head list;
-};
-
-/*
- * No exclusive access strategy for now: this memory pool is currently
- * only used from non-preemptible context, and the interrupt tracepoint
- * probes do not use this facility.
- */
-struct per_cpu_buf {
-       struct list_head free_list; /* Free struct lttng_tp_buf_entry. */
-};
-
-static struct per_cpu_buf __percpu *pool; /* Per-cpu buffer. */
-
-int lttng_tp_mempool_init(void)
-{
-       int ret, cpu;
-
-       /* The pool is only supposed to be allocated once. */
-       if (pool) {
-               WARN_ON_ONCE(1);
-               ret = -1;
-               goto end;
-       }
-
-       pool = alloc_percpu(struct per_cpu_buf);
-       if (!pool) {
-               ret = -ENOMEM;
-               goto end;
-       }
-
-       for_each_possible_cpu(cpu) {
-               struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
-
-               INIT_LIST_HEAD(&cpu_buf->free_list);
-       }
-
-       for_each_possible_cpu(cpu) {
-               int i;
-               struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
-
-               for (i = 0; i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU; i++) {
-                       struct lttng_tp_buf_entry *entry;
-
-                       entry = kzalloc_node(sizeof(struct lttng_tp_buf_entry),
-                                       GFP_KERNEL, cpu_to_node(cpu));
-                       if (!entry) {
-                               ret = -ENOMEM;
-                               goto error_free_pool;
-                       }
-                       entry->cpu = cpu;
-                       list_add_tail(&entry->list, &cpu_buf->free_list);
-               }
-       }
-
-       ret = 0;
-       goto end;
-
-error_free_pool:
-       lttng_tp_mempool_destroy();
-end:
-       return ret;
-}
-
-void lttng_tp_mempool_destroy(void)
-{
-       int cpu;
-
-       if (!pool) {
-               return;
-       }
-
-       for_each_possible_cpu(cpu) {
-               struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
-               struct lttng_tp_buf_entry *entry, *tmp;
-               int i = 0;
-
-               list_for_each_entry_safe(entry, tmp, &cpu_buf->free_list, list) {
-                       list_del(&entry->list);
-                       kfree(entry);
-                       i++;
-               }
-               if (i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU) {
-                       printk(KERN_WARNING "Leak detected in tp-mempool\n");
-               }
-       }
-       free_percpu(pool);
-       pool = NULL;
-}
-
-void *lttng_tp_mempool_alloc(size_t size)
-{
-       void *ret;
-       struct lttng_tp_buf_entry *entry;
-       struct per_cpu_buf *cpu_buf;
-       int cpu = smp_processor_id();
-
-       if (size > LTTNG_TP_MEMPOOL_BUF_SIZE) {
-               ret = NULL;
-               goto end;
-       }
-
-       cpu_buf = per_cpu_ptr(pool, cpu);
-       if (list_empty(&cpu_buf->free_list)) {
-               ret = NULL;
-               goto end;
-       }
-
-       entry = list_first_entry(&cpu_buf->free_list, struct lttng_tp_buf_entry, list);
-       /* Remove the entry from the free list. */
-       list_del(&entry->list);
-
-       memset(entry->buf, 0, LTTNG_TP_MEMPOOL_BUF_SIZE);
-
-       ret = (void *) entry->buf;
-
-end:
-       return ret;
-}
-
-void lttng_tp_mempool_free(void *ptr)
-{
-       struct lttng_tp_buf_entry *entry;
-       struct per_cpu_buf *cpu_buf;
-
-       if (!ptr)
-               goto end;
-       entry = container_of(ptr, struct lttng_tp_buf_entry, buf);
-       cpu_buf = per_cpu_ptr(pool, entry->cpu);
-       if (!cpu_buf)
-               goto end;
-       /* Add it to the free list. */
-       list_add_tail(&entry->list, &cpu_buf->free_list);
-
-end:
-       return;
-}
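
For illustration, a hypothetical caller of the pool API defined above: the probe is assumed to run in non-preemptible context, as the comment at the top of this file requires, and the buffer is returned to the pool of the CPU it came from, which lttng_tp_mempool_free() recovers from the entry header.

#include <linux/string.h>

#include <lttng/tp-mempool.h>

/* Hypothetical probe body (illustration only). */
static void example_probe(const void *payload, size_t len)
{
	char *buf;

	buf = lttng_tp_mempool_alloc(len);
	if (!buf)
		return;	/* Pool exhausted, or len > LTTNG_TP_MEMPOOL_BUF_SIZE. */
	memcpy(buf, payload, len);
	/* ... hand buf to the ring buffer here ... */
	lttng_tp_mempool_free(buf);
}
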
diff --git a/lttng-tracepoint.c b/lttng-tracepoint.c
deleted file mode 100644 (file)
index ed78a17..0000000
+++ /dev/null
@@ -1,439 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-tracepoint.c
- *
- * LTTng adaptation layer for Linux kernel 3.15+ tracepoints.
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/mutex.h>
-#include <linux/err.h>
-#include <linux/notifier.h>
-#include <linux/tracepoint.h>
-#include <linux/slab.h>
-#include <linux/jhash.h>
-#include <linux/module.h>
-
-#include <lttng/tracepoint.h>
-#include <wrapper/list.h>
-#include <wrapper/tracepoint.h>
-
-/*
- * Protect the tracepoint table. lttng_tracepoint_mutex nests within
- * kernel/tracepoint.c tp_modlist_mutex. kernel/tracepoint.c
- * tracepoint_mutex nests within lttng_tracepoint_mutex.
- */
-static
-DEFINE_MUTEX(lttng_tracepoint_mutex);
-
-#define TRACEPOINT_HASH_BITS 6
-#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
-static
-struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
-
-/*
- * The tracepoint entry is the node contained within the hash table. It
- * is a mapping from the "string" key to the struct tracepoint pointer.
- */
-struct tracepoint_entry {
-       struct hlist_node hlist;
-       struct tracepoint *tp;
-       int refcount;
-       struct list_head probes;
-       char name[0];
-};
-
-struct lttng_tp_probe {
-       struct tracepoint_func tp_func;
-       struct list_head list;
-};
-
-static
-int add_probe(struct tracepoint_entry *e, void *probe, void *data)
-{
-       struct lttng_tp_probe *p;
-       int found = 0;
-
-       list_for_each_entry(p, &e->probes, list) {
-               if (p->tp_func.func == probe && p->tp_func.data == data) {
-                       found = 1;
-                       break;
-               }
-       }
-       if (found)
-               return -EEXIST;
-       p = kmalloc(sizeof(struct lttng_tp_probe), GFP_KERNEL);
-       if (!p)
-               return -ENOMEM;
-       p->tp_func.func = probe;
-       p->tp_func.data = data;
-       list_add(&p->list, &e->probes);
-       return 0;
-}
-
-static
-int remove_probe(struct tracepoint_entry *e, void *probe, void *data)
-{
-       struct lttng_tp_probe *p;
-       int found = 0;
-
-       list_for_each_entry(p, &e->probes, list) {
-               if (p->tp_func.func == probe && p->tp_func.data == data) {
-                       found = 1;
-                       break;
-               }
-       }
-       if (found) {
-               list_del(&p->list);
-               kfree(p);
-               return 0;
-       } else {
-               WARN_ON(1);
-               return -ENOENT;
-       }
-}
-
-/*
- * Get tracepoint if the tracepoint is present in the tracepoint hash table.
- * Must be called with lttng_tracepoint_mutex held.
- * Returns NULL if not present.
- */
-static
-struct tracepoint_entry *get_tracepoint(const char *name)
-{
-       struct hlist_head *head;
-       struct tracepoint_entry *e;
-       u32 hash = jhash(name, strlen(name), 0);
-
-       head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-       lttng_hlist_for_each_entry(e, head, hlist) {
-               if (!strcmp(name, e->name))
-                       return e;
-       }
-       return NULL;
-}
-
-/*
- * Add the tracepoint to the tracepoint hash table. Must be called with
- * lttng_tracepoint_mutex held.
- */
-static
-struct tracepoint_entry *add_tracepoint(const char *name)
-{
-       struct hlist_head *head;
-       struct tracepoint_entry *e;
-       size_t name_len = strlen(name) + 1;
-       u32 hash = jhash(name, name_len - 1, 0);
-
-       head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-       lttng_hlist_for_each_entry(e, head, hlist) {
-               if (!strcmp(name, e->name)) {
-                       printk(KERN_NOTICE
-                               "tracepoint %s busy\n", name);
-                       return ERR_PTR(-EEXIST);        /* Already there */
-               }
-       }
-       /*
-        * Using kmalloc here to allocate a variable length element. Could
-        * cause some memory fragmentation if overused.
-        */
-       e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
-       if (!e)
-               return ERR_PTR(-ENOMEM);
-       memcpy(&e->name[0], name, name_len);
-       e->tp = NULL;
-       e->refcount = 0;
-       INIT_LIST_HEAD(&e->probes);
-       hlist_add_head(&e->hlist, head);
-       return e;
-}
-
-/*
- * Remove the tracepoint from the tracepoint hash table. Must be called
- * with lttng_tracepoint_mutex held.
- */
-static
-void remove_tracepoint(struct tracepoint_entry *e)
-{
-       hlist_del(&e->hlist);
-       kfree(e);
-}
-
-int lttng_tracepoint_probe_register(const char *name, void *probe, void *data)
-{
-       struct tracepoint_entry *e;
-       int ret = 0;
-
-       mutex_lock(&lttng_tracepoint_mutex);
-       e = get_tracepoint(name);
-       if (!e) {
-               e = add_tracepoint(name);
-               if (IS_ERR(e)) {
-                       ret = PTR_ERR(e);
-                       goto end;
-               }
-       }
-       /* add (probe, data) to entry */
-       ret = add_probe(e, probe, data);
-       if (ret)
-               goto end;
-       e->refcount++;
-       if (e->tp) {
-               ret = tracepoint_probe_register(e->tp, probe, data);
-               WARN_ON_ONCE(ret);
-               ret = 0;
-       }
-end:
-       mutex_unlock(&lttng_tracepoint_mutex);
-       return ret;
-}
-
-int lttng_tracepoint_probe_unregister(const char *name, void *probe, void *data)
-{
-       struct tracepoint_entry *e;
-       int ret = 0;
-
-       mutex_lock(&lttng_tracepoint_mutex);
-       e = get_tracepoint(name);
-       if (!e) {
-               ret = -ENOENT;
-               goto end;
-       }
-       /* remove (probe, data) from entry */
-       ret = remove_probe(e, probe, data);
-       if (ret)
-               goto end;
-       if (e->tp) {
-               ret = tracepoint_probe_unregister(e->tp, probe, data);
-               WARN_ON_ONCE(ret);
-               ret = 0;
-       }
-       if (!--e->refcount)
-               remove_tracepoint(e);
-end:
-       mutex_unlock(&lttng_tracepoint_mutex);
-       return ret;
-}
-
-#ifdef CONFIG_MODULES
-
-static
-int lttng_tracepoint_coming(struct tp_module *tp_mod)
-{
-       int i;
-
-       mutex_lock(&lttng_tracepoint_mutex);
-       for (i = 0; i < tp_mod->mod->num_tracepoints; i++) {
-               struct tracepoint *tp;
-               struct tracepoint_entry *e;
-               struct lttng_tp_probe *p;
-
-               tp = lttng_tracepoint_ptr_deref(&tp_mod->mod->tracepoints_ptrs[i]);
-               e = get_tracepoint(tp->name);
-               if (!e) {
-                       e = add_tracepoint(tp->name);
-                       if (IS_ERR(e)) {
-                               pr_warn("LTTng: error (%ld) adding tracepoint\n",
-                                       PTR_ERR(e));
-                               continue;
-                       }
-               }
-               /* If already enabled, just check consistency */
-               if (e->tp) {
-                       WARN_ON(e->tp != tp);
-                       continue;
-               }
-               e->tp = tp;
-               e->refcount++;
-               /* register each (probe, data) */
-               list_for_each_entry(p, &e->probes, list) {
-                       int ret;
-
-                       ret = tracepoint_probe_register(e->tp,
-                                       p->tp_func.func, p->tp_func.data);
-                       WARN_ON_ONCE(ret);
-               }
-       }
-       mutex_unlock(&lttng_tracepoint_mutex);
-       return NOTIFY_OK;
-}
-
-static
-int lttng_tracepoint_going(struct tp_module *tp_mod)
-{
-       int i;
-
-       mutex_lock(&lttng_tracepoint_mutex);
-       for (i = 0; i < tp_mod->mod->num_tracepoints; i++) {
-               struct tracepoint *tp;
-               struct tracepoint_entry *e;
-               struct lttng_tp_probe *p;
-
-               tp = lttng_tracepoint_ptr_deref(&tp_mod->mod->tracepoints_ptrs[i]);
-               e = get_tracepoint(tp->name);
-               if (!e || !e->tp)
-                       continue;
-               /* unregister each (probe, data) */
-               list_for_each_entry(p, &e->probes, list) {
-                       int ret;
-
-                       ret = tracepoint_probe_unregister(e->tp,
-                                       p->tp_func.func, p->tp_func.data);
-                       WARN_ON_ONCE(ret);
-               }
-               e->tp = NULL;
-               if (!--e->refcount)
-                       remove_tracepoint(e);
-       }
-       mutex_unlock(&lttng_tracepoint_mutex);
-       return 0;
-}
-
-static
-int lttng_tracepoint_notify(struct notifier_block *self,
-               unsigned long val, void *data)
-{
-       struct tp_module *tp_mod = data;
-       int ret = 0;
-
-       switch (val) {
-       case MODULE_STATE_COMING:
-               ret = lttng_tracepoint_coming(tp_mod);
-               break;
-       case MODULE_STATE_GOING:
-               ret = lttng_tracepoint_going(tp_mod);
-               break;
-       default:
-               break;
-       }
-       return ret;
-}
-
-static
-struct notifier_block lttng_tracepoint_notifier = {
-       .notifier_call = lttng_tracepoint_notify,
-       .priority = 0,
-};
-
-static
-int lttng_tracepoint_module_init(void)
-{
-       return register_tracepoint_module_notifier(&lttng_tracepoint_notifier);
-}
-
-static
-void lttng_tracepoint_module_exit(void)
-{
-       WARN_ON(unregister_tracepoint_module_notifier(&lttng_tracepoint_notifier));
-}
-
-#else /* #ifdef CONFIG_MODULES */
-
-static
-int lttng_tracepoint_module_init(void)
-{
-       return 0;
-}
-
-static
-void lttng_tracepoint_module_exit(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_MODULES */
-
-static
-void lttng_kernel_tracepoint_add(struct tracepoint *tp, void *priv)
-{
-       struct tracepoint_entry *e;
-       struct lttng_tp_probe *p;
-       int *ret = priv;
-
-       mutex_lock(&lttng_tracepoint_mutex);
-       e = get_tracepoint(tp->name);
-       if (!e) {
-               e = add_tracepoint(tp->name);
-               if (IS_ERR(e)) {
-                       pr_warn("LTTng: error (%ld) adding tracepoint\n",
-                               PTR_ERR(e));
-                       *ret = (int) PTR_ERR(e);
-                       goto end;
-               }
-       }
-       /* If already enabled, just check consistency */
-       if (e->tp) {
-               WARN_ON(e->tp != tp);
-               goto end;
-       }
-       e->tp = tp;
-       e->refcount++;
-       /* register each (probe, data) */
-       list_for_each_entry(p, &e->probes, list) {
-               int ret;
-
-               ret = tracepoint_probe_register(e->tp,
-                               p->tp_func.func, p->tp_func.data);
-               WARN_ON_ONCE(ret);
-       }
-end:
-       mutex_unlock(&lttng_tracepoint_mutex);
-}
-
-static
-void lttng_kernel_tracepoint_remove(struct tracepoint *tp, void *priv)
-{
-       struct tracepoint_entry *e;
-       int *ret = priv;
-
-       mutex_lock(&lttng_tracepoint_mutex);
-       e = get_tracepoint(tp->name);
-       if (!e || e->refcount != 1 || !list_empty(&e->probes)) {
-               *ret = -EINVAL;
-               goto end;
-       }
-       remove_tracepoint(e);
-end:
-       mutex_unlock(&lttng_tracepoint_mutex);
-}
-
-int __init lttng_tracepoint_init(void)
-{
-       int ret = 0;
-
-       for_each_kernel_tracepoint(lttng_kernel_tracepoint_add, &ret);
-       if (ret)
-               goto error;
-       ret = lttng_tracepoint_module_init();
-       if (ret)
-               goto error_module;
-       return 0;
-
-error_module:
-       {
-               int error_ret = 0;
-
-               for_each_kernel_tracepoint(lttng_kernel_tracepoint_remove,
-                               &error_ret);
-               WARN_ON(error_ret);
-       }
-error:
-       return ret;
-}
-
-void lttng_tracepoint_exit(void)
-{
-       int i, ret = 0;
-
-       lttng_tracepoint_module_exit();
-       for_each_kernel_tracepoint(lttng_kernel_tracepoint_remove, &ret);
-       WARN_ON(ret);
-       mutex_lock(&lttng_tracepoint_mutex);
-       for (i = 0; i < TRACEPOINT_TABLE_SIZE; i++) {
-               struct hlist_head *head = &tracepoint_table[i];
-
-               /* All tracepoints should be removed */
-               WARN_ON(!hlist_empty(head));
-       }
-       mutex_unlock(&lttng_tracepoint_mutex);
-}
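
Stripped of the probe list and refcount bookkeeping, get_tracepoint() and add_tracepoint() above implement a fixed-size hash table keyed by tracepoint name. The core pattern, as a standalone sketch with invented names:

#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/string.h>

#define NAME_TABLE_BITS 6
#define NAME_TABLE_SIZE (1 << NAME_TABLE_BITS)

struct name_entry {
	struct hlist_node hlist;
	char name[];		/* Allocated together with the entry. */
};

static struct hlist_head name_table[NAME_TABLE_SIZE];

static struct hlist_head *name_bucket(const char *name)
{
	u32 hash = jhash(name, strlen(name), 0);

	return &name_table[hash & (NAME_TABLE_SIZE - 1)];
}

/* Returns the entry if present, NULL otherwise (caller holds the lock). */
static struct name_entry *name_lookup(const char *name)
{
	struct name_entry *e;

	hlist_for_each_entry(e, name_bucket(name), hlist) {
		if (!strcmp(name, e->name))
			return e;
	}
	return NULL;
}
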
diff --git a/lttng-tracker-id.c b/lttng-tracker-id.c
deleted file mode 100644 (file)
index 205c4af..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-tracker-id.c
- *
- * LTTng Process ID tracking.
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/seq_file.h>
-#include <linux/stringify.h>
-#include <linux/hash.h>
-#include <linux/rcupdate.h>
-
-#include <wrapper/tracepoint.h>
-#include <wrapper/rcu.h>
-#include <wrapper/list.h>
-#include <lttng/events.h>
-
-/*
- * Hash table is allocated and freed when there are no possible
- * concurrent lookups (ensured by the alloc/free caller). However,
- * there can be concurrent RCU lookups vs add/del operations.
- *
- * Concurrent updates of the PID hash table are forbidden: the caller
- * must ensure mutual exclusion. This is currently done by holding the
- * sessions_mutex across calls to create, destroy, add, and del
- * functions of this API.
- */
-int lttng_id_tracker_get_node_id(const struct lttng_id_hash_node *node)
-{
-       return node->id;
-}
-
-/*
- * Lookup performed from RCU read-side critical section (RCU sched),
- * protected by preemption off at the tracepoint call site.
- * Return true if found, false if not found.
- */
-bool lttng_id_tracker_lookup(struct lttng_id_tracker_rcu *p, int id)
-{
-       struct hlist_head *head;
-       struct lttng_id_hash_node *e;
-       uint32_t hash = hash_32(id, 32);
-
-       head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
-       lttng_hlist_for_each_entry_rcu(e, head, hlist) {
-               if (id == e->id)
-                       return true;    /* Found */
-       }
-       return false;
-}
-EXPORT_SYMBOL_GPL(lttng_id_tracker_lookup);
-
-static struct lttng_id_tracker_rcu *lttng_id_tracker_rcu_create(void)
-{
-       struct lttng_id_tracker_rcu *tracker;
-
-       tracker = kzalloc(sizeof(struct lttng_id_tracker_rcu), GFP_KERNEL);
-       if (!tracker)
-               return NULL;
-       return tracker;
-}
-
-/*
- * Tracker add and del operations support concurrent RCU lookups.
- */
-int lttng_id_tracker_add(struct lttng_id_tracker *lf, int id)
-{
-       struct hlist_head *head;
-       struct lttng_id_hash_node *e;
-       struct lttng_id_tracker_rcu *p = lf->p;
-       uint32_t hash = hash_32(id, 32);
-       bool allocated = false;
-
-       if (!p) {
-               p = lttng_id_tracker_rcu_create();
-               if (!p)
-                       return -ENOMEM;
-               allocated = true;
-       }
-       head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
-       lttng_hlist_for_each_entry(e, head, hlist) {
-               if (id == e->id)
-                       return -EEXIST;
-       }
-       e = kmalloc(sizeof(struct lttng_id_hash_node), GFP_KERNEL);
-       if (!e)
-               return -ENOMEM;
-       e->id = id;
-       hlist_add_head_rcu(&e->hlist, head);
-       if (allocated) {
-               rcu_assign_pointer(lf->p, p);
-       }
-       return 0;
-}
-
-static
-void id_tracker_del_node_rcu(struct lttng_id_hash_node *e)
-{
-       hlist_del_rcu(&e->hlist);
-       /*
-        * We choose to use a heavyweight synchronize on removal here,
-        * since removing an ID from the tracker mask is a rare
-        * operation and we do not want ID lookups to touch more cache
-        * lines than strictly needed, so we cannot afford to add an
-        * rcu_head field to the ID hash nodes.
-        */
-       synchronize_trace();
-       kfree(e);
-}
-
-/*
- * This removal is only used on destroy, so it does not need to support
- * concurrent RCU lookups.
- */
-static
-void id_tracker_del_node(struct lttng_id_hash_node *e)
-{
-       hlist_del(&e->hlist);
-       kfree(e);
-}
-
-int lttng_id_tracker_del(struct lttng_id_tracker *lf, int id)
-{
-       struct hlist_head *head;
-       struct lttng_id_hash_node *e;
-       struct lttng_id_tracker_rcu *p = lf->p;
-       uint32_t hash = hash_32(id, 32);
-
-       if (!p)
-               return -ENOENT;
-       head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
-       /*
-        * No need for _safe iteration: traversal stops as soon as the
-        * entry is removed.
-        */
-       lttng_hlist_for_each_entry(e, head, hlist) {
-               if (id == e->id) {
-                       id_tracker_del_node_rcu(e);
-                       return 0;
-               }
-       }
-       return -ENOENT; /* Not found */
-}
-
-static void lttng_id_tracker_rcu_destroy(struct lttng_id_tracker_rcu *p)
-{
-       int i;
-
-       if (!p)
-               return;
-       for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
-               struct hlist_head *head = &p->id_hash[i];
-               struct lttng_id_hash_node *e;
-               struct hlist_node *tmp;
-
-               lttng_hlist_for_each_entry_safe(e, tmp, head, hlist)
-                       id_tracker_del_node(e);
-       }
-       kfree(p);
-}
-
-int lttng_id_tracker_empty_set(struct lttng_id_tracker *lf)
-{
-       struct lttng_id_tracker_rcu *p, *oldp;
-
-       p = lttng_id_tracker_rcu_create();
-       if (!p)
-               return -ENOMEM;
-       oldp = lf->p;
-       rcu_assign_pointer(lf->p, p);
-       synchronize_trace();
-       lttng_id_tracker_rcu_destroy(oldp);
-       return 0;
-}
-
-void lttng_id_tracker_destroy(struct lttng_id_tracker *lf, bool rcu)
-{
-       struct lttng_id_tracker_rcu *p = lf->p;
-
-       if (!lf->p)
-               return;
-       rcu_assign_pointer(lf->p, NULL);
-       if (rcu)
-               synchronize_trace();
-       lttng_id_tracker_rcu_destroy(p);
-}
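
A hypothetical fast-path caller of the tracker above, for illustration: the read side runs under RCU-sched (preemption is already off at tracepoint sites), the add/del side is serialized by the sessions mutex as documented above, and a NULL table is assumed to mean "no tracking filter", so every ID passes.

#include <linux/rcupdate.h>

#include <wrapper/rcu.h>
#include <lttng/events.h>

/* Hypothetical call site (illustration only). */
static bool example_id_is_traced(struct lttng_id_tracker *t, int id)
{
	struct lttng_id_tracker_rcu *p;
	bool traced;

	rcu_read_lock_sched_notrace();
	p = lttng_rcu_dereference(t->p);
	traced = !p || lttng_id_tracker_lookup(p, id);
	rcu_read_unlock_sched_notrace();
	return traced;
}
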
diff --git a/lttng-wrapper-impl.c b/lttng-wrapper-impl.c
deleted file mode 100644 (file)
index e7f5660..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-wrapper-impl.c
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-static int __init lttng_wrapper_init(void)
-{
-       return 0;
-}
-
-module_init(lttng_wrapper_init);
-
-static void __exit lttng_exit(void)
-{
-}
-
-module_exit(lttng_exit);
-
-#include <generated/patches.i>
-#ifdef LTTNG_EXTRA_VERSION_GIT
-MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
-#endif
-#ifdef LTTNG_EXTRA_VERSION_NAME
-MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
-#endif
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng wrapper");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
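
The MODULE_VERSION() invocation above assembles its string at compile time: __stringify() turns each version macro into a string literal, and adjacent literals concatenate. A tiny illustration with made-up values:

#include <linux/stringify.h>

#define DEMO_MAJOR 2
#define DEMO_MINOR 12

/*
 * __stringify(DEMO_MAJOR) "." __stringify(DEMO_MINOR) expands to
 * "2" "." "12", which the compiler joins into the single literal "2.12".
 */
static const char demo_version[] =
	__stringify(DEMO_MAJOR) "." __stringify(DEMO_MINOR);
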
diff --git a/probes/Kbuild b/probes/Kbuild
deleted file mode 100644 (file)
index 9c53ca2..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
-
-TOP_LTTNG_MODULES_DIR := $(shell dirname $(lastword $(MAKEFILE_LIST)))/..
-
-include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common
-
-ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)/include
-
-obj-$(CONFIG_LTTNG) += lttng-probe-sched.o
-obj-$(CONFIG_LTTNG) += lttng-probe-irq.o
-obj-$(CONFIG_LTTNG) += lttng-probe-timer.o
-obj-$(CONFIG_LTTNG) += lttng-probe-kmem.o
-obj-$(CONFIG_LTTNG) += lttng-probe-module.o
-obj-$(CONFIG_LTTNG) += lttng-probe-power.o
-obj-$(CONFIG_LTTNG) += lttng-probe-statedump.o
-
-ifneq ($(CONFIG_NET_9P),)
-  obj-$(CONFIG_LTTNG) +=  $(shell \
-    if [ $(VERSION) -ge 4 \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 10 \) ] ; then \
-      echo "lttng-probe-9p.o" ; fi;)
-endif # CONFIG_NET_9P
-
-i2c_dep = $(srctree)/include/trace/events/i2c.h
-ifneq ($(wildcard $(i2c_dep)),)
-  obj-$(CONFIG_LTTNG) += lttng-probe-i2c.o
-endif
-
-ifneq ($(CONFIG_KVM),)
-  obj-$(CONFIG_LTTNG) += lttng-probe-kvm.o
-  ifneq ($(CONFIG_X86),)
-    kvm_dep_lapic = $(srctree)/arch/x86/kvm/lapic.h
-    kvm_dep_lapic_check = $(wildcard $(kvm_dep_lapic))
-    ifneq ($(kvm_dep_lapic_check),)
-      # search for iodev.h in any of its known locations
-      kvm_dep_iodev = $(srctree)/virt/kvm/iodev.h $(srctree)/include/kvm/iodev.h
-      kvm_dep_iodev_check = $(wildcard $(kvm_dep_iodev))
-      ifneq ($(kvm_dep_iodev_check),)
-        kvm_dep_emulate = $(srctree)/arch/x86/kvm/kvm_emulate.h
-        kvm_dep_emulate_wildcard = $(wildcard $(kvm_dep_emulate))
-        kvm_dep_emulate_check = $(shell \
-        if [ \( $(VERSION) -ge 6 \
-           -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 7 \) \) -a \
-           -z "$(kvm_dep_emulate_wildcard)" ] ; then \
-          echo "warn" ; \
-        else \
-          echo "ok" ; \
-        fi ;)
-        ifeq ($(kvm_dep_emulate_check),ok)
-          CFLAGS_lttng-probe-kvm-x86.o += -I$(srctree)/virt/kvm \
-                     -I$(srctree)/arch/x86/kvm
-          CFLAGS_lttng-probe-kvm-x86-mmu.o += -I$(srctree)/virt/kvm
-          obj-$(CONFIG_LTTNG) += lttng-probe-kvm-x86.o
-          obj-$(CONFIG_LTTNG) += lttng-probe-kvm-x86-mmu.o
-        else # ($(kvm_dep_emulate_check),ok)
-          $(warning File $(kvm_dep_emulate) not found. Probe "kvm" x86-specific is disabled. Use full kernel source tree to enable it.)
-        endif # ($(kvm_dep_emulate_check),ok)
-      else # $(kvm_dep_iodev_check)
-        $(warning File $(kvm_dep_iodev) not found. Probe "kvm" x86-specific is disabled. Use full kernel source tree to enable it.)
-      endif # $(kvm_dep_iodev_check)
-    else # $(kvm_dep_lapic_check)
-      $(warning File $(kvm_dep_lapic) not found. Probe "kvm" x86-specific is disabled. Use full kernel source tree to enable it.)
-    endif # $(kvm_dep_lapic_check)
-  endif # CONFIG_X86
-endif # CONFIG_KVM
-
-ifneq ($(CONFIG_X86),)
-  x86_irq_vectors_dep = $(srctree)/arch/x86/include/asm/trace/irq_vectors.h
-
-  ifneq ($(wildcard $(x86_irq_vectors_dep)),)
-    obj-$(CONFIG_LTTNG) += lttng-probe-x86-irq-vectors.o
-  endif # $(wildcard $(x86_irq_vectors_dep))
-
-  x86_exceptions_dep = $(srctree)/arch/x86/include/asm/trace/exceptions.h
-
-  ifneq ($(wildcard $(x86_exceptions_dep)),)
-    obj-$(CONFIG_LTTNG) += lttng-probe-x86-exceptions.o
-  endif # $(wildcard $(x86_exceptions_dep))
-endif # CONFIG_X86
-
-obj-$(CONFIG_LTTNG) += lttng-probe-signal.o
-
-ifneq ($(CONFIG_BLOCK),)
-  # need blk_cmd_buf_len
-  ifneq ($(CONFIG_EVENT_TRACING),)
-    obj-$(CONFIG_LTTNG) += lttng-probe-block.o
-  endif # CONFIG_EVENT_TRACING
-endif # CONFIG_BLOCK
-
-ifneq ($(CONFIG_NET),)
-  obj-$(CONFIG_LTTNG) += lttng-probe-napi.o
-  obj-$(CONFIG_LTTNG) += lttng-probe-skb.o
-  obj-$(CONFIG_LTTNG) += lttng-probe-net.o
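-  # The sock and udp probes match kernels 3.1 and later only, hence the
-  # version tests below.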
-  obj-$(CONFIG_LTTNG) +=  $(shell \
-    if [ $(VERSION) -ge 4 \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 1 \) ] ; then \
-      echo "lttng-probe-sock.o" ; fi;)
-  obj-$(CONFIG_LTTNG) +=  $(shell \
-    if [ $(VERSION) -ge 4 \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 1 \) ] ; then \
-      echo "lttng-probe-udp.o" ; fi;)
-endif # CONFIG_NET
-
-ifneq ($(CONFIG_SND_SOC),)
-  obj-$(CONFIG_LTTNG) += lttng-probe-asoc.o
-endif # CONFIG_SND_SOC
-
-ifneq ($(CONFIG_EXT3_FS),)
-  ext3_dep = $(srctree)/fs/ext3/*.h
-  ext3_dep_check = $(wildcard $(ext3_dep))
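-  # ext3 was removed in Linux 4.3, so the probe is only built for
-  # 3.1 <= kernel < 4.3. From 3.4 onward it also needs the private
-  # fs/ext3 headers, hence the "warn" case below.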
-  ext3 = $(shell \
-    if [ $(VERSION) -lt 4 -o \( $(VERSION) -eq 4 -a $(PATCHLEVEL) -lt 3 \) ] ; then \
-      if [ $(VERSION) -ge 4 -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 1 \) ] ; then \
-        if [ \( $(VERSION) -ge 4 -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 4 \) \) -a \
-          -z "$(ext3_dep_check)" ] ; then \
-          echo "warn" ; \
-          exit ; \
-        fi; \
-        echo "lttng-probe-ext3.o" ; \
-      fi; \
-    fi;)
-  ifeq ($(ext3),warn)
-    $(warning Files $(ext3_dep) not found. The "ext3" probe is disabled. Use the full kernel source tree to enable it.)
-    ext3 =
-  endif # $(ext3),warn
-  obj-$(CONFIG_LTTNG) += $(ext3)
-endif # CONFIG_EXT3_FS
-
-ifneq ($(CONFIG_GPIOLIB),)
-  obj-$(CONFIG_LTTNG) +=  $(shell \
-    if [ $(VERSION) -ge 3 ] ; then \
-      echo "lttng-probe-gpio.o" ; fi;)
-endif # CONFIG_GPIOLIB
-
-ifneq ($(CONFIG_JBD2),)
-  obj-$(CONFIG_LTTNG) += lttng-probe-jbd2.o
-endif # CONFIG_JBD2
-
-ifneq ($(CONFIG_JBD),)
-  obj-$(CONFIG_LTTNG) +=  $(shell \
-    if [ $(VERSION) -ge 4 \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 1 \) ] ; then \
-      echo "lttng-probe-jbd.o" ; fi;)
-endif # CONFIG_JBD
-
-ifneq ($(CONFIG_REGULATOR),)
-  obj-$(CONFIG_LTTNG) += lttng-probe-regulator.o
-endif # CONFIG_REGULATOR
-
-ifneq ($(CONFIG_SCSI),)
-  obj-$(CONFIG_LTTNG) += lttng-probe-scsi.o
-endif # CONFIG_SCSI
-
-obj-$(CONFIG_LTTNG) += lttng-probe-vmscan.o
-
-# The lock probe does not work, so it is disabled for now.
-#ifneq ($(CONFIG_LOCKDEP),)
-#  obj-$(CONFIG_LTTNG) += lttng-probe-lock.o
-#endif # CONFIG_LOCKDEP
-
-ifneq ($(CONFIG_BTRFS_FS),)
-  btrfs_dep = $(srctree)/fs/btrfs/*.h
-  ifneq ($(wildcard $(btrfs_dep)),)
-    obj-$(CONFIG_LTTNG) += lttng-probe-btrfs.o
-  else
-    $(warning Files $(btrfs_dep) not found. The "btrfs" probe is disabled. Use the full kernel source tree to enable it.)
-  endif # $(wildcard $(btrfs_dep))
-endif # CONFIG_BTRFS_FS
-
-obj-$(CONFIG_LTTNG) += lttng-probe-compaction.o
-
-ifneq ($(CONFIG_EXT4_FS),)
-  ext4_dep = $(srctree)/fs/ext4/*.h
-  ifneq ($(wildcard $(ext4_dep)),)
-    obj-$(CONFIG_LTTNG) += lttng-probe-ext4.o
-  else
-    $(warning Files $(ext4_dep) not found. The "ext4" probe is disabled. Use the full kernel source tree to enable it.)
-  endif # $(wildcard $(ext4_dep))
-endif # CONFIG_EXT4_FS
-
-obj-$(CONFIG_LTTNG) +=  $(shell \
-  if [ $(VERSION) -ge 4 \
-    -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 4 \) ] ; then \
-    echo "lttng-probe-printk.o" ; fi;)
-
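-# The generated printk probe has a large stack frame; raise the
-# per-function frame size limit so builds with CONFIG_FRAME_WARN stay
-# free of spurious warnings.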
-ifneq ($(CONFIG_FRAME_WARN),0)
-  CFLAGS_lttng-probe-printk.o += -Wframe-larger-than=2200
-endif
-
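-# Build the random probe on 3.6+ and on the stable branches (3.0.41+,
-# 3.4.9+, 3.5.2+) that carry the matching tracepoints.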
-obj-$(CONFIG_LTTNG) +=  $(shell \
-    if [ $(VERSION) -ge 4 \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 5 -a $(SUBLEVEL) -ge 2 \) \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 4 -a $(SUBLEVEL) -ge 9 \) \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 0 -a $(SUBLEVEL) -ge 41 \) ] ; then \
-      echo "lttng-probe-random.o" ; fi;)
-
-obj-$(CONFIG_LTTNG) +=  $(shell \
-  if [ $(VERSION) -ge 4 \
-    -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 2 \) ] ; then \
-    echo "lttng-probe-rcu.o" ; fi;)
-
-ifneq ($(CONFIG_REGMAP),)
-  regmap_dep_4_1 = $(srctree)/drivers/base/regmap/trace.h
-  ifneq ($(wildcard $(regmap_dep_4_1)),)
-    obj-$(CONFIG_LTTNG) += lttng-probe-regmap.o
-  else
-    $(warning File $(regmap_dep_4_1) not found. The "regmap" probe is disabled. A Linux 4.1+ kernel source tree is needed to enable it.)
-  endif # $(wildcard $(regmap_dep_4_1)),
-endif # CONFIG_REGMAP
-
-ifneq ($(CONFIG_PM_RUNTIME),)
-  obj-$(CONFIG_LTTNG) +=  $(shell \
-    if [ $(VERSION) -ge 4 \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 2 \) ] ; then \
-      echo "lttng-probe-rpm.o" ; fi;)
-endif # CONFIG_PM_RUNTIME
-
-ifneq ($(CONFIG_SUNRPC),)
-  obj-$(CONFIG_LTTNG) +=  $(shell \
-    if [ $(VERSION) -ge 4 \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 4 \) ] ; then \
-      echo "lttng-probe-sunrpc.o" ; fi;)
-endif # CONFIG_SUNRPC
-
-ifneq ($(CONFIG_VIDEO_V4L2),)
-  obj-$(CONFIG_LTTNG) += $(shell \
-    if [ $(VERSION) -ge 4 \
-      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 14 \) ] ; then \
-      echo "lttng-probe-v4l2.o" ; fi;)
-endif # CONFIG_VIDEO_V4L2
-
-obj-$(CONFIG_LTTNG) += lttng-probe-workqueue.o
-
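-# The writeback probe resolves non-exported kernel symbols through
-# kallsyms, which is why it requires CONFIG_KALLSYMS_ALL.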
-ifneq ($(CONFIG_KALLSYMS_ALL),)
-  obj-$(CONFIG_LTTNG) += lttng-probe-writeback.o
-else
-  ifdef CONFIG_LOCALVERSION # Check if dot-config is included.
-    $(warning CONFIG_KALLSYMS_ALL is disabled, therefore the "writeback" probe is disabled. Rebuild your kernel with this configuration option enabled in order to trace this subsystem.)
-  endif
-endif # CONFIG_KALLSYMS_ALL
-
-ifneq ($(CONFIG_KPROBES),)
-  obj-$(CONFIG_LTTNG) += lttng-kprobes.o
-endif # CONFIG_KPROBES
-
-ifneq ($(CONFIG_UPROBES),)
-  obj-$(CONFIG_LTTNG) += lttng-uprobes.o
-endif # CONFIG_UPROBES
-
-ifneq ($(CONFIG_KRETPROBES),)
-  obj-$(CONFIG_LTTNG) += lttng-kretprobes.o
-endif # CONFIG_KRETPROBES
-
-ifneq ($(CONFIG_PREEMPTIRQ_EVENTS),)
-  obj-$(CONFIG_LTTNG) += lttng-probe-preemptirq.o
-endif # CONFIG_PREEMPTIRQ_EVENTS
-
-# vim:syntax=make
diff --git a/probes/lttng-kprobes.c b/probes/lttng-kprobes.c
deleted file mode 100644 (file)
index a2474d0..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-kprobes.c
- *
- * LTTng kprobes integration module.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/kprobes.h>
-#include <linux/slab.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/irqflags.h>
-#include <lttng/tracer.h>
-#include <blacklist/kprobes.h>
-
-static
-int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs)
-{
-       struct lttng_event *event =
-               container_of(p, struct lttng_event, u.kprobe.kp);
-       struct lttng_probe_ctx lttng_probe_ctx = {
-               .event = event,
-               .interruptible = !lttng_regs_irqs_disabled(regs),
-       };
-       struct lttng_channel *chan = event->chan;
-       struct lib_ring_buffer_ctx ctx;
-       int ret;
-       unsigned long data = (unsigned long) p->addr;
-
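-       /*
-        * Bail out early, without side effects, when the session,
-        * channel or event is disabled.
-        */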
-       if (unlikely(!READ_ONCE(chan->session->active)))
-               return 0;
-       if (unlikely(!READ_ONCE(chan->enabled)))
-               return 0;
-       if (unlikely(!READ_ONCE(event->enabled)))
-               return 0;
-
-       lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx, sizeof(data),
-                                lttng_alignof(data), -1);
-       ret = chan->ops->event_reserve(&ctx, event->id);
-       if (ret < 0)
-               return 0;
-       lib_ring_buffer_align_ctx(&ctx, lttng_alignof(data));
-       chan->ops->event_write(&ctx, &data, sizeof(data));
-       chan->ops->event_commit(&ctx);
-       return 0;
-}
-
-/*
- * Create event description
- */
-static
-int lttng_create_kprobe_event(const char *name, struct lttng_event *event)
-{
-       struct lttng_event_field *field;
-       struct lttng_event_desc *desc;
-       int ret;
-
-       desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
-       if (!desc)
-               return -ENOMEM;
-       desc->name = kstrdup(name, GFP_KERNEL);
-       if (!desc->name) {
-               ret = -ENOMEM;
-               goto error_str;
-       }
-       desc->nr_fields = 1;
-       desc->fields = field =
-               kzalloc(1 * sizeof(struct lttng_event_field), GFP_KERNEL);
-       if (!field) {
-               ret = -ENOMEM;
-               goto error_field;
-       }
-       field->name = "ip";
-       field->type.atype = atype_integer;
-       field->type.u.integer.size = sizeof(unsigned long) * CHAR_BIT;
-       field->type.u.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
-       field->type.u.integer.signedness = lttng_is_signed_type(unsigned long);
-       field->type.u.integer.reverse_byte_order = 0;
-       field->type.u.integer.base = 16;
-       field->type.u.integer.encoding = lttng_encode_none;
-       desc->owner = THIS_MODULE;
-       event->desc = desc;
-
-       return 0;
-
-error_field:
-       kfree(desc->name);
-error_str:
-       kfree(desc);
-       return ret;
-}
-
-int lttng_kprobes_register(const char *name,
-                          const char *symbol_name,
-                          uint64_t offset,
-                          uint64_t addr,
-                          struct lttng_event *event)
-{
-       int ret;
-
-       /* Kprobes expects a NULL symbol name if unused */
-       if (symbol_name[0] == '\0')
-               symbol_name = NULL;
-
-       ret = lttng_create_kprobe_event(name, event);
-       if (ret)
-               goto error;
-       memset(&event->u.kprobe.kp, 0, sizeof(event->u.kprobe.kp));
-       event->u.kprobe.kp.pre_handler = lttng_kprobes_handler_pre;
-       if (symbol_name) {
-               event->u.kprobe.symbol_name =
-                       kzalloc(LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char),
-                               GFP_KERNEL);
-               if (!event->u.kprobe.symbol_name) {
-                       ret = -ENOMEM;
-                       goto name_error;
-               }
-               memcpy(event->u.kprobe.symbol_name, symbol_name,
-                      LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char));
-               event->u.kprobe.kp.symbol_name =
-                       event->u.kprobe.symbol_name;
-       }
-       event->u.kprobe.kp.offset = offset;
-       event->u.kprobe.kp.addr = (void *) (unsigned long) addr;
-
-       /*
-        * Ensure the memory we just allocated doesn't trigger page faults.
-        * Well... kprobes itself puts the page fault handler on the blacklist,
-        * but we can never be too careful.
-        */
-       wrapper_vmalloc_sync_mappings();
-
-       ret = register_kprobe(&event->u.kprobe.kp);
-       if (ret)
-               goto register_error;
-       return 0;
-
-register_error:
-       kfree(event->u.kprobe.symbol_name);
-name_error:
-       kfree(event->desc->fields);
-       kfree(event->desc->name);
-       kfree(event->desc);
-error:
-       return ret;
-}
-EXPORT_SYMBOL_GPL(lttng_kprobes_register);
-
-void lttng_kprobes_unregister(struct lttng_event *event)
-{
-       unregister_kprobe(&event->u.kprobe.kp);
-}
-EXPORT_SYMBOL_GPL(lttng_kprobes_unregister);
-
-void lttng_kprobes_destroy_private(struct lttng_event *event)
-{
-       kfree(event->u.kprobe.symbol_name);
-       kfree(event->desc->fields);
-       kfree(event->desc->name);
-       kfree(event->desc);
-}
-EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_private);
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng kprobes probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-kretprobes.c b/probes/lttng-kretprobes.c
deleted file mode 100644 (file)
index 0067593..0000000
+++ /dev/null
@@ -1,307 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-kretprobes.c
- *
- * LTTng kretprobes integration module.
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/kprobes.h>
-#include <linux/slab.h>
-#include <linux/kref.h>
-#include <lttng/events.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
-#include <wrapper/irqflags.h>
-#include <lttng/tracer.h>
-#include <blacklist/kprobes.h>
-
-enum lttng_kretprobe_type {
-       EVENT_ENTRY = 0,
-       EVENT_RETURN = 1,
-};
-
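-/*
- * The entry and return events share a single kretprobe. kref_register
- * keeps it registered until both events are unregistered; kref_alloc
- * keeps this structure (and the symbol name) allocated until both
- * events are destroyed.
- */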
-struct lttng_krp {
-       struct kretprobe krp;
-       struct lttng_event *event[2];   /* ENTRY and RETURN */
-       struct kref kref_register;
-       struct kref kref_alloc;
-};
-
-static
-int _lttng_kretprobes_handler(struct kretprobe_instance *krpi,
-                             struct pt_regs *regs,
-                             enum lttng_kretprobe_type type)
-{
-       struct lttng_krp *lttng_krp =
-               container_of(krpi->rp, struct lttng_krp, krp);
-       struct lttng_event *event =
-               lttng_krp->event[type];
-       struct lttng_probe_ctx lttng_probe_ctx = {
-               .event = event,
-               .interruptible = !lttng_regs_irqs_disabled(regs),
-       };
-       struct lttng_channel *chan = event->chan;
-       struct lib_ring_buffer_ctx ctx;
-       int ret;
-       struct {
-               unsigned long ip;
-               unsigned long parent_ip;
-       } payload;
-
-       if (unlikely(!READ_ONCE(chan->session->active)))
-               return 0;
-       if (unlikely(!READ_ONCE(chan->enabled)))
-               return 0;
-       if (unlikely(!READ_ONCE(event->enabled)))
-               return 0;
-
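-       /*
-        * ip is the probed function entry address; parent_ip is the
-        * return address saved by the kretprobe.
-        */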
-       payload.ip = (unsigned long) krpi->rp->kp.addr;
-       payload.parent_ip = (unsigned long) krpi->ret_addr;
-
-       lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx, sizeof(payload),
-                                lttng_alignof(payload), -1);
-       ret = chan->ops->event_reserve(&ctx, event->id);
-       if (ret < 0)
-               return 0;
-       lib_ring_buffer_align_ctx(&ctx, lttng_alignof(payload));
-       chan->ops->event_write(&ctx, &payload, sizeof(payload));
-       chan->ops->event_commit(&ctx);
-       return 0;
-}
-
-static
-int lttng_kretprobes_handler_entry(struct kretprobe_instance *krpi,
-                                  struct pt_regs *regs)
-{
-       return _lttng_kretprobes_handler(krpi, regs, EVENT_ENTRY);
-}
-
-static
-int lttng_kretprobes_handler_return(struct kretprobe_instance *krpi,
-                                   struct pt_regs *regs)
-{
-       return _lttng_kretprobes_handler(krpi, regs, EVENT_RETURN);
-}
-
-/*
- * Create event description
- */
-static
-int lttng_create_kprobe_event(const char *name, struct lttng_event *event,
-                             enum lttng_kretprobe_type type)
-{
-       struct lttng_event_field *fields;
-       struct lttng_event_desc *desc;
-       int ret;
-       char *alloc_name;
-       size_t name_len;
-       const char *suffix = NULL;
-
-       desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
-       if (!desc)
-               return -ENOMEM;
-       name_len = strlen(name);
-       switch (type) {
-       case EVENT_ENTRY:
-               suffix = "_entry";
-               break;
-       case EVENT_RETURN:
-               suffix = "_return";
-               break;
-       }
-       name_len += strlen(suffix);
-       alloc_name = kmalloc(name_len + 1, GFP_KERNEL);
-       if (!alloc_name) {
-               ret = -ENOMEM;
-               goto error_str;
-       }
-       strcpy(alloc_name, name);
-       strcat(alloc_name, suffix);
-       desc->name = alloc_name;
-       desc->nr_fields = 2;
-       desc->fields = fields =
-               kzalloc(2 * sizeof(struct lttng_event_field), GFP_KERNEL);
-       if (!desc->fields) {
-               ret = -ENOMEM;
-               goto error_fields;
-       }
-       fields[0].name = "ip";
-       fields[0].type.atype = atype_integer;
-       fields[0].type.u.integer.size = sizeof(unsigned long) * CHAR_BIT;
-       fields[0].type.u.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
-       fields[0].type.u.integer.signedness = lttng_is_signed_type(unsigned long);
-       fields[0].type.u.integer.reverse_byte_order = 0;
-       fields[0].type.u.integer.base = 16;
-       fields[0].type.u.integer.encoding = lttng_encode_none;
-
-       fields[1].name = "parent_ip";
-       fields[1].type.atype = atype_integer;
-       fields[1].type.u.integer.size = sizeof(unsigned long) * CHAR_BIT;
-       fields[1].type.u.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
-       fields[1].type.u.integer.signedness = lttng_is_signed_type(unsigned long);
-       fields[1].type.u.integer.reverse_byte_order = 0;
-       fields[1].type.u.integer.base = 16;
-       fields[1].type.u.integer.encoding = lttng_encode_none;
-
-       desc->owner = THIS_MODULE;
-       event->desc = desc;
-
-       return 0;
-
-error_fields:
-       kfree(desc->name);
-error_str:
-       kfree(desc);
-       return ret;
-}
-
-int lttng_kretprobes_register(const char *name,
-                          const char *symbol_name,
-                          uint64_t offset,
-                          uint64_t addr,
-                          struct lttng_event *event_entry,
-                          struct lttng_event *event_return)
-{
-       int ret;
-       struct lttng_krp *lttng_krp;
-
-       /* Kprobes expects a NULL symbol name if unused */
-       if (symbol_name[0] == '\0')
-               symbol_name = NULL;
-
-       ret = lttng_create_kprobe_event(name, event_entry, EVENT_ENTRY);
-       if (ret)
-               goto error;
-       ret = lttng_create_kprobe_event(name, event_return, EVENT_RETURN);
-       if (ret)
-               goto event_return_error;
-       lttng_krp = kzalloc(sizeof(*lttng_krp), GFP_KERNEL);
-       if (!lttng_krp) {
-               ret = -ENOMEM;
-               goto krp_error;
-       }
-       lttng_krp->krp.entry_handler = lttng_kretprobes_handler_entry;
-       lttng_krp->krp.handler = lttng_kretprobes_handler_return;
-       if (symbol_name) {
-               char *alloc_symbol;
-
-               alloc_symbol = kstrdup(symbol_name, GFP_KERNEL);
-               if (!alloc_symbol) {
-                       ret = -ENOMEM;
-                       goto name_error;
-               }
-               lttng_krp->krp.kp.symbol_name =
-                       alloc_symbol;
-               event_entry->u.kretprobe.symbol_name =
-                       alloc_symbol;
-               event_return->u.kretprobe.symbol_name =
-                       alloc_symbol;
-       }
-       lttng_krp->krp.kp.offset = offset;
-       lttng_krp->krp.kp.addr = (void *) (unsigned long) addr;
-
-       /* Allow probe handler to find event structures */
-       lttng_krp->event[EVENT_ENTRY] = event_entry;
-       lttng_krp->event[EVENT_RETURN] = event_return;
-       event_entry->u.kretprobe.lttng_krp = lttng_krp;
-       event_return->u.kretprobe.lttng_krp = lttng_krp;
-
-       /*
-        * Both events must be unregistered before the kretprobe is
-        * unregistered. Same for memory allocation.
-        */
-       kref_init(&lttng_krp->kref_alloc);
-       kref_get(&lttng_krp->kref_alloc);       /* inc refcount to 2, no overflow. */
-       kref_init(&lttng_krp->kref_register);
-       kref_get(&lttng_krp->kref_register);    /* inc refcount to 2, no overflow. */
-
-       /*
-        * Ensure the memory we just allocated doesn't trigger page faults.
-        * Well... kprobes itself puts the page fault handler on the blacklist,
-        * but we can never be too careful.
-        */
-       wrapper_vmalloc_sync_mappings();
-
-       ret = register_kretprobe(&lttng_krp->krp);
-       if (ret)
-               goto register_error;
-       return 0;
-
-register_error:
-       kfree(lttng_krp->krp.kp.symbol_name);
-name_error:
-       kfree(lttng_krp);
-krp_error:
-       kfree(event_return->desc->fields);
-       kfree(event_return->desc->name);
-       kfree(event_return->desc);
-event_return_error:
-       kfree(event_entry->desc->fields);
-       kfree(event_entry->desc->name);
-       kfree(event_entry->desc);
-error:
-       return ret;
-}
-EXPORT_SYMBOL_GPL(lttng_kretprobes_register);
-
-static
-void _lttng_kretprobes_unregister_release(struct kref *kref)
-{
-       struct lttng_krp *lttng_krp =
-               container_of(kref, struct lttng_krp, kref_register);
-       unregister_kretprobe(&lttng_krp->krp);
-}
-
-void lttng_kretprobes_unregister(struct lttng_event *event)
-{
-       kref_put(&event->u.kretprobe.lttng_krp->kref_register,
-               _lttng_kretprobes_unregister_release);
-}
-EXPORT_SYMBOL_GPL(lttng_kretprobes_unregister);
-
-static
-void _lttng_kretprobes_release(struct kref *kref)
-{
-       struct lttng_krp *lttng_krp =
-               container_of(kref, struct lttng_krp, kref_alloc);
-       kfree(lttng_krp->krp.kp.symbol_name);
-}
-
-void lttng_kretprobes_destroy_private(struct lttng_event *event)
-{
-       kfree(event->desc->fields);
-       kfree(event->desc->name);
-       kfree(event->desc);
-       kref_put(&event->u.kretprobe.lttng_krp->kref_alloc,
-               _lttng_kretprobes_release);
-}
-EXPORT_SYMBOL_GPL(lttng_kretprobes_destroy_private);
-
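-/*
- * The entry and return events share a single kretprobe, so enabling or
- * disabling one of them toggles both enabled states together.
- */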
-int lttng_kretprobes_event_enable_state(struct lttng_event *event,
-               int enable)
-{
-       struct lttng_event *event_return;
-       struct lttng_krp *lttng_krp;
-
-       if (event->instrumentation != LTTNG_KERNEL_KRETPROBE) {
-               return -EINVAL;
-       }
-       if (event->enabled == enable) {
-               return -EBUSY;
-       }
-       lttng_krp = event->u.kretprobe.lttng_krp;
-       event_return = lttng_krp->event[EVENT_RETURN];
-       WRITE_ONCE(event->enabled, enable);
-       WRITE_ONCE(event_return->enabled, enable);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(lttng_kretprobes_event_enable_state);
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng kretprobes probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-9p.c b/probes/lttng-probe-9p.c
deleted file mode 100644 (file)
index ec588e4..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-9p.c
- *
- * LTTng 9p probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2018 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#include <linux/module.h>
-#include <net/9p/9p.h>
-#include <net/9p/client.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/9p.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/9p.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Michael Jeanson <mjeanson@efficios.com>");
-MODULE_DESCRIPTION("LTTng 9p probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-asoc.c b/probes/lttng-probe-asoc.c
deleted file mode 100644 (file)
index 6cac9c6..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-asoc.c
- *
- * LTTng asoc probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <sound/jack.h>
-#include <sound/soc.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/asoc.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/asoc.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_AUTHOR("Paul Woegerer <paul_woegerer@mentor.com>");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng asoc probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-block.c b/probes/lttng-probe-block.c
deleted file mode 100644 (file)
index 5f8e830..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-block.c
- *
- * LTTng block probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/blktrace_api.h>
-#include <lttng/tracer.h>
-#include <lttng/kernel-version.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/block.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/block.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng block probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-btrfs.c b/probes/lttng-probe-btrfs.c
deleted file mode 100644 (file)
index 4461c99..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-btrfs.c
- *
- * LTTng btrfs probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/version.h>
-#include <../fs/btrfs/ctree.h>
-#include <../fs/btrfs/transaction.h>
-#include <../fs/btrfs/volumes.h>
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0))
-#include <../fs/btrfs/block-group.h>
-#endif
-#include <linux/dcache.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/btrfs.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/btrfs.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng btrfs probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-compaction.c b/probes/lttng-probe-compaction.c
deleted file mode 100644 (file)
index f8ddf38..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-compaction.c
- *
- * LTTng compaction probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/compaction.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/compaction.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng compaction probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-ext3.c b/probes/lttng-probe-ext3.c
deleted file mode 100644 (file)
index 70adb56..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-ext3.c
- *
- * LTTng ext3 probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/dcache.h>
-#include <linux/version.h>
-#include <lttng/tracer.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-#include <../fs/ext3/ext3.h>
-#else
-#include <linux/ext3_fs_i.h>
-#endif
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/ext3.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/ext3.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>, Paul Woegerer <paul_woegerer@mentor.com>, and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng ext3 probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-ext4.c b/probes/lttng-probe-ext4.c
deleted file mode 100644 (file)
index 0d0e3a8..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-ext4.c
- *
- * LTTng ext4 probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <../fs/ext4/ext4.h>
-#include <../fs/ext4/mballoc.h>
-#include <../fs/ext4/ext4_extents.h>
-#include <linux/dcache.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/ext4.h>
-
-#include <lttng/kernel-version.h>
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/ext4.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng ext4 probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-gpio.c b/probes/lttng-probe-gpio.c
deleted file mode 100644 (file)
index 42b9b13..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-gpio.c
- *
- * LTTng gpio probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/gpio.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/gpio.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_DESCRIPTION("LTTng gpio probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-i2c.c b/probes/lttng-probe-i2c.c
deleted file mode 100644 (file)
index 9dc1c79..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-i2c.c
- *
- * LTTng i2c probes.
- *
- * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2016 Simon Marchi <simon.marchi@ericsson.com>
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/i2c.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-static int extract_sensitive_payload;
-module_param(extract_sensitive_payload, int, 0644);
-MODULE_PARM_DESC(extract_sensitive_payload,
-               "Whether to extract possibly sensitive data from events (i2c "
-               "buffer contents) or not (1 or 0, default: 0).");
-
-#include <instrumentation/events/i2c.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Simon Marchi <simon.marchi@ericsson.com>");
-MODULE_DESCRIPTION("LTTng i2c probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-irq.c b/probes/lttng-probe-irq.c
deleted file mode 100644 (file)
index f88093b..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-irq.c
- *
- * LTTng irq probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/irq.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/irq.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng irq probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-jbd.c b/probes/lttng-probe-jbd.c
deleted file mode 100644 (file)
index 21c0798..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-jbd.c
- *
- * LTTng jbd probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/jbd.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/jbd.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>, Paul Woegerer <paul_woegerer@mentor.com>, and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng jbd probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-jbd2.c b/probes/lttng-probe-jbd2.c
deleted file mode 100644 (file)
index ac3ac93..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-jbd2.c
- *
- * LTTng jbd2 probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/jbd2.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/jbd2.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng jbd2 probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-kmem.c b/probes/lttng-probe-kmem.c
deleted file mode 100644 (file)
index 1b120ab..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-kmem.c
- *
- * LTTng kmem probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-/*
- * This page_alloc.h wrapper needs to be included before gfpflags.h because it
- * overrides a function with a define.
- */
-#include <wrapper/page_alloc.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/kmem.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/kmem.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng kmem probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-kvm-x86-mmu.c b/probes/lttng-probe-kvm-x86-mmu.c
deleted file mode 100644 (file)
index 9ccc242..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-kvm-x86-mmu.c
- *
- * LTTng kvm mmu probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/kvm_host.h>
-#include <lttng/tracer.h>
-#include <lttng/kernel-version.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0))
-#include <kvm/iodev.h>
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)) */
-#include <../../virt/kvm/iodev.h>
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)) */
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <wrapper/tracepoint.h>
-
-#include <../../arch/x86/kvm/mmutrace.h>
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-
-#define TRACE_INCLUDE_PATH instrumentation/events/arch/x86/kvm
-#include <instrumentation/events/arch/x86/kvm/mmutrace.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng kvm mmu probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-kvm-x86.c b/probes/lttng-probe-kvm-x86.c
deleted file mode 100644 (file)
index 4e4f5c8..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-kvm-x86.c
- *
- * LTTng kvm probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/kvm_host.h>
-#include <lttng/tracer.h>
-#include <lttng/kernel-version.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
-#include <kvm_emulate.h>
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0))
-#include <kvm/iodev.h>
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)) */
-#include <../../virt/kvm/iodev.h>
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)) */
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/kvm.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-
-#define TRACE_INCLUDE_PATH instrumentation/events/arch/x86/kvm
-#include <instrumentation/events/arch/x86/kvm/trace.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng kvm probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-kvm.c b/probes/lttng-probe-kvm.c
deleted file mode 100644 (file)
index 8b30f26..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-kvm.c
- *
- * LTTng kvm probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/kvm_host.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/kvm.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/kvm.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng kvm probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-lock.c b/probes/lttng-probe-lock.c
deleted file mode 100644 (file)
index 2ab9138..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-lock.c
- *
- * LTTng lock probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <linux/version.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/lock.h>
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/lock.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com> and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng lock probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-module.c b/probes/lttng-probe-module.c
deleted file mode 100644 (file)
index 4f4f4a5..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-module.c
- *
- * LTTng module probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/module.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/module.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng module probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-napi.c b/probes/lttng-probe-napi.c
deleted file mode 100644 (file)
index ce2bf8c..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-napi.c
- *
- * LTTng napi probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/napi.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/napi.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_DESCRIPTION("LTTng napi probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-net.c b/probes/lttng-probe-net.c
deleted file mode 100644 (file)
index a0ef450..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-net.c
- *
- * LTTng net probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/net.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/net.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_DESCRIPTION("LTTng net probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-power.c b/probes/lttng-probe-power.c
deleted file mode 100644 (file)
index d5ac38f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-power.c
- *
- * LTTng power probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/power.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/power.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng power probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-preemptirq.c b/probes/lttng-probe-preemptirq.c
deleted file mode 100644 (file)
index 497b2de..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-preemptirq.c
- *
- * LTTng preemptirq probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *               2017 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/uaccess.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/preemptirq.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/preemptirq.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Michael Jeanson <mjeanson@efficios.com>");
-MODULE_DESCRIPTION("LTTng preemptirq probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-printk.c b/probes/lttng-probe-printk.c
deleted file mode 100644 (file)
index 3a37826..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-printk.c
- *
- * LTTng printk probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/printk.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/printk.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng printk probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-random.c b/probes/lttng-probe-random.c
deleted file mode 100644 (file)
index 4cf6ce8..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-random.c
- *
- * LTTng random probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/random.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/random.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng random probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-rcu.c b/probes/lttng-probe-rcu.c
deleted file mode 100644 (file)
index 89c7213..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-rcu.c
- *
- * LTTng rcu probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <linux/rcupdate.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/rcu.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/rcu.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng rcu probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-regmap.c b/probes/lttng-probe-regmap.c
deleted file mode 100644 (file)
index f3eaef8..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-regmap.c
- *
- * LTTng regmap probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <lttng/kernel-version.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <../../drivers/base/regmap/trace.h>
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/regmap.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng regmap probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-regulator.c b/probes/lttng-probe-regulator.c
deleted file mode 100644 (file)
index 8f45771..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-regulator.c
- *
- * LTTng regulator probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/regulator.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/regulator.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_DESCRIPTION("LTTng regulator probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-rpm.c b/probes/lttng-probe-rpm.c
deleted file mode 100644 (file)
index eea7bc3..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-rpm.c
- *
- * LTTng rpm probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/rpm.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/rpm.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng rpm probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-sched.c b/probes/lttng-probe-sched.c
deleted file mode 100644 (file)
index ba1b3f7..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-sched.c
- *
- * LTTng sched probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/sched.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/sched.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng sched probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-scsi.c b/probes/lttng-probe-scsi.c
deleted file mode 100644 (file)
index a367c51..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-scsi.c
- *
- * LTTng scsi probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <scsi/scsi_device.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/scsi.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/scsi.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng scsi probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-signal.c b/probes/lttng-probe-signal.c
deleted file mode 100644 (file)
index aee9468..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-signal.c
- *
- * LTTng signal probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/signal.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/signal.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng signal probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-skb.c b/probes/lttng-probe-skb.c
deleted file mode 100644 (file)
index 682a9f0..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-skb.c
- *
- * LTTng skb probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/skb.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/skb.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng skb probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-sock.c b/probes/lttng-probe-sock.c
deleted file mode 100644 (file)
index f3e1ebf..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-sock.c
- *
- * LTTng sock probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/sock.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/sock.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_DESCRIPTION("LTTng sock probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-statedump.c b/probes/lttng-probe-statedump.c
deleted file mode 100644 (file)
index 81e0613..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-statedump.c
- *
- * LTTng statedump probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/netlink.h>
-#include <linux/inet.h>
-#include <linux/ip.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/sched.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TP_SESSION_CHECK
-#define TRACE_INCLUDE_PATH instrumentation/events
-#define TRACE_INCLUDE_FILE lttng-statedump
-
-#include <instrumentation/events/lttng-statedump.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng statedump probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-sunrpc.c b/probes/lttng-probe-sunrpc.c
deleted file mode 100644 (file)
index 2244a57..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-sunrpc.c
- *
- * LTTng sunrpc probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/sunrpc.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/rpc.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng sunrpc probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-timer.c b/probes/lttng-probe-timer.c
deleted file mode 100644 (file)
index 149fcbc..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-timer.c
- *
- * LTTng timer probes.
- *
- * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-
-#include <linux/sched.h>
-#include <trace/events/timer.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/timer.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng timer probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-udp.c b/probes/lttng-probe-udp.c
deleted file mode 100644 (file)
index ad7707b..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-udp.c
- *
- * LTTng udp probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/udp.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/udp.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_DESCRIPTION("LTTng udp probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-user.c b/probes/lttng-probe-user.c
deleted file mode 100644 (file)
index 009cfed..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-probe-user.c
- *
- * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <wrapper/uaccess.h>
-#include <lttng/probe-user.h>
-
-/*
- * Calculate the string length. Include the final null terminating character if
- * there is one, or end at the first fault. Disabling page faults ensures that
- * we can safely call this from pretty much any context, including those where
- * the caller holds mmap_sem, or any lock which nests in mmap_sem.
- */
-long lttng_strlen_user_inatomic(const char *addr)
-{
-       long count = 0;
-       mm_segment_t old_fs;
-
-       if (!addr)
-               return 0;
-
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       pagefault_disable();
-       for (;;) {
-               char v;
-               unsigned long ret;
-
-               if (unlikely(!lttng_access_ok(VERIFY_READ,
-                               (__force const char __user *) addr,
-                               sizeof(v))))
-                       break;
-               ret = __copy_from_user_inatomic(&v,
-                       (__force const char __user *)(addr),
-                       sizeof(v));
-               if (unlikely(ret > 0))
-                       break;
-               count++;
-               if (unlikely(!v))
-                       break;
-               addr++;
-       }
-       pagefault_enable();
-       set_fs(old_fs);
-       return count;
-}
-EXPORT_SYMBOL_GPL(lttng_strlen_user_inatomic);
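
lttng_strlen_user_inatomic() is written to be callable from tracing code that
must not sleep: page faults are disabled around the copy loop, so an unmapped
page ends the scan instead of faulting. A usage sketch; the wrapper below is
hypothetical, only the return-value convention comes from the function above:

    #include <lttng/probe-user.h>

    /*
     * Size a user-space string before serializing it into an event
     * payload. Counts the terminating NUL when it is readable, and
     * returns 0 for a NULL pointer.
     */
    static long user_string_field_len(const char __user *ustr)
    {
            return lttng_strlen_user_inatomic((const char __force *) ustr);
    }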
diff --git a/probes/lttng-probe-v4l2.c b/probes/lttng-probe-v4l2.c
deleted file mode 100644 (file)
index 0c86da0..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-v4l2.c
- *
- * LTTng v4l2 probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012,2013 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <linux/videodev2.h>
-#include <media/videobuf2-core.h>
-#include <media/v4l2-common.h>
-#include <lttng/tracer.h>
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/v4l2.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/v4l2.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_DESCRIPTION("LTTng v4l2 probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-vmscan.c b/probes/lttng-probe-vmscan.c
deleted file mode 100644 (file)
index 8e1f605..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-vmscan.c
- *
- * LTTng vmscan probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/vmscan.h>
-
-#include <lttng/kernel-version.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/mm_vmscan.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
-MODULE_AUTHOR("Paul Woegerer <paul_woegerer@mentor.com>");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng vmscan probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-workqueue.c b/probes/lttng-probe-workqueue.c
deleted file mode 100644 (file)
index 57cd560..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-workqueue.c
- *
- * LTTng workqueue probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <linux/idr.h>
-#include <lttng/tracer.h>
-
-struct cpu_workqueue_struct;
-struct pool_workqueue;
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/workqueue.h>
-
-#include <wrapper/tracepoint.h>
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/workqueue.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng workqueue probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-writeback.c b/probes/lttng-probe-writeback.c
deleted file mode 100644 (file)
index 727f2b7..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-writeback.c
- *
- * LTTng writeback probes.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Mentor Graphics Corp.
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/mm.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <trace/events/writeback.h>
-
-#include <lttng/kernel-version.h>
-#include <wrapper/writeback.h>
-
-/* #if <check version number if global_dirty_limit will be exported> */
-
-#define global_dirty_limit wrapper_global_dirty_limit()
-
-/* #endif <check version number> */
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-
-#include <instrumentation/events/writeback.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
-MODULE_DESCRIPTION("LTTng writeback probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-x86-exceptions.c b/probes/lttng-probe-x86-exceptions.c
deleted file mode 100644 (file)
index 4a7d4e4..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-x86-exceptions.c
- *
- * LTTng x86 exceptions probes.
- *
- * Copyright (C) 2010-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <asm/trace/exceptions.h>
-
-#include <wrapper/tracepoint.h>
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events/arch/x86
-
-#include <instrumentation/events/arch/x86/exceptions.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng x86 exceptions probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-probe-x86-irq-vectors.c b/probes/lttng-probe-x86-irq-vectors.c
deleted file mode 100644 (file)
index 1f64406..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * probes/lttng-probe-x86-irq-vectors.c
- *
- * LTTng x86 irq vectors probes.
- *
- * Copyright (C) 2010-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <lttng/tracer.h>
-
-/*
- * Create the tracepoint static inlines from the kernel to validate that our
- * trace event macros match the kernel we run on.
- */
-#include <asm/trace/irq_vectors.h>
-
-#include <wrapper/tracepoint.h>
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-
-/*
- * Create LTTng tracepoint probes.
- */
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events/arch/x86
-
-#include <instrumentation/events/arch/x86/irq_vectors.h>
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("LTTng x86 irq vectors probes");
-MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
-       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
-       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
-       LTTNG_MODULES_EXTRAVERSION);
diff --git a/probes/lttng-uprobes.c b/probes/lttng-uprobes.c
deleted file mode 100644 (file)
index c0f6e7c..0000000
+++ /dev/null
@@ -1,250 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * probes/lttng-uprobes.c
- *
- * LTTng uprobes integration module.
- *
- * Copyright (C) 2013 Yannick Brosseau <yannick.brosseau@gmail.com>
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- */
-
-#include <linux/fdtable.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/namei.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <lttng/events.h>
-#include <lttng/tracer.h>
-#include <wrapper/irqflags.h>
-#include <ringbuffer/frontend_types.h>
-#include <wrapper/uprobes.h>
-#include <wrapper/vmalloc.h>
-
-static
-int lttng_uprobes_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
-{
-       struct lttng_uprobe_handler *uprobe_handler =
-               container_of(uc, struct lttng_uprobe_handler, up_consumer);
-       struct lttng_event *event = uprobe_handler->event;
-       struct lttng_probe_ctx lttng_probe_ctx = {
-               .event = event,
-               .interruptible = !lttng_regs_irqs_disabled(regs),
-       };
-       struct lttng_channel *chan = event->chan;
-       struct lib_ring_buffer_ctx ctx;
-       int ret;
-
-       struct {
-               unsigned long ip;
-       } payload;
-
-       if (unlikely(!READ_ONCE(chan->session->active)))
-               return 0;
-       if (unlikely(!READ_ONCE(chan->enabled)))
-               return 0;
-       if (unlikely(!READ_ONCE(event->enabled)))
-               return 0;
-
-       lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx,
-               sizeof(payload), lttng_alignof(payload), -1);
-
-       ret = chan->ops->event_reserve(&ctx, event->id);
-       if (ret < 0)
-               return 0;
-
-       /* Event payload. */
-       payload.ip = (unsigned long)instruction_pointer(regs);
-
-       lib_ring_buffer_align_ctx(&ctx, lttng_alignof(payload));
-       chan->ops->event_write(&ctx, &payload, sizeof(payload));
-       chan->ops->event_commit(&ctx);
-       return 0;
-}
-
-/*
- * Create event description.
- */
-static
-int lttng_create_uprobe_event(const char *name, struct lttng_event *event)
-{
-       struct lttng_event_desc *desc;
-       struct lttng_event_field *fields;
-       int ret;
-
-       desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
-       if (!desc)
-               return -ENOMEM;
-       desc->name = kstrdup(name, GFP_KERNEL);
-       if (!desc->name) {
-               ret = -ENOMEM;
-               goto error_str;
-       }
-
-       desc->nr_fields = 1;
-       desc->fields = fields =
-               kzalloc(1 * sizeof(struct lttng_event_field), GFP_KERNEL);
-
-       if (!desc->fields) {
-               ret = -ENOMEM;
-               goto error_fields;
-       }
-       fields[0].name = "ip";
-       fields[0].type.atype = atype_integer;
-       fields[0].type.u.integer.size = sizeof(unsigned long) * CHAR_BIT;
-       fields[0].type.u.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
-       fields[0].type.u.integer.signedness = lttng_is_signed_type(unsigned long);
-       fields[0].type.u.integer.reverse_byte_order = 0;
-       fields[0].type.u.integer.base = 16;
-       fields[0].type.u.integer.encoding = lttng_encode_none;
-
-       desc->owner = THIS_MODULE;
-       event->desc = desc;
-
-       return 0;
-
-error_fields:
-       kfree(desc->name);
-error_str:
-       kfree(desc);
-       return ret;
-}
-
-/*
- * Returns the inode struct from the current task and an fd. The inode is
- * grabbed by this function and must be put once we are done with it using
- * iput().
- */
-static struct inode *get_inode_from_fd(int fd)
-{
-       struct file *file;
-       struct inode *inode;
-
-       rcu_read_lock();
-       /*
-        * Returns the file backing the given fd. Needs to be done inside an RCU
-        * critical section.
-        */
-       file = fcheck(fd);
-       if (file == NULL) {
-               printk(KERN_WARNING "Cannot access file backing the fd(%d)\n", fd);
-               inode = NULL;
-               goto error;
-       }
-
-       /* Grab a reference on the inode. */
-       inode = igrab(file->f_path.dentry->d_inode);
-       if (inode == NULL)
-               printk(KERN_WARNING "Cannot grab a reference on the inode.\n");
-error:
-       rcu_read_unlock();
-       return inode;
-}
-
-int lttng_uprobes_add_callsite(struct lttng_event *event,
-       struct lttng_kernel_event_callsite __user *callsite)
-{
-       int ret = 0;
-       struct lttng_uprobe_handler *uprobe_handler;
-
-       if (!event) {
-               ret = -EINVAL;
-               goto end;
-       }
-
-       uprobe_handler = kzalloc(sizeof(struct lttng_uprobe_handler), GFP_KERNEL);
-       if (!uprobe_handler) {
-               printk(KERN_WARNING "Error allocating uprobe_uprobe_handlers");
-               ret = -ENOMEM;
-               goto end;
-       }
-
-       /* Ensure the memory we just allocated doesn't trigger page faults. */
-       wrapper_vmalloc_sync_mappings();
-
-       uprobe_handler->event = event;
-       uprobe_handler->up_consumer.handler = lttng_uprobes_handler_pre;
-
-       ret = copy_from_user(&uprobe_handler->offset, &callsite->u.uprobe.offset, sizeof(uint64_t));
-       if (ret) {
-               goto register_error;
-       }
-
-       ret = wrapper_uprobe_register(event->u.uprobe.inode,
-                     uprobe_handler->offset, &uprobe_handler->up_consumer);
-       if (ret) {
-               printk(KERN_WARNING "Error registering probe on inode %lu "
-                      "and offset 0x%llx\n", event->u.uprobe.inode->i_ino,
-                      uprobe_handler->offset);
-               ret = -1;
-               goto register_error;
-       }
-
-       list_add(&uprobe_handler->node, &event->u.uprobe.head);
-
-       return ret;
-
-register_error:
-       kfree(uprobe_handler);
-end:
-       return ret;
-}
-EXPORT_SYMBOL_GPL(lttng_uprobes_add_callsite);
-
-int lttng_uprobes_register(const char *name, int fd, struct lttng_event *event)
-{
-       int ret = 0;
-       struct inode *inode;
-
-       ret = lttng_create_uprobe_event(name, event);
-       if (ret)
-               goto error;
-
-       inode = get_inode_from_fd(fd);
-       if (!inode) {
-               printk(KERN_WARNING "Cannot get inode from fd\n");
-               ret = -EBADF;
-               goto inode_error;
-       }
-       event->u.uprobe.inode = inode;
-       INIT_LIST_HEAD(&event->u.uprobe.head);
-
-       return 0;
-
-inode_error:
-       kfree(event->desc->name);
-       kfree(event->desc);
-error:
-       return ret;
-}
-EXPORT_SYMBOL_GPL(lttng_uprobes_register);
-
-void lttng_uprobes_unregister(struct lttng_event *event)
-{
-       struct lttng_uprobe_handler *iter, *tmp;
-
-       /*
-        * Iterate over the list of handlers, remove each handler from the list
-        * and free the struct.
-        */
-       list_for_each_entry_safe(iter, tmp, &event->u.uprobe.head, node) {
-               wrapper_uprobe_unregister(event->u.uprobe.inode, iter->offset,
-                       &iter->up_consumer);
-               list_del(&iter->node);
-               kfree(iter);
-       }
-}
-EXPORT_SYMBOL_GPL(lttng_uprobes_unregister);
-
-void lttng_uprobes_destroy_private(struct lttng_event *event)
-{
-       iput(event->u.uprobe.inode);
-       kfree(event->desc->name);
-       kfree(event->desc);
-}
-EXPORT_SYMBOL_GPL(lttng_uprobes_destroy_private);
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Yannick Brosseau");
-MODULE_DESCRIPTION("Linux Trace Toolkit Uprobes Support");
diff --git a/probes/lttng.c b/probes/lttng.c
deleted file mode 100644 (file)
index 8a0dd4b..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng.c
- *
- * LTTng logger ABI
- *
- * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/module.h>
-#include <linux/tracepoint.h>
-#include <linux/uaccess.h>
-#include <linux/gfp.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/miscdevice.h>
-#include <wrapper/vmalloc.h>
-#include <lttng/events.h>
-
-#define TP_MODULE_NOAUTOLOAD
-#define LTTNG_PACKAGE_BUILD
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH instrumentation/events
-#define TRACE_INCLUDE_FILE lttng
-#define LTTNG_INSTRUMENTATION
-
-#include <instrumentation/events/lttng.h>
-
-/* Events written through logger are truncated at 1024 bytes */
-#define LTTNG_LOGGER_COUNT_MAX 1024
-#define LTTNG_LOGGER_FILE      "lttng-logger"
-
-DEFINE_TRACE(lttng_logger);
-
-static struct proc_dir_entry *lttng_logger_dentry;
-
-/**
- * lttng_logger_write - write a userspace string into the trace system
- * @file: file pointer
- * @user_buf: user string
- * @count: length to copy
- * @ppos: file position
- *
- * Copy a userspace string into a trace event named "lttng:logger".
- * Copies at most @count bytes into the event "msg" dynamic array.
- * Truncates the count at LTTNG_LOGGER_COUNT_MAX. Returns the number of
- * bytes copied from the source.
- * Returns -EFAULT on error.
- */
-static
-ssize_t lttng_logger_write(struct file *file, const char __user *user_buf,
-                   size_t count, loff_t *ppos)
-{
-       int nr_pages = 1, i;
-       unsigned long uaddr = (unsigned long) user_buf;
-       struct page *pages[2];
-       ssize_t written;
-       int ret;
-
-       /* Truncate count */
-       if (unlikely(count > LTTNG_LOGGER_COUNT_MAX))
-               count = LTTNG_LOGGER_COUNT_MAX;
-
-       /* How many pages are we dealing with? */
-       if (unlikely((uaddr & PAGE_MASK) != ((uaddr + count) & PAGE_MASK)))
-               nr_pages = 2;
-
-       /* Pin userspace pages */
-       ret = get_user_pages_fast(uaddr, nr_pages, 0, pages);
-       if (unlikely(ret < nr_pages)) {
-               if (ret > 0) {
-                       BUG_ON(ret != 1);
-                       put_page(pages[0]);
-               }
-               written = -EFAULT;
-               goto end;
-       }
-
-       /* Trace the event */
-       trace_lttng_logger(user_buf, count);
-       written = count;
-       *ppos += written;
-
-       for (i = 0; i < nr_pages; i++)
-               put_page(pages[i]);
-end:
-       return written;
-}
-
-static const struct file_operations lttng_logger_operations = {
-       .write = lttng_logger_write,
-};
-
-/*
- * Linux 5.6 introduced a separate proc_ops struct for /proc operations
- * to decouple it from the vfs.
- */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
-static const struct proc_ops lttng_logger_proc_ops = {
-       .proc_write = lttng_logger_write,
-};
-#else
-#define lttng_logger_proc_ops lttng_logger_operations
-#endif
-
-static struct miscdevice logger_dev = {
-       .minor = MISC_DYNAMIC_MINOR,
-       .name = "lttng-logger",
-       .mode = 0666,
-       .fops = &lttng_logger_operations
-};
-
-int __init lttng_logger_init(void)
-{
-       int ret = 0;
-
-       wrapper_vmalloc_sync_mappings();
-
-       /* /dev/lttng-logger */
-       ret = misc_register(&logger_dev);
-       if (ret) {
-               printk(KERN_ERR "Error creating LTTng logger device\n");
-               goto error;
-       }
-
-       /* /proc/lttng-logger */
-       lttng_logger_dentry = proc_create_data(LTTNG_LOGGER_FILE,
-                               S_IRUGO | S_IWUGO, NULL,
-                               &lttng_logger_proc_ops, NULL);
-       if (!lttng_logger_dentry) {
-               printk(KERN_ERR "Error creating LTTng logger proc file\n");
-               ret = -ENOMEM;
-               goto error_proc;
-       }
-
-       /* Init */
-       ret = __lttng_events_init__lttng();
-       if (ret)
-               goto error_events;
-       return ret;
-
-error_events:
-       remove_proc_entry("lttng-logger", NULL);
-error_proc:
-       misc_deregister(&logger_dev);
-error:
-       return ret;
-}
-
-void lttng_logger_exit(void)
-{
-       __lttng_events_exit__lttng();
-       if (lttng_logger_dentry)
-               remove_proc_entry("lttng-logger", NULL);
-       misc_deregister(&logger_dev);
-}
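
With this module loaded, any process can emit an event through either
/dev/lttng-logger or /proc/lttng-logger: each write() becomes one lttng_logger
event, truncated at 1024 bytes as documented above. A minimal user-space
sketch (the message is illustrative):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char msg[] = "hello from user space";
            int fd = open("/proc/lttng-logger", O_WRONLY);

            if (fd < 0)
                    return 1;
            /* One write() == one lttng_logger event (at most 1024 bytes). */
            (void) write(fd, msg, strlen(msg));
            close(fd);
            return 0;
    }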
diff --git a/src/Kbuild b/src/Kbuild
new file mode 100644 (file)
index 0000000..fad4460
--- /dev/null
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+
+TOP_LTTNG_MODULES_DIR := $(shell dirname $(lastword $(MAKEFILE_LIST)))/..
+
+include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common
+
+ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)/include
+
+obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-discard.o
+obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-overwrite.o
+obj-$(CONFIG_LTTNG) += lttng-ring-buffer-metadata-client.o
+obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-discard.o
+obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-overwrite.o
+obj-$(CONFIG_LTTNG) += lttng-ring-buffer-metadata-mmap-client.o
+obj-$(CONFIG_LTTNG) += lttng-clock.o
+
+obj-$(CONFIG_LTTNG) += lttng-tracer.o
+
+obj-$(CONFIG_LTTNG) += lttng-wrapper.o
+
+lttng-tracer-objs := lttng-events.o lttng-abi.o lttng-string-utils.o \
+                     lttng-probes.o lttng-context.o \
+                     lttng-context-pid.o lttng-context-procname.o \
+                     lttng-context-prio.o lttng-context-nice.o \
+                     lttng-context-vpid.o lttng-context-tid.o \
+                     lttng-context-vtid.o lttng-context-ppid.o \
+                     lttng-context-vppid.o lttng-context-cpu-id.o \
+                     lttng-context-uid.o \
+                     lttng-context-euid.o \
+                     lttng-context-suid.o \
+                     lttng-context-gid.o \
+                     lttng-context-egid.o \
+                     lttng-context-sgid.o \
+                     lttng-context-vuid.o \
+                     lttng-context-veuid.o \
+                     lttng-context-vsuid.o \
+                     lttng-context-vgid.o \
+                     lttng-context-vegid.o \
+                     lttng-context-vsgid.o \
+                     lttng-context-interruptible.o \
+                     lttng-context-need-reschedule.o \
+                     lttng-context-callstack.o lttng-calibrate.o \
+                     lttng-context-hostname.o \
+                     probes/lttng.o \
+                     lttng-tracker-id.o \
+                     lttng-filter.o lttng-filter-interpreter.o \
+                     lttng-filter-specialize.o \
+                     lttng-filter-validator.o \
+                     probes/lttng-probe-user.o \
+                     lttng-tp-mempool.o
+
+lttng-wrapper-objs := wrapper/page_alloc.o \
+                      wrapper/random.o \
+                      wrapper/trace-clock.o \
+                      wrapper/kallsyms.o \
+                      wrapper/irqdesc.o \
+                      wrapper/fdtable.o \
+                      lttng-wrapper-impl.o
+
+ifneq ($(CONFIG_HAVE_SYSCALL_TRACEPOINTS),)
+  lttng-tracer-objs += lttng-syscalls.o
+endif # CONFIG_HAVE_SYSCALL_TRACEPOINTS
+
+ifneq ($(CONFIG_PERF_EVENTS),)
+  lttng-tracer-objs += lttng-context-perf-counters.o
+endif # CONFIG_PERF_EVENTS
+
+ifneq ($(CONFIG_PREEMPT_RT_FULL),)
+  lttng-tracer-objs += lttng-context-migratable.o
+  lttng-tracer-objs += lttng-context-preemptible.o
+endif # CONFIG_PREEMPT_RT_FULL
+
+ifneq ($(CONFIG_PREEMPT),)
+  lttng-tracer-objs += lttng-context-preemptible.o
+endif
+
+lttng-tracer-objs += $(shell \
+  if [ $(VERSION) -ge 4 \
+    -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 15 \) ] ; then \
+    echo "lttng-tracepoint.o" ; fi;)
+
+lttng-tracer-objs += lttng-context-cgroup-ns.o
+
+ifneq ($(CONFIG_IPC_NS),)
+  lttng-tracer-objs += lttng-context-ipc-ns.o
+endif
+
+ifneq ($(wildcard $(mnt_ns_dep)),)
+  lttng-tracer-objs += lttng-context-mnt-ns.o
+endif
+
+ifneq ($(CONFIG_NET_NS),)
+  lttng-tracer-objs += lttng-context-net-ns.o
+endif
+
+ifneq ($(CONFIG_PID_NS),)
+  lttng-tracer-objs += lttng-context-pid-ns.o
+endif
+
+ifneq ($(CONFIG_USER_NS),)
+  lttng-tracer-objs += lttng-context-user-ns.o
+endif
+
+ifneq ($(CONFIG_UTS_NS),)
+  lttng-tracer-objs += lttng-context-uts-ns.o
+endif
+
+obj-$(CONFIG_LTTNG) += lttng-statedump.o
+lttng-statedump-objs := lttng-statedump-impl.o
+
+obj-$(CONFIG_LTTNG) += probes/
+obj-$(CONFIG_LTTNG) += lib/
diff --git a/src/lib/Kbuild b/src/lib/Kbuild
new file mode 100644 (file)
index 0000000..82049e9
--- /dev/null
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+
+TOP_LTTNG_MODULES_DIR := $(shell dirname $(lastword $(MAKEFILE_LIST)))/../..
+
+include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common
+
+ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)/include
+
+obj-$(CONFIG_LTTNG) += lttng-lib-ring-buffer.o
+
+lttng-lib-ring-buffer-objs := \
+  ringbuffer/ring_buffer_backend.o \
+  ringbuffer/ring_buffer_frontend.o \
+  ringbuffer/ring_buffer_iterator.o \
+  ringbuffer/ring_buffer_vfs.o \
+  ringbuffer/ring_buffer_splice.o \
+  ringbuffer/ring_buffer_mmap.o \
+  prio_heap/lttng_prio_heap.o \
+  ../wrapper/splice.o
+
+# vim:syntax=make
diff --git a/src/lib/prio_heap/lttng_prio_heap.c b/src/lib/prio_heap/lttng_prio_heap.c
new file mode 100644 (file)
index 0000000..0b85ddb
--- /dev/null
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng_prio_heap.c
+ *
+ * Priority heap containing pointers. Based on CLRS, chapter 6.
+ *
+ * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/slab.h>
+#include <lttng/prio_heap.h>
+#include <wrapper/vmalloc.h>
+
+#ifdef DEBUG_HEAP
+void lttng_check_heap(const struct lttng_ptr_heap *heap)
+{
+       size_t i;
+
+       if (!heap->len)
+               return;
+
+       for (i = 1; i < heap->len; i++)
+               WARN_ON_ONCE(!heap->gt(heap->ptrs[i], heap->ptrs[0]));
+}
+#endif
+
+static
+size_t parent(size_t i)
+{
+       return (i - 1) >> 1;
+}
+
+static
+size_t left(size_t i)
+{
+       return (i << 1) + 1;
+}
+
+static
+size_t right(size_t i)
+{
+       return (i << 1) + 2;
+}
+
+/*
+ * Copy of heap->ptrs pointer is invalid after heap_grow.
+ */
+static
+int heap_grow(struct lttng_ptr_heap *heap, size_t new_len)
+{
+       void **new_ptrs;
+
+       if (heap->alloc_len >= new_len)
+               return 0;
+
+       heap->alloc_len = max_t(size_t, new_len, heap->alloc_len << 1);
+       new_ptrs = lttng_kvmalloc(heap->alloc_len * sizeof(void *), heap->gfpmask);
+       if (!new_ptrs)
+               return -ENOMEM;
+       if (heap->ptrs)
+               memcpy(new_ptrs, heap->ptrs, heap->len * sizeof(void *));
+       lttng_kvfree(heap->ptrs);
+       heap->ptrs = new_ptrs;
+       return 0;
+}
+
+static
+int heap_set_len(struct lttng_ptr_heap *heap, size_t new_len)
+{
+       int ret;
+
+       ret = heap_grow(heap, new_len);
+       if (ret)
+               return ret;
+       heap->len = new_len;
+       return 0;
+}
+
+int lttng_heap_init(struct lttng_ptr_heap *heap, size_t alloc_len,
+             gfp_t gfpmask, int gt(void *a, void *b))
+{
+       heap->ptrs = NULL;
+       heap->len = 0;
+       heap->alloc_len = 0;
+       heap->gt = gt;
+       heap->gfpmask = gfpmask;
+       /*
+        * Minimum size allocated is 1 entry to ensure memory allocation
+        * never fails within heap_replace_max.
+        */
+       return heap_grow(heap, max_t(size_t, 1, alloc_len));
+}
+
+void lttng_heap_free(struct lttng_ptr_heap *heap)
+{
+       lttng_kvfree(heap->ptrs);
+}
+
+static void heapify(struct lttng_ptr_heap *heap, size_t i)
+{
+       void **ptrs = heap->ptrs;
+       size_t l, r, largest;
+
+       for (;;) {
+               void *tmp;
+
+               l = left(i);
+               r = right(i);
+               if (l < heap->len && heap->gt(ptrs[l], ptrs[i]))
+                       largest = l;
+               else
+                       largest = i;
+               if (r < heap->len && heap->gt(ptrs[r], ptrs[largest]))
+                       largest = r;
+               if (largest == i)
+                       break;
+               tmp = ptrs[i];
+               ptrs[i] = ptrs[largest];
+               ptrs[largest] = tmp;
+               i = largest;
+       }
+       lttng_check_heap(heap);
+}
+
+void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p)
+{
+       void *res;
+
+       if (!heap->len) {
+               (void) heap_set_len(heap, 1);
+               heap->ptrs[0] = p;
+               lttng_check_heap(heap);
+               return NULL;
+       }
+
+       /* Replace the current max and heapify */
+       res = heap->ptrs[0];
+       heap->ptrs[0] = p;
+       heapify(heap, 0);
+       return res;
+}
+
+int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p)
+{
+       void **ptrs;
+       size_t pos;
+       int ret;
+
+       ret = heap_set_len(heap, heap->len + 1);
+       if (ret)
+               return ret;
+       ptrs = heap->ptrs;
+       pos = heap->len - 1;
+       while (pos > 0 && heap->gt(p, ptrs[parent(pos)])) {
+               /* Move parent down until we find the right spot */
+               ptrs[pos] = ptrs[parent(pos)];
+               pos = parent(pos);
+       }
+       ptrs[pos] = p;
+       lttng_check_heap(heap);
+       return 0;
+}
+
+void *lttng_heap_remove(struct lttng_ptr_heap *heap)
+{
+       switch (heap->len) {
+       case 0:
+               return NULL;
+       case 1:
+               (void) heap_set_len(heap, 0);
+               return heap->ptrs[0];
+       }
+       /* Shrink, replace the current max by previous last entry and heapify */
+       heap_set_len(heap, heap->len - 1);
+       /* len changed. previous last entry is at heap->len */
+       return lttng_heap_replace_max(heap, heap->ptrs[heap->len]);
+}
+
+void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p)
+{
+       size_t pos, len = heap->len;
+
+       for (pos = 0; pos < len; pos++)
+               if (heap->ptrs[pos] == p)
+                       goto found;
+       return NULL;
+found:
+       if (heap->len == 1) {
+               (void) heap_set_len(heap, 0);
+               lttng_check_heap(heap);
+               return heap->ptrs[0];
+       }
+       /* Replace p with previous last entry and heapify. */
+       heap_set_len(heap, heap->len - 1);
+       /* len changed. previous last entry is at heap->len */
+       heap->ptrs[pos] = heap->ptrs[heap->len];
+       heapify(heap, pos);
+       return p;
+}
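
The heap stores opaque pointers and delegates ordering entirely to the gt()
callback given to lttng_heap_init(), so any keyed structure can be max-heaped.
A usage sketch; struct item and the demo function are hypothetical:

    #include <linux/slab.h>
    #include <linux/types.h>
    #include <lttng/prio_heap.h>

    struct item {
            u64 key;
    };

    static int item_gt(void *a, void *b)
    {
            return ((struct item *) a)->key > ((struct item *) b)->key;
    }

    static struct item *demo(struct item *first, struct item *second)
    {
            struct lttng_ptr_heap heap;
            struct item *max = NULL;

            if (lttng_heap_init(&heap, 16, GFP_KERNEL, item_gt))
                    return NULL;
            if (!lttng_heap_insert(&heap, first) &&
                !lttng_heap_insert(&heap, second))
                    max = lttng_heap_remove(&heap); /* greatest key, O(log n) */
            lttng_heap_free(&heap);
            return max;
    }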
diff --git a/src/lib/ringbuffer/ring_buffer_backend.c b/src/lib/ringbuffer/ring_buffer_backend.c
new file mode 100644 (file)
index 0000000..d6547d7
--- /dev/null
@@ -0,0 +1,1124 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ring_buffer_backend.c
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include <wrapper/mm.h>
+#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
+#include <ringbuffer/config.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+
+/**
+ * lib_ring_buffer_backend_allocate - allocate a channel buffer
+ * @config: ring buffer instance configuration
+ * @buf: the buffer struct
+ * @size: total size of the buffer
+ * @num_subbuf: number of subbuffers
+ * @extra_reader_sb: need extra subbuffer for reader
+ */
+static
+int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
+                                    struct lib_ring_buffer_backend *bufb,
+                                    size_t size, size_t num_subbuf,
+                                    int extra_reader_sb)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
+       unsigned long subbuf_size, mmap_offset = 0;
+       unsigned long num_subbuf_alloc;
+       struct page **pages;
+       unsigned long i;
+
+       num_pages = size >> PAGE_SHIFT;
+
+       /*
+        * Verify that there are enough free pages available on the system for
+        * the current allocation request.
+        * wrapper_check_enough_free_pages uses si_mem_available() if available
+        * and returns whether there should be enough free pages based on the
+        * current estimate.
+        */
+       if (!wrapper_check_enough_free_pages(num_pages))
+               goto not_enough_pages;
+
+       /*
+        * Set the current user thread as the first target of the OOM killer.
+        * If the estimate received by si_mem_available() was off, and we do
+        * end up running out of memory because of this buffer allocation, we
+        * want to kill the offending app first.
+        */
+       wrapper_set_current_oom_origin();
+
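+       /* num_subbuf is a power of 2: divide the pages evenly per subbuffer. */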
+       num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
+       subbuf_size = chanb->subbuf_size;
+       num_subbuf_alloc = num_subbuf;
+
+       if (extra_reader_sb) {
+               num_pages += num_pages_per_subbuf; /* Add pages for reader */
+               num_subbuf_alloc++;
+       }
+
+       pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
+                                  1 << INTERNODE_CACHE_SHIFT),
+                       cpu_to_node(max(bufb->cpu, 0)));
+       if (unlikely(!pages))
+               goto pages_error;
+
+       bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
+                                        * num_subbuf_alloc,
+                                 1 << INTERNODE_CACHE_SHIFT),
+                       GFP_KERNEL | __GFP_NOWARN,
+                       cpu_to_node(max(bufb->cpu, 0)));
+       if (unlikely(!bufb->array))
+               goto array_error;
+
+       for (i = 0; i < num_pages; i++) {
+               pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
+                               GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
+               if (unlikely(!pages[i]))
+                       goto depopulate;
+       }
+       bufb->num_pages_per_subbuf = num_pages_per_subbuf;
+
+       /* Allocate backend pages array elements */
+       for (i = 0; i < num_subbuf_alloc; i++) {
+               bufb->array[i] =
+                       lttng_kvzalloc_node(ALIGN(
+                               sizeof(struct lib_ring_buffer_backend_pages) +
+                               sizeof(struct lib_ring_buffer_backend_page)
+                               * num_pages_per_subbuf,
+                               1 << INTERNODE_CACHE_SHIFT),
+                               GFP_KERNEL | __GFP_NOWARN,
+                               cpu_to_node(max(bufb->cpu, 0)));
+               if (!bufb->array[i])
+                       goto free_array;
+       }
+
+       /* Allocate write-side subbuffer table */
+       bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
+                               sizeof(struct lib_ring_buffer_backend_subbuffer)
+                               * num_subbuf,
+                               1 << INTERNODE_CACHE_SHIFT),
+                               GFP_KERNEL | __GFP_NOWARN,
+                               cpu_to_node(max(bufb->cpu, 0)));
+       if (unlikely(!bufb->buf_wsb))
+               goto free_array;
+
+       for (i = 0; i < num_subbuf; i++)
+               bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
+
+       /* Assign read-side subbuffer table */
+       if (extra_reader_sb)
+               bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
+                                               num_subbuf_alloc - 1);
+       else
+               bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
+
+       /* Allocate subbuffer packet counter table */
+       bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
+                               sizeof(struct lib_ring_buffer_backend_counts)
+                               * num_subbuf,
+                               1 << INTERNODE_CACHE_SHIFT),
+                       GFP_KERNEL | __GFP_NOWARN,
+                       cpu_to_node(max(bufb->cpu, 0)));
+       if (unlikely(!bufb->buf_cnt))
+               goto free_wsb;
+
+       /* Assign pages to page index */
+       for (i = 0; i < num_subbuf_alloc; i++) {
+               for (j = 0; j < num_pages_per_subbuf; j++) {
+                       CHAN_WARN_ON(chanb, page_idx > num_pages);
+                       bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
+                       bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
+                       page_idx++;
+               }
+               if (config->output == RING_BUFFER_MMAP) {
+                       bufb->array[i]->mmap_offset = mmap_offset;
+                       mmap_offset += subbuf_size;
+               }
+       }
+
+       /*
+        * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
+        * will not fault.
+        */
+       wrapper_vmalloc_sync_mappings();
+       wrapper_clear_current_oom_origin();
+       vfree(pages);
+       return 0;
+
+free_wsb:
+       lttng_kvfree(bufb->buf_wsb);
+free_array:
+       for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
+               lttng_kvfree(bufb->array[i]);
+depopulate:
+       /* Free all allocated pages */
+       for (i = 0; (i < num_pages && pages[i]); i++)
+               __free_page(pages[i]);
+       lttng_kvfree(bufb->array);
+array_error:
+       vfree(pages);
+pages_error:
+       wrapper_clear_current_oom_origin();
+not_enough_pages:
+       return -ENOMEM;
+}
+
+int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
+                                  struct channel_backend *chanb, int cpu)
+{
+       const struct lib_ring_buffer_config *config = &chanb->config;
+
+       bufb->chan = container_of(chanb, struct channel, backend);
+       bufb->cpu = cpu;
+
+       return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
+                                               chanb->num_subbuf,
+                                               chanb->extra_reader_sb);
+}
+
+void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       unsigned long i, j, num_subbuf_alloc;
+
+       num_subbuf_alloc = chanb->num_subbuf;
+       if (chanb->extra_reader_sb)
+               num_subbuf_alloc++;
+
+       lttng_kvfree(bufb->buf_wsb);
+       lttng_kvfree(bufb->buf_cnt);
+       for (i = 0; i < num_subbuf_alloc; i++) {
+               for (j = 0; j < bufb->num_pages_per_subbuf; j++)
+                       __free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
+               lttng_kvfree(bufb->array[i]);
+       }
+       lttng_kvfree(bufb->array);
+       bufb->allocated = 0;
+}
+
+void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       unsigned long num_subbuf_alloc;
+       unsigned int i;
+
+       num_subbuf_alloc = chanb->num_subbuf;
+       if (chanb->extra_reader_sb)
+               num_subbuf_alloc++;
+
+       for (i = 0; i < chanb->num_subbuf; i++)
+               bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
+       if (chanb->extra_reader_sb)
+               bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
+                                               num_subbuf_alloc - 1);
+       else
+               bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
+
+       for (i = 0; i < num_subbuf_alloc; i++) {
+               /* Don't reset mmap_offset */
+               v_set(config, &bufb->array[i]->records_commit, 0);
+               v_set(config, &bufb->array[i]->records_unread, 0);
+               bufb->array[i]->data_size = 0;
+               /* Don't reset backend page and virt addresses */
+       }
+       /* Don't reset num_pages_per_subbuf, cpu, allocated */
+       v_set(config, &bufb->records_read, 0);
+}
+
+/*
+ * The frontend is responsible for also calling ring_buffer_backend_reset for
+ * each buffer when calling channel_backend_reset.
+ */
+void channel_backend_reset(struct channel_backend *chanb)
+{
+       struct channel *chan = container_of(chanb, struct channel, backend);
+       const struct lib_ring_buffer_config *config = &chanb->config;
+
+       /*
+        * Don't reset buf_size, subbuf_size, subbuf_size_order,
+        * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
+        * priv, notifiers, config, cpumask and name.
+        */
+       chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+/*
+ * No need to implement a "dead" callback to do a buffer switch here,
+ * because it will happen when tracing is stopped, or will be done by the
+ * switch timer CPU DEAD callback.
+ * We don't free buffers when CPUs go away, because it would make trace
+ * data vanish, which is unwanted.
+ */
+int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
+               struct lttng_cpuhp_node *node)
+{
+       struct channel_backend *chanb = container_of(node,
+                       struct channel_backend, cpuhp_prepare);
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       struct lib_ring_buffer *buf;
+       int ret;
+
+       CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+       buf = per_cpu_ptr(chanb->buf, cpu);
+       ret = lib_ring_buffer_create(buf, chanb, cpu);
+       if (ret) {
+               printk(KERN_ERR
+                 "ring_buffer_cpu_hp_callback: cpu %d "
+                 "buffer creation failed\n", cpu);
+               return ret;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
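+
+/*
+ * Illustrative registration sketch (an assumption: performed once at
+ * module init, outside this file; the callback wrapper name is
+ * hypothetical). cpuhp_setup_state_multi() returns the dynamically
+ * allocated state id, which is then published through
+ * lttng_rb_set_hp_prepare():
+ *
+ *	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN,
+ *			"lib_ring_buffer:prepare",
+ *			example_rb_prepare_cb, NULL);
+ *	if (ret < 0)
+ *		return ret;
+ *	lttng_rb_set_hp_prepare(ret);
+ */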
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/**
+ *     lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
+ *     @nb: notifier block
+ *     @action: hotplug action to take
+ *     @hcpu: CPU number
+ *
+ *     Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
+ */
+static
+int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
+                                             unsigned long action,
+                                             void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+       struct channel_backend *chanb = container_of(nb, struct channel_backend,
+                                                    cpu_hp_notifier);
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       struct lib_ring_buffer *buf;
+       int ret;
+
+       CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+       switch (action) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               buf = per_cpu_ptr(chanb->buf, cpu);
+               ret = lib_ring_buffer_create(buf, chanb, cpu);
+               if (ret) {
+                       printk(KERN_ERR
+                         "ring_buffer_cpu_hp_callback: cpu %d "
+                         "buffer creation failed\n", cpu);
+                       return NOTIFY_BAD;
+               }
+               break;
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               /* No need to do a buffer switch here, because it will happen
+                * when tracing is stopped, or will be done by switch timer CPU
+                * DEAD callback. */
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+#endif
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+/**
+ * channel_backend_init - initialize a channel backend
+ * @chanb: channel backend
+ * @name: channel name
+ * @config: client ring buffer configuration
+ * @priv: client private data
+ * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
+ * @num_subbuf: number of sub-buffers (power of 2)
+ *
+ * Returns 0 on success, a negative error code on failure.
+ *
+ * Creates per-cpu channel buffers using the sizes and attributes
+ * specified.  The created channel buffer files will be named
+ * name_0...name_N-1.  File permissions will be %S_IRUSR.
+ *
+ * Called with CPU hotplug disabled.
+ */
+int channel_backend_init(struct channel_backend *chanb,
+                        const char *name,
+                        const struct lib_ring_buffer_config *config,
+                        void *priv, size_t subbuf_size, size_t num_subbuf)
+{
+       struct channel *chan = container_of(chanb, struct channel, backend);
+       unsigned int i;
+       int ret;
+
+       if (!name)
+               return -EPERM;
+
+       /* Check that the subbuffer size is at least a page. */
+       if (subbuf_size < PAGE_SIZE)
+               return -EINVAL;
+
+       /*
+        * Make sure the number of subbuffers and the subbuffer size are
+        * nonzero powers of 2.
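+        * ((x & (x - 1)) clears the lowest set bit, so it is zero if and
+        * only if x is a power of two; the !x tests below catch zero.)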
+        */
+       if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
+               return -EINVAL;
+       if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
+               return -EINVAL;
+       /*
+        * Overwrite mode buffers require at least 2 subbuffers per
+        * buffer.
+        */
+       if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
+               return -EINVAL;
+
+       ret = subbuffer_id_check_index(config, num_subbuf);
+       if (ret)
+               return ret;
+
+       chanb->priv = priv;
+       chanb->buf_size = num_subbuf * subbuf_size;
+       chanb->subbuf_size = subbuf_size;
+       chanb->buf_size_order = get_count_order(chanb->buf_size);
+       chanb->subbuf_size_order = get_count_order(subbuf_size);
+       chanb->num_subbuf_order = get_count_order(num_subbuf);
+       chanb->extra_reader_sb =
+                       (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
+       chanb->num_subbuf = num_subbuf;
+       strlcpy(chanb->name, name, NAME_MAX);
+       memcpy(&chanb->config, config, sizeof(chanb->config));
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+               if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
+                       return -ENOMEM;
+       }
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+               /* Allocating the buffer per-cpu structures */
+               chanb->buf = alloc_percpu(struct lib_ring_buffer);
+               if (!chanb->buf)
+                       goto free_cpumask;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+               chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
+               ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
+                       &chanb->cpuhp_prepare.node);
+               if (ret)
+                       goto free_bufs;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+               {
+                       /*
+                        * When CPU hotplug is unavailable, a ring buffer
+                        * allocated in an early initcall will not be
+                        * notified of secondary cpus. In that case, we
+                        * need to allocate for all possible cpus.
+                        */
+#ifdef CONFIG_HOTPLUG_CPU
+                       /*
+                        * buf->backend.allocated test takes care of concurrent CPU
+                        * hotplug.
+                        * Priority higher than frontend, so we create the ring buffer
+                        * before we start the timer.
+                        */
+                       chanb->cpu_hp_notifier.notifier_call =
+                                       lib_ring_buffer_cpu_hp_callback;
+                       chanb->cpu_hp_notifier.priority = 5;
+                       register_hotcpu_notifier(&chanb->cpu_hp_notifier);
+
+                       get_online_cpus();
+                       for_each_online_cpu(i) {
+                               ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+                                                        chanb, i);
+                               if (ret)
+                                       goto free_bufs; /* cpu hotplug locked */
+                       }
+                       put_online_cpus();
+#else
+                       for_each_possible_cpu(i) {
+                               ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+                                                        chanb, i);
+                               if (ret)
+                                       goto free_bufs;
+                       }
+#endif
+               }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+       } else {
+               chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
+               if (!chanb->buf)
+                       goto free_cpumask;
+               ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
+               if (ret)
+                       goto free_bufs;
+       }
+       chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+
+       return 0;
+
+free_bufs:
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+               /*
+                * Teardown of lttng_rb_hp_prepare instance
+                * on "add" error is handled within cpu hotplug,
+                * no teardown to do from the caller.
+                */
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#ifdef CONFIG_HOTPLUG_CPU
+               put_online_cpus();
+               unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
+#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+               for_each_possible_cpu(i) {
+                       struct lib_ring_buffer *buf =
+                               per_cpu_ptr(chanb->buf, i);
+
+                       if (!buf->backend.allocated)
+                               continue;
+                       lib_ring_buffer_free(buf);
+               }
+               free_percpu(chanb->buf);
+       } else
+               kfree(chanb->buf);
+free_cpumask:
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               free_cpumask_var(chanb->cpumask);
+       return -ENOMEM;
+}
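+
+/*
+ * Illustrative usage sketch (not part of this patch): creating a backend
+ * with 4 page-sized sub-buffers. "my_config" and "my_priv" are assumed
+ * to be defined by the client. Possible errors are -EPERM, -EINVAL and
+ * -ENOMEM, as per the checks above:
+ *
+ *	ret = channel_backend_init(&chan->backend, "my_chan", &my_config,
+ *			my_priv, PAGE_SIZE, 4);
+ *	if (ret)
+ *		return ret;
+ */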
+
+/**
+ * channel_backend_unregister_notifiers - unregister notifiers
+ * @chanb: channel backend
+ *
+ * Holds CPU hotplug.
+ */
+void channel_backend_unregister_notifiers(struct channel_backend *chanb)
+{
+       const struct lib_ring_buffer_config *config = &chanb->config;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+               int ret;
+
+               ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
+                               &chanb->cpuhp_prepare.node);
+               WARN_ON(ret);
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+               unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+       }
+}
+
+/**
+ * channel_backend_free - destroy the channel backend
+ * @chanb: channel backend
+ *
+ * Destroys all channel buffers and frees the channel backend.
+ */
+void channel_backend_free(struct channel_backend *chanb)
+{
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       unsigned int i;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+               for_each_possible_cpu(i) {
+                       struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
+
+                       if (!buf->backend.allocated)
+                               continue;
+                       lib_ring_buffer_free(buf);
+               }
+               free_cpumask_var(chanb->cpumask);
+               free_percpu(chanb->buf);
+       } else {
+               struct lib_ring_buffer *buf = chanb->buf;
+
+               CHAN_WARN_ON(chanb, !buf->backend.allocated);
+               lib_ring_buffer_free(buf);
+               kfree(buf);
+       }
+}
+
+/**
+ * _lib_ring_buffer_write - write data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @pagecpy : number of bytes already copied within the current page
+ */
+void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
+                           const void *src, size_t len, size_t pagecpy)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       size_t sbidx, index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+
+       do {
+               len -= pagecpy;
+               src += pagecpy;
+               offset += pagecpy;
+               sbidx = offset >> chanb->subbuf_size_order;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+               /*
+                * Underlying layer should never ask for writes across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+               id = bufb->buf_wsb[sbidx].id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                            && subbuffer_id_is_noref(config, id));
+               lib_ring_buffer_do_copy(config,
+                                       rpages->p[index].virt
+                                               + (offset & ~PAGE_MASK),
+                                       src, pagecpy);
+       } while (unlikely(len != pagecpy));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
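+
+/*
+ * Worked example of the offset decomposition above, assuming 4 KiB pages
+ * (PAGE_SHIFT == 12) and 16 KiB sub-buffers (subbuf_size_order == 14):
+ *
+ *	offset = 0x5234
+ *	sbidx  = 0x5234 >> 14            = 1	(second sub-buffer)
+ *	index  = (0x5234 & 0x3fff) >> 12 = 1	(second page within it)
+ *	target = rpages->p[index].virt + (0x5234 & ~PAGE_MASK)
+ *	       = that page's virtual address + 0x234
+ */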
+
+/**
+ * _lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @c : the byte to write
+ * @len : length to write
+ * @pagecpy : number of bytes already copied within the current page
+ */
+void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
+                            size_t offset,
+                            int c, size_t len, size_t pagecpy)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       size_t sbidx, index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+
+       do {
+               len -= pagecpy;
+               offset += pagecpy;
+               sbidx = offset >> chanb->subbuf_size_order;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+               /*
+                * Underlying layer should never ask for writes across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+               id = bufb->buf_wsb[sbidx].id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                            && subbuffer_id_is_noref(config, id));
+               lib_ring_buffer_do_memset(rpages->p[index].virt
+                                         + (offset & ~PAGE_MASK),
+                                         c, pagecpy);
+       } while (unlikely(len != pagecpy));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
+
+/**
+ * _lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @pagecpy : number of bytes already copied within the current page
+ * @pad : character to use for padding
+ */
+void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
+                       size_t offset, const char *src, size_t len,
+                       size_t pagecpy, int pad)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       size_t sbidx, index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+       int src_terminated = 0;
+
+       CHAN_WARN_ON(chanb, !len);
+       offset += pagecpy;
+       do {
+               len -= pagecpy;
+               if (!src_terminated)
+                       src += pagecpy;
+               sbidx = offset >> chanb->subbuf_size_order;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+               /*
+                * Underlying layer should never ask for writes across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+               id = bufb->buf_wsb[sbidx].id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                            && subbuffer_id_is_noref(config, id));
+
+               if (likely(!src_terminated)) {
+                       size_t count, to_copy;
+
+                       to_copy = pagecpy;
+                       if (pagecpy == len)
+                               to_copy--;      /* Final '\0' */
+                       count = lib_ring_buffer_do_strcpy(config,
+                                       rpages->p[index].virt
+                                               + (offset & ~PAGE_MASK),
+                                       src, to_copy);
+                       offset += count;
+                       /* Padding */
+                       if (unlikely(count < to_copy)) {
+                               size_t pad_len = to_copy - count;
+
+                               /* Next pages will have padding */
+                               src_terminated = 1;
+                               lib_ring_buffer_do_memset(rpages->p[index].virt
+                                               + (offset & ~PAGE_MASK),
+                                       pad, pad_len);
+                               offset += pad_len;
+                       }
+               } else {
+                       size_t pad_len;
+
+                       pad_len = pagecpy;
+                       if (pagecpy == len)
+                               pad_len--;      /* Final '\0' */
+                       lib_ring_buffer_do_memset(rpages->p[index].virt
+                                       + (offset & ~PAGE_MASK),
+                               pad, pad_len);
+                       offset += pad_len;
+               }
+       } while (unlikely(len != pagecpy));
+       /* Ending '\0' */
+       lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
+                       '\0', 1);
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
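+
+/*
+ * Illustrative example of the semantics above: with len == 8, src == "ab"
+ * and pad == '#', the 8 reserved bytes end up holding "ab#####\0" (the
+ * source, then '#' padding, with the final '\0' written last).
+ */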
+
+/**
+ * _lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @pagecpy : number of bytes already copied within the current page
+ *
+ * This function deals with userspace pointers; it should never be called
+ * directly without the src pointer having been checked with access_ok()
+ * beforehand.
+ */
+void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+                                     size_t offset,
+                                     const void __user *src, size_t len,
+                                     size_t pagecpy)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       size_t sbidx, index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+       int ret;
+
+       do {
+               len -= pagecpy;
+               src += pagecpy;
+               offset += pagecpy;
+               sbidx = offset >> chanb->subbuf_size_order;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+               /*
+                * Underlying layer should never ask for writes across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+               id = bufb->buf_wsb[sbidx].id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                               && subbuffer_id_is_noref(config, id));
+               ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
+                                                       + (offset & ~PAGE_MASK),
+                                                       src, pagecpy) != 0;
+               if (ret > 0) {
+                       /* Copy failed. */
+                       _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
+                       break; /* stop copy */
+               }
+       } while (unlikely(len != pagecpy));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
+
+/**
+ * _lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @pagecpy : number of bytes already copied within the current page
+ * @pad : character to use for padding
+ *
+ * This function deals with userspace pointers; it should never be called
+ * directly without the src pointer having been checked with access_ok()
+ * beforehand.
+ */
+void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+               size_t offset, const char __user *src, size_t len,
+               size_t pagecpy, int pad)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       size_t sbidx, index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+       int src_terminated = 0;
+
+       offset += pagecpy;
+       do {
+               len -= pagecpy;
+               if (!src_terminated)
+                       src += pagecpy;
+               sbidx = offset >> chanb->subbuf_size_order;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+               /*
+                * Underlying layer should never ask for writes across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+               id = bufb->buf_wsb[sbidx].id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                               && subbuffer_id_is_noref(config, id));
+
+               if (likely(!src_terminated)) {
+                       size_t count, to_copy;
+
+                       to_copy = pagecpy;
+                       if (pagecpy == len)
+                               to_copy--;      /* Final '\0' */
+                       count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
+                                       rpages->p[index].virt
+                                               + (offset & ~PAGE_MASK),
+                                       src, to_copy);
+                       offset += count;
+                       /* Padding */
+                       if (unlikely(count < to_copy)) {
+                               size_t pad_len = to_copy - count;
+
+                               /* Next pages will have padding */
+                               src_terminated = 1;
+                               lib_ring_buffer_do_memset(rpages->p[index].virt
+                                               + (offset & ~PAGE_MASK),
+                                       pad, pad_len);
+                               offset += pad_len;
+                       }
+               } else {
+                       size_t pad_len;
+
+                       pad_len = pagecpy;
+                       if (pagecpy == len)
+                               pad_len--;      /* Final '\0' */
+                       lib_ring_buffer_do_memset(rpages->p[index].virt
+                                       + (offset & ~PAGE_MASK),
+                               pad, pad_len);
+                       offset += pad_len;
+               }
+       } while (unlikely(len != pagecpy));
+       /* Ending '\0' */
+       lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
+                       '\0', 1);
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
+
+/**
+ * lib_ring_buffer_read - read data from a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @dest : destination address
+ * @len : length to copy to destination
+ *
+ * Should be protected by get_subbuf/put_subbuf.
+ * Returns the length copied.
+ */
+size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
+                           void *dest, size_t len)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       size_t index, pagecpy, orig_len;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+
+       orig_len = len;
+       offset &= chanb->buf_size - 1;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       if (unlikely(!len))
+               return 0;
+       for (;;) {
+               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+               id = bufb->buf_rsb.id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                            && subbuffer_id_is_noref(config, id));
+               memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
+                      pagecpy);
+               len -= pagecpy;
+               if (likely(!len))
+                       break;
+               dest += pagecpy;
+               offset += pagecpy;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+               /*
+                * Underlying layer should never ask for reads across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+       }
+       return orig_len;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
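+
+/*
+ * Illustrative reader-side sketch (not part of this patch) of the
+ * get_subbuf/put_subbuf protection mentioned above, assuming the frontend
+ * helpers declared in ringbuffer/frontend.h; error handling elided:
+ *
+ *	if (!lib_ring_buffer_get_subbuf(buf, consumed)) {
+ *		lib_ring_buffer_read(&buf->backend, offset, dest, len);
+ *		lib_ring_buffer_put_subbuf(buf);
+ *	}
+ */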
+
+/**
+ * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @dest : destination userspace address
+ * @len : length to copy to destination
+ *
+ * Should be protected by get_subbuf/put_subbuf.
+ * access_ok() must have been performed on dest addresses prior to calling
+ * this function.
+ * Returns -EFAULT on error, 0 if ok.
+ */
+int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
+                                  size_t offset, void __user *dest, size_t len)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       size_t index;
+       ssize_t pagecpy;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+
+       offset &= chanb->buf_size - 1;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       if (unlikely(!len))
+               return 0;
+       for (;;) {
+               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+               id = bufb->buf_rsb.id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                            && subbuffer_id_is_noref(config, id));
+               if (__copy_to_user(dest,
+                              rpages->p[index].virt + (offset & ~PAGE_MASK),
+                              pagecpy))
+                       return -EFAULT;
+               len -= pagecpy;
+               if (likely(!len))
+                       break;
+               dest += pagecpy;
+               offset += pagecpy;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+               /*
+                * Underlying layer should never ask for reads across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
+
+/**
+ * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @dest : destination address
+ * @len : destination's length
+ *
+ * Return string's length, or -EINVAL on error.
+ * Should be protected by get_subbuf/put_subbuf.
+ * Destination length should be at least 1 to hold '\0'.
+ */
+int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
+                             void *dest, size_t len)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       size_t index;
+       ssize_t pagecpy, pagelen, strpagelen, orig_offset;
+       char *str;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+
+       offset &= chanb->buf_size - 1;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       orig_offset = offset;
+       if (unlikely(!len))
+               return -EINVAL;
+       for (;;) {
+               id = bufb->buf_rsb.id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                            && subbuffer_id_is_noref(config, id));
+               str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
+               pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
+               strpagelen = strnlen(str, pagelen);
+               if (len) {
+                       pagecpy = min_t(size_t, len, strpagelen);
+                       if (dest) {
+                               memcpy(dest, str, pagecpy);
+                               dest += pagecpy;
+                       }
+                       len -= pagecpy;
+               }
+               offset += strpagelen;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+               if (strpagelen < pagelen)
+                       break;
+               /*
+                * Underlying layer should never ask for reads across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+       }
+       if (dest && len)
+               ((char *)dest)[0] = 0;
+       return offset - orig_offset;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
+
+/**
+ * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @virt : pointer to page address (output)
+ *
+ * Should be protected by get_subbuf/put_subbuf.
+ * Returns a pointer to the unsigned long holding the page frame number.
+ */
+unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
+                                           size_t offset, void ***virt)
+{
+       size_t index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       unsigned long sb_bindex, id;
+
+       offset &= chanb->buf_size - 1;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       id = bufb->buf_rsb.id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       rpages = bufb->array[sb_bindex];
+       CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, id));
+       *virt = &rpages->p[index].virt;
+       return &rpages->p[index].pfn;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
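+
+/*
+ * Illustrative use of the returned pointer (e.g. to build an mmap
+ * mapping), mirroring the pfn_to_page() usage in the backend free path:
+ *
+ *	void **virt;
+ *	unsigned long *pfnp;
+ *	struct page *page;
+ *
+ *	pfnp = lib_ring_buffer_read_get_pfn(bufb, offset, &virt);
+ *	page = pfn_to_page(*pfnp);
+ */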
+
+/**
+ * lib_ring_buffer_read_offset_address - get address of a buffer location
+ * @bufb : buffer backend
+ * @offset : offset within the buffer.
+ *
+ * Return the address where a given offset is located (for read).
+ * Should be used to get the current subbuffer header pointer. Given we know
+ * it's never on a page boundary, it's safe to read/write directly
+ * from/to this address, as long as the read/write is never bigger than a
+ * page size.
+ */
+void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
+                                         size_t offset)
+{
+       size_t index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       unsigned long sb_bindex, id;
+
+       offset &= chanb->buf_size - 1;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       id = bufb->buf_rsb.id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       rpages = bufb->array[sb_bindex];
+       CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, id));
+       return rpages->p[index].virt + (offset & ~PAGE_MASK);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
+
+/**
+ * lib_ring_buffer_offset_address - get address of a location within the buffer
+ * @bufb : buffer backend
+ * @offset : offset within the buffer.
+ *
+ * Return the address where a given offset is located.
+ * Should be used to get the current subbuffer header pointer. Given we know
+ * it's always at the beginning of a page, it's safe to write directly to this
+ * address, as long as the write is never bigger than a page size.
+ */
+void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
+                                    size_t offset)
+{
+       size_t sbidx, index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       unsigned long sb_bindex, id;
+
+       offset &= chanb->buf_size - 1;
+       sbidx = offset >> chanb->subbuf_size_order;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       id = bufb->buf_wsb[sbidx].id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       rpages = bufb->array[sb_bindex];
+       CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, id));
+       return rpages->p[index].virt + (offset & ~PAGE_MASK);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);
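+
+/*
+ * Note: the read variant above resolves addresses through bufb->buf_rsb
+ * (the reader-owned sub-buffer id), whereas this write-side variant goes
+ * through the writer-visible bufb->buf_wsb[] table.
+ */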
diff --git a/src/lib/ringbuffer/ring_buffer_frontend.c b/src/lib/ringbuffer/ring_buffer_frontend.c
new file mode 100644 (file)
index 0000000..fca37fb
--- /dev/null
@@ -0,0 +1,2387 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ring_buffer_frontend.c
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
+ * recorder (overwrite) modes. See thesis:
+ *
+ * Desnoyers, Mathieu (2009), "Low-Impact Operating System Tracing", Ph.D.
+ * dissertation, Ecole Polytechnique de Montreal.
+ * http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf
+ *
+ * - Algorithm presentation in Chapter 5:
+ *     "Lockless Multi-Core High-Throughput Buffering".
+ * - Algorithm formal verification in Section 8.6:
+ *     "Formal verification of LTTng"
+ *
+ * Author:
+ *     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Inspired from LTT and RelayFS:
+ *  Karim Yaghmour <karim@opersys.com>
+ *  Tom Zanussi <zanussi@us.ibm.com>
+ *  Bob Wisniewski <bob@watson.ibm.com>
+ * And from K42:
+ *  Bob Wisniewski <bob@watson.ibm.com>
+ *
+ * Buffer reader semantics:
+ *
+ * - get_subbuf_size
+ * while buffer is not finalized and empty
+ *   - get_subbuf
+ *     - if return value != 0, continue
+ *   - splice one subbuffer worth of data to a pipe
+ *   - splice the data from pipe to disk/network
+ *   - put_subbuf
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <asm/cacheflush.h>
+
+#include <ringbuffer/config.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <ringbuffer/iterator.h>
+#include <ringbuffer/nohz.h>
+#include <wrapper/atomic.h>
+#include <wrapper/kref.h>
+#include <wrapper/percpu-defs.h>
+#include <wrapper/timer.h>
+#include <wrapper/vmalloc.h>
+
+/*
+ * Internal structure representing offsets to use at a sub-buffer switch.
+ */
+struct switch_offsets {
+       unsigned long begin, end, old;
+       size_t pre_header_padding, size;
+       unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
+                    switch_old_end:1;
+};
+
+#ifdef CONFIG_NO_HZ
+enum tick_nohz_val {
+       TICK_NOHZ_STOP,
+       TICK_NOHZ_FLUSH,
+       TICK_NOHZ_RESTART,
+};
+
+static ATOMIC_NOTIFIER_HEAD(tick_nohz_notifier);
+#endif /* CONFIG_NO_HZ */
+
+static DEFINE_PER_CPU(spinlock_t, ring_buffer_nohz_lock);
+
+DEFINE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
+EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);
+
+static
+void lib_ring_buffer_print_errors(struct channel *chan,
+                                 struct lib_ring_buffer *buf, int cpu);
+static
+void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
+               enum switch_mode mode);
+
+static
+int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
+                                struct lib_ring_buffer *buf,
+                                struct channel *chan)
+{
+       unsigned long consumed_old, consumed_idx, commit_count, write_offset;
+
+       consumed_old = atomic_long_read(&buf->consumed);
+       consumed_idx = subbuf_index(consumed_old, chan);
+       commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
+       /*
+        * No memory barrier here, since we are only interested
+        * in a statistically correct polling result. The next poll will
+        * get the data if we are racing. The mb() that ensures correct
+        * memory order is in get_subbuf.
+        */
+       write_offset = v_read(config, &buf->offset);
+
+       /*
+        * Check that the subbuffer we are trying to consume has already
+        * been fully committed.
+        */
+
+       if (((commit_count - chan->backend.subbuf_size)
+            & chan->commit_count_mask)
+           - (buf_trunc(consumed_old, chan)
+              >> chan->backend.num_subbuf_order)
+           != 0)
+               return 0;
+
+       /*
+        * Check that we are not about to read the same subbuffer in
+        * which the writer head is.
+        */
+       if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
+           == 0)
+               return 0;
+
+       return 1;
+}
+
+/*
+ * Must be called under cpu hotplug protection.
+ */
+void lib_ring_buffer_free(struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+
+       lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
+       lttng_kvfree(buf->commit_hot);
+       lttng_kvfree(buf->commit_cold);
+       lttng_kvfree(buf->ts_end);
+
+       lib_ring_buffer_backend_free(&buf->backend);
+}
+
+/**
+ * lib_ring_buffer_reset - Reset ring buffer to initial values.
+ * @buf: Ring buffer.
+ *
+ * Effectively empty the ring buffer. Should be called when the buffer is not
+ * used for writing. The ring buffer can be opened for reading, but the reader
+ * should not be using the iterator concurrently with reset. The previous
+ * current iterator record is reset.
+ */
+void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned int i;
+
+       /*
+        * Reset iterator first. It will put the subbuffer if it currently holds
+        * it.
+        */
+       lib_ring_buffer_iterator_reset(buf);
+       v_set(config, &buf->offset, 0);
+       for (i = 0; i < chan->backend.num_subbuf; i++) {
+               v_set(config, &buf->commit_hot[i].cc, 0);
+               v_set(config, &buf->commit_hot[i].seq, 0);
+               v_set(config, &buf->commit_cold[i].cc_sb, 0);
+               buf->ts_end[i] = 0;
+       }
+       atomic_long_set(&buf->consumed, 0);
+       atomic_set(&buf->record_disabled, 0);
+       v_set(config, &buf->last_tsc, 0);
+       lib_ring_buffer_backend_reset(&buf->backend);
+       /* Don't reset number of active readers */
+       v_set(config, &buf->records_lost_full, 0);
+       v_set(config, &buf->records_lost_wrap, 0);
+       v_set(config, &buf->records_lost_big, 0);
+       v_set(config, &buf->records_count, 0);
+       v_set(config, &buf->records_overrun, 0);
+       buf->finalized = 0;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_reset);
+
+/**
+ * channel_reset - Reset channel to initial values.
+ * @chan: Channel.
+ *
+ * Effectively empty the channel. Should be called when the channel is not used
+ * for writing. The channel can be opened for reading, but the reader should not
+ * be using the iterator concurrently with reset. The previous current iterator
+ * record is reset.
+ */
+void channel_reset(struct channel *chan)
+{
+       /*
+        * Reset iterators first. Will put the subbuffer if held for reading.
+        */
+       channel_iterator_reset(chan);
+       atomic_set(&chan->record_disabled, 0);
+       /* Don't reset commit_count_mask, still valid */
+       channel_backend_reset(&chan->backend);
+       /* Don't reset switch/read timer interval */
+       /* Don't reset notifiers and notifier enable bits */
+       /* Don't reset reader reference count */
+}
+EXPORT_SYMBOL_GPL(channel_reset);
+
+/*
+ * Must be called under cpu hotplug protection.
+ */
+int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+                          struct channel_backend *chanb, int cpu)
+{
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       struct channel *chan = container_of(chanb, struct channel, backend);
+       void *priv = chanb->priv;
+       size_t subbuf_header_size;
+       u64 tsc;
+       int ret;
+
+       /* Test for cpu hotplug */
+       if (buf->backend.allocated)
+               return 0;
+
+       /*
+        * Paranoia: per cpu dynamic allocation is not officially documented as
+        * zeroing the memory, so let's do it here too, just in case.
+        */
+       memset(buf, 0, sizeof(*buf));
+
+       ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend, cpu);
+       if (ret)
+               return ret;
+
+       buf->commit_hot =
+               lttng_kvzalloc_node(ALIGN(sizeof(*buf->commit_hot)
+                                  * chan->backend.num_subbuf,
+                                  1 << INTERNODE_CACHE_SHIFT),
+                       GFP_KERNEL | __GFP_NOWARN,
+                       cpu_to_node(max(cpu, 0)));
+       if (!buf->commit_hot) {
+               ret = -ENOMEM;
+               goto free_chanbuf;
+       }
+
+       buf->commit_cold =
+               lttng_kvzalloc_node(ALIGN(sizeof(*buf->commit_cold)
+                                  * chan->backend.num_subbuf,
+                                  1 << INTERNODE_CACHE_SHIFT),
+                       GFP_KERNEL | __GFP_NOWARN,
+                       cpu_to_node(max(cpu, 0)));
+       if (!buf->commit_cold) {
+               ret = -ENOMEM;
+               goto free_commit;
+       }
+
+       buf->ts_end =
+               lttng_kvzalloc_node(ALIGN(sizeof(*buf->ts_end)
+                                  * chan->backend.num_subbuf,
+                                  1 << INTERNODE_CACHE_SHIFT),
+                       GFP_KERNEL | __GFP_NOWARN,
+                       cpu_to_node(max(cpu, 0)));
+       if (!buf->ts_end) {
+               ret = -ENOMEM;
+               goto free_commit_cold;
+       }
+
+       init_waitqueue_head(&buf->read_wait);
+       init_waitqueue_head(&buf->write_wait);
+       raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
+
+       /*
+        * Write the subbuffer header for first subbuffer so we know the total
+        * duration of data gathering.
+        */
+       subbuf_header_size = config->cb.subbuffer_header_size();
+       v_set(config, &buf->offset, subbuf_header_size);
+       subbuffer_id_clear_noref(config, &buf->backend.buf_wsb[0].id);
+       tsc = config->cb.ring_buffer_clock_read(buf->backend.chan);
+       config->cb.buffer_begin(buf, tsc, 0);
+       v_add(config, subbuf_header_size, &buf->commit_hot[0].cc);
+
+       if (config->cb.buffer_create) {
+               ret = config->cb.buffer_create(buf, priv, cpu, chanb->name);
+               if (ret)
+                       goto free_init;
+       }
+
+       /*
+        * Ensure the buffer is ready before setting it to allocated and setting
+        * the cpumask.
+        * Used for cpu hotplug vs cpumask iteration.
+        */
+       smp_wmb();
+       buf->backend.allocated = 1;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+               CHAN_WARN_ON(chan, cpumask_test_cpu(cpu,
+                            chan->backend.cpumask));
+               cpumask_set_cpu(cpu, chan->backend.cpumask);
+       }
+
+       return 0;
+
+       /* Error handling */
+free_init:
+       lttng_kvfree(buf->ts_end);
+free_commit_cold:
+       lttng_kvfree(buf->commit_cold);
+free_commit:
+       lttng_kvfree(buf->commit_hot);
+free_chanbuf:
+       lib_ring_buffer_backend_free(&buf->backend);
+       return ret;
+}
+
+static void switch_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
+{
+       struct lib_ring_buffer *buf = lttng_from_timer(buf, t, switch_timer);
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       /*
+        * Only flush buffers periodically if readers are active.
+        */
+       if (atomic_long_read(&buf->active_readers))
+               lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               lttng_mod_timer_pinned(&buf->switch_timer,
+                                jiffies + chan->switch_timer_interval);
+       else
+               mod_timer(&buf->switch_timer,
+                         jiffies + chan->switch_timer_interval);
+}
+
+/*
+ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
+ */
+static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned int flags = 0;
+
+       if (!chan->switch_timer_interval || buf->switch_timer_enabled)
+               return;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               flags = LTTNG_TIMER_PINNED;
+
+       lttng_timer_setup(&buf->switch_timer, switch_buffer_timer, flags, buf);
+       buf->switch_timer.expires = jiffies + chan->switch_timer_interval;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               add_timer_on(&buf->switch_timer, buf->backend.cpu);
+       else
+               add_timer(&buf->switch_timer);
+
+       buf->switch_timer_enabled = 1;
+}
+
+/*
+ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
+ */
+static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+
+       if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
+               return;
+
+       del_timer_sync(&buf->switch_timer);
+       buf->switch_timer_enabled = 0;
+}
+
+/*
+ * Polling timer to check the channels for data.
+ */
+static void read_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
+{
+       struct lib_ring_buffer *buf = lttng_from_timer(buf, t, read_timer);
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       CHAN_WARN_ON(chan, !buf->backend.allocated);
+
+       if (atomic_long_read(&buf->active_readers)
+           && lib_ring_buffer_poll_deliver(config, buf, chan)) {
+               wake_up_interruptible(&buf->read_wait);
+               wake_up_interruptible(&chan->read_wait);
+       }
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               lttng_mod_timer_pinned(&buf->read_timer,
+                                jiffies + chan->read_timer_interval);
+       else
+               mod_timer(&buf->read_timer,
+                         jiffies + chan->read_timer_interval);
+}
+
+/*
+ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
+ */
+static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned int flags = 0;
+
+       if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
+           || !chan->read_timer_interval
+           || buf->read_timer_enabled)
+               return;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               flags = LTTNG_TIMER_PINNED;
+
+       lttng_timer_setup(&buf->read_timer, read_buffer_timer, flags, buf);
+       buf->read_timer.expires = jiffies + chan->read_timer_interval;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               add_timer_on(&buf->read_timer, buf->backend.cpu);
+       else
+               add_timer(&buf->read_timer);
+
+       buf->read_timer_enabled = 1;
+}
+
+/*
+ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
+ */
+static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
+           || !chan->read_timer_interval
+           || !buf->read_timer_enabled)
+               return;
+
+       del_timer_sync(&buf->read_timer);
+       /*
+        * Do one more check to catch data that has been written in the last
+        * timer period.
+        */
+       if (lib_ring_buffer_poll_deliver(config, buf, chan)) {
+               wake_up_interruptible(&buf->read_wait);
+               wake_up_interruptible(&chan->read_wait);
+       }
+       buf->read_timer_enabled = 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+enum cpuhp_state lttng_rb_hp_prepare;
+enum cpuhp_state lttng_rb_hp_online;
+
+void lttng_rb_set_hp_prepare(enum cpuhp_state val)
+{
+       lttng_rb_hp_prepare = val;
+}
+EXPORT_SYMBOL_GPL(lttng_rb_set_hp_prepare);
+
+void lttng_rb_set_hp_online(enum cpuhp_state val)
+{
+       lttng_rb_hp_online = val;
+}
+EXPORT_SYMBOL_GPL(lttng_rb_set_hp_online);
+
+int lttng_cpuhp_rb_frontend_dead(unsigned int cpu,
+               struct lttng_cpuhp_node *node)
+{
+       struct channel *chan = container_of(node, struct channel,
+                                           cpuhp_prepare);
+       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+       /*
+        * Performing a buffer switch on a remote CPU. Performed by
+        * the CPU responsible for doing the hotunplug after the target
+        * CPU stopped running completely. Ensures that all data
+        * from that remote CPU is flushed.
+        */
+       lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_dead);
+
+int lttng_cpuhp_rb_frontend_online(unsigned int cpu,
+               struct lttng_cpuhp_node *node)
+{
+       struct channel *chan = container_of(node, struct channel,
+                                           cpuhp_online);
+       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+       wake_up_interruptible(&chan->hp_wait);
+       lib_ring_buffer_start_switch_timer(buf);
+       lib_ring_buffer_start_read_timer(buf);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_online);
+
+int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
+               struct lttng_cpuhp_node *node)
+{
+       struct channel *chan = container_of(node, struct channel,
+                                           cpuhp_online);
+       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+       lib_ring_buffer_stop_switch_timer(buf);
+       lib_ring_buffer_stop_read_timer(buf);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_offline);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/**
+ *     lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
+ *     @nb: notifier block
+ *     @action: hotplug action to take
+ *     @hcpu: CPU number
+ *
+ *     Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
+ */
+static
+int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
+                                             unsigned long action,
+                                             void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+       struct channel *chan = container_of(nb, struct channel,
+                                           cpu_hp_notifier);
+       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (!chan->cpu_hp_enable)
+               return NOTIFY_DONE;
+
+       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+       switch (action) {
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               wake_up_interruptible(&chan->hp_wait);
+               lib_ring_buffer_start_switch_timer(buf);
+               lib_ring_buffer_start_read_timer(buf);
+               return NOTIFY_OK;
+
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               lib_ring_buffer_stop_switch_timer(buf);
+               lib_ring_buffer_stop_read_timer(buf);
+               return NOTIFY_OK;
+
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               /*
+                * Performing a buffer switch on a remote CPU. Performed by
+                * the CPU responsible for doing the hotunplug after the target
+                * CPU stopped running completely. Ensures that all data
+                * from that remote CPU is flushed.
+                */
+               lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+               return NOTIFY_OK;
+
+       default:
+               return NOTIFY_DONE;
+       }
+}
+
+#endif
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
+/*
+ * For per-cpu buffers, call the reader wakeups before switching the buffer, so
+ * that wake-up-tracing generated events are flushed before going idle (in
+ * tick_nohz). We test if the spinlock is locked to deal with the race where
+ * readers try to sample the ring buffer before we perform the switch. We let
+ * the readers retry in that case. If there is data in the buffer, the wake up
+ * is going to forbid the CPU running the reader thread from going idle.
+ */
+static int notrace ring_buffer_tick_nohz_callback(struct notifier_block *nb,
+                                                 unsigned long val,
+                                                 void *data)
+{
+       struct channel *chan = container_of(nb, struct channel,
+                                           tick_nohz_notifier);
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lib_ring_buffer *buf;
+       int cpu = smp_processor_id();
+
+       if (config->alloc != RING_BUFFER_ALLOC_PER_CPU) {
+               /*
+                * We don't support keeping the system idle with global buffers
+                * and streaming active. In order to do so, we would need to
+                * sample a non-nohz-cpumask racelessly with the nohz updates
+                * without adding synchronization overhead to nohz. Leave this
+                * use-case out for now.
+                */
+               return 0;
+       }
+
+       buf = channel_get_ring_buffer(config, chan, cpu);
+       switch (val) {
+       case TICK_NOHZ_FLUSH:
+               raw_spin_lock(&buf->raw_tick_nohz_spinlock);
+               if (config->wakeup == RING_BUFFER_WAKEUP_BY_TIMER
+                   && chan->read_timer_interval
+                   && atomic_long_read(&buf->active_readers)
+                   && (lib_ring_buffer_poll_deliver(config, buf, chan)
+                       || lib_ring_buffer_pending_data(config, buf, chan))) {
+                       wake_up_interruptible(&buf->read_wait);
+                       wake_up_interruptible(&chan->read_wait);
+               }
+               if (chan->switch_timer_interval)
+                       lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+               raw_spin_unlock(&buf->raw_tick_nohz_spinlock);
+               break;
+       case TICK_NOHZ_STOP:
+               spin_lock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+               lib_ring_buffer_stop_switch_timer(buf);
+               lib_ring_buffer_stop_read_timer(buf);
+               spin_unlock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+               break;
+       case TICK_NOHZ_RESTART:
+               spin_lock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+               lib_ring_buffer_start_read_timer(buf);
+               lib_ring_buffer_start_switch_timer(buf);
+               spin_unlock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+               break;
+       }
+
+       return 0;
+}
+
+void notrace lib_ring_buffer_tick_nohz_flush(void)
+{
+       atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_FLUSH,
+                                  NULL);
+}
+
+void notrace lib_ring_buffer_tick_nohz_stop(void)
+{
+       atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_STOP,
+                                  NULL);
+}
+
+void notrace lib_ring_buffer_tick_nohz_restart(void)
+{
+       atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_RESTART,
+                                  NULL);
+}
+#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
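+
+/*
+ * Illustrative sketch (an assumption, not mainline kernel behaviour): these
+ * entry points are meant to be driven by a tick_nohz notifier patched into
+ * the idle path, which would call them in the following order:
+ *
+ *     idle entry:
+ *             lib_ring_buffer_tick_nohz_flush();      flush, wake readers
+ *             lib_ring_buffer_tick_nohz_stop();       stop per-cpu timers
+ *     idle exit:
+ *             lib_ring_buffer_tick_nohz_restart();    re-arm per-cpu timers
+ */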
+
+/*
+ * Holds CPU hotplug.
+ */
+static void channel_unregister_notifiers(struct channel *chan)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       channel_iterator_unregister_notifiers(chan);
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#ifdef CONFIG_NO_HZ
+               /*
+                * Remove the nohz notifier first, so we are certain we stop
+                * the timers.
+                */
+               atomic_notifier_chain_unregister(&tick_nohz_notifier,
+                                                &chan->tick_nohz_notifier);
+               /*
+                * ring_buffer_nohz_lock will not be needed below, because
+                * we just removed the notifiers, which were the only source of
+                * concurrency.
+                */
+#endif /* CONFIG_NO_HZ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+               {
+                       int ret;
+
+                       ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
+                               &chan->cpuhp_online.node);
+                       WARN_ON(ret);
+                       ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare,
+                               &chan->cpuhp_prepare.node);
+                       WARN_ON(ret);
+               }
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+               {
+                       int cpu;
+
+#ifdef CONFIG_HOTPLUG_CPU
+                       get_online_cpus();
+                       chan->cpu_hp_enable = 0;
+                       for_each_online_cpu(cpu) {
+                               struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+                                                                     cpu);
+                               lib_ring_buffer_stop_switch_timer(buf);
+                               lib_ring_buffer_stop_read_timer(buf);
+                       }
+                       put_online_cpus();
+                       unregister_cpu_notifier(&chan->cpu_hp_notifier);
+#else
+                       for_each_possible_cpu(cpu) {
+                               struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+                                                                     cpu);
+                               lib_ring_buffer_stop_switch_timer(buf);
+                               lib_ring_buffer_stop_read_timer(buf);
+                       }
+#endif
+               }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+       } else {
+               struct lib_ring_buffer *buf = chan->backend.buf;
+
+               lib_ring_buffer_stop_switch_timer(buf);
+               lib_ring_buffer_stop_read_timer(buf);
+       }
+       channel_backend_unregister_notifiers(&chan->backend);
+}
+
+static void lib_ring_buffer_set_quiescent(struct lib_ring_buffer *buf)
+{
+       if (!buf->quiescent) {
+               buf->quiescent = true;
+               _lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
+       }
+}
+
+static void lib_ring_buffer_clear_quiescent(struct lib_ring_buffer *buf)
+{
+       buf->quiescent = false;
+}
+
+void lib_ring_buffer_set_quiescent_channel(struct channel *chan)
+{
+       int cpu;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+               get_online_cpus();
+               for_each_channel_cpu(cpu, chan) {
+                       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+                                                             cpu);
+
+                       lib_ring_buffer_set_quiescent(buf);
+               }
+               put_online_cpus();
+       } else {
+               struct lib_ring_buffer *buf = chan->backend.buf;
+
+               lib_ring_buffer_set_quiescent(buf);
+       }
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_set_quiescent_channel);
+
+void lib_ring_buffer_clear_quiescent_channel(struct channel *chan)
+{
+       int cpu;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+               get_online_cpus();
+               for_each_channel_cpu(cpu, chan) {
+                       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+                                                             cpu);
+
+                       lib_ring_buffer_clear_quiescent(buf);
+               }
+               put_online_cpus();
+       } else {
+               struct lib_ring_buffer *buf = chan->backend.buf;
+
+               lib_ring_buffer_clear_quiescent(buf);
+       }
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_clear_quiescent_channel);
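+
+/*
+ * Usage sketch (hedged; no caller is shown in this file): a tracer wanting a
+ * stable view of the buffers around a stop/start cycle could bracket it as:
+ *
+ *     lib_ring_buffer_set_quiescent_channel(chan);    flush (SWITCH_FLUSH)
+ *     ... read back or snapshot the quiescent buffers ...
+ *     lib_ring_buffer_clear_quiescent_channel(chan);  allow flushing again
+ */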
+
+static void channel_free(struct channel *chan)
+{
+       if (chan->backend.release_priv_ops) {
+               chan->backend.release_priv_ops(chan->backend.priv_ops);
+       }
+       channel_iterator_free(chan);
+       channel_backend_free(&chan->backend);
+       kfree(chan);
+}
+
+/**
+ * channel_create - Create channel.
+ * @config: ring buffer instance configuration
+ * @name: name of the channel
+ * @priv: ring buffer client private data
+ * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
+ *            address mapping. It is used only by the RING_BUFFER_STATIC
+ *            configuration. It can be set to NULL for other backends.
+ * @subbuf_size: subbuffer size
+ * @num_subbuf: number of subbuffers
+ * @switch_timer_interval: Time interval (in us) to fill sub-buffers with
+ *                         padding to let readers get those sub-buffers.
+ *                         Used for live streaming.
+ * @read_timer_interval: Time interval (in us) to wake up pending readers.
+ *
+ * Holds cpu hotplug.
+ * Returns NULL on failure.
+ */
+struct channel *channel_create(const struct lib_ring_buffer_config *config,
+                  const char *name, void *priv, void *buf_addr,
+                  size_t subbuf_size,
+                  size_t num_subbuf, unsigned int switch_timer_interval,
+                  unsigned int read_timer_interval)
+{
+       int ret;
+       struct channel *chan;
+
+       if (lib_ring_buffer_check_config(config, switch_timer_interval,
+                                        read_timer_interval))
+               return NULL;
+
+       chan = kzalloc(sizeof(struct channel), GFP_KERNEL);
+       if (!chan)
+               return NULL;
+
+       ret = channel_backend_init(&chan->backend, name, config, priv,
+                                  subbuf_size, num_subbuf);
+       if (ret)
+               goto error;
+
+       ret = channel_iterator_init(chan);
+       if (ret)
+               goto error_free_backend;
+
+       chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
+       chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
+       chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
+       kref_init(&chan->ref);
+       init_waitqueue_head(&chan->read_wait);
+       init_waitqueue_head(&chan->hp_wait);
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+               chan->cpuhp_prepare.component = LTTNG_RING_BUFFER_FRONTEND;
+               ret = cpuhp_state_add_instance_nocalls(lttng_rb_hp_prepare,
+                       &chan->cpuhp_prepare.node);
+               if (ret)
+                       goto cpuhp_prepare_error;
+
+               chan->cpuhp_online.component = LTTNG_RING_BUFFER_FRONTEND;
+               ret = cpuhp_state_add_instance(lttng_rb_hp_online,
+                       &chan->cpuhp_online.node);
+               if (ret)
+                       goto cpuhp_online_error;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+               {
+                       int cpu;
+                       /*
+                        * Without CPU hotplug support, if the ring buffer is
+                        * allocated in an early initcall, it will not be
+                        * notified of secondary CPUs. In that case, we need to
+                        * set up all possible CPUs.
+                        */
+#ifdef CONFIG_HOTPLUG_CPU
+                       chan->cpu_hp_notifier.notifier_call =
+                                       lib_ring_buffer_cpu_hp_callback;
+                       chan->cpu_hp_notifier.priority = 6;
+                       register_cpu_notifier(&chan->cpu_hp_notifier);
+
+                       get_online_cpus();
+                       for_each_online_cpu(cpu) {
+                               struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+                                                                      cpu);
+                               spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
+                               lib_ring_buffer_start_switch_timer(buf);
+                               lib_ring_buffer_start_read_timer(buf);
+                               spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
+                       }
+                       chan->cpu_hp_enable = 1;
+                       put_online_cpus();
+#else
+                       for_each_possible_cpu(cpu) {
+                               struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+                                                                     cpu);
+                               spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
+                               lib_ring_buffer_start_switch_timer(buf);
+                               lib_ring_buffer_start_read_timer(buf);
+                               spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
+                       }
+#endif
+               }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
+               /* Only benefit from NO_HZ idle with per-cpu buffers for now. */
+               chan->tick_nohz_notifier.notifier_call =
+                       ring_buffer_tick_nohz_callback;
+               chan->tick_nohz_notifier.priority = ~0U;
+               atomic_notifier_chain_register(&tick_nohz_notifier,
+                                      &chan->tick_nohz_notifier);
+#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
+
+       } else {
+               struct lib_ring_buffer *buf = chan->backend.buf;
+
+               lib_ring_buffer_start_switch_timer(buf);
+               lib_ring_buffer_start_read_timer(buf);
+       }
+
+       return chan;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+cpuhp_online_error:
+       ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare,
+                       &chan->cpuhp_prepare.node);
+       WARN_ON(ret);
+cpuhp_prepare_error:
+#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+error_free_backend:
+       channel_backend_free(&chan->backend);
+error:
+       kfree(chan);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(channel_create);
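+
+/*
+ * Example client call (a minimal sketch; "client_config" and the numeric
+ * parameters are placeholders, not definitions from this file):
+ *
+ *     struct channel *chan;
+ *
+ *     chan = channel_create(&client_config, "my-chan", priv, NULL,
+ *                           4096,   subbuf_size in bytes
+ *                           4,      num_subbuf
+ *                           0,      switch timer (us), 0 means disabled
+ *                           200);   read timer (us)
+ *     if (!chan)
+ *             return -ENOMEM;
+ */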
+
+static
+void channel_release(struct kref *kref)
+{
+       struct channel *chan = container_of(kref, struct channel, ref);
+       channel_free(chan);
+}
+
+/**
+ * channel_destroy - Finalize, wait for q.s. and destroy channel.
+ * @chan: channel to destroy
+ *
+ * Holds cpu hotplug.
+ * Call "destroy" callback, finalize channels, and then decrement the
+ * channel reference count.  Note that when readers have completed data
+ * consumption of finalized channels, get_subbuf() will return -ENODATA.
+ * They should release their handle at that point.  Returns the private
+ * data pointer.
+ */
+void *channel_destroy(struct channel *chan)
+{
+       int cpu;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       void *priv;
+
+       channel_unregister_notifiers(chan);
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+               /*
+                * No need to hold cpu hotplug, because all notifiers have been
+                * unregistered.
+                */
+               for_each_channel_cpu(cpu, chan) {
+                       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+                                                             cpu);
+
+                       if (config->cb.buffer_finalize)
+                               config->cb.buffer_finalize(buf,
+                                                          chan->backend.priv,
+                                                          cpu);
+                       /*
+                        * Perform flush before writing to finalized.
+                        */
+                       smp_wmb();
+                       WRITE_ONCE(buf->finalized, 1);
+                       wake_up_interruptible(&buf->read_wait);
+               }
+       } else {
+               struct lib_ring_buffer *buf = chan->backend.buf;
+
+               if (config->cb.buffer_finalize)
+                       config->cb.buffer_finalize(buf, chan->backend.priv, -1);
+               /*
+                * Perform flush before writing to finalized.
+                */
+               smp_wmb();
+               WRITE_ONCE(buf->finalized, 1);
+               wake_up_interruptible(&buf->read_wait);
+       }
+       WRITE_ONCE(chan->finalized, 1);
+       wake_up_interruptible(&chan->hp_wait);
+       wake_up_interruptible(&chan->read_wait);
+       priv = chan->backend.priv;
+       kref_put(&chan->ref, channel_release);
+       return priv;
+}
+EXPORT_SYMBOL_GPL(channel_destroy);
+
+struct lib_ring_buffer *channel_get_ring_buffer(
+                                       const struct lib_ring_buffer_config *config,
+                                       struct channel *chan, int cpu)
+{
+       if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
+               return chan->backend.buf;
+       else
+               return per_cpu_ptr(chan->backend.buf, cpu);
+}
+EXPORT_SYMBOL_GPL(channel_get_ring_buffer);
+
+int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+
+       if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
+               return -EBUSY;
+       if (!lttng_kref_get(&chan->ref)) {
+               atomic_long_dec(&buf->active_readers);
+               return -EOVERFLOW;
+       }
+       lttng_smp_mb__after_atomic();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_open_read);
+
+void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+
+       CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
+       lttng_smp_mb__before_atomic();
+       atomic_long_dec(&buf->active_readers);
+       kref_put(&chan->ref, channel_release);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_release_read);
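+
+/*
+ * These two calls must be paired, and only one concurrent reader per buffer
+ * is allowed (active_readers saturates at 1). Minimal sketch:
+ *
+ *     ret = lib_ring_buffer_open_read(buf);
+ *     if (ret)
+ *             return ret;     -EBUSY, or -EOVERFLOW if the channel is gone
+ *     ... consume data, see lib_ring_buffer_get_subbuf() below ...
+ *     lib_ring_buffer_release_read(buf);
+ */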
+
+/*
+ * Promote the compiler barrier to an smp_mb().
+ * For the specific ring buffer case, this IPI call should be removed if the
+ * architecture does not reorder writes.  This should eventually be provided by
+ * a separate architecture-specific infrastructure.
+ */
+static void remote_mb(void *info)
+{
+       smp_mb();
+}
+
+/**
+ * lib_ring_buffer_snapshot - save subbuffer position snapshot (for read)
+ * @buf: ring buffer
+ * @consumed: consumed count indicating the position where to read
+ * @produced: produced count, indicates the position where to stop reading
+ *
+ * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
+ * data to read at consumed position, or 0 if the get operation succeeds.
+ * Busy-loop trying to get data if the tick_nohz sequence lock is held.
+ */
+
+int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
+                            unsigned long *consumed, unsigned long *produced)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long consumed_cur, write_offset;
+       int finalized;
+
+retry:
+       finalized = READ_ONCE(buf->finalized);
+       /*
+        * Read finalized before counters.
+        */
+       smp_rmb();
+       consumed_cur = atomic_long_read(&buf->consumed);
+       /*
+        * No need to issue a memory barrier between consumed count read and
+        * write offset read, because consumed count can only change
+        * concurrently in overwrite mode, and we keep a sequence counter
+        * identifier derived from the write offset to check we are getting
+        * the same sub-buffer we are expecting (the sub-buffers are atomically
+        * "tagged" upon writes, tags are checked upon read).
+        */
+       write_offset = v_read(config, &buf->offset);
+
+       /*
+        * Check that we are not about to read the same subbuffer in
+        * which the writer head is.
+        */
+       if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
+           == 0)
+               goto nodata;
+
+       *consumed = consumed_cur;
+       *produced = subbuf_trunc(write_offset, chan);
+
+       return 0;
+
+nodata:
+       /*
+        * The memory barriers __wait_event()/wake_up_interruptible() take care
+        * of "raw_spin_is_locked" memory ordering.
+        */
+       if (finalized)
+               return -ENODATA;
+       else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
+               goto retry;
+       else
+               return -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_snapshot);
+
+/**
+ * lib_ring_buffer_snapshot_sample_positions - sample consumed/produced positions
+ * @buf: ring buffer
+ * @consumed: consumed byte count indicating the last position read
+ * @produced: produced byte count indicating the last position written
+ *
+ * Performs the same function as lib_ring_buffer_snapshot(), but the positions
+ * are saved regardless of whether the consumed and produced positions are
+ * in the same subbuffer.
+ * This function is meant to provide information on the exact producer and
+ * consumer positions without regard for the "snapshot" feature.
+ */
+int lib_ring_buffer_snapshot_sample_positions(struct lib_ring_buffer *buf,
+               unsigned long *consumed, unsigned long *produced)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       smp_rmb();
+       *consumed = atomic_long_read(&buf->consumed);
+       /*
+        * No need to issue a memory barrier between consumed count read and
+        * write offset read, because consumed count can only change
+        * concurrently in overwrite mode, and we keep a sequence counter
+        * identifier derived from the write offset to check we are getting
+        * the same sub-buffer we are expecting (the sub-buffers are atomically
+        * "tagged" upon writes, tags are checked upon read).
+        */
+       *produced = v_read(config, &buf->offset);
+       return 0;
+}
+
+/**
+ * lib_ring_buffer_move_consumer - move consumed counter forward
+ * @buf: ring buffer
+ * @consumed_new: new consumed count value
+ *
+ * Should only be called from consumer context.
+ */
+void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
+                                  unsigned long consumed_new)
+{
+       struct lib_ring_buffer_backend *bufb = &buf->backend;
+       struct channel *chan = bufb->chan;
+       unsigned long consumed;
+
+       CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
+
+       /*
+        * Only push the consumed value forward.
+        * If the consumed cmpxchg fails, this is because we have been pushed by
+        * the writer in flight recorder mode.
+        */
+       consumed = atomic_long_read(&buf->consumed);
+       while ((long) consumed - (long) consumed_new < 0)
+               consumed = atomic_long_cmpxchg(&buf->consumed, consumed,
+                                              consumed_new);
+       /* Wake-up the metadata producer */
+       wake_up_interruptible(&buf->write_wait);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_move_consumer);
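+
+/*
+ * Typical consumer flow combining the read-side API (a sketch under the
+ * single-reader assumption; consume_data() is a hypothetical placeholder for
+ * the client's copy-out of the reader sub-buffer):
+ *
+ *     unsigned long consumed, produced;
+ *
+ *     while (!lib_ring_buffer_snapshot(buf, &consumed, &produced)) {
+ *             while (consumed < produced) {
+ *                     if (lib_ring_buffer_get_subbuf(buf, consumed))
+ *                             break;  -EAGAIN or -ENODATA
+ *                     consume_data(buf);
+ *                     lib_ring_buffer_put_subbuf(buf);
+ *                     consumed += chan->backend.subbuf_size;
+ *                     lib_ring_buffer_move_consumer(buf, consumed);
+ *             }
+ *     }
+ */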
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+static void lib_ring_buffer_flush_read_subbuf_dcache(
+               const struct lib_ring_buffer_config *config,
+               struct channel *chan,
+               struct lib_ring_buffer *buf)
+{
+       struct lib_ring_buffer_backend_pages *pages;
+       unsigned long sb_bindex, id, i, nr_pages;
+
+       if (config->output != RING_BUFFER_MMAP)
+               return;
+
+       /*
+        * Architectures with caches aliased on virtual addresses may
+        * use different cache lines for the linear mapping vs
+        * user-space memory mapping. Given that the ring buffer is
+        * based on the kernel linear mapping, aligning it with the
+        * user-space mapping is not straightforward, and would require
+        * extra TLB entries. Therefore, simply flush the dcache for the
+        * entire sub-buffer before reading it.
+        */
+       id = buf->backend.buf_rsb.id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       pages = buf->backend.array[sb_bindex];
+       nr_pages = buf->backend.num_pages_per_subbuf;
+       for (i = 0; i < nr_pages; i++) {
+               struct lib_ring_buffer_backend_page *backend_page;
+
+               backend_page = &pages->p[i];
+               flush_dcache_page(pfn_to_page(backend_page->pfn));
+       }
+}
+#else
+static void lib_ring_buffer_flush_read_subbuf_dcache(
+               const struct lib_ring_buffer_config *config,
+               struct channel *chan,
+               struct lib_ring_buffer *buf)
+{
+}
+#endif
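+
+/*
+ * Note: ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is non-zero only on architectures
+ * whose data caches can alias on virtual addresses (flush_dcache_page() is a
+ * no-op elsewhere), so this flush only costs something where it is required.
+ */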
+
+/**
+ * lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
+ * @buf: ring buffer
+ * @consumed: consumed count indicating the position where to read
+ *
+ * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
+ * data to read at consumed position, or 0 if the get operation succeeds.
+ * Busy-loop trying to get data if the tick_nohz sequence lock is held.
+ */
+int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
+                              unsigned long consumed)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
+       int ret;
+       int finalized;
+
+       if (buf->get_subbuf) {
+               /*
+                * Reader is trying to get a subbuffer twice.
+                */
+               CHAN_WARN_ON(chan, 1);
+               return -EBUSY;
+       }
+retry:
+       finalized = READ_ONCE(buf->finalized);
+       /*
+        * Read finalized before counters.
+        */
+       smp_rmb();
+       consumed_cur = atomic_long_read(&buf->consumed);
+       consumed_idx = subbuf_index(consumed, chan);
+       commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
+       /*
+        * Make sure we read the commit count before reading the buffer
+        * data and the write offset. Correct consumed offset ordering
+        * wrt commit count is ensured by the use of cmpxchg to update
+        * the consumed offset.
+        * smp_call_function_single can fail if the remote CPU is offline,
+        * this is OK because then there is no wmb to execute there.
+        * If our thread is executing on the same CPU as the one the buffer
+        * belongs to, we don't have to synchronize it at all. If we are
+        * migrated, the scheduler will take care of the memory barriers.
+        * Normally, smp_call_function_single() should ensure program order when
+        * executing the remote function, which implies that it surrounds the
+        * function execution with:
+        * smp_mb()
+        * send IPI
+        * csd_lock_wait
+        *                recv IPI
+        *                smp_mb()
+        *                exec. function
+        *                smp_mb()
+        *                csd unlock
+        * smp_mb()
+        *
+        * However, smp_call_function_single() does not seem to clearly execute
+        * such barriers. It depends on spinlock semantics to provide the
+        * barrier before executing the IPI and, when busy-looping, csd_lock_wait
+        * only executes smp_mb() when it has to wait for the other CPU.
+        *
+        * I don't trust this code. Therefore, let's add the smp_mb() sequence
+        * required ourselves, even if duplicated. It has no performance impact
+        * anyway.
+        *
+        * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
+        * read and write vs write. They do not ensure core synchronization. We
+        * really have to ensure total order between the 3 barriers running on
+        * the 2 CPUs.
+        */
+       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
+               if (config->sync == RING_BUFFER_SYNC_PER_CPU
+                   && config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+                       if (raw_smp_processor_id() != buf->backend.cpu) {
+                               /* Total order with IPI handler smp_mb() */
+                               smp_mb();
+                               smp_call_function_single(buf->backend.cpu,
+                                                        remote_mb, NULL, 1);
+                               /* Total order with IPI handler smp_mb() */
+                               smp_mb();
+                       }
+               } else {
+                       /* Total order with IPI handler smp_mb() */
+                       smp_mb();
+                       smp_call_function(remote_mb, NULL, 1);
+                       /* Total order with IPI handler smp_mb() */
+                       smp_mb();
+               }
+       } else {
+               /*
+                * Local rmb to match the remote wmb to read the commit count
+                * before the buffer data and the write offset.
+                */
+               smp_rmb();
+       }
+
+       write_offset = v_read(config, &buf->offset);
+
+       /*
+        * Check that the buffer we are getting is after or at consumed_cur
+        * position.
+        */
+       if ((long) subbuf_trunc(consumed, chan)
+           - (long) subbuf_trunc(consumed_cur, chan) < 0)
+               goto nodata;
+
+       /*
+        * Check that the subbuffer we are trying to consume has been
+        * already fully committed.
+        */
+       if (((commit_count - chan->backend.subbuf_size)
+            & chan->commit_count_mask)
+           - (buf_trunc(consumed, chan)
+              >> chan->backend.num_subbuf_order)
+           != 0)
+               goto nodata;
+
+       /*
+        * Check that we are not about to read the same subbuffer in
+        * which the writer head is.
+        */
+       if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed, chan)
+           == 0)
+               goto nodata;
+
+       /*
+        * Failure to get the subbuffer causes a busy-loop retry without going
+        * to a wait queue. Such failures are caused by short-lived race windows
+        * where the writer is getting access to a subbuffer we were trying to
+        * get access to. Also check that the "consumed" buffer count we are
+        * looking for matches the one contained in the subbuffer id.
+        */
+       ret = update_read_sb_index(config, &buf->backend, &chan->backend,
+                                  consumed_idx, buf_trunc_val(consumed, chan));
+       if (ret)
+               goto retry;
+       subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
+
+       buf->get_subbuf_consumed = consumed;
+       buf->get_subbuf = 1;
+
+       lib_ring_buffer_flush_read_subbuf_dcache(config, chan, buf);
+
+       return 0;
+
+nodata:
+       /*
+        * The memory barriers __wait_event()/wake_up_interruptible() take care
+        * of "raw_spin_is_locked" memory ordering.
+        */
+       if (finalized)
+               return -ENODATA;
+       else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
+               goto retry;
+       else
+               return -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_get_subbuf);
+
+/**
+ * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
+ * @buf: ring buffer
+ */
+void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
+{
+       struct lib_ring_buffer_backend *bufb = &buf->backend;
+       struct channel *chan = bufb->chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long read_sb_bindex, consumed_idx, consumed;
+
+       CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
+
+       if (!buf->get_subbuf) {
+               /*
+                * Reader puts a subbuffer it did not get.
+                */
+               CHAN_WARN_ON(chan, 1);
+               return;
+       }
+       consumed = buf->get_subbuf_consumed;
+       buf->get_subbuf = 0;
+
+       /*
+        * Clear the records_unread counter (overruns counter).
+        * It can still be non-zero if a file reader simply grabbed the data
+        * without using iterators.
+        * It can be below zero if an iterator is used on a snapshot more than
+        * once.
+        */
+       read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+       v_add(config, v_read(config,
+                            &bufb->array[read_sb_bindex]->records_unread),
+             &bufb->records_read);
+       v_set(config, &bufb->array[read_sb_bindex]->records_unread, 0);
+       CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
+       subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
+
+       /*
+        * Exchange the reader subbuffer with the one we put in its place in the
+        * writer subbuffer table. Expect the original consumed count. If
+        * update_read_sb_index fails, this is because the writer updated the
+        * subbuffer concurrently. We should therefore keep the subbuffer we
+        * currently have: it has become invalid to try reading this sub-buffer's
+        * consumed count value anyway.
+        */
+       consumed_idx = subbuf_index(consumed, chan);
+       update_read_sb_index(config, &buf->backend, &chan->backend,
+                            consumed_idx, buf_trunc_val(consumed, chan));
+       /*
+        * update_read_sb_index return value ignored. Don't exchange sub-buffer
+        * if the writer concurrently updated it.
+        */
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_put_subbuf);
+
+/*
+ * cons_offset is an iterator on all subbuffer offsets between the reader
+ * position and the writer position (inclusive).
+ */
+static
+void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
+                                           struct channel *chan,
+                                           unsigned long cons_offset,
+                                           int cpu)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long cons_idx, commit_count, commit_count_sb;
+
+       cons_idx = subbuf_index(cons_offset, chan);
+       commit_count = v_read(config, &buf->commit_hot[cons_idx].cc);
+       commit_count_sb = v_read(config, &buf->commit_cold[cons_idx].cc_sb);
+
+       if (subbuf_offset(commit_count, chan) != 0)
+               printk(KERN_WARNING
+                      "ring buffer %s, cpu %d: "
+                      "commit count in subbuffer %lu,\n"
+                      "expecting multiples of %lu bytes\n"
+                      "  [ %lu bytes committed, %lu bytes reader-visible ]\n",
+                      chan->backend.name, cpu, cons_idx,
+                      chan->backend.subbuf_size,
+                      commit_count, commit_count_sb);
+
+       printk(KERN_DEBUG "ring buffer: %s, cpu %d: %lu bytes committed\n",
+              chan->backend.name, cpu, commit_count);
+}
+
+static
+void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
+                                        struct channel *chan,
+                                        void *priv, int cpu)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long write_offset, cons_offset;
+
+       /*
+        * No need to order commit_count, write_offset and cons_offset reads
+        * because we execute at teardown when no more writer nor reader
+        * references are left.
+        */
+       write_offset = v_read(config, &buf->offset);
+       cons_offset = atomic_long_read(&buf->consumed);
+       if (write_offset != cons_offset)
+               printk(KERN_DEBUG
+                      "ring buffer %s, cpu %d: "
+                      "non-consumed data\n"
+                      "  [ %lu bytes written, %lu bytes read ]\n",
+                      chan->backend.name, cpu, write_offset, cons_offset);
+
+       for (cons_offset = atomic_long_read(&buf->consumed);
+            (long) (subbuf_trunc((unsigned long) v_read(config, &buf->offset),
+                                 chan)
+                    - cons_offset) > 0;
+            cons_offset = subbuf_align(cons_offset, chan))
+               lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
+                                                      cpu);
+}
+
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
+static
+void lib_ring_buffer_print_records_count(struct channel *chan,
+                                        struct lib_ring_buffer *buf,
+                                        int cpu)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (!strcmp(chan->backend.name, "relay-metadata")) {
+               printk(KERN_DEBUG "ring buffer %s: %lu records written, "
+                       "%lu records overrun\n",
+                       chan->backend.name,
+                       v_read(config, &buf->records_count),
+                       v_read(config, &buf->records_overrun));
+       } else {
+               printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
+                       "%lu records overrun\n",
+                       chan->backend.name, cpu,
+                       v_read(config, &buf->records_count),
+                       v_read(config, &buf->records_overrun));
+       }
+}
+#else
+static
+void lib_ring_buffer_print_records_count(struct channel *chan,
+                                        struct lib_ring_buffer *buf,
+                                        int cpu)
+{
+}
+#endif
+
+static
+void lib_ring_buffer_print_errors(struct channel *chan,
+                                 struct lib_ring_buffer *buf, int cpu)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       void *priv = chan->backend.priv;
+
+       lib_ring_buffer_print_records_count(chan, buf, cpu);
+       if (strcmp(chan->backend.name, "relay-metadata")) {
+               if (v_read(config, &buf->records_lost_full)
+                   || v_read(config, &buf->records_lost_wrap)
+                   || v_read(config, &buf->records_lost_big))
+                       printk(KERN_WARNING
+                               "ring buffer %s, cpu %d: records were lost. Caused by:\n"
+                               "  [ %lu buffer full, %lu nested buffer wrap-around, "
+                               "%lu event too big ]\n",
+                               chan->backend.name, cpu,
+                               v_read(config, &buf->records_lost_full),
+                               v_read(config, &buf->records_lost_wrap),
+                               v_read(config, &buf->records_lost_big));
+       }
+       lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu);
+}
+
+/*
+ * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
+ *
+ * Only executed when the buffer is finalized, in SWITCH_FLUSH.
+ */
+static
+void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
+                                     struct channel *chan,
+                                     struct switch_offsets *offsets,
+                                     u64 tsc)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long oldidx = subbuf_index(offsets->old, chan);
+       unsigned long commit_count;
+       struct commit_counters_hot *cc_hot;
+
+       config->cb.buffer_begin(buf, tsc, oldidx);
+
+       /*
+        * Order all writes to buffer before the commit count update that will
+        * determine that the subbuffer is full.
+        */
+       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
+               /*
+                * Must write slot data before incrementing commit count.  This
+                * compiler barrier is upgraded into a smp_mb() by the IPI sent
+                * by get_subbuf().
+                */
+               barrier();
+       } else
+               smp_wmb();
+       cc_hot = &buf->commit_hot[oldidx];
+       v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
+       commit_count = v_read(config, &cc_hot->cc);
+       /* Check if the written buffer has to be delivered */
+       lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
+                                     commit_count, oldidx, tsc);
+       lib_ring_buffer_write_commit_counter(config, buf, chan,
+                       offsets->old + config->cb.subbuffer_header_size(),
+                       commit_count, cc_hot);
+}
+
+/*
+ * lib_ring_buffer_switch_old_end: switch old subbuffer
+ *
+ * Note: offset_old should never be 0 here. This is OK, because we never
+ * perform a buffer switch on an empty subbuffer in SWITCH_ACTIVE mode. The
+ * caller increments the offset_old value when doing a SWITCH_FLUSH on an
+ * empty subbuffer.
+ */
+static
+void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
+                                   struct channel *chan,
+                                   struct switch_offsets *offsets,
+                                   u64 tsc)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
+       unsigned long commit_count, padding_size, data_size;
+       struct commit_counters_hot *cc_hot;
+       u64 *ts_end;
+
+       data_size = subbuf_offset(offsets->old - 1, chan) + 1;
+       padding_size = chan->backend.subbuf_size - data_size;
+       subbuffer_set_data_size(config, &buf->backend, oldidx, data_size);
+
+       ts_end = &buf->ts_end[oldidx];
+       /*
+        * This is the last space reservation in that sub-buffer before
+        * it gets delivered. This provides exclusive access to write to
+        * this sub-buffer's ts_end. There are also no concurrent
+        * readers of that ts_end because delivery of that sub-buffer is
+        * postponed until the commit counter is incremented for the
+        * current space reservation.
+        */
+       *ts_end = tsc;
+
+       /*
+        * Order all writes to buffer and store to ts_end before the commit
+        * count update that will determine that the subbuffer is full.
+        */
+       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
+               /*
+                * Must write slot data before incrementing commit count.  This
+                * compiler barrier is upgraded into a smp_mb() by the IPI sent
+                * by get_subbuf().
+                */
+               barrier();
+       } else
+               smp_wmb();
+       cc_hot = &buf->commit_hot[oldidx];
+       v_add(config, padding_size, &cc_hot->cc);
+       commit_count = v_read(config, &cc_hot->cc);
+       lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
+                                     commit_count, oldidx, tsc);
+       lib_ring_buffer_write_commit_counter(config, buf, chan,
+                       offsets->old + padding_size, commit_count,
+                       cc_hot);
+}
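+
+/*
+ * Worked example of the padding accounting above (illustrative numbers):
+ * with a 4096-byte sub-buffer holding 1000 bytes of payload at switch time,
+ * data_size = 1000 and padding_size = 3096. Adding padding_size to the hot
+ * commit counter brings the per-lap total for this sub-buffer up to the full
+ * sub-buffer size, the fully-committed state lib_ring_buffer_check_deliver()
+ * requires before delivering the sub-buffer to readers.
+ */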
+
+/*
+ * lib_ring_buffer_switch_new_start: Populate new subbuffer.
+ *
+ * This code can be executed unordered: writers may already have written to the
+ * sub-buffer before this code gets executed, caution. The commit makes sure
+ * that this code is executed before the delivery of this sub-buffer.
+ */
+static
+void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
+                                     struct channel *chan,
+                                     struct switch_offsets *offsets,
+                                     u64 tsc)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long beginidx = subbuf_index(offsets->begin, chan);
+       unsigned long commit_count;
+       struct commit_counters_hot *cc_hot;
+
+       config->cb.buffer_begin(buf, tsc, beginidx);
+
+       /*
+        * Order all writes to buffer before the commit count update that will
+        * determine that the subbuffer is full.
+        */
+       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
+               /*
+                * Must write slot data before incrementing commit count.  This
+                * compiler barrier is upgraded into a smp_mb() by the IPI sent
+                * by get_subbuf().
+                */
+               barrier();
+       } else
+               smp_wmb();
+       cc_hot = &buf->commit_hot[beginidx];
+       v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
+       commit_count = v_read(config, &cc_hot->cc);
+       /* Check if the written buffer has to be delivered */
+       lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
+                                     commit_count, beginidx, tsc);
+       lib_ring_buffer_write_commit_counter(config, buf, chan,
+                       offsets->begin + config->cb.subbuffer_header_size(),
+                       commit_count, cc_hot);
+}
+
+/*
+ * lib_ring_buffer_switch_new_end: finish switching current subbuffer
+ *
+ * Calls subbuffer_set_data_size() to set the data size of the current
+ * sub-buffer. We do not need to perform check_deliver nor commit here,
+ * since this task will be done by the "commit" of the event for which
+ * we are currently doing the space reservation.
+ */
+static
+void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
+                                           struct channel *chan,
+                                           struct switch_offsets *offsets,
+                                           u64 tsc)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long endidx, data_size;
+       u64 *ts_end;
+
+       endidx = subbuf_index(offsets->end - 1, chan);
+       data_size = subbuf_offset(offsets->end - 1, chan) + 1;
+       subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
+       ts_end = &buf->ts_end[endidx];
+       /*
+        * This is the last space reservation in that sub-buffer before
+        * it gets delivered. This provides exclusive access to write to
+        * this sub-buffer's ts_end. There are also no concurrent
+        * readers of that ts_end because delivery of that sub-buffer is
+        * postponed until the commit counter is incremented for the
+        * current space reservation.
+        */
+       *ts_end = tsc;
+}
+
+/*
+ * Returns:
+ * 0 if OK
+ * !0 if execution must be aborted.
+ */
+static
+int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
+                                   struct lib_ring_buffer *buf,
+                                   struct channel *chan,
+                                   struct switch_offsets *offsets,
+                                   u64 *tsc)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long off, reserve_commit_diff;
+
+       offsets->begin = v_read(config, &buf->offset);
+       offsets->old = offsets->begin;
+       offsets->switch_old_start = 0;
+       off = subbuf_offset(offsets->begin, chan);
+
+       *tsc = config->cb.ring_buffer_clock_read(chan);
+
+       /*
+        * Ensure we flush the header of an empty subbuffer when doing the
+        * finalize (SWITCH_FLUSH). This ensures that we end up knowing the
+        * total data gathering duration even if there were no records saved
+        * after the last buffer switch.
+        * In SWITCH_ACTIVE mode, switch the buffer when it contains events.
+        * SWITCH_ACTIVE only flushes the current subbuffer, dealing with end of
+        * subbuffer header as appropriate.
+        * The next record that reserves space will be responsible for
+        * populating the following subbuffer header. We choose not to populate
+        * the next subbuffer header here because we want to be able to use
+        * SWITCH_ACTIVE for periodical buffer flush and CPU tick_nohz stop
+        * buffer flush, which must guarantee that all the buffer content
+        * (records and header timestamps) are visible to the reader. This is
+        * required for quiescence guarantees for the fusion merge.
+        */
+       if (mode != SWITCH_FLUSH && !off)
+               return -1;      /* we do not have to switch: buffer is empty */
+
+       if (unlikely(off == 0)) {
+               unsigned long sb_index, commit_count;
+
+               /*
+                * We are performing a SWITCH_FLUSH. At this stage, there are no
+                * concurrent writes into the buffer.
+                *
+                * The client does not save any header information.  Don't
+                * switch empty subbuffer on finalize, because it is invalid to
+                * deliver a completely empty subbuffer.
+                */
+               if (!config->cb.subbuffer_header_size())
+                       return -1;
+
+               /* Test new buffer integrity */
+               sb_index = subbuf_index(offsets->begin, chan);
+               commit_count = v_read(config,
+                               &buf->commit_cold[sb_index].cc_sb);
+               reserve_commit_diff =
+                 (buf_trunc(offsets->begin, chan)
+                  >> chan->backend.num_subbuf_order)
+                 - (commit_count & chan->commit_count_mask);
+               if (likely(reserve_commit_diff == 0)) {
+                       /* Next subbuffer not being written to. */
+                       if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+                               subbuf_trunc(offsets->begin, chan)
+                                - subbuf_trunc((unsigned long)
+                                    atomic_long_read(&buf->consumed), chan)
+                               >= chan->backend.buf_size)) {
+                               /*
+                                * We do not overwrite non-consumed buffers
+                                * and we are full: don't switch.
+                                */
+                               return -1;
+                       } else {
+                               /*
+                                * Next subbuffer not being written to, and we
+                                * are either in overwrite mode or the buffer is
+                                * not full. It's safe to write in this new
+                                * subbuffer.
+                                */
+                       }
+               } else {
+                       /*
+                        * Next subbuffer reserve offset does not match the
+                        * commit offset. Don't perform switch in
+                        * producer-consumer and overwrite mode.  Caused by
+                        * either a writer OOPS or too many nested writes over a
+                        * reserve/commit pair.
+                        */
+                       return -1;
+               }
+
+               /*
+                * Need to write the subbuffer start header on finalize.
+                */
+               offsets->switch_old_start = 1;
+       }
+       offsets->begin = subbuf_align(offsets->begin, chan);
+       /* Note: old points to the next subbuf at offset 0 */
+       offsets->end = offsets->begin;
+       return 0;
+}
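+
+/*
+ * A note on the reserve_commit_diff computation above (reasoning from the
+ * definitions in this file): since buf_size = subbuf_size * num_subbuf,
+ * buf_trunc(offset, chan) >> num_subbuf_order equals laps * subbuf_size,
+ * where "laps" is the number of complete passes over the whole buffer.
+ * A sub-buffer that was fully committed on the previous lap has
+ * cc_sb == laps * subbuf_size (modulo commit_count_mask), so
+ * reserve_commit_diff == 0 exactly when the sub-buffer we are about to
+ * enter carries no leftover uncommitted data.
+ */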
+
+/*
+ * Force a sub-buffer switch. This operation is completely reentrant: it can
+ * be called while tracing is active with absolutely no lock held.
+ *
+ * Note, however, that as a v_cmpxchg is used for some atomic
+ * operations, this function must be called from the CPU which owns the buffer
+ * for an ACTIVE flush.
+ */
+void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct switch_offsets offsets;
+       unsigned long oldidx;
+       u64 tsc;
+
+       offsets.size = 0;
+
+       /*
+        * Perform retryable operations.
+        */
+       do {
+               if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
+                                                   &tsc))
+                       return; /* Switch not needed */
+       } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
+                != offsets.old);
+
+       /*
+        * Atomically update last_tsc. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary full TSC
+        * records, never the opposite (missing a full TSC record when it would
+        * be needed).
+        */
+       save_last_tsc(config, buf, tsc);
+
+       /*
+        * Push the reader if necessary
+        */
+       lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
+
+       oldidx = subbuf_index(offsets.old, chan);
+       lib_ring_buffer_clear_noref(config, &buf->backend, oldidx);
+
+       /*
+        * May need to populate header start on SWITCH_FLUSH.
+        */
+       if (offsets.switch_old_start) {
+               lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc);
+               offsets.old += config->cb.subbuffer_header_size();
+       }
+
+       /*
+        * Switch old subbuffer.
+        */
+       lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
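+
+/*
+ * Example trigger (a sketch): the per-buffer switch timer armed by
+ * lib_ring_buffer_start_switch_timer() periodically performs the equivalent
+ * of the following, so that live readers see data even when a sub-buffer
+ * never fills up:
+ *
+ *     lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+ */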
+
+struct switch_param {
+       struct lib_ring_buffer *buf;
+       enum switch_mode mode;
+};
+
+static void remote_switch(void *info)
+{
+       struct switch_param *param = info;
+       struct lib_ring_buffer *buf = param->buf;
+
+       lib_ring_buffer_switch_slow(buf, param->mode);
+}
+
+static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
+               enum switch_mode mode)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       int ret;
+       struct switch_param param;
+
+       /*
+        * With global synchronization we don't need to use the IPI scheme.
+        */
+       if (config->sync == RING_BUFFER_SYNC_GLOBAL) {
+               lib_ring_buffer_switch_slow(buf, mode);
+               return;
+       }
+
+       /*
+        * Disabling preemption ensures two things: first, that the
+        * target CPU is not taken concurrently offline while we are within
+        * smp_call_function_single(). Second, if it happens that the
+        * CPU is not online, our own call to lib_ring_buffer_switch_slow()
+        * needs to be protected from CPU hotplug handlers, which can
+        * also perform a remote subbuffer switch.
+        */
+       preempt_disable();
+       param.buf = buf;
+       param.mode = mode;
+       ret = smp_call_function_single(buf->backend.cpu,
+                                remote_switch, &param, 1);
+       if (ret) {
+               /* Remote CPU is offline, do it ourselves. */
+               lib_ring_buffer_switch_slow(buf, mode);
+       }
+       preempt_enable();
+}
+
+/* Switch sub-buffer if current sub-buffer is non-empty. */
+void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
+{
+       _lib_ring_buffer_switch_remote(buf, SWITCH_ACTIVE);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote);
+
+/* Switch sub-buffer even if current sub-buffer is empty. */
+void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf)
+{
+       _lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote_empty);
+
+void lib_ring_buffer_clear(struct lib_ring_buffer *buf)
+{
+       struct lib_ring_buffer_backend *bufb = &buf->backend;
+       struct channel *chan = bufb->chan;
+
+       lib_ring_buffer_switch_remote(buf);
+       lib_ring_buffer_clear_reader(buf, chan);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_clear);
+
+/*
+ * Returns:
+ * 0 if ok
+ * -ENOSPC if event size is too large for packet.
+ * -ENOBUFS if there is currently not enough space in buffer for the event.
+ * -EIO if data cannot be written into the buffer for any other reason.
+ */
+static
+int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
+                                    struct channel *chan,
+                                    struct switch_offsets *offsets,
+                                    struct lib_ring_buffer_ctx *ctx,
+                                    void *client_ctx)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long reserve_commit_diff, offset_cmp;
+
+retry:
+       offsets->begin = offset_cmp = v_read(config, &buf->offset);
+       offsets->old = offsets->begin;
+       offsets->switch_new_start = 0;
+       offsets->switch_new_end = 0;
+       offsets->switch_old_end = 0;
+       offsets->pre_header_padding = 0;
+
+       ctx->tsc = config->cb.ring_buffer_clock_read(chan);
+       if ((int64_t) ctx->tsc == -EIO)
+               return -EIO;
+
+       if (last_tsc_overflow(config, buf, ctx->tsc))
+               ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+
+       if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+               offsets->switch_new_start = 1;          /* For offsets->begin */
+       } else {
+               offsets->size = config->cb.record_header_size(config, chan,
+                                               offsets->begin,
+                                               &offsets->pre_header_padding,
+                                               ctx, client_ctx);
+               offsets->size +=
+                       lib_ring_buffer_align(offsets->begin + offsets->size,
+                                             ctx->largest_align)
+                       + ctx->data_size;
+               if (unlikely(subbuf_offset(offsets->begin, chan) +
+                            offsets->size > chan->backend.subbuf_size)) {
+                       offsets->switch_old_end = 1;    /* For offsets->old */
+                       offsets->switch_new_start = 1;  /* For offsets->begin */
+               }
+       }
+       if (unlikely(offsets->switch_new_start)) {
+               unsigned long sb_index, commit_count;
+
+               /*
+                * We are typically not filling the previous buffer completely.
+                */
+               if (likely(offsets->switch_old_end))
+                       offsets->begin = subbuf_align(offsets->begin, chan);
+               offsets->begin = offsets->begin
+                                + config->cb.subbuffer_header_size();
+               /* Test new buffer integrity */
+               sb_index = subbuf_index(offsets->begin, chan);
+               /*
+                * Read buf->offset before buf->commit_cold[sb_index].cc_sb.
+                * lib_ring_buffer_check_deliver() has the matching
+                * memory barriers required around commit_cold cc_sb
+                * updates to ensure reserve and commit counter updates
+                * are not seen reordered when updated by another CPU.
+                */
+               smp_rmb();
+               commit_count = v_read(config,
+                               &buf->commit_cold[sb_index].cc_sb);
+               /* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
+               smp_rmb();
+               if (unlikely(offset_cmp != v_read(config, &buf->offset))) {
+                       /*
+                        * The reserve counter has been concurrently updated
+                        * while we read the commit counter. This means the
+                        * commit counter we read might not match buf->offset
+                        * due to concurrent update. We therefore need to retry.
+                        */
+                       goto retry;
+               }
+               reserve_commit_diff =
+                 (buf_trunc(offsets->begin, chan)
+                  >> chan->backend.num_subbuf_order)
+                 - (commit_count & chan->commit_count_mask);
+               if (likely(reserve_commit_diff == 0)) {
+                       /* Next subbuffer not being written to. */
+                       if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+                               subbuf_trunc(offsets->begin, chan)
+                                - subbuf_trunc((unsigned long)
+                                    atomic_long_read(&buf->consumed), chan)
+                               >= chan->backend.buf_size)) {
+                               /*
+                                * We do not overwrite non-consumed buffers
+                                * and we are full: the record is lost.
+                                */
+                               v_inc(config, &buf->records_lost_full);
+                               return -ENOBUFS;
+                       } else {
+                               /*
+                                * Next subbuffer not being written to, and we
+                                * are either in overwrite mode or the buffer is
+                                * not full. It's safe to write in this new
+                                * subbuffer.
+                                */
+                       }
+               } else {
+                       /*
+                        * Next subbuffer reserve offset does not match the
+                        * commit offset, and this did not involve update to the
+                        * reserve counter. Drop record in producer-consumer and
+                        * overwrite mode.  Caused by either a writer OOPS or
+                        * too many nested writes over a reserve/commit pair.
+                        */
+                       v_inc(config, &buf->records_lost_wrap);
+                       return -EIO;
+               }
+               offsets->size =
+                       config->cb.record_header_size(config, chan,
+                                               offsets->begin,
+                                               &offsets->pre_header_padding,
+                                               ctx, client_ctx);
+               offsets->size +=
+                       lib_ring_buffer_align(offsets->begin + offsets->size,
+                                             ctx->largest_align)
+                       + ctx->data_size;
+               if (unlikely(subbuf_offset(offsets->begin, chan)
+                            + offsets->size > chan->backend.subbuf_size)) {
+                       /*
+                        * Record too big for subbuffers, report error, don't
+                        * complete the sub-buffer switch.
+                        */
+                       v_inc(config, &buf->records_lost_big);
+                       return -ENOSPC;
+               } else {
+                       /*
+                        * We just made a successful buffer switch and the
+                        * record fits in the new subbuffer. Let's write.
+                        */
+               }
+       } else {
+               /*
+                * Record fits in the current buffer and we are not on a switch
+                * boundary. It's safe to write.
+                */
+       }
+       offsets->end = offsets->begin + offsets->size;
+
+       if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+               /*
+                * The offset_end will fall at the very beginning of the next
+                * subbuffer.
+                */
+               offsets->switch_new_end = 1;    /* For offsets->begin */
+       }
+       return 0;
+}
+
+static struct lib_ring_buffer *get_current_buf(struct channel *chan, int cpu)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               return per_cpu_ptr(chan->backend.buf, cpu);
+       else
+               return chan->backend.buf;
+}
+
+void lib_ring_buffer_lost_event_too_big(struct channel *chan)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lib_ring_buffer *buf = get_current_buf(chan, smp_processor_id());
+
+       v_inc(config, &buf->records_lost_big);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_lost_event_too_big);
+
+/**
+ * lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
+ * @ctx: ring buffer context.
+ *
+ * Returns: -ENOBUFS if not enough space, -ENOSPC if event size too large,
+ * -EIO for other errors, else returns 0.
+ * It will take care of sub-buffer switching.
+ */
+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+               void *client_ctx)
+{
+       struct channel *chan = ctx->chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lib_ring_buffer *buf;
+       struct switch_offsets offsets;
+       int ret;
+
+       ctx->buf = buf = get_current_buf(chan, ctx->cpu);
+       offsets.size = 0;
+
+       do {
+               ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
+                                                      ctx, client_ctx);
+               if (unlikely(ret))
+                       return ret;
+       } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+                                   offsets.end)
+                         != offsets.old));
+
+       /*
+        * Atomically update last_tsc. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary full TSC
+        * records, never the opposite (missing a full TSC record when it would
+        * be needed).
+        */
+       save_last_tsc(config, buf, ctx->tsc);
+
+       /*
+        * Push the reader if necessary
+        */
+       lib_ring_buffer_reserve_push_reader(buf, chan, offsets.end - 1);
+
+       /*
+        * Clear noref flag for this subbuffer.
+        */
+       lib_ring_buffer_clear_noref(config, &buf->backend,
+                                   subbuf_index(offsets.end - 1, chan));
+
+       /*
+        * Switch old subbuffer if needed.
+        */
+       if (unlikely(offsets.switch_old_end)) {
+               lib_ring_buffer_clear_noref(config, &buf->backend,
+                                           subbuf_index(offsets.old - 1, chan));
+               lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
+       }
+
+       /*
+        * Populate new subbuffer.
+        */
+       if (unlikely(offsets.switch_new_start))
+               lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
+
+       if (unlikely(offsets.switch_new_end))
+               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+
+       ctx->slot_size = offsets.size;
+       ctx->pre_offset = offsets.begin;
+       ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
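
The loop above that publishes offsets.end with v_cmpxchg() is the heart of the
lock-free reservation: each writer snapshots buf->offset, computes its slot via
lib_ring_buffer_try_reserve_slow(), and retries if another writer moved the
offset meanwhile. A minimal userspace model of that pattern using C11 atomics
(a sketch only; sub-buffer boundary and header handling are elided, and the
names are not part of this API):

    #include <stdatomic.h>
    #include <stddef.h>

    static _Atomic unsigned long model_offset;  /* models buf->offset */

    /* Reserve "size" bytes; returns the start offset of the reserved slot. */
    static unsigned long model_reserve(size_t size)
    {
            unsigned long old, end;

            do {
                    old = atomic_load(&model_offset);
                    end = old + size;
                    /*
                     * The real code recomputes sub-buffer boundaries and
                     * record header sizes here (the try_reserve slow path
                     * above).
                     */
            } while (!atomic_compare_exchange_weak(&model_offset, &old, end));
            return old;
    }
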
+
+static
+void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
+                                         struct lib_ring_buffer *buf,
+                                         unsigned long commit_count,
+                                         unsigned long idx)
+{
+       if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
+               v_set(config, &buf->commit_hot[idx].seq, commit_count);
+}
+
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but this counting is disabled by default due to its performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
+static
+void deliver_count_events(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *buf,
+               unsigned long idx)
+{
+       v_add(config, subbuffer_get_records_count(config,
+                       &buf->backend, idx),
+               &buf->records_count);
+       v_add(config, subbuffer_count_records_overrun(config,
+                       &buf->backend, idx),
+               &buf->records_overrun);
+}
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static
+void deliver_count_events(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *buf,
+               unsigned long idx)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
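
Enabling these counters is a build-time decision. Presumably the define is
added to the module's compiler flags, e.g. through Kbuild (a sketch; the exact
hook used by this tree may differ):

    # Hypothetical Kbuild addition to compile in per-buffer event counters:
    ccflags-y += -DLTTNG_RING_BUFFER_COUNT_EVENTS
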
+
+void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
+                                  struct lib_ring_buffer *buf,
+                                  struct channel *chan,
+                                  unsigned long offset,
+                                  unsigned long commit_count,
+                                  unsigned long idx,
+                                  u64 tsc)
+{
+       unsigned long old_commit_count = commit_count
+                                        - chan->backend.subbuf_size;
+
+       /*
+        * If we succeeded at updating cc_sb below, we are the subbuffer
+        * writer delivering the subbuffer. Deals with concurrent
+        * updates of the "cc" value without adding a add_return atomic
+        * operation to the fast path.
+        *
+        * We are doing the delivery in two steps:
+        * - First, we cmpxchg() cc_sb to the new value
+        *   old_commit_count + 1. This ensures that we are the only
+        *   subbuffer user successfully filling the subbuffer, but we
+        *   do _not_ set the cc_sb value to "commit_count" yet.
+        *   Therefore, other writers that would wrap around the ring
+        *   buffer and try to start writing to our subbuffer would
+        *   have to drop records, because it would appear as
+        *   non-filled.
+        *   We therefore have exclusive access to the subbuffer control
+        *   structures.  This mutual exclusion with other writers is
+        *   crucially important for counting record overruns locklessly
+        *   in flight recorder mode.
+        * - When we are ready to release the subbuffer (either for
+        *   reading or for overrun by other writers), we simply set the
+        *   cc_sb value to "commit_count" and perform delivery.
+        *
+        * The subbuffer size is at least 2 bytes (minimum size: 1 page).
+        * This guarantees that old_commit_count + 1 != commit_count.
+        */
+
+       /*
+        * Order prior updates to reserve count prior to the
+        * commit_cold cc_sb update.
+        */
+       smp_wmb();
+       if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
+                                old_commit_count, old_commit_count + 1)
+                  == old_commit_count)) {
+               u64 *ts_end;
+
+               /*
+                * Start of exclusive subbuffer access. We are
+                * guaranteed to be the last writer in this subbuffer
+                * and any other writer trying to access this subbuffer
+                * in this state is required to drop records.
+                *
+                * We can read the ts_end for the current sub-buffer
+                * which has been saved by the very last space
+                * reservation for the current sub-buffer.
+                *
+                * Order increment of commit counter before reading ts_end.
+                */
+               smp_mb();
+               ts_end = &buf->ts_end[idx];
+               deliver_count_events(config, buf, idx);
+               config->cb.buffer_end(buf, *ts_end, idx,
+                                     lib_ring_buffer_get_data_size(config,
+                                                               buf,
+                                                               idx));
+
+               /*
+                * Increment the packet counter while we have exclusive
+                * access.
+                */
+               subbuffer_inc_packet_count(config, &buf->backend, idx);
+
+               /*
+                * Set noref flag and offset for this subbuffer id.
+                * Contains a memory barrier that ensures counter stores
+                * are ordered before set noref and offset.
+                */
+               lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
+                                                buf_trunc_val(offset, chan));
+
+               /*
+                * Order set_noref and record counter updates before the
+                * end of subbuffer exclusive access. Orders with
+                * respect to writers coming into the subbuffer after
+                * wrap around, and also with respect to concurrent readers.
+                */
+               smp_mb();
+               /* End of exclusive subbuffer access */
+               v_set(config, &buf->commit_cold[idx].cc_sb,
+                     commit_count);
+               /*
+                * Order later updates to reserve count after
+                * the commit_cold cc_sb update.
+                */
+               smp_wmb();
+               lib_ring_buffer_vmcore_check_deliver(config, buf,
+                                                commit_count, idx);
+
+               /*
+                * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
+                */
+               if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
+                   && atomic_long_read(&buf->active_readers)
+                   && lib_ring_buffer_poll_deliver(config, buf, chan)) {
+                       wake_up_interruptible(&buf->read_wait);
+                       wake_up_interruptible(&chan->read_wait);
+               }
+
+       }
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_check_deliver_slow);
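
The two-step delivery protocol described in the comment above can be modeled in
isolation: a single compare-and-exchange to the intermediate value
old_commit_count + 1 claims exclusive ownership of the sub-buffer, and a plain
store of the final commit count releases it. A userspace sketch with C11
atomics (illustrative names, not this API; the real code adds the memory
barriers shown above):

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned long model_cc_sb;   /* models commit_cold cc_sb */

    /* Returns true if this caller delivered the sub-buffer. */
    static bool model_deliver(unsigned long old_cc, unsigned long new_cc)
    {
            unsigned long expected = old_cc;

            /* Step 1: old_cc -> old_cc + 1 grants exclusive access. */
            if (!atomic_compare_exchange_strong(&model_cc_sb, &expected,
                                                old_cc + 1))
                    return false;       /* another writer owns it */

            /* ... exclusive work: finalize header, count records ... */

            /* Step 2: publish new_cc, releasing the sub-buffer. */
            atomic_store(&model_cc_sb, new_cc);
            return true;
    }
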
+
+int __init init_lib_ring_buffer_frontend(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               spin_lock_init(&per_cpu(ring_buffer_nohz_lock, cpu));
+       return 0;
+}
+
+module_init(init_lib_ring_buffer_frontend);
+
+void __exit exit_lib_ring_buffer_frontend(void)
+{
+}
+
+module_exit(exit_lib_ring_buffer_frontend);
diff --git a/src/lib/ringbuffer/ring_buffer_iterator.c b/src/lib/ringbuffer/ring_buffer_iterator.c
new file mode 100644 (file)
index 0000000..15d7c75
--- /dev/null
@@ -0,0 +1,841 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ring_buffer_iterator.c
+ *
+ * Ring buffer and channel iterators. Get each event of a channel in order. Uses
+ * a prio heap for per-cpu buffers, giving an O(log(NR_CPUS)) algorithmic
+ * complexity for the "get next event" operation.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <ringbuffer/iterator.h>
+#include <wrapper/file.h>
+#include <wrapper/uaccess.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+
+/*
+ * Safety factor taking into account internal kernel interrupt latency.
+ * Assuming 250ms worst-case latency.
+ */
+#define MAX_SYSTEM_LATENCY     250
+
+/*
+ * Maximum delta expected between trace clocks. At most 1 jiffy delta.
+ */
+#define MAX_CLOCK_DELTA                (jiffies_to_usecs(1) * 1000)
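
The header comment advertises an O(log(NR_CPUS)) "get next event" operation:
each per-CPU buffer is keyed by the timestamp of its current record and the
reader always consumes from the buffer with the lowest key. The linear scan
below is an illustrative model of that ordering invariant only; the real code
(channel_get_next_record() below) keeps the buffers in a lttng_ptr_heap, which
makes the selection logarithmic:

    /* Model of a per-CPU buffer holding one current record. */
    struct model_buf {
            unsigned long long timestamp;   /* current record's timestamp */
            int has_record;
    };

    /* Pick the buffer whose current record is oldest; NULL if all empty. */
    static struct model_buf *model_pick_next(struct model_buf *bufs, int n)
    {
            struct model_buf *best = NULL;
            int i;

            for (i = 0; i < n; i++) {
                    if (!bufs[i].has_record)
                            continue;
                    if (!best || bufs[i].timestamp < best->timestamp)
                            best = &bufs[i];
            }
            return best;
    }
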
+
+/**
+ * lib_ring_buffer_get_next_record - Get the next record in a buffer.
+ * @chan: channel
+ * @buf: buffer
+ *
+ * Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
+ * buffer is empty and finalized. The buffer must already be opened for reading.
+ */
+ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
+                                       struct lib_ring_buffer *buf)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lib_ring_buffer_iter *iter = &buf->iter;
+       int ret;
+
+restart:
+       switch (iter->state) {
+       case ITER_GET_SUBBUF:
+               ret = lib_ring_buffer_get_next_subbuf(buf);
+               if (ret && !READ_ONCE(buf->finalized)
+                   && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+                       /*
+                        * Use "pull" scheme for global buffers. The reader
+                        * itself flushes the buffer to "pull" data not visible
+                        * to readers yet. Flush current subbuffer and re-try.
+                        *
+                        * Per-CPU buffers use a "push" scheme instead, because
+                        * the IPI needed to flush all CPU's buffers is too
+                        * costly. In the "push" scheme, the reader waits for
+                        * the writer periodic timer to flush the
+                        * buffers (keeping track of a quiescent state
+                        * timestamp). Therefore, the writer "pushes" data out
+                        * of the buffers rather than letting the reader "pull"
+                        * data from the buffer.
+                        */
+                       lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+                       ret = lib_ring_buffer_get_next_subbuf(buf);
+               }
+               if (ret)
+                       return ret;
+               iter->consumed = buf->cons_snapshot;
+               iter->data_size = lib_ring_buffer_get_read_data_size(config, buf);
+               iter->read_offset = iter->consumed;
+               /* skip header */
+               iter->read_offset += config->cb.subbuffer_header_size();
+               iter->state = ITER_TEST_RECORD;
+               goto restart;
+       case ITER_TEST_RECORD:
+               if (iter->read_offset - iter->consumed >= iter->data_size) {
+                       iter->state = ITER_PUT_SUBBUF;
+               } else {
+                       CHAN_WARN_ON(chan, !config->cb.record_get);
+                       config->cb.record_get(config, chan, buf,
+                                             iter->read_offset,
+                                             &iter->header_len,
+                                             &iter->payload_len,
+                                             &iter->timestamp);
+                       iter->read_offset += iter->header_len;
+                       subbuffer_consume_record(config, &buf->backend);
+                       iter->state = ITER_NEXT_RECORD;
+                       return iter->payload_len;
+               }
+               goto restart;
+       case ITER_NEXT_RECORD:
+               iter->read_offset += iter->payload_len;
+               iter->state = ITER_TEST_RECORD;
+               goto restart;
+       case ITER_PUT_SUBBUF:
+               lib_ring_buffer_put_next_subbuf(buf);
+               iter->state = ITER_GET_SUBBUF;
+               goto restart;
+       default:
+               CHAN_WARN_ON(chan, 1);  /* Should not happen */
+               return -EPERM;
+       }
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
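
The iterator above is a four-state machine; ITER_NEXT_RECORD is the state in
which a record has been handed to the caller, so the next call resumes by
skipping its payload. A condensed model of the transitions (a sketch, not the
real types):

    /* GET_SUBBUF -> TEST_RECORD -> { NEXT_RECORD | PUT_SUBBUF } */
    enum model_iter_state { GET_SUBBUF, TEST_RECORD, NEXT_RECORD, PUT_SUBBUF };

    static enum model_iter_state model_step(enum model_iter_state s,
                                            int subbuf_exhausted)
    {
            switch (s) {
            case GET_SUBBUF:        /* acquired a sub-buffer for reading */
                    return TEST_RECORD;
            case TEST_RECORD:       /* record left? hand it out; else release */
                    return subbuf_exhausted ? PUT_SUBBUF : NEXT_RECORD;
            case NEXT_RECORD:       /* record consumed by caller; advance */
                    return TEST_RECORD;
            case PUT_SUBBUF:        /* released; go get the next sub-buffer */
            default:
                    return GET_SUBBUF;
            }
    }
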
+
+static int buf_is_higher(void *a, void *b)
+{
+       struct lib_ring_buffer *bufa = a;
+       struct lib_ring_buffer *bufb = b;
+
+       /* Consider lowest timestamps to be at the top of the heap */
+       return (bufa->iter.timestamp < bufb->iter.timestamp);
+}
+
+static
+void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
+                                          struct channel *chan)
+{
+       struct lttng_ptr_heap *heap = &chan->iter.heap;
+       struct lib_ring_buffer *buf, *tmp;
+       ssize_t len;
+
+       list_for_each_entry_safe(buf, tmp, &chan->iter.empty_head,
+                                iter.empty_node) {
+               len = lib_ring_buffer_get_next_record(chan, buf);
+
+               /*
+                * Deal with -EAGAIN and -ENODATA.
+                * len >= 0 means record contains data.
+                * -EBUSY should never happen, because we support only one
+                * reader.
+                */
+               switch (len) {
+               case -EAGAIN:
+                       /* Keep node in empty list */
+                       break;
+               case -ENODATA:
+                       /*
+                        * Buffer is finalized. Don't add to list of empty
+                        * buffer, because it has no more data to provide, ever.
+                        */
+                       list_del(&buf->iter.empty_node);
+                       break;
+               case -EBUSY:
+                       CHAN_WARN_ON(chan, 1);
+                       break;
+               default:
+                       /*
+                        * Insert buffer into the heap, remove from empty buffer
+                        * list.
+                        */
+                       CHAN_WARN_ON(chan, len < 0);
+                       list_del(&buf->iter.empty_node);
+                       CHAN_WARN_ON(chan, lttng_heap_insert(heap, buf));
+               }
+       }
+}
+
+static
+void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
+                                struct channel *chan)
+{
+       u64 timestamp_qs;
+       unsigned long wait_msecs;
+
+       /*
+        * No need to wait if no empty buffers are present.
+        */
+       if (list_empty(&chan->iter.empty_head))
+               return;
+
+       timestamp_qs = config->cb.ring_buffer_clock_read(chan);
+       /*
+        * We need to consider previously empty buffers.
+        * Do a get-next-record on each of them. Add them to
+        * the heap if they have data. If at least one of them
+        * doesn't have data, we need to wait for
+        * switch_timer_interval + MAX_SYSTEM_LATENCY (so we are sure the
+        * buffers have been switched either by the timer or idle entry) and
+        * check them again, adding them if they have data.
+        */
+       lib_ring_buffer_get_empty_buf_records(config, chan);
+
+       /*
+        * No need to wait if no empty buffers are present.
+        */
+       if (list_empty(&chan->iter.empty_head))
+               return;
+
+       /*
+        * We need to wait for the buffer switch timer to run. If the
+        * CPU is idle, idle entry performed the switch.
+        * TODO: we could optimize further by skipping the sleep if all
+        * empty buffers belong to idle or offline cpus.
+        */
+       wait_msecs = jiffies_to_msecs(chan->switch_timer_interval);
+       wait_msecs += MAX_SYSTEM_LATENCY;
+       msleep(wait_msecs);
+       lib_ring_buffer_get_empty_buf_records(config, chan);
+       /*
+        * Any buffer still in the empty list here cannot possibly
+        * contain an event with a timestamp prior to "timestamp_qs".
+        * The new quiescent state timestamp is the one we grabbed
+        * before waiting for buffer data.  It is therefore safe to
+        * ignore empty buffers up to last_qs timestamp for fusion
+        * merge.
+        */
+       chan->iter.last_qs = timestamp_qs;
+}
+
+/**
+ * channel_get_next_record - Get the next record in a channel.
+ * @chan: channel
+ * @ret_buf: the buffer in which the event is located (output)
+ *
+ * Returns the size of the new current event, -EAGAIN if all buffers are empty,
+ * -ENODATA if all buffers are empty and finalized. The channel must already be
+ * opened for reading.
+ */
+ssize_t channel_get_next_record(struct channel *chan,
+                               struct lib_ring_buffer **ret_buf)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lib_ring_buffer *buf;
+       struct lttng_ptr_heap *heap;
+       ssize_t len;
+
+       if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+               *ret_buf = channel_get_ring_buffer(config, chan, 0);
+               return lib_ring_buffer_get_next_record(chan, *ret_buf);
+       }
+
+       heap = &chan->iter.heap;
+
+       /*
+        * Get the next record for the topmost buffer.
+        */
+       buf = lttng_heap_maximum(heap);
+       if (buf) {
+               len = lib_ring_buffer_get_next_record(chan, buf);
+               /*
+                * Deal with -EAGAIN and -ENODATA.
+                * len >= 0 means record contains data.
+                */
+               switch (len) {
+               case -EAGAIN:
+                       buf->iter.timestamp = 0;
+                       list_add(&buf->iter.empty_node, &chan->iter.empty_head);
+                       /* Remove topmost buffer from the heap */
+                       CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
+                       break;
+               case -ENODATA:
+                       /*
+                        * Buffer is finalized. Remove buffer from heap and
+                        * don't add to list of empty buffer, because it has no
+                        * more data to provide, ever.
+                        */
+                       CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
+                       break;
+               case -EBUSY:
+                       CHAN_WARN_ON(chan, 1);
+                       break;
+               default:
+                       /*
+                        * Reinsert buffer into the heap. Note that heap can be
+                        * partially empty, so we need to use
+                        * lttng_heap_replace_max().
+                        */
+                       CHAN_WARN_ON(chan, len < 0);
+                       CHAN_WARN_ON(chan, lttng_heap_replace_max(heap, buf) != buf);
+                       break;
+               }
+       }
+
+       buf = lttng_heap_maximum(heap);
+       if (!buf || buf->iter.timestamp > chan->iter.last_qs) {
+               /*
+                * Deal with buffers previously showing no data.
+                * Add buffers containing data to the heap, update
+                * last_qs.
+                */
+               lib_ring_buffer_wait_for_qs(config, chan);
+       }
+
+       *ret_buf = buf = lttng_heap_maximum(heap);
+       if (buf) {
+               /*
+                * If this warning triggers, you probably need to check your
+                * system interrupt latency. Typical cause: too much printk()
+                * output going to a serial console with interrupts off.
+                * Allow for MAX_CLOCK_DELTA ns timestamp delta going backward.
+                * Observed on SMP KVM setups with trace_clock().
+                */
+               if (chan->iter.last_timestamp
+                   > (buf->iter.timestamp + MAX_CLOCK_DELTA)) {
+                       printk(KERN_WARNING "ring_buffer: timestamps going "
+                              "backward. Last time %llu ns, cpu %d, "
+                              "current time %llu ns, cpu %d, "
+                              "delta %llu ns.\n",
+                              chan->iter.last_timestamp, chan->iter.last_cpu,
+                              buf->iter.timestamp, buf->backend.cpu,
+                              chan->iter.last_timestamp - buf->iter.timestamp);
+                       CHAN_WARN_ON(chan, 1);
+               }
+               chan->iter.last_timestamp = buf->iter.timestamp;
+               chan->iter.last_cpu = buf->backend.cpu;
+               return buf->iter.payload_len;
+       } else {
+               /* Heap is empty */
+               if (list_empty(&chan->iter.empty_head))
+                       return -ENODATA;        /* All buffers finalized */
+               else
+                       return -EAGAIN;         /* Temporarily empty */
+       }
+}
+EXPORT_SYMBOL_GPL(channel_get_next_record);
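
Per the kernel-doc above, a consumer loops until -ENODATA and treats -EAGAIN as
a temporarily empty channel. A minimal in-kernel consumer sketch built only on
what this file exports (error handling and waiting are elided):

    /* Sketch: drain a channel until all of its buffers are finalized. */
    static void model_drain_channel(struct channel *chan)
    {
            struct lib_ring_buffer *buf;
            ssize_t len;

            for (;;) {
                    len = channel_get_next_record(chan, &buf);
                    if (len == -ENODATA)
                            break;      /* all buffers empty and finalized */
                    if (len == -EAGAIN)
                            continue;   /* temporarily empty: wait, retry */
                    if (len < 0)
                            break;      /* unexpected error */
                    /* len payload bytes start at buf->iter.read_offset */
            }
    }
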
+
+static
+void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer *buf)
+{
+       if (buf->iter.allocated)
+               return;
+
+       buf->iter.allocated = 1;
+       if (chan->iter.read_open && !buf->iter.read_open) {
+               CHAN_WARN_ON(chan, lib_ring_buffer_open_read(buf) != 0);
+               buf->iter.read_open = 1;
+       }
+
+       /* Add to list of buffers without any current record */
+       if (chan->backend.config.alloc == RING_BUFFER_ALLOC_PER_CPU)
+               list_add(&buf->iter.empty_node, &chan->iter.empty_head);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+int lttng_cpuhp_rb_iter_online(unsigned int cpu,
+               struct lttng_cpuhp_node *node)
+{
+       struct channel *chan = container_of(node, struct channel,
+                                           cpuhp_iter_online);
+       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+       lib_ring_buffer_iterator_init(chan, buf);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+#ifdef CONFIG_HOTPLUG_CPU
+static
+int channel_iterator_cpu_hotplug(struct notifier_block *nb,
+                                          unsigned long action,
+                                          void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+       struct channel *chan = container_of(nb, struct channel,
+                                           hp_iter_notifier);
+       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (!chan->hp_iter_enable)
+               return NOTIFY_DONE;
+
+       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+       switch (action) {
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               lib_ring_buffer_iterator_init(chan, buf);
+               return NOTIFY_OK;
+       default:
+               return NOTIFY_DONE;
+       }
+}
+#endif
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+int channel_iterator_init(struct channel *chan)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lib_ring_buffer *buf;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+               int ret;
+
+               INIT_LIST_HEAD(&chan->iter.empty_head);
+               ret = lttng_heap_init(&chan->iter.heap,
+                               num_possible_cpus(),
+                               GFP_KERNEL, buf_is_higher);
+               if (ret)
+                       return ret;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+               chan->cpuhp_iter_online.component = LTTNG_RING_BUFFER_ITER;
+               ret = cpuhp_state_add_instance(lttng_rb_hp_online,
+                       &chan->cpuhp_iter_online.node);
+               if (ret)
+                       return ret;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+               {
+                       int cpu;
+
+                       /*
+                        * In case of non-hotplug cpu, if the ring-buffer is allocated
+                        * in early initcall, it will not be notified of secondary cpus.
+                        * In that case, we need to allocate for all possible cpus.
+                        */
+#ifdef CONFIG_HOTPLUG_CPU
+                       chan->hp_iter_notifier.notifier_call =
+                               channel_iterator_cpu_hotplug;
+                       chan->hp_iter_notifier.priority = 10;
+                       register_cpu_notifier(&chan->hp_iter_notifier);
+
+                       get_online_cpus();
+                       for_each_online_cpu(cpu) {
+                               buf = per_cpu_ptr(chan->backend.buf, cpu);
+                               lib_ring_buffer_iterator_init(chan, buf);
+                       }
+                       chan->hp_iter_enable = 1;
+                       put_online_cpus();
+#else
+                       for_each_possible_cpu(cpu) {
+                               buf = per_cpu_ptr(chan->backend.buf, cpu);
+                               lib_ring_buffer_iterator_init(chan, buf);
+                       }
+#endif
+               }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+       } else {
+               buf = channel_get_ring_buffer(config, chan, 0);
+               lib_ring_buffer_iterator_init(chan, buf);
+       }
+       return 0;
+}
+
+void channel_iterator_unregister_notifiers(struct channel *chan)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+               {
+                       int ret;
+
+                       ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
+                               &chan->cpuhp_iter_online.node);
+                       WARN_ON(ret);
+               }
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+               chan->hp_iter_enable = 0;
+               unregister_cpu_notifier(&chan->hp_iter_notifier);
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+       }
+}
+
+void channel_iterator_free(struct channel *chan)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               lttng_heap_free(&chan->iter.heap);
+}
+
+int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
+       return lib_ring_buffer_open_read(buf);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_open);
+
+/*
+ * Note: Iterators must not be mixed with other types of outputs, because an
+ * iterator can leave the buffer in "GET" state, which is not consistent with
+ * other types of output (mmap, splice, raw data read).
+ */
+void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
+{
+       lib_ring_buffer_release_read(buf);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
+
+int channel_iterator_open(struct channel *chan)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lib_ring_buffer *buf;
+       int ret = 0, cpu;
+
+       CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+               get_online_cpus();
+               /* Allow CPU hotplug to keep track of opened reader */
+               chan->iter.read_open = 1;
+               for_each_channel_cpu(cpu, chan) {
+                       buf = channel_get_ring_buffer(config, chan, cpu);
+                       ret = lib_ring_buffer_iterator_open(buf);
+                       if (ret)
+                               goto error;
+                       buf->iter.read_open = 1;
+               }
+               put_online_cpus();
+       } else {
+               buf = channel_get_ring_buffer(config, chan, 0);
+               ret = lib_ring_buffer_iterator_open(buf);
+       }
+       return ret;
+error:
+       /* Error should always happen on CPU 0, hence no close is required. */
+       CHAN_WARN_ON(chan, cpu != 0);
+       put_online_cpus();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(channel_iterator_open);
+
+void channel_iterator_release(struct channel *chan)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lib_ring_buffer *buf;
+       int cpu;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+               get_online_cpus();
+               for_each_channel_cpu(cpu, chan) {
+                       buf = channel_get_ring_buffer(config, chan, cpu);
+                       if (buf->iter.read_open) {
+                               lib_ring_buffer_iterator_release(buf);
+                               buf->iter.read_open = 0;
+                       }
+               }
+               chan->iter.read_open = 0;
+               put_online_cpus();
+       } else {
+               buf = channel_get_ring_buffer(config, chan, 0);
+               lib_ring_buffer_iterator_release(buf);
+       }
+}
+EXPORT_SYMBOL_GPL(channel_iterator_release);
+
+void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+
+       if (buf->iter.state != ITER_GET_SUBBUF)
+               lib_ring_buffer_put_next_subbuf(buf);
+       buf->iter.state = ITER_GET_SUBBUF;
+       /* Remove from heap (if present). */
+       if (lttng_heap_cherrypick(&chan->iter.heap, buf))
+               list_add(&buf->iter.empty_node, &chan->iter.empty_head);
+       buf->iter.timestamp = 0;
+       buf->iter.header_len = 0;
+       buf->iter.payload_len = 0;
+       buf->iter.consumed = 0;
+       buf->iter.read_offset = 0;
+       buf->iter.data_size = 0;
+       /* Don't reset allocated and read_open */
+}
+
+void channel_iterator_reset(struct channel *chan)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lib_ring_buffer *buf;
+       int cpu;
+
+       /* Empty heap, put into empty_head */
+       while ((buf = lttng_heap_remove(&chan->iter.heap)) != NULL)
+               list_add(&buf->iter.empty_node, &chan->iter.empty_head);
+
+       for_each_channel_cpu(cpu, chan) {
+               buf = channel_get_ring_buffer(config, chan, cpu);
+               lib_ring_buffer_iterator_reset(buf);
+       }
+       /* Don't reset read_open */
+       chan->iter.last_qs = 0;
+       chan->iter.last_timestamp = 0;
+       chan->iter.last_cpu = 0;
+       chan->iter.len_left = 0;
+}
+
+/*
+ * Ring buffer payload extraction read() implementation.
+ */
+static
+ssize_t channel_ring_buffer_file_read(struct file *filp,
+                                     char __user *user_buf,
+                                     size_t count,
+                                     loff_t *ppos,
+                                     struct channel *chan,
+                                     struct lib_ring_buffer *buf,
+                                     int fusionmerge)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       size_t read_count = 0, read_offset;
+       ssize_t len;
+
+       might_sleep();
+       if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
+               return -EFAULT;
+
+       /* Finish copy of previous record */
+       if (*ppos != 0) {
+               if (read_count < count) {
+                       len = chan->iter.len_left;
+                       read_offset = *ppos;
+                       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU
+                           && fusionmerge)
+                               buf = lttng_heap_maximum(&chan->iter.heap);
+                       CHAN_WARN_ON(chan, !buf);
+                       goto skip_get_next;
+               }
+       }
+
+       while (read_count < count) {
+               size_t copy_len, space_left;
+
+               if (fusionmerge)
+                       len = channel_get_next_record(chan, &buf);
+               else
+                       len = lib_ring_buffer_get_next_record(chan, buf);
+len_test:
+               if (len < 0) {
+                       /*
+                        * Check if buffer is finalized (end of file).
+                        */
+                       if (len == -ENODATA) {
+                               /* A 0 read_count signals end of file */
+                               goto nodata;
+                       }
+                       if (filp->f_flags & O_NONBLOCK) {
+                               if (!read_count)
+                                       read_count = -EAGAIN;
+                               goto nodata;
+                       } else {
+                               int error;
+
+                               /*
+                                * No data available at the moment, return what
+                                * we got.
+                                */
+                               if (read_count)
+                                       goto nodata;
+
+                               /*
+                                * Wait for returned len to be >= 0 or -ENODATA.
+                                */
+                               if (fusionmerge)
+                                       error = wait_event_interruptible(
+                                         chan->read_wait,
+                                         ((len = channel_get_next_record(chan,
+                                               &buf)), len != -EAGAIN));
+                               else
+                                       error = wait_event_interruptible(
+                                         buf->read_wait,
+                                         ((len = lib_ring_buffer_get_next_record(
+                                                 chan, buf)), len != -EAGAIN));
+                               CHAN_WARN_ON(chan, len == -EBUSY);
+                               if (error) {
+                                       read_count = error;
+                                       goto nodata;
+                               }
+                               CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
+                               goto len_test;
+                       }
+               }
+               read_offset = buf->iter.read_offset;
+skip_get_next:
+               space_left = count - read_count;
+               if (len <= space_left) {
+                       copy_len = len;
+                       chan->iter.len_left = 0;
+                       *ppos = 0;
+               } else {
+                       copy_len = space_left;
+                       chan->iter.len_left = len - copy_len;
+                       *ppos = read_offset + copy_len;
+               }
+               if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
+                                              &user_buf[read_count],
+                                              copy_len)) {
+                       /*
+                        * Leave the len_left and ppos values at their current
+                        * state, as we currently have a valid event to read.
+                        */
+                       return -EFAULT;
+               }
+               read_count += copy_len;
+       }
+       return read_count;
+
+nodata:
+       *ppos = 0;
+       chan->iter.len_left = 0;
+       return read_count;
+}
+
+/**
+ * lib_ring_buffer_file_read - Read buffer record payload.
+ * @filp: file structure pointer.
+ * @user_buf: user buffer to read data into.
+ * @count: number of bytes to read.
+ * @ppos: file read position.
+ *
+ * Returns a negative value on error, or the number of bytes read on success.
+ * ppos is used to save the position _within the current record_ between calls
+ * to read().
+ */
+static
+ssize_t lib_ring_buffer_file_read(struct file *filp,
+                                 char __user *user_buf,
+                                 size_t count,
+                                 loff_t *ppos)
+{
+       struct inode *inode = filp->lttng_f_dentry->d_inode;
+       struct lib_ring_buffer *buf = inode->i_private;
+       struct channel *chan = buf->backend.chan;
+
+       return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
+                                            chan, buf, 0);
+}
+
+/**
+ * channel_file_read - Read channel record payload.
+ * @filp: file structure pointer.
+ * @user_buf: user buffer to read data into.
+ * @count: number of bytes to read.
+ * @ppos: file read position.
+ *
+ * Returns a negative value on error, or the number of bytes read on success.
+ * ppos is used to save the position _within the current record_ between calls
+ * to read().
+ */
+static
+ssize_t channel_file_read(struct file *filp,
+                         char __user *user_buf,
+                         size_t count,
+                         loff_t *ppos)
+{
+       struct inode *inode = filp->lttng_f_dentry->d_inode;
+       struct channel *chan = inode->i_private;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               return channel_ring_buffer_file_read(filp, user_buf, count,
+                                                    ppos, chan, NULL, 1);
+       else {
+               struct lib_ring_buffer *buf =
+                       channel_get_ring_buffer(config, chan, 0);
+               return channel_ring_buffer_file_read(filp, user_buf, count,
+                                                    ppos, chan, buf, 0);
+       }
+}
+
+static
+int lib_ring_buffer_file_open(struct inode *inode, struct file *file)
+{
+       struct lib_ring_buffer *buf = inode->i_private;
+       int ret;
+
+       ret = lib_ring_buffer_iterator_open(buf);
+       if (ret)
+               return ret;
+
+       file->private_data = buf;
+       ret = nonseekable_open(inode, file);
+       if (ret)
+               goto release_iter;
+       return 0;
+
+release_iter:
+       lib_ring_buffer_iterator_release(buf);
+       return ret;
+}
+
+static
+int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
+{
+       struct lib_ring_buffer *buf = inode->i_private;
+
+       lib_ring_buffer_iterator_release(buf);
+       return 0;
+}
+
+static
+int channel_file_open(struct inode *inode, struct file *file)
+{
+       struct channel *chan = inode->i_private;
+       int ret;
+
+       ret = channel_iterator_open(chan);
+       if (ret)
+               return ret;
+
+       file->private_data = chan;
+       ret = nonseekable_open(inode, file);
+       if (ret)
+               goto release_iter;
+       return 0;
+
+release_iter:
+       channel_iterator_release(chan);
+       return ret;
+}
+
+static
+int channel_file_release(struct inode *inode, struct file *file)
+{
+       struct channel *chan = inode->i_private;
+
+       channel_iterator_release(chan);
+       return 0;
+}
+
+const struct file_operations channel_payload_file_operations = {
+       .owner = THIS_MODULE,
+       .open = channel_file_open,
+       .release = channel_file_release,
+       .read = channel_file_read,
+       .llseek = vfs_lib_ring_buffer_no_llseek,
+};
+EXPORT_SYMBOL_GPL(channel_payload_file_operations);
+
+const struct file_operations lib_ring_buffer_payload_file_operations = {
+       .owner = THIS_MODULE,
+       .open = lib_ring_buffer_file_open,
+       .release = lib_ring_buffer_file_release,
+       .read = lib_ring_buffer_file_read,
+       .llseek = vfs_lib_ring_buffer_no_llseek,
+};
+EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations);
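
From user space, these file operations behave like an ordinary stream: read()
returns record payloads, resumes partial records transparently through the
*ppos/len_left bookkeeping above, and returns 0 once all buffers are finalized.
A userspace sketch (the device path is hypothetical):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char payload[4096];
            ssize_t n;
            /* Hypothetical path to a channel payload file. */
            int fd = open("/path/to/channel-payload", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* read() > 0: payload bytes; 0: finalized (end of file). */
            while ((n = read(fd, payload, sizeof(payload))) > 0)
                    fwrite(payload, 1, n, stdout);
            close(fd);
            return 0;
    }
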
diff --git a/src/lib/ringbuffer/ring_buffer_mmap.c b/src/lib/ringbuffer/ring_buffer_mmap.c
new file mode 100644 (file)
index 0000000..34174a5
--- /dev/null
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * ring_buffer_mmap.c
+ *
+ * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
+ * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
+ * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Re-using code from kernel/relay.c, hence the GPL-2.0-only license for this
+ * file.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <ringbuffer/vfs.h>
+
+/*
+ * fault() vm_op implementation for ring buffer file mapping.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
+static vm_fault_t lib_ring_buffer_fault_compat(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static int lib_ring_buffer_fault_compat(struct vm_area_struct *vma, struct vm_fault *vmf)
+#endif
+{
+       struct lib_ring_buffer *buf = vma->vm_private_data;
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       pgoff_t pgoff = vmf->pgoff;
+       unsigned long *pfnp;
+       void **virt;
+       unsigned long offset, sb_bindex;
+
+       /*
+        * Verify that faults are only done on the range of pages owned by the
+        * reader.
+        */
+       offset = pgoff << PAGE_SHIFT;
+       sb_bindex = subbuffer_id_get_index(config, buf->backend.buf_rsb.id);
+       if (!(offset >= buf->backend.array[sb_bindex]->mmap_offset
+             && offset < buf->backend.array[sb_bindex]->mmap_offset +
+                         buf->backend.chan->backend.subbuf_size))
+               return VM_FAULT_SIGBUS;
+       /*
+        * ring_buffer_read_get_pfn() gets the page frame number for the
+        * current reader's pages.
+        */
+       pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, offset, &virt);
+       if (!*pfnp)
+               return VM_FAULT_SIGBUS;
+       get_page(pfn_to_page(*pfnp));
+       vmf->page = pfn_to_page(*pfnp);
+
+       return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
+static vm_fault_t lib_ring_buffer_fault(struct vm_fault *vmf)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       return lib_ring_buffer_fault_compat(vma, vmf);
+}
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
+static int lib_ring_buffer_fault(struct vm_fault *vmf)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       return lib_ring_buffer_fault_compat(vma, vmf);
+}
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
+static int lib_ring_buffer_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return lib_ring_buffer_fault_compat(vma, vmf);
+}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
+
+/*
+ * vm_ops for ring buffer file mappings.
+ */
+static const struct vm_operations_struct lib_ring_buffer_mmap_ops = {
+       .fault = lib_ring_buffer_fault,
+};
+
+/**
+ *     lib_ring_buffer_mmap_buf: - mmap channel buffer to process address space
+ *     @buf: ring buffer to map
+ *     @vma: vm_area_struct describing memory to be mapped
+ *
+ *     Returns 0 if ok, negative on error
+ *
+ *     Caller should already have grabbed mmap_sem.
+ */
+static int lib_ring_buffer_mmap_buf(struct lib_ring_buffer *buf,
+                                   struct vm_area_struct *vma)
+{
+       unsigned long length = vma->vm_end - vma->vm_start;
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long mmap_buf_len;
+
+       if (config->output != RING_BUFFER_MMAP)
+               return -EINVAL;
+
+       mmap_buf_len = chan->backend.buf_size;
+       if (chan->backend.extra_reader_sb)
+               mmap_buf_len += chan->backend.subbuf_size;
+
+       if (length != mmap_buf_len)
+               return -EINVAL;
+
+       vma->vm_ops = &lib_ring_buffer_mmap_ops;
+       vma->vm_flags |= VM_DONTEXPAND;
+       vma->vm_private_data = buf;
+
+       return 0;
+}
+
+int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma,
+               struct lib_ring_buffer *buf)
+{
+       return lib_ring_buffer_mmap_buf(buf, vma);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_mmap);
+
+/**
+ *     vfs_lib_ring_buffer_mmap - mmap file op
+ *     @filp: the file
+ *     @vma: the vma describing what to map
+ *
+ *     Calls upon lib_ring_buffer_mmap_buf() to map the file into user space.
+ */
+int vfs_lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct lib_ring_buffer *buf = filp->private_data;
+       return lib_ring_buffer_mmap(filp, vma, buf);
+}
+EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_mmap);
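
lib_ring_buffer_mmap_buf() rejects any mapping whose length is not exactly the
buffer size, plus one sub-buffer when the channel keeps an extra reader
sub-buffer. A userspace sketch, where both sizes are placeholders for values
the reader obtained from the kernel beforehand:

    #include <stddef.h>
    #include <sys/mman.h>

    /*
     * Sketch: map a ring buffer fd read-only. Any other length fails with
     * -EINVAL; mmap() returns MAP_FAILED on error.
     */
    static void *model_map_buffer(int fd, size_t buf_size,
                                  size_t extra_reader_subbuf_size)
    {
            size_t length = buf_size + extra_reader_subbuf_size;

            return mmap(NULL, length, PROT_READ, MAP_SHARED, fd, 0);
    }
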
diff --git a/src/lib/ringbuffer/ring_buffer_splice.c b/src/lib/ringbuffer/ring_buffer_splice.c
new file mode 100644 (file)
index 0000000..cd803a7
--- /dev/null
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * ring_buffer_splice.c
+ *
+ * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
+ * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
+ * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Re-using code from kernel/relay.c, which is why it is licensed under
+ * the GPL-2.0.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+
+#include <wrapper/splice.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <ringbuffer/vfs.h>
+
+#if 0
+#define printk_dbg(fmt, args...) printk(fmt, args)
+#else
+#define printk_dbg(fmt, args...)
+#endif
+
+loff_t vfs_lib_ring_buffer_no_llseek(struct file *file, loff_t offset,
+               int origin)
+{
+       return -ESPIPE;
+}
+EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_no_llseek);
+
+/*
+ * Release pages from the buffer so splice pipe_to_file can move them.
+ * Called after the pipe has been populated with buffer pages.
+ */
+static void lib_ring_buffer_pipe_buf_release(struct pipe_inode_info *pipe,
+                                            struct pipe_buffer *pbuf)
+{
+       __free_page(pbuf->page);
+}
+
+static const struct pipe_buf_operations ring_buffer_pipe_buf_ops = {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0))
+       .can_merge = 0,
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
+       .map = generic_pipe_buf_map,
+       .unmap = generic_pipe_buf_unmap,
+#endif
+       .confirm = generic_pipe_buf_confirm,
+       .release = lib_ring_buffer_pipe_buf_release,
+       .steal = generic_pipe_buf_steal,
+       .get = generic_pipe_buf_get,
+};
+
+/*
+ * Page release operation after splice pipe_to_file ends.
+ */
+static void lib_ring_buffer_page_release(struct splice_pipe_desc *spd,
+                                        unsigned int i)
+{
+       __free_page(spd->pages[i]);
+}
+
+/*
+ *     subbuf_splice_actor - splice up to one subbuf's worth of data
+ */
+static int subbuf_splice_actor(struct file *in,
+                              loff_t *ppos,
+                              struct pipe_inode_info *pipe,
+                              size_t len,
+                              unsigned int flags,
+                              struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned int poff, subbuf_pages, nr_pages;
+       struct page *pages[PIPE_DEF_BUFFERS];
+       struct partial_page partial[PIPE_DEF_BUFFERS];
+       struct splice_pipe_desc spd = {
+               .pages = pages,
+               .nr_pages = 0,
+               .partial = partial,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
+               .flags = flags,
+#endif
+               .ops = &ring_buffer_pipe_buf_ops,
+               .spd_release = lib_ring_buffer_page_release,
+       };
+       unsigned long consumed_old, roffset;
+       unsigned long bytes_avail;
+
+       /*
+        * Check that a GET_SUBBUF ioctl has been done before.
+        */
+       WARN_ON(atomic_long_read(&buf->active_readers) != 1);
+       consumed_old = lib_ring_buffer_get_consumed(config, buf);
+       consumed_old += *ppos;
+
+       /*
+        * Adjust read len, if longer than what is available.
+        * Max read size is 1 subbuffer due to the get_subbuf/put_subbuf
+        * protection.
+        */
+       bytes_avail = chan->backend.subbuf_size;
+       WARN_ON(bytes_avail > chan->backend.buf_size);
+       len = min_t(size_t, len, bytes_avail);
+       subbuf_pages = bytes_avail >> PAGE_SHIFT;
+       nr_pages = min_t(unsigned int, subbuf_pages, PIPE_DEF_BUFFERS);
+       roffset = consumed_old & PAGE_MASK;
+       poff = consumed_old & ~PAGE_MASK;
+       printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
+                  len, (ssize_t)*ppos, lib_ring_buffer_get_offset(config, buf));
+
+       for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
+               unsigned int this_len;
+               unsigned long *pfnp, new_pfn;
+               struct page *new_page;
+               void **virt;
+
+               if (!len)
+                       break;
+               printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
+                          len, roffset);
+
+               /*
+                * Each page moved into the splice pipe must be replaced by a
+                * freshly allocated page, so the ring buffer keeps a complete
+                * set of backing pages.
+                */
+               new_page = alloc_pages_node(cpu_to_node(max(buf->backend.cpu,
+                                                           0)),
+                                           GFP_KERNEL | __GFP_ZERO, 0);
+               if (!new_page)
+                       break;
+               new_pfn = page_to_pfn(new_page);
+               this_len = PAGE_SIZE - poff;
+               pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, roffset, &virt);
+               spd.pages[spd.nr_pages] = pfn_to_page(*pfnp);
+               *pfnp = new_pfn;
+               *virt = page_address(new_page);
+               spd.partial[spd.nr_pages].offset = poff;
+               spd.partial[spd.nr_pages].len = this_len;
+
+               poff = 0;
+               roffset += PAGE_SIZE;
+               len -= this_len;
+       }
+
+       if (!spd.nr_pages)
+               return 0;
+
+       return wrapper_splice_to_pipe(pipe, &spd);
+}
+
+ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
+                                   struct pipe_inode_info *pipe, size_t len,
+                                   unsigned int flags,
+                                   struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       ssize_t spliced;
+       int ret;
+
+       if (config->output != RING_BUFFER_SPLICE)
+               return -EINVAL;
+
+       /*
+        * We require ppos and length to be page-aligned for performance reasons
+        * (no page copy). Size is known using the ioctl
+        * RING_BUFFER_GET_PADDED_SUBBUF_SIZE, which is page-size padded.
+        * We fail when the ppos or len passed is not page-aligned, because
+        * splice is not allowed to copy more than the length passed as
+        * parameter (so the ABI does not let us silently copy more than
+        * requested to include padding).
+        */
+       if (*ppos != PAGE_ALIGN(*ppos) || len != PAGE_ALIGN(len))
+               return -EINVAL;
+
+       ret = 0;
+       spliced = 0;
+
+       printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n", len,
+                  (ssize_t)*ppos);
+       while (len && !spliced) {
+               ret = subbuf_splice_actor(in, ppos, pipe, len, flags, buf);
+               printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
+               if (ret < 0)
+                       break;
+               else if (!ret) {
+                       if (flags & SPLICE_F_NONBLOCK)
+                               ret = -EAGAIN;
+                       break;
+               }
+
+               *ppos += ret;
+               if (ret > len)
+                       len = 0;
+               else
+                       len -= ret;
+               spliced += ret;
+       }
+
+       if (spliced)
+               return spliced;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_splice_read);
+
+ssize_t vfs_lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
+                                   struct pipe_inode_info *pipe, size_t len,
+                                   unsigned int flags)
+{
+       struct lib_ring_buffer *buf = in->private_data;
+
+       return lib_ring_buffer_splice_read(in, ppos, pipe, len, flags, buf);
+}
+EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_splice_read);
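
To make the contract above concrete, a user-space consumer would typically pair these operations roughly as in the following sketch. This is a minimal illustration, not taken from this patch: error handling is reduced, the file descriptors are assumed to have been obtained through the LTTng ABI, and the RING_BUFFER_* ioctl numbers are those defined in ringbuffer/vfs.h.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Minimal sketch (hypothetical helper): drain one sub-buffer from a
 * stream fd into out_fd through a pipe, the way the zero-copy splice
 * path expects.
 */
static int drain_one_subbuf(int stream_fd, int pipe_rd, int pipe_wr, int out_fd)
{
	unsigned long padded;
	loff_t pos = 0;
	int ret = -1;

	/* Reserve the next sub-buffer for reading (never blocks). */
	if (ioctl(stream_fd, RING_BUFFER_GET_NEXT_SUBBUF) < 0)
		return -1;
	/* The padded size is page-aligned, as the splice path requires. */
	if (ioctl(stream_fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE, &padded) < 0)
		goto put;
	while (padded) {
		/* Buffer pages are moved (not copied) into the pipe... */
		ssize_t n = splice(stream_fd, &pos, pipe_wr, NULL, padded,
				   SPLICE_F_MOVE);
		if (n <= 0)
			goto put;
		/* ...then from the pipe to their final destination. */
		if (splice(pipe_rd, NULL, out_fd, NULL, n, SPLICE_F_MOVE) < 0)
			goto put;
		padded -= n;
	}
	ret = 0;
put:
	ioctl(stream_fd, RING_BUFFER_PUT_NEXT_SUBBUF);
	return ret;
}
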
diff --git a/src/lib/ringbuffer/ring_buffer_vfs.c b/src/lib/ringbuffer/ring_buffer_vfs.c
new file mode 100644 (file)
index 0000000..e5d6a70
--- /dev/null
@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ring_buffer_vfs.c
+ *
+ * Ring Buffer VFS file operations.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <ringbuffer/vfs.h>
+#include <wrapper/poll.h>
+#include <lttng/tracer.h>
+
+static int put_ulong(unsigned long val, unsigned long arg)
+{
+       return put_user(val, (unsigned long __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+static int compat_put_ulong(compat_ulong_t val, unsigned long arg)
+{
+       return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
+}
+#endif
+
+/*
+ * This is not used by anonymous file descriptors. This code is kept in
+ * case we ever want to implement an inode with an open() operation.
+ */
+int lib_ring_buffer_open(struct inode *inode, struct file *file,
+               struct lib_ring_buffer *buf)
+{
+       int ret;
+
+       if (!buf)
+               return -EINVAL;
+
+       ret = lib_ring_buffer_open_read(buf);
+       if (ret)
+               return ret;
+
+       ret = nonseekable_open(inode, file);
+       if (ret)
+               goto release_read;
+       return 0;
+
+release_read:
+       lib_ring_buffer_release_read(buf);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_open);
+
+/**
+ *     vfs_lib_ring_buffer_open - ring buffer open file operation
+ *     @inode: opened inode
+ *     @file: opened file
+ *
+ *     Open implementation. Ensures that only one open instance of a buffer
+ *     exists at any given time.
+ */
+static
+int vfs_lib_ring_buffer_open(struct inode *inode, struct file *file)
+{
+       struct lib_ring_buffer *buf = inode->i_private;
+
+       file->private_data = buf;
+       return lib_ring_buffer_open(inode, file, buf);
+}
+
+int lib_ring_buffer_release(struct inode *inode, struct file *file,
+               struct lib_ring_buffer *buf)
+{
+       lib_ring_buffer_release_read(buf);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_release);
+
+/**
+ *     vfs_lib_ring_buffer_release - ring buffer release file operation
+ *     @inode: opened inode
+ *     @file: opened file
+ *
+ *     Release implementation.
+ */
+static
+int vfs_lib_ring_buffer_release(struct inode *inode, struct file *file)
+{
+       struct lib_ring_buffer *buf = file->private_data;
+
+       return lib_ring_buffer_release(inode, file, buf);
+}
+
+unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
+               struct lib_ring_buffer *buf)
+{
+       unsigned int mask = 0;
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       int finalized, disabled;
+
+       if (filp->f_mode & FMODE_READ) {
+               poll_wait_set_exclusive(wait);
+               poll_wait(filp, &buf->read_wait, wait);
+
+               finalized = lib_ring_buffer_is_finalized(config, buf);
+               disabled = lib_ring_buffer_channel_is_disabled(chan);
+
+               /*
+                * lib_ring_buffer_is_finalized() contains a smp_rmb(), which
+                * orders the load of the finalized flag before the offset
+                * loads.
+                */
+               WARN_ON(atomic_long_read(&buf->active_readers) != 1);
+retry:
+               if (disabled)
+                       return POLLERR;
+
+               if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
+                 - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
+                 == 0) {
+                       if (finalized)
+                               return POLLHUP;
+                       else {
+                               /*
+                                * The memory barriers
+                                * __wait_event()/wake_up_interruptible() take
+                                * care of "raw_spin_is_locked" memory ordering.
+                                */
+                               if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
+                                       goto retry;
+                               else
+                                       return 0;
+                       }
+               } else {
+                       if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
+                                        chan)
+                         - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
+                                        chan)
+                         >= chan->backend.buf_size)
+                               return POLLPRI | POLLRDBAND;
+                       else
+                               return POLLIN | POLLRDNORM;
+               }
+       }
+       return mask;
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_poll);
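
A reader can rely on this poll contract to classify wakeups; a minimal sketch (hypothetical helper, plain poll(2) only):

#include <poll.h>

/*
 * Minimal sketch of a consumer interpreting the poll contract above:
 * POLLERR means the channel is disabled, POLLHUP means the stream is
 * finalized and empty, POLLPRI|POLLRDBAND means the buffer is full,
 * POLLIN|POLLRDNORM means data is readable.
 */
static int wait_for_stream(int stream_fd)
{
	struct pollfd pfd = {
		.fd = stream_fd,
		.events = POLLIN | POLLPRI,
	};

	if (poll(&pfd, 1, -1) < 0)
		return -1;		/* error or signal */
	if (pfd.revents & POLLERR)
		return -1;		/* channel disabled */
	if (pfd.revents & POLLHUP)
		return 0;		/* finalized and empty: stop */
	return 1;			/* data ready to consume */
}
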
+
+/**
+ *     vfs_lib_ring_buffer_poll - ring buffer poll file operation
+ *     @filp: the file
+ *     @wait: poll table
+ *
+ *     Poll implementation.
+ */
+static
+unsigned int vfs_lib_ring_buffer_poll(struct file *filp, poll_table *wait)
+{
+       struct lib_ring_buffer *buf = filp->private_data;
+
+       return lib_ring_buffer_poll(filp, wait, buf);
+}
+
+long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
+               unsigned long arg, struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (lib_ring_buffer_channel_is_disabled(chan))
+               return -EIO;
+
+       switch (cmd) {
+       case RING_BUFFER_SNAPSHOT:
+               return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
+                                           &buf->prod_snapshot);
+       case RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS:
+               return lib_ring_buffer_snapshot_sample_positions(buf,
+                               &buf->cons_snapshot, &buf->prod_snapshot);
+       case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
+               return put_ulong(buf->cons_snapshot, arg);
+       case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
+               return put_ulong(buf->prod_snapshot, arg);
+       case RING_BUFFER_GET_SUBBUF:
+       {
+               unsigned long uconsume;
+               long ret;
+
+               ret = get_user(uconsume, (unsigned long __user *) arg);
+               if (ret)
+                       return ret; /* will return -EFAULT */
+               ret = lib_ring_buffer_get_subbuf(buf, uconsume);
+               if (!ret) {
+                       /* Set file position to zero at each successful "get" */
+                       filp->f_pos = 0;
+               }
+               return ret;
+       }
+       case RING_BUFFER_PUT_SUBBUF:
+               lib_ring_buffer_put_subbuf(buf);
+               return 0;
+
+       case RING_BUFFER_GET_NEXT_SUBBUF:
+       {
+               long ret;
+
+               ret = lib_ring_buffer_get_next_subbuf(buf);
+               if (!ret) {
+                       /* Set file position to zero at each successful "get" */
+                       filp->f_pos = 0;
+               }
+               return ret;
+       }
+       case RING_BUFFER_PUT_NEXT_SUBBUF:
+               lib_ring_buffer_put_next_subbuf(buf);
+               return 0;
+       case RING_BUFFER_GET_SUBBUF_SIZE:
+               return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
+                                arg);
+       case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
+       {
+               unsigned long size;
+
+               size = lib_ring_buffer_get_read_data_size(config, buf);
+               size = PAGE_ALIGN(size);
+               return put_ulong(size, arg);
+       }
+       case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
+               return put_ulong(chan->backend.subbuf_size, arg);
+       case RING_BUFFER_GET_MMAP_LEN:
+       {
+               unsigned long mmap_buf_len;
+
+               if (config->output != RING_BUFFER_MMAP)
+                       return -EINVAL;
+               mmap_buf_len = chan->backend.buf_size;
+               if (chan->backend.extra_reader_sb)
+                       mmap_buf_len += chan->backend.subbuf_size;
+               if (mmap_buf_len > INT_MAX)
+                       return -EFBIG;
+               return put_ulong(mmap_buf_len, arg);
+       }
+       case RING_BUFFER_GET_MMAP_READ_OFFSET:
+       {
+               unsigned long sb_bindex;
+
+               if (config->output != RING_BUFFER_MMAP)
+                       return -EINVAL;
+               sb_bindex = subbuffer_id_get_index(config,
+                                                  buf->backend.buf_rsb.id);
+               return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
+                                arg);
+       }
+       case RING_BUFFER_FLUSH:
+               lib_ring_buffer_switch_remote(buf);
+               return 0;
+       case RING_BUFFER_FLUSH_EMPTY:
+               lib_ring_buffer_switch_remote_empty(buf);
+               return 0;
+       case RING_BUFFER_CLEAR:
+               lib_ring_buffer_clear(buf);
+               return 0;
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_ioctl);
+
+/**
+ *     vfs_lib_ring_buffer_ioctl - control ring buffer reader synchronization
+ *
+ *     @filp: the file
+ *     @cmd: the command
+ *     @arg: command arg
+ *
+ *     This ioctl implements commands necessary for producer/consumer
+ *     and flight recorder reader interaction:
+ *     RING_BUFFER_GET_NEXT_SUBBUF
+ *             Get the next sub-buffer that can be read. It never blocks.
+ *     RING_BUFFER_PUT_NEXT_SUBBUF
+ *             Release the currently read sub-buffer.
+ *     RING_BUFFER_GET_SUBBUF_SIZE
+ *             Returns the size of the current sub-buffer.
+ *     RING_BUFFER_GET_MAX_SUBBUF_SIZE
+ *             Returns the maximum size for sub-buffers.
+ *     RING_BUFFER_GET_NUM_SUBBUF
+ *             Returns the number of reader-visible sub-buffers in the
+ *             per-CPU channel (for mmap).
+ *     RING_BUFFER_GET_MMAP_READ_OFFSET
+ *             Returns the offset of the sub-buffer belonging to the reader.
+ *             Should only be used for mmap clients.
+ */
+static
+long vfs_lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       struct lib_ring_buffer *buf = filp->private_data;
+
+       return lib_ring_buffer_ioctl(filp, cmd, arg, buf);
+}
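
As a usage note, the snapshot commands are meant to be issued as a sequence on the same stream fd; a minimal sketch (hypothetical helper; ioctl numbers from ringbuffer/vfs.h):

#include <sys/ioctl.h>

/*
 * Minimal sketch: sample the consumed/produced positions without
 * consuming any data, following the RING_BUFFER_SNAPSHOT* commands
 * handled above.
 */
static int sample_positions(int stream_fd, unsigned long *consumed,
			    unsigned long *produced)
{
	/* Take the snapshot first; the GET commands read its result. */
	if (ioctl(stream_fd, RING_BUFFER_SNAPSHOT) < 0)
		return -1;
	if (ioctl(stream_fd, RING_BUFFER_SNAPSHOT_GET_CONSUMED, consumed) < 0)
		return -1;
	return ioctl(stream_fd, RING_BUFFER_SNAPSHOT_GET_PRODUCED, produced);
}
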
+
+#ifdef CONFIG_COMPAT
+long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
+               unsigned long arg, struct lib_ring_buffer *buf)
+{
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       if (lib_ring_buffer_channel_is_disabled(chan))
+               return -EIO;
+
+       switch (cmd) {
+       case RING_BUFFER_COMPAT_SNAPSHOT:
+               return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
+                                               &buf->prod_snapshot);
+       case RING_BUFFER_COMPAT_SNAPSHOT_SAMPLE_POSITIONS:
+               return lib_ring_buffer_snapshot_sample_positions(buf,
+                               &buf->cons_snapshot, &buf->prod_snapshot);
+       case RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED:
+               return compat_put_ulong(buf->cons_snapshot, arg);
+       case RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED:
+               return compat_put_ulong(buf->prod_snapshot, arg);
+       case RING_BUFFER_COMPAT_GET_SUBBUF:
+       {
+               __u32 uconsume;
+               unsigned long consume;
+               long ret;
+
+               ret = get_user(uconsume, (__u32 __user *) arg);
+               if (ret)
+                       return ret; /* will return -EFAULT */
+               consume = buf->cons_snapshot;
+               consume &= ~0xFFFFFFFFL;
+               consume |= uconsume;
+               ret = lib_ring_buffer_get_subbuf(buf, consume);
+               if (!ret) {
+                       /* Set file position to zero at each successful "get" */
+                       filp->f_pos = 0;
+               }
+               return ret;
+       }
+       case RING_BUFFER_COMPAT_PUT_SUBBUF:
+               lib_ring_buffer_put_subbuf(buf);
+               return 0;
+
+       case RING_BUFFER_COMPAT_GET_NEXT_SUBBUF:
+       {
+               long ret;
+
+               ret = lib_ring_buffer_get_next_subbuf(buf);
+               if (!ret) {
+                       /* Set file position to zero at each successful "get" */
+                       filp->f_pos = 0;
+               }
+               return ret;
+       }
+       case RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF:
+               lib_ring_buffer_put_next_subbuf(buf);
+               return 0;
+       case RING_BUFFER_COMPAT_GET_SUBBUF_SIZE:
+       {
+               unsigned long data_size;
+
+               data_size = lib_ring_buffer_get_read_data_size(config, buf);
+               if (data_size > UINT_MAX)
+                       return -EFBIG;
+               return compat_put_ulong(data_size, arg);
+       }
+       case RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE:
+       {
+               unsigned long size;
+
+               size = lib_ring_buffer_get_read_data_size(config, buf);
+               size = PAGE_ALIGN(size);
+               if (size > UINT_MAX)
+                       return -EFBIG;
+               return compat_put_ulong(size, arg);
+       }
+       case RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE:
+               if (chan->backend.subbuf_size > UINT_MAX)
+                       return -EFBIG;
+               return compat_put_ulong(chan->backend.subbuf_size, arg);
+       case RING_BUFFER_COMPAT_GET_MMAP_LEN:
+       {
+               unsigned long mmap_buf_len;
+
+               if (config->output != RING_BUFFER_MMAP)
+                       return -EINVAL;
+               mmap_buf_len = chan->backend.buf_size;
+               if (chan->backend.extra_reader_sb)
+                       mmap_buf_len += chan->backend.subbuf_size;
+               if (mmap_buf_len > UINT_MAX)
+                       return -EFBIG;
+               return compat_put_ulong(mmap_buf_len, arg);
+       }
+       case RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET:
+       {
+               unsigned long sb_bindex, read_offset;
+
+               if (config->output != RING_BUFFER_MMAP)
+                       return -EINVAL;
+               sb_bindex = subbuffer_id_get_index(config,
+                                                  buf->backend.buf_rsb.id);
+               read_offset = buf->backend.array[sb_bindex]->mmap_offset;
+               if (read_offset > UINT_MAX)
+                       return -EINVAL;
+               return compat_put_ulong(read_offset, arg);
+       }
+       case RING_BUFFER_COMPAT_FLUSH:
+               lib_ring_buffer_switch_remote(buf);
+               return 0;
+       case RING_BUFFER_COMPAT_FLUSH_EMPTY:
+               lib_ring_buffer_switch_remote_empty(buf);
+               return 0;
+       case RING_BUFFER_COMPAT_CLEAR:
+               lib_ring_buffer_clear(buf);
+               return 0;
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_compat_ioctl);
+
+static
+long vfs_lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
+                                 unsigned long arg)
+{
+       struct lib_ring_buffer *buf = filp->private_data;
+
+       return lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
+}
+#endif
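
The compat GET_SUBBUF case above deserves a note: a 32-bit reader can only pass the low 32 bits of the consumed position, so the handler splices them into the kernel's 64-bit snapshot. A small standalone model of that reconstruction, with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Keep the snapshot's upper bits, substitute the user's lower 32 bits. */
static unsigned long rebuild_consumed(unsigned long cons_snapshot,
				      uint32_t uconsume)
{
	unsigned long consumed = cons_snapshot;

	consumed &= ~0xFFFFFFFFUL;	/* drop the low 32 bits */
	consumed |= uconsume;		/* splice in the 32-bit value */
	return consumed;
}

int main(void)
{
	/* Counter has wrapped past 4 GiB; the reader passes low bits only. */
	printf("%#lx\n", rebuild_consumed(0x100001000UL, 0x2000));
	/* prints 0x100002000 */
	return 0;
}
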
+
+const struct file_operations lib_ring_buffer_file_operations = {
+       .owner = THIS_MODULE,
+       .open = vfs_lib_ring_buffer_open,
+       .release = vfs_lib_ring_buffer_release,
+       .poll = vfs_lib_ring_buffer_poll,
+       .splice_read = vfs_lib_ring_buffer_splice_read,
+       .mmap = vfs_lib_ring_buffer_mmap,
+       .unlocked_ioctl = vfs_lib_ring_buffer_ioctl,
+       .llseek = vfs_lib_ring_buffer_no_llseek,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = vfs_lib_ring_buffer_compat_ioctl,
+#endif
+};
+EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng ring buffer library");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/lttng-abi.c b/src/lttng-abi.c
new file mode 100644 (file)
index 0000000..01eb4d5
--- /dev/null
@@ -0,0 +1,1973 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * lttng-abi.c
+ *
+ * LTTng ABI
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Mimic system calls for:
+ * - session creation, returns a file descriptor or failure.
+ *   - channel creation, returns a file descriptor or failure.
+ *     - Operates on a session file descriptor
+ *     - Takes all channel options as parameters.
+ *   - stream get, returns a file descriptor or failure.
+ *     - Operates on a channel file descriptor.
+ *   - stream notifier get, returns a file descriptor or failure.
+ *     - Operates on a channel file descriptor.
+ *   - event creation, returns a file descriptor or failure.
+ *     - Operates on a channel file descriptor
+ *     - Takes an event name as parameter
+ *     - Takes an instrumentation source as parameter
+ *       - e.g. tracepoints, dynamic_probes...
+ *     - Takes instrumentation source specific arguments.
+ */
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
+#include <ringbuffer/vfs.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <wrapper/poll.h>
+#include <wrapper/file.h>
+#include <wrapper/kref.h>
+#include <lttng/string-utils.h>
+#include <lttng/abi.h>
+#include <lttng/abi-old.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <lttng/tp-mempool.h>
+#include <ringbuffer/frontend_types.h>
+
+/*
+ * This is how LTTng, as an external module, exposes its own flavor of
+ * system calls: ioctl() on /proc/lttng.
+ */
+
+static struct proc_dir_entry *lttng_proc_dentry;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
+static const struct proc_ops lttng_proc_ops;
+#else
+static const struct file_operations lttng_proc_ops;
+#endif
+
+static const struct file_operations lttng_session_fops;
+static const struct file_operations lttng_channel_fops;
+static const struct file_operations lttng_metadata_fops;
+static const struct file_operations lttng_event_fops;
+static struct file_operations lttng_stream_ring_buffer_file_operations;
+
+static int put_u64(uint64_t val, unsigned long arg);
+
+/*
+ * Teardown management: opened file descriptors keep a refcount on the module,
+ * so it can only exit when all file descriptors are closed.
+ */
+
+static
+int lttng_abi_create_session(void)
+{
+       struct lttng_session *session;
+       struct file *session_file;
+       int session_fd, ret;
+
+       session = lttng_session_create();
+       if (!session)
+               return -ENOMEM;
+       session_fd = lttng_get_unused_fd();
+       if (session_fd < 0) {
+               ret = session_fd;
+               goto fd_error;
+       }
+       session_file = anon_inode_getfile("[lttng_session]",
+                                         &lttng_session_fops,
+                                         session, O_RDWR);
+       if (IS_ERR(session_file)) {
+               ret = PTR_ERR(session_file);
+               goto file_error;
+       }
+       session->file = session_file;
+       fd_install(session_fd, session_file);
+       return session_fd;
+
+file_error:
+       put_unused_fd(session_fd);
+fd_error:
+       lttng_session_destroy(session);
+       return ret;
+}
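
In user space this maps to one open() and one ioctl(); a minimal sketch (hypothetical helper; LTTNG_KERNEL_SESSION is the command handled in lttng_ioctl() below):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

/*
 * Minimal sketch: obtain a session file descriptor through the
 * /proc/lttng entry. The returned fd pins the module until it is
 * closed; closing it destroys the session.
 */
static int create_session_fd(void)
{
	int proc_fd, session_fd;

	proc_fd = open("/proc/lttng", O_RDWR);
	if (proc_fd < 0)
		return -1;
	session_fd = ioctl(proc_fd, LTTNG_KERNEL_SESSION);
	close(proc_fd);		/* the session fd stands on its own */
	return session_fd;
}
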
+
+static
+int lttng_abi_tracepoint_list(void)
+{
+       struct file *tracepoint_list_file;
+       int file_fd, ret;
+
+       file_fd = lttng_get_unused_fd();
+       if (file_fd < 0) {
+               ret = file_fd;
+               goto fd_error;
+       }
+
+       tracepoint_list_file = anon_inode_getfile("[lttng_tracepoint_list]",
+                                         &lttng_tracepoint_list_fops,
+                                         NULL, O_RDWR);
+       if (IS_ERR(tracepoint_list_file)) {
+               ret = PTR_ERR(tracepoint_list_file);
+               goto file_error;
+       }
+       ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
+       if (ret < 0)
+               goto open_error;
+       fd_install(file_fd, tracepoint_list_file);
+       return file_fd;
+
+open_error:
+       fput(tracepoint_list_file);
+file_error:
+       put_unused_fd(file_fd);
+fd_error:
+       return ret;
+}
+
+#ifndef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+static inline
+int lttng_abi_syscall_list(void)
+{
+       return -ENOSYS;
+}
+#else
+static
+int lttng_abi_syscall_list(void)
+{
+       struct file *syscall_list_file;
+       int file_fd, ret;
+
+       file_fd = lttng_get_unused_fd();
+       if (file_fd < 0) {
+               ret = file_fd;
+               goto fd_error;
+       }
+
+       syscall_list_file = anon_inode_getfile("[lttng_syscall_list]",
+                                         &lttng_syscall_list_fops,
+                                         NULL, O_RDWR);
+       if (IS_ERR(syscall_list_file)) {
+               ret = PTR_ERR(syscall_list_file);
+               goto file_error;
+       }
+       ret = lttng_syscall_list_fops.open(NULL, syscall_list_file);
+       if (ret < 0)
+               goto open_error;
+       fd_install(file_fd, syscall_list_file);
+       return file_fd;
+
+open_error:
+       fput(syscall_list_file);
+file_error:
+       put_unused_fd(file_fd);
+fd_error:
+       return ret;
+}
+#endif
+
+static
+void lttng_abi_tracer_version(struct lttng_kernel_tracer_version *v)
+{
+       v->major = LTTNG_MODULES_MAJOR_VERSION;
+       v->minor = LTTNG_MODULES_MINOR_VERSION;
+       v->patchlevel = LTTNG_MODULES_PATCHLEVEL_VERSION;
+}
+
+static
+void lttng_abi_tracer_abi_version(struct lttng_kernel_tracer_abi_version *v)
+{
+       v->major = LTTNG_MODULES_ABI_MAJOR_VERSION;
+       v->minor = LTTNG_MODULES_ABI_MINOR_VERSION;
+}
+
+static
+long lttng_abi_add_context(struct file *file,
+       struct lttng_kernel_context *context_param,
+       struct lttng_ctx **ctx, struct lttng_session *session)
+{
+
+       if (session->been_active)
+               return -EPERM;
+
+       switch (context_param->ctx) {
+       case LTTNG_KERNEL_CONTEXT_PID:
+               return lttng_add_pid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_PRIO:
+               return lttng_add_prio_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_NICE:
+               return lttng_add_nice_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_VPID:
+               return lttng_add_vpid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_TID:
+               return lttng_add_tid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_VTID:
+               return lttng_add_vtid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_PPID:
+               return lttng_add_ppid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_VPPID:
+               return lttng_add_vppid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
+               context_param->u.perf_counter.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+               return lttng_add_perf_counter_to_ctx(context_param->u.perf_counter.type,
+                               context_param->u.perf_counter.config,
+                               context_param->u.perf_counter.name,
+                               ctx);
+       case LTTNG_KERNEL_CONTEXT_PROCNAME:
+               return lttng_add_procname_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_HOSTNAME:
+               return lttng_add_hostname_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_CPU_ID:
+               return lttng_add_cpu_id_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_INTERRUPTIBLE:
+               return lttng_add_interruptible_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_NEED_RESCHEDULE:
+               return lttng_add_need_reschedule_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_PREEMPTIBLE:
+               return lttng_add_preemptible_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_MIGRATABLE:
+               return lttng_add_migratable_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
+       case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
+               return lttng_add_callstack_to_ctx(ctx, context_param->ctx);
+       case LTTNG_KERNEL_CONTEXT_CGROUP_NS:
+               return lttng_add_cgroup_ns_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_IPC_NS:
+               return lttng_add_ipc_ns_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_MNT_NS:
+               return lttng_add_mnt_ns_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_NET_NS:
+               return lttng_add_net_ns_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_PID_NS:
+               return lttng_add_pid_ns_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_USER_NS:
+               return lttng_add_user_ns_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_UTS_NS:
+               return lttng_add_uts_ns_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_UID:
+               return lttng_add_uid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_EUID:
+               return lttng_add_euid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_SUID:
+               return lttng_add_suid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_GID:
+               return lttng_add_gid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_EGID:
+               return lttng_add_egid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_SGID:
+               return lttng_add_sgid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_VUID:
+               return lttng_add_vuid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_VEUID:
+               return lttng_add_veuid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_VSUID:
+               return lttng_add_vsuid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_VGID:
+               return lttng_add_vgid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_VEGID:
+               return lttng_add_vegid_to_ctx(ctx);
+       case LTTNG_KERNEL_CONTEXT_VSGID:
+               return lttng_add_vsgid_to_ctx(ctx);
+       default:
+               return -EINVAL;
+       }
+}
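
Each case resolves to one lttng_add_*_to_ctx() helper. From user space this is driven by passing a struct lttng_kernel_context through an ioctl on a channel or event fd; a hedged sketch, assuming the LTTNG_KERNEL_CONTEXT command from lttng/abi.h is what routes here:

#include <string.h>
#include <sys/ioctl.h>

/*
 * Minimal sketch (hypothetical helper): request that the vPID context
 * be recorded. Must be issued before the session is first started;
 * once session->been_active is set, lttng_abi_add_context() returns
 * -EPERM.
 */
static int add_vpid_context(int channel_fd)
{
	struct lttng_kernel_context ctx;

	memset(&ctx, 0, sizeof(ctx));
	ctx.ctx = LTTNG_KERNEL_CONTEXT_VPID;
	return ioctl(channel_fd, LTTNG_KERNEL_CONTEXT, &ctx);
}
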
+
+/**
+ *     lttng_ioctl - lttng syscall through ioctl
+ *
+ *     @file: the file
+ *     @cmd: the command
+ *     @arg: command arg
+ *
+ *     This ioctl implements lttng commands:
+ *     LTTNG_KERNEL_SESSION
+ *             Returns an LTTng trace session file descriptor
+ *     LTTNG_KERNEL_TRACER_VERSION
+ *             Returns the LTTng kernel tracer version
+ *     LTTNG_KERNEL_TRACEPOINT_LIST
+ *             Returns a file descriptor listing available tracepoints
+ *     LTTNG_KERNEL_WAIT_QUIESCENT
+ *             Returns after all previously running probes have completed
+ *     LTTNG_KERNEL_TRACER_ABI_VERSION
+ *             Returns the LTTng kernel tracer ABI version
+ *
+ * The returned session will be deleted when its file descriptor is closed.
+ */
+static
+long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case LTTNG_KERNEL_OLD_SESSION:
+       case LTTNG_KERNEL_SESSION:
+               return lttng_abi_create_session();
+       case LTTNG_KERNEL_OLD_TRACER_VERSION:
+       {
+               struct lttng_kernel_tracer_version v;
+               struct lttng_kernel_old_tracer_version oldv;
+               struct lttng_kernel_old_tracer_version __user *uversion =
+                       (struct lttng_kernel_old_tracer_version __user *) arg;
+
+               lttng_abi_tracer_version(&v);
+               oldv.major = v.major;
+               oldv.minor = v.minor;
+               oldv.patchlevel = v.patchlevel;
+
+               if (copy_to_user(uversion, &oldv, sizeof(oldv)))
+                       return -EFAULT;
+               return 0;
+       }
+       case LTTNG_KERNEL_TRACER_VERSION:
+       {
+               struct lttng_kernel_tracer_version version;
+               struct lttng_kernel_tracer_version __user *uversion =
+                       (struct lttng_kernel_tracer_version __user *) arg;
+
+               lttng_abi_tracer_version(&version);
+
+               if (copy_to_user(uversion, &version, sizeof(version)))
+                       return -EFAULT;
+               return 0;
+       }
+       case LTTNG_KERNEL_TRACER_ABI_VERSION:
+       {
+               struct lttng_kernel_tracer_abi_version version;
+               struct lttng_kernel_tracer_abi_version __user *uversion =
+                       (struct lttng_kernel_tracer_abi_version __user *) arg;
+
+               lttng_abi_tracer_abi_version(&version);
+
+               if (copy_to_user(uversion, &version, sizeof(version)))
+                       return -EFAULT;
+               return 0;
+       }
+       case LTTNG_KERNEL_OLD_TRACEPOINT_LIST:
+       case LTTNG_KERNEL_TRACEPOINT_LIST:
+               return lttng_abi_tracepoint_list();
+       case LTTNG_KERNEL_SYSCALL_LIST:
+               return lttng_abi_syscall_list();
+       case LTTNG_KERNEL_OLD_WAIT_QUIESCENT:
+       case LTTNG_KERNEL_WAIT_QUIESCENT:
+               synchronize_trace();
+               return 0;
+       case LTTNG_KERNEL_OLD_CALIBRATE:
+       {
+               struct lttng_kernel_old_calibrate __user *ucalibrate =
+                       (struct lttng_kernel_old_calibrate __user *) arg;
+               struct lttng_kernel_old_calibrate old_calibrate;
+               struct lttng_kernel_calibrate calibrate;
+               int ret;
+
+               if (copy_from_user(&old_calibrate, ucalibrate, sizeof(old_calibrate)))
+                       return -EFAULT;
+               calibrate.type = old_calibrate.type;
+               ret = lttng_calibrate(&calibrate);
+               if (copy_to_user(ucalibrate, &old_calibrate, sizeof(old_calibrate)))
+                       return -EFAULT;
+               return ret;
+       }
+       case LTTNG_KERNEL_CALIBRATE:
+       {
+               struct lttng_kernel_calibrate __user *ucalibrate =
+                       (struct lttng_kernel_calibrate __user *) arg;
+               struct lttng_kernel_calibrate calibrate;
+               int ret;
+
+               if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
+                       return -EFAULT;
+               ret = lttng_calibrate(&calibrate);
+               if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
+                       return -EFAULT;
+               return ret;
+       }
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
+static const struct proc_ops lttng_proc_ops = {
+       .proc_ioctl = lttng_ioctl,
+#ifdef CONFIG_COMPAT
+       .proc_compat_ioctl = lttng_ioctl,
+#endif /* CONFIG_COMPAT */
+};
+#else
+static const struct file_operations lttng_proc_ops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = lttng_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = lttng_ioctl,
+#endif /* CONFIG_COMPAT */
+};
+#endif
+
+static
+int lttng_abi_create_channel(struct file *session_file,
+                            struct lttng_kernel_channel *chan_param,
+                            enum channel_type channel_type)
+{
+       struct lttng_session *session = session_file->private_data;
+       const struct file_operations *fops = NULL;
+       const char *transport_name;
+       struct lttng_channel *chan;
+       struct file *chan_file;
+       int chan_fd;
+       int ret = 0;
+
+       chan_fd = lttng_get_unused_fd();
+       if (chan_fd < 0) {
+               ret = chan_fd;
+               goto fd_error;
+       }
+       switch (channel_type) {
+       case PER_CPU_CHANNEL:
+               fops = &lttng_channel_fops;
+               break;
+       case METADATA_CHANNEL:
+               fops = &lttng_metadata_fops;
+               break;
+       }
+
+       chan_file = anon_inode_getfile("[lttng_channel]",
+                                      fops,
+                                      NULL, O_RDWR);
+       if (IS_ERR(chan_file)) {
+               ret = PTR_ERR(chan_file);
+               goto file_error;
+       }
+       switch (channel_type) {
+       case PER_CPU_CHANNEL:
+               if (chan_param->output == LTTNG_KERNEL_SPLICE) {
+                       transport_name = chan_param->overwrite ?
+                               "relay-overwrite" : "relay-discard";
+               } else if (chan_param->output == LTTNG_KERNEL_MMAP) {
+                       transport_name = chan_param->overwrite ?
+                               "relay-overwrite-mmap" : "relay-discard-mmap";
+               } else {
+                       return -EINVAL;
+               }
+               break;
+       case METADATA_CHANNEL:
+               if (chan_param->output == LTTNG_KERNEL_SPLICE)
+                       transport_name = "relay-metadata";
+               else if (chan_param->output == LTTNG_KERNEL_MMAP)
+                       transport_name = "relay-metadata-mmap";
+               else
+                       return -EINVAL;
+               break;
+       default:
+               transport_name = "<unknown>";
+               break;
+       }
+       if (!atomic_long_add_unless(&session_file->f_count, 1, LONG_MAX)) {
+               ret = -EOVERFLOW;
+               goto refcount_error;
+       }
+       /*
+        * No failure path is tolerated after channel creation: the channel
+        * stays invariant for the rest of the session's lifetime.
+        */
+       chan = lttng_channel_create(session, transport_name, NULL,
+                                 chan_param->subbuf_size,
+                                 chan_param->num_subbuf,
+                                 chan_param->switch_timer_interval,
+                                 chan_param->read_timer_interval,
+                                 channel_type);
+       if (!chan) {
+               ret = -EINVAL;
+               goto chan_error;
+       }
+       chan->file = chan_file;
+       chan_file->private_data = chan;
+       fd_install(chan_fd, chan_file);
+
+       return chan_fd;
+
+chan_error:
+       atomic_long_dec(&session_file->f_count);
+refcount_error:
+       fput(chan_file);
+file_error:
+       put_unused_fd(chan_fd);
+fd_error:
+       return ret;
+}
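
The transport is thus selected entirely by the (output, overwrite) pair in the channel parameters. A hedged sketch of filling those parameters from user space, with illustrative sizes and the struct fields as consumed above:

#include <sys/ioctl.h>

/*
 * Minimal sketch: create a per-CPU channel in discard mode over the
 * splice transport ("relay-discard" above). Sizes are illustrative;
 * LTTNG_KERNEL_CHANNEL and struct lttng_kernel_channel come from
 * lttng/abi.h.
 */
static int create_discard_channel(int session_fd)
{
	struct lttng_kernel_channel chan = {
		.overwrite = 0,			/* discard, not flight-recorder */
		.subbuf_size = 262144,		/* 256 kiB sub-buffers */
		.num_subbuf = 4,
		.switch_timer_interval = 0,	/* no periodic switch */
		.read_timer_interval = 200000,	/* reader wakeup interval */
		.output = LTTNG_KERNEL_SPLICE,
	};

	return ioctl(session_fd, LTTNG_KERNEL_CHANNEL, &chan);
}
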
+
+static
+int lttng_abi_session_set_name(struct lttng_session *session,
+               struct lttng_kernel_session_name *name)
+{
+       size_t len;
+
+       len = strnlen(name->name, LTTNG_KERNEL_SESSION_NAME_LEN);
+
+       if (len == LTTNG_KERNEL_SESSION_NAME_LEN) {
+               /* Name is too long/malformed */
+               return -EINVAL;
+       }
+
+       strcpy(session->name, name->name);
+       return 0;
+}
+
+static
+int lttng_abi_session_set_creation_time(struct lttng_session *session,
+               struct lttng_kernel_session_creation_time *time)
+{
+       size_t len;
+
+       len = strnlen(time->iso8601, LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN);
+
+       if (len == LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN) {
+               /* Time is too long/malformed */
+               return -EINVAL;
+       }
+
+       strcpy(session->creation_time, time->iso8601);
+       return 0;
+}
+
+static
+enum tracker_type get_tracker_type(struct lttng_kernel_tracker_args *tracker)
+{
+       switch (tracker->type) {
+       case LTTNG_KERNEL_TRACKER_PID:
+               return TRACKER_PID;
+       case LTTNG_KERNEL_TRACKER_VPID:
+               return TRACKER_VPID;
+       case LTTNG_KERNEL_TRACKER_UID:
+               return TRACKER_UID;
+       case LTTNG_KERNEL_TRACKER_VUID:
+               return TRACKER_VUID;
+       case LTTNG_KERNEL_TRACKER_GID:
+               return TRACKER_GID;
+       case LTTNG_KERNEL_TRACKER_VGID:
+               return TRACKER_VGID;
+       default:
+               return TRACKER_UNKNOWN;
+       }
+}
+
+/**
+ *     lttng_session_ioctl - lttng session fd ioctl
+ *
+ *     @file: the file
+ *     @cmd: the command
+ *     @arg: command arg
+ *
+ *     This ioctl implements lttng commands:
+ *     LTTNG_KERNEL_CHANNEL
+ *             Returns an LTTng channel file descriptor
+ *     LTTNG_KERNEL_ENABLE
+ *             Enables tracing for a session (weak enable)
+ *     LTTNG_KERNEL_DISABLE
+ *             Disables tracing for a session (strong disable)
+ *     LTTNG_KERNEL_METADATA
+ *             Returns an LTTng metadata file descriptor
+ *     LTTNG_KERNEL_SESSION_TRACK_PID
+ *             Add PID to session PID tracker
+ *     LTTNG_KERNEL_SESSION_UNTRACK_PID
+ *             Remove PID from session PID tracker
+ *     LTTNG_KERNEL_SESSION_TRACK_ID
+ *             Add ID to tracker
+ *     LTTNG_KERNEL_SESSION_UNTRACK_ID
+ *             Remove ID from tracker
+ *
+ * The returned channel will be deleted when its file descriptor is closed.
+ */
+static
+long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct lttng_session *session = file->private_data;
+       struct lttng_kernel_channel chan_param;
+       struct lttng_kernel_old_channel old_chan_param;
+
+       switch (cmd) {
+       case LTTNG_KERNEL_OLD_CHANNEL:
+       {
+               if (copy_from_user(&old_chan_param,
+                               (struct lttng_kernel_old_channel __user *) arg,
+                               sizeof(struct lttng_kernel_old_channel)))
+                       return -EFAULT;
+               chan_param.overwrite = old_chan_param.overwrite;
+               chan_param.subbuf_size = old_chan_param.subbuf_size;
+               chan_param.num_subbuf = old_chan_param.num_subbuf;
+               chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
+               chan_param.read_timer_interval = old_chan_param.read_timer_interval;
+               chan_param.output = old_chan_param.output;
+
+               return lttng_abi_create_channel(file, &chan_param,
+                               PER_CPU_CHANNEL);
+       }
+       case LTTNG_KERNEL_CHANNEL:
+       {
+               if (copy_from_user(&chan_param,
+                               (struct lttng_kernel_channel __user *) arg,
+                               sizeof(struct lttng_kernel_channel)))
+                       return -EFAULT;
+               return lttng_abi_create_channel(file, &chan_param,
+                               PER_CPU_CHANNEL);
+       }
+       case LTTNG_KERNEL_OLD_SESSION_START:
+       case LTTNG_KERNEL_OLD_ENABLE:
+       case LTTNG_KERNEL_SESSION_START:
+       case LTTNG_KERNEL_ENABLE:
+               return lttng_session_enable(session);
+       case LTTNG_KERNEL_OLD_SESSION_STOP:
+       case LTTNG_KERNEL_OLD_DISABLE:
+       case LTTNG_KERNEL_SESSION_STOP:
+       case LTTNG_KERNEL_DISABLE:
+               return lttng_session_disable(session);
+       case LTTNG_KERNEL_OLD_METADATA:
+       {
+               if (copy_from_user(&old_chan_param,
+                               (struct lttng_kernel_old_channel __user *) arg,
+                               sizeof(struct lttng_kernel_old_channel)))
+                       return -EFAULT;
+               chan_param.overwrite = old_chan_param.overwrite;
+               chan_param.subbuf_size = old_chan_param.subbuf_size;
+               chan_param.num_subbuf = old_chan_param.num_subbuf;
+               chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
+               chan_param.read_timer_interval = old_chan_param.read_timer_interval;
+               chan_param.output = old_chan_param.output;
+
+               return lttng_abi_create_channel(file, &chan_param,
+                               METADATA_CHANNEL);
+       }
+       case LTTNG_KERNEL_METADATA:
+       {
+               if (copy_from_user(&chan_param,
+                                       (struct lttng_kernel_channel __user *) arg,
+                                       sizeof(struct lttng_kernel_channel)))
+                       return -EFAULT;
+               return lttng_abi_create_channel(file, &chan_param,
+                               METADATA_CHANNEL);
+       }
+       case LTTNG_KERNEL_SESSION_TRACK_PID:
+               return lttng_session_track_id(session, TRACKER_PID, (int) arg);
+       case LTTNG_KERNEL_SESSION_UNTRACK_PID:
+               return lttng_session_untrack_id(session, TRACKER_PID, (int) arg);
+       case LTTNG_KERNEL_SESSION_TRACK_ID:
+       {
+               struct lttng_kernel_tracker_args tracker;
+               enum tracker_type tracker_type;
+
+               if (copy_from_user(&tracker,
+                               (struct lttng_kernel_tracker_args __user *) arg,
+                               sizeof(struct lttng_kernel_tracker_args)))
+                       return -EFAULT;
+               tracker_type = get_tracker_type(&tracker);
+               if (tracker_type == TRACKER_UNKNOWN)
+                       return -EINVAL;
+               return lttng_session_track_id(session, tracker_type, tracker.id);
+       }
+       case LTTNG_KERNEL_SESSION_UNTRACK_ID:
+       {
+               struct lttng_kernel_tracker_args tracker;
+               enum tracker_type tracker_type;
+
+               if (copy_from_user(&tracker,
+                               (struct lttng_kernel_tracker_args __user *) arg,
+                               sizeof(struct lttng_kernel_tracker_args)))
+                       return -EFAULT;
+               tracker_type = get_tracker_type(&tracker);
+               if (tracker_type == TRACKER_UNKNOWN)
+                       return -EINVAL;
+               return lttng_session_untrack_id(session, tracker_type,
+                               tracker.id);
+       }
+       case LTTNG_KERNEL_SESSION_LIST_TRACKER_PIDS:
+               return lttng_session_list_tracker_ids(session, TRACKER_PID);
+       case LTTNG_KERNEL_SESSION_LIST_TRACKER_IDS:
+       {
+               struct lttng_kernel_tracker_args tracker;
+               enum tracker_type tracker_type;
+
+               if (copy_from_user(&tracker,
+                               (struct lttng_kernel_tracker_args __user *) arg,
+                               sizeof(struct lttng_kernel_tracker_args)))
+                       return -EFAULT;
+               tracker_type = get_tracker_type(&tracker);
+               if (tracker_type == TRACKER_UNKNOWN)
+                       return -EINVAL;
+               return lttng_session_list_tracker_ids(session, tracker_type);
+       }
+       case LTTNG_KERNEL_SESSION_METADATA_REGEN:
+               return lttng_session_metadata_regenerate(session);
+       case LTTNG_KERNEL_SESSION_STATEDUMP:
+               return lttng_session_statedump(session);
+       case LTTNG_KERNEL_SESSION_SET_NAME:
+       {
+               struct lttng_kernel_session_name name;
+
+               if (copy_from_user(&name,
+                               (struct lttng_kernel_session_name __user *) arg,
+                               sizeof(struct lttng_kernel_session_name)))
+                       return -EFAULT;
+               return lttng_abi_session_set_name(session, &name);
+       }
+       case LTTNG_KERNEL_SESSION_SET_CREATION_TIME:
+       {
+               struct lttng_kernel_session_creation_time time;
+
+               if (copy_from_user(&time,
+                               (struct lttng_kernel_session_creation_time __user *) arg,
+                               sizeof(struct lttng_kernel_session_creation_time)))
+                       return -EFAULT;
+               return lttng_abi_session_set_creation_time(session, &time);
+       }
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
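
For the tracker commands, one struct drives track, untrack, and list alike; a minimal sketch of the track case (hypothetical helper; struct fields and command as consumed above, both from lttng/abi.h):

#include <sys/ioctl.h>

/*
 * Minimal sketch: restrict tracing to one vPID through the ID tracker
 * commands handled above.
 */
static int track_vpid(int session_fd, int vpid)
{
	struct lttng_kernel_tracker_args tracker = {
		.type = LTTNG_KERNEL_TRACKER_VPID,
		.id = vpid,
	};

	return ioctl(session_fd, LTTNG_KERNEL_SESSION_TRACK_ID, &tracker);
}
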
+
+/*
+ * Called when the last file reference is dropped.
+ *
+ * Big fat note: channels and events are invariant for the whole session after
+ * their creation. So this session destruction also destroys all channel and
+ * event structures specific to this session (they are not destroyed when their
+ * individual file is released).
+ */
+static
+int lttng_session_release(struct inode *inode, struct file *file)
+{
+       struct lttng_session *session = file->private_data;
+
+       if (session)
+               lttng_session_destroy(session);
+       return 0;
+}
+
+static const struct file_operations lttng_session_fops = {
+       .owner = THIS_MODULE,
+       .release = lttng_session_release,
+       .unlocked_ioctl = lttng_session_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = lttng_session_ioctl,
+#endif
+};
+
+/**
+ *     lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
+ *     @filp: the file
+ *     @wait: poll table
+ *
+ *     Handles the poll operations for the metadata channels.
+ */
+static
+unsigned int lttng_metadata_ring_buffer_poll(struct file *filp,
+               poll_table *wait)
+{
+       struct lttng_metadata_stream *stream = filp->private_data;
+       struct lib_ring_buffer *buf = stream->priv;
+       int finalized;
+       unsigned int mask = 0;
+
+       if (filp->f_mode & FMODE_READ) {
+               poll_wait_set_exclusive(wait);
+               poll_wait(filp, &stream->read_wait, wait);
+
+               finalized = stream->finalized;
+
+               /*
+                * lib_ring_buffer_is_finalized() contains a smp_rmb(),
+                * which orders the load of the finalized flag before the
+                * offset loads.
+                */
+               WARN_ON(atomic_long_read(&buf->active_readers) != 1);
+
+               if (finalized)
+                       mask |= POLLHUP;
+
+               mutex_lock(&stream->metadata_cache->lock);
+               if (stream->metadata_cache->metadata_written >
+                               stream->metadata_out)
+                       mask |= POLLIN;
+               mutex_unlock(&stream->metadata_cache->lock);
+       }
+
+       return mask;
+}
+
+static
+void lttng_metadata_ring_buffer_ioctl_put_next_subbuf(struct file *filp,
+               unsigned int cmd, unsigned long arg)
+{
+       struct lttng_metadata_stream *stream = filp->private_data;
+
+       stream->metadata_out = stream->metadata_in;
+}
+
+/*
+ * Reset the counter tracking how much metadata has been consumed to 0, so
+ * that the consumer receives the content of the metadata cache unchanged.
+ * This differs from metadata_regenerate, where the offset from epoch is
+ * resampled: here we want exactly the same content as the last time the
+ * metadata was generated. This command is only possible if all the
+ * metadata written to the cache has already been output to the metadata
+ * stream, to avoid corrupting the metadata file.
+ *
+ * Return 0 on success, a negative value on error.
+ */
+static
+int lttng_metadata_cache_dump(struct lttng_metadata_stream *stream)
+{
+       int ret;
+       struct lttng_metadata_cache *cache = stream->metadata_cache;
+
+       mutex_lock(&cache->lock);
+       if (stream->metadata_out != cache->metadata_written) {
+               ret = -EBUSY;
+               goto end;
+       }
+       stream->metadata_out = 0;
+       stream->metadata_in = 0;
+       wake_up_interruptible(&stream->read_wait);
+       ret = 0;
+
+end:
+       mutex_unlock(&cache->lock);
+       return ret;
+}
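
Because of that precondition, a consumer should expect -EBUSY and drain the stream first; a minimal sketch, where consume_all_metadata() is a hypothetical helper standing in for the stream-draining loop:

#include <errno.h>
#include <sys/ioctl.h>

static int consume_all_metadata(int metadata_fd);	/* hypothetical */

/*
 * Minimal sketch: request a full metadata re-dump, draining the stream
 * first when the cache still holds unconsumed data (-EBUSY above).
 */
static int redump_metadata(int metadata_fd)
{
	int ret = ioctl(metadata_fd, RING_BUFFER_METADATA_CACHE_DUMP);

	if (ret < 0 && errno == EBUSY) {
		if (consume_all_metadata(metadata_fd) < 0)
			return -1;
		ret = ioctl(metadata_fd, RING_BUFFER_METADATA_CACHE_DUMP);
	}
	return ret;
}
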
+
+static
+long lttng_metadata_ring_buffer_ioctl(struct file *filp,
+               unsigned int cmd, unsigned long arg)
+{
+       int ret;
+       struct lttng_metadata_stream *stream = filp->private_data;
+       struct lib_ring_buffer *buf = stream->priv;
+
+       switch (cmd) {
+       case RING_BUFFER_GET_NEXT_SUBBUF:
+       {
+               struct lttng_metadata_stream *stream = filp->private_data;
+               struct lib_ring_buffer *buf = stream->priv;
+               struct channel *chan = buf->backend.chan;
+
+               ret = lttng_metadata_output_channel(stream, chan);
+               if (ret > 0) {
+                       lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+                       ret = 0;
+               } else if (ret < 0)
+                       goto err;
+               break;
+       }
+       case RING_BUFFER_GET_SUBBUF:
+       {
+               /*
+                * Random access is not allowed for the metadata channel.
+                */
+               return -ENOSYS;
+       }
+       case RING_BUFFER_FLUSH_EMPTY:   /* Fall-through. */
+       case RING_BUFFER_FLUSH:
+       {
+               struct lttng_metadata_stream *stream = filp->private_data;
+               struct lib_ring_buffer *buf = stream->priv;
+               struct channel *chan = buf->backend.chan;
+
+               /*
+                * Before doing the actual ring buffer flush, write up to one
+                * packet of metadata into the ring buffer.
+                */
+               ret = lttng_metadata_output_channel(stream, chan);
+               if (ret < 0)
+                       goto err;
+               break;
+       }
+       case RING_BUFFER_GET_METADATA_VERSION:
+       {
+               struct lttng_metadata_stream *stream = filp->private_data;
+
+               return put_u64(stream->version, arg);
+       }
+       case RING_BUFFER_METADATA_CACHE_DUMP:
+       {
+               struct lttng_metadata_stream *stream = filp->private_data;
+
+               return lttng_metadata_cache_dump(stream);
+       }
+       default:
+               break;
+       }
+       /* PUT_SUBBUF is the unmodified lib ring buffer command. */
+
+       /* Perform the lib ring buffer ioctl after our own. */
+       ret = lib_ring_buffer_ioctl(filp, cmd, arg, buf);
+       if (ret < 0)
+               goto err;
+
+       switch (cmd) {
+       case RING_BUFFER_PUT_NEXT_SUBBUF:
+       {
+               lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
+                               cmd, arg);
+               break;
+       }
+       default:
+               break;
+       }
+err:
+       return ret;
+}
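
Putting the pieces together, the metadata consumption cycle served by this ioctl looks roughly as follows (hedged sketch; data extraction through splice or mmap is elided):

#include <sys/ioctl.h>

/*
 * Minimal sketch of one metadata consumption cycle: GET_NEXT_SUBBUF
 * first pulls cached metadata into the ring buffer (see above), the
 * data is then extracted via splice() or mmap(), and PUT_NEXT_SUBBUF
 * advances metadata_out through the hook above.
 */
static int consume_metadata_once(int metadata_fd)
{
	unsigned long size;

	if (ioctl(metadata_fd, RING_BUFFER_GET_NEXT_SUBBUF) < 0)
		return -1;	/* nothing pending */
	if (ioctl(metadata_fd, RING_BUFFER_GET_SUBBUF_SIZE, &size) < 0) {
		ioctl(metadata_fd, RING_BUFFER_PUT_NEXT_SUBBUF);
		return -1;
	}
	/* ... extract `size` bytes here (splice or mmap path) ... */
	return ioctl(metadata_fd, RING_BUFFER_PUT_NEXT_SUBBUF);
}
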
+
+#ifdef CONFIG_COMPAT
+static
+long lttng_metadata_ring_buffer_compat_ioctl(struct file *filp,
+               unsigned int cmd, unsigned long arg)
+{
+       int ret;
+       struct lttng_metadata_stream *stream = filp->private_data;
+       struct lib_ring_buffer *buf = stream->priv;
+
+       switch (cmd) {
+       case RING_BUFFER_GET_NEXT_SUBBUF:
+       {
+               struct lttng_metadata_stream *stream = filp->private_data;
+               struct lib_ring_buffer *buf = stream->priv;
+               struct channel *chan = buf->backend.chan;
+
+               ret = lttng_metadata_output_channel(stream, chan);
+               if (ret > 0) {
+                       lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+                       ret = 0;
+               } else if (ret < 0)
+                       goto err;
+               break;
+       }
+       case RING_BUFFER_GET_SUBBUF:
+       {
+               /*
+                * Random access is not allowed for the metadata channel.
+                */
+               return -ENOSYS;
+       }
+       case RING_BUFFER_FLUSH_EMPTY:   /* Fall-through. */
+       case RING_BUFFER_FLUSH:
+       {
+               struct lttng_metadata_stream *stream = filp->private_data;
+               struct lib_ring_buffer *buf = stream->priv;
+               struct channel *chan = buf->backend.chan;
+
+               /*
+                * Before doing the actual ring buffer flush, write up to one
+                * packet of metadata into the ring buffer.
+                */
+               ret = lttng_metadata_output_channel(stream, chan);
+               if (ret < 0)
+                       goto err;
+               break;
+       }
+       case RING_BUFFER_GET_METADATA_VERSION:
+       {
+               struct lttng_metadata_stream *stream = filp->private_data;
+
+               return put_u64(stream->version, arg);
+       }
+       case RING_BUFFER_METADATA_CACHE_DUMP:
+       {
+               struct lttng_metadata_stream *stream = filp->private_data;
+
+               return lttng_metadata_cache_dump(stream);
+       }
+       default:
+               break;
+       }
+       /* PUT_SUBBUF is the unmodified lib ring buffer command. */
+
+       /* Perform the lib ring buffer ioctl after our own. */
+       ret = lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
+       if (ret < 0)
+               goto err;
+
+       switch (cmd) {
+       case RING_BUFFER_PUT_NEXT_SUBBUF:
+       {
+               lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
+                               cmd, arg);
+               break;
+       }
+       default:
+               break;
+       }
+err:
+       return ret;
+}
+#endif
+
+/*
+ * This is not used by anonymous file descriptors. This code is kept in
+ * case we ever want to implement an inode with an open() operation.
+ */
+static
+int lttng_metadata_ring_buffer_open(struct inode *inode, struct file *file)
+{
+       struct lttng_metadata_stream *stream = inode->i_private;
+       struct lib_ring_buffer *buf = stream->priv;
+
+       file->private_data = buf;
+       /*
+        * Since the lifetime of the metadata cache differs from that of
+        * the session, we need to keep our own reference on the transport.
+        */
+       if (!try_module_get(stream->transport->owner)) {
+               printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+               return -EBUSY;
+       }
+       return lib_ring_buffer_open(inode, file, buf);
+}
+
+static
+int lttng_metadata_ring_buffer_release(struct inode *inode, struct file *file)
+{
+       struct lttng_metadata_stream *stream = file->private_data;
+       struct lib_ring_buffer *buf = stream->priv;
+
+       kref_put(&stream->metadata_cache->refcount, metadata_cache_destroy);
+       module_put(stream->transport->owner);
+       return lib_ring_buffer_release(inode, file, buf);
+}
+
+static
+ssize_t lttng_metadata_ring_buffer_splice_read(struct file *in, loff_t *ppos,
+               struct pipe_inode_info *pipe, size_t len,
+               unsigned int flags)
+{
+       struct lttng_metadata_stream *stream = in->private_data;
+       struct lib_ring_buffer *buf = stream->priv;
+
+       return lib_ring_buffer_splice_read(in, ppos, pipe, len,
+                       flags, buf);
+}
+
+static
+int lttng_metadata_ring_buffer_mmap(struct file *filp,
+               struct vm_area_struct *vma)
+{
+       struct lttng_metadata_stream *stream = filp->private_data;
+       struct lib_ring_buffer *buf = stream->priv;
+
+       return lib_ring_buffer_mmap(filp, vma, buf);
+}
+
+static
+const struct file_operations lttng_metadata_ring_buffer_file_operations = {
+       .owner = THIS_MODULE,
+       .open = lttng_metadata_ring_buffer_open,
+       .release = lttng_metadata_ring_buffer_release,
+       .poll = lttng_metadata_ring_buffer_poll,
+       .splice_read = lttng_metadata_ring_buffer_splice_read,
+       .mmap = lttng_metadata_ring_buffer_mmap,
+       .unlocked_ioctl = lttng_metadata_ring_buffer_ioctl,
+       .llseek = vfs_lib_ring_buffer_no_llseek,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = lttng_metadata_ring_buffer_compat_ioctl,
+#endif
+};
+
+static
+int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
+               const struct file_operations *fops)
+{
+       int stream_fd, ret;
+       struct file *stream_file;
+
+       stream_fd = lttng_get_unused_fd();
+       if (stream_fd < 0) {
+               ret = stream_fd;
+               goto fd_error;
+       }
+       stream_file = anon_inode_getfile("[lttng_stream]", fops,
+                       stream_priv, O_RDWR);
+       if (IS_ERR(stream_file)) {
+               ret = PTR_ERR(stream_file);
+               goto file_error;
+       }
+       /*
+        * OPEN_FMODE, called within anon_inode_getfile/alloc_file, does not
+        * honor FMODE_LSEEK, FMODE_PREAD, or FMODE_PWRITE. We need to read from this
+        * file descriptor, so we set FMODE_PREAD here.
+        */
+       stream_file->f_mode |= FMODE_PREAD;
+       fd_install(stream_fd, stream_file);
+       /*
+        * The stream holds a reference to the channel within the generic ring
+        * buffer library, so no need to hold a refcount on the channel and
+        * session files here.
+        */
+       return stream_fd;
+
+file_error:
+       put_unused_fd(stream_fd);
+fd_error:
+       return ret;
+}
+
+static
+int lttng_abi_open_stream(struct file *channel_file)
+{
+       struct lttng_channel *channel = channel_file->private_data;
+       struct lib_ring_buffer *buf;
+       int ret;
+       void *stream_priv;
+
+       buf = channel->ops->buffer_read_open(channel->chan);
+       if (!buf)
+               return -ENOENT;
+
+       stream_priv = buf;
+       ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
+                       &lttng_stream_ring_buffer_file_operations);
+       if (ret < 0)
+               goto fd_error;
+
+       return ret;
+
+fd_error:
+       channel->ops->buffer_read_close(buf);
+       return ret;
+}
+
+static
+int lttng_abi_open_metadata_stream(struct file *channel_file)
+{
+       struct lttng_channel *channel = channel_file->private_data;
+       struct lttng_session *session = channel->session;
+       struct lib_ring_buffer *buf;
+       int ret;
+       struct lttng_metadata_stream *metadata_stream;
+       void *stream_priv;
+
+       buf = channel->ops->buffer_read_open(channel->chan);
+       if (!buf)
+               return -ENOENT;
+
+       metadata_stream = kzalloc(sizeof(struct lttng_metadata_stream),
+                       GFP_KERNEL);
+       if (!metadata_stream) {
+               ret = -ENOMEM;
+               goto nomem;
+       }
+       metadata_stream->metadata_cache = session->metadata_cache;
+       init_waitqueue_head(&metadata_stream->read_wait);
+       metadata_stream->priv = buf;
+       stream_priv = metadata_stream;
+       metadata_stream->transport = channel->transport;
+
+       /*
+        * Since the lifetime of the metadata cache differs from that of
+        * the session, we need to keep our own reference on the transport.
+        */
+       if (!try_module_get(metadata_stream->transport->owner)) {
+               printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+               ret = -EINVAL;
+               goto notransport;
+       }
+
+       if (!lttng_kref_get(&session->metadata_cache->refcount)) {
+               ret = -EOVERFLOW;
+               goto kref_error;
+       }
+
+       ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
+                       &lttng_metadata_ring_buffer_file_operations);
+       if (ret < 0)
+               goto fd_error;
+
+       list_add(&metadata_stream->list,
+               &session->metadata_cache->metadata_stream);
+       return ret;
+
+fd_error:
+       kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
+kref_error:
+       module_put(metadata_stream->transport->owner);
+notransport:
+       kfree(metadata_stream);
+nomem:
+       channel->ops->buffer_read_close(buf);
+       return ret;
+}
+
+static
+int lttng_abi_create_event(struct file *channel_file,
+                          struct lttng_kernel_event *event_param)
+{
+       struct lttng_channel *channel = channel_file->private_data;
+       int event_fd, ret;
+       struct file *event_file;
+       void *priv;
+
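+       /* Ensure user-supplied strings are NUL-terminated before use. */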
+       event_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+       switch (event_param->instrumentation) {
+       case LTTNG_KERNEL_KRETPROBE:
+               event_param->u.kretprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+               break;
+       case LTTNG_KERNEL_KPROBE:
+               event_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+               break;
+       case LTTNG_KERNEL_FUNCTION:
+               WARN_ON_ONCE(1);
+               /* Not implemented. */
+               break;
+       default:
+               break;
+       }
+       event_fd = lttng_get_unused_fd();
+       if (event_fd < 0) {
+               ret = event_fd;
+               goto fd_error;
+       }
+       event_file = anon_inode_getfile("[lttng_event]",
+                                       &lttng_event_fops,
+                                       NULL, O_RDWR);
+       if (IS_ERR(event_file)) {
+               ret = PTR_ERR(event_file);
+               goto file_error;
+       }
+       /* The event holds a reference on the channel */
+       if (!atomic_long_add_unless(&channel_file->f_count, 1, LONG_MAX)) {
+               ret = -EOVERFLOW;
+               goto refcount_error;
+       }
+       if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
+                       || event_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
+               struct lttng_enabler *enabler;
+
+               if (strutils_is_star_glob_pattern(event_param->name)) {
+                       /*
+                        * If the event name is a star globbing pattern,
+                        * we create the special star globbing enabler.
+                        */
+                       enabler = lttng_enabler_create(LTTNG_ENABLER_STAR_GLOB,
+                               event_param, channel);
+               } else {
+                       enabler = lttng_enabler_create(LTTNG_ENABLER_NAME,
+                               event_param, channel);
+               }
+               priv = enabler;
+       } else {
+               struct lttng_event *event;
+
+               /*
+                * We tolerate no failure path after event creation. It
+                * will stay invariant for the rest of the session.
+                */
+               event = lttng_event_create(channel, event_param,
+                               NULL, NULL,
+                               event_param->instrumentation);
+               WARN_ON_ONCE(!event);
+               if (IS_ERR(event)) {
+                       ret = PTR_ERR(event);
+                       goto event_error;
+               }
+               priv = event;
+       }
+       event_file->private_data = priv;
+       fd_install(event_fd, event_file);
+       return event_fd;
+
+event_error:
+       atomic_long_dec(&channel_file->f_count);
+refcount_error:
+       fput(event_file);
+file_error:
+       put_unused_fd(event_fd);
+fd_error:
+       return ret;
+}
+
+/**
+ *     lttng_channel_ioctl - lttng syscall through ioctl
+ *
+ *     @file: the file
+ *     @cmd: the command
+ *     @arg: command arg
+ *
+ *     This ioctl implements lttng commands:
+ *     LTTNG_KERNEL_STREAM
+ *             Returns an event stream file descriptor or failure.
+ *             (typically, one event stream records events from one CPU)
+ *     LTTNG_KERNEL_EVENT
+ *             Returns an event file descriptor or failure.
+ *     LTTNG_KERNEL_CONTEXT
+ *             Prepend a context field to each event in the channel
+ *     LTTNG_KERNEL_ENABLE
+ *             Enable recording for events in this channel (weak enable)
+ *     LTTNG_KERNEL_DISABLE
+ *             Disable recording for events in this channel (strong disable)
+ *
+ * Channel and event file descriptors also hold a reference on the session.
+ */
+static
+long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct lttng_channel *channel = file->private_data;
+
+       switch (cmd) {
+       case LTTNG_KERNEL_OLD_STREAM:
+       case LTTNG_KERNEL_STREAM:
+               return lttng_abi_open_stream(file);
+       case LTTNG_KERNEL_OLD_EVENT:
+       {
+               struct lttng_kernel_event *uevent_param;
+               struct lttng_kernel_old_event *old_uevent_param;
+               int ret;
+
+               uevent_param = kmalloc(sizeof(struct lttng_kernel_event),
+                               GFP_KERNEL);
+               if (!uevent_param) {
+                       ret = -ENOMEM;
+                       goto old_event_end;
+               }
+               old_uevent_param = kmalloc(
+                               sizeof(struct lttng_kernel_old_event),
+                               GFP_KERNEL);
+               if (!old_uevent_param) {
+                       ret = -ENOMEM;
+                       goto old_event_error_free_param;
+               }
+               if (copy_from_user(old_uevent_param,
+                               (struct lttng_kernel_old_event __user *) arg,
+                               sizeof(struct lttng_kernel_old_event))) {
+                       ret = -EFAULT;
+                       goto old_event_error_free_old_param;
+               }
+
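+               /* Translate the old ABI event layout into the current structure. */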
+               memcpy(uevent_param->name, old_uevent_param->name,
+                               sizeof(uevent_param->name));
+               uevent_param->instrumentation =
+                       old_uevent_param->instrumentation;
+
+               switch (old_uevent_param->instrumentation) {
+               case LTTNG_KERNEL_KPROBE:
+                       uevent_param->u.kprobe.addr =
+                               old_uevent_param->u.kprobe.addr;
+                       uevent_param->u.kprobe.offset =
+                               old_uevent_param->u.kprobe.offset;
+                       memcpy(uevent_param->u.kprobe.symbol_name,
+                               old_uevent_param->u.kprobe.symbol_name,
+                               sizeof(uevent_param->u.kprobe.symbol_name));
+                       break;
+               case LTTNG_KERNEL_KRETPROBE:
+                       uevent_param->u.kretprobe.addr =
+                               old_uevent_param->u.kretprobe.addr;
+                       uevent_param->u.kretprobe.offset =
+                               old_uevent_param->u.kretprobe.offset;
+                       memcpy(uevent_param->u.kretprobe.symbol_name,
+                               old_uevent_param->u.kretprobe.symbol_name,
+                               sizeof(uevent_param->u.kretprobe.symbol_name));
+                       break;
+               case LTTNG_KERNEL_FUNCTION:
+                       WARN_ON_ONCE(1);
+                       /* Not implemented. */
+                       break;
+               default:
+                       break;
+               }
+               ret = lttng_abi_create_event(file, uevent_param);
+
+old_event_error_free_old_param:
+               kfree(old_uevent_param);
+old_event_error_free_param:
+               kfree(uevent_param);
+old_event_end:
+               return ret;
+       }
+       case LTTNG_KERNEL_EVENT:
+       {
+               struct lttng_kernel_event uevent_param;
+
+               if (copy_from_user(&uevent_param,
+                               (struct lttng_kernel_event __user *) arg,
+                               sizeof(uevent_param)))
+                       return -EFAULT;
+               return lttng_abi_create_event(file, &uevent_param);
+       }
+       case LTTNG_KERNEL_OLD_CONTEXT:
+       {
+               struct lttng_kernel_context *ucontext_param;
+               struct lttng_kernel_old_context *old_ucontext_param;
+               int ret;
+
+               ucontext_param = kmalloc(sizeof(struct lttng_kernel_context),
+                               GFP_KERNEL);
+               if (!ucontext_param) {
+                       ret = -ENOMEM;
+                       goto old_ctx_end;
+               }
+               old_ucontext_param = kmalloc(sizeof(struct lttng_kernel_old_context),
+                               GFP_KERNEL);
+               if (!old_ucontext_param) {
+                       ret = -ENOMEM;
+                       goto old_ctx_error_free_param;
+               }
+
+               if (copy_from_user(old_ucontext_param,
+                               (struct lttng_kernel_old_context __user *) arg,
+                               sizeof(struct lttng_kernel_old_context))) {
+                       ret = -EFAULT;
+                       goto old_ctx_error_free_old_param;
+               }
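+               /* Translate the old ABI context layout into the current structure. */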
+               ucontext_param->ctx = old_ucontext_param->ctx;
+               memcpy(ucontext_param->padding, old_ucontext_param->padding,
+                               sizeof(ucontext_param->padding));
+               /* only type that uses the union */
+               if (old_ucontext_param->ctx == LTTNG_KERNEL_CONTEXT_PERF_COUNTER) {
+                       ucontext_param->u.perf_counter.type =
+                               old_ucontext_param->u.perf_counter.type;
+                       ucontext_param->u.perf_counter.config =
+                               old_ucontext_param->u.perf_counter.config;
+                       memcpy(ucontext_param->u.perf_counter.name,
+                               old_ucontext_param->u.perf_counter.name,
+                               sizeof(ucontext_param->u.perf_counter.name));
+               }
+
+               ret = lttng_abi_add_context(file,
+                               ucontext_param,
+                               &channel->ctx, channel->session);
+
+old_ctx_error_free_old_param:
+               kfree(old_ucontext_param);
+old_ctx_error_free_param:
+               kfree(ucontext_param);
+old_ctx_end:
+               return ret;
+       }
+       case LTTNG_KERNEL_CONTEXT:
+       {
+               struct lttng_kernel_context ucontext_param;
+
+               if (copy_from_user(&ucontext_param,
+                               (struct lttng_kernel_context __user *) arg,
+                               sizeof(ucontext_param)))
+                       return -EFAULT;
+               return lttng_abi_add_context(file,
+                               &ucontext_param,
+                               &channel->ctx, channel->session);
+       }
+       case LTTNG_KERNEL_OLD_ENABLE:
+       case LTTNG_KERNEL_ENABLE:
+               return lttng_channel_enable(channel);
+       case LTTNG_KERNEL_OLD_DISABLE:
+       case LTTNG_KERNEL_DISABLE:
+               return lttng_channel_disable(channel);
+       case LTTNG_KERNEL_SYSCALL_MASK:
+               return lttng_channel_syscall_mask(channel,
+                       (struct lttng_kernel_syscall_mask __user *) arg);
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+
+/**
+ *     lttng_metadata_ioctl - lttng syscall through ioctl
+ *
+ *     @file: the file
+ *     @cmd: the command
+ *     @arg: command arg
+ *
+ *     This ioctl implements lttng commands:
+ *     LTTNG_KERNEL_STREAM
+ *             Returns a metadata stream file descriptor or failure.
+ *
+ * Channel and event file descriptors also hold a reference on the session.
+ */
+static
+long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case LTTNG_KERNEL_OLD_STREAM:
+       case LTTNG_KERNEL_STREAM:
+               return lttng_abi_open_metadata_stream(file);
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+
+/**
+ *     lttng_channel_poll - lttng stream addition/removal monitoring
+ *
+ *     @file: the file
+ *     @wait: poll table
+ */
+unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
+{
+       struct lttng_channel *channel = file->private_data;
+       unsigned int mask = 0;
+
+       if (file->f_mode & FMODE_READ) {
+               poll_wait_set_exclusive(wait);
+               poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
+                         wait);
+
+               if (channel->ops->is_disabled(channel->chan))
+                       return POLLERR;
+               if (channel->ops->is_finalized(channel->chan))
+                       return POLLHUP;
+               if (channel->ops->buffer_has_read_closed_stream(channel->chan))
+                       return POLLIN | POLLRDNORM;
+               return 0;
+       }
+       return mask;
+}
+
+static
+int lttng_channel_release(struct inode *inode, struct file *file)
+{
+       struct lttng_channel *channel = file->private_data;
+
+       if (channel)
+               fput(channel->session->file);
+       return 0;
+}
+
+static
+int lttng_metadata_channel_release(struct inode *inode, struct file *file)
+{
+       struct lttng_channel *channel = file->private_data;
+
+       if (channel) {
+               fput(channel->session->file);
+               lttng_metadata_channel_destroy(channel);
+       }
+
+       return 0;
+}
+
+static const struct file_operations lttng_channel_fops = {
+       .owner = THIS_MODULE,
+       .release = lttng_channel_release,
+       .poll = lttng_channel_poll,
+       .unlocked_ioctl = lttng_channel_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = lttng_channel_ioctl,
+#endif
+};
+
+static const struct file_operations lttng_metadata_fops = {
+       .owner = THIS_MODULE,
+       .release = lttng_metadata_channel_release,
+       .unlocked_ioctl = lttng_metadata_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = lttng_metadata_ioctl,
+#endif
+};
+
+/**
+ *     lttng_event_ioctl - lttng syscall through ioctl
+ *
+ *     @file: the file
+ *     @cmd: the command
+ *     @arg: command arg
+ *
+ *     This ioctl implements lttng commands:
+ *     LTTNG_KERNEL_CONTEXT
+ *             Prepend a context field to each record of this event
+ *     LTTNG_KERNEL_ENABLE
+ *             Enable recording for this event (weak enable)
+ *     LTTNG_KERNEL_DISABLE
+ *             Disable recording for this event (strong disable)
+ */
+static
+long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct lttng_event *event;
+       struct lttng_enabler *enabler;
+       enum lttng_event_type *evtype = file->private_data;
+
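+       /*
+        * file->private_data points either to a struct lttng_event or to
+        * a struct lttng_enabler; both store their type as their first
+        * member, which lets us dispatch on it before casting.
+        */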
+       switch (cmd) {
+       case LTTNG_KERNEL_OLD_CONTEXT:
+       {
+               /* Not implemented */
+               return -ENOSYS;
+       }
+       case LTTNG_KERNEL_CONTEXT:
+       {
+               /* Not implemented */
+               return -ENOSYS;
+       }
+       case LTTNG_KERNEL_OLD_ENABLE:
+       case LTTNG_KERNEL_ENABLE:
+               switch (*evtype) {
+               case LTTNG_TYPE_EVENT:
+                       event = file->private_data;
+                       return lttng_event_enable(event);
+               case LTTNG_TYPE_ENABLER:
+                       enabler = file->private_data;
+                       return lttng_enabler_enable(enabler);
+               default:
+                       WARN_ON_ONCE(1);
+                       return -ENOSYS;
+               }
+       case LTTNG_KERNEL_OLD_DISABLE:
+       case LTTNG_KERNEL_DISABLE:
+               switch (*evtype) {
+               case LTTNG_TYPE_EVENT:
+                       event = file->private_data;
+                       return lttng_event_disable(event);
+               case LTTNG_TYPE_ENABLER:
+                       enabler = file->private_data;
+                       return lttng_enabler_disable(enabler);
+               default:
+                       WARN_ON_ONCE(1);
+                       return -ENOSYS;
+               }
+       case LTTNG_KERNEL_FILTER:
+               switch (*evtype) {
+               case LTTNG_TYPE_EVENT:
+                       return -EINVAL;
+               case LTTNG_TYPE_ENABLER:
+               {
+                       enabler = file->private_data;
+                       return lttng_enabler_attach_bytecode(enabler,
+                               (struct lttng_kernel_filter_bytecode __user *) arg);
+               }
+               default:
+                       WARN_ON_ONCE(1);
+                       return -ENOSYS;
+               }
+       case LTTNG_KERNEL_ADD_CALLSITE:
+               switch (*evtype) {
+               case LTTNG_TYPE_EVENT:
+                       event = file->private_data;
+                       return lttng_event_add_callsite(event,
+                               (struct lttng_kernel_event_callsite __user *) arg);
+               case LTTNG_TYPE_ENABLER:
+                       return -EINVAL;
+               default:
+                       WARN_ON_ONCE(1);
+                       return -ENOSYS;
+               }
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+
+static
+int lttng_event_release(struct inode *inode, struct file *file)
+{
+       struct lttng_event *event;
+       struct lttng_enabler *enabler;
+       enum lttng_event_type *evtype = file->private_data;
+
+       if (!evtype)
+               return 0;
+
+       switch (*evtype) {
+       case LTTNG_TYPE_EVENT:
+               event = file->private_data;
+               if (event)
+                       fput(event->chan->file);
+               break;
+       case LTTNG_TYPE_ENABLER:
+               enabler = file->private_data;
+               if (enabler)
+                       fput(enabler->chan->file);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+
+       return 0;
+}
+
+/* TODO: filter control ioctl */
+static const struct file_operations lttng_event_fops = {
+       .owner = THIS_MODULE,
+       .release = lttng_event_release,
+       .unlocked_ioctl = lttng_event_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = lttng_event_ioctl,
+#endif
+};
+
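+/* Copy a 64-bit value to the user-space address given as the ioctl argument. */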
+static int put_u64(uint64_t val, unsigned long arg)
+{
+       return put_user(val, (uint64_t __user *) arg);
+}
+
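+/*
+ * Handle LTTng-specific stream ioctl commands, delegating everything
+ * else to the lib ring buffer ioctl implementation.
+ */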
+static long lttng_stream_ring_buffer_ioctl(struct file *filp,
+               unsigned int cmd, unsigned long arg)
+{
+       struct lib_ring_buffer *buf = filp->private_data;
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_channel_ops *ops = chan->backend.priv_ops;
+       int ret;
+
+       if (atomic_read(&chan->record_disabled))
+               return -EIO;
+
+       switch (cmd) {
+       case LTTNG_RING_BUFFER_GET_TIMESTAMP_BEGIN:
+       {
+               uint64_t ts;
+
+               ret = ops->timestamp_begin(config, buf, &ts);
+               if (ret < 0)
+                       goto error;
+               return put_u64(ts, arg);
+       }
+       case LTTNG_RING_BUFFER_GET_TIMESTAMP_END:
+       {
+               uint64_t ts;
+
+               ret = ops->timestamp_end(config, buf, &ts);
+               if (ret < 0)
+                       goto error;
+               return put_u64(ts, arg);
+       }
+       case LTTNG_RING_BUFFER_GET_EVENTS_DISCARDED:
+       {
+               uint64_t ed;
+
+               ret = ops->events_discarded(config, buf, &ed);
+               if (ret < 0)
+                       goto error;
+               return put_u64(ed, arg);
+       }
+       case LTTNG_RING_BUFFER_GET_CONTENT_SIZE:
+       {
+               uint64_t cs;
+
+               ret = ops->content_size(config, buf, &cs);
+               if (ret < 0)
+                       goto error;
+               return put_u64(cs, arg);
+       }
+       case LTTNG_RING_BUFFER_GET_PACKET_SIZE:
+       {
+               uint64_t ps;
+
+               ret = ops->packet_size(config, buf, &ps);
+               if (ret < 0)
+                       goto error;
+               return put_u64(ps, arg);
+       }
+       case LTTNG_RING_BUFFER_GET_STREAM_ID:
+       {
+               uint64_t si;
+
+               ret = ops->stream_id(config, buf, &si);
+               if (ret < 0)
+                       goto error;
+               return put_u64(si, arg);
+       }
+       case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
+       {
+               uint64_t ts;
+
+               ret = ops->current_timestamp(config, buf, &ts);
+               if (ret < 0)
+                       goto error;
+               return put_u64(ts, arg);
+       }
+       case LTTNG_RING_BUFFER_GET_SEQ_NUM:
+       {
+               uint64_t seq;
+
+               ret = ops->sequence_number(config, buf, &seq);
+               if (ret < 0)
+                       goto error;
+               return put_u64(seq, arg);
+       }
+       case LTTNG_RING_BUFFER_INSTANCE_ID:
+       {
+               uint64_t id;
+
+               ret = ops->instance_id(config, buf, &id);
+               if (ret < 0)
+                       goto error;
+               return put_u64(id, arg);
+       }
+       default:
+               return lib_ring_buffer_file_operations.unlocked_ioctl(filp,
+                               cmd, arg);
+       }
+
+error:
+       return -ENOSYS;
+}
+
+#ifdef CONFIG_COMPAT
+static long lttng_stream_ring_buffer_compat_ioctl(struct file *filp,
+               unsigned int cmd, unsigned long arg)
+{
+       struct lib_ring_buffer *buf = filp->private_data;
+       struct channel *chan = buf->backend.chan;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_channel_ops *ops = chan->backend.priv_ops;
+       int ret;
+
+       if (atomic_read(&chan->record_disabled))
+               return -EIO;
+
+       switch (cmd) {
+       case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_BEGIN:
+       {
+               uint64_t ts;
+
+               ret = ops->timestamp_begin(config, buf, &ts);
+               if (ret < 0)
+                       goto error;
+               return put_u64(ts, arg);
+       }
+       case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_END:
+       {
+               uint64_t ts;
+
+               ret = ops->timestamp_end(config, buf, &ts);
+               if (ret < 0)
+                       goto error;
+               return put_u64(ts, arg);
+       }
+       case LTTNG_RING_BUFFER_COMPAT_GET_EVENTS_DISCARDED:
+       {
+               uint64_t ed;
+
+               ret = ops->events_discarded(config, buf, &ed);
+               if (ret < 0)
+                       goto error;
+               return put_u64(ed, arg);
+       }
+       case LTTNG_RING_BUFFER_COMPAT_GET_CONTENT_SIZE:
+       {
+               uint64_t cs;
+
+               ret = ops->content_size(config, buf, &cs);
+               if (ret < 0)
+                       goto error;
+               return put_u64(cs, arg);
+       }
+       case LTTNG_RING_BUFFER_COMPAT_GET_PACKET_SIZE:
+       {
+               uint64_t ps;
+
+               ret = ops->packet_size(config, buf, &ps);
+               if (ret < 0)
+                       goto error;
+               return put_u64(ps, arg);
+       }
+       case LTTNG_RING_BUFFER_COMPAT_GET_STREAM_ID:
+       {
+               uint64_t si;
+
+               ret = ops->stream_id(config, buf, &si);
+               if (ret < 0)
+                       goto error;
+               return put_u64(si, arg);
+       }
+       case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
+       {
+               uint64_t ts;
+
+               ret = ops->current_timestamp(config, buf, &ts);
+               if (ret < 0)
+                       goto error;
+               return put_u64(ts, arg);
+       }
+       case LTTNG_RING_BUFFER_COMPAT_GET_SEQ_NUM:
+       {
+               uint64_t seq;
+
+               ret = ops->sequence_number(config, buf, &seq);
+               if (ret < 0)
+                       goto error;
+               return put_u64(seq, arg);
+       }
+       case LTTNG_RING_BUFFER_COMPAT_INSTANCE_ID:
+       {
+               uint64_t id;
+
+               ret = ops->instance_id(config, buf, &id);
+               if (ret < 0)
+                       goto error;
+               return put_u64(id, arg);
+       }
+       default:
+               return lib_ring_buffer_file_operations.compat_ioctl(filp,
+                               cmd, arg);
+       }
+
+error:
+       return -ENOSYS;
+}
+#endif /* CONFIG_COMPAT */
+
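+/*
+ * Populate the LTTng stream file operations from the lib ring buffer
+ * file operations, overriding only the ioctl handlers so that
+ * LTTng-specific commands are handled before delegating to the generic
+ * library.
+ */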
+static void lttng_stream_override_ring_buffer_fops(void)
+{
+       lttng_stream_ring_buffer_file_operations.owner = THIS_MODULE;
+       lttng_stream_ring_buffer_file_operations.open =
+               lib_ring_buffer_file_operations.open;
+       lttng_stream_ring_buffer_file_operations.release =
+               lib_ring_buffer_file_operations.release;
+       lttng_stream_ring_buffer_file_operations.poll =
+               lib_ring_buffer_file_operations.poll;
+       lttng_stream_ring_buffer_file_operations.splice_read =
+               lib_ring_buffer_file_operations.splice_read;
+       lttng_stream_ring_buffer_file_operations.mmap =
+               lib_ring_buffer_file_operations.mmap;
+       lttng_stream_ring_buffer_file_operations.unlocked_ioctl =
+               lttng_stream_ring_buffer_ioctl;
+       lttng_stream_ring_buffer_file_operations.llseek =
+               lib_ring_buffer_file_operations.llseek;
+#ifdef CONFIG_COMPAT
+       lttng_stream_ring_buffer_file_operations.compat_ioctl =
+               lttng_stream_ring_buffer_compat_ioctl;
+#endif
+}
+
+int __init lttng_abi_init(void)
+{
+       int ret = 0;
+
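+       /*
+        * Synchronize the kernel page tables so that vmalloc'd memory,
+        * including module mappings, does not fault lazily when touched
+        * from tracing contexts where faulting is unsafe.
+        */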
+       wrapper_vmalloc_sync_mappings();
+       lttng_clock_ref();
+
+       ret = lttng_tp_mempool_init();
+       if (ret) {
+               goto error;
+       }
+
+       lttng_proc_dentry = proc_create_data("lttng", S_IRUSR | S_IWUSR, NULL,
+                                       &lttng_proc_ops, NULL);
+
+       if (!lttng_proc_dentry) {
+               printk(KERN_ERR "Error creating LTTng control file\n");
+               ret = -ENOMEM;
+               goto error;
+       }
+       lttng_stream_override_ring_buffer_fops();
+       return 0;
+
+error:
+       lttng_tp_mempool_destroy();
+       lttng_clock_unref();
+       return ret;
+}
+
+/* No __exit annotation because it is also used by the init error path. */
+void lttng_abi_exit(void)
+{
+       lttng_tp_mempool_destroy();
+       lttng_clock_unref();
+       if (lttng_proc_dentry)
+               remove_proc_entry("lttng", NULL);
+}
diff --git a/src/lttng-calibrate.c b/src/lttng-calibrate.c
new file mode 100644 (file)
index 0000000..3886319
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-calibrate.c
+ *
+ * LTTng probe calibration.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <lttng/abi.h>
+#include <lttng/events.h>
+
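+/*
+ * Empty function used as a target for kretprobe calibration. The
+ * noinline attribute and the empty asm volatile statement keep the
+ * compiler from inlining or optimizing the call away.
+ */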
+noinline
+void lttng_calibrate_kretprobe(void)
+{
+       asm volatile ("");
+}
+
+int lttng_calibrate(struct lttng_kernel_calibrate *calibrate)
+{
+       switch (calibrate->type) {
+       case LTTNG_KERNEL_CALIBRATE_KRETPROBE:
+               lttng_calibrate_kretprobe();
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
diff --git a/src/lttng-clock.c b/src/lttng-clock.c
new file mode 100644 (file)
index 0000000..7512a3f
--- /dev/null
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-clock.c
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/mutex.h>
+
+#include <wrapper/trace-clock.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+
+struct lttng_trace_clock *lttng_trace_clock;
+EXPORT_SYMBOL_GPL(lttng_trace_clock);
+
+static DEFINE_MUTEX(clock_mutex);
+static struct module *lttng_trace_clock_mod;   /* plugin */
+static int clock_used;                         /* refcount */
+
+int lttng_clock_register_plugin(struct lttng_trace_clock *ltc,
+               struct module *mod)
+{
+       int ret = 0;
+
+       mutex_lock(&clock_mutex);
+       if (clock_used) {
+               ret = -EBUSY;
+               goto end;
+       }
+       if (lttng_trace_clock_mod) {
+               ret = -EEXIST;
+               goto end;
+       }
+       /* set clock */
+       WRITE_ONCE(lttng_trace_clock, ltc);
+       lttng_trace_clock_mod = mod;
+end:
+       mutex_unlock(&clock_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_clock_register_plugin);
+
+void lttng_clock_unregister_plugin(struct lttng_trace_clock *ltc,
+               struct module *mod)
+{
+       mutex_lock(&clock_mutex);
+       WARN_ON_ONCE(clock_used);
+       if (!lttng_trace_clock_mod) {
+               goto end;
+       }
+       WARN_ON_ONCE(lttng_trace_clock_mod != mod);
+
+       WRITE_ONCE(lttng_trace_clock, NULL);
+       lttng_trace_clock_mod = NULL;
+end:
+       mutex_unlock(&clock_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_clock_unregister_plugin);
+
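+/*
+ * Take a reference on the clock plugin module, if one is registered, so
+ * that it cannot be unloaded while the trace clock is in use.
+ */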
+void lttng_clock_ref(void)
+{
+       mutex_lock(&clock_mutex);
+       clock_used++;
+       if (lttng_trace_clock_mod) {
+               int ret;
+
+               ret = try_module_get(lttng_trace_clock_mod);
+               if (!ret) {
+                       printk(KERN_ERR "LTTng-clock cannot get clock plugin module\n");
+                       WRITE_ONCE(lttng_trace_clock, NULL);
+                       lttng_trace_clock_mod = NULL;
+               }
+       }
+       mutex_unlock(&clock_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_clock_ref);
+
+void lttng_clock_unref(void)
+{
+       mutex_lock(&clock_mutex);
+       clock_used--;
+       if (lttng_trace_clock_mod)
+               module_put(lttng_trace_clock_mod);
+       mutex_unlock(&clock_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_clock_unref);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng Clock");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/lttng-context-callstack-legacy-impl.h b/src/lttng-context-callstack-legacy-impl.h
new file mode 100644 (file)
index 0000000..8d78fb9
--- /dev/null
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-callstack-legacy-impl.h
+ *
+ * LTTng callstack event context, legacy implementation. Targets
+ * kernels and architectures not yet using the stacktrace common
+ * infrastructure introduced in the upstream Linux kernel by commit
+ * 214d8ca6ee "stacktrace: Provide common infrastructure" (merged in
+ * Linux 5.2, then gradually introduced within architectures).
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
+ */
+
+#define MAX_ENTRIES 128
+
+enum lttng_cs_ctx_modes {
+       CALLSTACK_KERNEL = 0,
+       CALLSTACK_USER = 1,
+       NR_CALLSTACK_MODES,
+};
+
+struct lttng_cs_dispatch {
+       struct stack_trace stack_trace;
+       unsigned long entries[MAX_ENTRIES];
+};
+
+struct lttng_cs {
+       struct lttng_cs_dispatch dispatch[RING_BUFFER_MAX_NESTING];
+};
+
+struct field_data {
+       struct lttng_cs __percpu *cs_percpu;
+       enum lttng_cs_ctx_modes mode;
+};
+
+struct lttng_cs_type {
+       const char *name;
+       const char *length_name;
+       const char *save_func_name;
+       void (*save_func)(struct stack_trace *trace);
+};
+
+static struct lttng_cs_type cs_types[] = {
+       {
+               .name           = "callstack_kernel",
+               .length_name    = "_callstack_kernel_length",
+               .save_func_name = "save_stack_trace",
+               .save_func      = NULL,
+       },
+       {
+               .name           = "callstack_user",
+               .length_name    = "_callstack_user_length",
+               .save_func_name = "save_stack_trace_user",
+               .save_func      = NULL,
+       },
+};
+
+static
+const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
+{
+       return cs_types[mode].name;
+}
+
+static
+const char *lttng_cs_ctx_mode_length_name(enum lttng_cs_ctx_modes mode)
+{
+       return cs_types[mode].length_name;
+}
+
+static
+int init_type(enum lttng_cs_ctx_modes mode)
+{
+       unsigned long func;
+
+       if (cs_types[mode].save_func)
+               return 0;
+       func = kallsyms_lookup_funcptr(cs_types[mode].save_func_name);
+       if (!func) {
+               printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
+                               cs_types[mode].save_func_name);
+               return -EINVAL;
+       }
+       cs_types[mode].save_func = (void *) func;
+       return 0;
+}
+
+static
+void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
+{
+       int cpu, i;
+
+       for_each_possible_cpu(cpu) {
+               struct lttng_cs *cs;
+
+               cs = per_cpu_ptr(cs_set, cpu);
+               for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
+                       struct lttng_cs_dispatch *dispatch;
+
+                       dispatch = &cs->dispatch[i];
+                       dispatch->stack_trace.entries = dispatch->entries;
+                       dispatch->stack_trace.max_entries = MAX_ENTRIES;
+               }
+       }
+}
+
+/* Keep track of nesting inside userspace callstack context code */
+DEFINE_PER_CPU(int, callstack_user_nesting);
+
+static
+struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
+                                       struct lib_ring_buffer_ctx *ctx)
+{
+       int buffer_nesting, cs_user_nesting;
+       struct lttng_cs *cs;
+       struct field_data *fdata = field->priv;
+
+       /*
+        * Do not gather the userspace callstack context when the event was
+        * triggered by the userspace callstack context saving mechanism.
+        */
+       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+
+       if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
+               return NULL;
+
+       /*
+        * get_cpu() is not required: preemption is already disabled
+        * while the event is written.
+        *
+        * The maximum nesting level is checked in
+        * lib_ring_buffer_get_cpu(). Check it again as a safety net.
+        */
+       cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
+       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+       if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
+               return NULL;
+
+       return &cs->dispatch[buffer_nesting].stack_trace;
+}
+
+static
+size_t lttng_callstack_length_get_size(size_t offset, struct lttng_ctx_field *field,
+                               struct lib_ring_buffer_ctx *ctx,
+                               struct lttng_channel *chan)
+{
+       size_t orig_offset = offset;
+
+       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       offset += sizeof(unsigned int);
+       return offset - orig_offset;
+}
+
+/*
+ * In order to reserve the correct size, the callstack is computed. The
+ * resulting callstack is saved to be accessed in the record step.
+ */
+static
+size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_ctx_field *field,
+                                       struct lib_ring_buffer_ctx *ctx,
+                                       struct lttng_channel *chan)
+{
+       struct stack_trace *trace;
+       struct field_data *fdata = field->priv;
+       size_t orig_offset = offset;
+
+       /* do not write data if no space is available */
+       trace = stack_trace_context(field, ctx);
+       if (unlikely(!trace)) {
+               offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
+               return offset - orig_offset;
+       }
+
+       /* reset stack trace, no need to clear memory */
+       trace->nr_entries = 0;
+
+       if (fdata->mode == CALLSTACK_USER)
+               ++per_cpu(callstack_user_nesting, ctx->cpu);
+
+       /* do the real work and reserve space */
+       cs_types[fdata->mode].save_func(trace);
+
+       if (fdata->mode == CALLSTACK_USER)
+               per_cpu(callstack_user_nesting, ctx->cpu)--;
+
+       /*
+        * Remove the final ULONG_MAX delimiter. If we cannot find it,
+        * the stack is incomplete and we later add our own marker to
+        * show it. This is more compact for a trace.
+        */
+       if (trace->nr_entries > 0
+                       && trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
+               trace->nr_entries--;
+       }
+       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
+       offset += sizeof(unsigned long) * trace->nr_entries;
+       /* Add our own ULONG_MAX delimiter to show incomplete stack. */
+       if (trace->nr_entries == trace->max_entries)
+               offset += sizeof(unsigned long);
+       return offset - orig_offset;
+}
+
+static
+void lttng_callstack_length_record(struct lttng_ctx_field *field,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lttng_channel *chan)
+{
+       struct stack_trace *trace = stack_trace_context(field, ctx);
+       unsigned int nr_seq_entries;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
+       if (unlikely(!trace)) {
+               nr_seq_entries = 0;
+       } else {
+               nr_seq_entries = trace->nr_entries;
+               if (trace->nr_entries == trace->max_entries)
+                       nr_seq_entries++;
+       }
+       chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
+}
+
+static
+void lttng_callstack_sequence_record(struct lttng_ctx_field *field,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lttng_channel *chan)
+{
+       struct stack_trace *trace = stack_trace_context(field, ctx);
+       unsigned int nr_seq_entries;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
+       if (unlikely(!trace)) {
+               return;
+       }
+       nr_seq_entries = trace->nr_entries;
+       if (trace->nr_entries == trace->max_entries)
+               nr_seq_entries++;
+       chan->ops->event_write(ctx, trace->entries,
+                       sizeof(unsigned long) * trace->nr_entries);
+       /* Add our own ULONG_MAX delimiter to show incomplete stack. */
+       if (trace->nr_entries == trace->max_entries) {
+               unsigned long delim = ULONG_MAX;
+
+               chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
+       }
+}
diff --git a/src/lttng-context-callstack-stackwalk-impl.h b/src/lttng-context-callstack-stackwalk-impl.h
new file mode 100644 (file)
index 0000000..42f4273
--- /dev/null
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-callstack-stackwalk-impl.h
+ *
+ * LTTng callstack event context, stackwalk implementation. Targets
+ * kernels and architectures using the stacktrace common infrastructure
+ * introduced in the upstream Linux kernel by commit 214d8ca6ee
+ * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
+ * then gradually introduced within architectures).
+ *
+ * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
+ */
+
+#define MAX_ENTRIES 128
+
+enum lttng_cs_ctx_modes {
+       CALLSTACK_KERNEL = 0,
+       CALLSTACK_USER = 1,
+       NR_CALLSTACK_MODES,
+};
+
+struct lttng_stack_trace {
+       unsigned long entries[MAX_ENTRIES];
+       unsigned int nr_entries;
+};
+
+struct lttng_cs {
+       struct lttng_stack_trace stack_trace[RING_BUFFER_MAX_NESTING];
+};
+
+struct field_data {
+       struct lttng_cs __percpu *cs_percpu;
+       enum lttng_cs_ctx_modes mode;
+};
+
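+/*
+ * The stacktrace save functions are resolved at run time through
+ * kallsyms, since not all of them are exported to modules.
+ */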
+static
+unsigned int (*save_func_kernel)(unsigned long *store, unsigned int size,
+                               unsigned int skipnr);
+static
+unsigned int (*save_func_user)(unsigned long *store, unsigned int size);
+
+static
+const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
+{
+       switch (mode) {
+       case CALLSTACK_KERNEL:
+               return "callstack_kernel";
+       case CALLSTACK_USER:
+               return "callstack_user";
+       default:
+               return NULL;
+       }
+}
+
+static
+const char *lttng_cs_ctx_mode_length_name(enum lttng_cs_ctx_modes mode)
+{
+       switch (mode) {
+       case CALLSTACK_KERNEL:
+               return "_callstack_kernel_length";
+       case CALLSTACK_USER:
+               return "_callstack_user_length";
+       default:
+               return NULL;
+       }
+}
+
+static
+int init_type_callstack_kernel(void)
+{
+       unsigned long func;
+       const char *func_name = "stack_trace_save";
+
+       if (save_func_kernel)
+               return 0;
+       func = kallsyms_lookup_funcptr(func_name);
+       if (!func) {
+               printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
+                               func_name);
+               return -EINVAL;
+       }
+       save_func_kernel = (void *) func;
+       return 0;
+}
+
+static
+int init_type_callstack_user(void)
+{
+       unsigned long func;
+       const char *func_name = "stack_trace_save_user";
+
+       if (save_func_user)
+               return 0;
+       func = kallsyms_lookup_funcptr(func_name);
+       if (!func) {
+               printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
+                               func_name);
+               return -EINVAL;
+       }
+       save_func_user = (void *) func;
+       return 0;
+}
+
+static
+int init_type(enum lttng_cs_ctx_modes mode)
+{
+       switch (mode) {
+       case CALLSTACK_KERNEL:
+               return init_type_callstack_kernel();
+       case CALLSTACK_USER:
+               return init_type_callstack_user();
+       default:
+               return -EINVAL;
+       }
+}
+
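+/*
+ * Nothing to pre-initialize with the stackwalk API: the entries array
+ * is embedded in struct lttng_stack_trace.
+ */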
+static
+void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
+{
+}
+
+/* Keep track of nesting inside userspace callstack context code */
+DEFINE_PER_CPU(int, callstack_user_nesting);
+
+static
+struct lttng_stack_trace *stack_trace_context(struct lttng_ctx_field *field,
+                                       struct lib_ring_buffer_ctx *ctx)
+{
+       int buffer_nesting, cs_user_nesting;
+       struct lttng_cs *cs;
+       struct field_data *fdata = field->priv;
+
+       /*
+        * Do not gather the userspace callstack context when the event was
+        * triggered by the userspace callstack context saving mechanism.
+        */
+       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+
+       if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
+               return NULL;
+
+       /*
+        * get_cpu() is not required: preemption is already disabled
+        * while the event is written.
+        *
+        * The maximum nesting level is checked in
+        * lib_ring_buffer_get_cpu(). Check it again as a safety net.
+        */
+       cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
+       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+       if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
+               return NULL;
+
+       return &cs->stack_trace[buffer_nesting];
+}
+
+static
+size_t lttng_callstack_length_get_size(size_t offset, struct lttng_ctx_field *field,
+                               struct lib_ring_buffer_ctx *ctx,
+                               struct lttng_channel *chan)
+{
+       size_t orig_offset = offset;
+
+       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       offset += sizeof(unsigned int);
+       return offset - orig_offset;
+}
+
+/*
+ * In order to reserve the correct size, the callstack is computed. The
+ * resulting callstack is saved to be accessed in the record step.
+ */
+static
+size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_ctx_field *field,
+                                       struct lib_ring_buffer_ctx *ctx,
+                                       struct lttng_channel *chan)
+{
+       struct lttng_stack_trace *trace;
+       struct field_data *fdata = field->priv;
+       size_t orig_offset = offset;
+
+       /* do not write data if no space is available */
+       trace = stack_trace_context(field, ctx);
+       if (unlikely(!trace)) {
+               offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
+               return offset - orig_offset;
+       }
+
+       /* reset stack trace, no need to clear memory */
+       trace->nr_entries = 0;
+
+       switch (fdata->mode) {
+       case CALLSTACK_KERNEL:
+               /* do the real work and reserve space */
+               trace->nr_entries = save_func_kernel(trace->entries,
+                                               MAX_ENTRIES, 0);
+               break;
+       case CALLSTACK_USER:
+               ++per_cpu(callstack_user_nesting, ctx->cpu);
+               /* do the real work and reserve space */
+               trace->nr_entries = save_func_user(trace->entries,
+                                               MAX_ENTRIES);
+               per_cpu(callstack_user_nesting, ctx->cpu)--;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+       }
+
+       /*
+        * If the array is filled, add our own marker to show that the
+        * stack is incomplete.
+        */
+       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
+       offset += sizeof(unsigned long) * trace->nr_entries;
+       /* Add our own ULONG_MAX delimiter to show incomplete stack. */
+       if (trace->nr_entries == MAX_ENTRIES)
+               offset += sizeof(unsigned long);
+       return offset - orig_offset;
+}
+
+static
+void lttng_callstack_length_record(struct lttng_ctx_field *field,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lttng_channel *chan)
+{
+       struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
+       unsigned int nr_seq_entries;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
+       if (unlikely(!trace)) {
+               nr_seq_entries = 0;
+       } else {
+               nr_seq_entries = trace->nr_entries;
+               if (trace->nr_entries == MAX_ENTRIES)
+                       nr_seq_entries++;
+       }
+       chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
+}
+
+static
+void lttng_callstack_sequence_record(struct lttng_ctx_field *field,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lttng_channel *chan)
+{
+       struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
+       unsigned int nr_seq_entries;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
+       if (unlikely(!trace)) {
+               return;
+       }
+       nr_seq_entries = trace->nr_entries;
+       if (trace->nr_entries == MAX_ENTRIES)
+               nr_seq_entries++;
+       chan->ops->event_write(ctx, trace->entries,
+                       sizeof(unsigned long) * trace->nr_entries);
+       /* Add our own ULONG_MAX delimiter to show incomplete stack. */
+       if (trace->nr_entries == MAX_ENTRIES) {
+               unsigned long delim = ULONG_MAX;
+
+               chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
+       }
+}
diff --git a/src/lttng-context-callstack.c b/src/lttng-context-callstack.c
new file mode 100644 (file)
index 0000000..7b9e651
--- /dev/null
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-callstack.c
+ *
+ * LTTng callstack event context.
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
+ *
+ * The callstack context can be added to any kernel event. It records
+ * either the kernel or the userspace callstack, up to a max depth. The
+ * context is a CTF sequence, such that it uses only the space required
+ * for the number of callstack entries.
+ *
+ * It allocates callstack buffers per-CPU up to 4 interrupt nesting.
+ * This nesting limit is the same as defined in the ring buffer. It
+ * therefore uses a fixed amount of memory, proportional to the number
+ * of CPUs:
+ *
+ *   size = cpus * nest * depth * sizeof(unsigned long)
+ *
+ * With a nesting limit of 4 and a depth of 128, this amounts to
+ * 4 * 128 * 8 = 4096 bytes per CPU on a 64-bit host. The allocation is
+ * done at initialization to avoid memory allocation overhead while
+ * tracing, using a shallow stack.
+ *
+ * The kernel callstack is recovered using save_stack_trace(), and the
+ * userspace callstack uses save_stack_trace_user(). Both rely on frame
+ * pointers. These are usually available for the kernel, but the
+ * compiler option -fomit-frame-pointer, frequently used in popular
+ * Linux distributions, may make the userspace callstack unreliable;
+ * this is a known limitation of the approach. If frame pointers are
+ * not available, no error is produced, but the callstack will be
+ * empty. We still provide the feature, because it works well for
+ * runtime environments that have frame pointers. In the future, unwind
+ * support and/or last branch record may provide a solution to this
+ * problem.
+ *
+ * The symbol name resolution is left to the trace reader.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/utsname.h>
+#include <linux/stacktrace.h>
+#include <linux/spinlock.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <lttng/endian.h>
+#include "wrapper/vmalloc.h"
+
+#ifdef CONFIG_ARCH_STACKWALK
+#include "lttng-context-callstack-stackwalk-impl.h"
+#else
+#include "lttng-context-callstack-legacy-impl.h"
+#endif
+
+static
+void field_data_free(struct field_data *fdata)
+{
+       if (!fdata)
+               return;
+       free_percpu(fdata->cs_percpu);
+       kfree(fdata);
+}
+
+static
+struct field_data __percpu *field_data_create(enum lttng_cs_ctx_modes mode)
+{
+       struct lttng_cs __percpu *cs_set;
+       struct field_data *fdata;
+
+       fdata = kzalloc(sizeof(*fdata), GFP_KERNEL);
+       if (!fdata)
+               return NULL;
+       cs_set = alloc_percpu(struct lttng_cs);
+       if (!cs_set)
+               goto error_alloc;
+       lttng_cs_set_init(cs_set);
+       fdata->cs_percpu = cs_set;
+       fdata->mode = mode;
+       return fdata;
+
+error_alloc:
+       field_data_free(fdata);
+       return NULL;
+}
+
+static
+void lttng_callstack_sequence_destroy(struct lttng_ctx_field *field)
+{
+       struct field_data *fdata = field->priv;
+
+       field_data_free(fdata);
+}
+
+static const struct lttng_type sequence_elem_type =
+       __type_integer(unsigned long, 0, 0, -1, __BYTE_ORDER, 16, none);
+
+static
+int __lttng_add_callstack_generic(struct lttng_ctx **ctx,
+               enum lttng_cs_ctx_modes mode)
+{
+       const char *ctx_name = lttng_cs_ctx_mode_name(mode);
+       const char *ctx_length_name = lttng_cs_ctx_mode_length_name(mode);
+       struct lttng_ctx_field *length_field, *sequence_field;
+       struct lttng_event_field *field;
+       struct field_data *fdata;
+       int ret;
+
+       ret = init_type(mode);
+       if (ret)
+               return ret;
+       length_field = lttng_append_context(ctx);
+       if (!length_field)
+               return -ENOMEM;
+       sequence_field = lttng_append_context(ctx);
+       if (!sequence_field) {
+               lttng_remove_context_field(ctx, length_field);
+               return -ENOMEM;
+       }
+       if (lttng_find_context(*ctx, ctx_name)) {
+               ret = -EEXIST;
+               goto error_find;
+       }
+       fdata = field_data_create(mode);
+       if (!fdata) {
+               ret = -ENOMEM;
+               goto error_create;
+       }
+
+       field = &length_field->event_field;
+       field->name = ctx_length_name;
+       field->type.atype = atype_integer;
+       field->type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
+       field->type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
+       field->type.u.integer.signedness = lttng_is_signed_type(unsigned int);
+       field->type.u.integer.reverse_byte_order = 0;
+       field->type.u.integer.base = 10;
+       field->type.u.integer.encoding = lttng_encode_none;
+       length_field->get_size_arg = lttng_callstack_length_get_size;
+       length_field->record = lttng_callstack_length_record;
+
+       field = &sequence_field->event_field;
+       field->name = ctx_name;
+       field->type.atype = atype_sequence_nestable;
+       field->type.u.sequence_nestable.elem_type = &sequence_elem_type;
+       field->type.u.sequence_nestable.alignment = 0;
+       sequence_field->get_size_arg = lttng_callstack_sequence_get_size;
+       sequence_field->record = lttng_callstack_sequence_record;
+       sequence_field->priv = fdata;
+       sequence_field->destroy = lttng_callstack_sequence_destroy;
+
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+
+error_create:
+       field_data_free(fdata);
+error_find:
+       lttng_remove_context_field(ctx, sequence_field);
+       lttng_remove_context_field(ctx, length_field);
+       return ret;
+}
+
+/**
+ *     lttng_add_callstack_to_ctx - add callstack event context
+ *
+ *     @ctx: the lttng_ctx pointer to initialize
+ *     @type: the context type
+ *
+ *     Supported callstack types:
+ *     LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL
+ *             Records the callstack of the kernel
+ *     LTTNG_KERNEL_CONTEXT_CALLSTACK_USER
+ *             Records the callstack of the userspace program (from the kernel)
+ *
+ * Return 0 on success, or a negative error code.
+ */
+int lttng_add_callstack_to_ctx(struct lttng_ctx **ctx, int type)
+{
+       switch (type) {
+       case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
+               return __lttng_add_callstack_generic(ctx, CALLSTACK_KERNEL);
+#ifdef CONFIG_X86
+       case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
+               return __lttng_add_callstack_generic(ctx, CALLSTACK_USER);
+#endif
+       default:
+               return -EINVAL;
+       }
+}
+EXPORT_SYMBOL_GPL(lttng_add_callstack_to_ctx);
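
To make the header comment's fixed-cost claim concrete, the per-CPU arithmetic for the documented configuration can be written as a quick sanity check (a sketch only; the real nesting and depth constants live in the callstack implementation headers):

    /* 4 nesting levels * depth 128 * 8-byte entries = 4096 bytes per CPU. */
    #define CS_NESTING      4
    #define CS_DEPTH        128

    _Static_assert(CS_NESTING * CS_DEPTH * sizeof(unsigned long) == 4096,
                   "4 KiB per CPU on a 64-bit host, as documented");
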
diff --git a/src/lttng-context-cgroup-ns.c b/src/lttng-context-cgroup-ns.c
new file mode 100644 (file)
index 0000000..27f00f6
--- /dev/null
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-cgroup-ns.c
+ *
+ * LTTng cgroup namespace context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/cgroup.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/namespace.h>
+#include <lttng/tracer.h>
+
+#if defined(CONFIG_CGROUPS) && \
+       ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) || \
+        LTTNG_UBUNTU_KERNEL_RANGE(4,4,0,0, 4,5,0,0))
+
+static
+size_t cgroup_ns_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       size += sizeof(unsigned int);
+       return size;
+}
+
+static
+void cgroup_ns_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       unsigned int cgroup_ns_inum = 0;
+
+       /*
+        * nsproxy can be NULL when the task is scheduled out during exit.
+        *
+        * As documented in the 'linux/nsproxy.h' namespace access rules,
+        * no precautions are needed when accessing the current task's
+        * namespaces; just dereference the pointers.
+        */
+       if (current->nsproxy)
+               cgroup_ns_inum = current->nsproxy->cgroup_ns->lttng_ns_inum;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(cgroup_ns_inum));
+       chan->ops->event_write(ctx, &cgroup_ns_inum, sizeof(cgroup_ns_inum));
+}
+
+static
+void cgroup_ns_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       unsigned int cgroup_ns_inum = 0;
+
+       /*
+        * nsproxy can be NULL when the task is scheduled out during exit.
+        *
+        * As documented in the 'linux/nsproxy.h' namespace access rules,
+        * no precautions are needed when accessing the current task's
+        * namespaces; just dereference the pointers.
+        */
+       if (current->nsproxy)
+               cgroup_ns_inum = current->nsproxy->cgroup_ns->lttng_ns_inum;
+
+       value->s64 = cgroup_ns_inum;
+}
+
+int lttng_add_cgroup_ns_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "cgroup_ns")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "cgroup_ns";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = cgroup_ns_get_size;
+       field->record = cgroup_ns_record;
+       field->get_value = cgroup_ns_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_cgroup_ns_to_ctx);
+
+#endif
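
The cgroup namespace file above is the template that every context source below follows: append a field, reject duplicates by name, describe the CTF integer type, wire the get_size/record/get_value callbacks, then update the context and sync the kernel's vmalloc mappings before tracing touches module memory. A condensed sketch of that pattern, using a hypothetical "example" field name (not part of this patch):

    int lttng_add_example_to_ctx(struct lttng_ctx **ctx)
    {
            struct lttng_ctx_field *field;

            field = lttng_append_context(ctx);         /* reserve a slot */
            if (!field)
                    return -ENOMEM;
            if (lttng_find_context(*ctx, "example")) { /* no duplicates */
                    lttng_remove_context_field(ctx, field);
                    return -EEXIST;
            }
            field->event_field.name = "example";
            /* ... fill in the CTF type description here ... */
            field->get_size = example_get_size;   /* size at a given offset */
            field->record = example_record;       /* write to ring buffer */
            field->get_value = example_get_value; /* read the value directly */
            lttng_context_update(*ctx);
            wrapper_vmalloc_sync_mappings();
            return 0;
    }
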
diff --git a/src/lttng-context-cpu-id.c b/src/lttng-context-cpu-id.c
new file mode 100644 (file)
index 0000000..498dfcf
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-cpu-id.c
+ *
+ * LTTng CPU id context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+static
+size_t cpu_id_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(int));
+       size += sizeof(int);
+       return size;
+}
+
+static
+void cpu_id_record(struct lttng_ctx_field *field,
+               struct lib_ring_buffer_ctx *ctx,
+               struct lttng_channel *chan)
+{
+       int cpu;
+
+       cpu = ctx->cpu;
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(cpu));
+       chan->ops->event_write(ctx, &cpu, sizeof(cpu));
+}
+
+static
+void cpu_id_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = smp_processor_id();
+}
+
+int lttng_add_cpu_id_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "cpu_id")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "cpu_id";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(int) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(int) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(int);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = cpu_id_get_size;
+       field->record = cpu_id_record;
+       field->get_value = cpu_id_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_cpu_id_to_ctx);
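
Note the asymmetry in the cpu_id callbacks above: record() reads ctx->cpu, the CPU that owns the ring-buffer slot being written, while get_value() falls back to smp_processor_id() because it can run without a ring-buffer context. A hypothetical caller sketch for the add-to-context API (the double pointer lets the helpers grow the context chain in place):

    static int attach_cpu_id(struct lttng_ctx **ctx)
    {
            int ret;

            ret = lttng_add_cpu_id_to_ctx(ctx);
            if (ret == -EEXIST)
                    return 0;   /* field already attached; treat as success */
            return ret;
    }
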
diff --git a/src/lttng-context-egid.c b/src/lttng-context-egid.c
new file mode 100644 (file)
index 0000000..e649fec
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-egid.c
+ *
+ * LTTng effective group ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t egid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
+       size += sizeof(gid_t);
+       return size;
+}
+
+static
+void egid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       gid_t egid;
+
+       egid = lttng_current_egid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(egid));
+       chan->ops->event_write(ctx, &egid, sizeof(egid));
+}
+
+static
+void egid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_egid();
+}
+
+int lttng_add_egid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "egid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "egid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = egid_get_size;
+       field->record = egid_record;
+       field->get_value = egid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_egid_to_ctx);
diff --git a/src/lttng-context-euid.c b/src/lttng-context-euid.c
new file mode 100644 (file)
index 0000000..79faf3a
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-euid.c
+ *
+ * LTTng effective user ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t euid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
+       size += sizeof(uid_t);
+       return size;
+}
+
+static
+void euid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       uid_t euid;
+
+       euid = lttng_current_euid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(euid));
+       chan->ops->event_write(ctx, &euid, sizeof(euid));
+}
+
+static
+void euid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_euid();
+}
+
+int lttng_add_euid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "euid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "euid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = euid_get_size;
+       field->record = euid_record;
+       field->get_value = euid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_euid_to_ctx);
diff --git a/src/lttng-context-gid.c b/src/lttng-context-gid.c
new file mode 100644 (file)
index 0000000..5620469
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-gid.c
+ *
+ * LTTng real group ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t gid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
+       size += sizeof(gid_t);
+       return size;
+}
+
+static
+void gid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       gid_t gid;
+
+       gid = lttng_current_gid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(gid));
+       chan->ops->event_write(ctx, &gid, sizeof(gid));
+}
+
+static
+void gid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_gid();
+}
+
+int lttng_add_gid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "gid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "gid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = gid_get_size;
+       field->record = gid_record;
+       field->get_value = gid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_gid_to_ctx);
diff --git a/src/lttng-context-hostname.c b/src/lttng-context-hostname.c
new file mode 100644 (file)
index 0000000..86c5d02
--- /dev/null
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-hostname.c
+ *
+ * LTTng hostname context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/utsname.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+#define LTTNG_HOSTNAME_CTX_LEN (__NEW_UTS_LEN + 1)
+
+static
+size_t hostname_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += LTTNG_HOSTNAME_CTX_LEN;
+       return size;
+}
+
+static
+void hostname_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       struct nsproxy *nsproxy;
+       struct uts_namespace *ns;
+       char *hostname;
+
+       /*
+        * No need to take the RCU read-side lock to read current
+        * nsproxy. (documented in nsproxy.h)
+        */
+       nsproxy = current->nsproxy;
+       if (nsproxy) {
+               ns = nsproxy->uts_ns;
+               hostname = ns->name.nodename;
+               chan->ops->event_write(ctx, hostname,
+                               LTTNG_HOSTNAME_CTX_LEN);
+       } else {
+               chan->ops->event_memset(ctx, 0,
+                               LTTNG_HOSTNAME_CTX_LEN);
+       }
+}
+
+static
+void hostname_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       struct nsproxy *nsproxy;
+       struct uts_namespace *ns;
+       char *hostname;
+
+       /*
+        * No need to take the RCU read-side lock to read current
+        * nsproxy. (documented in nsproxy.h)
+        */
+       nsproxy = current->nsproxy;
+       if (nsproxy) {
+               ns = nsproxy->uts_ns;
+               hostname = ns->name.nodename;
+       } else {
+               hostname = "";
+       }
+       value->str = hostname;
+}
+
+static const struct lttng_type hostname_array_elem_type =
+       __type_integer(char, 0, 0, -1, __BYTE_ORDER, 10, UTF8);
+
+int lttng_add_hostname_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "hostname")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "hostname";
+       field->event_field.type.atype = atype_array_nestable;
+       field->event_field.type.u.array_nestable.elem_type =
+               &hostname_array_elem_type;
+       field->event_field.type.u.array_nestable.length = LTTNG_HOSTNAME_CTX_LEN;
+       field->event_field.type.u.array_nestable.alignment = 0;
+
+       field->get_size = hostname_get_size;
+       field->record = hostname_record;
+       field->get_value = hostname_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_hostname_to_ctx);
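
Unlike the integer contexts around it, hostname is recorded as a fixed-length character array of LTTNG_HOSTNAME_CTX_LEN bytes, which is why hostname_get_size() adds no alignment padding: a char array aligns on one byte. A sketch of how another fixed-length text field could be declared with the same shape (hypothetical names, not part of this patch):

    static const struct lttng_type example_text_elem_type =
            __type_integer(char, 0, 0, -1, __BYTE_ORDER, 10, UTF8);

    static void example_text_describe(struct lttng_ctx_field *field)
    {
            field->event_field.type.atype = atype_array_nestable;
            field->event_field.type.u.array_nestable.elem_type =
                    &example_text_elem_type;
            field->event_field.type.u.array_nestable.length = 16; /* bytes */
            field->event_field.type.u.array_nestable.alignment = 0;
    }
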
diff --git a/src/lttng-context-interruptible.c b/src/lttng-context-interruptible.c
new file mode 100644 (file)
index 0000000..9fbf266
--- /dev/null
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-interruptible.c
+ *
+ * LTTng interruptible context.
+ *
+ * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/irqflags.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+/*
+ * Interruptible at value -1 means "unknown".
+ */
+
+static
+size_t interruptible_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(int8_t));
+       size += sizeof(int8_t);
+       return size;
+}
+
+static
+void interruptible_record(struct lttng_ctx_field *field,
+               struct lib_ring_buffer_ctx *ctx,
+               struct lttng_channel *chan)
+{
+       struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+       int8_t interruptible = lttng_probe_ctx->interruptible;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(interruptible));
+       chan->ops->event_write(ctx, &interruptible, sizeof(interruptible));
+}
+
+static
+void interruptible_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       int8_t interruptible = lttng_probe_ctx->interruptible;
+
+       value->s64 = interruptible;
+}
+
+int lttng_add_interruptible_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "interruptible")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "interruptible";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(int8_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(int8_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(int8_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = interruptible_get_size;
+       field->record = interruptible_record;
+       field->get_value = interruptible_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_interruptible_to_ctx);
diff --git a/src/lttng-context-ipc-ns.c b/src/lttng-context-ipc-ns.c
new file mode 100644 (file)
index 0000000..a112922
--- /dev/null
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-ipc-ns.c
+ *
+ * LTTng ipc namespace context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/ipc_namespace.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/namespace.h>
+#include <lttng/tracer.h>
+
+#if defined(CONFIG_IPC_NS) && \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+
+static
+size_t ipc_ns_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       size += sizeof(unsigned int);
+       return size;
+}
+
+static
+void ipc_ns_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       unsigned int ipc_ns_inum = 0;
+
+       /*
+        * nsproxy can be NULL when the task is scheduled out during exit.
+        *
+        * As documented in the 'linux/nsproxy.h' namespace access rules,
+        * no precautions are needed when accessing the current task's
+        * namespaces; just dereference the pointers.
+        */
+       if (current->nsproxy)
+               ipc_ns_inum = current->nsproxy->ipc_ns->lttng_ns_inum;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(ipc_ns_inum));
+       chan->ops->event_write(ctx, &ipc_ns_inum, sizeof(ipc_ns_inum));
+}
+
+static
+void ipc_ns_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       unsigned int ipc_ns_inum = 0;
+
+       /*
+        * nsproxy can be NULL when the task is scheduled out during exit.
+        *
+        * As documented in the 'linux/nsproxy.h' namespace access rules,
+        * no precautions are needed when accessing the current task's
+        * namespaces; just dereference the pointers.
+        */
+       if (current->nsproxy)
+               ipc_ns_inum = current->nsproxy->ipc_ns->lttng_ns_inum;
+
+       value->s64 = ipc_ns_inum;
+}
+
+int lttng_add_ipc_ns_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "ipc_ns")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "ipc_ns";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = ipc_ns_get_size;
+       field->record = ipc_ns_record;
+       field->get_value = ipc_ns_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_ipc_ns_to_ctx);
+
+#endif
diff --git a/src/lttng-context-migratable.c b/src/lttng-context-migratable.c
new file mode 100644 (file)
index 0000000..207e02f
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-migratable.c
+ *
+ * LTTng migratable context.
+ *
+ * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/irqflags.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+static
+size_t migratable_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(uint8_t));
+       size += sizeof(uint8_t);
+       return size;
+}
+
+static
+void migratable_record(struct lttng_ctx_field *field,
+               struct lib_ring_buffer_ctx *ctx,
+               struct lttng_channel *chan)
+{
+       uint8_t migratable = !current->migrate_disable;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(migratable));
+       chan->ops->event_write(ctx, &migratable, sizeof(migratable));
+}
+
+static
+void migratable_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = !current->migrate_disable;
+}
+
+int lttng_add_migratable_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "migratable")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "migratable";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(uint8_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(uint8_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uint8_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = migratable_get_size;
+       field->record = migratable_record;
+       field->get_value = migratable_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_migratable_to_ctx);
diff --git a/src/lttng-context-mnt-ns.c b/src/lttng-context-mnt-ns.c
new file mode 100644 (file)
index 0000000..7fce5dd
--- /dev/null
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-mnt-ns.c
+ *
+ * LTTng mount namespace context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <linux/nsproxy.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/namespace.h>
+#include <lttng/tracer.h>
+
+#if !defined(LTTNG_MNT_NS_MISSING_HEADER) && \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+
+#include <../fs/mount.h>
+
+static
+size_t mnt_ns_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       size += sizeof(unsigned int);
+       return size;
+}
+
+static
+void mnt_ns_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       unsigned int mnt_ns_inum = 0;
+
+       /*
+        * nsproxy can be NULL when the task is scheduled out during exit.
+        *
+        * As documented in the 'linux/nsproxy.h' namespace access rules,
+        * no precautions are needed when accessing the current task's
+        * namespaces; just dereference the pointers.
+        */
+       if (current->nsproxy)
+               mnt_ns_inum = current->nsproxy->mnt_ns->lttng_ns_inum;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(mnt_ns_inum));
+       chan->ops->event_write(ctx, &mnt_ns_inum, sizeof(mnt_ns_inum));
+}
+
+static
+void mnt_ns_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       unsigned int mnt_ns_inum = 0;
+
+       /*
+        * nsproxy can be NULL when the task is scheduled out during exit.
+        *
+        * As documented in the 'linux/nsproxy.h' namespace access rules,
+        * no precautions are needed when accessing the current task's
+        * namespaces; just dereference the pointers.
+        */
+       if (current->nsproxy)
+               mnt_ns_inum = current->nsproxy->mnt_ns->lttng_ns_inum;
+
+       value->s64 = mnt_ns_inum;
+}
+
+int lttng_add_mnt_ns_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "mnt_ns")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "mnt_ns";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = mnt_ns_get_size;
+       field->record = mnt_ns_record;
+       field->get_value = mnt_ns_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_mnt_ns_to_ctx);
+
+#endif
diff --git a/src/lttng-context-need-reschedule.c b/src/lttng-context-need-reschedule.c
new file mode 100644 (file)
index 0000000..7f8deec
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-need-reschedule.c
+ *
+ * LTTng need_reschedule context.
+ *
+ * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/irqflags.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+static
+size_t need_reschedule_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(uint8_t));
+       size += sizeof(uint8_t);
+       return size;
+}
+
+static
+void need_reschedule_record(struct lttng_ctx_field *field,
+               struct lib_ring_buffer_ctx *ctx,
+               struct lttng_channel *chan)
+{
+       uint8_t need_reschedule = test_tsk_need_resched(current);
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(need_reschedule));
+       chan->ops->event_write(ctx, &need_reschedule, sizeof(need_reschedule));
+}
+
+static
+void need_reschedule_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = test_tsk_need_resched(current);
+}
+
+int lttng_add_need_reschedule_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "need_reschedule")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "need_reschedule";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(uint8_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(uint8_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uint8_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = need_reschedule_get_size;
+       field->record = need_reschedule_record;
+       field->get_value = need_reschedule_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_need_reschedule_to_ctx);
diff --git a/src/lttng-context-net-ns.c b/src/lttng-context-net-ns.c
new file mode 100644 (file)
index 0000000..879a61b
--- /dev/null
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-net-ns.c
+ *
+ * LTTng net namespace context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/namespace.h>
+#include <lttng/tracer.h>
+
+#if defined(CONFIG_NET_NS) && \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+
+static
+size_t net_ns_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       size += sizeof(unsigned int);
+       return size;
+}
+
+static
+void net_ns_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       unsigned int net_ns_inum = 0;
+
+       /*
+        * nsproxy can be NULL when the task is scheduled out during exit.
+        *
+        * As documented in the 'linux/nsproxy.h' namespace access rules,
+        * no precautions are needed when accessing the current task's
+        * namespaces; just dereference the pointers.
+        */
+       if (current->nsproxy)
+               net_ns_inum = current->nsproxy->net_ns->lttng_ns_inum;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(net_ns_inum));
+       chan->ops->event_write(ctx, &net_ns_inum, sizeof(net_ns_inum));
+}
+
+static
+void net_ns_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       unsigned int net_ns_inum = 0;
+
+       /*
+        * nsproxy can be NULL when the task is scheduled out during exit.
+        *
+        * As documented in the 'linux/nsproxy.h' namespace access rules,
+        * no precautions are needed when accessing the current task's
+        * namespaces; just dereference the pointers.
+        */
+       if (current->nsproxy)
+               net_ns_inum = current->nsproxy->net_ns->lttng_ns_inum;
+
+       value->s64 = net_ns_inum;
+}
+
+int lttng_add_net_ns_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "net_ns")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "net_ns";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = net_ns_get_size;
+       field->record = net_ns_record;
+       field->get_value = net_ns_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_net_ns_to_ctx);
+
+#endif
diff --git a/src/lttng-context-nice.c b/src/lttng-context-nice.c
new file mode 100644 (file)
index 0000000..aaa3643
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-nice.c
+ *
+ * LTTng nice context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+static
+size_t nice_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(int));
+       size += sizeof(int);
+       return size;
+}
+
+static
+void nice_record(struct lttng_ctx_field *field,
+               struct lib_ring_buffer_ctx *ctx,
+               struct lttng_channel *chan)
+{
+       int nice;
+
+       nice = task_nice(current);
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(nice));
+       chan->ops->event_write(ctx, &nice, sizeof(nice));
+}
+
+static
+void nice_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = task_nice(current);
+}
+
+int lttng_add_nice_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "nice")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "nice";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(int) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(int) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(int);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = nice_get_size;
+       field->record = nice_record;
+       field->get_value = nice_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_nice_to_ctx);
diff --git a/src/lttng-context-perf-counters.c b/src/lttng-context-perf-counters.c
new file mode 100644 (file)
index 0000000..5784f75
--- /dev/null
@@ -0,0 +1,364 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-perf-counters.c
+ *
+ * LTTng performance monitoring counters (perf-counters) integration module.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/perf_event.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/cpu.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/perf.h>
+#include <lttng/tracer.h>
+
+static
+size_t perf_counter_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
+       size += sizeof(uint64_t);
+       return size;
+}
+
+static
+void perf_counter_record(struct lttng_ctx_field *field,
+                        struct lib_ring_buffer_ctx *ctx,
+                        struct lttng_channel *chan)
+{
+       struct perf_event *event;
+       uint64_t value;
+
+       event = field->u.perf_counter->e[ctx->cpu];
+       if (likely(event)) {
+               if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
+                       value = 0;
+               } else {
+                       event->pmu->read(event);
+                       value = local64_read(&event->count);
+               }
+       } else {
+               /*
+                * Perf chooses not to be clever and not to support enabling a
+                * perf counter before the cpu is brought up. Therefore, we need
+                * to support having events coming in (e.g. scheduler events)
+                * before the counter is set up. Write an arbitrary 0 in this
+                * case.
+                */
+               value = 0;
+       }
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
+       chan->ops->event_write(ctx, &value, sizeof(value));
+}
+
+#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
+static
+void overflow_callback(struct perf_event *event,
+                      struct perf_sample_data *data,
+                      struct pt_regs *regs)
+{
+}
+#else
+static
+void overflow_callback(struct perf_event *event, int nmi,
+                      struct perf_sample_data *data,
+                      struct pt_regs *regs)
+{
+}
+#endif
+
+static
+void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
+{
+       struct perf_event **events = field->u.perf_counter->e;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+       {
+               int ret;
+
+               ret = cpuhp_state_remove_instance(lttng_hp_online,
+                       &field->u.perf_counter->cpuhp_online.node);
+               WARN_ON(ret);
+               ret = cpuhp_state_remove_instance(lttng_hp_prepare,
+                       &field->u.perf_counter->cpuhp_prepare.node);
+               WARN_ON(ret);
+       }
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+       {
+               int cpu;
+
+               get_online_cpus();
+               for_each_online_cpu(cpu)
+                       perf_event_release_kernel(events[cpu]);
+               put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+               unregister_cpu_notifier(&field->u.perf_counter->nb);
+#endif
+       }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+       kfree(field->event_field.name);
+       kfree(field->u.perf_counter->attr);
+       lttng_kvfree(events);
+       kfree(field->u.perf_counter);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+int lttng_cpuhp_perf_counter_online(unsigned int cpu,
+               struct lttng_cpuhp_node *node)
+{
+       struct lttng_perf_counter_field *perf_field =
+               container_of(node, struct lttng_perf_counter_field,
+                               cpuhp_online);
+       struct perf_event **events = perf_field->e;
+       struct perf_event_attr *attr = perf_field->attr;
+       struct perf_event *pevent;
+
+       pevent = wrapper_perf_event_create_kernel_counter(attr,
+                       cpu, NULL, overflow_callback);
+       if (!pevent || IS_ERR(pevent))
+               return -EINVAL;
+       if (pevent->state == PERF_EVENT_STATE_ERROR) {
+               perf_event_release_kernel(pevent);
+               return -EINVAL;
+       }
+       barrier();      /* Create perf counter before setting event */
+       events[cpu] = pevent;
+       return 0;
+}
+
+int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
+               struct lttng_cpuhp_node *node)
+{
+       struct lttng_perf_counter_field *perf_field =
+               container_of(node, struct lttng_perf_counter_field,
+                               cpuhp_prepare);
+       struct perf_event **events = perf_field->e;
+       struct perf_event *pevent;
+
+       pevent = events[cpu];
+       events[cpu] = NULL;
+       barrier();      /* NULLify event before perf counter teardown */
+       perf_event_release_kernel(pevent);
+       return 0;
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/**
+ *     lttng_perf_counter_hp_callback - CPU hotplug callback
+ *     @nb: notifier block
+ *     @action: hotplug action to take
+ *     @hcpu: CPU number
+ *
+ *     Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
+ *
+ * We can set up perf counters when the cpu is online (up prepare seems to be too
+ * soon).
+ */
+static
+int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
+                                                unsigned long action,
+                                                void *hcpu)
+{
+       unsigned int cpu = (unsigned long) hcpu;
+       struct lttng_perf_counter_field *perf_field =
+               container_of(nb, struct lttng_perf_counter_field, nb);
+       struct perf_event **events = perf_field->e;
+       struct perf_event_attr *attr = perf_field->attr;
+       struct perf_event *pevent;
+
+       if (!perf_field->hp_enable)
+               return NOTIFY_OK;
+
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               pevent = wrapper_perf_event_create_kernel_counter(attr,
+                               cpu, NULL, overflow_callback);
+               if (!pevent || IS_ERR(pevent))
+                       return NOTIFY_BAD;
+               if (pevent->state == PERF_EVENT_STATE_ERROR) {
+                       perf_event_release_kernel(pevent);
+                       return NOTIFY_BAD;
+               }
+               barrier();      /* Create perf counter before setting event */
+               events[cpu] = pevent;
+               break;
+       case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               pevent = events[cpu];
+               events[cpu] = NULL;
+               barrier();      /* NULLify event before perf counter teardown */
+               perf_event_release_kernel(pevent);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+#endif
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+int lttng_add_perf_counter_to_ctx(uint32_t type,
+                                 uint64_t config,
+                                 const char *name,
+                                 struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+       struct lttng_perf_counter_field *perf_field;
+       struct perf_event **events;
+       struct perf_event_attr *attr;
+       int ret;
+       char *name_alloc;
+
+       events = lttng_kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
+       if (!events)
+               return -ENOMEM;
+
+       attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
+       if (!attr) {
+               ret = -ENOMEM;
+               goto error_attr;
+       }
+
+       attr->type = type;
+       attr->config = config;
+       attr->size = sizeof(struct perf_event_attr);
+       attr->pinned = 1;
+       attr->disabled = 0;
+
+       perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
+       if (!perf_field) {
+               ret = -ENOMEM;
+               goto error_alloc_perf_field;
+       }
+       perf_field->e = events;
+       perf_field->attr = attr;
+
+       name_alloc = kstrdup(name, GFP_KERNEL);
+       if (!name_alloc) {
+               ret = -ENOMEM;
+               goto name_alloc_error;
+       }
+
+       field = lttng_append_context(ctx);
+       if (!field) {
+               ret = -ENOMEM;
+               goto append_context_error;
+       }
+       if (lttng_find_context(*ctx, name_alloc)) {
+               ret = -EEXIST;
+               goto find_error;
+       }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+       perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
+       ret = cpuhp_state_add_instance(lttng_hp_prepare,
+               &perf_field->cpuhp_prepare.node);
+       if (ret)
+               goto cpuhp_prepare_error;
+
+       perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
+       ret = cpuhp_state_add_instance(lttng_hp_online,
+               &perf_field->cpuhp_online.node);
+       if (ret)
+               goto cpuhp_online_error;
+
+#else  /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+       {
+               int cpu;
+
+#ifdef CONFIG_HOTPLUG_CPU
+               perf_field->nb.notifier_call =
+                       lttng_perf_counter_cpu_hp_callback;
+               perf_field->nb.priority = 0;
+               register_cpu_notifier(&perf_field->nb);
+#endif
+               get_online_cpus();
+               for_each_online_cpu(cpu) {
+                       events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
+                                               cpu, NULL, overflow_callback);
+                       if (!events[cpu] || IS_ERR(events[cpu])) {
+                               ret = -EINVAL;
+                               goto counter_error;
+                       }
+                       if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
+                               ret = -EBUSY;
+                               goto counter_busy;
+                       }
+               }
+               put_online_cpus();
+               perf_field->hp_enable = 1;
+       }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+       field->destroy = lttng_destroy_perf_counter_field;
+
+       field->event_field.name = name_alloc;
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(uint64_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uint64_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = perf_counter_get_size;
+       field->record = perf_counter_record;
+       field->u.perf_counter = perf_field;
+       lttng_context_update(*ctx);
+
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+cpuhp_online_error:
+       {
+               int remove_ret;
+
+               remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
+                               &perf_field->cpuhp_prepare.node);
+               WARN_ON(remove_ret);
+       }
+cpuhp_prepare_error:
+#else  /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+counter_busy:
+counter_error:
+       {
+               int cpu;
+
+               for_each_online_cpu(cpu) {
+                       if (events[cpu] && !IS_ERR(events[cpu]))
+                               perf_event_release_kernel(events[cpu]);
+               }
+               put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+               unregister_cpu_notifier(&perf_field->nb);
+#endif
+       }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+find_error:
+       lttng_remove_context_field(ctx, field);
+append_context_error:
+       kfree(name_alloc);
+name_alloc_error:
+       kfree(perf_field);
+error_alloc_perf_field:
+       kfree(attr);
+error_attr:
+       lttng_kvfree(events);
+       return ret;
+}
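
The perf-counter context is the most involved of the set: it keeps one pinned kernel counter per possible CPU, creates them either through CPU-hotplug state instances (4.10+) or an online-CPU walk plus notifier (older kernels), and samples them in perf_counter_record(). The read path, isolated as a sketch for clarity (assumes a valid event that is not in PERF_EVENT_STATE_ERROR):

    static uint64_t read_counter_on_this_cpu(struct perf_event *event)
    {
            /* Ask the PMU driver to refresh the count, then read it. */
            event->pmu->read(event);
            return local64_read(&event->count);
    }
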
diff --git a/src/lttng-context-pid-ns.c b/src/lttng-context-pid-ns.c
new file mode 100644 (file)
index 0000000..721485d
--- /dev/null
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-pid-ns.c
+ *
+ * LTTng pid namespace context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/pid_namespace.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/namespace.h>
+#include <lttng/tracer.h>
+
+#if defined(CONFIG_PID_NS) && \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+
+static
+size_t pid_ns_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       size += sizeof(unsigned int);
+       return size;
+}
+
+static
+void pid_ns_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       struct pid_namespace *ns;
+       unsigned int pid_ns_inum = 0;
+
+       /*
+        * The pid namespace is an exception -- it's accessed using
+        * task_active_pid_ns. The pid namespace in nsproxy is the
+        * namespace that children will use.
+        */
+       ns = task_active_pid_ns(current);
+
+       if (ns)
+               pid_ns_inum = ns->lttng_ns_inum;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(pid_ns_inum));
+       chan->ops->event_write(ctx, &pid_ns_inum, sizeof(pid_ns_inum));
+}
+
+static
+void pid_ns_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       struct pid_namespace *ns;
+       unsigned int pid_ns_inum = 0;
+
+       /*
+	 * The pid namespace is an exception: it is read through
+	 * task_active_pid_ns(), because the pid namespace held in
+	 * nsproxy is the one that child processes will be created in.
+        */
+       ns = task_active_pid_ns(current);
+
+       if (ns)
+               pid_ns_inum = ns->lttng_ns_inum;
+
+       value->s64 = pid_ns_inum;
+}
+
+int lttng_add_pid_ns_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "pid_ns")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "pid_ns";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = pid_ns_get_size;
+       field->record = pid_ns_record;
+       field->get_value = pid_ns_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_pid_ns_to_ctx);
+
+#endif
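The comment in pid_ns_record() deserves unpacking, since the pid namespace is the only one not read from nsproxy. A sketch of the distinction, assuming a kernel recent enough (3.11+) to name the nsproxy field pid_ns_for_children:

#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/sched.h>

static void example_pid_ns_views(void)
{
	/* Namespace the current task's own pids live in. */
	struct pid_namespace *active = task_active_pid_ns(current);
	/* Namespace future children of current will be created in;
	 * differs from 'active' between unshare(CLONE_NEWPID) and the
	 * next fork(). */
	struct pid_namespace *children =
		current->nsproxy->pid_ns_for_children;

	(void) active;
	(void) children;
}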
diff --git a/src/lttng-context-pid.c b/src/lttng-context-pid.c
new file mode 100644 (file)
index 0000000..f3e4aef
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-pid.c
+ *
+ * LTTng PID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+static
+size_t pid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
+       size += sizeof(pid_t);
+       return size;
+}
+
+static
+void pid_record(struct lttng_ctx_field *field,
+               struct lib_ring_buffer_ctx *ctx,
+               struct lttng_channel *chan)
+{
+       pid_t pid;
+
+       pid = task_tgid_nr(current);
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(pid));
+       chan->ops->event_write(ctx, &pid, sizeof(pid));
+}
+
+static
+void pid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = task_tgid_nr(current);
+}
+
+int lttng_add_pid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "pid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "pid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = pid_get_size;
+       field->record = pid_record;
+       field->get_value = pid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_pid_to_ctx);
diff --git a/src/lttng-context-ppid.c b/src/lttng-context-ppid.c
new file mode 100644 (file)
index 0000000..854c515
--- /dev/null
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-ppid.c
+ *
+ * LTTng PPID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+static
+size_t ppid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
+       size += sizeof(pid_t);
+       return size;
+}
+
+static
+void ppid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       pid_t ppid;
+
+       /*
+        * TODO: when we eventually add RCU subsystem instrumentation,
+        * taking the rcu read lock here will trigger RCU tracing
+        * recursively. We should modify the kernel synchronization so
+        * it synchronizes both for RCU and RCU sched, and rely on
+        * rcu_read_lock_sched_notrace.
+        */
+       rcu_read_lock();
+       ppid = task_tgid_nr(current->real_parent);
+       rcu_read_unlock();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(ppid));
+       chan->ops->event_write(ctx, &ppid, sizeof(ppid));
+}
+
+static
+void ppid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       pid_t ppid;
+
+       /*
+        * TODO: when we eventually add RCU subsystem instrumentation,
+        * taking the rcu read lock here will trigger RCU tracing
+        * recursively. We should modify the kernel synchronization so
+        * it synchronizes both for RCU and RCU sched, and rely on
+        * rcu_read_lock_sched_notrace.
+        */
+       rcu_read_lock();
+       ppid = task_tgid_nr(current->real_parent);
+       rcu_read_unlock();
+       value->s64 = ppid;
+}
+
+int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "ppid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "ppid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = ppid_get_size;
+       field->record = ppid_record;
+       field->get_value = ppid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_ppid_to_ctx);
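Both ppid accessors read current->real_parent under the RCU read lock because the parent can be reaped concurrently; the critical section pins its task_struct for the duration of the dereference. A minimal sketch of the same access with an explicit rcu_dereference() (the code above dereferences the field directly; this variant mirrors what lttng-context-vppid.c does):

#include <linux/rcupdate.h>
#include <linux/sched.h>

static pid_t example_parent_tgid(void)
{
	struct task_struct *parent;
	pid_t ppid;

	rcu_read_lock();	/* pins the parent task_struct */
	parent = rcu_dereference(current->real_parent);
	ppid = task_tgid_nr(parent);
	rcu_read_unlock();
	return ppid;
}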
diff --git a/src/lttng-context-preemptible.c b/src/lttng-context-preemptible.c
new file mode 100644 (file)
index 0000000..6130a1a
--- /dev/null
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-preemptible.c
+ *
+ * LTTng preemptible context.
+ *
+ * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/irqflags.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+/*
+ * LTTng nests preempt disabling twice: the first level is taken by the
+ * instrumentation (tracepoint, kprobes, kretprobes, syscall
+ * tracepoint), and the second by lib_ring_buffer_get_cpu() in the ring
+ * buffer library.
+ */
+#define LTTNG_PREEMPT_DISABLE_NESTING  2
+
+static
+size_t preemptible_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(uint8_t));
+       size += sizeof(uint8_t);
+       return size;
+}
+
+static
+void preemptible_record(struct lttng_ctx_field *field,
+               struct lib_ring_buffer_ctx *ctx,
+               struct lttng_channel *chan)
+{
+       int pc = preempt_count();
+       uint8_t preemptible = 0;
+
+       WARN_ON_ONCE(pc < LTTNG_PREEMPT_DISABLE_NESTING);
+       if (pc == LTTNG_PREEMPT_DISABLE_NESTING)
+               preemptible = 1;
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(preemptible));
+       chan->ops->event_write(ctx, &preemptible, sizeof(preemptible));
+}
+
+static
+void preemptible_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       int pc = preempt_count();
+
+       WARN_ON_ONCE(pc < LTTNG_PREEMPT_DISABLE_NESTING);
+       if (pc == LTTNG_PREEMPT_DISABLE_NESTING)
+               value->s64 = 1;
+       else
+               value->s64 = 0;
+}
+
+int lttng_add_preemptible_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "preemptible")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "preemptible";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(uint8_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(uint8_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uint8_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = preemptible_get_size;
+       field->record = preemptible_record;
+       field->get_value = preemptible_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_preemptible_to_ctx);
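The nesting constant encodes a small invariant worth spelling out: each preempt_disable() adds one to preempt_count(), and by the time the record callback runs the tracer has always contributed exactly two such levels. A sketch of the arithmetic, assuming the two tracer-owned levels described in the header comment:

#include <linux/preempt.h>

/*
 * preempt_count() as seen from preemptible_record():
 *   +1  instrumentation (tracepoint, kprobe, syscall probe)
 *   +1  lib_ring_buffer_get_cpu()
 *   +N  whatever the traced context itself had disabled
 *       (including hardirq/softirq bits, which also raise the count)
 *
 * N == 0, i.e. the traced context was preemptible, iff count == 2.
 */
static int example_traced_ctx_was_preemptible(void)
{
	return preempt_count() == 2 /* LTTNG_PREEMPT_DISABLE_NESTING */;
}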
diff --git a/src/lttng-context-prio.c b/src/lttng-context-prio.c
new file mode 100644 (file)
index 0000000..d300445
--- /dev/null
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-prio.c
+ *
+ * LTTng priority context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/kallsyms.h>
+#include <lttng/tracer.h>
+
+static
+int (*wrapper_task_prio_sym)(struct task_struct *t);
+
+int wrapper_task_prio_init(void)
+{
+       wrapper_task_prio_sym = (void *) kallsyms_lookup_funcptr("task_prio");
+       if (!wrapper_task_prio_sym) {
+               printk(KERN_WARNING "LTTng: task_prio symbol lookup failed.\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static
+size_t prio_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(int));
+       size += sizeof(int);
+       return size;
+}
+
+static
+void prio_record(struct lttng_ctx_field *field,
+               struct lib_ring_buffer_ctx *ctx,
+               struct lttng_channel *chan)
+{
+       int prio;
+
+       prio = wrapper_task_prio_sym(current);
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(prio));
+       chan->ops->event_write(ctx, &prio, sizeof(prio));
+}
+
+static
+void prio_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = wrapper_task_prio_sym(current);
+}
+
+int lttng_add_prio_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+       int ret;
+
+       if (!wrapper_task_prio_sym) {
+               ret = wrapper_task_prio_init();
+               if (ret)
+                       return ret;
+       }
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "prio")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "prio";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(int) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(int) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(int);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = prio_get_size;
+       field->record = prio_record;
+       field->get_value = prio_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_prio_to_ctx);
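A usage sketch for the lttng_add_*_to_ctx() family (hypothetical caller): each helper appends one field to the context, returns -EEXIST if the field is already present, and the prio variant lazily resolves the non-exported task_prio symbol on first use, failing with -EINVAL if the kallsyms lookup does not find it:

static int example_build_ctx(struct lttng_ctx **ctx)
{
	int ret;

	ret = lttng_add_pid_to_ctx(ctx);
	if (ret && ret != -EEXIST)
		return ret;
	ret = lttng_add_prio_to_ctx(ctx);	/* may return -EINVAL if the
						 * "task_prio" lookup fails */
	if (ret && ret != -EEXIST)
		return ret;
	return 0;
}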
diff --git a/src/lttng-context-procname.c b/src/lttng-context-procname.c
new file mode 100644 (file)
index 0000000..fb5c36b
--- /dev/null
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-procname.c
+ *
+ * LTTng procname context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+#include <lttng/endian.h>
+
+static
+size_t procname_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += sizeof(current->comm);
+       return size;
+}
+
+/*
+ * Racy read of procname: we simply copy the whole array.
+ * It only races with updates of current->comm, i.e. prctl(PR_SET_NAME)
+ * or writes to /proc/<pid>/comm.
+ * Taking a mutex for each event would be cumbersome, could crash in
+ * IRQ context, and could deadlock the lockdep tracer.
+ */
+static
+void procname_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       chan->ops->event_write(ctx, current->comm, sizeof(current->comm));
+}
+
+static
+void procname_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->str = current->comm;
+}
+
+static const struct lttng_type procname_array_elem_type =
+       __type_integer(char, 0, 0, -1, __BYTE_ORDER, 10, UTF8);
+
+int lttng_add_procname_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "procname")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "procname";
+       field->event_field.type.atype = atype_array_nestable;
+       field->event_field.type.u.array_nestable.elem_type = &procname_array_elem_type;
+       field->event_field.type.u.array_nestable.length = sizeof(current->comm);
+       field->event_field.type.u.array_nestable.alignment = 0;
+
+       field->get_size = procname_get_size;
+       field->record = procname_record;
+       field->get_value = procname_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_procname_to_ctx);
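The race that the procname comment tolerates is bounded by construction: procname_get_size() reserves sizeof(current->comm) bytes (TASK_COMM_LEN) and procname_record() copies exactly that many, so a concurrent comm update can interleave old and new bytes but can never overrun the reserved slot. A sketch of the same bounded copy outside the ring buffer (hypothetical helper):

#include <linux/sched.h>
#include <linux/string.h>

static void example_copy_comm(char out[sizeof(current->comm)])
{
	/* Full-array copy: racy against set_task_comm(), but always
	 * within bounds. Consumers treat the result as a fixed-size
	 * text array rather than relying on the NUL terminator. */
	memcpy(out, current->comm, sizeof(current->comm));
}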
diff --git a/src/lttng-context-sgid.c b/src/lttng-context-sgid.c
new file mode 100644 (file)
index 0000000..18f1b83
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-sgid.c
+ *
+ * LTTng saved set-group-ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t sgid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
+       size += sizeof(gid_t);
+       return size;
+}
+
+static
+void sgid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       gid_t sgid;
+
+       sgid = lttng_current_sgid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(sgid));
+       chan->ops->event_write(ctx, &sgid, sizeof(sgid));
+}
+
+static
+void sgid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_sgid();
+}
+
+int lttng_add_sgid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "sgid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "sgid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = sgid_get_size;
+       field->record = sgid_record;
+       field->get_value = sgid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_sgid_to_ctx);
diff --git a/src/lttng-context-suid.c b/src/lttng-context-suid.c
new file mode 100644 (file)
index 0000000..1aa52dc
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-suid.c
+ *
+ * LTTng saved set-user-ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t suid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
+       size += sizeof(uid_t);
+       return size;
+}
+
+static
+void suid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       uid_t suid;
+
+       suid = lttng_current_suid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(suid));
+       chan->ops->event_write(ctx, &suid, sizeof(suid));
+}
+
+static
+void suid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_suid();
+}
+
+int lttng_add_suid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "suid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "suid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = suid_get_size;
+       field->record = suid_record;
+       field->get_value = suid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_suid_to_ctx);
diff --git a/src/lttng-context-tid.c b/src/lttng-context-tid.c
new file mode 100644 (file)
index 0000000..3116130
--- /dev/null
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-tid.c
+ *
+ * LTTng TID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+static
+size_t tid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
+       size += sizeof(pid_t);
+       return size;
+}
+
+static
+void tid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       pid_t tid;
+
+       tid = task_pid_nr(current);
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(tid));
+       chan->ops->event_write(ctx, &tid, sizeof(tid));
+}
+
+static
+void tid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       pid_t tid;
+
+       tid = task_pid_nr(current);
+       value->s64 = tid;
+}
+
+int lttng_add_tid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "tid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "tid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = tid_get_size;
+       field->record = tid_record;
+       field->get_value = tid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_tid_to_ctx);
diff --git a/src/lttng-context-uid.c b/src/lttng-context-uid.c
new file mode 100644 (file)
index 0000000..c48bd0a
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-uid.c
+ *
+ * LTTng real user ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t uid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
+       size += sizeof(uid_t);
+       return size;
+}
+
+static
+void uid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       uid_t uid;
+
+       uid = lttng_current_uid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uid));
+       chan->ops->event_write(ctx, &uid, sizeof(uid));
+}
+
+static
+void uid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_uid();
+}
+
+int lttng_add_uid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "uid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "uid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = uid_get_size;
+       field->record = uid_record;
+       field->get_value = uid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_uid_to_ctx);
diff --git a/src/lttng-context-user-ns.c b/src/lttng-context-user-ns.c
new file mode 100644 (file)
index 0000000..b2c1189
--- /dev/null
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-user-ns.c
+ *
+ * LTTng user namespace context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/user_namespace.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/namespace.h>
+#include <lttng/tracer.h>
+
+#if defined(CONFIG_USER_NS) && \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+
+static
+size_t user_ns_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       size += sizeof(unsigned int);
+       return size;
+}
+
+static
+void user_ns_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       unsigned int user_ns_inum = 0;
+
+       if (current_user_ns())
+               user_ns_inum = current_user_ns()->lttng_ns_inum;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(user_ns_inum));
+       chan->ops->event_write(ctx, &user_ns_inum, sizeof(user_ns_inum));
+}
+
+static
+void user_ns_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       unsigned int user_ns_inum = 0;
+
+       if (current_user_ns())
+               user_ns_inum = current_user_ns()->lttng_ns_inum;
+
+       value->s64 = user_ns_inum;
+}
+
+int lttng_add_user_ns_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "user_ns")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "user_ns";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = user_ns_get_size;
+       field->record = user_ns_record;
+       field->get_value = user_ns_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_user_ns_to_ctx);
+
+#endif
diff --git a/src/lttng-context-uts-ns.c b/src/lttng-context-uts-ns.c
new file mode 100644 (file)
index 0000000..b4284a5
--- /dev/null
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-uts-ns.c
+ *
+ * LTTng uts namespace context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/utsname.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/namespace.h>
+#include <lttng/tracer.h>
+
+#if defined(CONFIG_UTS_NS) && \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+
+static
+size_t uts_ns_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       size += sizeof(unsigned int);
+       return size;
+}
+
+static
+void uts_ns_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       unsigned int uts_ns_inum = 0;
+
+       /*
+	 * nsproxy can be NULL when the task is scheduled out during exit.
+	 *
+	 * As documented in the namespace access rules of 'linux/nsproxy.h',
+	 * no precautions are needed when accessing the current task's own
+	 * namespaces: the pointers can be dereferenced directly.
+        */
+       if (current->nsproxy)
+               uts_ns_inum = current->nsproxy->uts_ns->lttng_ns_inum;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uts_ns_inum));
+       chan->ops->event_write(ctx, &uts_ns_inum, sizeof(uts_ns_inum));
+}
+
+static
+void uts_ns_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       unsigned int uts_ns_inum = 0;
+
+       /*
+	 * nsproxy can be NULL when the task is scheduled out during exit.
+	 *
+	 * As documented in the namespace access rules of 'linux/nsproxy.h',
+	 * no precautions are needed when accessing the current task's own
+	 * namespaces: the pointers can be dereferenced directly.
+        */
+       if (current->nsproxy)
+               uts_ns_inum = current->nsproxy->uts_ns->lttng_ns_inum;
+
+       value->s64 = uts_ns_inum;
+}
+
+int lttng_add_uts_ns_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "uts_ns")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "uts_ns";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(unsigned int);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = uts_ns_get_size;
+       field->record = uts_ns_record;
+       field->get_value = uts_ns_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_uts_ns_to_ctx);
+
+#endif
diff --git a/src/lttng-context-vegid.c b/src/lttng-context-vegid.c
new file mode 100644 (file)
index 0000000..6207e61
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-vegid.c
+ *
+ * LTTng namespaced effective group ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t vegid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
+       size += sizeof(gid_t);
+       return size;
+}
+
+static
+void vegid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       gid_t vegid;
+
+       vegid = lttng_current_vegid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vegid));
+       chan->ops->event_write(ctx, &vegid, sizeof(vegid));
+}
+
+static
+void vegid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_vegid();
+}
+
+int lttng_add_vegid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "vegid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "vegid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = vegid_get_size;
+       field->record = vegid_record;
+       field->get_value = vegid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_vegid_to_ctx);
diff --git a/src/lttng-context-veuid.c b/src/lttng-context-veuid.c
new file mode 100644 (file)
index 0000000..a249820
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-veuid.c
+ *
+ * LTTng namespaced effective user ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t veuid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
+       size += sizeof(uid_t);
+       return size;
+}
+
+static
+void veuid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       uid_t veuid;
+
+       veuid = lttng_current_veuid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(veuid));
+       chan->ops->event_write(ctx, &veuid, sizeof(veuid));
+}
+
+static
+void veuid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_veuid();
+}
+
+int lttng_add_veuid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "veuid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "veuid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = veuid_get_size;
+       field->record = veuid_record;
+       field->get_value = veuid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_veuid_to_ctx);
diff --git a/src/lttng-context-vgid.c b/src/lttng-context-vgid.c
new file mode 100644 (file)
index 0000000..a833915
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-vgid.c
+ *
+ * LTTng namespaced real group ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t vgid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
+       size += sizeof(gid_t);
+       return size;
+}
+
+static
+void vgid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       gid_t vgid;
+
+       vgid = lttng_current_vgid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vgid));
+       chan->ops->event_write(ctx, &vgid, sizeof(vgid));
+}
+
+static
+void vgid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_vgid();
+}
+
+int lttng_add_vgid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "vgid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "vgid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = vgid_get_size;
+       field->record = vgid_record;
+       field->get_value = vgid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_vgid_to_ctx);
diff --git a/src/lttng-context-vpid.c b/src/lttng-context-vpid.c
new file mode 100644 (file)
index 0000000..28178b9
--- /dev/null
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-vpid.c
+ *
+ * LTTng vPID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+static
+size_t vpid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
+       size += sizeof(pid_t);
+       return size;
+}
+
+static
+void vpid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       pid_t vpid;
+
+       /*
+	 * nsproxy can be NULL when the task is scheduled out during exit.
+        */
+       if (!current->nsproxy)
+               vpid = 0;
+       else
+               vpid = task_tgid_vnr(current);
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vpid));
+       chan->ops->event_write(ctx, &vpid, sizeof(vpid));
+}
+
+static
+void vpid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       pid_t vpid;
+
+       /*
+	 * nsproxy can be NULL when the task is scheduled out during exit.
+        */
+       if (!current->nsproxy)
+               vpid = 0;
+       else
+               vpid = task_tgid_vnr(current);
+       value->s64 = vpid;
+}
+
+int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "vpid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "vpid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = vpid_get_size;
+       field->record = vpid_record;
+       field->get_value = vpid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_vpid_to_ctx);
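The pid and vpid fields differ only in which accessor performs the namespace translation; the NULL nsproxy test covers the late-exit window the comments mention. A sketch of the two views (hypothetical helper):

#include <linux/sched.h>

static void example_pid_views(void)
{
	pid_t from_init_ns = task_tgid_nr(current);	/* initial pid namespace */
	pid_t from_own_ns = 0;

	if (current->nsproxy)		/* NULL late in exit */
		from_own_ns = task_tgid_vnr(current);	/* caller's pid namespace */

	(void) from_init_ns;
	(void) from_own_ns;
}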
diff --git a/src/lttng-context-vppid.c b/src/lttng-context-vppid.c
new file mode 100644 (file)
index 0000000..8757eb2
--- /dev/null
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-vppid.c
+ *
+ * LTTng vPPID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+static
+size_t vppid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
+       size += sizeof(pid_t);
+       return size;
+}
+
+static
+void vppid_record(struct lttng_ctx_field *field,
+                 struct lib_ring_buffer_ctx *ctx,
+                 struct lttng_channel *chan)
+{
+       struct task_struct *parent;
+       pid_t vppid;
+
+       /*
+	 * The current task's nsproxy can be NULL when it is scheduled out
+	 * during exit. pid_vnr() performs the lookup relative to the
+	 * current thread's pid namespace.
+        */
+
+       /*
+        * TODO: when we eventually add RCU subsystem instrumentation,
+        * taking the rcu read lock here will trigger RCU tracing
+        * recursively. We should modify the kernel synchronization so
+        * it synchronizes both for RCU and RCU sched, and rely on
+        * rcu_read_lock_sched_notrace.
+        */
+
+       rcu_read_lock();
+       parent = rcu_dereference(current->real_parent);
+       if (!current->nsproxy)
+               vppid = 0;
+       else
+               vppid = task_tgid_vnr(parent);
+       rcu_read_unlock();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vppid));
+       chan->ops->event_write(ctx, &vppid, sizeof(vppid));
+}
+
+static
+void vppid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       struct task_struct *parent;
+       pid_t vppid;
+
+       /*
+	 * The current task's nsproxy can be NULL when it is scheduled out
+	 * during exit. pid_vnr() performs the lookup relative to the
+	 * current thread's pid namespace.
+        */
+
+       /*
+        * TODO: when we eventually add RCU subsystem instrumentation,
+        * taking the rcu read lock here will trigger RCU tracing
+        * recursively. We should modify the kernel synchronization so
+        * it synchronizes both for RCU and RCU sched, and rely on
+        * rcu_read_lock_sched_notrace.
+        */
+
+       rcu_read_lock();
+       parent = rcu_dereference(current->real_parent);
+       if (!current->nsproxy)
+               vppid = 0;
+       else
+               vppid = task_tgid_vnr(parent);
+       rcu_read_unlock();
+       value->s64 = vppid;
+}
+
+int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "vppid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "vppid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = vppid_get_size;
+       field->record = vppid_record;
+       field->get_value = vppid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_vppid_to_ctx);
diff --git a/src/lttng-context-vsgid.c b/src/lttng-context-vsgid.c
new file mode 100644 (file)
index 0000000..c6a6ea7
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-vsgid.c
+ *
+ * LTTng namespaced saved set-group-ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t vsgid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(gid_t));
+       size += sizeof(gid_t);
+       return size;
+}
+
+static
+void vsgid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       gid_t vsgid;
+
+       vsgid = lttng_current_vsgid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vsgid));
+       chan->ops->event_write(ctx, &vsgid, sizeof(vsgid));
+}
+
+static
+void vsgid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_vsgid();
+}
+
+int lttng_add_vsgid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "vsgid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "vsgid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(gid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(gid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = vsgid_get_size;
+       field->record = vsgid_record;
+       field->get_value = vsgid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_vsgid_to_ctx);
diff --git a/src/lttng-context-vsuid.c b/src/lttng-context-vsuid.c
new file mode 100644 (file)
index 0000000..c22d430
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-vsuid.c
+ *
+ * LTTng namespaced saved set-user-ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t vsuid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
+       size += sizeof(uid_t);
+       return size;
+}
+
+static
+void vsuid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       uid_t vsuid;
+
+       vsuid = lttng_current_vsuid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vsuid));
+       chan->ops->event_write(ctx, &vsuid, sizeof(vsuid));
+}
+
+static
+void vsuid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_vsuid();
+}
+
+int lttng_add_vsuid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "vsuid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "vsuid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = vsuid_get_size;
+       field->record = vsuid_record;
+       field->get_value = vsuid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_vsuid_to_ctx);
diff --git a/src/lttng-context-vtid.c b/src/lttng-context-vtid.c
new file mode 100644 (file)
index 0000000..3b0cadc
--- /dev/null
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-vtid.c
+ *
+ * LTTng vTID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/tracer.h>
+
+static
+size_t vtid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
+       size += sizeof(pid_t);
+       return size;
+}
+
+static
+void vtid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       pid_t vtid;
+
+       /*
+	 * nsproxy can be NULL when the task is scheduled out during exit.
+        */
+       if (!current->nsproxy)
+               vtid = 0;
+       else
+               vtid = task_pid_vnr(current);
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vtid));
+       chan->ops->event_write(ctx, &vtid, sizeof(vtid));
+}
+
+static
+void vtid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       pid_t vtid;
+
+       /*
+	 * nsproxy can be NULL when the task is scheduled out during exit.
+        */
+       if (!current->nsproxy)
+               vtid = 0;
+       else
+               vtid = task_pid_vnr(current);
+       value->s64 = vtid;
+}
+
+int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "vtid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "vtid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(pid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = vtid_get_size;
+       field->record = vtid_record;
+       field->get_value = vtid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_vtid_to_ctx);
diff --git a/src/lttng-context-vuid.c b/src/lttng-context-vuid.c
new file mode 100644 (file)
index 0000000..e83f898
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context-vuid.c
+ *
+ * LTTng namespaced real user ID context.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *               2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/user_namespace.h>
+
+static
+size_t vuid_get_size(size_t offset)
+{
+       size_t size = 0;
+
+       size += lib_ring_buffer_align(offset, lttng_alignof(uid_t));
+       size += sizeof(uid_t);
+       return size;
+}
+
+static
+void vuid_record(struct lttng_ctx_field *field,
+                struct lib_ring_buffer_ctx *ctx,
+                struct lttng_channel *chan)
+{
+       uid_t vuid;
+
+       vuid = lttng_current_vuid();
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(vuid));
+       chan->ops->event_write(ctx, &vuid, sizeof(vuid));
+}
+
+static
+void vuid_get_value(struct lttng_ctx_field *field,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               union lttng_ctx_value *value)
+{
+       value->s64 = lttng_current_vuid();
+}
+
+int lttng_add_vuid_to_ctx(struct lttng_ctx **ctx)
+{
+       struct lttng_ctx_field *field;
+
+       field = lttng_append_context(ctx);
+       if (!field)
+               return -ENOMEM;
+       if (lttng_find_context(*ctx, "vuid")) {
+               lttng_remove_context_field(ctx, field);
+               return -EEXIST;
+       }
+       field->event_field.name = "vuid";
+       field->event_field.type.atype = atype_integer;
+       field->event_field.type.u.integer.size = sizeof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.alignment = lttng_alignof(uid_t) * CHAR_BIT;
+       field->event_field.type.u.integer.signedness = lttng_is_signed_type(uid_t);
+       field->event_field.type.u.integer.reverse_byte_order = 0;
+       field->event_field.type.u.integer.base = 10;
+       field->event_field.type.u.integer.encoding = lttng_encode_none;
+       field->get_size = vuid_get_size;
+       field->record = vuid_record;
+       field->get_value = vuid_get_value;
+       lttng_context_update(*ctx);
+       wrapper_vmalloc_sync_mappings();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_add_vuid_to_ctx);
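+
+/*
+ * Usage sketch (hypothetical caller, for illustration only): the
+ * add-context helpers in this family are typically invoked from the
+ * ABI layer to attach a field to a channel or session context:
+ *
+ *     struct lttng_ctx *ctx = NULL;
+ *     int ret;
+ *
+ *     ret = lttng_add_vuid_to_ctx(&ctx);
+ *     if (ret && ret != -EEXIST)
+ *             return ret;
+ */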
diff --git a/src/lttng-context.c b/src/lttng-context.c
new file mode 100644 (file)
index 0000000..eb5e5d1
--- /dev/null
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-context.c
+ *
+ * LTTng trace/channel/event context management.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+
+/*
+ * The filter implementation requires that two consecutive "get"
+ * operations on the same context field, performed by the same thread,
+ * return the same result.
+ */
+
+/*
+ * Static array of contexts, for $ctx filters.
+ */
+struct lttng_ctx *lttng_static_ctx;
+
+int lttng_find_context(struct lttng_ctx *ctx, const char *name)
+{
+       unsigned int i;
+
+       for (i = 0; i < ctx->nr_fields; i++) {
+               /* Skip allocated (but not yet initialized) context fields. */
+               if (!ctx->fields[i].event_field.name)
+                       continue;
+               if (!strcmp(ctx->fields[i].event_field.name, name))
+                       return 1;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_find_context);
+
+int lttng_get_context_index(struct lttng_ctx *ctx, const char *name)
+{
+       unsigned int i;
+       const char *subname;
+
+       if (!ctx)
+               return -1;
+       if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
+               subname = name + strlen("$ctx.");
+       } else {
+               subname = name;
+       }
+       for (i = 0; i < ctx->nr_fields; i++) {
+               /* Skip allocated (but not yet initialized) context fields. */
+               if (!ctx->fields[i].event_field.name)
+                       continue;
+               if (!strcmp(ctx->fields[i].event_field.name, subname))
+                       return i;
+       }
+       return -1;
+}
+EXPORT_SYMBOL_GPL(lttng_get_context_index);
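+
+/*
+ * Illustrative note: both spellings below resolve to the same field,
+ * so a filter expression may use either form:
+ *
+ *     lttng_get_context_index(ctx, "$ctx.vtid")
+ *             == lttng_get_context_index(ctx, "vtid")
+ *
+ * Both return -1 when the field is absent.
+ */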
+
+/*
+ * Note: as we append context information, the pointer location may change.
+ */
+struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx_p)
+{
+       struct lttng_ctx_field *field;
+       struct lttng_ctx *ctx;
+
+       if (!*ctx_p) {
+               *ctx_p = kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
+               if (!*ctx_p)
+                       return NULL;
+               (*ctx_p)->largest_align = 1;
+       }
+       ctx = *ctx_p;
+       if (ctx->nr_fields + 1 > ctx->allocated_fields) {
+               struct lttng_ctx_field *new_fields;
+
+               ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
+               new_fields = lttng_kvzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
+               if (!new_fields)
+                       return NULL;
+               if (ctx->fields)
+                       memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
+               lttng_kvfree(ctx->fields);
+               ctx->fields = new_fields;
+       }
+       field = &ctx->fields[ctx->nr_fields];
+       ctx->nr_fields++;
+       return field;
+}
+EXPORT_SYMBOL_GPL(lttng_append_context);
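+
+/*
+ * Illustrative note: the fields array grows by doubling
+ * (1, 2, 4, ...), making appends amortized O(1). Since the array may
+ * be reallocated, a field pointer returned by an earlier call must
+ * not be kept across a subsequent append:
+ *
+ *     a = lttng_append_context(&ctx);
+ *     b = lttng_append_context(&ctx);   (may invalidate 'a')
+ */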
+
+/*
+ * lttng_context_update() should be called at least once between context
+ * modification and trace start.
+ */
+void lttng_context_update(struct lttng_ctx *ctx)
+{
+       int i;
+       size_t largest_align = 8;       /* in bits */
+
+       for (i = 0; i < ctx->nr_fields; i++) {
+               struct lttng_type *type;
+               size_t field_align = 8;
+
+               type = &ctx->fields[i].event_field.type;
+               switch (type->atype) {
+               case atype_integer:
+                       field_align = type->u.integer.alignment;
+                       break;
+               case atype_array_nestable:
+               {
+                       const struct lttng_type *nested_type;
+
+                       nested_type = type->u.array_nestable.elem_type;
+                       switch (nested_type->atype) {
+                       case atype_integer:
+                               field_align = nested_type->u.integer.alignment;
+                               break;
+                       case atype_string:
+                               break;
+
+                       case atype_array_nestable:
+                       case atype_sequence_nestable:
+                       case atype_struct_nestable:
+                       case atype_variant_nestable:
+                       default:
+                               WARN_ON_ONCE(1);
+                               break;
+                       }
+                       field_align = max_t(size_t, field_align,
+                                       type->u.array_nestable.alignment);
+                       break;
+               }
+               case atype_sequence_nestable:
+               {
+                       const struct lttng_type *nested_type;
+
+                       nested_type = type->u.sequence_nestable.elem_type;
+                       switch (nested_type->atype) {
+                       case atype_integer:
+                               field_align = nested_type->u.integer.alignment;
+                               break;
+
+                       case atype_string:
+                               break;
+
+                       case atype_array_nestable:
+                       case atype_sequence_nestable:
+                       case atype_struct_nestable:
+                       case atype_variant_nestable:
+                       default:
+                               WARN_ON_ONCE(1);
+                               break;
+                       }
+                       field_align = max_t(size_t, field_align,
+                                       type->u.sequence_nestable.alignment);
+                       break;
+               }
+               case atype_string:
+                       break;
+
+               case atype_struct_nestable:
+               case atype_variant_nestable:
+                       break;
+
+               case atype_enum_nestable:
+               default:
+                       WARN_ON_ONCE(1);
+                       break;
+               }
+               largest_align = max_t(size_t, largest_align, field_align);
+       }
+       ctx->largest_align = largest_align >> 3;        /* bits to bytes */
+}
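+
+/*
+ * Worked example (illustrative): with one integer field aligned on
+ * 32 bits and another aligned on 64 bits, largest_align is
+ * max(8, 32, 64) == 64 bits, so ctx->largest_align becomes
+ * 64 >> 3 == 8 bytes. The initial value of 8 bits guarantees at
+ * least byte alignment.
+ */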
+
+/*
+ * Remove last context field.
+ */
+void lttng_remove_context_field(struct lttng_ctx **ctx_p,
+                               struct lttng_ctx_field *field)
+{
+       struct lttng_ctx *ctx;
+
+       ctx = *ctx_p;
+       ctx->nr_fields--;
+       WARN_ON_ONCE(&ctx->fields[ctx->nr_fields] != field);
+       memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
+}
+EXPORT_SYMBOL_GPL(lttng_remove_context_field);
+
+void lttng_destroy_context(struct lttng_ctx *ctx)
+{
+       int i;
+
+       if (!ctx)
+               return;
+       for (i = 0; i < ctx->nr_fields; i++) {
+               if (ctx->fields[i].destroy)
+                       ctx->fields[i].destroy(&ctx->fields[i]);
+       }
+       lttng_kvfree(ctx->fields);
+       kfree(ctx);
+}
+
+int lttng_context_init(void)
+{
+       int ret;
+
+       ret = lttng_add_hostname_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_hostname_to_ctx\n");
+       }
+       ret = lttng_add_nice_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_nice_to_ctx\n");
+       }
+       ret = lttng_add_pid_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_pid_to_ctx\n");
+       }
+       ret = lttng_add_ppid_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_ppid_to_ctx\n");
+       }
+       ret = lttng_add_prio_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_prio_to_ctx\n");
+       }
+       ret = lttng_add_procname_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_procname_to_ctx\n");
+       }
+       ret = lttng_add_tid_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_tid_to_ctx\n");
+       }
+       ret = lttng_add_vppid_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_vppid_to_ctx\n");
+       }
+       ret = lttng_add_vtid_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_vtid_to_ctx\n");
+       }
+       ret = lttng_add_vpid_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_vpid_to_ctx\n");
+       }
+       ret = lttng_add_cpu_id_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_cpu_id_to_ctx\n");
+       }
+       ret = lttng_add_interruptible_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_interruptible_to_ctx\n");
+       }
+       ret = lttng_add_need_reschedule_to_ctx(&lttng_static_ctx);
+       if (ret) {
+               printk(KERN_WARNING "Cannot add context lttng_add_need_reschedule_to_ctx\n");
+       }
+       ret = lttng_add_preemptible_to_ctx(&lttng_static_ctx);
+       if (ret && ret != -ENOSYS) {
+               printk(KERN_WARNING "Cannot add context lttng_add_preemptible_to_ctx\n");
+       }
+       ret = lttng_add_migratable_to_ctx(&lttng_static_ctx);
+       if (ret && ret != -ENOSYS) {
+               printk(KERN_WARNING "Cannot add context lttng_add_migratable_to_ctx\n");
+       }
+       ret = lttng_add_cgroup_ns_to_ctx(&lttng_static_ctx);
+       if (ret && ret != -ENOSYS) {
+               printk(KERN_WARNING "Cannot add context lttng_add_cgroup_ns_to_ctx\n");
+       }
+       ret = lttng_add_ipc_ns_to_ctx(&lttng_static_ctx);
+       if (ret && ret != -ENOSYS) {
+               printk(KERN_WARNING "Cannot add context lttng_add_ipc_ns_to_ctx\n");
+       }
+       ret = lttng_add_mnt_ns_to_ctx(&lttng_static_ctx);
+       if (ret && ret != -ENOSYS) {
+               printk(KERN_WARNING "Cannot add context lttng_add_mnt_ns_to_ctx\n");
+       }
+       ret = lttng_add_net_ns_to_ctx(&lttng_static_ctx);
+       if (ret && ret != -ENOSYS) {
+               printk(KERN_WARNING "Cannot add context lttng_add_net_ns_to_ctx\n");
+       }
+       ret = lttng_add_pid_ns_to_ctx(&lttng_static_ctx);
+       if (ret && ret != -ENOSYS) {
+               printk(KERN_WARNING "Cannot add context lttng_add_pid_ns_to_ctx\n");
+       }
+       ret = lttng_add_user_ns_to_ctx(&lttng_static_ctx);
+       if (ret && ret != -ENOSYS) {
+               printk(KERN_WARNING "Cannot add context lttng_add_user_ns_to_ctx\n");
+       }
+       ret = lttng_add_uts_ns_to_ctx(&lttng_static_ctx);
+       if (ret && ret != -ENOSYS) {
+               printk(KERN_WARNING "Cannot add context lttng_add_uts_ns_to_ctx\n");
+       }
+       /* TODO: perf counters for filtering */
+       return 0;
+}
+
+void lttng_context_exit(void)
+{
+       lttng_destroy_context(lttng_static_ctx);
+       lttng_static_ctx = NULL;
+}
diff --git a/src/lttng-events.c b/src/lttng-events.c
new file mode 100644 (file)
index 0000000..a853609
--- /dev/null
@@ -0,0 +1,3087 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-events.c
+ *
+ * Holds LTTng per-session event registry.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+/*
+ * This page_alloc.h wrapper needs to be included before gfpflags.h because it
+ * overrides a function with a define.
+ */
+#include "wrapper/page_alloc.h"
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/utsname.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <wrapper/file.h>
+#include <linux/jhash.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/dmi.h>
+
+#include <wrapper/uuid.h>
+#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
+#include <wrapper/random.h>
+#include <wrapper/tracepoint.h>
+#include <wrapper/list.h>
+#include <wrapper/types.h>
+#include <lttng/kernel-version.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <lttng/abi-old.h>
+#include <lttng/endian.h>
+#include <lttng/string-utils.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <wrapper/time.h>
+
+#define METADATA_CACHE_DEFAULT_SIZE 4096
+
+static LIST_HEAD(sessions);
+static LIST_HEAD(lttng_transport_list);
+/*
+ * Protect the sessions and metadata caches.
+ */
+static DEFINE_MUTEX(sessions_mutex);
+static struct kmem_cache *event_cache;
+
+static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
+static void lttng_session_sync_enablers(struct lttng_session *session);
+static void lttng_enabler_destroy(struct lttng_enabler *enabler);
+
+static void _lttng_event_destroy(struct lttng_event *event);
+static void _lttng_channel_destroy(struct lttng_channel *chan);
+static int _lttng_event_unregister(struct lttng_event *event);
+static
+int _lttng_event_metadata_statedump(struct lttng_session *session,
+                                 struct lttng_channel *chan,
+                                 struct lttng_event *event);
+static
+int _lttng_session_metadata_statedump(struct lttng_session *session);
+static
+void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
+static
+int _lttng_type_statedump(struct lttng_session *session,
+               const struct lttng_type *type,
+               size_t nesting);
+static
+int _lttng_field_statedump(struct lttng_session *session,
+               const struct lttng_event_field *field,
+               size_t nesting);
+
+void synchronize_trace(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
+       synchronize_rcu();
+#else
+       synchronize_sched();
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+#ifdef CONFIG_PREEMPT_RT_FULL
+       synchronize_rcu();
+#endif
+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
+#ifdef CONFIG_PREEMPT_RT
+       synchronize_rcu();
+#endif
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
+}
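+
+/*
+ * Rationale sketch (assumption, not stated in the original): before
+ * kernel 5.1, probes fire in preempt-disabled (sched-RCU) read-side
+ * sections, so synchronize_sched() is the matching grace period;
+ * kernel 5.1 consolidated the RCU flavors, making synchronize_rcu()
+ * sufficient. On PREEMPT_RT kernels, readers may additionally be
+ * preemptible, hence the extra synchronize_rcu().
+ */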
+
+void lttng_lock_sessions(void)
+{
+       mutex_lock(&sessions_mutex);
+}
+
+void lttng_unlock_sessions(void)
+{
+       mutex_unlock(&sessions_mutex);
+}
+
+/*
+ * Called with sessions lock held.
+ */
+int lttng_session_active(void)
+{
+       struct lttng_session *iter;
+
+       list_for_each_entry(iter, &sessions, list) {
+               if (iter->active)
+                       return 1;
+       }
+       return 0;
+}
+
+struct lttng_session *lttng_session_create(void)
+{
+       struct lttng_session *session;
+       struct lttng_metadata_cache *metadata_cache;
+       int i;
+
+       mutex_lock(&sessions_mutex);
+       session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
+       if (!session)
+               goto err;
+       INIT_LIST_HEAD(&session->chan);
+       INIT_LIST_HEAD(&session->events);
+       lttng_guid_gen(&session->uuid);
+
+       metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
+                       GFP_KERNEL);
+       if (!metadata_cache)
+               goto err_free_session;
+       metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
+       if (!metadata_cache->data)
+               goto err_free_cache;
+       metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
+       kref_init(&metadata_cache->refcount);
+       mutex_init(&metadata_cache->lock);
+       session->metadata_cache = metadata_cache;
+       INIT_LIST_HEAD(&metadata_cache->metadata_stream);
+       memcpy(&metadata_cache->uuid, &session->uuid,
+               sizeof(metadata_cache->uuid));
+       INIT_LIST_HEAD(&session->enablers_head);
+       for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
+               INIT_HLIST_HEAD(&session->events_ht.table[i]);
+       list_add(&session->list, &sessions);
+       session->pid_tracker.session = session;
+       session->pid_tracker.tracker_type = TRACKER_PID;
+       session->vpid_tracker.session = session;
+       session->vpid_tracker.tracker_type = TRACKER_VPID;
+       session->uid_tracker.session = session;
+       session->uid_tracker.tracker_type = TRACKER_UID;
+       session->vuid_tracker.session = session;
+       session->vuid_tracker.tracker_type = TRACKER_VUID;
+       session->gid_tracker.session = session;
+       session->gid_tracker.tracker_type = TRACKER_GID;
+       session->vgid_tracker.session = session;
+       session->vgid_tracker.tracker_type = TRACKER_VGID;
+       mutex_unlock(&sessions_mutex);
+       return session;
+
+err_free_cache:
+       kfree(metadata_cache);
+err_free_session:
+       lttng_kvfree(session);
+err:
+       mutex_unlock(&sessions_mutex);
+       return NULL;
+}
+
+void metadata_cache_destroy(struct kref *kref)
+{
+       struct lttng_metadata_cache *cache =
+               container_of(kref, struct lttng_metadata_cache, refcount);
+       vfree(cache->data);
+       kfree(cache);
+}
+
+void lttng_session_destroy(struct lttng_session *session)
+{
+       struct lttng_channel *chan, *tmpchan;
+       struct lttng_event *event, *tmpevent;
+       struct lttng_metadata_stream *metadata_stream;
+       struct lttng_enabler *enabler, *tmpenabler;
+       int ret;
+
+       mutex_lock(&sessions_mutex);
+       WRITE_ONCE(session->active, 0);
+       list_for_each_entry(chan, &session->chan, list) {
+               ret = lttng_syscalls_unregister(chan);
+               WARN_ON(ret);
+       }
+       list_for_each_entry(event, &session->events, list) {
+               ret = _lttng_event_unregister(event);
+               WARN_ON(ret);
+       }
+       synchronize_trace();    /* Wait for in-flight events to complete */
+       list_for_each_entry_safe(enabler, tmpenabler,
+                       &session->enablers_head, node)
+               lttng_enabler_destroy(enabler);
+       list_for_each_entry_safe(event, tmpevent, &session->events, list)
+               _lttng_event_destroy(event);
+       list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
+               BUG_ON(chan->channel_type == METADATA_CHANNEL);
+               _lttng_channel_destroy(chan);
+       }
+       list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
+               _lttng_metadata_channel_hangup(metadata_stream);
+       lttng_id_tracker_destroy(&session->pid_tracker, false);
+       lttng_id_tracker_destroy(&session->vpid_tracker, false);
+       lttng_id_tracker_destroy(&session->uid_tracker, false);
+       lttng_id_tracker_destroy(&session->vuid_tracker, false);
+       lttng_id_tracker_destroy(&session->gid_tracker, false);
+       lttng_id_tracker_destroy(&session->vgid_tracker, false);
+       kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
+       list_del(&session->list);
+       mutex_unlock(&sessions_mutex);
+       lttng_kvfree(session);
+}
+
+int lttng_session_statedump(struct lttng_session *session)
+{
+       int ret;
+
+       mutex_lock(&sessions_mutex);
+       ret = lttng_statedump_start(session);
+       mutex_unlock(&sessions_mutex);
+       return ret;
+}
+
+int lttng_session_enable(struct lttng_session *session)
+{
+       int ret = 0;
+       struct lttng_channel *chan;
+
+       mutex_lock(&sessions_mutex);
+       if (session->active) {
+               ret = -EBUSY;
+               goto end;
+       }
+
+       /* Set transient enabler state to "enabled" */
+       session->tstate = 1;
+
+       /* We need to sync enablers with session before activation. */
+       lttng_session_sync_enablers(session);
+
+       /*
+        * Snapshot the number of events per channel to know the type of header
+        * we need to use.
+        */
+       list_for_each_entry(chan, &session->chan, list) {
+               if (chan->header_type)
+                       continue;               /* don't change it across session stop/restart */
+               if (chan->free_event_id < 31)
+                       chan->header_type = 1;  /* compact */
+               else
+                       chan->header_type = 2;  /* large */
+       }
+
+       /* Clear each stream's quiescent state. */
+       list_for_each_entry(chan, &session->chan, list) {
+               if (chan->channel_type != METADATA_CHANNEL)
+                       lib_ring_buffer_clear_quiescent_channel(chan->chan);
+       }
+
+       WRITE_ONCE(session->active, 1);
+       WRITE_ONCE(session->been_active, 1);
+       ret = _lttng_session_metadata_statedump(session);
+       if (ret) {
+               WRITE_ONCE(session->active, 0);
+               goto end;
+       }
+       ret = lttng_statedump_start(session);
+       if (ret)
+               WRITE_ONCE(session->active, 0);
+end:
+       mutex_unlock(&sessions_mutex);
+       return ret;
+}
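+
+/*
+ * Note (assumption, for illustration): the compact event header
+ * encodes the event ID on 5 bits and reserves the value 31 as an
+ * escape for extended headers, which is why channels holding fewer
+ * than 31 event IDs can use the compact layout while larger channels
+ * fall back to the large header.
+ */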
+
+int lttng_session_disable(struct lttng_session *session)
+{
+       int ret = 0;
+       struct lttng_channel *chan;
+
+       mutex_lock(&sessions_mutex);
+       if (!session->active) {
+               ret = -EBUSY;
+               goto end;
+       }
+       WRITE_ONCE(session->active, 0);
+
+       /* Set transient enabler state to "disabled" */
+       session->tstate = 0;
+       lttng_session_sync_enablers(session);
+
+       /* Set each stream's quiescent state. */
+       list_for_each_entry(chan, &session->chan, list) {
+               if (chan->channel_type != METADATA_CHANNEL)
+                       lib_ring_buffer_set_quiescent_channel(chan->chan);
+       }
+end:
+       mutex_unlock(&sessions_mutex);
+       return ret;
+}
+
+int lttng_session_metadata_regenerate(struct lttng_session *session)
+{
+       int ret = 0;
+       struct lttng_channel *chan;
+       struct lttng_event *event;
+       struct lttng_metadata_cache *cache = session->metadata_cache;
+       struct lttng_metadata_stream *stream;
+
+       mutex_lock(&sessions_mutex);
+       if (!session->active) {
+               ret = -EBUSY;
+               goto end;
+       }
+
+       mutex_lock(&cache->lock);
+       memset(cache->data, 0, cache->cache_alloc);
+       cache->metadata_written = 0;
+       cache->version++;
+       list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
+               stream->metadata_out = 0;
+               stream->metadata_in = 0;
+       }
+       mutex_unlock(&cache->lock);
+
+       session->metadata_dumped = 0;
+       list_for_each_entry(chan, &session->chan, list) {
+               chan->metadata_dumped = 0;
+       }
+
+       list_for_each_entry(event, &session->events, list) {
+               event->metadata_dumped = 0;
+       }
+
+       ret = _lttng_session_metadata_statedump(session);
+
+end:
+       mutex_unlock(&sessions_mutex);
+       return ret;
+}
+
+int lttng_channel_enable(struct lttng_channel *channel)
+{
+       int ret = 0;
+
+       mutex_lock(&sessions_mutex);
+       if (channel->channel_type == METADATA_CHANNEL) {
+               ret = -EPERM;
+               goto end;
+       }
+       if (channel->enabled) {
+               ret = -EEXIST;
+               goto end;
+       }
+       /* Set transient enabler state to "enabled" */
+       channel->tstate = 1;
+       lttng_session_sync_enablers(channel->session);
+       /* Set atomically the state to "enabled" */
+       WRITE_ONCE(channel->enabled, 1);
+end:
+       mutex_unlock(&sessions_mutex);
+       return ret;
+}
+
+int lttng_channel_disable(struct lttng_channel *channel)
+{
+       int ret = 0;
+
+       mutex_lock(&sessions_mutex);
+       if (channel->channel_type == METADATA_CHANNEL) {
+               ret = -EPERM;
+               goto end;
+       }
+       if (!channel->enabled) {
+               ret = -EEXIST;
+               goto end;
+       }
+       /* Set atomically the state to "disabled" */
+       WRITE_ONCE(channel->enabled, 0);
+       /* Set transient enabler state to "enabled" */
+       channel->tstate = 0;
+       lttng_session_sync_enablers(channel->session);
+end:
+       mutex_unlock(&sessions_mutex);
+       return ret;
+}
+
+int lttng_event_enable(struct lttng_event *event)
+{
+       int ret = 0;
+
+       mutex_lock(&sessions_mutex);
+       if (event->chan->channel_type == METADATA_CHANNEL) {
+               ret = -EPERM;
+               goto end;
+       }
+       if (event->enabled) {
+               ret = -EEXIST;
+               goto end;
+       }
+       switch (event->instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+       case LTTNG_KERNEL_SYSCALL:
+               ret = -EINVAL;
+               break;
+       case LTTNG_KERNEL_KPROBE:
+       case LTTNG_KERNEL_UPROBE:
+       case LTTNG_KERNEL_NOOP:
+               WRITE_ONCE(event->enabled, 1);
+               break;
+       case LTTNG_KERNEL_KRETPROBE:
+               ret = lttng_kretprobes_event_enable_state(event, 1);
+               break;
+       case LTTNG_KERNEL_FUNCTION:     /* Fall-through. */
+       default:
+               WARN_ON_ONCE(1);
+               ret = -EINVAL;
+       }
+end:
+       mutex_unlock(&sessions_mutex);
+       return ret;
+}
+
+int lttng_event_disable(struct lttng_event *event)
+{
+       int ret = 0;
+
+       mutex_lock(&sessions_mutex);
+       if (event->chan->channel_type == METADATA_CHANNEL) {
+               ret = -EPERM;
+               goto end;
+       }
+       if (!event->enabled) {
+               ret = -EEXIST;
+               goto end;
+       }
+       switch (event->instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+       case LTTNG_KERNEL_SYSCALL:
+               ret = -EINVAL;
+               break;
+       case LTTNG_KERNEL_KPROBE:
+       case LTTNG_KERNEL_UPROBE:
+       case LTTNG_KERNEL_NOOP:
+               WRITE_ONCE(event->enabled, 0);
+               break;
+       case LTTNG_KERNEL_KRETPROBE:
+               ret = lttng_kretprobes_event_enable_state(event, 0);
+               break;
+       case LTTNG_KERNEL_FUNCTION:     /* Fall-through. */
+       default:
+               WARN_ON_ONCE(1);
+               ret = -EINVAL;
+       }
+end:
+       mutex_unlock(&sessions_mutex);
+       return ret;
+}
+
+static struct lttng_transport *lttng_transport_find(const char *name)
+{
+       struct lttng_transport *transport;
+
+       list_for_each_entry(transport, &lttng_transport_list, node) {
+               if (!strcmp(transport->name, name))
+                       return transport;
+       }
+       return NULL;
+}
+
+struct lttng_channel *lttng_channel_create(struct lttng_session *session,
+                                      const char *transport_name,
+                                      void *buf_addr,
+                                      size_t subbuf_size, size_t num_subbuf,
+                                      unsigned int switch_timer_interval,
+                                      unsigned int read_timer_interval,
+                                      enum channel_type channel_type)
+{
+       struct lttng_channel *chan;
+       struct lttng_transport *transport = NULL;
+
+       mutex_lock(&sessions_mutex);
+       if (session->been_active && channel_type != METADATA_CHANNEL)
+               goto active;    /* Refuse to add channel to active session */
+       transport = lttng_transport_find(transport_name);
+       if (!transport) {
+               printk(KERN_WARNING "LTTng transport %s not found\n",
+                      transport_name);
+               goto notransport;
+       }
+       if (!try_module_get(transport->owner)) {
+               printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+               goto notransport;
+       }
+       chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
+       if (!chan)
+               goto nomem;
+       chan->session = session;
+       chan->id = session->free_chan_id++;
+       chan->ops = &transport->ops;
+       /*
+        * Note: the channel creation op already writes into the packet
+        * headers. Therefore the "chan" information used as input
+        * should already be accessible.
+        */
+       chan->chan = transport->ops.channel_create(transport_name,
+                       chan, buf_addr, subbuf_size, num_subbuf,
+                       switch_timer_interval, read_timer_interval);
+       if (!chan->chan)
+               goto create_error;
+       chan->tstate = 1;
+       chan->enabled = 1;
+       chan->transport = transport;
+       chan->channel_type = channel_type;
+       list_add(&chan->list, &session->chan);
+       mutex_unlock(&sessions_mutex);
+       return chan;
+
+create_error:
+       kfree(chan);
+nomem:
+       if (transport)
+               module_put(transport->owner);
+notransport:
+active:
+       mutex_unlock(&sessions_mutex);
+       return NULL;
+}
+
+/*
+ * Only used internally at session destruction for per-cpu channels, and
+ * when metadata channel is released.
+ * Needs to be called with sessions mutex held.
+ */
+static
+void _lttng_channel_destroy(struct lttng_channel *chan)
+{
+       chan->ops->channel_destroy(chan->chan);
+       module_put(chan->transport->owner);
+       list_del(&chan->list);
+       lttng_destroy_context(chan->ctx);
+       kfree(chan);
+}
+
+void lttng_metadata_channel_destroy(struct lttng_channel *chan)
+{
+       BUG_ON(chan->channel_type != METADATA_CHANNEL);
+
+       /* Protect the metadata cache with the sessions_mutex. */
+       mutex_lock(&sessions_mutex);
+       _lttng_channel_destroy(chan);
+       mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
+
+static
+void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
+{
+       stream->finalized = 1;
+       wake_up_interruptible(&stream->read_wait);
+}
+
+/*
+ * Supports event creation while tracing session is active.
+ * Needs to be called with sessions mutex held.
+ */
+struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
+                               struct lttng_kernel_event *event_param,
+                               void *filter,
+                               const struct lttng_event_desc *event_desc,
+                               enum lttng_kernel_instrumentation itype)
+{
+       struct lttng_session *session = chan->session;
+       struct lttng_event *event;
+       const char *event_name;
+       struct hlist_head *head;
+       size_t name_len;
+       uint32_t hash;
+       int ret;
+
+       if (chan->free_event_id == -1U) {
+               ret = -EMFILE;
+               goto full;
+       }
+
+       switch (itype) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               event_name = event_desc->name;
+               break;
+       case LTTNG_KERNEL_KPROBE:
+       case LTTNG_KERNEL_UPROBE:
+       case LTTNG_KERNEL_KRETPROBE:
+       case LTTNG_KERNEL_NOOP:
+       case LTTNG_KERNEL_SYSCALL:
+               event_name = event_param->name;
+               break;
+       case LTTNG_KERNEL_FUNCTION:     /* Fall-through. */
+       default:
+               WARN_ON_ONCE(1);
+               ret = -EINVAL;
+               goto type_error;
+       }
+       name_len = strlen(event_name);
+       hash = jhash(event_name, name_len, 0);
+       head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
+       lttng_hlist_for_each_entry(event, head, hlist) {
+               WARN_ON_ONCE(!event->desc);
+               if (!strncmp(event->desc->name, event_name,
+                                       LTTNG_KERNEL_SYM_NAME_LEN - 1)
+                               && chan == event->chan) {
+                       ret = -EEXIST;
+                       goto exist;
+               }
+       }
+
+       event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
+       if (!event) {
+               ret = -ENOMEM;
+               goto cache_error;
+       }
+       event->chan = chan;
+       event->filter = filter;
+       event->id = chan->free_event_id++;
+       event->instrumentation = itype;
+       event->evtype = LTTNG_TYPE_EVENT;
+       INIT_LIST_HEAD(&event->bytecode_runtime_head);
+       INIT_LIST_HEAD(&event->enablers_ref_head);
+
+       switch (itype) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               /* Event will be enabled by enabler sync. */
+               event->enabled = 0;
+               event->registered = 0;
+               event->desc = lttng_event_get(event_name);
+               if (!event->desc) {
+                       ret = -ENOENT;
+                       goto register_error;
+               }
+               /* Populate lttng_event structure before event registration. */
+               smp_wmb();
+               break;
+       case LTTNG_KERNEL_KPROBE:
+               /*
+                * Needs to be explicitly enabled after creation, since
+                * we may want to apply filters.
+                */
+               event->enabled = 0;
+               event->registered = 1;
+               /*
+                * Populate lttng_event structure before event
+                * registration.
+                */
+               smp_wmb();
+               ret = lttng_kprobes_register(event_name,
+                               event_param->u.kprobe.symbol_name,
+                               event_param->u.kprobe.offset,
+                               event_param->u.kprobe.addr,
+                               event);
+               if (ret) {
+                       ret = -EINVAL;
+                       goto register_error;
+               }
+               ret = try_module_get(event->desc->owner);
+               WARN_ON_ONCE(!ret);
+               break;
+       case LTTNG_KERNEL_KRETPROBE:
+       {
+               struct lttng_event *event_return;
+
+               /*
+                * A kretprobe defines two events: entry and return.
+                * Both need to be explicitly enabled after creation,
+                * since we may want to apply filters.
+                */
+               event->enabled = 0;
+               event->registered = 1;
+               event_return =
+                       kmem_cache_zalloc(event_cache, GFP_KERNEL);
+               if (!event_return) {
+                       ret = -ENOMEM;
+                       goto register_error;
+               }
+               event_return->chan = chan;
+               event_return->filter = filter;
+               event_return->id = chan->free_event_id++;
+               event_return->enabled = 0;
+               event_return->registered = 1;
+               event_return->instrumentation = itype;
+               /*
+                * Populate lttng_event structure before kretprobe registration.
+                */
+               smp_wmb();
+               ret = lttng_kretprobes_register(event_name,
+                               event_param->u.kretprobe.symbol_name,
+                               event_param->u.kretprobe.offset,
+                               event_param->u.kretprobe.addr,
+                               event, event_return);
+               if (ret) {
+                       kmem_cache_free(event_cache, event_return);
+                       ret = -EINVAL;
+                       goto register_error;
+               }
+               /* Take 2 refs on the module: one per event. */
+               ret = try_module_get(event->desc->owner);
+               WARN_ON_ONCE(!ret);
+               ret = try_module_get(event->desc->owner);
+               WARN_ON_ONCE(!ret);
+               ret = _lttng_event_metadata_statedump(chan->session, chan,
+                                                   event_return);
+               WARN_ON_ONCE(ret > 0);
+               if (ret) {
+                       kmem_cache_free(event_cache, event_return);
+                       module_put(event->desc->owner);
+                       module_put(event->desc->owner);
+                       goto statedump_error;
+               }
+               list_add(&event_return->list, &chan->session->events);
+               break;
+       }
+       case LTTNG_KERNEL_NOOP:
+       case LTTNG_KERNEL_SYSCALL:
+               /*
+                * Needs to be explicitly enabled after creation, since
+                * we may want to apply filters.
+                */
+               event->enabled = 0;
+               event->registered = 0;
+               event->desc = event_desc;
+               if (!event->desc) {
+                       ret = -EINVAL;
+                       goto register_error;
+               }
+               break;
+       case LTTNG_KERNEL_UPROBE:
+               /*
+                * Needs to be explicitly enabled after creation, since
+                * we may want to apply filters.
+                */
+               event->enabled = 0;
+               event->registered = 1;
+
+               /*
+                * Populate lttng_event structure before event
+                * registration.
+                */
+               smp_wmb();
+
+               ret = lttng_uprobes_register(event_param->name,
+                               event_param->u.uprobe.fd,
+                               event);
+               if (ret)
+                       goto register_error;
+               ret = try_module_get(event->desc->owner);
+               WARN_ON_ONCE(!ret);
+               break;
+       case LTTNG_KERNEL_FUNCTION:     /* Fall-through */
+       default:
+               WARN_ON_ONCE(1);
+               ret = -EINVAL;
+               goto register_error;
+       }
+       ret = _lttng_event_metadata_statedump(chan->session, chan, event);
+       WARN_ON_ONCE(ret > 0);
+       if (ret) {
+               goto statedump_error;
+       }
+       hlist_add_head(&event->hlist, head);
+       list_add(&event->list, &chan->session->events);
+       return event;
+
+statedump_error:
+       /* If a statedump error occurs, events will not be readable. */
+register_error:
+       kmem_cache_free(event_cache, event);
+cache_error:
+exist:
+type_error:
+full:
+       return ERR_PTR(ret);
+}
+
+struct lttng_event *lttng_event_create(struct lttng_channel *chan,
+                               struct lttng_kernel_event *event_param,
+                               void *filter,
+                               const struct lttng_event_desc *event_desc,
+                               enum lttng_kernel_instrumentation itype)
+{
+       struct lttng_event *event;
+
+       mutex_lock(&sessions_mutex);
+       event = _lttng_event_create(chan, event_param, filter, event_desc,
+                               itype);
+       mutex_unlock(&sessions_mutex);
+       return event;
+}
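+
+/*
+ * Usage sketch (illustrative): failures are reported through
+ * ERR_PTR(), never NULL, so callers must test with IS_ERR():
+ *
+ *     event = lttng_event_create(chan, &param, NULL, NULL,
+ *                     LTTNG_KERNEL_KPROBE);
+ *     if (IS_ERR(event))
+ *             return PTR_ERR(event);
+ */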
+
+/* Only used for tracepoints for now. */
+static
+void register_event(struct lttng_event *event)
+{
+       const struct lttng_event_desc *desc;
+       int ret = -EINVAL;
+
+       if (event->registered)
+               return;
+
+       desc = event->desc;
+       switch (event->instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
+                                                 desc->probe_callback,
+                                                 event);
+               break;
+       case LTTNG_KERNEL_SYSCALL:
+               ret = lttng_syscall_filter_enable(event->chan,
+                       desc->name);
+               break;
+       case LTTNG_KERNEL_KPROBE:
+       case LTTNG_KERNEL_UPROBE:
+       case LTTNG_KERNEL_KRETPROBE:
+       case LTTNG_KERNEL_NOOP:
+               ret = 0;
+               break;
+       case LTTNG_KERNEL_FUNCTION:     /* Fall-through */
+       default:
+               WARN_ON_ONCE(1);
+       }
+       if (!ret)
+               event->registered = 1;
+}
+
+/*
+ * Only used internally at session destruction.
+ */
+int _lttng_event_unregister(struct lttng_event *event)
+{
+       const struct lttng_event_desc *desc;
+       int ret = -EINVAL;
+
+       if (!event->registered)
+               return 0;
+
+       desc = event->desc;
+       switch (event->instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
+                                                 event->desc->probe_callback,
+                                                 event);
+               break;
+       case LTTNG_KERNEL_KPROBE:
+               lttng_kprobes_unregister(event);
+               ret = 0;
+               break;
+       case LTTNG_KERNEL_KRETPROBE:
+               lttng_kretprobes_unregister(event);
+               ret = 0;
+               break;
+       case LTTNG_KERNEL_SYSCALL:
+               ret = lttng_syscall_filter_disable(event->chan,
+                       desc->name);
+               break;
+       case LTTNG_KERNEL_NOOP:
+               ret = 0;
+               break;
+       case LTTNG_KERNEL_UPROBE:
+               lttng_uprobes_unregister(event);
+               ret = 0;
+               break;
+       case LTTNG_KERNEL_FUNCTION:     /* Fall-through */
+       default:
+               WARN_ON_ONCE(1);
+       }
+       if (!ret)
+               event->registered = 0;
+       return ret;
+}
+
+/*
+ * Only used internally at session destruction.
+ */
+static
+void _lttng_event_destroy(struct lttng_event *event)
+{
+       switch (event->instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               lttng_event_put(event->desc);
+               break;
+       case LTTNG_KERNEL_KPROBE:
+               module_put(event->desc->owner);
+               lttng_kprobes_destroy_private(event);
+               break;
+       case LTTNG_KERNEL_KRETPROBE:
+               module_put(event->desc->owner);
+               lttng_kretprobes_destroy_private(event);
+               break;
+       case LTTNG_KERNEL_NOOP:
+       case LTTNG_KERNEL_SYSCALL:
+               break;
+       case LTTNG_KERNEL_UPROBE:
+               module_put(event->desc->owner);
+               lttng_uprobes_destroy_private(event);
+               break;
+       case LTTNG_KERNEL_FUNCTION:     /* Fall-through */
+       default:
+               WARN_ON_ONCE(1);
+       }
+       list_del(&event->list);
+       lttng_destroy_context(event->ctx);
+       kmem_cache_free(event_cache, event);
+}
+
+struct lttng_id_tracker *get_tracker(struct lttng_session *session,
+               enum tracker_type tracker_type)
+{
+       switch (tracker_type) {
+       case TRACKER_PID:
+               return &session->pid_tracker;
+       case TRACKER_VPID:
+               return &session->vpid_tracker;
+       case TRACKER_UID:
+               return &session->uid_tracker;
+       case TRACKER_VUID:
+               return &session->vuid_tracker;
+       case TRACKER_GID:
+               return &session->gid_tracker;
+       case TRACKER_VGID:
+               return &session->vgid_tracker;
+       default:
+               WARN_ON_ONCE(1);
+               return NULL;
+       }
+}
+
+int lttng_session_track_id(struct lttng_session *session,
+               enum tracker_type tracker_type, int id)
+{
+       struct lttng_id_tracker *tracker;
+       int ret;
+
+       tracker = get_tracker(session, tracker_type);
+       if (!tracker)
+               return -EINVAL;
+       if (id < -1)
+               return -EINVAL;
+       mutex_lock(&sessions_mutex);
+       if (id == -1) {
+               /* Track all IDs: destroy the tracker. */
+               lttng_id_tracker_destroy(tracker, true);
+               ret = 0;
+       } else {
+               ret = lttng_id_tracker_add(tracker, id);
+       }
+       mutex_unlock(&sessions_mutex);
+       return ret;
+}
+
+int lttng_session_untrack_id(struct lttng_session *session,
+               enum tracker_type tracker_type, int id)
+{
+       struct lttng_id_tracker *tracker;
+       int ret;
+
+       tracker = get_tracker(session, tracker_type);
+       if (!tracker)
+               return -EINVAL;
+       if (id < -1)
+               return -EINVAL;
+       mutex_lock(&sessions_mutex);
+       if (id == -1) {
+               /* Untrack all IDs: replace with an empty tracker. */
+               ret = lttng_id_tracker_empty_set(tracker);
+       } else {
+               ret = lttng_id_tracker_del(tracker, id);
+       }
+       mutex_unlock(&sessions_mutex);
+       return ret;
+}
+
+static
+void *id_list_start(struct seq_file *m, loff_t *pos)
+{
+       struct lttng_id_tracker *id_tracker = m->private;
+       struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
+       struct lttng_id_hash_node *e;
+       int iter = 0, i;
+
+       mutex_lock(&sessions_mutex);
+       if (id_tracker_p) {
+               for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
+                       struct hlist_head *head = &id_tracker_p->id_hash[i];
+
+                       lttng_hlist_for_each_entry(e, head, hlist) {
+                               if (iter++ >= *pos)
+                                       return e;
+                       }
+               }
+       } else {
+               /* ID tracker disabled. */
+               if (iter >= *pos && iter == 0) {
+                       return id_tracker_p;    /* empty tracker */
+               }
+               iter++;
+       }
+       /* End of list */
+       return NULL;
+}
+
+/* Called with sessions_mutex held. */
+static
+void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
+{
+       struct lttng_id_tracker *id_tracker = m->private;
+       struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
+       struct lttng_id_hash_node *e;
+       int iter = 0, i;
+
+       (*ppos)++;
+       if (id_tracker_p) {
+               for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
+                       struct hlist_head *head = &id_tracker_p->id_hash[i];
+
+                       lttng_hlist_for_each_entry(e, head, hlist) {
+                               if (iter++ >= *ppos)
+                                       return e;
+                       }
+               }
+       } else {
+               /* ID tracker disabled. */
+               if (iter >= *ppos && iter == 0)
+                       return p;       /* empty tracker */
+               iter++;
+       }
+
+       /* End of list */
+       return NULL;
+}
+
+static
+void id_list_stop(struct seq_file *m, void *p)
+{
+       mutex_unlock(&sessions_mutex);
+}
+
+static
+int id_list_show(struct seq_file *m, void *p)
+{
+       struct lttng_id_tracker *id_tracker = m->private;
+       struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
+       int id;
+
+       if (p == id_tracker_p) {
+               /* Tracker disabled. */
+               id = -1;
+       } else {
+               const struct lttng_id_hash_node *e = p;
+
+               id = lttng_id_tracker_get_node_id(e);
+       }
+       switch (id_tracker->tracker_type) {
+       case TRACKER_PID:
+               seq_printf(m,   "process { pid = %d; };\n", id);
+               break;
+       case TRACKER_VPID:
+               seq_printf(m,   "process { vpid = %d; };\n", id);
+               break;
+       case TRACKER_UID:
+               seq_printf(m,   "user { uid = %d; };\n", id);
+               break;
+       case TRACKER_VUID:
+               seq_printf(m,   "user { vuid = %d; };\n", id);
+               break;
+       case TRACKER_GID:
+               seq_printf(m,   "group { gid = %d; };\n", id);
+               break;
+       case TRACKER_VGID:
+               seq_printf(m,   "group { vgid = %d; };\n", id);
+               break;
+       default:
+               seq_printf(m,   "UNKNOWN { field = %d };\n", id);
+       }
+       return 0;
+}
+
+static
+const struct seq_operations lttng_tracker_ids_list_seq_ops = {
+       .start = id_list_start,
+       .next = id_list_next,
+       .stop = id_list_stop,
+       .show = id_list_show,
+};
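+
+/*
+ * Note (illustrative): this follows the standard seq_file iterator
+ * protocol: ->start() takes sessions_mutex and positions the cursor,
+ * ->next() advances it, ->show() prints one tracked ID, and ->stop()
+ * drops the mutex. A disabled tracker is rendered as a single
+ * "id = -1" record, meaning "track everything".
+ */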
+
+static
+int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &lttng_tracker_ids_list_seq_ops);
+}
+
+static
+int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *m = file->private_data;
+       struct lttng_id_tracker *id_tracker = m->private;
+       int ret;
+
+       WARN_ON_ONCE(!id_tracker);
+       ret = seq_release(inode, file);
+       if (!ret)
+               fput(id_tracker->session->file);
+       return ret;
+}
+
+const struct file_operations lttng_tracker_ids_list_fops = {
+       .owner = THIS_MODULE,
+       .open = lttng_tracker_ids_list_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = lttng_tracker_ids_list_release,
+};
+
+int lttng_session_list_tracker_ids(struct lttng_session *session,
+               enum tracker_type tracker_type)
+{
+       struct file *tracker_ids_list_file;
+       struct seq_file *m;
+       int file_fd, ret;
+
+       file_fd = lttng_get_unused_fd();
+       if (file_fd < 0) {
+               ret = file_fd;
+               goto fd_error;
+       }
+
+       tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
+                                         &lttng_tracker_ids_list_fops,
+                                         NULL, O_RDWR);
+       if (IS_ERR(tracker_ids_list_file)) {
+               ret = PTR_ERR(tracker_ids_list_file);
+               goto file_error;
+       }
+       if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
+               ret = -EOVERFLOW;
+               goto refcount_error;
+       }
+       ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
+       if (ret < 0)
+               goto open_error;
+       m = tracker_ids_list_file->private_data;
+
+       m->private = get_tracker(session, tracker_type);
+       BUG_ON(!m->private);
+       fd_install(file_fd, tracker_ids_list_file);
+
+       return file_fd;
+
+open_error:
+       atomic_long_dec(&session->file->f_count);
+refcount_error:
+       fput(tracker_ids_list_file);
+file_error:
+       put_unused_fd(file_fd);
+fd_error:
+       return ret;
+}
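+
+/*
+ * Note (illustrative): the explicit f_count increment pins the
+ * session file for as long as the tracker listing file stays open;
+ * the matching fput() is performed in
+ * lttng_tracker_ids_list_release(), so the listing cannot outlive
+ * its session.
+ */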
+
+/*
+ * Enabler management.
+ */
+static
+int lttng_match_enabler_star_glob(const char *desc_name,
+               const char *pattern)
+{
+       if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
+                       desc_name, LTTNG_SIZE_MAX))
+               return 0;
+       return 1;
+}
+
+static
+int lttng_match_enabler_name(const char *desc_name,
+               const char *name)
+{
+       if (strcmp(desc_name, name))
+               return 0;
+       return 1;
+}
+
+static
+int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
+               struct lttng_enabler *enabler)
+{
+       const char *desc_name, *enabler_name;
+
+       enabler_name = enabler->event_param.name;
+       switch (enabler->event_param.instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               desc_name = desc->name;
+               break;
+       case LTTNG_KERNEL_SYSCALL:
+               desc_name = desc->name;
+               if (!strncmp(desc_name, "compat_", strlen("compat_")))
+                       desc_name += strlen("compat_");
+               if (!strncmp(desc_name, "syscall_exit_",
+                               strlen("syscall_exit_"))) {
+                       desc_name += strlen("syscall_exit_");
+               } else if (!strncmp(desc_name, "syscall_entry_",
+                               strlen("syscall_entry_"))) {
+                       desc_name += strlen("syscall_entry_");
+               } else {
+                       WARN_ON_ONCE(1);
+                       return -EINVAL;
+               }
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               return -EINVAL;
+       }
+       switch (enabler->type) {
+       case LTTNG_ENABLER_STAR_GLOB:
+               return lttng_match_enabler_star_glob(desc_name, enabler_name);
+       case LTTNG_ENABLER_NAME:
+               return lttng_match_enabler_name(desc_name, enabler_name);
+       default:
+               return -EINVAL;
+       }
+}
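+
+/*
+ * Worked example (illustrative): for syscall enablers the descriptor
+ * name is normalized before matching, e.g.
+ *
+ *     "compat_syscall_entry_openat" -> "openat"
+ *
+ * which can then match an enabler pattern such as "open*" under
+ * LTTNG_ENABLER_STAR_GLOB.
+ */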
+
+static
+int lttng_event_match_enabler(struct lttng_event *event,
+               struct lttng_enabler *enabler)
+{
+       if (enabler->event_param.instrumentation != event->instrumentation)
+               return 0;
+       if (lttng_desc_match_enabler(event->desc, enabler)
+                       && event->chan == enabler->chan)
+               return 1;
+       else
+               return 0;
+}
+
+static
+struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
+               struct lttng_enabler *enabler)
+{
+       struct lttng_enabler_ref *enabler_ref;
+
+       list_for_each_entry(enabler_ref,
+                       &event->enablers_ref_head, node) {
+               if (enabler_ref->ref == enabler)
+                       return enabler_ref;
+       }
+       return NULL;
+}
+
+static
+void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
+{
+       struct lttng_session *session = enabler->chan->session;
+       struct lttng_probe_desc *probe_desc;
+       const struct lttng_event_desc *desc;
+       int i;
+       struct list_head *probe_list;
+
+       probe_list = lttng_get_probe_list_head();
+       /*
+        * For each probe event matching our enabler, create an
+        * associated lttng_event if not already present.
+        */
+       list_for_each_entry(probe_desc, probe_list, head) {
+               for (i = 0; i < probe_desc->nr_events; i++) {
+                       int found = 0;
+                       struct hlist_head *head;
+                       const char *event_name;
+                       size_t name_len;
+                       uint32_t hash;
+                       struct lttng_event *event;
+
+                       desc = probe_desc->event_desc[i];
+                       if (!lttng_desc_match_enabler(desc, enabler))
+                               continue;
+                       event_name = desc->name;
+                       name_len = strlen(event_name);
+
+                       /*
+                        * Check if already created.
+                        */
+                       hash = jhash(event_name, name_len, 0);
+                       head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
+                       lttng_hlist_for_each_entry(event, head, hlist) {
+                               if (event->desc == desc
+                                               && event->chan == enabler->chan)
+                                       found = 1;
+                       }
+                       if (found)
+                               continue;
+
+                       /*
+                        * Create the lttng_event associated with this
+                        * probe event.
+                        */
+                       event = _lttng_event_create(enabler->chan,
+                                       NULL, NULL, desc,
+                                       LTTNG_KERNEL_TRACEPOINT);
+                       if (IS_ERR(event)) {
+                               printk(KERN_INFO "Unable to create event %s\n",
+                                       probe_desc->event_desc[i]->name);
+                       }
+               }
+       }
+}
+
+static
+void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
+{
+       int ret;
+
+       ret = lttng_syscalls_register(enabler->chan, NULL);
+       WARN_ON_ONCE(ret);
+}
+
+/*
+ * Create struct lttng_event if it is missing and present in the list of
+ * tracepoint probes.
+ * Should be called with sessions mutex held.
+ */
+static
+void lttng_create_event_if_missing(struct lttng_enabler *enabler)
+{
+       switch (enabler->event_param.instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               lttng_create_tracepoint_if_missing(enabler);
+               break;
+       case LTTNG_KERNEL_SYSCALL:
+               lttng_create_syscall_if_missing(enabler);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+}
+
+/*
+ * Create events associated with an enabler (if not already present),
+ * and add backward reference from the event to the enabler.
+ * Should be called with sessions mutex held.
+ */
+static
+int lttng_enabler_ref_events(struct lttng_enabler *enabler)
+{
+       struct lttng_session *session = enabler->chan->session;
+       struct lttng_event *event;
+
+       /* First ensure that probe events are created for this enabler. */
+       lttng_create_event_if_missing(enabler);
+
+       /* For each event matching enabler in session event list. */
+       list_for_each_entry(event, &session->events, list) {
+               struct lttng_enabler_ref *enabler_ref;
+
+               if (!lttng_event_match_enabler(event, enabler))
+                       continue;
+               enabler_ref = lttng_event_enabler_ref(event, enabler);
+               if (!enabler_ref) {
+                       /*
+                        * If no backward ref, create it.
+                        * Add backward ref from event to enabler.
+                        */
+                       enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
+                       if (!enabler_ref)
+                               return -ENOMEM;
+                       enabler_ref->ref = enabler;
+                       list_add(&enabler_ref->node,
+                               &event->enablers_ref_head);
+               }
+
+               /*
+                * Link filter bytecodes if not linked yet.
+                */
+               lttng_enabler_event_link_bytecode(event, enabler);
+
+               /* TODO: merge event context. */
+       }
+       return 0;
+}
+
+/*
+ * Called at module load time: connect the probes to all matching
+ * enablers in every session.
+ * Called with the sessions lock held.
+ */
+int lttng_fix_pending_events(void)
+{
+       struct lttng_session *session;
+
+       list_for_each_entry(session, &sessions, list)
+               lttng_session_lazy_sync_enablers(session);
+       return 0;
+}
+
+struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+               struct lttng_kernel_event *event_param,
+               struct lttng_channel *chan)
+{
+       struct lttng_enabler *enabler;
+
+       enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
+       if (!enabler)
+               return NULL;
+       enabler->type = type;
+       INIT_LIST_HEAD(&enabler->filter_bytecode_head);
+       memcpy(&enabler->event_param, event_param,
+               sizeof(enabler->event_param));
+       enabler->chan = chan;
+       /* ctx left NULL */
+       enabler->enabled = 0;
+       enabler->evtype = LTTNG_TYPE_ENABLER;
+       mutex_lock(&sessions_mutex);
+       list_add(&enabler->node, &enabler->chan->session->enablers_head);
+       lttng_session_lazy_sync_enablers(enabler->chan->session);
+       mutex_unlock(&sessions_mutex);
+       return enabler;
+}
+
+int lttng_enabler_enable(struct lttng_enabler *enabler)
+{
+       mutex_lock(&sessions_mutex);
+       enabler->enabled = 1;
+       lttng_session_lazy_sync_enablers(enabler->chan->session);
+       mutex_unlock(&sessions_mutex);
+       return 0;
+}
+
+int lttng_enabler_disable(struct lttng_enabler *enabler)
+{
+       mutex_lock(&sessions_mutex);
+       enabler->enabled = 0;
+       lttng_session_lazy_sync_enablers(enabler->chan->session);
+       mutex_unlock(&sessions_mutex);
+       return 0;
+}
+
+int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+               struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+       struct lttng_filter_bytecode_node *bytecode_node;
+       uint32_t bytecode_len;
+       int ret;
+
+       ret = get_user(bytecode_len, &bytecode->len);
+       if (ret)
+               return ret;
+       bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
+                       GFP_KERNEL);
+       if (!bytecode_node)
+               return -ENOMEM;
+       ret = copy_from_user(&bytecode_node->bc, bytecode,
+               sizeof(*bytecode) + bytecode_len);
+       if (ret)
+               goto error_free;
+       bytecode_node->enabler = enabler;
+       /* Enforce length based on allocated size */
+       bytecode_node->bc.len = bytecode_len;
+       list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
+       lttng_session_lazy_sync_enablers(enabler->chan->session);
+       return 0;
+
+error_free:
+       kfree(bytecode_node);
+       return ret;
+}
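+
+/*
+ * Illustrative sizing note: the node is allocated with room for the
+ * bytecode payload inline. For a hypothetical 64-byte filter program,
+ * kzalloc() reserves sizeof(struct lttng_filter_bytecode_node) + 64
+ * bytes, and the copy_from_user() above copies the user-supplied header
+ * and program into the trailing storage.
+ */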
+
+int lttng_event_add_callsite(struct lttng_event *event,
+               struct lttng_kernel_event_callsite __user *callsite)
+{
+       switch (event->instrumentation) {
+       case LTTNG_KERNEL_UPROBE:
+               return lttng_uprobes_add_callsite(event, callsite);
+       default:
+               return -EINVAL;
+       }
+}
+
+int lttng_enabler_attach_context(struct lttng_enabler *enabler,
+               struct lttng_kernel_context *context_param)
+{
+       return -ENOSYS;
+}
+
+static
+void lttng_enabler_destroy(struct lttng_enabler *enabler)
+{
+       struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
+
+       /* Destroy filter bytecode */
+       list_for_each_entry_safe(filter_node, tmp_filter_node,
+                       &enabler->filter_bytecode_head, node) {
+               kfree(filter_node);
+       }
+
+       /* Destroy contexts */
+       lttng_destroy_context(enabler->ctx);
+
+       list_del(&enabler->node);
+       kfree(enabler);
+}
+
+/*
+ * lttng_session_sync_enablers should be called just before starting a
+ * session.
+ * Should be called with sessions mutex held.
+ */
+static
+void lttng_session_sync_enablers(struct lttng_session *session)
+{
+       struct lttng_enabler *enabler;
+       struct lttng_event *event;
+
+       list_for_each_entry(enabler, &session->enablers_head, node)
+               lttng_enabler_ref_events(enabler);
+       /*
+        * For each event, if at least one of its enablers is enabled,
+        * and its channel and session transient states are enabled, we
+        * enable the event, else we disable it.
+        */
+       list_for_each_entry(event, &session->events, list) {
+               struct lttng_enabler_ref *enabler_ref;
+               struct lttng_bytecode_runtime *runtime;
+               int enabled = 0, has_enablers_without_bytecode = 0;
+
+               switch (event->instrumentation) {
+               case LTTNG_KERNEL_TRACEPOINT:
+               case LTTNG_KERNEL_SYSCALL:
+                       /* Enable events */
+                       list_for_each_entry(enabler_ref,
+                                       &event->enablers_ref_head, node) {
+                               if (enabler_ref->ref->enabled) {
+                                       enabled = 1;
+                                       break;
+                               }
+                       }
+                       break;
+               default:
+                       /* Not handled with lazy sync. */
+                       continue;
+               }
+               /*
+                * The enabled state is the union of the enablers' states,
+                * intersected with the session and channel transient
+                * enable states.
+                */
+               enabled = enabled && session->tstate && event->chan->tstate;
+
+               WRITE_ONCE(event->enabled, enabled);
+               /*
+                * Sync tracepoint registration with event enabled
+                * state.
+                */
+               if (enabled) {
+                       register_event(event);
+               } else {
+                       _lttng_event_unregister(event);
+               }
+
+               /* Check if the event has any enabled enabler without filter bytecode. */
+               list_for_each_entry(enabler_ref,
+                               &event->enablers_ref_head, node) {
+                       if (enabler_ref->ref->enabled
+                                       && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+                               has_enablers_without_bytecode = 1;
+                               break;
+                       }
+               }
+               event->has_enablers_without_bytecode =
+                       has_enablers_without_bytecode;
+
+               /* Enable filters */
+               list_for_each_entry(runtime,
+                               &event->bytecode_runtime_head, node)
+                       lttng_filter_sync_state(runtime);
+       }
+}
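+
+/*
+ * Illustrative example of the resulting state: an event with enablers E1
+ * (enabled) and E2 (disabled), in a session with tstate = 1 and a channel
+ * with tstate = 0, ends up disabled, since
+ *
+ *   enabled = (E1 || E2) && session->tstate && chan->tstate
+ *           = (1 || 0) && 1 && 0 = 0.
+ */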
+
+/*
+ * Apply enablers to session events, adding events to session if need
+ * be. It is required after each modification applied to an active
+ * session, and right before session "start".
+ * "lazy" sync means we only sync if required.
+ * Should be called with sessions mutex held.
+ */
+static
+void lttng_session_lazy_sync_enablers(struct lttng_session *session)
+{
+       /* We can skip if session is not active */
+       if (!session->active)
+               return;
+       lttng_session_sync_enablers(session);
+}
+
+/*
+ * Serialize at most one packet worth of metadata into a metadata
+ * channel.
+ * We grab the metadata cache mutex to get exclusive access to our metadata
+ * buffer and to the metadata cache. Exclusive access to the metadata buffer
+ * allows us to do racy operations, such as looking for the remaining space
+ * left in a packet and writing to it, since mutual exclusion protects us from
+ * concurrent writes. Mutual exclusion on the metadata cache allows us to read
+ * the cache content without racing against reallocation of the cache by
+ * updates.
+ * Returns the number of bytes written in the channel, 0 if no data
+ * was written and a negative value on error.
+ */
+int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
+               struct channel *chan)
+{
+       struct lib_ring_buffer_ctx ctx;
+       int ret = 0;
+       size_t len, reserve_len;
+
+       /*
+        * Ensure we support multiple get_next / put sequences followed by
+        * put_next. The metadata cache lock protects reading the metadata
+        * cache. It can indeed be read concurrently by "get_next_subbuf" and
+        * "flush" operations on the buffer invoked by different processes.
+        * Moreover, since the metadata cache memory can be reallocated, we
+        * need to have exclusive access against updates even though we only
+        * read it.
+        */
+       mutex_lock(&stream->metadata_cache->lock);
+       WARN_ON(stream->metadata_in < stream->metadata_out);
+       if (stream->metadata_in != stream->metadata_out)
+               goto end;
+
+       /* Metadata regenerated, change the version. */
+       if (stream->metadata_cache->version != stream->version)
+               stream->version = stream->metadata_cache->version;
+
+       len = stream->metadata_cache->metadata_written -
+               stream->metadata_in;
+       if (!len)
+               goto end;
+       reserve_len = min_t(size_t,
+                       stream->transport->ops.packet_avail_size(chan),
+                       len);
+       lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
+                       sizeof(char), -1);
+       /*
+        * If reservation failed, return an error to the caller.
+        */
+       ret = stream->transport->ops.event_reserve(&ctx, 0);
+       if (ret != 0) {
+               printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
+               goto end;
+       }
+       stream->transport->ops.event_write(&ctx,
+                       stream->metadata_cache->data + stream->metadata_in,
+                       reserve_len);
+       stream->transport->ops.event_commit(&ctx);
+       stream->metadata_in += reserve_len;
+       ret = reserve_len;
+
+end:
+       mutex_unlock(&stream->metadata_cache->lock);
+       return ret;
+}
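+
+/*
+ * Illustrative example (hypothetical numbers): with metadata_written =
+ * 8192, metadata_in = metadata_out = 4096 and a packet_avail_size() of
+ * 1024 bytes, one call reserves and writes a 1024-byte packet and
+ * advances metadata_in to 5120. The next call only proceeds once the
+ * reader has consumed the packet and metadata_out catches up to
+ * metadata_in.
+ */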
+
+/*
+ * Write the metadata to the metadata cache.
+ * Must be called with sessions_mutex held.
+ * The metadata cache lock protects us from concurrent read access from
+ * thread outputting metadata content to ring buffer.
+ */
+int lttng_metadata_printf(struct lttng_session *session,
+                         const char *fmt, ...)
+{
+       char *str;
+       size_t len;
+       va_list ap;
+       struct lttng_metadata_stream *stream;
+
+       WARN_ON_ONCE(!READ_ONCE(session->active));
+
+       va_start(ap, fmt);
+       str = kvasprintf(GFP_KERNEL, fmt, ap);
+       va_end(ap);
+       if (!str)
+               return -ENOMEM;
+
+       len = strlen(str);
+       mutex_lock(&session->metadata_cache->lock);
+       if (session->metadata_cache->metadata_written + len >
+                       session->metadata_cache->cache_alloc) {
+               char *tmp_cache_realloc;
+               unsigned int tmp_cache_alloc_size;
+
+               tmp_cache_alloc_size = max_t(unsigned int,
+                               session->metadata_cache->cache_alloc + len,
+                               session->metadata_cache->cache_alloc << 1);
+               tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
+               if (!tmp_cache_realloc)
+                       goto err;
+               if (session->metadata_cache->data) {
+                       memcpy(tmp_cache_realloc,
+                               session->metadata_cache->data,
+                               session->metadata_cache->cache_alloc);
+                       vfree(session->metadata_cache->data);
+               }
+
+               session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
+               session->metadata_cache->data = tmp_cache_realloc;
+       }
+       memcpy(session->metadata_cache->data +
+                       session->metadata_cache->metadata_written,
+                       str, len);
+       session->metadata_cache->metadata_written += len;
+       mutex_unlock(&session->metadata_cache->lock);
+       kfree(str);
+
+       list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
+               wake_up_interruptible(&stream->read_wait);
+
+       return 0;
+
+err:
+       mutex_unlock(&session->metadata_cache->lock);
+       kfree(str);
+       return -ENOMEM;
+}
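+
+/*
+ * Cache growth example (illustrative): appending len = 100 bytes to a
+ * cache with cache_alloc = 4096 and metadata_written = 4050 reallocates
+ * to max(4096 + 100, 4096 << 1) = 8192 bytes, so the cache at least
+ * doubles and amortizes the cost of repeated appends.
+ */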
+
+static
+int print_tabs(struct lttng_session *session, size_t nesting)
+{
+       size_t i;
+
+       for (i = 0; i < nesting; i++) {
+               int ret;
+
+               ret = lttng_metadata_printf(session, "  ");
+               if (ret) {
+                       return ret;
+               }
+       }
+       return 0;
+}
+
+static
+int lttng_field_name_statedump(struct lttng_session *session,
+               const struct lttng_event_field *field,
+               size_t nesting)
+{
+       return lttng_metadata_printf(session, " _%s;\n", field->name);
+}
+
+static
+int _lttng_integer_type_statedump(struct lttng_session *session,
+               const struct lttng_type *type,
+               size_t nesting)
+{
+       int ret;
+
+       WARN_ON_ONCE(type->atype != atype_integer);
+       ret = print_tabs(session, nesting);
+       if (ret)
+               return ret;
+       ret = lttng_metadata_printf(session,
+               "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
+               type->u.integer.size,
+               type->u.integer.alignment,
+               type->u.integer.signedness,
+               (type->u.integer.encoding == lttng_encode_none)
+                       ? "none"
+                       : (type->u.integer.encoding == lttng_encode_UTF8)
+                               ? "UTF8"
+                               : "ASCII",
+               type->u.integer.base,
+#if __BYTE_ORDER == __BIG_ENDIAN
+               type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
+#else
+               type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
+#endif
+       );
+       return ret;
+}
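+
+/*
+ * For instance, a native-endian unsigned 32-bit field (assuming 32-bit
+ * alignment) is serialized as:
+ *
+ *   integer { size = 32; align = 32; signed = 0; encoding = none; base = 10; }
+ */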
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_struct_type_statedump(struct lttng_session *session,
+               const struct lttng_type *type,
+               size_t nesting)
+{
+       int ret;
+       uint32_t i, nr_fields;
+       unsigned int alignment;
+
+       WARN_ON_ONCE(type->atype != atype_struct_nestable);
+
+       ret = print_tabs(session, nesting);
+       if (ret)
+               return ret;
+       ret = lttng_metadata_printf(session,
+               "struct {\n");
+       if (ret)
+               return ret;
+       nr_fields = type->u.struct_nestable.nr_fields;
+       for (i = 0; i < nr_fields; i++) {
+               const struct lttng_event_field *iter_field;
+
+               iter_field = &type->u.struct_nestable.fields[i];
+               ret = _lttng_field_statedump(session, iter_field, nesting + 1);
+               if (ret)
+                       return ret;
+       }
+       ret = print_tabs(session, nesting);
+       if (ret)
+               return ret;
+       alignment = type->u.struct_nestable.alignment;
+       if (alignment) {
+               ret = lttng_metadata_printf(session,
+                       "} align(%u)",
+                       alignment);
+       } else {
+               ret = lttng_metadata_printf(session,
+                       "}");
+       }
+       return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_struct_field_statedump(struct lttng_session *session,
+               const struct lttng_event_field *field,
+               size_t nesting)
+{
+       int ret;
+
+       ret = _lttng_struct_type_statedump(session,
+                       &field->type, nesting);
+       if (ret)
+               return ret;
+       return lttng_field_name_statedump(session, field, nesting);
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_variant_type_statedump(struct lttng_session *session,
+               const struct lttng_type *type,
+               size_t nesting)
+{
+       int ret;
+       uint32_t i, nr_choices;
+
+       WARN_ON_ONCE(type->atype != atype_variant_nestable);
+       /*
+        * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
+        */
+       if (type->u.variant_nestable.alignment != 0)
+               return -EINVAL;
+       ret = print_tabs(session, nesting);
+       if (ret)
+               return ret;
+       ret = lttng_metadata_printf(session,
+               "variant <_%s> {\n",
+               type->u.variant_nestable.tag_name);
+       if (ret)
+               return ret;
+       nr_choices = type->u.variant_nestable.nr_choices;
+       for (i = 0; i < nr_choices; i++) {
+               const struct lttng_event_field *iter_field;
+
+               iter_field = &type->u.variant_nestable.choices[i];
+               ret = _lttng_field_statedump(session, iter_field, nesting + 1);
+               if (ret)
+                       return ret;
+       }
+       ret = print_tabs(session, nesting);
+       if (ret)
+               return ret;
+       ret = lttng_metadata_printf(session,
+               "}");
+       return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_variant_field_statedump(struct lttng_session *session,
+               const struct lttng_event_field *field,
+               size_t nesting)
+{
+       int ret;
+
+       ret = _lttng_variant_type_statedump(session,
+                       &field->type, nesting);
+       if (ret)
+               return ret;
+       return lttng_field_name_statedump(session, field, nesting);
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_array_field_statedump(struct lttng_session *session,
+               const struct lttng_event_field *field,
+               size_t nesting)
+{
+       int ret;
+       const struct lttng_type *elem_type;
+
+       WARN_ON_ONCE(field->type.atype != atype_array_nestable);
+
+       if (field->type.u.array_nestable.alignment) {
+               ret = print_tabs(session, nesting);
+               if (ret)
+                       return ret;
+               ret = lttng_metadata_printf(session,
+               "struct { } align(%u) _%s_padding;\n",
+                               field->type.u.array_nestable.alignment * CHAR_BIT,
+                               field->name);
+               if (ret)
+                       return ret;
+       }
+       /*
+        * Nested compound types: only arrays of structures and variants
+        * are currently supported.
+        */
+       elem_type = field->type.u.array_nestable.elem_type;
+       switch (elem_type->atype) {
+       case atype_integer:
+       case atype_struct_nestable:
+       case atype_variant_nestable:
+               ret = _lttng_type_statedump(session, elem_type, nesting);
+               if (ret)
+                       return ret;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+       ret = lttng_metadata_printf(session,
+               " _%s[%u];\n",
+               field->name,
+               field->type.u.array_nestable.length);
+       return ret;
+}
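+
+/*
+ * For example, a 16-element uint8_t array field named "buf" (hypothetical)
+ * is dumped as:
+ *
+ *   integer { size = 8; align = 8; signed = 0; encoding = none; base = 10; } _buf[16];
+ */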
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_sequence_field_statedump(struct lttng_session *session,
+               const struct lttng_event_field *field,
+               size_t nesting)
+{
+       int ret;
+       const char *length_name;
+       const struct lttng_type *elem_type;
+
+       WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
+
+       length_name = field->type.u.sequence_nestable.length_name;
+
+       if (field->type.u.sequence_nestable.alignment) {
+               ret = print_tabs(session, nesting);
+               if (ret)
+                       return ret;
+               ret = lttng_metadata_printf(session,
+               "struct { } align(%u) _%s_padding;\n",
+                               field->type.u.sequence_nestable.alignment * CHAR_BIT,
+                               field->name);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * Nested compound types: only sequences of structures and variants
+        * are currently supported.
+        */
+       elem_type = field->type.u.sequence_nestable.elem_type;
+       switch (elem_type->atype) {
+       case atype_integer:
+       case atype_struct_nestable:
+       case atype_variant_nestable:
+               ret = _lttng_type_statedump(session, elem_type, nesting);
+               if (ret)
+                       return ret;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+       ret = lttng_metadata_printf(session,
+               " _%s[ _%s ];\n",
+               field->name,
+               field->type.u.sequence_nestable.length_name);
+       return ret;
+}
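+
+/*
+ * For example, a uint8_t sequence field named "data" whose length is
+ * carried by a "len" field (hypothetical) is dumped as:
+ *
+ *   integer { size = 8; align = 8; signed = 0; encoding = none; base = 10; } _data[ _len ];
+ */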
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_enum_type_statedump(struct lttng_session *session,
+               const struct lttng_type *type,
+               size_t nesting)
+{
+       const struct lttng_enum_desc *enum_desc;
+       const struct lttng_type *container_type;
+       int ret;
+       unsigned int i, nr_entries;
+
+       container_type = type->u.enum_nestable.container_type;
+       if (container_type->atype != atype_integer) {
+               ret = -EINVAL;
+               goto end;
+       }
+       enum_desc = type->u.enum_nestable.desc;
+       nr_entries = enum_desc->nr_entries;
+
+       ret = print_tabs(session, nesting);
+       if (ret)
+               goto end;
+       ret = lttng_metadata_printf(session, "enum : ");
+       if (ret)
+               goto end;
+       ret = _lttng_integer_type_statedump(session, container_type, 0);
+       if (ret)
+               goto end;
+       ret = lttng_metadata_printf(session, " {\n");
+       if (ret)
+               goto end;
+       /* Dump all entries */
+       for (i = 0; i < nr_entries; i++) {
+               const struct lttng_enum_entry *entry = &enum_desc->entries[i];
+               int j, len;
+
+               ret = print_tabs(session, nesting + 1);
+               if (ret)
+                       goto end;
+               ret = lttng_metadata_printf(session,
+                               "\"");
+               if (ret)
+                       goto end;
+               len = strlen(entry->string);
+               /* Escape the characters '"' and '\\' */
+               for (j = 0; j < len; j++) {
+                       char c = entry->string[j];
+
+                       switch (c) {
+                       case '"':
+                               ret = lttng_metadata_printf(session,
+                                               "\\\"");
+                               break;
+                       case '\\':
+                               ret = lttng_metadata_printf(session,
+                                               "\\\\");
+                               break;
+                       default:
+                               ret = lttng_metadata_printf(session,
+                                               "%c", c);
+                               break;
+                       }
+                       if (ret)
+                               goto end;
+               }
+               ret = lttng_metadata_printf(session, "\"");
+               if (ret)
+                       goto end;
+
+               if (entry->options.is_auto) {
+                       ret = lttng_metadata_printf(session, ",\n");
+                       if (ret)
+                               goto end;
+               } else {
+                       ret = lttng_metadata_printf(session,
+                                       " = ");
+                       if (ret)
+                               goto end;
+                       if (entry->start.signedness)
+                               ret = lttng_metadata_printf(session,
+                                       "%lld", (long long) entry->start.value);
+                       else
+                               ret = lttng_metadata_printf(session,
+                                       "%llu", entry->start.value);
+                       if (ret)
+                               goto end;
+                       if (entry->start.signedness == entry->end.signedness &&
+                                       entry->start.value
+                                               == entry->end.value) {
+                               ret = lttng_metadata_printf(session,
+                                       ",\n");
+                       } else {
+                               if (entry->end.signedness) {
+                                       ret = lttng_metadata_printf(session,
+                                               " ... %lld,\n",
+                                               (long long) entry->end.value);
+                               } else {
+                                       ret = lttng_metadata_printf(session,
+                                               " ... %llu,\n",
+                                               entry->end.value);
+                               }
+                       }
+                       if (ret)
+                               goto end;
+               }
+       }
+       ret = print_tabs(session, nesting);
+       if (ret)
+               goto end;
+       ret = lttng_metadata_printf(session, "}");
+end:
+       return ret;
+}
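+
+/*
+ * A hypothetical two-entry enumeration over an 8-bit container would be
+ * serialized along these lines:
+ *
+ *   enum : integer { size = 8; ... } {
+ *     "ok" = 0,
+ *     "error" = 1 ... 255,
+ *   }
+ */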
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_enum_field_statedump(struct lttng_session *session,
+               const struct lttng_event_field *field,
+               size_t nesting)
+{
+       int ret;
+
+       ret = _lttng_enum_type_statedump(session, &field->type, nesting);
+       if (ret)
+               return ret;
+       return lttng_field_name_statedump(session, field, nesting);
+}
+
+static
+int _lttng_integer_field_statedump(struct lttng_session *session,
+               const struct lttng_event_field *field,
+               size_t nesting)
+{
+       int ret;
+
+       ret = _lttng_integer_type_statedump(session, &field->type, nesting);
+       if (ret)
+               return ret;
+       return lttng_field_name_statedump(session, field, nesting);
+}
+
+static
+int _lttng_string_type_statedump(struct lttng_session *session,
+               const struct lttng_type *type,
+               size_t nesting)
+{
+       int ret;
+
+       WARN_ON_ONCE(type->atype != atype_string);
+       /* Default encoding is UTF8 */
+       ret = print_tabs(session, nesting);
+       if (ret)
+               return ret;
+       ret = lttng_metadata_printf(session,
+               "string%s",
+               type->u.string.encoding == lttng_encode_ASCII ?
+                       " { encoding = ASCII; }" : "");
+       return ret;
+}
+
+static
+int _lttng_string_field_statedump(struct lttng_session *session,
+               const struct lttng_event_field *field,
+               size_t nesting)
+{
+       int ret;
+
+       WARN_ON_ONCE(field->type.atype != atype_string);
+       ret = _lttng_string_type_statedump(session, &field->type, nesting);
+       if (ret)
+               return ret;
+       return lttng_field_name_statedump(session, field, nesting);
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_type_statedump(struct lttng_session *session,
+               const struct lttng_type *type,
+               size_t nesting)
+{
+       int ret = 0;
+
+       switch (type->atype) {
+       case atype_integer:
+               ret = _lttng_integer_type_statedump(session, type, nesting);
+               break;
+       case atype_enum_nestable:
+               ret = _lttng_enum_type_statedump(session, type, nesting);
+               break;
+       case atype_string:
+               ret = _lttng_string_type_statedump(session, type, nesting);
+               break;
+       case atype_struct_nestable:
+               ret = _lttng_struct_type_statedump(session, type, nesting);
+               break;
+       case atype_variant_nestable:
+               ret = _lttng_variant_type_statedump(session, type, nesting);
+               break;
+
+       /* Nested arrays and sequences are not supported yet. */
+       case atype_array_nestable:
+       case atype_sequence_nestable:
+       default:
+               WARN_ON_ONCE(1);
+               return -EINVAL;
+       }
+       return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_field_statedump(struct lttng_session *session,
+               const struct lttng_event_field *field,
+               size_t nesting)
+{
+       int ret = 0;
+
+       switch (field->type.atype) {
+       case atype_integer:
+               ret = _lttng_integer_field_statedump(session, field, nesting);
+               break;
+       case atype_enum_nestable:
+               ret = _lttng_enum_field_statedump(session, field, nesting);
+               break;
+       case atype_string:
+               ret = _lttng_string_field_statedump(session, field, nesting);
+               break;
+       case atype_struct_nestable:
+               ret = _lttng_struct_field_statedump(session, field, nesting);
+               break;
+       case atype_array_nestable:
+               ret = _lttng_array_field_statedump(session, field, nesting);
+               break;
+       case atype_sequence_nestable:
+               ret = _lttng_sequence_field_statedump(session, field, nesting);
+               break;
+       case atype_variant_nestable:
+               ret = _lttng_variant_field_statedump(session, field, nesting);
+               break;
+
+       default:
+               WARN_ON_ONCE(1);
+               return -EINVAL;
+       }
+       return ret;
+}
+
+static
+int _lttng_context_metadata_statedump(struct lttng_session *session,
+                                   struct lttng_ctx *ctx)
+{
+       int ret = 0;
+       int i;
+
+       if (!ctx)
+               return 0;
+       for (i = 0; i < ctx->nr_fields; i++) {
+               const struct lttng_ctx_field *field = &ctx->fields[i];
+
+               ret = _lttng_field_statedump(session, &field->event_field, 2);
+               if (ret)
+                       return ret;
+       }
+       return ret;
+}
+
+static
+int _lttng_fields_metadata_statedump(struct lttng_session *session,
+                                  struct lttng_event *event)
+{
+       const struct lttng_event_desc *desc = event->desc;
+       int ret = 0;
+       int i;
+
+       for (i = 0; i < desc->nr_fields; i++) {
+               const struct lttng_event_field *field = &desc->fields[i];
+
+               ret = _lttng_field_statedump(session, field, 2);
+               if (ret)
+                       return ret;
+       }
+       return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_event_metadata_statedump(struct lttng_session *session,
+                                 struct lttng_channel *chan,
+                                 struct lttng_event *event)
+{
+       int ret = 0;
+
+       if (event->metadata_dumped || !READ_ONCE(session->active))
+               return 0;
+       if (chan->channel_type == METADATA_CHANNEL)
+               return 0;
+
+       ret = lttng_metadata_printf(session,
+               "event {\n"
+               "       name = \"%s\";\n"
+               "       id = %u;\n"
+               "       stream_id = %u;\n",
+               event->desc->name,
+               event->id,
+               event->chan->id);
+       if (ret)
+               goto end;
+
+       if (event->ctx) {
+               ret = lttng_metadata_printf(session,
+                       "       context := struct {\n");
+               if (ret)
+                       goto end;
+       }
+       ret = _lttng_context_metadata_statedump(session, event->ctx);
+       if (ret)
+               goto end;
+       if (event->ctx) {
+               ret = lttng_metadata_printf(session,
+                       "       };\n");
+               if (ret)
+                       goto end;
+       }
+
+       ret = lttng_metadata_printf(session,
+               "       fields := struct {\n"
+               );
+       if (ret)
+               goto end;
+
+       ret = _lttng_fields_metadata_statedump(session, event);
+       if (ret)
+               goto end;
+
+       /*
+        * LTTng space reservation can only reserve multiples of the
+        * byte size.
+        */
+       ret = lttng_metadata_printf(session,
+               "       };\n"
+               "};\n\n");
+       if (ret)
+               goto end;
+
+       event->metadata_dumped = 1;
+end:
+       return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_channel_metadata_statedump(struct lttng_session *session,
+                                   struct lttng_channel *chan)
+{
+       int ret = 0;
+
+       if (chan->metadata_dumped || !READ_ONCE(session->active))
+               return 0;
+
+       if (chan->channel_type == METADATA_CHANNEL)
+               return 0;
+
+       WARN_ON_ONCE(!chan->header_type);
+       ret = lttng_metadata_printf(session,
+               "stream {\n"
+               "       id = %u;\n"
+               "       event.header := %s;\n"
+               "       packet.context := struct packet_context;\n",
+               chan->id,
+               chan->header_type == 1 ? "struct event_header_compact" :
+                       "struct event_header_large");
+       if (ret)
+               goto end;
+
+       if (chan->ctx) {
+               ret = lttng_metadata_printf(session,
+                       "       event.context := struct {\n");
+               if (ret)
+                       goto end;
+       }
+       ret = _lttng_context_metadata_statedump(session, chan->ctx);
+       if (ret)
+               goto end;
+       if (chan->ctx) {
+               ret = lttng_metadata_printf(session,
+                       "       };\n");
+               if (ret)
+                       goto end;
+       }
+
+       ret = lttng_metadata_printf(session,
+               "};\n\n");
+
+       chan->metadata_dumped = 1;
+end:
+       return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_stream_packet_context_declare(struct lttng_session *session)
+{
+       return lttng_metadata_printf(session,
+               "struct packet_context {\n"
+               "       uint64_clock_monotonic_t timestamp_begin;\n"
+               "       uint64_clock_monotonic_t timestamp_end;\n"
+               "       uint64_t content_size;\n"
+               "       uint64_t packet_size;\n"
+               "       uint64_t packet_seq_num;\n"
+               "       unsigned long events_discarded;\n"
+               "       uint32_t cpu_id;\n"
+               "};\n\n"
+               );
+}
+
+/*
+ * Compact header:
+ * id: range: 0 - 30.
+ * id 31 is reserved to indicate an extended header.
+ *
+ * Large header:
+ * id: range: 0 - 65534.
+ * id 65535 is reserved to indicate an extended header.
+ *
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_event_header_declare(struct lttng_session *session)
+{
+       return lttng_metadata_printf(session,
+       "struct event_header_compact {\n"
+       "       enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
+       "       variant <id> {\n"
+       "               struct {\n"
+       "                       uint27_clock_monotonic_t timestamp;\n"
+       "               } compact;\n"
+       "               struct {\n"
+       "                       uint32_t id;\n"
+       "                       uint64_clock_monotonic_t timestamp;\n"
+       "               } extended;\n"
+       "       } v;\n"
+       "} align(%u);\n"
+       "\n"
+       "struct event_header_large {\n"
+       "       enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
+       "       variant <id> {\n"
+       "               struct {\n"
+       "                       uint32_clock_monotonic_t timestamp;\n"
+       "               } compact;\n"
+       "               struct {\n"
+       "                       uint32_t id;\n"
+       "                       uint64_clock_monotonic_t timestamp;\n"
+       "               } extended;\n"
+       "       } v;\n"
+       "} align(%u);\n\n",
+       lttng_alignof(uint32_t) * CHAR_BIT,
+       lttng_alignof(uint16_t) * CHAR_BIT
+       );
+}
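+
+/*
+ * For example, an event with id 12 fits the compact header: a 5-bit id of
+ * 12 followed by a 27-bit timestamp. An event with id 40 must set the
+ * 5-bit id to 31 (extended) and carry the full 32-bit id plus a 64-bit
+ * timestamp.
+ */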
+
+/*
+ * Approximation of NTP time of day to clock monotonic correlation,
+ * taken at start of trace.
+ * Yes, this is only an approximation. Yes, we can (and will) do better
+ * in future versions.
+ * This function may return a negative offset. This can happen if the
+ * system sets the REALTIME clock to 0 after boot.
+ *
+ * Use the 64-bit timespec on kernels that have it; this makes 32-bit
+ * architectures y2038 compliant.
+ */
+static
+int64_t measure_clock_offset(void)
+{
+       uint64_t monotonic_avg, monotonic[2], realtime;
+       uint64_t tcf = trace_clock_freq();
+       int64_t offset;
+       unsigned long flags;
+#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
+       struct timespec64 rts = { 0, 0 };
+#else
+       struct timespec rts = { 0, 0 };
+#endif
+
+       /* Disable interrupts to increase correlation precision. */
+       local_irq_save(flags);
+       monotonic[0] = trace_clock_read64();
+#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
+       ktime_get_real_ts64(&rts);
+#else
+       getnstimeofday(&rts);
+#endif
+       monotonic[1] = trace_clock_read64();
+       local_irq_restore(flags);
+
+       monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
+       realtime = (uint64_t) rts.tv_sec * tcf;
+       if (tcf == NSEC_PER_SEC) {
+               realtime += rts.tv_nsec;
+       } else {
+               uint64_t n = rts.tv_nsec * tcf;
+
+               do_div(n, NSEC_PER_SEC);
+               realtime += n;
+       }
+       offset = (int64_t) realtime - monotonic_avg;
+       return offset;
+}
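+
+/*
+ * Worked example (hypothetical values): with tcf = 1000000000 Hz,
+ * rts = { .tv_sec = 100, .tv_nsec = 500000000 } and
+ * monotonic_avg = 99000000000, the function returns
+ * 100 * 10^9 + 5 * 10^8 - 99 * 10^9 = 1500000000 clock ticks.
+ */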
+
+static
+int print_escaped_ctf_string(struct lttng_session *session, const char *string)
+{
+       int ret = 0;
+       size_t i;
+       char cur;
+
+       i = 0;
+       cur = string[i];
+       while (cur != '\0') {
+               switch (cur) {
+               case '\n':
+                       ret = lttng_metadata_printf(session, "%s", "\\n");
+                       break;
+               case '\\':
+               case '"':
+                       ret = lttng_metadata_printf(session, "%c", '\\');
+                       if (ret)
+                               goto error;
+                       /* We still print the current char */
+                       /* Fallthrough */
+               default:
+                       ret = lttng_metadata_printf(session, "%c", cur);
+                       break;
+               }
+
+               if (ret)
+                       goto error;
+
+               cur = string[++i];
+       }
+error:
+       return ret;
+}
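+
+/*
+ * For example, a session named
+ *   my "fast" trace
+ * is emitted as
+ *   my \"fast\" trace
+ * and an embedded newline becomes the two-character sequence "\n".
+ */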
+
+static
+int print_metadata_escaped_field(struct lttng_session *session, const char *field,
+               const char *field_value)
+{
+       int ret;
+
+       ret = lttng_metadata_printf(session, "  %s = \"", field);
+       if (ret)
+               goto error;
+
+       ret = print_escaped_ctf_string(session, field_value);
+       if (ret)
+               goto error;
+
+       ret = lttng_metadata_printf(session, "\";\n");
+
+error:
+       return ret;
+}
+
+/*
+ * Output metadata into this session's metadata buffers.
+ * Must be called with sessions_mutex held.
+ */
+static
+int _lttng_session_metadata_statedump(struct lttng_session *session)
+{
+       unsigned char *uuid_c = session->uuid.b;
+       unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
+       const char *product_uuid;
+       struct lttng_channel *chan;
+       struct lttng_event *event;
+       int ret = 0;
+
+       if (!READ_ONCE(session->active))
+               return 0;
+       if (session->metadata_dumped)
+               goto skip_session;
+
+       snprintf(uuid_s, sizeof(uuid_s),
+               "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+               uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
+               uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
+               uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
+               uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
+
+       ret = lttng_metadata_printf(session,
+               "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
+               "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
+               "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
+               "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
+               "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
+               "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
+               "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
+               "\n"
+               "trace {\n"
+               "       major = %u;\n"
+               "       minor = %u;\n"
+               "       uuid = \"%s\";\n"
+               "       byte_order = %s;\n"
+               "       packet.header := struct {\n"
+               "               uint32_t magic;\n"
+               "               uint8_t  uuid[16];\n"
+               "               uint32_t stream_id;\n"
+               "               uint64_t stream_instance_id;\n"
+               "       };\n"
+               "};\n\n",
+               lttng_alignof(uint8_t) * CHAR_BIT,
+               lttng_alignof(uint16_t) * CHAR_BIT,
+               lttng_alignof(uint32_t) * CHAR_BIT,
+               lttng_alignof(uint64_t) * CHAR_BIT,
+               sizeof(unsigned long) * CHAR_BIT,
+               lttng_alignof(unsigned long) * CHAR_BIT,
+               CTF_SPEC_MAJOR,
+               CTF_SPEC_MINOR,
+               uuid_s,
+#if __BYTE_ORDER == __BIG_ENDIAN
+               "be"
+#else
+               "le"
+#endif
+               );
+       if (ret)
+               goto end;
+
+       ret = lttng_metadata_printf(session,
+               "env {\n"
+               "       hostname = \"%s\";\n"
+               "       domain = \"kernel\";\n"
+               "       sysname = \"%s\";\n"
+               "       kernel_release = \"%s\";\n"
+               "       kernel_version = \"%s\";\n"
+               "       tracer_name = \"lttng-modules\";\n"
+               "       tracer_major = %d;\n"
+               "       tracer_minor = %d;\n"
+               "       tracer_patchlevel = %d;\n"
+               "       trace_buffering_scheme = \"global\";\n",
+               current->nsproxy->uts_ns->name.nodename,
+               utsname()->sysname,
+               utsname()->release,
+               utsname()->version,
+               LTTNG_MODULES_MAJOR_VERSION,
+               LTTNG_MODULES_MINOR_VERSION,
+               LTTNG_MODULES_PATCHLEVEL_VERSION
+               );
+       if (ret)
+               goto end;
+
+       ret = print_metadata_escaped_field(session, "trace_name", session->name);
+       if (ret)
+               goto end;
+       ret = print_metadata_escaped_field(session, "trace_creation_datetime",
+                       session->creation_time);
+       if (ret)
+               goto end;
+
+       /* Add the product UUID to the 'env' section */
+       product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
+       if (product_uuid) {
+               ret = lttng_metadata_printf(session,
+                               "       product_uuid = \"%s\";\n",
+                               product_uuid
+                               );
+               if (ret)
+                       goto end;
+       }
+
+       /* Close the 'env' section */
+       ret = lttng_metadata_printf(session, "};\n\n");
+       if (ret)
+               goto end;
+
+       ret = lttng_metadata_printf(session,
+               "clock {\n"
+               "       name = \"%s\";\n",
+               trace_clock_name()
+               );
+       if (ret)
+               goto end;
+
+       if (!trace_clock_uuid(clock_uuid_s)) {
+               ret = lttng_metadata_printf(session,
+                       "       uuid = \"%s\";\n",
+                       clock_uuid_s
+                       );
+               if (ret)
+                       goto end;
+       }
+
+       ret = lttng_metadata_printf(session,
+               "       description = \"%s\";\n"
+               "       freq = %llu; /* Frequency, in Hz */\n"
+               "       /* clock value offset from Epoch is: offset * (1/freq) */\n"
+               "       offset = %lld;\n"
+               "};\n\n",
+               trace_clock_description(),
+               (unsigned long long) trace_clock_freq(),
+               (long long) measure_clock_offset()
+               );
+       if (ret)
+               goto end;
+
+       ret = lttng_metadata_printf(session,
+               "typealias integer {\n"
+               "       size = 27; align = 1; signed = false;\n"
+               "       map = clock.%s.value;\n"
+               "} := uint27_clock_monotonic_t;\n"
+               "\n"
+               "typealias integer {\n"
+               "       size = 32; align = %u; signed = false;\n"
+               "       map = clock.%s.value;\n"
+               "} := uint32_clock_monotonic_t;\n"
+               "\n"
+               "typealias integer {\n"
+               "       size = 64; align = %u; signed = false;\n"
+               "       map = clock.%s.value;\n"
+               "} := uint64_clock_monotonic_t;\n\n",
+               trace_clock_name(),
+               lttng_alignof(uint32_t) * CHAR_BIT,
+               trace_clock_name(),
+               lttng_alignof(uint64_t) * CHAR_BIT,
+               trace_clock_name()
+               );
+       if (ret)
+               goto end;
+
+       ret = _lttng_stream_packet_context_declare(session);
+       if (ret)
+               goto end;
+
+       ret = _lttng_event_header_declare(session);
+       if (ret)
+               goto end;
+
+skip_session:
+       list_for_each_entry(chan, &session->chan, list) {
+               ret = _lttng_channel_metadata_statedump(session, chan);
+               if (ret)
+                       goto end;
+       }
+
+       list_for_each_entry(event, &session->events, list) {
+               ret = _lttng_event_metadata_statedump(session, event->chan, event);
+               if (ret)
+                       goto end;
+       }
+       session->metadata_dumped = 1;
+end:
+       return ret;
+}
+
+/**
+ * lttng_transport_register - LTT transport registration
+ * @transport: transport structure
+ *
+ * Registers a transport which can be used as output to extract the data out of
+ * LTTng. The module calling this registration function must ensure that no
+ * trap-inducing code will be executed by the transport functions. E.g.
+ * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
+ * is made visible to the transport function. This registration acts as a
+ * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
+ * after its registration must it synchronize the TLBs.
+ */
+void lttng_transport_register(struct lttng_transport *transport)
+{
+       /*
+        * Make sure no page fault can be triggered by the module about to be
+        * registered. We deal with this here so we don't have to call
+        * vmalloc_sync_mappings() in each module's init.
+        */
+       wrapper_vmalloc_sync_mappings();
+
+       mutex_lock(&sessions_mutex);
+       list_add_tail(&transport->node, &lttng_transport_list);
+       mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_transport_register);
+
+/**
+ * lttng_transport_unregister - LTT transport unregistration
+ * @transport: transport structure
+ */
+void lttng_transport_unregister(struct lttng_transport *transport)
+{
+       mutex_lock(&sessions_mutex);
+       list_del(&transport->node);
+       mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_transport_unregister);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+enum cpuhp_state lttng_hp_prepare;
+enum cpuhp_state lttng_hp_online;
+
+static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
+{
+       struct lttng_cpuhp_node *lttng_node;
+
+       lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+       switch (lttng_node->component) {
+       case LTTNG_RING_BUFFER_FRONTEND:
+               return 0;
+       case LTTNG_RING_BUFFER_BACKEND:
+               return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
+       case LTTNG_RING_BUFFER_ITER:
+               return 0;
+       case LTTNG_CONTEXT_PERF_COUNTERS:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
+{
+       struct lttng_cpuhp_node *lttng_node;
+
+       lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+       switch (lttng_node->component) {
+       case LTTNG_RING_BUFFER_FRONTEND:
+               return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
+       case LTTNG_RING_BUFFER_BACKEND:
+               return 0;
+       case LTTNG_RING_BUFFER_ITER:
+               return 0;
+       case LTTNG_CONTEXT_PERF_COUNTERS:
+               return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
+{
+       struct lttng_cpuhp_node *lttng_node;
+
+       lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+       switch (lttng_node->component) {
+       case LTTNG_RING_BUFFER_FRONTEND:
+               return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
+       case LTTNG_RING_BUFFER_BACKEND:
+               return 0;
+       case LTTNG_RING_BUFFER_ITER:
+               return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
+       case LTTNG_CONTEXT_PERF_COUNTERS:
+               return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
+{
+       struct lttng_cpuhp_node *lttng_node;
+
+       lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+       switch (lttng_node->component) {
+       case LTTNG_RING_BUFFER_FRONTEND:
+               return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
+       case LTTNG_RING_BUFFER_BACKEND:
+               return 0;
+       case LTTNG_RING_BUFFER_ITER:
+               return 0;
+       case LTTNG_CONTEXT_PERF_COUNTERS:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int __init lttng_init_cpu_hotplug(void)
+{
+       int ret;
+
+       ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
+                       lttng_hotplug_prepare,
+                       lttng_hotplug_dead);
+       if (ret < 0) {
+               return ret;
+       }
+       lttng_hp_prepare = ret;
+       lttng_rb_set_hp_prepare(ret);
+
+       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
+                       lttng_hotplug_online,
+                       lttng_hotplug_offline);
+       if (ret < 0) {
+               cpuhp_remove_multi_state(lttng_hp_prepare);
+               lttng_hp_prepare = 0;
+               return ret;
+       }
+       lttng_hp_online = ret;
+       lttng_rb_set_hp_online(ret);
+
+       return 0;
+}
+
+static void __exit lttng_exit_cpu_hotplug(void)
+{
+       lttng_rb_set_hp_online(0);
+       cpuhp_remove_multi_state(lttng_hp_online);
+       lttng_rb_set_hp_prepare(0);
+       cpuhp_remove_multi_state(lttng_hp_prepare);
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+static int lttng_init_cpu_hotplug(void)
+{
+       return 0;
+}
+static void lttng_exit_cpu_hotplug(void)
+{
+}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+static int __init lttng_events_init(void)
+{
+       int ret;
+
+       ret = wrapper_lttng_fixup_sig(THIS_MODULE);
+       if (ret)
+               return ret;
+       ret = wrapper_get_pfnblock_flags_mask_init();
+       if (ret)
+               return ret;
+       ret = wrapper_get_pageblock_flags_mask_init();
+       if (ret)
+               return ret;
+       ret = lttng_probes_init();
+       if (ret)
+               return ret;
+       ret = lttng_context_init();
+       if (ret)
+               return ret;
+       ret = lttng_tracepoint_init();
+       if (ret)
+               goto error_tp;
+       event_cache = KMEM_CACHE(lttng_event, 0);
+       if (!event_cache) {
+               ret = -ENOMEM;
+               goto error_kmem;
+       }
+       ret = lttng_abi_init();
+       if (ret)
+               goto error_abi;
+       ret = lttng_logger_init();
+       if (ret)
+               goto error_logger;
+       ret = lttng_init_cpu_hotplug();
+       if (ret)
+               goto error_hotplug;
+       printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
+               __stringify(LTTNG_MODULES_MAJOR_VERSION),
+               __stringify(LTTNG_MODULES_MINOR_VERSION),
+               __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
+               LTTNG_MODULES_EXTRAVERSION,
+               LTTNG_VERSION_NAME,
+#ifdef LTTNG_EXTRA_VERSION_GIT
+               LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
+#else
+               "",
+#endif
+#ifdef LTTNG_EXTRA_VERSION_NAME
+               LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
+#else
+               "");
+#endif
+       return 0;
+
+error_hotplug:
+       lttng_logger_exit();
+error_logger:
+       lttng_abi_exit();
+error_abi:
+       kmem_cache_destroy(event_cache);
+error_kmem:
+       lttng_tracepoint_exit();
+error_tp:
+       lttng_context_exit();
+       printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
+               __stringify(LTTNG_MODULES_MAJOR_VERSION),
+               __stringify(LTTNG_MODULES_MINOR_VERSION),
+               __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
+               LTTNG_MODULES_EXTRAVERSION,
+               LTTNG_VERSION_NAME,
+#ifdef LTTNG_EXTRA_VERSION_GIT
+               LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
+#else
+               "",
+#endif
+#ifdef LTTNG_EXTRA_VERSION_NAME
+               LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
+#else
+               "");
+#endif
+       return ret;
+}
+
+module_init(lttng_events_init);
+
+static void __exit lttng_events_exit(void)
+{
+       struct lttng_session *session, *tmpsession;
+
+       lttng_exit_cpu_hotplug();
+       lttng_logger_exit();
+       lttng_abi_exit();
+       list_for_each_entry_safe(session, tmpsession, &sessions, list)
+               lttng_session_destroy(session);
+       kmem_cache_destroy(event_cache);
+       lttng_tracepoint_exit();
+       lttng_context_exit();
+       printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
+               __stringify(LTTNG_MODULES_MAJOR_VERSION),
+               __stringify(LTTNG_MODULES_MINOR_VERSION),
+               __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
+               LTTNG_MODULES_EXTRAVERSION,
+               LTTNG_VERSION_NAME,
+#ifdef LTTNG_EXTRA_VERSION_GIT
+               LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
+#else
+               "",
+#endif
+#ifdef LTTNG_EXTRA_VERSION_NAME
+               LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
+#else
+               "");
+#endif
+}
+
+module_exit(lttng_events_exit);
+
+#include <generated/patches.i>
+#ifdef LTTNG_EXTRA_VERSION_GIT
+MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
+#endif
+#ifdef LTTNG_EXTRA_VERSION_NAME
+MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
+#endif
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng tracer");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/lttng-filter-interpreter.c b/src/lttng-filter-interpreter.c
new file mode 100644 (file)
index 0000000..5ba7e23
--- /dev/null
@@ -0,0 +1,1579 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-filter-interpreter.c
+ *
+ * LTTng modules filter interpreter.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <wrapper/uaccess.h>
+#include <wrapper/frame.h>
+#include <wrapper/types.h>
+#include <linux/swab.h>
+
+#include <lttng/filter.h>
+#include <lttng/string-utils.h>
+
+LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
+
+/*
+ * get_char must be called with the page fault handler disabled when it is
+ * expected to read from user-space.
+ */
+static
+char get_char(struct estack_entry *reg, size_t offset)
+{
+       if (unlikely(offset >= reg->u.s.seq_len))
+               return '\0';
+       if (reg->u.s.user) {
+               char c;
+
+               /* Handle invalid access as end of string. */
+               if (unlikely(!lttng_access_ok(VERIFY_READ,
+                               reg->u.s.user_str + offset,
+                               sizeof(c))))
+                       return '\0';
+               /* Handle fault (nonzero return value) as end of string. */
+               if (unlikely(__copy_from_user_inatomic(&c,
+                               reg->u.s.user_str + offset,
+                               sizeof(c))))
+                       return '\0';
+               return c;
+       } else {
+               return reg->u.s.str[offset];
+       }
+}
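+
+/*
+ * Caller-side sketch of the contract above, not part of the tracer (the
+ * guard is never defined): page faults are disabled around get_char()
+ * whenever the string may live in user-space, so a faulting access
+ * degrades to '\0' instead of sleeping in atomic context. The helper
+ * name first_char_atomic is hypothetical.
+ */
+#ifdef LTTNG_GET_CHAR_SKETCH
+static char first_char_atomic(struct estack_entry *reg)
+{
+       char c;
+
+       pagefault_disable();
+       c = get_char(reg, 0);   /* '\0' on fault or invalid access */
+       pagefault_enable();
+       return c;
+}
+#endif /* LTTNG_GET_CHAR_SKETCH */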
+
+/*
+ * -1: wildcard found.
+ * -2: unknown escape char.
+ * 0: normal char.
+ */
+static
+int parse_char(struct estack_entry *reg, char *c, size_t *offset)
+{
+       switch (*c) {
+       case '\\':
+               (*offset)++;
+               *c = get_char(reg, *offset);
+               switch (*c) {
+               case '\\':
+               case '*':
+                       return 0;
+               default:
+                       return -2;
+               }
+       case '*':
+               return -1;
+       default:
+               return 0;
+       }
+}
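+
+/*
+ * Worked examples of the convention above, for a plain string literal:
+ * 'a'  -> 0 (normal char, compared as-is);
+ * '*'  -> -1 (wildcard, remainder matches anything);
+ * "\*" -> 0, with *c advanced to the literal '*';
+ * "\a" -> -2 (unknown escape char).
+ */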
+
+static
+char get_char_at_cb(size_t at, void *data)
+{
+       return get_char(data, at);
+}
+
+static
+int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
+{
+       bool has_user = false;
+       mm_segment_t old_fs;
+       int result;
+       struct estack_entry *pattern_reg;
+       struct estack_entry *candidate_reg;
+
+       if (estack_bx(stack, top)->u.s.user
+                       || estack_ax(stack, top)->u.s.user) {
+               has_user = true;
+               old_fs = get_fs();
+               set_fs(KERNEL_DS);
+               pagefault_disable();
+       }
+
+       /* Find out which side is the pattern vs. the candidate. */
+       if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
+               pattern_reg = estack_ax(stack, top);
+               candidate_reg = estack_bx(stack, top);
+       } else {
+               pattern_reg = estack_bx(stack, top);
+               candidate_reg = estack_ax(stack, top);
+       }
+
+       /* Perform the match operation. */
+       result = !strutils_star_glob_match_char_cb(get_char_at_cb,
+               pattern_reg, get_char_at_cb, candidate_reg);
+       if (has_user) {
+               pagefault_enable();
+               set_fs(old_fs);
+       }
+
+       return result;
+}
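+
+/*
+ * Note on the convention above: like strcmp, stack_star_glob_match()
+ * returns 0 on a successful match, which is why the EQ/NE star-glob
+ * opcodes below compare its result against 0.
+ */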
+
+static
+int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
+{
+       size_t offset_bx = 0, offset_ax = 0;
+       int diff, has_user = 0;
+       mm_segment_t old_fs;
+
+       if (estack_bx(stack, top)->u.s.user
+                       || estack_ax(stack, top)->u.s.user) {
+               has_user = 1;
+               old_fs = get_fs();
+               set_fs(KERNEL_DS);
+               pagefault_disable();
+       }
+
+       for (;;) {
+               int ret;
+               int escaped_r0 = 0;
+               char char_bx, char_ax;
+
+               char_bx = get_char(estack_bx(stack, top), offset_bx);
+               char_ax = get_char(estack_ax(stack, top), offset_ax);
+
+               if (unlikely(char_bx == '\0')) {
+                       if (char_ax == '\0') {
+                               diff = 0;
+                               break;
+                       } else {
+                               if (estack_ax(stack, top)->u.s.literal_type ==
+                                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+                                       ret = parse_char(estack_ax(stack, top),
+                                               &char_ax, &offset_ax);
+                                       if (ret == -1) {
+                                               diff = 0;
+                                               break;
+                                       }
+                               }
+                               diff = -1;
+                               break;
+                       }
+               }
+               if (unlikely(char_ax == '\0')) {
+                       if (estack_bx(stack, top)->u.s.literal_type ==
+                                       ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+                               ret = parse_char(estack_bx(stack, top),
+                                       &char_bx, &offset_bx);
+                               if (ret == -1) {
+                                       diff = 0;
+                                       break;
+                               }
+                       }
+                       diff = 1;
+                       break;
+               }
+               if (estack_bx(stack, top)->u.s.literal_type ==
+                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+                       ret = parse_char(estack_bx(stack, top),
+                               &char_bx, &offset_bx);
+                       if (ret == -1) {
+                               diff = 0;
+                               break;
+                       } else if (ret == -2) {
+                               escaped_r0 = 1;
+                       }
+                       /* else compare both chars */
+               }
+               if (estack_ax(stack, top)->u.s.literal_type ==
+                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+                       ret = parse_char(estack_ax(stack, top),
+                               &char_ax, &offset_ax);
+                       if (ret == -1) {
+                               diff = 0;
+                               break;
+                       } else if (ret == -2) {
+                               if (!escaped_r0) {
+                                       diff = -1;
+                                       break;
+                               }
+                       } else {
+                               if (escaped_r0) {
+                                       diff = 1;
+                                       break;
+                               }
+                       }
+               } else {
+                       if (escaped_r0) {
+                               diff = 1;
+                               break;
+                       }
+               }
+               diff = char_bx - char_ax;
+               if (diff != 0)
+                       break;
+               offset_bx++;
+               offset_ax++;
+       }
+       if (has_user) {
+               pagefault_enable();
+               set_fs(old_fs);
+       }
+       return diff;
+}
+
+uint64_t lttng_filter_false(void *filter_data,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *filter_stack_data)
+{
+       return 0;
+}
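+
+/*
+ * Sketch of how a caller consumes the return convention shared by
+ * lttng_filter_false() and the interpreter below, not part of the
+ * tracer (the guard is never defined). It assumes the runtime's filter
+ * callback and the LTTNG_FILTER_RECORD_FLAG constant from
+ * <lttng/filter.h>; the helper name filter_says_record is hypothetical.
+ */
+#ifdef LTTNG_FILTER_RET_SKETCH
+static bool filter_says_record(struct bytecode_runtime *runtime,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *filter_stack_data)
+{
+       uint64_t ret;
+
+       ret = runtime->filter(runtime, lttng_probe_ctx, filter_stack_data);
+       /* Only the 0x1 bit decides whether the event is recorded. */
+       return ret & LTTNG_FILTER_RECORD_FLAG;
+}
+#endif /* LTTNG_FILTER_RET_SKETCH */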
+
+#ifdef INTERPRETER_USE_SWITCH
+
+/*
+ * Fallback for compilers that do not support taking the address of labels.
+ */
+
+#define START_OP                                                       \
+       start_pc = &bytecode->code[0];                                  \
+       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;    \
+                       pc = next_pc) {                                 \
+               dbg_printk("Executing op %s (%u)\n",                    \
+                       lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
+                       (unsigned int) *(filter_opcode_t *) pc);        \
+               switch (*(filter_opcode_t *) pc)        {
+
+#define OP(name)       case name
+
+#define PO             break
+
+#define END_OP         }                                               \
+       }
+
+#else
+
+/*
+ * Dispatch-table based interpreter.
+ */
+
+#define START_OP                                                       \
+       start_pc = &bytecode->code[0];                                  \
+       pc = next_pc = start_pc;                                        \
+       if (unlikely(pc - start_pc >= bytecode->len))                   \
+               goto end;                                               \
+       goto *dispatch[*(filter_opcode_t *) pc];
+
+#define OP(name)                                                       \
+LABEL_##name
+
+#define PO                                                             \
+               pc = next_pc;                                           \
+               goto *dispatch[*(filter_opcode_t *) pc];
+
+#define END_OP
+
+#endif
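+
+/*
+ * Standalone sketch of the computed-goto technique behind the macros
+ * above, not part of the tracer (the guard is never defined): each
+ * opcode indexes a table of label addresses, so dispatch is a single
+ * indirect jump instead of a switch. Opcode values and the run() name
+ * are hypothetical.
+ */
+#ifdef LTTNG_DISPATCH_SKETCH
+static int run(const uint8_t *code)
+{
+       static void *dispatch[] = {
+               [0] = &&LABEL_HALT,     /* opcode 0: stop */
+               [1] = &&LABEL_NOP,      /* opcode 1: no-op */
+       };
+       const uint8_t *pc = code;
+
+       goto *dispatch[*pc];
+LABEL_NOP:
+       pc++;
+       goto *dispatch[*pc];
+LABEL_HALT:
+       return 0;
+}
+#endif /* LTTNG_DISPATCH_SKETCH */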
+
+static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
+               struct load_ptr *ptr,
+               uint32_t idx)
+{
+       struct lttng_ctx_field *ctx_field;
+       struct lttng_event_field *field;
+       union lttng_ctx_value v;
+
+       ctx_field = &lttng_static_ctx->fields[idx];
+       field = &ctx_field->event_field;
+       ptr->type = LOAD_OBJECT;
+       /* field is only used for types nested within variants. */
+       ptr->field = NULL;
+
+       switch (field->type.atype) {
+       case atype_integer:
+               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+               if (field->type.u.integer.signedness) {
+                       ptr->object_type = OBJECT_TYPE_S64;
+                       ptr->u.s64 = v.s64;
+                       ptr->ptr = &ptr->u.s64;
+               } else {
+                       ptr->object_type = OBJECT_TYPE_U64;
+                       ptr->u.u64 = v.s64;     /* Cast. */
+                       ptr->ptr = &ptr->u.u64;
+               }
+               break;
+       case atype_enum_nestable:
+       {
+               const struct lttng_integer_type *itype =
+                       &field->type.u.enum_nestable.container_type->u.integer;
+
+               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+               if (itype->signedness) {
+                       ptr->object_type = OBJECT_TYPE_S64;
+                       ptr->u.s64 = v.s64;
+                       ptr->ptr = &ptr->u.s64;
+               } else {
+                       ptr->object_type = OBJECT_TYPE_U64;
+                       ptr->u.u64 = v.s64;     /* Cast. */
+                       ptr->ptr = &ptr->u.u64;
+               }
+               break;
+       }
+       case atype_array_nestable:
+               if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+                       printk(KERN_WARNING "Array nesting only supports integer types.\n");
+                       return -EINVAL;
+               }
+               if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+                       printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
+                       return -EINVAL;
+               }
+               ptr->object_type = OBJECT_TYPE_STRING;
+               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+               ptr->ptr = v.str;
+               break;
+       case atype_sequence_nestable:
+               if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+                       printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
+                       return -EINVAL;
+               }
+               if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+                       printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
+                       return -EINVAL;
+               }
+               ptr->object_type = OBJECT_TYPE_STRING;
+               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+               ptr->ptr = v.str;
+               break;
+       case atype_string:
+               ptr->object_type = OBJECT_TYPE_STRING;
+               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+               ptr->ptr = v.str;
+               break;
+       case atype_struct_nestable:
+               printk(KERN_WARNING "Structure type cannot be loaded.\n");
+               return -EINVAL;
+       case atype_variant_nestable:
+               printk(KERN_WARNING "Variant type cannot be loaded.\n");
+               return -EINVAL;
+       default:
+               printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
+               struct bytecode_runtime *runtime,
+               uint64_t index, struct estack_entry *stack_top)
+{
+       int ret;
+       const struct filter_get_index_data *gid;
+
+       /*
+        * Types nested within variants need to perform dynamic lookup
+        * based on the field descriptions. The LTTng kernel tracer does
+        * not implement variants for now.
+        */
+       if (stack_top->u.ptr.field)
+               return -EINVAL;
+       gid = (const struct filter_get_index_data *) &runtime->data[index];
+       switch (stack_top->u.ptr.type) {
+       case LOAD_OBJECT:
+               switch (stack_top->u.ptr.object_type) {
+               case OBJECT_TYPE_ARRAY:
+               {
+                       const char *ptr;
+
+                       WARN_ON_ONCE(gid->offset >= gid->array_len);
+                       /* Skip count (unsigned long) */
+                       ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+                       ptr = ptr + gid->offset;
+                       stack_top->u.ptr.ptr = ptr;
+                       stack_top->u.ptr.object_type = gid->elem.type;
+                       stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+                       /* field is only used for types nested within variants. */
+                       stack_top->u.ptr.field = NULL;
+                       break;
+               }
+               case OBJECT_TYPE_SEQUENCE:
+               {
+                       const char *ptr;
+                       size_t ptr_seq_len;
+
+                       ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+                       ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
+                       if (gid->offset >= gid->elem.len * ptr_seq_len) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       ptr = ptr + gid->offset;
+                       stack_top->u.ptr.ptr = ptr;
+                       stack_top->u.ptr.object_type = gid->elem.type;
+                       stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+                       /* field is only used for types nested within variants. */
+                       stack_top->u.ptr.field = NULL;
+                       break;
+               }
+               case OBJECT_TYPE_STRUCT:
+                       printk(KERN_WARNING "Nested structures are not supported yet.\n");
+                       ret = -EINVAL;
+                       goto end;
+               case OBJECT_TYPE_VARIANT:
+               default:
+                       printk(KERN_WARNING "Unexpected get index type %d",
+                               (int) stack_top->u.ptr.object_type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       case LOAD_ROOT_CONTEXT:
+       case LOAD_ROOT_APP_CONTEXT:     /* Fall-through */
+       {
+               ret = context_get_index(lttng_probe_ctx,
+                               &stack_top->u.ptr,
+                               gid->ctx_index);
+               if (ret) {
+                       goto end;
+               }
+               break;
+       }
+       case LOAD_ROOT_PAYLOAD:
+               stack_top->u.ptr.ptr += gid->offset;
+               if (gid->elem.type == OBJECT_TYPE_STRING)
+                       stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
+               stack_top->u.ptr.object_type = gid->elem.type;
+               stack_top->u.ptr.type = LOAD_OBJECT;
+               /* field is only used for types nested within variants. */
+               stack_top->u.ptr.field = NULL;
+               break;
+       }
+       return 0;
+
+end:
+       return ret;
+}
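+
+/*
+ * Layout note for the OBJECT_TYPE_ARRAY/SEQUENCE cases above: the stack
+ * slot points at an { unsigned long count; const char *elems; } pair, so
+ * the element pointer is read sizeof(unsigned long) bytes in, and
+ * gid->offset is a byte offset into the element storage (bounded by
+ * gid->elem.len * count for sequences).
+ */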
+
+static int dynamic_load_field(struct estack_entry *stack_top)
+{
+       int ret;
+
+       switch (stack_top->u.ptr.type) {
+       case LOAD_OBJECT:
+               break;
+       case LOAD_ROOT_CONTEXT:
+       case LOAD_ROOT_APP_CONTEXT:
+       case LOAD_ROOT_PAYLOAD:
+       default:
+               dbg_printk("Filter warning: cannot load root, missing field name.\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       switch (stack_top->u.ptr.object_type) {
+       case OBJECT_TYPE_S8:
+               dbg_printk("op load field s8\n");
+               stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
+               break;
+       case OBJECT_TYPE_S16:
+       {
+               int16_t tmp;
+
+               dbg_printk("op load field s16\n");
+               tmp = *(int16_t *) stack_top->u.ptr.ptr;
+               if (stack_top->u.ptr.rev_bo)
+                       __swab16s(&tmp);
+               stack_top->u.v = tmp;
+               break;
+       }
+       case OBJECT_TYPE_S32:
+       {
+               int32_t tmp;
+
+               dbg_printk("op load field s32\n");
+               tmp = *(int32_t *) stack_top->u.ptr.ptr;
+               if (stack_top->u.ptr.rev_bo)
+                       __swab32s(&tmp);
+               stack_top->u.v = tmp;
+               break;
+       }
+       case OBJECT_TYPE_S64:
+       {
+               int64_t tmp;
+
+               dbg_printk("op load field s64\n");
+               tmp = *(int64_t *) stack_top->u.ptr.ptr;
+               if (stack_top->u.ptr.rev_bo)
+                       __swab64s(&tmp);
+               stack_top->u.v = tmp;
+               break;
+       }
+       case OBJECT_TYPE_U8:
+               dbg_printk("op load field u8\n");
+               stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
+               break;
+       case OBJECT_TYPE_U16:
+       {
+               uint16_t tmp;
+
+               dbg_printk("op load field s16\n");
+               tmp = *(uint16_t *) stack_top->u.ptr.ptr;
+               if (stack_top->u.ptr.rev_bo)
+                       __swab16s(&tmp);
+               stack_top->u.v = tmp;
+               break;
+       }
+       case OBJECT_TYPE_U32:
+       {
+               uint32_t tmp;
+
+               dbg_printk("op load field u32\n");
+               tmp = *(uint32_t *) stack_top->u.ptr.ptr;
+               if (stack_top->u.ptr.rev_bo)
+                       __swab32s(&tmp);
+               stack_top->u.v = tmp;
+               break;
+       }
+       case OBJECT_TYPE_U64:
+       {
+               uint64_t tmp;
+
+               dbg_printk("op load field u64\n");
+               tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+               if (stack_top->u.ptr.rev_bo)
+                       __swab64s(&tmp);
+               stack_top->u.v = tmp;
+               break;
+       }
+       case OBJECT_TYPE_STRING:
+       {
+               const char *str;
+
+               dbg_printk("op load field string\n");
+               str = (const char *) stack_top->u.ptr.ptr;
+               stack_top->u.s.str = str;
+               if (unlikely(!stack_top->u.s.str)) {
+                       dbg_printk("Filter warning: loading a NULL string.\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
+               stack_top->u.s.literal_type =
+                       ESTACK_STRING_LITERAL_TYPE_NONE;
+               break;
+       }
+       case OBJECT_TYPE_STRING_SEQUENCE:
+       {
+               const char *ptr;
+
+               dbg_printk("op load field string sequence\n");
+               ptr = stack_top->u.ptr.ptr;
+               stack_top->u.s.seq_len = *(unsigned long *) ptr;
+               stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+               if (unlikely(!stack_top->u.s.str)) {
+                       dbg_printk("Filter warning: loading a NULL sequence.\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               stack_top->u.s.literal_type =
+                       ESTACK_STRING_LITERAL_TYPE_NONE;
+               break;
+       }
+       case OBJECT_TYPE_DYNAMIC:
+               /*
+                * Dynamic types in context are looked up
+                * by context get index.
+                */
+               ret = -EINVAL;
+               goto end;
+       case OBJECT_TYPE_DOUBLE:
+               ret = -EINVAL;
+               goto end;
+       case OBJECT_TYPE_SEQUENCE:
+       case OBJECT_TYPE_ARRAY:
+       case OBJECT_TYPE_STRUCT:
+       case OBJECT_TYPE_VARIANT:
+               printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       return 0;
+
+end:
+       return ret;
+}
+
+/*
+ * Return 0 (discard), or raise the 0x1 flag (log event).
+ * Other flag bits are reserved for future extensions and currently have
+ * no effect.
+ */
+uint64_t lttng_filter_interpret_bytecode(void *filter_data,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *filter_stack_data)
+{
+       struct bytecode_runtime *bytecode = filter_data;
+       void *pc, *next_pc, *start_pc;
+       int ret = -EINVAL;
+       uint64_t retval = 0;
+       struct estack _stack;
+       struct estack *stack = &_stack;
+       register int64_t ax = 0, bx = 0;
+       register int top = FILTER_STACK_EMPTY;
+#ifndef INTERPRETER_USE_SWITCH
+       static void *dispatch[NR_FILTER_OPS] = {
+               [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,
+
+               [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,
+
+               /* binary */
+               [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
+               [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
+               [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
+               [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
+               [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
+               [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
+               [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
+               [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
+               [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
+               [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
+
+               /* binary comparators */
+               [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
+               [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
+               [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
+               [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
+               [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
+               [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,
+
+               /* string binary comparator */
+               [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
+               [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
+               [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
+               [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
+               [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
+               [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,
+
+               /* globbing pattern binary comparator */
+               [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
+               [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,
+
+               /* s64 binary comparator */
+               [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
+               [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
+               [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
+               [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
+               [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
+               [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,
+
+               /* double binary comparator */
+               [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
+               [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
+               [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
+               [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
+               [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
+               [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,
+
+               /* Mixed S64-double binary comparators */
+               [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
+               [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
+               [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
+               [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
+               [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
+               [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,
+
+               [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
+               [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
+               [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
+               [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
+               [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
+               [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,
+
+               /* unary */
+               [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
+               [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
+               [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
+               [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
+               [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
+               [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
+               [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
+               [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
+               [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,
+
+               /* logical */
+               [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
+               [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,
+
+               /* load field ref */
+               [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
+               [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
+               [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
+               [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
+               [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,
+
+               /* load from immediate operand */
+               [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
+               [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
+               [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
+               [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,
+
+               /* cast */
+               [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
+               [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
+               [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,
+
+               /* get context ref */
+               [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
+               [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
+               [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
+               [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
+
+               /* load userspace field ref */
+               [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
+               [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
+
+               /* Instructions for recursive traversal through composed types. */
+               [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
+               [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
+               [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
+
+               [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
+               [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
+               [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
+               [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
+
+               [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
+               [ FILTER_OP_LOAD_FIELD_S8        ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
+               [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
+               [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
+               [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
+               [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
+               [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
+               [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
+               [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
+               [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
+               [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
+               [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
+
+               [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,
+
+               [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
+       };
+#endif /* #ifndef INTERPRETER_USE_SWITCH */
+
+       START_OP
+
+               OP(FILTER_OP_UNKNOWN):
+               OP(FILTER_OP_LOAD_FIELD_REF):
+               OP(FILTER_OP_GET_CONTEXT_REF):
+#ifdef INTERPRETER_USE_SWITCH
+               default:
+#endif /* INTERPRETER_USE_SWITCH */
+                       printk(KERN_WARNING "unknown bytecode op %u\n",
+                               (unsigned int) *(filter_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               OP(FILTER_OP_RETURN):
+               OP(FILTER_OP_RETURN_S64):
+                       /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
+                       retval = !!estack_ax_v;
+                       ret = 0;
+                       goto end;
+
+               /* binary */
+               OP(FILTER_OP_MUL):
+               OP(FILTER_OP_DIV):
+               OP(FILTER_OP_MOD):
+               OP(FILTER_OP_PLUS):
+               OP(FILTER_OP_MINUS):
+                       printk(KERN_WARNING "unsupported bytecode op %u\n",
+                               (unsigned int) *(filter_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               OP(FILTER_OP_EQ):
+               OP(FILTER_OP_NE):
+               OP(FILTER_OP_GT):
+               OP(FILTER_OP_LT):
+               OP(FILTER_OP_GE):
+               OP(FILTER_OP_LE):
+                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
+                               (unsigned int) *(filter_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               OP(FILTER_OP_EQ_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, "==") == 0);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_NE_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, "!=") != 0);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_GT_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, ">") > 0);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_LT_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, "<") < 0);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_GE_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, ">=") >= 0);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_LE_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, "<=") <= 0);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+
+               OP(FILTER_OP_EQ_STAR_GLOB_STRING):
+               {
+                       int res;
+
+                       res = (stack_star_glob_match(stack, top, "==") == 0);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_NE_STAR_GLOB_STRING):
+               {
+                       int res;
+
+                       res = (stack_star_glob_match(stack, top, "!=") != 0);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+
+               OP(FILTER_OP_EQ_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v == estack_ax_v);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_NE_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v != estack_ax_v);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_GT_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v > estack_ax_v);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_LT_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v < estack_ax_v);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_GE_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v >= estack_ax_v);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_LE_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v <= estack_ax_v);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+
+               OP(FILTER_OP_EQ_DOUBLE):
+               OP(FILTER_OP_NE_DOUBLE):
+               OP(FILTER_OP_GT_DOUBLE):
+               OP(FILTER_OP_LT_DOUBLE):
+               OP(FILTER_OP_GE_DOUBLE):
+               OP(FILTER_OP_LE_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               /* Mixed S64-double binary comparators */
+               OP(FILTER_OP_EQ_DOUBLE_S64):
+               OP(FILTER_OP_NE_DOUBLE_S64):
+               OP(FILTER_OP_GT_DOUBLE_S64):
+               OP(FILTER_OP_LT_DOUBLE_S64):
+               OP(FILTER_OP_GE_DOUBLE_S64):
+               OP(FILTER_OP_LE_DOUBLE_S64):
+               OP(FILTER_OP_EQ_S64_DOUBLE):
+               OP(FILTER_OP_NE_S64_DOUBLE):
+               OP(FILTER_OP_GT_S64_DOUBLE):
+               OP(FILTER_OP_LT_S64_DOUBLE):
+               OP(FILTER_OP_GE_S64_DOUBLE):
+               OP(FILTER_OP_LE_S64_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+               OP(FILTER_OP_BIT_RSHIFT):
+               {
+                       int64_t res;
+
+                       /* Catch undefined behavior. */
+                       if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_BIT_LSHIFT):
+               {
+                       int64_t res;
+
+                       /* Catch undefined behavior. */
+                       if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_BIT_AND):
+               {
+                       int64_t res;
+
+                       res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_BIT_OR):
+               {
+                       int64_t res;
+
+                       res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(FILTER_OP_BIT_XOR):
+               {
+                       int64_t res;
+
+                       res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
+                       estack_pop(stack, top, ax, bx);
+                       estack_ax_v = res;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+
+               /* unary */
+               OP(FILTER_OP_UNARY_PLUS):
+               OP(FILTER_OP_UNARY_MINUS):
+               OP(FILTER_OP_UNARY_NOT):
+                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
+                               (unsigned int) *(filter_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               OP(FILTER_OP_UNARY_BIT_NOT):
+               {
+                       estack_ax_v = ~(uint64_t) estack_ax_v;
+                       next_pc += sizeof(struct unary_op);
+                       PO;
+               }
+
+               OP(FILTER_OP_UNARY_PLUS_S64):
+               {
+                       next_pc += sizeof(struct unary_op);
+                       PO;
+               }
+               OP(FILTER_OP_UNARY_MINUS_S64):
+               {
+                       estack_ax_v = -estack_ax_v;
+                       next_pc += sizeof(struct unary_op);
+                       PO;
+               }
+               OP(FILTER_OP_UNARY_PLUS_DOUBLE):
+               OP(FILTER_OP_UNARY_MINUS_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+               OP(FILTER_OP_UNARY_NOT_S64):
+               {
+                       estack_ax_v = !estack_ax_v;
+                       next_pc += sizeof(struct unary_op);
+                       PO;
+               }
+               OP(FILTER_OP_UNARY_NOT_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               /* logical */
+               OP(FILTER_OP_AND):
+               {
+                       struct logical_op *insn = (struct logical_op *) pc;
+
+                       /* If AX is 0, skip and evaluate to 0 */
+                       if (unlikely(estack_ax_v == 0)) {
+                               dbg_printk("Jumping to bytecode offset %u\n",
+                                       (unsigned int) insn->skip_offset);
+                               next_pc = start_pc + insn->skip_offset;
+                       } else {
+                               /* Pop 1 when jump not taken */
+                               estack_pop(stack, top, ax, bx);
+                               next_pc += sizeof(struct logical_op);
+                       }
+                       PO;
+               }
+               OP(FILTER_OP_OR):
+               {
+                       struct logical_op *insn = (struct logical_op *) pc;
+
+                       /* If AX is nonzero, skip and evaluate to 1 */
+
+                       if (unlikely(estack_ax_v != 0)) {
+                               estack_ax_v = 1;
+                               dbg_printk("Jumping to bytecode offset %u\n",
+                                       (unsigned int) insn->skip_offset);
+                               next_pc = start_pc + insn->skip_offset;
+                       } else {
+                               /* Pop 1 when jump not taken */
+                               estack_pop(stack, top, ax, bx);
+                               next_pc += sizeof(struct logical_op);
+                       }
+                       PO;
+               }
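+
+               /*
+                * Worked example of the short-circuit encoding above: for
+                * "a && b", the bytecode generator emits the code for a,
+                * then FILTER_OP_AND with skip_offset pointing past the
+                * code for b. When a evaluates to 0, AX already holds the
+                * final result and the jump skips b entirely; otherwise AX
+                * is popped and b's result becomes the answer.
+                */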
+
+               /* load field ref */
+               OP(FILTER_OP_LOAD_FIELD_REF_STRING):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+
+                       dbg_printk("load field ref offset %u type string\n",
+                               ref->offset);
+                       estack_push(stack, top, ax, bx);
+                       estack_ax(stack, top)->u.s.str =
+                               *(const char * const *) &filter_stack_data[ref->offset];
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL string.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->u.s.user = 0;
+                       dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+
+                       dbg_printk("load field ref offset %u type sequence\n",
+                               ref->offset);
+                       estack_push(stack, top, ax, bx);
+                       estack_ax(stack, top)->u.s.seq_len =
+                               *(unsigned long *) &filter_stack_data[ref->offset];
+                       estack_ax(stack, top)->u.s.str =
+                               *(const char **) (&filter_stack_data[ref->offset
+                                                               + sizeof(unsigned long)]);
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL sequence.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->u.s.user = 0;
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(FILTER_OP_LOAD_FIELD_REF_S64):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+
+                       dbg_printk("load field ref offset %u type s64\n",
+                               ref->offset);
+                       estack_push(stack, top, ax, bx);
+                       estack_ax_v =
+                               ((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
+                       dbg_printk("ref load s64 %lld\n",
+                               (long long) estack_ax_v);
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               /* load from immediate operand */
+               OP(FILTER_OP_LOAD_STRING):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       dbg_printk("load string %s\n", insn->data);
+                       estack_push(stack, top, ax, bx);
+                       estack_ax(stack, top)->u.s.str = insn->data;
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_PLAIN;
+                       estack_ax(stack, top)->u.s.user = 0;
+                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+                       PO;
+               }
+
+               OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       dbg_printk("load globbing pattern %s\n", insn->data);
+                       estack_push(stack, top, ax, bx);
+                       estack_ax(stack, top)->u.s.str = insn->data;
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
+                       estack_ax(stack, top)->u.s.user = 0;
+                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+                       PO;
+               }
+
+               OP(FILTER_OP_LOAD_S64):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       estack_push(stack, top, ax, bx);
+                       estack_ax_v = ((struct literal_numeric *) insn->data)->v;
+                       dbg_printk("load s64 %lld\n",
+                               (long long) estack_ax_v);
+                       next_pc += sizeof(struct load_op)
+                                       + sizeof(struct literal_numeric);
+                       PO;
+               }
+
+               OP(FILTER_OP_LOAD_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               /* cast */
+               OP(FILTER_OP_CAST_TO_S64):
+                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
+                               (unsigned int) *(filter_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               OP(FILTER_OP_CAST_DOUBLE_TO_S64):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               OP(FILTER_OP_CAST_NOP):
+               {
+                       next_pc += sizeof(struct cast_op);
+                       PO;
+               }
+
+               /* get context ref */
+               OP(FILTER_OP_GET_CONTEXT_REF_STRING):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+                       struct lttng_ctx_field *ctx_field;
+                       union lttng_ctx_value v;
+
+                       dbg_printk("get context ref offset %u type string\n",
+                               ref->offset);
+                       ctx_field = &lttng_static_ctx->fields[ref->offset];
+                       ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+                       estack_push(stack, top, ax, bx);
+                       estack_ax(stack, top)->u.s.str = v.str;
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL string.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->u.s.user = 0;
+                       dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(FILTER_OP_GET_CONTEXT_REF_S64):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+                       struct lttng_ctx_field *ctx_field;
+                       union lttng_ctx_value v;
+
+                       dbg_printk("get context ref offset %u type s64\n",
+                               ref->offset);
+                       ctx_field = &lttng_static_ctx->fields[ref->offset];
+                       ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+                       estack_push(stack, top, ax, bx);
+                       estack_ax_v = v.s64;
+                       dbg_printk("ref get context s64 %lld\n",
+                               (long long) estack_ax_v);
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               /* load userspace field ref */
+               OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+
+                       dbg_printk("load field ref offset %u type user string\n",
+                               ref->offset);
+                       estack_push(stack, top, ax, bx);
+                       estack_ax(stack, top)->u.s.user_str =
+                               *(const char * const *) &filter_stack_data[ref->offset];
+                       if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
+                               dbg_printk("Filter warning: loading a NULL string.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->u.s.user = 1;
+                       dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+
+                       dbg_printk("load field ref offset %u type user sequence\n",
+                               ref->offset);
+                       estack_push(stack, top, ax, bx);
+                       estack_ax(stack, top)->u.s.seq_len =
+                               *(unsigned long *) &filter_stack_data[ref->offset];
+                       estack_ax(stack, top)->u.s.user_str =
+                               *(const char **) (&filter_stack_data[ref->offset
+                                                               + sizeof(unsigned long)]);
+                       if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
+                               dbg_printk("Filter warning: loading a NULL sequence.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->u.s.user = 1;
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(FILTER_OP_GET_CONTEXT_ROOT):
+               {
+                       dbg_printk("op get context root\n");
+                       estack_push(stack, top, ax, bx);
+                       estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
+                       /* "field" only needed for variants. */
+                       estack_ax(stack, top)->u.ptr.field = NULL;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+
+               OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               OP(FILTER_OP_GET_PAYLOAD_ROOT):
+               {
+                       dbg_printk("op get app payload root\n");
+                       estack_push(stack, top, ax, bx);
+                       estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
+                       estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
+                       /* "field" only needed for variants. */
+                       estack_ax(stack, top)->u.ptr.field = NULL;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+
+               OP(FILTER_OP_GET_SYMBOL):
+               {
+                       dbg_printk("op get symbol\n");
+                       switch (estack_ax(stack, top)->u.ptr.type) {
+                       case LOAD_OBJECT:
+                               printk(KERN_WARNING "Nested fields not implemented yet.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case LOAD_ROOT_CONTEXT:
+                       case LOAD_ROOT_APP_CONTEXT:
+                       case LOAD_ROOT_PAYLOAD:
+                               /*
+                                * symbol lookup is performed by
+                                * specialization.
+                                */
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+                       PO;
+               }
+
+               OP(FILTER_OP_GET_SYMBOL_FIELD):
+               {
+                       /*
+                        * Used for first variant encountered in a
+                        * traversal. Variants are not implemented yet.
+                        */
+                       ret = -EINVAL;
+                       goto end;
+               }
+
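+               /*
+                * For get_index ops, the instruction's index operand holds
+                * the offset of a struct filter_get_index_data record that
+                * the specializer pushed into the runtime data area;
+                * dynamic_get_index() applies the precomputed byte offset it
+                * contains.
+                */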
+               OP(FILTER_OP_GET_INDEX_U16):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+                       dbg_printk("op get index u16\n");
+                       ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
+                       if (ret)
+                               goto end;
+                       estack_ax_v = estack_ax(stack, top)->u.v;
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+                       PO;
+               }
+
+               OP(FILTER_OP_GET_INDEX_U64):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+                       dbg_printk("op get index u64\n");
+                       ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
+                       if (ret)
+                               goto end;
+                       estack_ax_v = estack_ax(stack, top)->u.v;
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+                       PO;
+               }
+
+               OP(FILTER_OP_LOAD_FIELD):
+               {
+                       dbg_printk("op load field\n");
+                       ret = dynamic_load_field(estack_ax(stack, top));
+                       if (ret)
+                               goto end;
+                       estack_ax_v = estack_ax(stack, top)->u.v;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+
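+               /*
+                * Typed field loads: specialization has already validated the
+                * object type, so each variant simply dereferences the pointer
+                * left in u.ptr.ptr by the preceding load/get_index sequence.
+                */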
+               OP(FILTER_OP_LOAD_FIELD_S8):
+               {
+                       dbg_printk("op load field s8\n");
+
+                       estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(FILTER_OP_LOAD_FIELD_S16):
+               {
+                       dbg_printk("op load field s16\n");
+
+                       estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(FILTER_OP_LOAD_FIELD_S32):
+               {
+                       dbg_printk("op load field s32\n");
+
+                       estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(FILTER_OP_LOAD_FIELD_S64):
+               {
+                       dbg_printk("op load field s64\n");
+
+                       estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(FILTER_OP_LOAD_FIELD_U8):
+               {
+                       dbg_printk("op load field u8\n");
+
+                       estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(FILTER_OP_LOAD_FIELD_U16):
+               {
+                       dbg_printk("op load field u16\n");
+
+                       estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(FILTER_OP_LOAD_FIELD_U32):
+               {
+                       dbg_printk("op load field u32\n");
+
+                       estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(FILTER_OP_LOAD_FIELD_U64):
+               {
+                       dbg_printk("op load field u64\n");
+
+                       estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(FILTER_OP_LOAD_FIELD_DOUBLE):
+               {
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               OP(FILTER_OP_LOAD_FIELD_STRING):
+               {
+                       const char *str;
+
+                       dbg_printk("op load field string\n");
+                       str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax(stack, top)->u.s.str = str;
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL string.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+
+               OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
+               {
+                       const char *ptr;
+
+                       dbg_printk("op load field string sequence\n");
+                       ptr = estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
+                       estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL sequence.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+
+       END_OP
+end:
+       /* return 0 (discard) on error */
+       if (ret)
+               return 0;
+       return retval;
+}
+
+#undef START_OP
+#undef OP
+#undef PO
+#undef END_OP
diff --git a/src/lttng-filter-specialize.c b/src/lttng-filter-specialize.c
new file mode 100644 (file)
index 0000000..ccc4583
--- /dev/null
@@ -0,0 +1,1215 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-filter-specialize.c
+ *
+ * LTTng modules filter code specializer.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/slab.h>
+#include <lttng/filter.h>
+#include <lttng/align.h>
+
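+/*
+ * Reserve "len" bytes, aligned on "align", in the runtime data area used
+ * to store specialized operands such as struct filter_get_index_data.
+ * The backing allocation grows geometrically (next power of two, or
+ * double the current size, whichever is larger), keeping the number of
+ * krealloc() calls logarithmic. Returns the aligned offset of the
+ * reserved space, or a negative error value.
+ */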
+static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
+               size_t align, size_t len)
+{
+       ssize_t ret;
+       size_t padding = offset_align(runtime->data_len, align);
+       size_t new_len = runtime->data_len + padding + len;
+       size_t new_alloc_len = new_len;
+       size_t old_alloc_len = runtime->data_alloc_len;
+
+       if (new_len > FILTER_MAX_DATA_LEN)
+               return -EINVAL;
+
+       if (new_alloc_len > old_alloc_len) {
+               char *newptr;
+
+               new_alloc_len =
+                       max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
+               newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
+               if (!newptr)
+                       return -ENOMEM;
+               runtime->data = newptr;
+               /* Zero the newly allocated memory, from the end of the previous allocation onward. */
+               memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
+               runtime->data_alloc_len = new_alloc_len;
+       }
+       runtime->data_len += padding;
+       ret = runtime->data_len;
+       runtime->data_len += len;
+       return ret;
+}
+
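+/*
+ * Copy an operand into the runtime data area and return its offset, so
+ * that rewritten instructions can refer to it through a compact index.
+ */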
+static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
+               const void *p, size_t align, size_t len)
+{
+       ssize_t offset;
+
+       offset = bytecode_reserve_data(runtime, align, len);
+       if (offset < 0)
+               return offset;
+       memcpy(&runtime->data[offset], p, len);
+       return offset;
+}
+
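+/*
+ * Rewrite the generic FILTER_OP_LOAD_FIELD instruction into a typed
+ * variant chosen from the object type on top of the virtual stack.
+ * Reverse byte order integer fields keep the generic opcode, since no
+ * byte-swapping specialized loads exist.
+ */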
+static int specialize_load_field(struct vstack_entry *stack_top,
+               struct load_op *insn)
+{
+       int ret;
+
+       switch (stack_top->load.type) {
+       case LOAD_OBJECT:
+               break;
+       case LOAD_ROOT_CONTEXT:
+       case LOAD_ROOT_APP_CONTEXT:
+       case LOAD_ROOT_PAYLOAD:
+       default:
+               dbg_printk("Filter warning: cannot load root, missing field name.\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       switch (stack_top->load.object_type) {
+       case OBJECT_TYPE_S8:
+               dbg_printk("op load field s8\n");
+               stack_top->type = REG_S64;
+               if (!stack_top->load.rev_bo)
+                       insn->op = FILTER_OP_LOAD_FIELD_S8;
+               break;
+       case OBJECT_TYPE_S16:
+               dbg_printk("op load field s16\n");
+               stack_top->type = REG_S64;
+               if (!stack_top->load.rev_bo)
+                       insn->op = FILTER_OP_LOAD_FIELD_S16;
+               break;
+       case OBJECT_TYPE_S32:
+               dbg_printk("op load field s32\n");
+               stack_top->type = REG_S64;
+               if (!stack_top->load.rev_bo)
+                       insn->op = FILTER_OP_LOAD_FIELD_S32;
+               break;
+       case OBJECT_TYPE_S64:
+               dbg_printk("op load field s64\n");
+               stack_top->type = REG_S64;
+               if (!stack_top->load.rev_bo)
+                       insn->op = FILTER_OP_LOAD_FIELD_S64;
+               break;
+       case OBJECT_TYPE_U8:
+               dbg_printk("op load field u8\n");
+               stack_top->type = REG_S64;
+               insn->op = FILTER_OP_LOAD_FIELD_U8;
+               break;
+       case OBJECT_TYPE_U16:
+               dbg_printk("op load field u16\n");
+               stack_top->type = REG_S64;
+               if (!stack_top->load.rev_bo)
+                       insn->op = FILTER_OP_LOAD_FIELD_U16;
+               break;
+       case OBJECT_TYPE_U32:
+               dbg_printk("op load field u32\n");
+               stack_top->type = REG_S64;
+               if (!stack_top->load.rev_bo)
+                       insn->op = FILTER_OP_LOAD_FIELD_U32;
+               break;
+       case OBJECT_TYPE_U64:
+               dbg_printk("op load field u64\n");
+               stack_top->type = REG_S64;
+               if (!stack_top->load.rev_bo)
+                       insn->op = FILTER_OP_LOAD_FIELD_U64;
+               break;
+       case OBJECT_TYPE_DOUBLE:
+               printk(KERN_WARNING "Double type unsupported\n\n");
+               ret = -EINVAL;
+               goto end;
+       case OBJECT_TYPE_STRING:
+               dbg_printk("op load field string\n");
+               stack_top->type = REG_STRING;
+               insn->op = FILTER_OP_LOAD_FIELD_STRING;
+               break;
+       case OBJECT_TYPE_STRING_SEQUENCE:
+               dbg_printk("op load field string sequence\n");
+               stack_top->type = REG_STRING;
+               insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
+               break;
+       case OBJECT_TYPE_DYNAMIC:
+               ret = -EINVAL;
+               goto end;
+       case OBJECT_TYPE_SEQUENCE:
+       case OBJECT_TYPE_ARRAY:
+       case OBJECT_TYPE_STRUCT:
+       case OBJECT_TYPE_VARIANT:
+               printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       return 0;
+
+end:
+       return ret;
+}
+
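+/*
+ * Map an integer element's bit width and signedness to the matching
+ * typed object, e.g. (16 bits, signed) -> OBJECT_TYPE_S16.
+ */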
+static int specialize_get_index_object_type(enum object_type *otype,
+               int signedness, uint32_t elem_len)
+{
+       switch (elem_len) {
+       case 8:
+               if (signedness)
+                       *otype = OBJECT_TYPE_S8;
+               else
+                       *otype = OBJECT_TYPE_U8;
+               break;
+       case 16:
+               if (signedness)
+                       *otype = OBJECT_TYPE_S16;
+               else
+                       *otype = OBJECT_TYPE_U16;
+               break;
+       case 32:
+               if (signedness)
+                       *otype = OBJECT_TYPE_S32;
+               else
+                       *otype = OBJECT_TYPE_U32;
+               break;
+       case 64:
+               if (signedness)
+                       *otype = OBJECT_TYPE_S64;
+               else
+                       *otype = OBJECT_TYPE_U64;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
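+/*
+ * Specialize an index access on an array or sequence of bytewise
+ * integers: the element's byte offset (index * element size) is
+ * computed once here and stored in the runtime data area as a
+ * struct filter_get_index_data, so the interpreter only adds a
+ * precomputed offset at event time. Array accesses are bounds-checked
+ * against the static array length during specialization.
+ */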
+static int specialize_get_index(struct bytecode_runtime *runtime,
+               struct load_op *insn, uint64_t index,
+               struct vstack_entry *stack_top,
+               int idx_len)
+{
+       int ret;
+       struct filter_get_index_data gid;
+       ssize_t data_offset;
+
+       memset(&gid, 0, sizeof(gid));
+       switch (stack_top->load.type) {
+       case LOAD_OBJECT:
+               switch (stack_top->load.object_type) {
+               case OBJECT_TYPE_ARRAY:
+               {
+                       const struct lttng_integer_type *integer_type;
+                       const struct lttng_event_field *field;
+                       uint32_t elem_len, num_elems;
+                       int signedness;
+
+                       field = stack_top->load.field;
+                       if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       integer_type = &field->type.u.array_nestable.elem_type->u.integer;
+                       num_elems = field->type.u.array_nestable.length;
+                       elem_len = integer_type->size;
+                       signedness = integer_type->signedness;
+                       if (index >= num_elems) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       ret = specialize_get_index_object_type(&stack_top->load.object_type,
+                                       signedness, elem_len);
+                       if (ret)
+                               goto end;
+                       gid.offset = index * (elem_len / CHAR_BIT);
+                       gid.array_len = num_elems * (elem_len / CHAR_BIT);
+                       gid.elem.type = stack_top->load.object_type;
+                       gid.elem.len = elem_len;
+                       if (integer_type->reverse_byte_order)
+                               gid.elem.rev_bo = true;
+                       stack_top->load.rev_bo = gid.elem.rev_bo;
+                       break;
+               }
+               case OBJECT_TYPE_SEQUENCE:
+               {
+                       const struct lttng_integer_type *integer_type;
+                       const struct lttng_event_field *field;
+                       uint32_t elem_len;
+                       int signedness;
+
+                       field = stack_top->load.field;
+                       if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
+                       elem_len = integer_type->size;
+                       signedness = integer_type->signedness;
+                       ret = specialize_get_index_object_type(&stack_top->load.object_type,
+                                       signedness, elem_len);
+                       if (ret)
+                               goto end;
+                       gid.offset = index * (elem_len / CHAR_BIT);
+                       gid.elem.type = stack_top->load.object_type;
+                       gid.elem.len = elem_len;
+                       if (integer_type->reverse_byte_order)
+                               gid.elem.rev_bo = true;
+                       stack_top->load.rev_bo = gid.elem.rev_bo;
+                       break;
+               }
+               case OBJECT_TYPE_STRUCT:
+                       /* Only generated by the specialize phase. */
+               case OBJECT_TYPE_VARIANT:       /* Fall-through */
+               default:
+                       printk(KERN_WARNING "Unexpected get index type %d",
+                               (int) stack_top->load.object_type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       case LOAD_ROOT_CONTEXT:
+       case LOAD_ROOT_APP_CONTEXT:
+       case LOAD_ROOT_PAYLOAD:
+               printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       data_offset = bytecode_push_data(runtime, &gid,
+               __alignof__(gid), sizeof(gid));
+       if (data_offset < 0) {
+               ret = -EINVAL;
+               goto end;
+       }
+       switch (idx_len) {
+       case 2:
+               ((struct get_index_u16 *) insn->data)->index = data_offset;
+               break;
+       case 8:
+               ((struct get_index_u64 *) insn->data)->index = data_offset;
+               break;
+       default:
+               ret = -EINVAL;
+               goto end;
+       }
+
+       return 0;
+
+end:
+       return ret;
+}
+
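+/*
+ * Resolve the field name referenced by a get_symbol instruction (found
+ * in the bytecode's relocation table) to an index into the static
+ * context field array.
+ */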
+static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
+               struct load_op *insn)
+{
+       uint16_t offset;
+       const char *name;
+
+       offset = ((struct get_symbol *) insn->data)->offset;
+       name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
+       return lttng_get_context_index(lttng_static_ctx, name);
+}
+
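+/*
+ * Infer the object type a load will produce from the event field type.
+ * Integer and enum fields are laid out as s64/u64 on the filter stack;
+ * arrays and sequences of context fields are treated as strings.
+ */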
+static int specialize_load_object(const struct lttng_event_field *field,
+               struct vstack_load *load, bool is_context)
+{
+       load->type = LOAD_OBJECT;
+       /*
+        * LTTng-UST lays out all integer fields as s64 on the stack for the filter.
+        */
+       switch (field->type.atype) {
+       case atype_integer:
+               if (field->type.u.integer.signedness)
+                       load->object_type = OBJECT_TYPE_S64;
+               else
+                       load->object_type = OBJECT_TYPE_U64;
+               load->rev_bo = false;
+               break;
+       case atype_enum_nestable:
+       {
+               const struct lttng_integer_type *itype =
+                       &field->type.u.enum_nestable.container_type->u.integer;
+
+               if (itype->signedness)
+                       load->object_type = OBJECT_TYPE_S64;
+               else
+                       load->object_type = OBJECT_TYPE_U64;
+               load->rev_bo = false;
+               break;
+       }
+       case atype_array_nestable:
+               if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+                       printk(KERN_WARNING "Array nesting only supports integer types.\n");
+                       return -EINVAL;
+               }
+               if (is_context) {
+                       load->object_type = OBJECT_TYPE_STRING;
+               } else {
+                       if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+                               load->object_type = OBJECT_TYPE_ARRAY;
+                               load->field = field;
+                       } else {
+                               load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+                       }
+               }
+               break;
+       case atype_sequence_nestable:
+               if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+                       printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
+                       return -EINVAL;
+               }
+               if (is_context) {
+                       load->object_type = OBJECT_TYPE_STRING;
+               } else {
+                       if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+                               load->object_type = OBJECT_TYPE_SEQUENCE;
+                               load->field = field;
+                       } else {
+                               load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+                       }
+               }
+               break;
+       case atype_string:
+               load->object_type = OBJECT_TYPE_STRING;
+               break;
+       case atype_struct_nestable:
+               printk(KERN_WARNING "Structure type cannot be loaded.\n");
+               return -EINVAL;
+       case atype_variant_nestable:
+               printk(KERN_WARNING "Variant type cannot be loaded.\n");
+               return -EINVAL;
+       default:
+               printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
+               return -EINVAL;
+       }
+       return 0;
+}
+
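+/*
+ * Specialize a symbol lookup on the context root: resolve the context
+ * field index now and rewrite the get_symbol instruction into a
+ * get_index_u16 referring to a filter_get_index_data record carrying
+ * the context index and element type.
+ */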
+static int specialize_context_lookup(struct bytecode_runtime *runtime,
+               struct load_op *insn,
+               struct vstack_load *load)
+{
+       int idx, ret;
+       struct lttng_ctx_field *ctx_field;
+       struct lttng_event_field *field;
+       struct filter_get_index_data gid;
+       ssize_t data_offset;
+
+       idx = specialize_context_lookup_name(runtime, insn);
+       if (idx < 0) {
+               return -ENOENT;
+       }
+       ctx_field = &lttng_static_ctx->fields[idx];
+       field = &ctx_field->event_field;
+       ret = specialize_load_object(field, load, true);
+       if (ret)
+               return ret;
+       /* Specialize each get_symbol into a get_index. */
+       insn->op = FILTER_OP_GET_INDEX_U16;
+       memset(&gid, 0, sizeof(gid));
+       gid.ctx_index = idx;
+       gid.elem.type = load->object_type;
+       data_offset = bytecode_push_data(runtime, &gid,
+               __alignof__(gid), sizeof(gid));
+       if (data_offset < 0) {
+               return -EINVAL;
+       }
+       ((struct get_index_u16 *) insn->data)->index = data_offset;
+       return 0;
+}
+
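+/*
+ * Specialize a symbol lookup on the payload root: walk the event
+ * descriptor's fields, accumulating each preceding field's on-stack
+ * size (8 bytes per integer/enum, length word plus pointer for arrays
+ * and sequences, pointer for strings) to compute the target field's
+ * offset in the interpreter stack data, then rewrite the instruction
+ * into a get_index_u16 on that precomputed offset.
+ */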
+static int specialize_event_payload_lookup(struct lttng_event *event,
+               struct bytecode_runtime *runtime,
+               struct load_op *insn,
+               struct vstack_load *load)
+{
+       const char *name;
+       uint16_t offset;
+       const struct lttng_event_desc *desc = event->desc;
+       unsigned int i, nr_fields;
+       bool found = false;
+       uint32_t field_offset = 0;
+       const struct lttng_event_field *field;
+       int ret;
+       struct filter_get_index_data gid;
+       ssize_t data_offset;
+
+       nr_fields = desc->nr_fields;
+       offset = ((struct get_symbol *) insn->data)->offset;
+       name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+       for (i = 0; i < nr_fields; i++) {
+               field = &desc->fields[i];
+               if (field->nofilter) {
+                       continue;
+               }
+               if (!strcmp(field->name, name)) {
+                       found = true;
+                       break;
+               }
+               /* compute field offset on stack */
+               switch (field->type.atype) {
+               case atype_integer:
+               case atype_enum_nestable:
+                       field_offset += sizeof(int64_t);
+                       break;
+               case atype_array_nestable:
+               case atype_sequence_nestable:
+                       field_offset += sizeof(unsigned long);
+                       field_offset += sizeof(void *);
+                       break;
+               case atype_string:
+                       field_offset += sizeof(void *);
+                       break;
+               default:
+                       ret = -EINVAL;
+                       goto end;
+               }
+       }
+       if (!found) {
+               ret = -EINVAL;
+               goto end;
+       }
+
+       ret = specialize_load_object(field, load, false);
+       if (ret)
+               goto end;
+
+       /* Specialize each get_symbol into a get_index. */
+       insn->op = FILTER_OP_GET_INDEX_U16;
+       memset(&gid, 0, sizeof(gid));
+       gid.offset = field_offset;
+       gid.elem.type = load->object_type;
+       data_offset = bytecode_push_data(runtime, &gid,
+               __alignof__(gid), sizeof(gid));
+       if (data_offset < 0) {
+               ret = -EINVAL;
+               goto end;
+       }
+       ((struct get_index_u16 *) insn->data)->index = data_offset;
+       ret = 0;
+end:
+       return ret;
+}
+
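+/*
+ * Specialization pass: iterate once over the bytecode while tracking,
+ * on a virtual stack, the register type each instruction produces, and
+ * rewrite generic operations (comparisons, casts, loads, symbol
+ * lookups) in place into type-specialized variants so the interpreter
+ * need not re-check operand types at event time.
+ */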
+int lttng_filter_specialize_bytecode(struct lttng_event *event,
+               struct bytecode_runtime *bytecode)
+{
+       void *pc, *next_pc, *start_pc;
+       int ret = -EINVAL;
+       struct vstack _stack;
+       struct vstack *stack = &_stack;
+
+       vstack_init(stack);
+
+       start_pc = &bytecode->code[0];
+       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+                       pc = next_pc) {
+               switch (*(filter_opcode_t *) pc) {
+               case FILTER_OP_UNKNOWN:
+               default:
+                       printk(KERN_WARNING "unknown bytecode op %u\n",
+                               (unsigned int) *(filter_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               case FILTER_OP_RETURN:
+               case FILTER_OP_RETURN_S64:
+                       ret = 0;
+                       goto end;
+
+               /* binary */
+               case FILTER_OP_MUL:
+               case FILTER_OP_DIV:
+               case FILTER_OP_MOD:
+               case FILTER_OP_PLUS:
+               case FILTER_OP_MINUS:
+                       printk(KERN_WARNING "unsupported bytecode op %u\n",
+                               (unsigned int) *(filter_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               case FILTER_OP_EQ:
+               {
+                       struct binary_op *insn = (struct binary_op *) pc;
+
+                       switch (vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STRING:
+                               if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+                                       insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
+                               else
+                                       insn->op = FILTER_OP_EQ_STRING;
+                               break;
+                       case REG_STAR_GLOB_STRING:
+                               insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
+                               break;
+                       case REG_S64:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_EQ_S64;
+                               else
+                                       insn->op = FILTER_OP_EQ_DOUBLE_S64;
+                               break;
+                       case REG_DOUBLE:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_EQ_S64_DOUBLE;
+                               else
+                                       insn->op = FILTER_OP_EQ_DOUBLE;
+                               break;
+                       }
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+
+               case FILTER_OP_NE:
+               {
+                       struct binary_op *insn = (struct binary_op *) pc;
+
+                       switch (vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STRING:
+                               if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+                                       insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
+                               else
+                                       insn->op = FILTER_OP_NE_STRING;
+                               break;
+                       case REG_STAR_GLOB_STRING:
+                               insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
+                               break;
+                       case REG_S64:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_NE_S64;
+                               else
+                                       insn->op = FILTER_OP_NE_DOUBLE_S64;
+                               break;
+                       case REG_DOUBLE:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_NE_S64_DOUBLE;
+                               else
+                                       insn->op = FILTER_OP_NE_DOUBLE;
+                               break;
+                       }
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+
+               case FILTER_OP_GT:
+               {
+                       struct binary_op *insn = (struct binary_op *) pc;
+
+                       switch (vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STAR_GLOB_STRING:
+                               printk(KERN_WARNING "invalid register type for > binary operator\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case REG_STRING:
+                               insn->op = FILTER_OP_GT_STRING;
+                               break;
+                       case REG_S64:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_GT_S64;
+                               else
+                                       insn->op = FILTER_OP_GT_DOUBLE_S64;
+                               break;
+                       case REG_DOUBLE:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_GT_S64_DOUBLE;
+                               else
+                                       insn->op = FILTER_OP_GT_DOUBLE;
+                               break;
+                       }
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+
+               case FILTER_OP_LT:
+               {
+                       struct binary_op *insn = (struct binary_op *) pc;
+
+                       switch (vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STAR_GLOB_STRING:
+                               printk(KERN_WARNING "invalid register type for < binary operator\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case REG_STRING:
+                               insn->op = FILTER_OP_LT_STRING;
+                               break;
+                       case REG_S64:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_LT_S64;
+                               else
+                                       insn->op = FILTER_OP_LT_DOUBLE_S64;
+                               break;
+                       case REG_DOUBLE:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_LT_S64_DOUBLE;
+                               else
+                                       insn->op = FILTER_OP_LT_DOUBLE;
+                               break;
+                       }
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+
+               case FILTER_OP_GE:
+               {
+                       struct binary_op *insn = (struct binary_op *) pc;
+
+                       switch (vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STAR_GLOB_STRING:
+                               printk(KERN_WARNING "invalid register type for >= binary operator\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case REG_STRING:
+                               insn->op = FILTER_OP_GE_STRING;
+                               break;
+                       case REG_S64:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_GE_S64;
+                               else
+                                       insn->op = FILTER_OP_GE_DOUBLE_S64;
+                               break;
+                       case REG_DOUBLE:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_GE_S64_DOUBLE;
+                               else
+                                       insn->op = FILTER_OP_GE_DOUBLE;
+                               break;
+                       }
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+               case FILTER_OP_LE:
+               {
+                       struct binary_op *insn = (struct binary_op *) pc;
+
+                       switch (vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STAR_GLOB_STRING:
+                               printk(KERN_WARNING "invalid register type for <= binary operator\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case REG_STRING:
+                               insn->op = FILTER_OP_LE_STRING;
+                               break;
+                       case REG_S64:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_LE_S64;
+                               else
+                                       insn->op = FILTER_OP_LE_DOUBLE_S64;
+                               break;
+                       case REG_DOUBLE:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = FILTER_OP_LE_S64_DOUBLE;
+                               else
+                                       insn->op = FILTER_OP_LE_DOUBLE;
+                               break;
+                       }
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+
+               case FILTER_OP_EQ_STRING:
+               case FILTER_OP_NE_STRING:
+               case FILTER_OP_GT_STRING:
+               case FILTER_OP_LT_STRING:
+               case FILTER_OP_GE_STRING:
+               case FILTER_OP_LE_STRING:
+               case FILTER_OP_EQ_STAR_GLOB_STRING:
+               case FILTER_OP_NE_STAR_GLOB_STRING:
+               case FILTER_OP_EQ_S64:
+               case FILTER_OP_NE_S64:
+               case FILTER_OP_GT_S64:
+               case FILTER_OP_LT_S64:
+               case FILTER_OP_GE_S64:
+               case FILTER_OP_LE_S64:
+               case FILTER_OP_EQ_DOUBLE:
+               case FILTER_OP_NE_DOUBLE:
+               case FILTER_OP_GT_DOUBLE:
+               case FILTER_OP_LT_DOUBLE:
+               case FILTER_OP_GE_DOUBLE:
+               case FILTER_OP_LE_DOUBLE:
+               case FILTER_OP_EQ_DOUBLE_S64:
+               case FILTER_OP_NE_DOUBLE_S64:
+               case FILTER_OP_GT_DOUBLE_S64:
+               case FILTER_OP_LT_DOUBLE_S64:
+               case FILTER_OP_GE_DOUBLE_S64:
+               case FILTER_OP_LE_DOUBLE_S64:
+               case FILTER_OP_EQ_S64_DOUBLE:
+               case FILTER_OP_NE_S64_DOUBLE:
+               case FILTER_OP_GT_S64_DOUBLE:
+               case FILTER_OP_LT_S64_DOUBLE:
+               case FILTER_OP_GE_S64_DOUBLE:
+               case FILTER_OP_LE_S64_DOUBLE:
+               case FILTER_OP_BIT_RSHIFT:
+               case FILTER_OP_BIT_LSHIFT:
+               case FILTER_OP_BIT_AND:
+               case FILTER_OP_BIT_OR:
+               case FILTER_OP_BIT_XOR:
+               {
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+
+               /* unary */
+               case FILTER_OP_UNARY_PLUS:
+               {
+                       struct unary_op *insn = (struct unary_op *) pc;
+
+                       switch (vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_S64:
+                               insn->op = FILTER_OP_UNARY_PLUS_S64;
+                               break;
+                       case REG_DOUBLE:
+                               insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
+                               break;
+                       }
+                       /* Pop 1, push 1 */
+                       next_pc += sizeof(struct unary_op);
+                       break;
+               }
+
+               case FILTER_OP_UNARY_MINUS:
+               {
+                       struct unary_op *insn = (struct unary_op *) pc;
+
+                       switch (vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_S64:
+                               insn->op = FILTER_OP_UNARY_MINUS_S64;
+                               break;
+                       case REG_DOUBLE:
+                               insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
+                               break;
+                       }
+                       /* Pop 1, push 1 */
+                       next_pc += sizeof(struct unary_op);
+                       break;
+               }
+
+               case FILTER_OP_UNARY_NOT:
+               {
+                       struct unary_op *insn = (struct unary_op *) pc;
+
+                       switch (vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_S64:
+                               insn->op = FILTER_OP_UNARY_NOT_S64;
+                               break;
+                       case REG_DOUBLE:
+                               insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
+                               break;
+                       }
+                       /* Pop 1, push 1 */
+                       next_pc += sizeof(struct unary_op);
+                       break;
+               }
+
+               case FILTER_OP_UNARY_BIT_NOT:
+               {
+                       /* Pop 1, push 1 */
+                       next_pc += sizeof(struct unary_op);
+                       break;
+               }
+
+               case FILTER_OP_UNARY_PLUS_S64:
+               case FILTER_OP_UNARY_MINUS_S64:
+               case FILTER_OP_UNARY_NOT_S64:
+               case FILTER_OP_UNARY_PLUS_DOUBLE:
+               case FILTER_OP_UNARY_MINUS_DOUBLE:
+               case FILTER_OP_UNARY_NOT_DOUBLE:
+               {
+                       /* Pop 1, push 1 */
+                       next_pc += sizeof(struct unary_op);
+                       break;
+               }
+
+               /* logical */
+               case FILTER_OP_AND:
+               case FILTER_OP_OR:
+               {
+                       /* Continue to next instruction */
+                       /* Pop 1 when jump not taken */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       next_pc += sizeof(struct logical_op);
+                       break;
+               }
+
+               /* load field ref */
+               case FILTER_OP_LOAD_FIELD_REF:
+               {
+                       printk(KERN_WARNING "Unknown field ref type\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               /* get context ref */
+               case FILTER_OP_GET_CONTEXT_REF:
+               {
+                       printk(KERN_WARNING "Unknown get context ref type\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               case FILTER_OP_LOAD_FIELD_REF_STRING:
+               case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
+               case FILTER_OP_GET_CONTEXT_REF_STRING:
+               case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
+               case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_STRING;
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       break;
+               }
+               case FILTER_OP_LOAD_FIELD_REF_S64:
+               case FILTER_OP_GET_CONTEXT_REF_S64:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       break;
+               }
+               case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
+               case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_DOUBLE;
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       break;
+               }
+
+               /* load from immediate operand */
+               case FILTER_OP_LOAD_STRING:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_STRING;
+                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+                       break;
+               }
+
+               case FILTER_OP_LOAD_STAR_GLOB_STRING:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+                       break;
+               }
+
+               case FILTER_OP_LOAD_S64:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct load_op)
+                                       + sizeof(struct literal_numeric);
+                       break;
+               }
+
+               case FILTER_OP_LOAD_DOUBLE:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_DOUBLE;
+                       next_pc += sizeof(struct load_op)
+                                       + sizeof(struct literal_double);
+                       break;
+               }
+
+               /* cast */
+               case FILTER_OP_CAST_TO_S64:
+               {
+                       struct cast_op *insn = (struct cast_op *) pc;
+
+                       switch (vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STRING:
+                       case REG_STAR_GLOB_STRING:
+                               printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case REG_S64:
+                               insn->op = FILTER_OP_CAST_NOP;
+                               break;
+                       case REG_DOUBLE:
+                               insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
+                               break;
+                       }
+                       /* Pop 1, push 1 */
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct cast_op);
+                       break;
+               }
+               case FILTER_OP_CAST_DOUBLE_TO_S64:
+               {
+                       /* Pop 1, push 1 */
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct cast_op);
+                       break;
+               }
+               case FILTER_OP_CAST_NOP:
+               {
+                       next_pc += sizeof(struct cast_op);
+                       break;
+               }
+
+               /*
+                * Instructions for recursive traversal through composed types.
+                */
+               case FILTER_OP_GET_CONTEXT_ROOT:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_PTR;
+                       vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+               case FILTER_OP_GET_APP_CONTEXT_ROOT:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_PTR;
+                       vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+               case FILTER_OP_GET_PAYLOAD_ROOT:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_PTR;
+                       vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+
+               case FILTER_OP_LOAD_FIELD:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
+                       /* Pop 1, push 1 */
+                       ret = specialize_load_field(vstack_ax(stack), insn);
+                       if (ret)
+                               goto end;
+
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+
+               case FILTER_OP_LOAD_FIELD_S8:
+               case FILTER_OP_LOAD_FIELD_S16:
+               case FILTER_OP_LOAD_FIELD_S32:
+               case FILTER_OP_LOAD_FIELD_S64:
+               case FILTER_OP_LOAD_FIELD_U8:
+               case FILTER_OP_LOAD_FIELD_U16:
+               case FILTER_OP_LOAD_FIELD_U32:
+               case FILTER_OP_LOAD_FIELD_U64:
+               {
+                       /* Pop 1, push 1 */
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+
+               case FILTER_OP_LOAD_FIELD_STRING:
+               case FILTER_OP_LOAD_FIELD_SEQUENCE:
+               {
+                       /* Pop 1, push 1 */
+                       vstack_ax(stack)->type = REG_STRING;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+
+               case FILTER_OP_LOAD_FIELD_DOUBLE:
+               {
+                       /* Pop 1, push 1 */
+                       vstack_ax(stack)->type = REG_DOUBLE;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+
+               case FILTER_OP_GET_SYMBOL:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       dbg_printk("op get symbol\n");
+                       switch (vstack_ax(stack)->load.type) {
+                       case LOAD_OBJECT:
+                               printk(KERN_WARNING "Nested fields not implemented yet.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case LOAD_ROOT_CONTEXT:
+                               /* Lookup context field. */
+                               ret = specialize_context_lookup(bytecode, insn,
+                                       &vstack_ax(stack)->load);
+                               if (ret)
+                                       goto end;
+                               break;
+                       case LOAD_ROOT_APP_CONTEXT:
+                               ret = -EINVAL;
+                               goto end;
+                       case LOAD_ROOT_PAYLOAD:
+                               /* Lookup event payload field. */
+                               ret = specialize_event_payload_lookup(event,
+                                       bytecode, insn,
+                                       &vstack_ax(stack)->load);
+                               if (ret)
+                                       goto end;
+                               break;
+                       }
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+                       break;
+               }
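+
+               /*
+                * For illustration: depending on the root of the pointer
+                * on the stack, the generic symbol lookup is specialized
+                * into a direct context or event payload field access;
+                * nested objects (LOAD_OBJECT) and app contexts are
+                * rejected here.
+                */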
+
+               case FILTER_OP_GET_SYMBOL_FIELD:
+               {
+                       /* Always generated by specialize phase. */
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               case FILTER_OP_GET_INDEX_U16:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+                       dbg_printk("op get index u16\n");
+                       /* Pop 1, push 1 */
+                       ret = specialize_get_index(bytecode, insn, index->index,
+                                       vstack_ax(stack), sizeof(*index));
+                       if (ret)
+                               goto end;
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+                       break;
+               }
+
+               case FILTER_OP_GET_INDEX_U64:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+                       dbg_printk("op get index u64\n");
+                       /* Pop 1, push 1 */
+                       ret = specialize_get_index(bytecode, insn, index->index,
+                                       vstack_ax(stack), sizeof(*index));
+                       if (ret)
+                               goto end;
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+                       break;
+               }
+
+               }
+       }
+end:
+       return ret;
+}
diff --git a/src/lttng-filter-validator.c b/src/lttng-filter-validator.c
new file mode 100644 (file)
index 0000000..38d6ed0
--- /dev/null
@@ -0,0 +1,1743 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-filter-validator.c
+ *
+ * LTTng modules filter bytecode validator.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/types.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+
+#include <wrapper/list.h>
+#include <lttng/filter.h>
+
+#define MERGE_POINT_TABLE_BITS         7
+#define MERGE_POINT_TABLE_SIZE         (1U << MERGE_POINT_TABLE_BITS)
+
+/* merge point table node */
+struct mp_node {
+       struct hlist_node node;
+
+       /* Context at merge point */
+       struct vstack stack;
+       unsigned long target_pc;
+};
+
+struct mp_table {
+       struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
+};
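+
+/*
+ * For illustration: a merge point targeting bytecode offset 42 is filed
+ * under bucket jhash_1word(42, 0) & (MERGE_POINT_TABLE_SIZE - 1), one
+ * of 128 hlist chains; collisions are resolved by walking the chain
+ * and matching on target_pc (see lttng_hash_match()).
+ */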
+
+static
+int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
+{
+       if (mp_node->target_pc == key_pc)
+               return 1;
+       else
+               return 0;
+}
+
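+/*
+ * For illustration: merge_points_compare() treats two virtual stacks as
+ * equal only when they have the same depth and the same register type
+ * in every slot; e.g. a one-entry stack holding REG_S64 does not match
+ * one holding REG_TYPE_UNKNOWN, and validation fails.
+ */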
+static
+int merge_points_compare(const struct vstack *stacka,
+                       const struct vstack *stackb)
+{
+       int i, len;
+
+       if (stacka->top != stackb->top)
+               return 1;
+       len = stacka->top + 1;
+       WARN_ON_ONCE(len < 0);
+       for (i = 0; i < len; i++) {
+               if (stacka->e[i].type != stackb->e[i].type)
+                       return 1;
+       }
+       return 0;
+}
+
+static
+int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
+               const struct vstack *stack)
+{
+       struct mp_node *mp_node;
+       unsigned long hash = jhash_1word(target_pc, 0);
+       struct hlist_head *head;
+       struct mp_node *lookup_node;
+       int found = 0;
+
+       dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
+                       target_pc, hash);
+       mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
+       if (!mp_node)
+               return -ENOMEM;
+       mp_node->target_pc = target_pc;
+       memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
+
+       head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
+       lttng_hlist_for_each_entry(lookup_node, head, node) {
+               if (lttng_hash_match(lookup_node, target_pc)) {
+                       found = 1;
+                       break;
+               }
+       }
+       if (found) {
+               /* Key already present */
+               dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
+                               target_pc, hash);
+               kfree(mp_node);
+               if (merge_points_compare(stack, &lookup_node->stack)) {
+                       printk(KERN_WARNING "Merge points differ for offset %lu\n",
+                               target_pc);
+                       return -EINVAL;
+               }
+       } else {
+               hlist_add_head(&mp_node->node, head);
+       }
+       return 0;
+}
+
+/*
+ * Binary comparators use top of stack and top of stack -1.
+ */
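+
+/*
+ * For example, 'name == "foo"' reaches FILTER_OP_EQ with REG_STRING in
+ * both ax and bx and is accepted, while 'name == 42' mixes REG_STRING
+ * with REG_S64 and takes the error_mismatch path. Star-glob patterns
+ * are only accepted by the == and != operators.
+ */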
+static
+int bin_op_compare_check(struct vstack *stack, const filter_opcode_t opcode,
+               const char *str)
+{
+       if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+               goto error_empty;
+
+       switch (vstack_ax(stack)->type) {
+       default:
+       case REG_DOUBLE:
+               goto error_type;
+
+       case REG_STRING:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+                       goto unknown;
+               case REG_STRING:
+                       break;
+               case REG_STAR_GLOB_STRING:
+                       if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
+                               goto error_mismatch;
+                       }
+                       break;
+               case REG_S64:
+                       goto error_mismatch;
+               }
+               break;
+       case REG_STAR_GLOB_STRING:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+                       goto unknown;
+               case REG_STRING:
+                       if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
+                               goto error_mismatch;
+                       }
+                       break;
+               case REG_STAR_GLOB_STRING:
+               case REG_S64:
+                       goto error_mismatch;
+               }
+               break;
+       case REG_S64:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+                       goto unknown;
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+                       goto error_mismatch;
+               case REG_S64:
+                       break;
+               }
+               break;
+       case REG_TYPE_UNKNOWN:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+               case REG_S64:
+                       goto unknown;
+               }
+               break;
+       }
+       return 0;
+
+unknown:
+       return 1;
+
+error_empty:
+       printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
+       return -EINVAL;
+
+error_mismatch:
+       printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
+       return -EINVAL;
+
+error_type:
+       printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
+       return -EINVAL;
+}
+
+/*
+ * Binary bitwise operators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
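+
+/*
+ * For example, a filter like '(flags & 0x3) != 0' (field name
+ * illustrative) reaches FILTER_OP_BIT_AND with REG_S64 in ax and bx
+ * and is accepted; a string operand takes the error_type path, and a
+ * REG_TYPE_UNKNOWN operand defers the check to run time (return 1).
+ */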
+static
+int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
+               const char *str)
+{
+       if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+               goto error_empty;
+
+       switch (vstack_ax(stack)->type) {
+       default:
+       case REG_DOUBLE:
+               goto error_type;
+
+       case REG_TYPE_UNKNOWN:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+               case REG_S64:
+                       goto unknown;
+               }
+               break;
+       case REG_S64:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+                       goto unknown;
+               case REG_S64:
+                       break;
+               }
+               break;
+       }
+       return 0;
+
+unknown:
+       return 1;
+
+error_empty:
+       printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
+       return -EINVAL;
+
+error_type:
+       printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
+       return -EINVAL;
+}
+
+static
+int validate_get_symbol(struct bytecode_runtime *bytecode,
+               const struct get_symbol *sym)
+{
+       const char *str, *str_limit;
+       size_t len_limit;
+
+       if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+               return -EINVAL;
+
+       str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
+       str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+       len_limit = str_limit - str;
+       if (strnlen(str, len_limit) == len_limit)
+               return -EINVAL;
+       return 0;
+}
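+
+/*
+ * For illustration: symbol names are stored in the data section
+ * starting at reloc_offset, so a sym->offset pointing past the end of
+ * the bytecode, or a name whose terminating NUL falls outside the
+ * buffer, is rejected.
+ */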
+
+/*
+ * Validate bytecode range overflow within the validation pass.
+ * Called for each instruction encountered.
+ */
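+/*
+ * For example, a FILTER_OP_LOAD_STRING instruction whose string operand
+ * is missing its terminating NUL within the buffer, or whose header
+ * alone overflows the buffer, is rejected with -ERANGE, so later passes
+ * never read past the bytecode buffer.
+ */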
+static
+int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
+               char *start_pc, char *pc)
+{
+       int ret = 0;
+
+       switch (*(filter_opcode_t *) pc) {
+       case FILTER_OP_UNKNOWN:
+       default:
+       {
+               printk(KERN_WARNING "unknown bytecode op %u\n",
+                       (unsigned int) *(filter_opcode_t *) pc);
+               ret = -EINVAL;
+               break;
+       }
+
+       case FILTER_OP_RETURN:
+       case FILTER_OP_RETURN_S64:
+       {
+               if (unlikely(pc + sizeof(struct return_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /* binary */
+       case FILTER_OP_MUL:
+       case FILTER_OP_DIV:
+       case FILTER_OP_MOD:
+       case FILTER_OP_PLUS:
+       case FILTER_OP_MINUS:
+       /* Floating point */
+       case FILTER_OP_EQ_DOUBLE:
+       case FILTER_OP_NE_DOUBLE:
+       case FILTER_OP_GT_DOUBLE:
+       case FILTER_OP_LT_DOUBLE:
+       case FILTER_OP_GE_DOUBLE:
+       case FILTER_OP_LE_DOUBLE:
+       case FILTER_OP_EQ_DOUBLE_S64:
+       case FILTER_OP_NE_DOUBLE_S64:
+       case FILTER_OP_GT_DOUBLE_S64:
+       case FILTER_OP_LT_DOUBLE_S64:
+       case FILTER_OP_GE_DOUBLE_S64:
+       case FILTER_OP_LE_DOUBLE_S64:
+       case FILTER_OP_EQ_S64_DOUBLE:
+       case FILTER_OP_NE_S64_DOUBLE:
+       case FILTER_OP_GT_S64_DOUBLE:
+       case FILTER_OP_LT_S64_DOUBLE:
+       case FILTER_OP_GE_S64_DOUBLE:
+       case FILTER_OP_LE_S64_DOUBLE:
+       case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
+       case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
+       case FILTER_OP_LOAD_DOUBLE:
+       case FILTER_OP_CAST_DOUBLE_TO_S64:
+       case FILTER_OP_UNARY_PLUS_DOUBLE:
+       case FILTER_OP_UNARY_MINUS_DOUBLE:
+       case FILTER_OP_UNARY_NOT_DOUBLE:
+       {
+               printk(KERN_WARNING "unsupported bytecode op %u\n",
+                       (unsigned int) *(filter_opcode_t *) pc);
+               ret = -EINVAL;
+               break;
+       }
+
+       case FILTER_OP_EQ:
+       case FILTER_OP_NE:
+       case FILTER_OP_GT:
+       case FILTER_OP_LT:
+       case FILTER_OP_GE:
+       case FILTER_OP_LE:
+       case FILTER_OP_EQ_STRING:
+       case FILTER_OP_NE_STRING:
+       case FILTER_OP_GT_STRING:
+       case FILTER_OP_LT_STRING:
+       case FILTER_OP_GE_STRING:
+       case FILTER_OP_LE_STRING:
+       case FILTER_OP_EQ_STAR_GLOB_STRING:
+       case FILTER_OP_NE_STAR_GLOB_STRING:
+       case FILTER_OP_EQ_S64:
+       case FILTER_OP_NE_S64:
+       case FILTER_OP_GT_S64:
+       case FILTER_OP_LT_S64:
+       case FILTER_OP_GE_S64:
+       case FILTER_OP_LE_S64:
+       case FILTER_OP_BIT_RSHIFT:
+       case FILTER_OP_BIT_LSHIFT:
+       case FILTER_OP_BIT_AND:
+       case FILTER_OP_BIT_OR:
+       case FILTER_OP_BIT_XOR:
+       {
+               if (unlikely(pc + sizeof(struct binary_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /* unary */
+       case FILTER_OP_UNARY_PLUS:
+       case FILTER_OP_UNARY_MINUS:
+       case FILTER_OP_UNARY_NOT:
+       case FILTER_OP_UNARY_PLUS_S64:
+       case FILTER_OP_UNARY_MINUS_S64:
+       case FILTER_OP_UNARY_NOT_S64:
+       case FILTER_OP_UNARY_BIT_NOT:
+       {
+               if (unlikely(pc + sizeof(struct unary_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /* logical */
+       case FILTER_OP_AND:
+       case FILTER_OP_OR:
+       {
+               if (unlikely(pc + sizeof(struct logical_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /* load field ref */
+       case FILTER_OP_LOAD_FIELD_REF:
+       {
+               printk(KERN_WARNING "Unknown field ref type\n");
+               ret = -EINVAL;
+               break;
+       }
+
+       /* get context ref */
+       case FILTER_OP_GET_CONTEXT_REF:
+       {
+               printk(KERN_WARNING "Unknown field ref type\n");
+               ret = -EINVAL;
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_REF_STRING:
+       case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
+       case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
+       case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+       case FILTER_OP_LOAD_FIELD_REF_S64:
+       case FILTER_OP_GET_CONTEXT_REF_STRING:
+       case FILTER_OP_GET_CONTEXT_REF_S64:
+       {
+               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /* load from immediate operand */
+       case FILTER_OP_LOAD_STRING:
+       case FILTER_OP_LOAD_STAR_GLOB_STRING:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               uint32_t str_len, maxlen;
+
+               if (unlikely(pc + sizeof(struct load_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+                       break;
+               }
+
+               maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
+               str_len = strnlen(insn->data, maxlen);
+               if (unlikely(str_len >= maxlen)) {
+                       /* Final '\0' not found within range */
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       case FILTER_OP_LOAD_S64:
+       {
+               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       case FILTER_OP_CAST_TO_S64:
+       case FILTER_OP_CAST_NOP:
+       {
+               if (unlikely(pc + sizeof(struct cast_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /*
+        * Instructions for recursive traversal through composed types.
+        */
+       case FILTER_OP_GET_CONTEXT_ROOT:
+       case FILTER_OP_GET_APP_CONTEXT_ROOT:
+       case FILTER_OP_GET_PAYLOAD_ROOT:
+       case FILTER_OP_LOAD_FIELD:
+       case FILTER_OP_LOAD_FIELD_S8:
+       case FILTER_OP_LOAD_FIELD_S16:
+       case FILTER_OP_LOAD_FIELD_S32:
+       case FILTER_OP_LOAD_FIELD_S64:
+       case FILTER_OP_LOAD_FIELD_U8:
+       case FILTER_OP_LOAD_FIELD_U16:
+       case FILTER_OP_LOAD_FIELD_U32:
+       case FILTER_OP_LOAD_FIELD_U64:
+       case FILTER_OP_LOAD_FIELD_STRING:
+       case FILTER_OP_LOAD_FIELD_SEQUENCE:
+       case FILTER_OP_LOAD_FIELD_DOUBLE:
+               if (unlikely(pc + sizeof(struct load_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+
+       case FILTER_OP_GET_SYMBOL:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+                       break;
+               }
+               ret = validate_get_symbol(bytecode, sym);
+               break;
+       }
+
+       case FILTER_OP_GET_SYMBOL_FIELD:
+               printk(KERN_WARNING "Unexpected get symbol field\n");
+               ret = -EINVAL;
+               break;
+
+       case FILTER_OP_GET_INDEX_U16:
+               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+
+       case FILTER_OP_GET_INDEX_U64:
+               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       return ret;
+}
+
+static
+unsigned long delete_all_nodes(struct mp_table *mp_table)
+{
+       struct mp_node *mp_node;
+       struct hlist_node *tmp;
+       unsigned long nr_nodes = 0;
+       int i;
+
+       for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
+               struct hlist_head *head;
+
+               head = &mp_table->mp_head[i];
+               lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
+                       kfree(mp_node);
+                       nr_nodes++;
+               }
+       }
+       return nr_nodes;
+}
+
+/*
+ * Return value:
+ * >=0: success
+ * <0: error
+ */
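+/*
+ * For illustration: this pass checks the stack state produced by the
+ * preceding instructions against what the current opcode consumes;
+ * FILTER_OP_EQ_S64, for instance, requires both ax and bx to already
+ * hold REG_S64.
+ */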
+static
+int validate_instruction_context(struct bytecode_runtime *bytecode,
+               struct vstack *stack,
+               char *start_pc,
+               char *pc)
+{
+       int ret = 0;
+       const filter_opcode_t opcode = *(filter_opcode_t *) pc;
+
+       switch (opcode) {
+       case FILTER_OP_UNKNOWN:
+       default:
+       {
+               printk(KERN_WARNING "unknown bytecode op %u\n",
+                       (unsigned int) *(filter_opcode_t *) pc);
+               ret = -EINVAL;
+               goto end;
+       }
+
+       case FILTER_OP_RETURN:
+       case FILTER_OP_RETURN_S64:
+       {
+               goto end;
+       }
+
+       /* binary */
+       case FILTER_OP_MUL:
+       case FILTER_OP_DIV:
+       case FILTER_OP_MOD:
+       case FILTER_OP_PLUS:
+       case FILTER_OP_MINUS:
+       /* Floating point */
+       case FILTER_OP_EQ_DOUBLE:
+       case FILTER_OP_NE_DOUBLE:
+       case FILTER_OP_GT_DOUBLE:
+       case FILTER_OP_LT_DOUBLE:
+       case FILTER_OP_GE_DOUBLE:
+       case FILTER_OP_LE_DOUBLE:
+       case FILTER_OP_EQ_DOUBLE_S64:
+       case FILTER_OP_NE_DOUBLE_S64:
+       case FILTER_OP_GT_DOUBLE_S64:
+       case FILTER_OP_LT_DOUBLE_S64:
+       case FILTER_OP_GE_DOUBLE_S64:
+       case FILTER_OP_LE_DOUBLE_S64:
+       case FILTER_OP_EQ_S64_DOUBLE:
+       case FILTER_OP_NE_S64_DOUBLE:
+       case FILTER_OP_GT_S64_DOUBLE:
+       case FILTER_OP_LT_S64_DOUBLE:
+       case FILTER_OP_GE_S64_DOUBLE:
+       case FILTER_OP_LE_S64_DOUBLE:
+       case FILTER_OP_UNARY_PLUS_DOUBLE:
+       case FILTER_OP_UNARY_MINUS_DOUBLE:
+       case FILTER_OP_UNARY_NOT_DOUBLE:
+       case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
+       case FILTER_OP_LOAD_DOUBLE:
+       case FILTER_OP_CAST_DOUBLE_TO_S64:
+       case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
+       {
+               printk(KERN_WARNING "unsupported bytecode op %u\n",
+                       (unsigned int) *(filter_opcode_t *) pc);
+               ret = -EINVAL;
+               goto end;
+       }
+
+       case FILTER_OP_EQ:
+       {
+               ret = bin_op_compare_check(stack, opcode, "==");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+       case FILTER_OP_NE:
+       {
+               ret = bin_op_compare_check(stack, opcode, "!=");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+       case FILTER_OP_GT:
+       {
+               ret = bin_op_compare_check(stack, opcode, ">");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+       case FILTER_OP_LT:
+       {
+               ret = bin_op_compare_check(stack, opcode, "<");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+       case FILTER_OP_GE:
+       {
+               ret = bin_op_compare_check(stack, opcode, ">=");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+       case FILTER_OP_LE:
+       {
+               ret = bin_op_compare_check(stack, opcode, "<=");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+
+       case FILTER_OP_EQ_STRING:
+       case FILTER_OP_NE_STRING:
+       case FILTER_OP_GT_STRING:
+       case FILTER_OP_LT_STRING:
+       case FILTER_OP_GE_STRING:
+       case FILTER_OP_LE_STRING:
+       {
+               if (!vstack_ax(stack) || !vstack_bx(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_STRING
+                               || vstack_bx(stack)->type != REG_STRING) {
+                       printk(KERN_WARNING "Unexpected register type for string comparator\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       }
+
+       case FILTER_OP_EQ_STAR_GLOB_STRING:
+       case FILTER_OP_NE_STAR_GLOB_STRING:
+       {
+               if (!vstack_ax(stack) || !vstack_bx(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
+                               && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
+                       printk(KERN_WARNING "Unexpected register type for globbing pattern comparator\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       }
+
+       case FILTER_OP_EQ_S64:
+       case FILTER_OP_NE_S64:
+       case FILTER_OP_GT_S64:
+       case FILTER_OP_LT_S64:
+       case FILTER_OP_GE_S64:
+       case FILTER_OP_LE_S64:
+       {
+               if (!vstack_ax(stack) || !vstack_bx(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_S64
+                               || vstack_bx(stack)->type != REG_S64) {
+                       printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       }
+
+       case FILTER_OP_BIT_RSHIFT:
+               ret = bin_op_bitwise_check(stack, opcode, ">>");
+               if (ret < 0)
+                       goto end;
+               break;
+       case FILTER_OP_BIT_LSHIFT:
+               ret = bin_op_bitwise_check(stack, opcode, "<<");
+               if (ret < 0)
+                       goto end;
+               break;
+       case FILTER_OP_BIT_AND:
+               ret = bin_op_bitwise_check(stack, opcode, "&");
+               if (ret < 0)
+                       goto end;
+               break;
+       case FILTER_OP_BIT_OR:
+               ret = bin_op_bitwise_check(stack, opcode, "|");
+               if (ret < 0)
+                       goto end;
+               break;
+       case FILTER_OP_BIT_XOR:
+               ret = bin_op_bitwise_check(stack, opcode, "^");
+               if (ret < 0)
+                       goto end;
+               break;
+
+       /* unary */
+       case FILTER_OP_UNARY_PLUS:
+       case FILTER_OP_UNARY_MINUS:
+       case FILTER_OP_UNARY_NOT:
+       {
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       printk(KERN_WARNING "unknown register type\n");
+                       ret = -EINVAL;
+                       goto end;
+
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+                       printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
+                       ret = -EINVAL;
+                       goto end;
+               case REG_S64:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               }
+               break;
+       }
+       case FILTER_OP_UNARY_BIT_NOT:
+       {
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               default:
+                       printk(KERN_WARNING "unknown register type\n");
+                       ret = -EINVAL;
+                       goto end;
+
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+               case REG_DOUBLE:
+                       printk(KERN_WARNING "Unary bitwise op can only be applied to numeric registers\n");
+                       ret = -EINVAL;
+                       goto end;
+               case REG_S64:
+                       break;
+               case REG_TYPE_UNKNOWN:
+                       break;
+               }
+               break;
+       }
+
+       case FILTER_OP_UNARY_PLUS_S64:
+       case FILTER_OP_UNARY_MINUS_S64:
+       case FILTER_OP_UNARY_NOT_S64:
+       {
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_S64) {
+                       printk(KERN_WARNING "Invalid register type\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       }
+
+       /* logical */
+       case FILTER_OP_AND:
+       case FILTER_OP_OR:
+       {
+               struct logical_op *insn = (struct logical_op *) pc;
+
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_S64) {
+                       printk(KERN_WARNING "Logical comparator expects S64 register\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               dbg_printk("Validate jumping to bytecode offset %u\n",
+                       (unsigned int) insn->skip_offset);
+               if (unlikely(start_pc + insn->skip_offset <= pc)) {
+                       printk(KERN_WARNING "Loops are not allowed in bytecode\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       }
+
+       /* load field ref */
+       case FILTER_OP_LOAD_FIELD_REF:
+       {
+               printk(KERN_WARNING "Unknown field ref type\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       case FILTER_OP_LOAD_FIELD_REF_STRING:
+       case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
+       case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
+       case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct field_ref *ref = (struct field_ref *) insn->data;
+
+               dbg_printk("Validate load field ref offset %u type string\n",
+                       ref->offset);
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_REF_S64:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct field_ref *ref = (struct field_ref *) insn->data;
+
+               dbg_printk("Validate load field ref offset %u type s64\n",
+                       ref->offset);
+               break;
+       }
+
+       /* load from immediate operand */
+       case FILTER_OP_LOAD_STRING:
+       case FILTER_OP_LOAD_STAR_GLOB_STRING:
+       {
+               break;
+       }
+
+       case FILTER_OP_LOAD_S64:
+       {
+               break;
+       }
+
+       case FILTER_OP_CAST_TO_S64:
+       {
+               struct cast_op *insn = (struct cast_op *) pc;
+
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       printk(KERN_WARNING "unknown register type\n");
+                       ret = -EINVAL;
+                       goto end;
+
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+                       printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
+                       ret = -EINVAL;
+                       goto end;
+               case REG_S64:
+                       break;
+               }
+               if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
+                       if (vstack_ax(stack)->type != REG_DOUBLE) {
+                               printk(KERN_WARNING "Cast expects double\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+               }
+               break;
+       }
+       case FILTER_OP_CAST_NOP:
+       {
+               break;
+       }
+
+       /* get context ref */
+       case FILTER_OP_GET_CONTEXT_REF:
+       {
+               printk(KERN_WARNING "Unknown get context ref type\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       case FILTER_OP_GET_CONTEXT_REF_STRING:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct field_ref *ref = (struct field_ref *) insn->data;
+
+               dbg_printk("Validate get context ref offset %u type string\n",
+                       ref->offset);
+               break;
+       }
+       case FILTER_OP_GET_CONTEXT_REF_S64:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct field_ref *ref = (struct field_ref *) insn->data;
+
+               dbg_printk("Validate get context ref offset %u type s64\n",
+                       ref->offset);
+               break;
+       }
+
+       /*
+        * Instructions for recursive traversal through composed types.
+        */
+       case FILTER_OP_GET_CONTEXT_ROOT:
+       {
+               dbg_printk("Validate get context root\n");
+               break;
+       }
+       case FILTER_OP_GET_APP_CONTEXT_ROOT:
+       {
+               dbg_printk("Validate get app context root\n");
+               break;
+       }
+       case FILTER_OP_GET_PAYLOAD_ROOT:
+       {
+               dbg_printk("Validate get payload root\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD:
+       {
+               /*
+                * We tolerate that field type is unknown at validation,
+                * because we are performing the load specialization in
+                * a phase after validation.
+                */
+               dbg_printk("Validate load field\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_S8:
+       {
+               dbg_printk("Validate load field s8\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_S16:
+       {
+               dbg_printk("Validate load field s16\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_S32:
+       {
+               dbg_printk("Validate load field s32\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_S64:
+       {
+               dbg_printk("Validate load field s64\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_U8:
+       {
+               dbg_printk("Validate load field u8\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_U16:
+       {
+               dbg_printk("Validate load field u16\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_U32:
+       {
+               dbg_printk("Validate load field u32\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_U64:
+       {
+               dbg_printk("Validate load field u64\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_STRING:
+       {
+               dbg_printk("Validate load field string\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_SEQUENCE:
+       {
+               dbg_printk("Validate load field sequence\n");
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_DOUBLE:
+       {
+               dbg_printk("Validate load field double\n");
+               break;
+       }
+
+       case FILTER_OP_GET_SYMBOL:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+               dbg_printk("Validate get symbol offset %u\n", sym->offset);
+               break;
+       }
+
+       case FILTER_OP_GET_SYMBOL_FIELD:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+               dbg_printk("Validate get symbol field offset %u\n", sym->offset);
+               break;
+       }
+
+       case FILTER_OP_GET_INDEX_U16:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
+
+               dbg_printk("Validate get index u16 index %u\n", get_index->index);
+               break;
+       }
+
+       case FILTER_OP_GET_INDEX_U64:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
+
+               dbg_printk("Validate get index u64 index %llu\n",
+                       (unsigned long long) get_index->index);
+               break;
+       }
+       }
+end:
+       return ret;
+}
+
+/*
+ * Return value:
+ * 0: success
+ * <0: error
+ */
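+/*
+ * For illustration: besides validating the fall-through context, this
+ * compares the live stack against every merge point registered for the
+ * current offset by a preceding and/or operator; a merge point that
+ * validates is removed from the table.
+ */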
+static
+int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
+               struct mp_table *mp_table,
+               struct vstack *stack,
+               char *start_pc,
+               char *pc)
+{
+       int ret, found = 0;
+       unsigned long target_pc = pc - start_pc;
+       unsigned long hash;
+       struct hlist_head *head;
+       struct mp_node *mp_node;
+
+       /* Validate the context resulting from the previous instruction */
+       ret = validate_instruction_context(bytecode, stack, start_pc, pc);
+       if (ret < 0)
+               return ret;
+
+       /* Validate merge points */
+       hash = jhash_1word(target_pc, 0);
+       head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
+       lttng_hlist_for_each_entry(mp_node, head, node) {
+               if (lttng_hash_match(mp_node, target_pc)) {
+                       found = 1;
+                       break;
+               }
+       }
+       if (found) {
+               dbg_printk("Filter: validate merge point at offset %lu\n",
+                               target_pc);
+               if (merge_points_compare(stack, &mp_node->stack)) {
+                       printk(KERN_WARNING "Merge points differ for offset %lu\n",
+                               target_pc);
+                       return -EINVAL;
+               }
+               /* Once validated, we can remove and free the merge point */
+               dbg_printk("Filter: remove merge point at offset %lu\n",
+                               target_pc);
+               hlist_del(&mp_node->node);
+               kfree(mp_node);
+       }
+       return 0;
+}
+
+/*
+ * Return value:
+ * >0: going to next insn.
+ * 0: success, stop iteration.
+ * <0: error
+ */
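+/*
+ * For illustration: exec_insn() "executes" one instruction over the
+ * type stack only, never over values; FILTER_OP_EQ_S64, for instance,
+ * pops its two operands and pushes a single REG_S64 result, mirroring
+ * what the interpreter does with real registers.
+ */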
+static
+int exec_insn(struct bytecode_runtime *bytecode,
+               struct mp_table *mp_table,
+               struct vstack *stack,
+               char **_next_pc,
+               char *pc)
+{
+       int ret = 1;
+       char *next_pc = *_next_pc;
+
+       switch (*(filter_opcode_t *) pc) {
+       case FILTER_OP_UNKNOWN:
+       default:
+       {
+               printk(KERN_WARNING "unknown bytecode op %u\n",
+                       (unsigned int) *(filter_opcode_t *) pc);
+               ret = -EINVAL;
+               goto end;
+       }
+
+       case FILTER_OP_RETURN:
+       {
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               ret = 0;
+               goto end;
+       }
+
+       case FILTER_OP_RETURN_S64:
+       {
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+                       break;
+               default:
+               case REG_TYPE_UNKNOWN:
+                       printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               ret = 0;
+               goto end;
+       }
+
+       /* binary */
+       case FILTER_OP_MUL:
+       case FILTER_OP_DIV:
+       case FILTER_OP_MOD:
+       case FILTER_OP_PLUS:
+       case FILTER_OP_MINUS:
+       /* Floating point */
+       case FILTER_OP_EQ_DOUBLE:
+       case FILTER_OP_NE_DOUBLE:
+       case FILTER_OP_GT_DOUBLE:
+       case FILTER_OP_LT_DOUBLE:
+       case FILTER_OP_GE_DOUBLE:
+       case FILTER_OP_LE_DOUBLE:
+       case FILTER_OP_EQ_DOUBLE_S64:
+       case FILTER_OP_NE_DOUBLE_S64:
+       case FILTER_OP_GT_DOUBLE_S64:
+       case FILTER_OP_LT_DOUBLE_S64:
+       case FILTER_OP_GE_DOUBLE_S64:
+       case FILTER_OP_LE_DOUBLE_S64:
+       case FILTER_OP_EQ_S64_DOUBLE:
+       case FILTER_OP_NE_S64_DOUBLE:
+       case FILTER_OP_GT_S64_DOUBLE:
+       case FILTER_OP_LT_S64_DOUBLE:
+       case FILTER_OP_GE_S64_DOUBLE:
+       case FILTER_OP_LE_S64_DOUBLE:
+       case FILTER_OP_UNARY_PLUS_DOUBLE:
+       case FILTER_OP_UNARY_MINUS_DOUBLE:
+       case FILTER_OP_UNARY_NOT_DOUBLE:
+       case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
+       case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
+       case FILTER_OP_LOAD_DOUBLE:
+       case FILTER_OP_CAST_DOUBLE_TO_S64:
+       {
+               printk(KERN_WARNING "unsupported bytecode op %u\n",
+                       (unsigned int) *(filter_opcode_t *) pc);
+               ret = -EINVAL;
+               goto end;
+       }
+
+       case FILTER_OP_EQ:
+       case FILTER_OP_NE:
+       case FILTER_OP_GT:
+       case FILTER_OP_LT:
+       case FILTER_OP_GE:
+       case FILTER_OP_LE:
+       case FILTER_OP_EQ_STRING:
+       case FILTER_OP_NE_STRING:
+       case FILTER_OP_GT_STRING:
+       case FILTER_OP_LT_STRING:
+       case FILTER_OP_GE_STRING:
+       case FILTER_OP_LE_STRING:
+       case FILTER_OP_EQ_STAR_GLOB_STRING:
+       case FILTER_OP_NE_STAR_GLOB_STRING:
+       case FILTER_OP_EQ_S64:
+       case FILTER_OP_NE_S64:
+       case FILTER_OP_GT_S64:
+       case FILTER_OP_LT_S64:
+       case FILTER_OP_GE_S64:
+       case FILTER_OP_LE_S64:
+       case FILTER_OP_BIT_RSHIFT:
+       case FILTER_OP_BIT_LSHIFT:
+       case FILTER_OP_BIT_AND:
+       case FILTER_OP_BIT_OR:
+       case FILTER_OP_BIT_XOR:
+       {
+               /* Pop 2, push 1 */
+               if (vstack_pop(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_DOUBLE:
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct binary_op);
+               break;
+       }
+
+       /* unary */
+       case FILTER_OP_UNARY_PLUS:
+       case FILTER_OP_UNARY_MINUS:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+               next_pc += sizeof(struct unary_op);
+               break;
+       }
+
+       case FILTER_OP_UNARY_PLUS_S64:
+       case FILTER_OP_UNARY_MINUS_S64:
+       case FILTER_OP_UNARY_NOT_S64:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct unary_op);
+               break;
+       }
+
+       case FILTER_OP_UNARY_NOT:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct unary_op);
+               break;
+       }
+
+       case FILTER_OP_UNARY_BIT_NOT:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               case REG_DOUBLE:
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct unary_op);
+               break;
+       }
+
+       /* logical */
+       case FILTER_OP_AND:
+       case FILTER_OP_OR:
+       {
+               struct logical_op *insn = (struct logical_op *) pc;
+               int merge_ret;
+
+               /* Add merge point to table */
+               merge_ret = merge_point_add_check(mp_table,
+                                       insn->skip_offset, stack);
+               if (merge_ret) {
+                       ret = merge_ret;
+                       goto end;
+               }
+
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               /* There is always a cast-to-s64 operation before an and/or op. */
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+                       break;
+               default:
+                       printk(KERN_WARNING "Incorrect register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               /* Continue to next instruction */
+               /* Pop 1 when jump not taken */
+               if (vstack_pop(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               next_pc += sizeof(struct logical_op);
+               break;
+       }
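+
+       /*
+        * For illustration: for 'a && b', skip_offset points past the
+        * evaluation of 'b'; the merge point recorded above captures the
+        * stack before the pop, since the short-circuit jump leaves the
+        * result of 'a' on the stack.
+        */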
+
+       /* load field ref */
+       case FILTER_OP_LOAD_FIELD_REF:
+       {
+               printk(KERN_WARNING "Unknown field ref type\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       /* get context ref */
+       case FILTER_OP_GET_CONTEXT_REF:
+       {
+               printk(KERN_WARNING "Unknown get context ref type\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       case FILTER_OP_LOAD_FIELD_REF_STRING:
+       case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
+       case FILTER_OP_GET_CONTEXT_REF_STRING:
+       case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
+       case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+       {
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_STRING;
+               next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_REF_S64:
+       case FILTER_OP_GET_CONTEXT_REF_S64:
+       {
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+               break;
+       }
+
+       /* load from immediate operand */
+       case FILTER_OP_LOAD_STRING:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_STRING;
+               next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+               break;
+       }
+
+       case FILTER_OP_LOAD_STAR_GLOB_STRING:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+               next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+               break;
+       }
+
+       case FILTER_OP_LOAD_S64:
+       {
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct load_op)
+                               + sizeof(struct literal_numeric);
+               break;
+       }
+
+       case FILTER_OP_CAST_TO_S64:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_DOUBLE:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               default:
+                       printk(KERN_WARNING "Incorrect register type %d for cast\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct cast_op);
+               break;
+       }
+       case FILTER_OP_CAST_NOP:
+       {
+               next_pc += sizeof(struct cast_op);
+               break;
+       }
+
+       /*
+        * Instructions for recursive traversal through composed types.
+        */
+       case FILTER_OP_GET_CONTEXT_ROOT:
+       case FILTER_OP_GET_APP_CONTEXT_ROOT:
+       case FILTER_OP_GET_PAYLOAD_ROOT:
+       {
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_PTR;
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+
+       case FILTER_OP_LOAD_FIELD:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+
+       case FILTER_OP_LOAD_FIELD_S8:
+       case FILTER_OP_LOAD_FIELD_S16:
+       case FILTER_OP_LOAD_FIELD_S32:
+       case FILTER_OP_LOAD_FIELD_S64:
+       case FILTER_OP_LOAD_FIELD_U8:
+       case FILTER_OP_LOAD_FIELD_U16:
+       case FILTER_OP_LOAD_FIELD_U32:
+       case FILTER_OP_LOAD_FIELD_U64:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+
+       case FILTER_OP_LOAD_FIELD_STRING:
+       case FILTER_OP_LOAD_FIELD_SEQUENCE:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_STRING;
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+
+       case FILTER_OP_LOAD_FIELD_DOUBLE:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_DOUBLE;
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+
+       case FILTER_OP_GET_SYMBOL:
+       case FILTER_OP_GET_SYMBOL_FIELD:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+               break;
+       }
+
+       case FILTER_OP_GET_INDEX_U16:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+               break;
+       }
+
+       case FILTER_OP_GET_INDEX_U64:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+               break;
+       }
+
+       }
+end:
+       *_next_pc = next_pc;
+       return ret;
+}
+
+/*
+ * Never called concurrently (hash seed is shared).
+ */
+int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
+{
+       struct mp_table *mp_table;
+       char *pc, *next_pc, *start_pc;
+       int ret = -EINVAL;
+       struct vstack stack;
+
+       vstack_init(&stack);
+
+       mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
+       if (!mp_table) {
+               printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
+               return -ENOMEM;
+       }
+       start_pc = &bytecode->code[0];
+       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+                       pc = next_pc) {
+               ret = bytecode_validate_overflow(bytecode, start_pc, pc);
+               if (ret != 0) {
+                       if (ret == -ERANGE)
+                               printk(KERN_WARNING "filter bytecode overflow\n");
+                       goto end;
+               }
+               dbg_printk("Validating op %s (%u)\n",
+                       lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
+                       (unsigned int) *(filter_opcode_t *) pc);
+
+               /*
+                * For each instruction, validate the current context
+                * (traversal of entire execution flow), and validate
+                * all merge points targeting this instruction.
+                */
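+               /*
+                * Illustration (assumed typical case): a short-circuit
+                * operator such as FILTER_OP_AND records a merge point at
+                * its skip target; when the walk later reaches that target,
+                * the register stack saved at the merge point is checked
+                * for consistency with the fall-through stack.
+                */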
+               ret = validate_instruction_all_contexts(bytecode, mp_table,
+                                       &stack, start_pc, pc);
+               if (ret)
+                       goto end;
+               ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
+               if (ret <= 0)
+                       goto end;
+       }
+end:
+       if (delete_all_nodes(mp_table)) {
+               if (!ret) {
+                       printk(KERN_WARNING "Unexpected merge points\n");
+                       ret = -EINVAL;
+               }
+       }
+       kfree(mp_table);
+       return ret;
+}
diff --git a/src/lttng-filter.c b/src/lttng-filter.c
new file mode 100644 (file)
index 0000000..12c2264
--- /dev/null
@@ -0,0 +1,565 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-filter.c
+ *
+ * LTTng modules filter code.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <lttng/filter.h>
+
+static const char *opnames[] = {
+       [ FILTER_OP_UNKNOWN ] = "UNKNOWN",
+
+       [ FILTER_OP_RETURN ] = "RETURN",
+
+       /* binary */
+       [ FILTER_OP_MUL ] = "MUL",
+       [ FILTER_OP_DIV ] = "DIV",
+       [ FILTER_OP_MOD ] = "MOD",
+       [ FILTER_OP_PLUS ] = "PLUS",
+       [ FILTER_OP_MINUS ] = "MINUS",
+       [ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
+       [ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
+       [ FILTER_OP_BIT_AND ] = "BIT_AND",
+       [ FILTER_OP_BIT_OR ] = "BIT_OR",
+       [ FILTER_OP_BIT_XOR ] = "BIT_XOR",
+
+       /* binary comparators */
+       [ FILTER_OP_EQ ] = "EQ",
+       [ FILTER_OP_NE ] = "NE",
+       [ FILTER_OP_GT ] = "GT",
+       [ FILTER_OP_LT ] = "LT",
+       [ FILTER_OP_GE ] = "GE",
+       [ FILTER_OP_LE ] = "LE",
+
+       /* string binary comparators */
+       [ FILTER_OP_EQ_STRING ] = "EQ_STRING",
+       [ FILTER_OP_NE_STRING ] = "NE_STRING",
+       [ FILTER_OP_GT_STRING ] = "GT_STRING",
+       [ FILTER_OP_LT_STRING ] = "LT_STRING",
+       [ FILTER_OP_GE_STRING ] = "GE_STRING",
+       [ FILTER_OP_LE_STRING ] = "LE_STRING",
+
+       /* s64 binary comparators */
+       [ FILTER_OP_EQ_S64 ] = "EQ_S64",
+       [ FILTER_OP_NE_S64 ] = "NE_S64",
+       [ FILTER_OP_GT_S64 ] = "GT_S64",
+       [ FILTER_OP_LT_S64 ] = "LT_S64",
+       [ FILTER_OP_GE_S64 ] = "GE_S64",
+       [ FILTER_OP_LE_S64 ] = "LE_S64",
+
+       /* double binary comparators */
+       [ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
+       [ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
+       [ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
+       [ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
+       [ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
+       [ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",
+
+       /* Mixed S64-double binary comparators */
+       [ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
+       [ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
+       [ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
+       [ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
+       [ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
+       [ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
+
+       [ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
+       [ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
+       [ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
+       [ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
+       [ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
+       [ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
+
+       /* unary */
+       [ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
+       [ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
+       [ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
+       [ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
+       [ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
+       [ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
+       [ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
+       [ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
+       [ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
+
+       /* logical */
+       [ FILTER_OP_AND ] = "AND",
+       [ FILTER_OP_OR ] = "OR",
+
+       /* load field ref */
+       [ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
+       [ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
+       [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
+       [ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
+       [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
+
+       /* load from immediate operand */
+       [ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
+       [ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
+       [ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
+
+       /* cast */
+       [ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
+       [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
+       [ FILTER_OP_CAST_NOP ] = "CAST_NOP",
+
+       /* get context ref */
+       [ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
+       [ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
+       [ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
+       [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
+
+       /* load userspace field ref */
+       [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
+       [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
+
+       /*
+        * load star globbing pattern (literal string)
+        * from immediate operand.
+        */
+       [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
+
+       /* globbing pattern binary comparators */
+       [ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
+       [ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
+
+       /*
+        * Instructions for recursive traversal through composed types.
+        */
+       [ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
+       [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
+       [ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
+
+       [ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
+       [ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
+       [ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
+       [ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
+
+       [ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
+       [ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
+       [ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
+       [ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
+       [ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
+       [ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
+       [ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
+       [ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
+       [ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
+       [ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
+       [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
+       [ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
+
+       [ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
+
+       [ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
+};
+
+const char *lttng_filter_print_op(enum filter_op op)
+{
+       if (op >= NR_FILTER_OPS)
+               return "UNKNOWN";
+       else
+               return opnames[op];
+}
+
+static
+int apply_field_reloc(struct lttng_event *event,
+               struct bytecode_runtime *runtime,
+               uint32_t runtime_len,
+               uint32_t reloc_offset,
+               const char *field_name,
+               enum filter_op filter_op)
+{
+       const struct lttng_event_desc *desc;
+       const struct lttng_event_field *fields, *field = NULL;
+       unsigned int nr_fields, i;
+       struct load_op *op;
+       uint32_t field_offset = 0;
+
+       dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);
+
+       /* Lookup event by name */
+       desc = event->desc;
+       if (!desc)
+               return -EINVAL;
+       fields = desc->fields;
+       if (!fields)
+               return -EINVAL;
+       nr_fields = desc->nr_fields;
+       for (i = 0; i < nr_fields; i++) {
+               if (fields[i].nofilter)
+                       continue;
+               if (!strcmp(fields[i].name, field_name)) {
+                       field = &fields[i];
+                       break;
+               }
+               /* compute field offset */
+               switch (fields[i].type.atype) {
+               case atype_integer:
+               case atype_enum_nestable:
+                       field_offset += sizeof(int64_t);
+                       break;
+               case atype_array_nestable:
+                       if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
+                               return -EINVAL;
+                       field_offset += sizeof(unsigned long);
+                       field_offset += sizeof(void *);
+                       break;
+               case atype_sequence_nestable:
+                       if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
+                               return -EINVAL;
+                       field_offset += sizeof(unsigned long);
+                       field_offset += sizeof(void *);
+                       break;
+               case atype_string:
+                       field_offset += sizeof(void *);
+                       break;
+               case atype_struct_nestable:     /* Unsupported. */
+               case atype_variant_nestable:    /* Unsupported. */
+               default:
+                       return -EINVAL;
+               }
+       }
+       if (!field)
+               return -EINVAL;
+
+       /* Check if field offset is too large for 16-bit offset */
+       if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+               return -EINVAL;
+
+       /* set type */
+       op = (struct load_op *) &runtime->code[reloc_offset];
+
+       switch (filter_op) {
+       case FILTER_OP_LOAD_FIELD_REF:
+       {
+               struct field_ref *field_ref;
+
+               field_ref = (struct field_ref *) op->data;
+               switch (field->type.atype) {
+               case atype_integer:
+               case atype_enum_nestable:
+                       op->op = FILTER_OP_LOAD_FIELD_REF_S64;
+                       break;
+               case atype_array_nestable:
+               case atype_sequence_nestable:
+                       if (field->user)
+                               op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
+                       else
+                               op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
+                       break;
+               case atype_string:
+                       if (field->user)
+                               op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
+                       else
+                               op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
+                       break;
+               case atype_struct_nestable:     /* Unsupported. */
+               case atype_variant_nestable:    /* Unsupported. */
+               default:
+                       return -EINVAL;
+               }
+               /* set offset */
+               field_ref->offset = (uint16_t) field_offset;
+               break;
+       }
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static
+int apply_context_reloc(struct lttng_event *event,
+               struct bytecode_runtime *runtime,
+               uint32_t runtime_len,
+               uint32_t reloc_offset,
+               const char *context_name,
+               enum filter_op filter_op)
+{
+       struct load_op *op;
+       struct lttng_ctx_field *ctx_field;
+       int idx;
+
+       dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);
+
+       /* Get context index */
+       idx = lttng_get_context_index(lttng_static_ctx, context_name);
+       if (idx < 0)
+               return -ENOENT;
+
+       /* Check if idx is too large for 16-bit offset */
+       if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+               return -EINVAL;
+
+       /* Get context return type */
+       ctx_field = &lttng_static_ctx->fields[idx];
+       op = (struct load_op *) &runtime->code[reloc_offset];
+
+       switch (filter_op) {
+       case FILTER_OP_GET_CONTEXT_REF:
+       {
+               struct field_ref *field_ref;
+
+               field_ref = (struct field_ref *) op->data;
+               switch (ctx_field->event_field.type.atype) {
+               case atype_integer:
+               case atype_enum_nestable:
+                       op->op = FILTER_OP_GET_CONTEXT_REF_S64;
+                       break;
+                       /* Sequence and array supported as string */
+               case atype_string:
+                       BUG_ON(ctx_field->event_field.user);
+                       op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
+                       break;
+               case atype_array_nestable:
+                       if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
+                               return -EINVAL;
+                       BUG_ON(ctx_field->event_field.user);
+                       op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
+                       break;
+               case atype_sequence_nestable:
+                       if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
+                               return -EINVAL;
+                       BUG_ON(ctx_field->event_field.user);
+                       op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
+                       break;
+               case atype_struct_nestable:     /* Unsupported. */
+               case atype_variant_nestable:    /* Unsupported. */
+               default:
+                       return -EINVAL;
+               }
+               /* set offset to context index within channel contexts */
+               field_ref->offset = (uint16_t) idx;
+               break;
+       }
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static
+int apply_reloc(struct lttng_event *event,
+               struct bytecode_runtime *runtime,
+               uint32_t runtime_len,
+               uint32_t reloc_offset,
+               const char *name)
+{
+       struct load_op *op;
+
+       dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);
+
+       /* Ensure that the reloc is within the code */
+       if (runtime_len - reloc_offset < sizeof(uint16_t))
+               return -EINVAL;
+
+       op = (struct load_op *) &runtime->code[reloc_offset];
+       switch (op->op) {
+       case FILTER_OP_LOAD_FIELD_REF:
+               return apply_field_reloc(event, runtime, runtime_len,
+                       reloc_offset, name, op->op);
+       case FILTER_OP_GET_CONTEXT_REF:
+               return apply_context_reloc(event, runtime, runtime_len,
+                       reloc_offset, name, op->op);
+       case FILTER_OP_GET_SYMBOL:
+       case FILTER_OP_GET_SYMBOL_FIELD:
+               /*
+                * Will be handled by load specialize phase or
+                * dynamically by interpreter.
+                */
+               return 0;
+       default:
+               printk(KERN_WARNING "Unknown reloc op type %u\n", op->op);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static
+int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
+               struct lttng_event *event)
+{
+       struct lttng_bytecode_runtime *bc_runtime;
+
+       list_for_each_entry(bc_runtime,
+                       &event->bytecode_runtime_head, node) {
+               if (bc_runtime->bc == filter_bytecode)
+                       return 1;
+       }
+       return 0;
+}
+
+/*
+ * Take a bytecode with reloc table and link it to an event to create a
+ * bytecode runtime.
+ */
+static
+int _lttng_filter_event_link_bytecode(struct lttng_event *event,
+               struct lttng_filter_bytecode_node *filter_bytecode,
+               struct list_head *insert_loc)
+{
+       int ret, offset, next_offset;
+       struct bytecode_runtime *runtime = NULL;
+       size_t runtime_alloc_len;
+
+       if (!filter_bytecode)
+               return 0;
+       /* Bytecode already linked */
+       if (bytecode_is_linked(filter_bytecode, event))
+               return 0;
+
+       dbg_printk("Linking...\n");
+
+       /* We don't need the reloc table in the runtime */
+       runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
+       runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
+       if (!runtime) {
+               ret = -ENOMEM;
+               goto alloc_error;
+       }
+       runtime->p.bc = filter_bytecode;
+       runtime->p.event = event;
+       runtime->len = filter_bytecode->bc.reloc_offset;
+       /* copy original bytecode */
+       memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
+       /*
+        * apply relocs. Those are a uint16_t (offset in bytecode)
+        * followed by a string (field name).
+        */
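+       /*
+        * Layout sketch (offset and field name invented for
+        * illustration): a reloc entry { 0x0010, "next_tid\0" } requests
+        * that the load instruction at code offset 0x0010 be patched to
+        * reference the event field named "next_tid".
+        */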
+       for (offset = filter_bytecode->bc.reloc_offset;
+                       offset < filter_bytecode->bc.len;
+                       offset = next_offset) {
+               uint16_t reloc_offset =
+                       *(uint16_t *) &filter_bytecode->bc.data[offset];
+               const char *name =
+                       (const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];
+
+               ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
+               if (ret) {
+                       goto link_error;
+               }
+               next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
+       }
+       /* Validate bytecode */
+       ret = lttng_filter_validate_bytecode(runtime);
+       if (ret) {
+               goto link_error;
+       }
+       /* Specialize bytecode */
+       ret = lttng_filter_specialize_bytecode(event, runtime);
+       if (ret) {
+               goto link_error;
+       }
+       runtime->p.filter = lttng_filter_interpret_bytecode;
+       runtime->p.link_failed = 0;
+       list_add_rcu(&runtime->p.node, insert_loc);
+       dbg_printk("Linking successful.\n");
+       return 0;
+
+link_error:
+       runtime->p.filter = lttng_filter_false;
+       runtime->p.link_failed = 1;
+       list_add_rcu(&runtime->p.node, insert_loc);
+alloc_error:
+       dbg_printk("Linking failed.\n");
+       return ret;
+}
+
+void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
+{
+       struct lttng_filter_bytecode_node *bc = runtime->bc;
+
+       if (!bc->enabler->enabled || runtime->link_failed)
+               runtime->filter = lttng_filter_false;
+       else
+               runtime->filter = lttng_filter_interpret_bytecode;
+}
+
+/*
+ * Link bytecode for all enablers referenced by an event.
+ */
+void lttng_enabler_event_link_bytecode(struct lttng_event *event,
+               struct lttng_enabler *enabler)
+{
+       struct lttng_filter_bytecode_node *bc;
+       struct lttng_bytecode_runtime *runtime;
+
+       /* Can only be called for events with desc attached */
+       WARN_ON_ONCE(!event->desc);
+
+       /* Link each bytecode. */
+       list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
+               int found = 0, ret;
+               struct list_head *insert_loc;
+
+               list_for_each_entry(runtime,
+                               &event->bytecode_runtime_head, node) {
+                       if (runtime->bc == bc) {
+                               found = 1;
+                               break;
+                       }
+               }
+               /* Skip bytecode already linked */
+               if (found)
+                       continue;
+
+               /*
+                * Insert at specified priority (seqnum) in increasing
+                * order. If there already is a bytecode of the same priority,
+                * insert the new bytecode right after it.
+                */
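+               /*
+                * Example (illustrative): with linked seqnums {1, 3, 3, 7}
+                * and a new bytecode of seqnum 3, the reverse walk stops at
+                * the second 3 and inserts right after it, preserving
+                * attachment order among equal priorities.
+                */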
+               list_for_each_entry_reverse(runtime,
+                               &event->bytecode_runtime_head, node) {
+                       if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
+                               /* insert here */
+                               insert_loc = &runtime->node;
+                               goto add_within;
+                       }
+               }
+               /* Add at the head of the list */
+               insert_loc = &event->bytecode_runtime_head;
+       add_within:
+               dbg_printk("linking bytecode\n");
+               ret = _lttng_filter_event_link_bytecode(event, bc,
+                               insert_loc);
+               if (ret) {
+                       dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
+               }
+       }
+}
+
+/*
+ * We own the filter_bytecode if we return success.
+ */
+int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
+               struct lttng_filter_bytecode_node *filter_bytecode)
+{
+       list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
+       return 0;
+}
+
+void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
+{
+       struct lttng_filter_bytecode_node *filter_bytecode, *tmp;
+
+       list_for_each_entry_safe(filter_bytecode, tmp,
+                       &enabler->filter_bytecode_head, node) {
+               kfree(filter_bytecode);
+       }
+}
+
+void lttng_free_event_filter_runtime(struct lttng_event *event)
+{
+       struct bytecode_runtime *runtime, *tmp;
+
+       list_for_each_entry_safe(runtime, tmp,
+                       &event->bytecode_runtime_head, p.node) {
+               kfree(runtime->data);
+               kfree(runtime);
+       }
+}
diff --git a/src/lttng-probes.c b/src/lttng-probes.c
new file mode 100644 (file)
index 0000000..4a2bb63
--- /dev/null
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-probes.c
+ *
+ * Holds LTTng probes registry.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+
+#include <lttng/events.h>
+
+/*
+ * The probe list is protected by the sessions lock.
+ */
+static LIST_HEAD(_probe_list);
+
+/*
+ * List of probes registered but not yet processed.
+ */
+static LIST_HEAD(lazy_probe_init);
+
+/*
+ * lazy_nesting counter ensures we don't trigger lazy probe registration
+ * fixup while we are performing the fixup. It is protected by the
+ * sessions lock.
+ */
+static int lazy_nesting;
+
+DEFINE_PER_CPU(struct lttng_dynamic_len_stack, lttng_dynamic_len_stack);
+
+EXPORT_PER_CPU_SYMBOL_GPL(lttng_dynamic_len_stack);
+
+/*
+ * Called under sessions lock.
+ */
+static
+int check_event_provider(struct lttng_probe_desc *desc)
+{
+       int i;
+       size_t provider_name_len;
+
+       provider_name_len = strnlen(desc->provider,
+                               LTTNG_KERNEL_SYM_NAME_LEN - 1);
+       for (i = 0; i < desc->nr_events; i++) {
+               if (strncmp(desc->event_desc[i]->name,
+                               desc->provider,
+                               provider_name_len))
+                       return 0;       /* provider mismatch */
+               /*
+                * The event name needs to contain at least the provider
+                * name + '_' + one or more characters (e.g. provider
+                * "sched" matches event "sched_switch").
+                */
+               if (strlen(desc->event_desc[i]->name) <= provider_name_len + 1)
+                       return 0;       /* provider mismatch */
+               if (desc->event_desc[i]->name[provider_name_len] != '_')
+                       return 0;       /* provider mismatch */
+       }
+       return 1;
+}
+
+/*
+ * Called under sessions lock.
+ */
+static
+void lttng_lazy_probe_register(struct lttng_probe_desc *desc)
+{
+       struct lttng_probe_desc *iter;
+       struct list_head *probe_list;
+
+       /*
+        * Each provider enforces that every event name begins with the
+        * provider name. Check this with an assertion for extra
+        * safety. This ensures we cannot have duplicate event
+        * names across providers.
+        */
+       WARN_ON_ONCE(!check_event_provider(desc));
+
+       /*
+        * The provider ensures there are no duplicate event names.
+        * Duplicated TRACEPOINT_EVENT event names would generate a
+        * compile-time error due to duplicated symbol names.
+        */
+
+       /*
+        * We sort the providers by struct lttng_probe_desc pointer
+        * address.
+        */
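+       /*
+        * Comparing pointers gives a cheap total order: e.g. descriptors
+        * at addresses 0x...a000 and 0x...b000 (illustrative values)
+        * always compare the same way, so the list order stays
+        * deterministic for a given set of loaded probe modules.
+        */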
+       probe_list = &_probe_list;
+       list_for_each_entry_reverse(iter, probe_list, head) {
+               BUG_ON(iter == desc); /* Should never be in the list twice */
+               if (iter < desc) {
+                       /* We belong to the location right after iter. */
+                       list_add(&desc->head, &iter->head);
+                       goto desc_added;
+               }
+       }
+       /* We should be added at the head of the list */
+       list_add(&desc->head, probe_list);
+desc_added:
+       pr_debug("LTTng: just registered probe %s containing %u events\n",
+               desc->provider, desc->nr_events);
+}
+
+/*
+ * Called under sessions lock.
+ */
+static
+void fixup_lazy_probes(void)
+{
+       struct lttng_probe_desc *iter, *tmp;
+       int ret;
+
+       lazy_nesting++;
+       list_for_each_entry_safe(iter, tmp,
+                       &lazy_probe_init, lazy_init_head) {
+               lttng_lazy_probe_register(iter);
+               iter->lazy = 0;
+               list_del(&iter->lazy_init_head);
+       }
+       ret = lttng_fix_pending_events();
+       WARN_ON_ONCE(ret);
+       lazy_nesting--;
+}
+
+/*
+ * Called under sessions lock.
+ */
+struct list_head *lttng_get_probe_list_head(void)
+{
+       if (!lazy_nesting && !list_empty(&lazy_probe_init))
+               fixup_lazy_probes();
+       return &_probe_list;
+}
+
+static
+const struct lttng_probe_desc *find_provider(const char *provider)
+{
+       struct lttng_probe_desc *iter;
+       struct list_head *probe_list;
+
+       probe_list = lttng_get_probe_list_head();
+       list_for_each_entry(iter, probe_list, head) {
+               if (!strcmp(iter->provider, provider))
+                       return iter;
+       }
+       return NULL;
+}
+
+int lttng_probe_register(struct lttng_probe_desc *desc)
+{
+       int ret = 0;
+
+       lttng_lock_sessions();
+
+       /*
+        * Check if the provider has already been registered.
+        */
+       if (find_provider(desc->provider)) {
+               ret = -EEXIST;
+               goto end;
+       }
+       list_add(&desc->lazy_init_head, &lazy_probe_init);
+       desc->lazy = 1;
+       pr_debug("LTTng: adding probe %s containing %u events to lazy registration list\n",
+               desc->provider, desc->nr_events);
+       /*
+        * If there is at least one active session, we need to register
+        * the probe immediately: event registration cannot be delayed,
+        * because the events are needed right away.
+        */
+       if (lttng_session_active())
+               fixup_lazy_probes();
+end:
+       lttng_unlock_sessions();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_probe_register);
+
+void lttng_probe_unregister(struct lttng_probe_desc *desc)
+{
+       lttng_lock_sessions();
+       if (!desc->lazy)
+               list_del(&desc->head);
+       else
+               list_del(&desc->lazy_init_head);
+       pr_debug("LTTng: just unregistered probe %s\n", desc->provider);
+       lttng_unlock_sessions();
+}
+EXPORT_SYMBOL_GPL(lttng_probe_unregister);
+
+/*
+ * TODO: this is O(nr_probes * nb_events), could be faster.
+ * Called with sessions lock held.
+ */
+static
+const struct lttng_event_desc *find_event(const char *name)
+{
+       struct lttng_probe_desc *probe_desc;
+       int i;
+
+       list_for_each_entry(probe_desc, &_probe_list, head) {
+               for (i = 0; i < probe_desc->nr_events; i++) {
+                       if (!strcmp(probe_desc->event_desc[i]->name, name))
+                               return probe_desc->event_desc[i];
+               }
+       }
+       return NULL;
+}
+
+/*
+ * Called with sessions lock held.
+ */
+const struct lttng_event_desc *lttng_event_get(const char *name)
+{
+       const struct lttng_event_desc *event;
+       int ret;
+
+       event = find_event(name);
+       if (!event)
+               return NULL;
+       ret = try_module_get(event->owner);
+       WARN_ON_ONCE(!ret);
+       return event;
+}
+EXPORT_SYMBOL_GPL(lttng_event_get);
+
+/*
+ * Called with sessions lock held.
+ */
+void lttng_event_put(const struct lttng_event_desc *event)
+{
+       module_put(event->owner);
+}
+EXPORT_SYMBOL_GPL(lttng_event_put);
+
+static
+void *tp_list_start(struct seq_file *m, loff_t *pos)
+{
+       struct lttng_probe_desc *probe_desc;
+       struct list_head *probe_list;
+       int iter = 0, i;
+
+       lttng_lock_sessions();
+       probe_list = lttng_get_probe_list_head();
+       list_for_each_entry(probe_desc, probe_list, head) {
+               for (i = 0; i < probe_desc->nr_events; i++) {
+                       if (iter++ >= *pos)
+                               return (void *) probe_desc->event_desc[i];
+               }
+       }
+       /* End of list */
+       return NULL;
+}
+
+static
+void *tp_list_next(struct seq_file *m, void *p, loff_t *ppos)
+{
+       struct lttng_probe_desc *probe_desc;
+       struct list_head *probe_list;
+       int iter = 0, i;
+
+       (*ppos)++;
+       probe_list = lttng_get_probe_list_head();
+       list_for_each_entry(probe_desc, probe_list, head) {
+               for (i = 0; i < probe_desc->nr_events; i++) {
+                       if (iter++ >= *ppos)
+                               return (void *) probe_desc->event_desc[i];
+               }
+       }
+       /* End of list */
+       return NULL;
+}
+
+static
+void tp_list_stop(struct seq_file *m, void *p)
+{
+       lttng_unlock_sessions();
+}
+
+static
+int tp_list_show(struct seq_file *m, void *p)
+{
+       const struct lttng_event_desc *probe_desc = p;
+
+       seq_printf(m,   "event { name = %s; };\n",
+                  probe_desc->name);
+       return 0;
+}
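+
+/*
+ * Reading the resulting file yields one line per event, e.g. (sample
+ * output): event { name = sched_switch; };
+ */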
+
+static
+const struct seq_operations lttng_tracepoint_list_seq_ops = {
+       .start = tp_list_start,
+       .next = tp_list_next,
+       .stop = tp_list_stop,
+       .show = tp_list_show,
+};
+
+static
+int lttng_tracepoint_list_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &lttng_tracepoint_list_seq_ops);
+}
+
+const struct file_operations lttng_tracepoint_list_fops = {
+       .owner = THIS_MODULE,
+       .open = lttng_tracepoint_list_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+int lttng_probes_init(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               per_cpu_ptr(&lttng_dynamic_len_stack, cpu)->offset = 0;
+       return 0;
+}
diff --git a/src/lttng-ring-buffer-client-discard.c b/src/lttng-ring-buffer-client-discard.c
new file mode 100644 (file)
index 0000000..c9d617a
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-ring-buffer-client-discard.c
+ *
+ * LTTng lib ring buffer client (discard mode).
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING       "discard"
+#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_SPLICE
+#include "lttng-ring-buffer-client.h"
diff --git a/src/lttng-ring-buffer-client-mmap-discard.c b/src/lttng-ring-buffer-client-mmap-discard.c
new file mode 100644 (file)
index 0000000..c79ab66
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-ring-buffer-client-mmap-discard.c
+ *
+ * LTTng lib ring buffer client (discard mode, mmap output).
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING       "discard-mmap"
+#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_MMAP
+#include "lttng-ring-buffer-client.h"
diff --git a/src/lttng-ring-buffer-client-mmap-overwrite.c b/src/lttng-ring-buffer-client-mmap-overwrite.c
new file mode 100644 (file)
index 0000000..1166fc7
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-ring-buffer-client-mmap-overwrite.c
+ *
+ * LTTng lib ring buffer client (overwrite mode, mmap output).
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_OVERWRITE
+#define RING_BUFFER_MODE_TEMPLATE_STRING       "overwrite-mmap"
+#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_MMAP
+#include "lttng-ring-buffer-client.h"
diff --git a/src/lttng-ring-buffer-client-overwrite.c b/src/lttng-ring-buffer-client-overwrite.c
new file mode 100644 (file)
index 0000000..c4a7c5e
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-ring-buffer-client-overwrite.c
+ *
+ * LTTng lib ring buffer client (overwrite mode).
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_OVERWRITE
+#define RING_BUFFER_MODE_TEMPLATE_STRING       "overwrite"
+#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_SPLICE
+#include "lttng-ring-buffer-client.h"
diff --git a/src/lttng-ring-buffer-client.h b/src/lttng-ring-buffer-client.h
new file mode 100644 (file)
index 0000000..aad7955
--- /dev/null
@@ -0,0 +1,790 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-ring-buffer-client.h
+ *
+ * LTTng lib ring buffer client template.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <lttng/bitfield.h>
+#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
+#include <wrapper/trace-clock.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <ringbuffer/frontend_types.h>
+
+#define LTTNG_COMPACT_EVENT_BITS       5
+#define LTTNG_COMPACT_TSC_BITS         27
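+
+/*
+ * Note: 5 event id bits + 27 TSC bits fill exactly one 32-bit word,
+ * which is what the compact event header writes in the fast path below.
+ */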
+
+static struct lttng_transport lttng_relay_transport;
+
+/*
+ * Keep the natural field alignment for _each field_ within this structure if
+ * you ever add/remove a field from this header. Packed attribute is not used
+ * because gcc generates poor code on at least powerpc and mips. Don't ever
+ * let gcc add padding between the structure elements.
+ *
+ * The guarantee we have with timestamps is that all the events in a
+ * packet are included (inclusive) within the begin/end timestamps of
+ * the packet. Another guarantee we have is that the "timestamp begin",
+ * as well as the event timestamps, are monotonically increasing (never
+ * decrease) when moving forward in a stream (physically). But this
+ * guarantee does not apply to "timestamp end", because it is sampled at
+ * commit time, which is not ordered with respect to space reservation.
+ */
+
+struct packet_header {
+       /* Trace packet header */
+       uint32_t magic;                 /*
+                                        * Trace magic number.
+                                        * Contains endianness information.
+                                        */
+       uint8_t uuid[16];
+       uint32_t stream_id;
+       uint64_t stream_instance_id;
+
+       struct {
+               /* Stream packet context */
+               uint64_t timestamp_begin;       /* Cycle count at subbuffer start */
+               uint64_t timestamp_end;         /* Cycle count at subbuffer end */
+               uint64_t content_size;          /* Size of data in subbuffer */
+               uint64_t packet_size;           /* Subbuffer size (include padding) */
+               uint64_t packet_seq_num;        /* Packet sequence number */
+               unsigned long events_discarded; /*
+                                                * Events lost in this subbuffer since
+                                                * the beginning of the trace.
+                                                * (may overflow)
+                                                */
+               uint32_t cpu_id;                /* CPU id associated with stream */
+               uint8_t header_end;             /* End of header */
+       } ctx;
+};
+
+struct lttng_client_ctx {
+       size_t packet_context_len;
+       size_t event_context_len;
+};
+
+static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
+{
+       return trace_clock_read64();
+}
+
+static inline
+size_t ctx_get_aligned_size(size_t offset, struct lttng_ctx *ctx,
+               size_t ctx_len)
+{
+       size_t orig_offset = offset;
+
+       if (likely(!ctx))
+               return 0;
+       offset += lib_ring_buffer_align(offset, ctx->largest_align);
+       offset += ctx_len;
+       return offset - orig_offset;
+}
+
+static inline
+void ctx_get_struct_size(struct lttng_ctx *ctx, size_t *ctx_len,
+               struct lttng_channel *chan, struct lib_ring_buffer_ctx *bufctx)
+{
+       int i;
+       size_t offset = 0;
+
+       if (likely(!ctx)) {
+               *ctx_len = 0;
+               return;
+       }
+       for (i = 0; i < ctx->nr_fields; i++) {
+               if (ctx->fields[i].get_size)
+                       offset += ctx->fields[i].get_size(offset);
+               if (ctx->fields[i].get_size_arg)
+                       offset += ctx->fields[i].get_size_arg(offset,
+                                       &ctx->fields[i], bufctx, chan);
+       }
+       *ctx_len = offset;
+}
+
+static inline
+void ctx_record(struct lib_ring_buffer_ctx *bufctx,
+               struct lttng_channel *chan,
+               struct lttng_ctx *ctx)
+{
+       int i;
+
+       if (likely(!ctx))
+               return;
+       lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
+       for (i = 0; i < ctx->nr_fields; i++)
+               ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+}
+
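+/*
+ * Layout summary (derived from the cases in record_header_size below):
+ * a non-extended compact header is a single uint32_t packing the event
+ * id (5 bits) and a truncated timestamp (27 bits); a non-extended large
+ * header is a uint16_t id followed by an aligned uint32_t timestamp.
+ * Extended headers escape to a full uint32_t id and a uint64_t
+ * timestamp.
+ */
+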
+/*
+ * record_header_size - Calculate the header size and padding necessary.
+ * @config: ring buffer instance configuration
+ * @chan: channel
+ * @offset: offset in the write buffer
+ * @pre_header_padding: padding to add before the header (output)
+ * @ctx: reservation context
+ *
+ * Returns the event header size (including padding).
+ *
+ * The payload must itself determine its own alignment from the biggest type it
+ * contains.
+ */
+static __inline__
+size_t record_header_size(const struct lib_ring_buffer_config *config,
+                                struct channel *chan, size_t offset,
+                                size_t *pre_header_padding,
+                                struct lib_ring_buffer_ctx *ctx,
+                                struct lttng_client_ctx *client_ctx)
+{
+       struct lttng_channel *lttng_chan = channel_get_private(chan);
+       struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+       struct lttng_event *event = lttng_probe_ctx->event;
+       size_t orig_offset = offset;
+       size_t padding;
+
+       switch (lttng_chan->header_type) {
+       case 1: /* compact */
+               padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
+               offset += padding;
+               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+                       offset += sizeof(uint32_t);     /* id and timestamp */
+               } else {
+                       /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
+                       offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
+                       /* Align extended struct on largest member */
+                       offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
+                       offset += sizeof(uint32_t);     /* id */
+                       offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
+                       offset += sizeof(uint64_t);     /* timestamp */
+               }
+               break;
+       case 2: /* large */
+               padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
+               offset += padding;
+               offset += sizeof(uint16_t);
+               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+                       offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
+                       offset += sizeof(uint32_t);     /* timestamp */
+               } else {
+                       /* Align extended struct on largest member */
+                       offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
+                       offset += sizeof(uint32_t);     /* id */
+                       offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
+                       offset += sizeof(uint64_t);     /* timestamp */
+               }
+               break;
+       default:
+               padding = 0;
+               WARN_ON_ONCE(1);
+       }
+       offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
+                       client_ctx->packet_context_len);
+       offset += ctx_get_aligned_size(offset, event->ctx,
+                       client_ctx->event_context_len);
+
+       *pre_header_padding = padding;
+       return offset - orig_offset;
+}
+
+#include <ringbuffer/api.h>
+
+static
+void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
+                                struct lib_ring_buffer_ctx *ctx,
+                                uint32_t event_id);
+
+/*
+ * lttng_write_event_header
+ *
+ * Writes the event header to the offset (already aligned on 32-bits).
+ *
+ * @config: ring buffer instance configuration
+ * @ctx: reservation context
+ * @event_id: event ID
+ */
+static __inline__
+void lttng_write_event_header(const struct lib_ring_buffer_config *config,
+                           struct lib_ring_buffer_ctx *ctx,
+                           uint32_t event_id)
+{
+       struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+       struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+       struct lttng_event *event = lttng_probe_ctx->event;
+
+       if (unlikely(ctx->rflags))
+               goto slow_path;
+
+       switch (lttng_chan->header_type) {
+       case 1: /* compact */
+       {
+               uint32_t id_time = 0;
+
+               bt_bitfield_write(&id_time, uint32_t,
+                               0,
+                               LTTNG_COMPACT_EVENT_BITS,
+                               event_id);
+               bt_bitfield_write(&id_time, uint32_t,
+                               LTTNG_COMPACT_EVENT_BITS,
+                               LTTNG_COMPACT_TSC_BITS,
+                               ctx->tsc);
+               lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
+               break;
+       }
+       case 2: /* large */
+       {
+               uint32_t timestamp = (uint32_t) ctx->tsc;
+               uint16_t id = event_id;
+
+               lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+               lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
+               lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+               break;
+       }
+       default:
+               WARN_ON_ONCE(1);
+       }
+
+       ctx_record(ctx, lttng_chan, lttng_chan->ctx);
+       ctx_record(ctx, lttng_chan, event->ctx);
+       lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
+
+       return;
+
+slow_path:
+       lttng_write_event_header_slow(config, ctx, event_id);
+}
+
+static
+void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
+                                struct lib_ring_buffer_ctx *ctx,
+                                uint32_t event_id)
+{
+       struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+       struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+       struct lttng_event *event = lttng_probe_ctx->event;
+
+       switch (lttng_chan->header_type) {
+       case 1: /* compact */
+               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+                       uint32_t id_time = 0;
+
+                       bt_bitfield_write(&id_time, uint32_t,
+                                       0,
+                                       LTTNG_COMPACT_EVENT_BITS,
+                                       event_id);
+                       bt_bitfield_write(&id_time, uint32_t,
+                                       LTTNG_COMPACT_EVENT_BITS,
+                                       LTTNG_COMPACT_TSC_BITS, ctx->tsc);
+                       lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
+               } else {
+                       uint8_t id = 0;
+                       uint64_t timestamp = ctx->tsc;
+
+                       bt_bitfield_write(&id, uint8_t,
+                                       0,
+                                       LTTNG_COMPACT_EVENT_BITS,
+                                       31);
+                       lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+                       /* Align extended struct on largest member */
+                       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
+                       lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+                       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
+                       lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+               }
+               break;
+       case 2: /* large */
+       {
+               if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+                       uint32_t timestamp = (uint32_t) ctx->tsc;
+                       uint16_t id = event_id;
+
+                       lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+                       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
+                       lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+               } else {
+                       uint16_t id = 65535;
+                       uint64_t timestamp = ctx->tsc;
+
+                       lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+                       /* Align extended struct on largest member */
+                       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
+                       lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+                       lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
+                       lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+               }
+               break;
+       }
+       default:
+               WARN_ON_ONCE(1);
+       }
+       ctx_record(ctx, lttng_chan, lttng_chan->ctx);
+       ctx_record(ctx, lttng_chan, event->ctx);
+       lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
+}
+
+static const struct lib_ring_buffer_config client_config;
+
+static u64 client_ring_buffer_clock_read(struct channel *chan)
+{
+       return lib_ring_buffer_clock_read(chan);
+}
+
+static
+size_t client_record_header_size(const struct lib_ring_buffer_config *config,
+                                struct channel *chan, size_t offset,
+                                size_t *pre_header_padding,
+                                struct lib_ring_buffer_ctx *ctx,
+                                void *client_ctx)
+{
+       return record_header_size(config, chan, offset,
+                                 pre_header_padding, ctx, client_ctx);
+}
+
+/**
+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
+ *
+ * Return the header size without padding after the structure. Don't use a
+ * packed structure because gcc generates inefficient code on some
+ * architectures (powerpc, mips, ...).
+ */
+static size_t client_packet_header_size(void)
+{
+       return offsetof(struct packet_header, ctx.header_end);
+}
+
+static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
+                               unsigned int subbuf_idx)
+{
+       struct channel *chan = buf->backend.chan;
+       struct packet_header *header =
+               (struct packet_header *)
+                       lib_ring_buffer_offset_address(&buf->backend,
+                               subbuf_idx * chan->backend.subbuf_size);
+       struct lttng_channel *lttng_chan = channel_get_private(chan);
+       struct lttng_session *session = lttng_chan->session;
+
+       header->magic = CTF_MAGIC_NUMBER;
+       memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
+       header->stream_id = lttng_chan->id;
+       header->stream_instance_id = buf->backend.cpu;
+       header->ctx.timestamp_begin = tsc;
+       header->ctx.timestamp_end = 0;
+       header->ctx.content_size = ~0ULL; /* for debugging */
+       header->ctx.packet_size = ~0ULL;
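+       /*
+        * E.g. (illustrative numbers): with num_subbuf = 4, the second
+        * use of sub-buffer 1 (seq_cnt = 1) gets packet_seq_num
+        * 4 * 1 + 1 = 5, so sequence numbers increase across the stream.
+        */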
+       header->ctx.packet_seq_num = chan->backend.num_subbuf * \
+                                    buf->backend.buf_cnt[subbuf_idx].seq_cnt + \
+                                    subbuf_idx;
+       header->ctx.events_discarded = 0;
+       header->ctx.cpu_id = buf->backend.cpu;
+}
+
+/*
+ * offset is assumed to never be 0 here: never deliver a completely empty
+ * subbuffer. data_size is between 1 and subbuf_size.
+ */
+static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
+                             unsigned int subbuf_idx, unsigned long data_size)
+{
+       struct channel *chan = buf->backend.chan;
+       struct packet_header *header =
+               (struct packet_header *)
+                       lib_ring_buffer_offset_address(&buf->backend,
+                               subbuf_idx * chan->backend.subbuf_size);
+       unsigned long records_lost = 0;
+
+       header->ctx.timestamp_end = tsc;
+       header->ctx.content_size =
+               (uint64_t) data_size * CHAR_BIT;                /* in bits */
+       header->ctx.packet_size =
+               (uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT;    /* in bits */
+       records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
+       records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
+       records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
+       header->ctx.events_discarded = records_lost;
+}
+
+static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
+                               int cpu, const char *name)
+{
+       return 0;
+}
+
+static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
+{
+}
+
+static struct packet_header *client_packet_header(
+               const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *buf)
+{
+       return lib_ring_buffer_read_offset_address(&buf->backend, 0);
+}
+
+static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *buf,
+               uint64_t *timestamp_begin)
+{
+       struct packet_header *header = client_packet_header(config, buf);
+       *timestamp_begin = header->ctx.timestamp_begin;
+
+       return 0;
+}
+
+static int client_timestamp_end(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *buf,
+                       uint64_t *timestamp_end)
+{
+       struct packet_header *header = client_packet_header(config, buf);
+       *timestamp_end = header->ctx.timestamp_end;
+
+       return 0;
+}
+
+static int client_events_discarded(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *buf,
+                       uint64_t *events_discarded)
+{
+       struct packet_header *header = client_packet_header(config, buf);
+       *events_discarded = header->ctx.events_discarded;
+
+       return 0;
+}
+
+static int client_content_size(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *buf,
+                       uint64_t *content_size)
+{
+       struct packet_header *header = client_packet_header(config, buf);
+       *content_size = header->ctx.content_size;
+
+       return 0;
+}
+
+static int client_packet_size(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *buf,
+                       uint64_t *packet_size)
+{
+       struct packet_header *header = client_packet_header(config, buf);
+       *packet_size = header->ctx.packet_size;
+
+       return 0;
+}
+
+static int client_stream_id(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *buf,
+                       uint64_t *stream_id)
+{
+       struct channel *chan = buf->backend.chan;
+       struct lttng_channel *lttng_chan = channel_get_private(chan);
+
+       *stream_id = lttng_chan->id;
+       return 0;
+}
+
+static int client_current_timestamp(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *bufb,
+               uint64_t *ts)
+{
+       *ts = config->cb.ring_buffer_clock_read(bufb->backend.chan);
+
+       return 0;
+}
+
+static int client_sequence_number(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *buf,
+                       uint64_t *seq)
+{
+       struct packet_header *header = client_packet_header(config, buf);
+
+       *seq = header->ctx.packet_seq_num;
+
+       return 0;
+}
+
+static
+int client_instance_id(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *buf,
+               uint64_t *id)
+{
+       *id = buf->backend.cpu;
+
+       return 0;
+}
+
+static const struct lib_ring_buffer_config client_config = {
+       .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
+       .cb.record_header_size = client_record_header_size,
+       .cb.subbuffer_header_size = client_packet_header_size,
+       .cb.buffer_begin = client_buffer_begin,
+       .cb.buffer_end = client_buffer_end,
+       .cb.buffer_create = client_buffer_create,
+       .cb.buffer_finalize = client_buffer_finalize,
+
+       .tsc_bits = LTTNG_COMPACT_TSC_BITS,
+       .alloc = RING_BUFFER_ALLOC_PER_CPU,
+       .sync = RING_BUFFER_SYNC_PER_CPU,
+       .mode = RING_BUFFER_MODE_TEMPLATE,
+       .backend = RING_BUFFER_PAGE,
+       .output = RING_BUFFER_OUTPUT_TEMPLATE,
+       .oops = RING_BUFFER_OOPS_CONSISTENCY,
+       .ipi = RING_BUFFER_IPI_BARRIER,
+       .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
+};
+
+static
+void release_priv_ops(void *priv_ops)
+{
+       module_put(THIS_MODULE);
+}
+
+static
+void lttng_channel_destroy(struct channel *chan)
+{
+       channel_destroy(chan);
+}
+
+static
+struct channel *_channel_create(const char *name,
+                               struct lttng_channel *lttng_chan, void *buf_addr,
+                               size_t subbuf_size, size_t num_subbuf,
+                               unsigned int switch_timer_interval,
+                               unsigned int read_timer_interval)
+{
+       struct channel *chan;
+
+       chan = channel_create(&client_config, name, lttng_chan, buf_addr,
+                             subbuf_size, num_subbuf, switch_timer_interval,
+                             read_timer_interval);
+       if (chan) {
+               /*
+                * Ensure this module is not unloaded before we finish
+                * using lttng_relay_transport.ops.
+                */
+               if (!try_module_get(THIS_MODULE)) {
+                       printk(KERN_WARNING "LTT: Can't lock transport module.\n");
+                       goto error;
+               }
+               chan->backend.priv_ops = &lttng_relay_transport.ops;
+               chan->backend.release_priv_ops = release_priv_ops;
+       }
+       return chan;
+
+error:
+       lttng_channel_destroy(chan);
+       return NULL;
+}
+
+static
+struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
+{
+       struct lib_ring_buffer *buf;
+       int cpu;
+
+       for_each_channel_cpu(cpu, chan) {
+               buf = channel_get_ring_buffer(&client_config, chan, cpu);
+               if (!lib_ring_buffer_open_read(buf))
+                       return buf;
+       }
+       return NULL;
+}
+
+static
+int lttng_buffer_has_read_closed_stream(struct channel *chan)
+{
+       struct lib_ring_buffer *buf;
+       int cpu;
+
+       for_each_channel_cpu(cpu, chan) {
+               buf = channel_get_ring_buffer(&client_config, chan, cpu);
+               if (!atomic_long_read(&buf->active_readers))
+                       return 1;
+       }
+       return 0;
+}
+
+static
+void lttng_buffer_read_close(struct lib_ring_buffer *buf)
+{
+       lib_ring_buffer_release_read(buf);
+}
+
+static
+int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
+                     uint32_t event_id)
+{
+       struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+       struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+       struct lttng_event *event = lttng_probe_ctx->event;
+       struct lttng_client_ctx client_ctx;
+       int ret, cpu;
+
+       cpu = lib_ring_buffer_get_cpu(&client_config);
+       if (unlikely(cpu < 0))
+               return -EPERM;
+       ctx->cpu = cpu;
+
+       /* Compute internal size of context structures. */
+       ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len, lttng_chan, ctx);
+       ctx_get_struct_size(event->ctx, &client_ctx.event_context_len, lttng_chan, ctx);
+
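+       /*
+        * Event IDs that do not fit the header's ID field fall back to
+        * the extended header format. The thresholds below suggest a
+        * 5-bit ID field (0..30, 31 = escape) for compact headers and
+        * a 16-bit field (0..65534, 65535 = escape) for large ones.
+        */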
+       switch (lttng_chan->header_type) {
+       case 1: /* compact */
+               if (event_id > 30)
+                       ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+               break;
+       case 2: /* large */
+               if (event_id > 65534)
+                       ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+       }
+
+       ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
+       if (unlikely(ret))
+               goto put;
+       lib_ring_buffer_backend_get_pages(&client_config, ctx,
+                       &ctx->backend_pages);
+       lttng_write_event_header(&client_config, ctx, event_id);
+       return 0;
+put:
+       lib_ring_buffer_put_cpu(&client_config);
+       return ret;
+}
+
+static
+void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
+{
+       lib_ring_buffer_commit(&client_config, ctx);
+       lib_ring_buffer_put_cpu(&client_config);
+}
+
+static
+void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
+                    size_t len)
+{
+       lib_ring_buffer_write(&client_config, ctx, src, len);
+}
+
+static
+void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
+                              const void __user *src, size_t len)
+{
+       lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
+}
+
+static
+void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
+               int c, size_t len)
+{
+       lib_ring_buffer_memset(&client_config, ctx, c, len);
+}
+
+static
+void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
+               size_t len)
+{
+       lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
+}
+
+static
+void lttng_event_strcpy_from_user(struct lib_ring_buffer_ctx *ctx,
+               const char __user *src, size_t len)
+{
+       lib_ring_buffer_strcpy_from_user_inatomic(&client_config, ctx, src,
+                       len, '#');
+}
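+
+/*
+ * The '#' passed to the strcpy helpers above is, to our understanding,
+ * the padding byte the lib ring buffer writes when the copied string
+ * is shorter than the reserved length.
+ */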
+
+static
+wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
+{
+       struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
+                                       chan, cpu);
+       return &buf->write_wait;
+}
+
+static
+wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
+{
+       return &chan->hp_wait;
+}
+
+static
+int lttng_is_finalized(struct channel *chan)
+{
+       return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int lttng_is_disabled(struct channel *chan)
+{
+       return lib_ring_buffer_channel_is_disabled(chan);
+}
+
+static struct lttng_transport lttng_relay_transport = {
+       .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
+       .owner = THIS_MODULE,
+       .ops = {
+               .channel_create = _channel_create,
+               .channel_destroy = lttng_channel_destroy,
+               .buffer_read_open = lttng_buffer_read_open,
+               .buffer_has_read_closed_stream =
+                       lttng_buffer_has_read_closed_stream,
+               .buffer_read_close = lttng_buffer_read_close,
+               .event_reserve = lttng_event_reserve,
+               .event_commit = lttng_event_commit,
+               .event_write = lttng_event_write,
+               .event_write_from_user = lttng_event_write_from_user,
+               .event_memset = lttng_event_memset,
+               .event_strcpy = lttng_event_strcpy,
+               .event_strcpy_from_user = lttng_event_strcpy_from_user,
+               .packet_avail_size = NULL,      /* Would be racy anyway */
+               .get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
+               .get_hp_wait_queue = lttng_get_hp_wait_queue,
+               .is_finalized = lttng_is_finalized,
+               .is_disabled = lttng_is_disabled,
+               .timestamp_begin = client_timestamp_begin,
+               .timestamp_end = client_timestamp_end,
+               .events_discarded = client_events_discarded,
+               .content_size = client_content_size,
+               .packet_size = client_packet_size,
+               .stream_id = client_stream_id,
+               .current_timestamp = client_current_timestamp,
+               .sequence_number = client_sequence_number,
+               .instance_id = client_instance_id,
+       },
+};
+
+static int __init lttng_ring_buffer_client_init(void)
+{
+       /*
+        * This vmalloc sync-all also takes care of the lib ring buffer's
+        * vmalloc'd module pages when the lib ring buffer is built as a
+        * module into LTTng.
+        */
+       wrapper_vmalloc_sync_mappings();
+       lttng_transport_register(&lttng_relay_transport);
+       return 0;
+}
+
+module_init(lttng_ring_buffer_client_init);
+
+static void __exit lttng_ring_buffer_client_exit(void)
+{
+       lttng_transport_unregister(&lttng_relay_transport);
+}
+
+module_exit(lttng_ring_buffer_client_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
+                  " client");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/lttng-ring-buffer-metadata-client.c b/src/lttng-ring-buffer-metadata-client.c
new file mode 100644 (file)
index 0000000..2d52492
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-ring-buffer-metadata-client.c
+ *
+ * LTTng lib ring buffer metadata client.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING       "metadata"
+#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_SPLICE
+#include "lttng-ring-buffer-metadata-client.h"
diff --git a/src/lttng-ring-buffer-metadata-client.h b/src/lttng-ring-buffer-metadata-client.h
new file mode 100644 (file)
index 0000000..0f68b38
--- /dev/null
@@ -0,0 +1,451 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-ring-buffer-metadata-client.h
+ *
+ * LTTng lib ring buffer client template.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+
+static struct lttng_transport lttng_relay_transport;
+
+struct metadata_packet_header {
+       uint32_t magic;                 /* 0x75D11D57 */
+       uint8_t  uuid[16];              /* Universally Unique Identifier */
+       uint32_t checksum;              /* 0 if unused */
+       uint32_t content_size;          /* in bits */
+       uint32_t packet_size;           /* in bits */
+       uint8_t  compression_scheme;    /* 0 if unused */
+       uint8_t  encryption_scheme;     /* 0 if unused */
+       uint8_t  checksum_scheme;       /* 0 if unused */
+       uint8_t  major;                 /* CTF spec major version number */
+       uint8_t  minor;                 /* CTF spec minor version number */
+       uint8_t  header_end[0];
+};
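+
+/*
+ * Note: client_packet_header_size() below returns
+ * offsetof(struct metadata_packet_header, header_end), i.e. the 37
+ * bytes of fields above; no padding can occur before header_end,
+ * since every field sits at its natural alignment.
+ */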
+
+struct metadata_record_header {
+       uint8_t header_end[0];          /* End of header */
+};
+
+static const struct lib_ring_buffer_config client_config;
+
+static inline
+u64 lib_ring_buffer_clock_read(struct channel *chan)
+{
+       return 0;
+}
+
+static inline
+size_t record_header_size(const struct lib_ring_buffer_config *config,
+                                struct channel *chan, size_t offset,
+                                size_t *pre_header_padding,
+                                struct lib_ring_buffer_ctx *ctx,
+                                void *client_ctx)
+{
+       return 0;
+}
+
+#include <ringbuffer/api.h>
+
+static u64 client_ring_buffer_clock_read(struct channel *chan)
+{
+       return 0;
+}
+
+static
+size_t client_record_header_size(const struct lib_ring_buffer_config *config,
+                                struct channel *chan, size_t offset,
+                                size_t *pre_header_padding,
+                                struct lib_ring_buffer_ctx *ctx,
+                                void *client_ctx)
+{
+       return 0;
+}
+
+/**
+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
+ *
+ * Return the header size without padding after the structure. Don't use a
+ * packed structure, because gcc generates inefficient code on some
+ * architectures (powerpc, mips...).
+ */
+static size_t client_packet_header_size(void)
+{
+       return offsetof(struct metadata_packet_header, header_end);
+}
+
+static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
+                               unsigned int subbuf_idx)
+{
+       struct channel *chan = buf->backend.chan;
+       struct metadata_packet_header *header =
+               (struct metadata_packet_header *)
+                       lib_ring_buffer_offset_address(&buf->backend,
+                               subbuf_idx * chan->backend.subbuf_size);
+       struct lttng_metadata_cache *metadata_cache =
+               channel_get_private(chan);
+
+       header->magic = TSDL_MAGIC_NUMBER;
+       memcpy(header->uuid, metadata_cache->uuid.b,
+               sizeof(metadata_cache->uuid));
+       header->checksum = 0;           /* 0 if unused */
+       header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
+       header->packet_size = 0xFFFFFFFF;  /* in bits, for debugging */
+       header->compression_scheme = 0; /* 0 if unused */
+       header->encryption_scheme = 0;  /* 0 if unused */
+       header->checksum_scheme = 0;    /* 0 if unused */
+       header->major = CTF_SPEC_MAJOR;
+       header->minor = CTF_SPEC_MINOR;
+}
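+
+/*
+ * The 0xFFFFFFFF size placeholders written above are overwritten with
+ * the real values by client_buffer_end(); a packet still carrying them
+ * was therefore never properly closed, which is what makes them useful
+ * for debugging.
+ */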
+
+/*
+ * offset is assumed to never be 0 here: never deliver a completely empty
+ * subbuffer. data_size is between 1 and subbuf_size.
+ */
+static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
+                             unsigned int subbuf_idx, unsigned long data_size)
+{
+       struct channel *chan = buf->backend.chan;
+       struct metadata_packet_header *header =
+               (struct metadata_packet_header *)
+                       lib_ring_buffer_offset_address(&buf->backend,
+                               subbuf_idx * chan->backend.subbuf_size);
+       unsigned long records_lost = 0;
+
+       header->content_size = data_size * CHAR_BIT;            /* in bits */
+       header->packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
+       /*
+        * We do not care about the records lost count, because the metadata
+        * channel waits and retries.
+        */
+       (void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
+       records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
+       records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
+       WARN_ON_ONCE(records_lost != 0);
+}
+
+static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
+                               int cpu, const char *name)
+{
+       return 0;
+}
+
+static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
+{
+}
+
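+/*
+ * The packet index getters below are not meaningful for the metadata
+ * channel (its packets carry no timestamps, sequence numbers or stream
+ * IDs), hence the -ENOSYS returns.
+ */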
+static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *buf, uint64_t *timestamp_begin)
+{
+       return -ENOSYS;
+}
+
+static int client_timestamp_end(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *timestamp_end)
+{
+       return -ENOSYS;
+}
+
+static int client_events_discarded(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *events_discarded)
+{
+       return -ENOSYS;
+}
+
+static int client_current_timestamp(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *bufb,
+               uint64_t *ts)
+{
+       return -ENOSYS;
+}
+
+static int client_content_size(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *content_size)
+{
+       return -ENOSYS;
+}
+
+static int client_packet_size(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *packet_size)
+{
+       return -ENOSYS;
+}
+
+static int client_stream_id(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *stream_id)
+{
+       return -ENOSYS;
+}
+
+static int client_sequence_number(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *seq)
+{
+       return -ENOSYS;
+}
+
+static
+int client_instance_id(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *bufb,
+               uint64_t *id)
+{
+       return -ENOSYS;
+}
+
+static const struct lib_ring_buffer_config client_config = {
+       .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
+       .cb.record_header_size = client_record_header_size,
+       .cb.subbuffer_header_size = client_packet_header_size,
+       .cb.buffer_begin = client_buffer_begin,
+       .cb.buffer_end = client_buffer_end,
+       .cb.buffer_create = client_buffer_create,
+       .cb.buffer_finalize = client_buffer_finalize,
+
+       .tsc_bits = 0,
+       .alloc = RING_BUFFER_ALLOC_GLOBAL,
+       .sync = RING_BUFFER_SYNC_GLOBAL,
+       .mode = RING_BUFFER_MODE_TEMPLATE,
+       .backend = RING_BUFFER_PAGE,
+       .output = RING_BUFFER_OUTPUT_TEMPLATE,
+       .oops = RING_BUFFER_OOPS_CONSISTENCY,
+       .ipi = RING_BUFFER_IPI_BARRIER,
+       .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
+};
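+
+/*
+ * Unlike the per-CPU trace clients, the metadata client uses a single
+ * global buffer (RING_BUFFER_ALLOC_GLOBAL) and records no timestamps
+ * (tsc_bits = 0).
+ */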
+
+static
+void release_priv_ops(void *priv_ops)
+{
+       module_put(THIS_MODULE);
+}
+
+static
+void lttng_channel_destroy(struct channel *chan)
+{
+       channel_destroy(chan);
+}
+
+static
+struct channel *_channel_create(const char *name,
+                               struct lttng_channel *lttng_chan, void *buf_addr,
+                               size_t subbuf_size, size_t num_subbuf,
+                               unsigned int switch_timer_interval,
+                               unsigned int read_timer_interval)
+{
+       struct channel *chan;
+
+       chan = channel_create(&client_config, name,
+                             lttng_chan->session->metadata_cache, buf_addr,
+                             subbuf_size, num_subbuf, switch_timer_interval,
+                             read_timer_interval);
+       if (chan) {
+               /*
+                * Ensure this module is not unloaded before we finish
+                * using lttng_relay_transport.ops.
+                */
+               if (!try_module_get(THIS_MODULE)) {
+                       printk(KERN_WARNING "LTT: Can't lock transport module.\n");
+                       goto error;
+               }
+               chan->backend.priv_ops = &lttng_relay_transport.ops;
+               chan->backend.release_priv_ops = release_priv_ops;
+       }
+       return chan;
+
+error:
+       lttng_channel_destroy(chan);
+       return NULL;
+}
+
+static
+struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
+{
+       struct lib_ring_buffer *buf;
+
+       buf = channel_get_ring_buffer(&client_config, chan, 0);
+       if (!lib_ring_buffer_open_read(buf))
+               return buf;
+       return NULL;
+}
+
+static
+int lttng_buffer_has_read_closed_stream(struct channel *chan)
+{
+       struct lib_ring_buffer *buf;
+       int cpu;
+
+       for_each_channel_cpu(cpu, chan) {
+               buf = channel_get_ring_buffer(&client_config, chan, cpu);
+               if (!atomic_long_read(&buf->active_readers))
+                       return 1;
+       }
+       return 0;
+}
+
+static
+void lttng_buffer_read_close(struct lib_ring_buffer *buf)
+{
+       lib_ring_buffer_release_read(buf);
+}
+
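+/*
+ * Unlike the trace clients, no per-CPU reference is taken here: the
+ * metadata channel writes to a single global buffer, as per
+ * client_config above.
+ */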
+static
+int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
+{
+       int ret;
+
+       ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
+       if (ret)
+               return ret;
+       lib_ring_buffer_backend_get_pages(&client_config, ctx,
+                       &ctx->backend_pages);
+       return 0;
+}
+
+static
+void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
+{
+       lib_ring_buffer_commit(&client_config, ctx);
+}
+
+static
+void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
+                    size_t len)
+{
+       lib_ring_buffer_write(&client_config, ctx, src, len);
+}
+
+static
+void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
+                              const void __user *src, size_t len)
+{
+       lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
+}
+
+static
+void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
+               int c, size_t len)
+{
+       lib_ring_buffer_memset(&client_config, ctx, c, len);
+}
+
+static
+void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
+               size_t len)
+{
+       lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
+}
+
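+/*
+ * Space still available for metadata in the current sub-buffer. At a
+ * sub-buffer boundary (offset 0 within the sub-buffer), the packet
+ * header that will be written on the upcoming reserve is deducted as
+ * well.
+ */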
+static
+size_t lttng_packet_avail_size(struct channel *chan)
+{
+       unsigned long o_begin;
+       struct lib_ring_buffer *buf;
+
+       buf = chan->backend.buf;        /* Only for the global buffer! */
+       o_begin = v_read(&client_config, &buf->offset);
+       if (subbuf_offset(o_begin, chan) != 0) {
+               return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
+       } else {
+               return chan->backend.subbuf_size - subbuf_offset(o_begin, chan)
+                       - sizeof(struct metadata_packet_header);
+       }
+}
+
+static
+wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
+{
+       struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
+                                       chan, cpu);
+       return &buf->write_wait;
+}
+
+static
+wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
+{
+       return &chan->hp_wait;
+}
+
+static
+int lttng_is_finalized(struct channel *chan)
+{
+       return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int lttng_is_disabled(struct channel *chan)
+{
+       return lib_ring_buffer_channel_is_disabled(chan);
+}
+
+static struct lttng_transport lttng_relay_transport = {
+       .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
+       .owner = THIS_MODULE,
+       .ops = {
+               .channel_create = _channel_create,
+               .channel_destroy = lttng_channel_destroy,
+               .buffer_read_open = lttng_buffer_read_open,
+               .buffer_has_read_closed_stream =
+                       lttng_buffer_has_read_closed_stream,
+               .buffer_read_close = lttng_buffer_read_close,
+               .event_reserve = lttng_event_reserve,
+               .event_commit = lttng_event_commit,
+               .event_write_from_user = lttng_event_write_from_user,
+               .event_memset = lttng_event_memset,
+               .event_write = lttng_event_write,
+               .event_strcpy = lttng_event_strcpy,
+               .packet_avail_size = lttng_packet_avail_size,
+               .get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
+               .get_hp_wait_queue = lttng_get_hp_wait_queue,
+               .is_finalized = lttng_is_finalized,
+               .is_disabled = lttng_is_disabled,
+               .timestamp_begin = client_timestamp_begin,
+               .timestamp_end = client_timestamp_end,
+               .events_discarded = client_events_discarded,
+               .content_size = client_content_size,
+               .packet_size = client_packet_size,
+               .stream_id = client_stream_id,
+               .current_timestamp = client_current_timestamp,
+               .sequence_number = client_sequence_number,
+               .instance_id = client_instance_id,
+       },
+};
+
+static int __init lttng_ring_buffer_client_init(void)
+{
+       /*
+        * This vmalloc sync-all also takes care of the lib ring buffer's
+        * vmalloc'd module pages when the lib ring buffer is built as a
+        * module into LTTng.
+        */
+       wrapper_vmalloc_sync_mappings();
+       lttng_transport_register(&lttng_relay_transport);
+       return 0;
+}
+
+module_init(lttng_ring_buffer_client_init);
+
+static void __exit lttng_ring_buffer_client_exit(void)
+{
+       lttng_transport_unregister(&lttng_relay_transport);
+}
+
+module_exit(lttng_ring_buffer_client_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
+                  " client");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/lttng-ring-buffer-metadata-mmap-client.c b/src/lttng-ring-buffer-metadata-mmap-client.c
new file mode 100644 (file)
index 0000000..15975b4
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-ring-buffer-metadata-mmap-client.c
+ *
+ * LTTng lib ring buffer metadata mmap client.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING       "metadata-mmap"
+#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_MMAP
+#include "lttng-ring-buffer-metadata-client.h"
diff --git a/src/lttng-statedump-impl.c b/src/lttng-statedump-impl.c
new file mode 100644 (file)
index 0000000..1a2a12b
--- /dev/null
@@ -0,0 +1,647 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-statedump-impl.c
+ *
+ * Linux Trace Toolkit Next Generation Kernel State Dump
+ *
+ * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
+ * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Changes:
+ *     Eric Clement:                   Add listing of network IP interface
+ *     2006, 2007 Mathieu Desnoyers    Fix kernel threads
+ *                                     Various updates
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/inet.h>
+#include <linux/ip.h>
+#include <linux/kthread.h>
+#include <linux/proc_fs.h>
+#include <linux/file.h>
+#include <linux/interrupt.h>
+#include <linux/irqnr.h>
+#include <linux/cpu.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <wrapper/irqdesc.h>
+#include <wrapper/fdtable.h>
+#include <wrapper/namespace.h>
+#include <wrapper/irq.h>
+#include <wrapper/tracepoint.h>
+#include <wrapper/genhd.h>
+#include <wrapper/file.h>
+
+#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
+#include <linux/irq.h>
+#endif
+
+/* Define the tracepoints, but do not build the probes */
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+#define TRACE_INCLUDE_FILE lttng-statedump
+#define LTTNG_INSTRUMENTATION
+#include <instrumentation/events/lttng-statedump.h>
+
+DEFINE_TRACE(lttng_statedump_block_device);
+DEFINE_TRACE(lttng_statedump_end);
+DEFINE_TRACE(lttng_statedump_interrupt);
+DEFINE_TRACE(lttng_statedump_file_descriptor);
+DEFINE_TRACE(lttng_statedump_start);
+DEFINE_TRACE(lttng_statedump_process_state);
+DEFINE_TRACE(lttng_statedump_process_pid_ns);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
+DEFINE_TRACE(lttng_statedump_process_cgroup_ns);
+#endif
+DEFINE_TRACE(lttng_statedump_process_ipc_ns);
+#ifndef LTTNG_MNT_NS_MISSING_HEADER
+DEFINE_TRACE(lttng_statedump_process_mnt_ns);
+#endif
+DEFINE_TRACE(lttng_statedump_process_net_ns);
+DEFINE_TRACE(lttng_statedump_process_user_ns);
+DEFINE_TRACE(lttng_statedump_process_uts_ns);
+DEFINE_TRACE(lttng_statedump_network_interface);
+#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
+DEFINE_TRACE(lttng_statedump_cpu_topology);
+#endif
+
+struct lttng_fd_ctx {
+       char *page;
+       struct lttng_session *session;
+       struct files_struct *files;
+};
+
+/*
+ * Protected by the trace lock.
+ */
+static struct delayed_work cpu_work[NR_CPUS];
+static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
+static atomic_t kernel_threads_to_run;
+
+enum lttng_thread_type {
+       LTTNG_USER_THREAD = 0,
+       LTTNG_KERNEL_THREAD = 1,
+};
+
+enum lttng_execution_mode {
+       LTTNG_USER_MODE = 0,
+       LTTNG_SYSCALL = 1,
+       LTTNG_TRAP = 2,
+       LTTNG_IRQ = 3,
+       LTTNG_SOFTIRQ = 4,
+       LTTNG_MODE_UNKNOWN = 5,
+};
+
+enum lttng_execution_submode {
+       LTTNG_NONE = 0,
+       LTTNG_UNKNOWN = 1,
+};
+
+enum lttng_process_status {
+       LTTNG_UNNAMED = 0,
+       LTTNG_WAIT_FORK = 1,
+       LTTNG_WAIT_CPU = 2,
+       LTTNG_EXIT = 3,
+       LTTNG_ZOMBIE = 4,
+       LTTNG_WAIT = 5,
+       LTTNG_RUN = 6,
+       LTTNG_DEAD = 7,
+};
+
+static
+int lttng_enumerate_block_devices(struct lttng_session *session)
+{
+       struct class *ptr_block_class;
+       struct device_type *ptr_disk_type;
+       struct class_dev_iter iter;
+       struct device *dev;
+
+       ptr_block_class = wrapper_get_block_class();
+       if (!ptr_block_class)
+               return -ENOSYS;
+       ptr_disk_type = wrapper_get_disk_type();
+       if (!ptr_disk_type) {
+               return -ENOSYS;
+       }
+       class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
+       while ((dev = class_dev_iter_next(&iter))) {
+               struct disk_part_iter piter;
+               struct gendisk *disk = dev_to_disk(dev);
+               struct hd_struct *part;
+
+               /*
+                * Don't show empty devices or things that have been
+                * suppressed
+                */
+               if (get_capacity(disk) == 0 ||
+                   (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
+                       continue;
+
+               disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+               while ((part = disk_part_iter_next(&piter))) {
+                       char name_buf[BDEVNAME_SIZE];
+                       char *p;
+
+                       p = wrapper_disk_name(disk, part->partno, name_buf);
+                       if (!p) {
+                               disk_part_iter_exit(&piter);
+                               class_dev_iter_exit(&iter);
+                               return -ENOSYS;
+                       }
+                       trace_lttng_statedump_block_device(session,
+                                       part_devt(part), name_buf);
+               }
+               disk_part_iter_exit(&piter);
+       }
+       class_dev_iter_exit(&iter);
+       return 0;
+}
+
+#ifdef CONFIG_INET
+
+static
+void lttng_enumerate_device(struct lttng_session *session,
+               struct net_device *dev)
+{
+       struct in_device *in_dev;
+       struct in_ifaddr *ifa;
+
+       if (dev->flags & IFF_UP) {
+               in_dev = in_dev_get(dev);
+               if (in_dev) {
+                       for (ifa = in_dev->ifa_list; ifa != NULL;
+                            ifa = ifa->ifa_next) {
+                               trace_lttng_statedump_network_interface(
+                                       session, dev, ifa);
+                       }
+                       in_dev_put(in_dev);
+               }
+       } else {
+               trace_lttng_statedump_network_interface(
+                       session, dev, NULL);
+       }
+}
+
+static
+int lttng_enumerate_network_ip_interface(struct lttng_session *session)
+{
+       struct net_device *dev;
+
+       read_lock(&dev_base_lock);
+       for_each_netdev(&init_net, dev)
+               lttng_enumerate_device(session, dev);
+       read_unlock(&dev_base_lock);
+
+       return 0;
+}
+#else /* CONFIG_INET */
+static inline
+int lttng_enumerate_network_ip_interface(struct lttng_session *session)
+{
+       return 0;
+}
+#endif /* CONFIG_INET */
+
+static
+int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
+{
+       const struct lttng_fd_ctx *ctx = p;
+       const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
+       unsigned int flags = file->f_flags;
+       struct fdtable *fdt;
+
+       /*
+        * We don't expose kernel internal flags, only userspace-visible
+        * flags.
+        */
+       flags &= ~FMODE_NONOTIFY;
+       fdt = files_fdtable(ctx->files);
+       /*
+        * We need to check here again whether fd is within the fdt
+        * max_fds range, because we might be seeing a different
+        * files_fdtable() than iterate_fd(), assuming only RCU is
+        * protecting the read. In reality, iterate_fd() holds
+        * file_lock, which should ensure the fdt does not change while
+        * the lock is taken, but we are not aware whether this is
+        * guaranteed or not, so play safe.
+        */
+       if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
+               flags |= O_CLOEXEC;
+       if (IS_ERR(s)) {
+               struct dentry *dentry = file->f_path.dentry;
+
+               /* Make sure we give at least some info */
+               spin_lock(&dentry->d_lock);
+               trace_lttng_statedump_file_descriptor(ctx->session,
+                       ctx->files, fd, dentry->d_name.name, flags,
+                       file->f_mode);
+               spin_unlock(&dentry->d_lock);
+               goto end;
+       }
+       trace_lttng_statedump_file_descriptor(ctx->session,
+               ctx->files, fd, s, flags, file->f_mode);
+end:
+       return 0;
+}
+
+/* Called with task lock held. */
+static
+void lttng_enumerate_files(struct lttng_session *session,
+               struct files_struct *files,
+               char *tmp)
+{
+       struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };
+
+       lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
+}
+
+#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
+static
+int lttng_enumerate_cpu_topology(struct lttng_session *session)
+{
+       int cpu;
+       const cpumask_t *cpumask = cpu_possible_mask;
+
+       for (cpu = cpumask_first(cpumask); cpu < nr_cpu_ids;
+                       cpu = cpumask_next(cpu, cpumask)) {
+               trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));
+       }
+
+       return 0;
+}
+#else
+static
+int lttng_enumerate_cpu_topology(struct lttng_session *session)
+{
+       return 0;
+}
+#endif
+
+#if 0
+/*
+ * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
+ * (scheduling in atomic). Normally, the tasklist lock protects this kind of
+ * iteration, but it is not exported to modules.
+ */
+static
+void lttng_enumerate_task_vm_maps(struct lttng_session *session,
+               struct task_struct *p)
+{
+       struct mm_struct *mm;
+       struct vm_area_struct *map;
+       unsigned long ino;
+
+       /* get_task_mm does a task_lock... */
+       mm = get_task_mm(p);
+       if (!mm)
+               return;
+
+       map = mm->mmap;
+       if (map) {
+               down_read(&mm->mmap_sem);
+               while (map) {
+                       if (map->vm_file)
+                               ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
+                       else
+                               ino = 0;
+                       trace_lttng_statedump_vm_map(session, p, map, ino);
+                       map = map->vm_next;
+               }
+               up_read(&mm->mmap_sem);
+       }
+       mmput(mm);
+}
+
+static
+int lttng_enumerate_vm_maps(struct lttng_session *session)
+{
+       struct task_struct *p;
+
+       rcu_read_lock();
+       for_each_process(p)
+               lttng_enumerate_task_vm_maps(session, p);
+       rcu_read_unlock();
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
+
+static
+int lttng_list_interrupts(struct lttng_session *session)
+{
+       unsigned int irq;
+       unsigned long flags = 0;
+       struct irq_desc *desc;
+
+#define irq_to_desc    wrapper_irq_to_desc
+       /* needs irq_desc */
+       for_each_irq_desc(irq, desc) {
+               struct irqaction *action;
+               const char *irq_chip_name =
+                       irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";
+
+               local_irq_save(flags);
+               raw_spin_lock(&desc->lock);
+               for (action = desc->action; action; action = action->next) {
+                       trace_lttng_statedump_interrupt(session,
+                               irq, irq_chip_name, action);
+               }
+               raw_spin_unlock(&desc->lock);
+               local_irq_restore(flags);
+       }
+       return 0;
+#undef irq_to_desc
+}
+#else
+static inline
+int lttng_list_interrupts(struct lttng_session *session)
+{
+       return 0;
+}
+#endif
+
+/*
+ * Statedump the task's namespaces using the proc filesystem inode number as
+ * the unique identifier. The user and pid ns are nested and will be dumped
+ * recursively.
+ *
+ * Called with task lock held.
+ */
+static
+void lttng_statedump_process_ns(struct lttng_session *session,
+               struct task_struct *p,
+               enum lttng_thread_type type,
+               enum lttng_execution_mode mode,
+               enum lttng_execution_submode submode,
+               enum lttng_process_status status)
+{
+       struct nsproxy *proxy;
+       struct pid_namespace *pid_ns;
+       struct user_namespace *user_ns;
+
+       /*
+        * The pid and user namespaces are special: they are nested and
+        * accessed with specific functions instead of through the nsproxy
+        * struct like the other namespaces.
+        */
+       pid_ns = task_active_pid_ns(p);
+       do {
+               trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
+               pid_ns = pid_ns ? pid_ns->parent : NULL;
+       } while (pid_ns);
+
+       user_ns = task_cred_xxx(p, user_ns);
+       do {
+               trace_lttng_statedump_process_user_ns(session, p, user_ns);
+               /*
+                * trace_lttng_statedump_process_user_ns() internally
+                * checks whether user_ns is NULL. While this does not
+                * appear to be a possible return value for
+                * task_cred_xxx(), err on the safe side and check
+                * for NULL here as well to be consistent with the
+                * paranoid behavior of
+                * trace_lttng_statedump_process_user_ns().
+                */
+               user_ns = user_ns ? user_ns->lttng_user_ns_parent : NULL;
+       } while (user_ns);
+
+       /*
+        * Back and forth on locking strategy within Linux upstream for nsproxy.
+        * See Linux upstream commit 728dba3a39c66b3d8ac889ddbe38b5b1c264aec3
+        * "namespaces: Use task_lock and not rcu to protect nsproxy"
+        * for details.
+        */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
+               LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
+               LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
+               LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
+       proxy = p->nsproxy;
+#else
+       rcu_read_lock();
+       proxy = task_nsproxy(p);
+#endif
+       if (proxy) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
+               trace_lttng_statedump_process_cgroup_ns(session, p, proxy->cgroup_ns);
+#endif
+               trace_lttng_statedump_process_ipc_ns(session, p, proxy->ipc_ns);
+#ifndef LTTNG_MNT_NS_MISSING_HEADER
+               trace_lttng_statedump_process_mnt_ns(session, p, proxy->mnt_ns);
+#endif
+               trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
+               trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
+       }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
+               LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
+               LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
+               LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
+       /* (nothing) */
+#else
+       rcu_read_unlock();
+#endif
+}
+
+static
+int lttng_enumerate_process_states(struct lttng_session *session)
+{
+       struct task_struct *g, *p;
+       char *tmp;
+
+       tmp = (char *) __get_free_page(GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
+       rcu_read_lock();
+       for_each_process(g) {
+               struct files_struct *prev_files = NULL;
+
+               p = g;
+               do {
+                       enum lttng_execution_mode mode =
+                               LTTNG_MODE_UNKNOWN;
+                       enum lttng_execution_submode submode =
+                               LTTNG_UNKNOWN;
+                       enum lttng_process_status status;
+                       enum lttng_thread_type type;
+                       struct files_struct *files;
+
+                       task_lock(p);
+                       if (p->exit_state == EXIT_ZOMBIE)
+                               status = LTTNG_ZOMBIE;
+                       else if (p->exit_state == EXIT_DEAD)
+                               status = LTTNG_DEAD;
+                       else if (p->state == TASK_RUNNING) {
+                               /* Is this a forked child that has not run yet? */
+                               if (list_empty(&p->rt.run_list))
+                                       status = LTTNG_WAIT_FORK;
+                               else
+                                       /*
+                                        * All tasks are considered as wait_cpu;
+                                        * the viewer will sort out if the task
+                                        * was really running at this time.
+                                        */
+                                       status = LTTNG_WAIT_CPU;
+                       } else if (p->state &
+                               (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
+                               /* Task is waiting for something to complete */
+                               status = LTTNG_WAIT;
+                       } else
+                               status = LTTNG_UNNAMED;
+                       submode = LTTNG_NONE;
+
+                       /*
+                        * Checking p->mm filters out kernel threads; the
+                        * viewer will further determine whether a user-space
+                        * thread was in syscall mode or not.
+                        */
+                       if (p->mm)
+                               type = LTTNG_USER_THREAD;
+                       else
+                               type = LTTNG_KERNEL_THREAD;
+                       files = p->files;
+
+                       trace_lttng_statedump_process_state(session,
+                               p, type, mode, submode, status, files);
+                       lttng_statedump_process_ns(session,
+                               p, type, mode, submode, status);
+                       /*
+                        * As an optimisation for the common case, do not
+                        * repeat information for the same files_struct in
+                        * two consecutive threads. This is the common case
+                        * for threads sharing the same fd table. RCU guarantees
+                        * that the same files_struct pointer is not re-used
+                        * throughout processes/threads iteration.
+                        */
+                       if (files && files != prev_files) {
+                               lttng_enumerate_files(session, files, tmp);
+                               prev_files = files;
+                       }
+                       task_unlock(p);
+               } while_each_thread(g, p);
+       }
+       rcu_read_unlock();
+
+       free_page((unsigned long) tmp);
+
+       return 0;
+}
+
+static
+void lttng_statedump_work_func(struct work_struct *work)
+{
+       if (atomic_dec_and_test(&kernel_threads_to_run))
+               /* If we are the last thread, wake up do_lttng_statedump */
+               wake_up(&statedump_wq);
+}
+
+static
+int do_lttng_statedump(struct lttng_session *session)
+{
+       int cpu, ret;
+
+       trace_lttng_statedump_start(session);
+       ret = lttng_enumerate_process_states(session);
+       if (ret)
+               return ret;
+       /*
+        * FIXME
+        * ret = lttng_enumerate_vm_maps(session);
+        * if (ret)
+        *      return ret;
+        */
+       ret = lttng_list_interrupts(session);
+       if (ret)
+               return ret;
+       ret = lttng_enumerate_network_ip_interface(session);
+       if (ret)
+               return ret;
+       ret = lttng_enumerate_block_devices(session);
+       switch (ret) {
+       case 0:
+               break;
+       case -ENOSYS:
+               printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
+               break;
+       default:
+               return ret;
+       }
+       ret = lttng_enumerate_cpu_topology(session);
+       if (ret)
+               return ret;
+
+       /* TODO lttng_dump_idt_table(session); */
+       /* TODO lttng_dump_softirq_vec(session); */
+       /* TODO lttng_list_modules(session); */
+       /* TODO lttng_dump_swap_files(session); */
+
+       /*
+        * Fire off a work queue on each CPU. Their sole purpose in life
+        * is to guarantee that each CPU has been in a state where it was in
+        * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
+        */
+       get_online_cpus();
+       atomic_set(&kernel_threads_to_run, num_online_cpus());
+       for_each_online_cpu(cpu) {
+               INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
+               schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
+       }
+       /* Wait for all threads to run */
+       __wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
+       put_online_cpus();
+       /* Our work is done */
+       trace_lttng_statedump_end(session);
+       return 0;
+}
+
+/*
+ * Called with session mutex held.
+ */
+int lttng_statedump_start(struct lttng_session *session)
+{
+       return do_lttng_statedump(session);
+}
+EXPORT_SYMBOL_GPL(lttng_statedump_start);
+
+static
+int __init lttng_statedump_init(void)
+{
+       /*
+        * Allow the module to load even if the fixup cannot be done. This
+        * will allow a seamless transition when the underlying issue fix
+        * is merged into the Linux kernel, and when tracepoint.c
+        * "tracepoint_module_notify" is turned into a static function.
+        */
+       (void) wrapper_lttng_fixup_sig(THIS_MODULE);
+       return 0;
+}
+
+module_init(lttng_statedump_init);
+
+static
+void __exit lttng_statedump_exit(void)
+{
+}
+
+module_exit(lttng_statedump_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Jean-Hugues Deschenes");
+MODULE_DESCRIPTION("LTTng statedump provider");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/lttng-string-utils.c b/src/lttng-string-utils.c
new file mode 100644 (file)
index 0000000..d944790
--- /dev/null
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
+ */
+
+#include <linux/types.h>
+
+#include <lttng/string-utils.h>
+
+enum star_glob_pattern_type_flags {
+       STAR_GLOB_PATTERN_TYPE_FLAG_NONE        = 0,
+       STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN     = (1U << 0),
+       STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY    = (1U << 1),
+};
+
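+/*
+ * Note: the first non-escaped star determines the result below, so
+ * END_ONLY is only ever set together with PATTERN.
+ */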
+static
+enum star_glob_pattern_type_flags strutils_test_glob_pattern(const char *pattern)
+{
+       enum star_glob_pattern_type_flags ret =
+               STAR_GLOB_PATTERN_TYPE_FLAG_NONE;
+       const char *p;
+
+       for (p = pattern; *p != '\0'; p++) {
+               switch (*p) {
+               case '*':
+                       ret = STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
+
+                       if (p[1] == '\0') {
+                               ret |= STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
+                       }
+                       goto end;
+               case '\\':
+                       p++;
+
+                       if (*p == '\0') {
+                               goto end;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Returns true if `pattern` is a star-only globbing pattern, that is,
+ * it contains at least one non-escaped `*`.
+ */
+bool strutils_is_star_glob_pattern(const char *pattern)
+{
+       return strutils_test_glob_pattern(pattern) &
+               STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
+}
+
+/*
+ * Returns true if `pattern` is a globbing pattern with a globbing,
+ * non-escaped star only at its very end.
+ */
+bool strutils_is_star_at_the_end_only_glob_pattern(const char *pattern)
+{
+       return strutils_test_glob_pattern(pattern) &
+               STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
+}
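+
+/*
+ * Illustrative examples: "foo*" is both a star glob pattern and a
+ * star-at-the-end-only pattern; "f*o" is a star glob pattern but not
+ * end-only; "foo\*" is neither, since its star is escaped.
+ */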
+
+struct string_with_len {
+       const char *str;
+       size_t len;
+};
+
+static
+char string_get_char_at_cb(size_t at, void *data)
+{
+       struct string_with_len *string_with_len = data;
+
+       if (at >= string_with_len->len) {
+               return '\0';
+       }
+
+       return string_with_len->str[at];
+}
+
+/*
+ * Globbing matching function with the star feature only (`?` and
+ * character sets are not supported). This matches `candidate` (plain
+ * string) against `pattern`. A literal star can be escaped with `\` in
+ * `pattern`.
+ *
+ * `pattern_len` or `candidate_len` can be greater than the actual
+ * string length of `pattern` or `candidate` if the string is
+ * null-terminated.
+ */
+bool strutils_star_glob_match(const char *pattern, size_t pattern_len,
+               const char *candidate, size_t candidate_len)
+{
+       struct string_with_len pattern_with_len = {
+               pattern, pattern_len
+       };
+       struct string_with_len candidate_with_len = {
+               candidate, candidate_len
+       };
+
+       return strutils_star_glob_match_char_cb(string_get_char_at_cb,
+               &pattern_with_len, string_get_char_at_cb,
+               &candidate_with_len);
+}
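+
+/*
+ * For null-terminated strings, both length arguments may simply be
+ * passed as SIZE_MAX. For instance (illustrative):
+ *
+ *   strutils_star_glob_match("hi*every*one", SIZE_MAX,
+ *                            "hi ev every onyx one", SIZE_MAX)
+ *
+ * returns true, as traced step by step in the comment below.
+ */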
+
+bool strutils_star_glob_match_char_cb(
+               strutils_get_char_at_cb pattern_get_char_at_cb,
+               void *pattern_get_char_at_cb_data,
+               strutils_get_char_at_cb candidate_get_char_at_cb,
+               void *candidate_get_char_at_cb_data)
+{
+       size_t retry_p_at = 0, retry_c_at = 0, c_at, p_at;
+       char c, p, prev_p;
+       bool got_a_star = false;
+
+retry:
+       c_at = retry_c_at;
+       c = candidate_get_char_at_cb(c_at, candidate_get_char_at_cb_data);
+       p_at = retry_p_at;
+       p = pattern_get_char_at_cb(p_at, pattern_get_char_at_cb_data);
+
+       /*
+        * The concept here is to retry a match in the specific case
+        * where we already got a star. The retry position for the
+        * pattern is just after the most recent star, and the retry
+        * position for the candidate is the character following the
+        * last try's first character.
+        *
+        * Example:
+        *
+        *     candidate: hi ev every onyx one
+        *                ^
+        *     pattern:   hi*every*one
+        *                ^
+        *
+        *     candidate: hi ev every onyx one
+        *                 ^
+        *     pattern:   hi*every*one
+        *                 ^
+        *
+        *     candidate: hi ev every onyx one
+        *                  ^
+        *     pattern:   hi*every*one
+        *                  ^
+        *
+        *     candidate: hi ev every onyx one
+        *                  ^
+        *     pattern:   hi*every*one
+        *                   ^ MISMATCH
+        *
+        *     candidate: hi ev every onyx one
+        *                   ^
+        *     pattern:   hi*every*one
+        *                   ^
+        *
+        *     candidate: hi ev every onyx one
+        *                   ^^
+        *     pattern:   hi*every*one
+        *                   ^^
+        *
+        *     candidate: hi ev every onyx one
+        *                   ^ ^
+        *     pattern:   hi*every*one
+        *                   ^ ^ MISMATCH
+        *
+        *     candidate: hi ev every onyx one
+        *                    ^
+        *     pattern:   hi*every*one
+        *                   ^ MISMATCH
+        *
+        *     candidate: hi ev every onyx one
+        *                     ^
+        *     pattern:   hi*every*one
+        *                   ^ MISMATCH
+        *
+        *     candidate: hi ev every onyx one
+        *                      ^
+        *     pattern:   hi*every*one
+        *                   ^
+        *
+        *     candidate: hi ev every onyx one
+        *                      ^^
+        *     pattern:   hi*every*one
+        *                   ^^
+        *
+        *     candidate: hi ev every onyx one
+        *                      ^ ^
+        *     pattern:   hi*every*one
+        *                   ^ ^
+        *
+        *     candidate: hi ev every onyx one
+        *                      ^  ^
+        *     pattern:   hi*every*one
+        *                   ^  ^
+        *
+        *     candidate: hi ev every onyx one
+        *                      ^   ^
+        *     pattern:   hi*every*one
+        *                   ^   ^
+        *
+        *     candidate: hi ev every onyx one
+        *                           ^
+        *     pattern:   hi*every*one
+        *                        ^
+        *
+        *     candidate: hi ev every onyx one
+        *                           ^
+        *     pattern:   hi*every*one
+        *                         ^ MISMATCH
+        *
+        *     candidate: hi ev every onyx one
+        *                            ^
+        *     pattern:   hi*every*one
+        *                         ^
+        *
+        *     candidate: hi ev every onyx one
+        *                            ^^
+        *     pattern:   hi*every*one
+        *                         ^^
+        *
+        *     candidate: hi ev every onyx one
+        *                            ^ ^
+        *     pattern:   hi*every*one
+        *                         ^ ^ MISMATCH
+        *
+        *     candidate: hi ev every onyx one
+        *                             ^
+        *     pattern:   hi*every*one
+        *                         ^ MISMATCH
+        *
+        *     candidate: hi ev every onyx one
+        *                              ^
+        *     pattern:   hi*every*one
+        *                         ^ MISMATCH
+        *
+        *     candidate: hi ev every onyx one
+        *                               ^
+        *     pattern:   hi*every*one
+        *                         ^ MISMATCH
+        *
+        *     candidate: hi ev every onyx one
+        *                                ^
+        *     pattern:   hi*every*one
+        *                         ^ MISMATCH
+        *
+        *     candidate: hi ev every onyx one
+        *                                 ^
+        *     pattern:   hi*every*one
+        *                         ^
+        *
+        *     candidate: hi ev every onyx one
+        *                                 ^^
+        *     pattern:   hi*every*one
+        *                         ^^
+        *
+        *     candidate: hi ev every onyx one
+        *                                 ^ ^
+        *     pattern:   hi*every*one
+        *                         ^ ^
+        *
+        *     candidate: hi ev every onyx one
+        *                                 ^  ^
+        *     pattern:   hi*every*one
+        *                         ^  ^ SUCCESS
+        */
+       while (c != '\0') {
+               if (p == '\0') {
+                       goto end_of_pattern;
+               }
+
+               switch (p) {
+               case '*':
+               {
+                       char retry_p;
+                       got_a_star = true;
+
+                       /*
+                        * Our first try starts at the current candidate
+                        * character and after the star in the pattern.
+                        */
+                       retry_c_at = c_at;
+                       retry_p_at = p_at + 1;
+                       retry_p = pattern_get_char_at_cb(retry_p_at,
+                               pattern_get_char_at_cb_data);
+
+                       if (retry_p == '\0') {
+                               /*
+                                * Star at the end of the pattern at
+                                * this point: automatic match.
+                                */
+                               return true;
+                       }
+
+                       goto retry;
+               }
+               case '\\':
+                       /* Go to escaped character. */
+                       p_at++;
+                       p = pattern_get_char_at_cb(p_at,
+                               pattern_get_char_at_cb_data);
+
+                       /* Fall-through. */
+               default:
+                       /*
+                        * Default case: compare the current (possibly
+                        * escaped) characters.
+                        */
+                       if (p == '\0' || c != p) {
+end_of_pattern:
+                               /* Character mismatch OR end of pattern. */
+                               if (!got_a_star) {
+                                       /*
+                                        * We didn't get any star yet,
+                                        * so this first mismatch
+                                        * automatically makes the whole
+                                        * test fail.
+                                        */
+                                       return false;
+                               }
+
+                               /*
+                                * Next try: next candidate character,
+                                * original pattern character (following
+                                * the most recent star).
+                                */
+                               retry_c_at++;
+                               goto retry;
+                       }
+                       break;
+               }
+
+               /* Next pattern and candidate characters. */
+               c_at++;
+               c = candidate_get_char_at_cb(c_at,
+                       candidate_get_char_at_cb_data);
+               p_at++;
+               p = pattern_get_char_at_cb(p_at, pattern_get_char_at_cb_data);
+       }
+
+       /*
+        * We checked every candidate character and we're still in a
+        * success state: the only pattern character allowed to remain
+        * is a star.
+        */
+       if (p == '\0') {
+               return true;
+       }
+
+       prev_p = p;
+       p_at++;
+       p = pattern_get_char_at_cb(p_at, pattern_get_char_at_cb_data);
+       return prev_p == '*' && p == '\0';
+}
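+
+/*
+ * Illustration only, not part of this file's logic: the matcher above
+ * is driven through get-char-at callbacks, so it can walk any character
+ * source. Assuming hypothetical helper and matcher names, matching a
+ * plain NUL-terminated string could look like:
+ *
+ *     static
+ *     char str_get_char_at(size_t at, void *data)
+ *     {
+ *             const char *str = data;
+ *
+ *             return str[at];
+ *     }
+ *
+ *     const char pattern[] = "hi*every*one";
+ *     const char candidate[] = "hi ev every onyx one";
+ *     bool matched = star_glob_match_cb(str_get_char_at, (void *) pattern,
+ *             str_get_char_at, (void *) candidate);
+ *
+ * which mirrors the worked trace above and returns true.
+ */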
diff --git a/src/lttng-syscalls.c b/src/lttng-syscalls.c
new file mode 100644 (file)
index 0000000..a5b5f40
--- /dev/null
@@ -0,0 +1,1325 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-syscalls.c
+ *
+ * LTTng syscall probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/err.h>
+#include <linux/bitmap.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/seq_file.h>
+#include <linux/stringify.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/fcntl.h>
+#include <asm/ptrace.h>
+#include <asm/syscall.h>
+
+#include <lttng/bitfield.h>
+#include <wrapper/tracepoint.h>
+#include <wrapper/file.h>
+#include <wrapper/rcu.h>
+#include <wrapper/syscall.h>
+#include <lttng/events.h>
+
+#ifndef CONFIG_COMPAT
+# ifndef is_compat_task
+#  define is_compat_task()     (0)
+# endif
+#endif
+
+/* in_compat_syscall() appeared in kernel 4.6. */
+#ifndef in_compat_syscall
+ #define in_compat_syscall()   is_compat_task()
+#endif
+
+enum sc_type {
+       SC_TYPE_ENTRY,
+       SC_TYPE_EXIT,
+       SC_TYPE_COMPAT_ENTRY,
+       SC_TYPE_COMPAT_EXIT,
+};
+
+#define SYSCALL_ENTRY_TOK              syscall_entry_
+#define COMPAT_SYSCALL_ENTRY_TOK       compat_syscall_entry_
+#define SYSCALL_EXIT_TOK               syscall_exit_
+#define COMPAT_SYSCALL_EXIT_TOK                compat_syscall_exit_
+
+#define SYSCALL_ENTRY_STR              __stringify(SYSCALL_ENTRY_TOK)
+#define COMPAT_SYSCALL_ENTRY_STR       __stringify(COMPAT_SYSCALL_ENTRY_TOK)
+#define SYSCALL_EXIT_STR               __stringify(SYSCALL_EXIT_TOK)
+#define COMPAT_SYSCALL_EXIT_STR                __stringify(COMPAT_SYSCALL_EXIT_TOK)
+
+static
+void syscall_entry_probe(void *__data, struct pt_regs *regs, long id);
+static
+void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret);
+
+/*
+ * Forward declarations for old kernels.
+ */
+struct mmsghdr;
+struct rlimit64;
+struct oldold_utsname;
+struct old_utsname;
+struct sel_arg_struct;
+struct mmap_arg_struct;
+struct file_handle;
+struct user_msghdr;
+
+/*
+ * Forward declarations for kernels >= 5.6.
+ */
+struct timex;
+struct timeval;
+struct itimerval;
+struct itimerspec;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
+typedef __kernel_old_time_t time_t;
+#endif
+
+#ifdef IA32_NR_syscalls
+#define NR_compat_syscalls IA32_NR_syscalls
+#else
+#define NR_compat_syscalls NR_syscalls
+#endif
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TP_MODULE_NOINIT
+#define TRACE_INCLUDE_PATH instrumentation/syscalls/headers
+
+#define PARAMS(args...)        args
+
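+/*
+ * The blocks below use the usual trace-event multiple-inclusion trick:
+ * the same instrumentation headers are included once per probe flavor
+ * (entry, exit, compat entry, compat exit), each time with TRACE_SYSTEM,
+ * TP_PROBE_CB and the SC_LTTNG_TRACEPOINT_* macros redefined. The
+ * internal include guards (e.g. _TRACE_SYSCALLS_INTEGERS_H) are
+ * explicitly #undef'ed after every pass so that the next inclusion
+ * expands again instead of being skipped.
+ */
+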
+/* Handle unknown syscalls */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM syscalls_unknown
+#include <instrumentation/syscalls/headers/syscalls_unknown.h>
+#undef TRACE_SYSTEM
+
+#define SC_ENTER
+
+#undef sc_exit
+#define sc_exit(...)
+#undef sc_in
+#define sc_in(...)     __VA_ARGS__
+#undef sc_out
+#define sc_out(...)
+#undef sc_inout
+#define sc_inout(...)  __VA_ARGS__
+
+/* Hijack probe callback for system call enter */
+#undef TP_PROBE_CB
+#define TP_PROBE_CB(_template)         &syscall_entry_probe
+#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
+       LTTNG_TRACEPOINT_EVENT(syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
+               PARAMS(_fields))
+#define SC_LTTNG_TRACEPOINT_EVENT_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+       LTTNG_TRACEPOINT_EVENT_CODE(syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
+               PARAMS(_locvar), PARAMS(_code_pre),                             \
+               PARAMS(_fields), PARAMS(_code_post))
+#define SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
+       LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(syscall_entry_##_name, PARAMS(_fields))
+#define SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(_template, _name)            \
+       LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(syscall_entry_##_template, syscall_entry_##_name)
+/* Enumerations only defined at first inclusion. */
+#define SC_LTTNG_TRACEPOINT_ENUM(_name, _values) \
+       LTTNG_TRACEPOINT_ENUM(_name, PARAMS(_values))
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM syscall_entry_integers
+#define TRACE_INCLUDE_FILE syscalls_integers
+#include <instrumentation/syscalls/headers/syscalls_integers.h>
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM syscall_entry_pointers
+#define TRACE_INCLUDE_FILE syscalls_pointers
+#include <instrumentation/syscalls/headers/syscalls_pointers.h>
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_SYSTEM
+#undef SC_LTTNG_TRACEPOINT_ENUM
+#undef SC_LTTNG_TRACEPOINT_EVENT_CODE
+#undef SC_LTTNG_TRACEPOINT_EVENT
+#undef SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
+#undef SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
+#undef TP_PROBE_CB
+#undef _TRACE_SYSCALLS_INTEGERS_H
+#undef _TRACE_SYSCALLS_POINTERS_H
+
+/* Hijack probe callback for compat system call enter */
+#define TP_PROBE_CB(_template)         &syscall_entry_probe
+#define LTTNG_SC_COMPAT
+#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
+       LTTNG_TRACEPOINT_EVENT(compat_syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
+               PARAMS(_fields))
+#define SC_LTTNG_TRACEPOINT_EVENT_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+       LTTNG_TRACEPOINT_EVENT_CODE(compat_syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
+               PARAMS(_locvar), PARAMS(_code_pre), PARAMS(_fields), PARAMS(_code_post))
+#define SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
+       LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(compat_syscall_entry_##_name, PARAMS(_fields))
+#define SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(_template, _name)            \
+       LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(compat_syscall_entry_##_template, \
+               compat_syscall_entry_##_name)
+/* Enumerations only defined at initial inclusion (not here). */
+#define SC_LTTNG_TRACEPOINT_ENUM(_name, _values)
+#define TRACE_SYSTEM compat_syscall_entry_integers
+#define TRACE_INCLUDE_FILE compat_syscalls_integers
+#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM compat_syscall_entry_pointers
+#define TRACE_INCLUDE_FILE compat_syscalls_pointers
+#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_SYSTEM
+#undef SC_LTTNG_TRACEPOINT_ENUM
+#undef SC_LTTNG_TRACEPOINT_EVENT_CODE
+#undef SC_LTTNG_TRACEPOINT_EVENT
+#undef SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
+#undef SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
+#undef TP_PROBE_CB
+#undef _TRACE_SYSCALLS_INTEGERS_H
+#undef _TRACE_SYSCALLS_POINTERS_H
+#undef LTTNG_SC_COMPAT
+
+#undef SC_ENTER
+
+#define SC_EXIT
+
+#undef sc_exit
+#define sc_exit(...)           __VA_ARGS__
+#undef sc_in
+#define sc_in(...)
+#undef sc_out
+#define sc_out(...)            __VA_ARGS__
+#undef sc_inout
+#define sc_inout(...)          __VA_ARGS__
+
+/* Hijack probe callback for system call exit */
+#define TP_PROBE_CB(_template)         &syscall_exit_probe
+#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
+       LTTNG_TRACEPOINT_EVENT(syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
+               PARAMS(_fields))
+#define SC_LTTNG_TRACEPOINT_EVENT_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+       LTTNG_TRACEPOINT_EVENT_CODE(syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
+               PARAMS(_locvar), PARAMS(_code_pre), PARAMS(_fields), PARAMS(_code_post))
+#define SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
+       LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(syscall_exit_##_name, PARAMS(_fields))
+#define SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(_template, _name)            \
+       LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(syscall_exit_##_template,        \
+               syscall_exit_##_name)
+/* Enumerations only defined at initial inclusion (not here). */
+#define SC_LTTNG_TRACEPOINT_ENUM(_name, _values)
+#define TRACE_SYSTEM syscall_exit_integers
+#define TRACE_INCLUDE_FILE syscalls_integers
+#include <instrumentation/syscalls/headers/syscalls_integers.h>
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM syscall_exit_pointers
+#define TRACE_INCLUDE_FILE syscalls_pointers
+#include <instrumentation/syscalls/headers/syscalls_pointers.h>
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_SYSTEM
+#undef SC_LTTNG_TRACEPOINT_ENUM
+#undef SC_LTTNG_TRACEPOINT_EVENT_CODE
+#undef SC_LTTNG_TRACEPOINT_EVENT
+#undef SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
+#undef SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
+#undef TP_PROBE_CB
+#undef _TRACE_SYSCALLS_INTEGERS_H
+#undef _TRACE_SYSCALLS_POINTERS_H
+
+/* Hijack probe callback for compat system call exit */
+#define TP_PROBE_CB(_template)         &syscall_exit_probe
+#define LTTNG_SC_COMPAT
+#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
+       LTTNG_TRACEPOINT_EVENT(compat_syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
+               PARAMS(_fields))
+#define SC_LTTNG_TRACEPOINT_EVENT_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+       LTTNG_TRACEPOINT_EVENT_CODE(compat_syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
+               PARAMS(_locvar), PARAMS(_code_pre), PARAMS(_fields), PARAMS(_code_post))
+#define SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
+       LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(compat_syscall_exit_##_name, PARAMS(_fields))
+#define SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(_template, _name)            \
+       LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(compat_syscall_exit_##_template, \
+               compat_syscall_exit_##_name)
+/* Enumerations only defined at initial inclusion (not here). */
+#define SC_LTTNG_TRACEPOINT_ENUM(_name, _values)
+#define TRACE_SYSTEM compat_syscall_exit_integers
+#define TRACE_INCLUDE_FILE compat_syscalls_integers
+#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM compat_syscall_exit_pointers
+#define TRACE_INCLUDE_FILE compat_syscalls_pointers
+#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_SYSTEM
+#undef SC_LTTNG_TRACEPOINT_ENUM
+#undef SC_LTTNG_TRACEPOINT_EVENT_CODE
+#undef SC_LTTNG_TRACEPOINT_EVENT
+#undef SC_LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
+#undef SC_LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
+#undef TP_PROBE_CB
+#undef _TRACE_SYSCALLS_INTEGERS_H
+#undef _TRACE_SYSCALLS_POINTERS_H
+#undef LTTNG_SC_COMPAT
+
+#undef SC_EXIT
+
+#undef TP_MODULE_NOINIT
+#undef LTTNG_PACKAGE_BUILD
+#undef CREATE_TRACE_POINTS
+
+struct trace_syscall_entry {
+       void *func;
+       const struct lttng_event_desc *desc;
+       const struct lttng_event_field *fields;
+       unsigned int nrargs;
+};
+
+#define CREATE_SYSCALL_TABLE
+
+#define SC_ENTER
+
+#undef sc_exit
+#define sc_exit(...)
+
+#undef TRACE_SYSCALL_TABLE
+#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
+       [ _nr ] = {                                             \
+               .func = __event_probe__syscall_entry_##_template, \
+               .nrargs = (_nrargs),                            \
+               .fields = __event_fields___syscall_entry_##_template, \
+               .desc = &__event_desc___syscall_entry_##_name,  \
+       },
+
+/* Syscall enter tracing table */
+static const struct trace_syscall_entry sc_table[] = {
+#include <instrumentation/syscalls/headers/syscalls_integers.h>
+#include <instrumentation/syscalls/headers/syscalls_pointers.h>
+};
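+
+/*
+ * For illustration only (hypothetical syscall): a header line such as
+ * TRACE_SYSCALL_TABLE(sys_read, sys_read, __NR_read, 3) expands, with
+ * the macro above, to a designated initializer roughly like:
+ *
+ *     [__NR_read] = {
+ *             .func = __event_probe__syscall_entry_sys_read,
+ *             .nrargs = (3),
+ *             .fields = __event_fields___syscall_entry_sys_read,
+ *             .desc = &__event_desc___syscall_entry_sys_read,
+ *     },
+ *
+ * which makes sc_table a sparse array indexed directly by syscall
+ * number.
+ */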
+
+#undef TRACE_SYSCALL_TABLE
+#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
+       [ _nr ] = {                                             \
+               .func = __event_probe__compat_syscall_entry_##_template, \
+               .nrargs = (_nrargs),                            \
+               .fields = __event_fields___compat_syscall_entry_##_template, \
+               .desc = &__event_desc___compat_syscall_entry_##_name, \
+       },
+
+/* Compat syscall enter table */
+const struct trace_syscall_entry compat_sc_table[] = {
+#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
+#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
+};
+
+#undef SC_ENTER
+
+#define SC_EXIT
+
+#undef sc_exit
+#define sc_exit(...)           __VA_ARGS__
+
+#undef TRACE_SYSCALL_TABLE
+#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
+       [ _nr ] = {                                             \
+               .func = __event_probe__syscall_exit_##_template, \
+               .nrargs = (_nrargs),                            \
+               .fields = __event_fields___syscall_exit_##_template, \
+               .desc = &__event_desc___syscall_exit_##_name, \
+       },
+
+/* Syscall exit table */
+static const struct trace_syscall_entry sc_exit_table[] = {
+#include <instrumentation/syscalls/headers/syscalls_integers.h>
+#include <instrumentation/syscalls/headers/syscalls_pointers.h>
+};
+
+#undef TRACE_SYSCALL_TABLE
+#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
+       [ _nr ] = {                                             \
+               .func = __event_probe__compat_syscall_exit_##_template, \
+               .nrargs = (_nrargs),                            \
+               .fields = __event_fields___compat_syscall_exit_##_template, \
+               .desc = &__event_desc___compat_syscall_exit_##_name, \
+       },
+
+/* Compat syscall exit table */
+const struct trace_syscall_entry compat_sc_exit_table[] = {
+#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
+#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
+};
+
+#undef SC_EXIT
+
+#undef CREATE_SYSCALL_TABLE
+
+struct lttng_syscall_filter {
+       DECLARE_BITMAP(sc, NR_syscalls);
+       DECLARE_BITMAP(sc_compat, NR_compat_syscalls);
+};
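+
+/*
+ * One bit per (compat) system call number; a set bit means the call is
+ * traced. As a sketch, assuming a hypothetical syscall number nr, the
+ * fast path in the probes boils down to:
+ *
+ *     if (filter && !test_bit(nr, filter->sc))
+ *             return;
+ *
+ * i.e. the syscall is silently skipped.
+ */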
+
+static void syscall_entry_unknown(struct lttng_event *event,
+       struct pt_regs *regs, unsigned int id)
+{
+       unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+       lttng_syscall_get_arguments(current, regs, args);
+       if (unlikely(in_compat_syscall()))
+               __event_probe__compat_syscall_entry_unknown(event, id, args);
+       else
+               __event_probe__syscall_entry_unknown(event, id, args);
+}
+
+void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
+{
+       struct lttng_channel *chan = __data;
+       struct lttng_event *event, *unknown_event;
+       const struct trace_syscall_entry *table, *entry;
+       size_t table_len;
+
+       if (unlikely(in_compat_syscall())) {
+               struct lttng_syscall_filter *filter;
+
+               filter = lttng_rcu_dereference(chan->sc_filter);
+               if (filter) {
+                       if (id < 0 || id >= NR_compat_syscalls
+                               || !test_bit(id, filter->sc_compat)) {
+                               /* System call filtered out. */
+                               return;
+                       }
+               }
+               table = compat_sc_table;
+               table_len = ARRAY_SIZE(compat_sc_table);
+               unknown_event = chan->sc_compat_unknown;
+       } else {
+               struct lttng_syscall_filter *filter;
+
+               filter = lttng_rcu_dereference(chan->sc_filter);
+               if (filter) {
+                       if (id < 0 || id >= NR_syscalls
+                               || !test_bit(id, filter->sc)) {
+                               /* System call filtered out. */
+                               return;
+                       }
+               }
+               table = sc_table;
+               table_len = ARRAY_SIZE(sc_table);
+               unknown_event = chan->sc_unknown;
+       }
+       if (unlikely(id < 0 || id >= table_len)) {
+               syscall_entry_unknown(unknown_event, regs, id);
+               return;
+       }
+       if (unlikely(in_compat_syscall()))
+               event = chan->compat_sc_table[id];
+       else
+               event = chan->sc_table[id];
+       if (unlikely(!event)) {
+               syscall_entry_unknown(unknown_event, regs, id);
+               return;
+       }
+       entry = &table[id];
+       WARN_ON_ONCE(!entry);
+
+       switch (entry->nrargs) {
+       case 0:
+       {
+               void (*fptr)(void *__data) = entry->func;
+
+               fptr(event);
+               break;
+       }
+       case 1:
+       {
+               void (*fptr)(void *__data, unsigned long arg0) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, args[0]);
+               break;
+       }
+       case 2:
+       {
+               void (*fptr)(void *__data,
+                       unsigned long arg0,
+                       unsigned long arg1) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, args[0], args[1]);
+               break;
+       }
+       case 3:
+       {
+               void (*fptr)(void *__data,
+                       unsigned long arg0,
+                       unsigned long arg1,
+                       unsigned long arg2) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, args[0], args[1], args[2]);
+               break;
+       }
+       case 4:
+       {
+               void (*fptr)(void *__data,
+                       unsigned long arg0,
+                       unsigned long arg1,
+                       unsigned long arg2,
+                       unsigned long arg3) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, args[0], args[1], args[2], args[3]);
+               break;
+       }
+       case 5:
+       {
+               void (*fptr)(void *__data,
+                       unsigned long arg0,
+                       unsigned long arg1,
+                       unsigned long arg2,
+                       unsigned long arg3,
+                       unsigned long arg4) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, args[0], args[1], args[2], args[3], args[4]);
+               break;
+       }
+       case 6:
+       {
+               void (*fptr)(void *__data,
+                       unsigned long arg0,
+                       unsigned long arg1,
+                       unsigned long arg2,
+                       unsigned long arg3,
+                       unsigned long arg4,
+                       unsigned long arg5) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, args[0], args[1], args[2],
+                       args[3], args[4], args[5]);
+               break;
+       }
+       default:
+               break;
+       }
+}
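+
+/*
+ * Note on the switch above: entry->func is stored as a void pointer and
+ * cast back to the probe signature matching the syscall's arity before
+ * the call, e.g. for a 2-argument syscall:
+ *
+ *     void (*fptr)(void *__data, unsigned long arg0, unsigned long arg1)
+ *             = entry->func;
+ *     fptr(event, args[0], args[1]);
+ *
+ * This keeps a single generic table while still invoking each probe
+ * with its native prototype.
+ */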
+
+static void syscall_exit_unknown(struct lttng_event *event,
+       struct pt_regs *regs, int id, long ret)
+{
+       unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+       lttng_syscall_get_arguments(current, regs, args);
+       if (unlikely(in_compat_syscall()))
+               __event_probe__compat_syscall_exit_unknown(event, id, ret,
+                       args);
+       else
+               __event_probe__syscall_exit_unknown(event, id, ret, args);
+}
+
+void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
+{
+       struct lttng_channel *chan = __data;
+       struct lttng_event *event, *unknown_event;
+       const struct trace_syscall_entry *table, *entry;
+       size_t table_len;
+       long id;
+
+       id = syscall_get_nr(current, regs);
+       if (unlikely(in_compat_syscall())) {
+               struct lttng_syscall_filter *filter;
+
+               filter = lttng_rcu_dereference(chan->sc_filter);
+               if (filter) {
+                       if (id < 0 || id >= NR_compat_syscalls
+                               || !test_bit(id, filter->sc_compat)) {
+                               /* System call filtered out. */
+                               return;
+                       }
+               }
+               table = compat_sc_exit_table;
+               table_len = ARRAY_SIZE(compat_sc_exit_table);
+               unknown_event = chan->compat_sc_exit_unknown;
+       } else {
+               struct lttng_syscall_filter *filter;
+
+               filter = lttng_rcu_dereference(chan->sc_filter);
+               if (filter) {
+                       if (id < 0 || id >= NR_syscalls
+                               || !test_bit(id, filter->sc)) {
+                               /* System call filtered out. */
+                               return;
+                       }
+               }
+               table = sc_exit_table;
+               table_len = ARRAY_SIZE(sc_exit_table);
+               unknown_event = chan->sc_exit_unknown;
+       }
+       if (unlikely(id < 0 || id >= table_len)) {
+               syscall_exit_unknown(unknown_event, regs, id, ret);
+               return;
+       }
+       if (unlikely(in_compat_syscall()))
+               event = chan->compat_sc_exit_table[id];
+       else
+               event = chan->sc_exit_table[id];
+       if (unlikely(!event)) {
+               syscall_exit_unknown(unknown_event, regs, id, ret);
+               return;
+       }
+       entry = &table[id];
+       WARN_ON_ONCE(!entry);
+
+       switch (entry->nrargs) {
+       case 0:
+       {
+               void (*fptr)(void *__data, long ret) = entry->func;
+
+               fptr(event, ret);
+               break;
+       }
+       case 1:
+       {
+               void (*fptr)(void *__data,
+                       long ret,
+                       unsigned long arg0) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, ret, args[0]);
+               break;
+       }
+       case 2:
+       {
+               void (*fptr)(void *__data,
+                       long ret,
+                       unsigned long arg0,
+                       unsigned long arg1) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, ret, args[0], args[1]);
+               break;
+       }
+       case 3:
+       {
+               void (*fptr)(void *__data,
+                       long ret,
+                       unsigned long arg0,
+                       unsigned long arg1,
+                       unsigned long arg2) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, ret, args[0], args[1], args[2]);
+               break;
+       }
+       case 4:
+       {
+               void (*fptr)(void *__data,
+                       long ret,
+                       unsigned long arg0,
+                       unsigned long arg1,
+                       unsigned long arg2,
+                       unsigned long arg3) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, ret, args[0], args[1], args[2], args[3]);
+               break;
+       }
+       case 5:
+       {
+               void (*fptr)(void *__data,
+                       long ret,
+                       unsigned long arg0,
+                       unsigned long arg1,
+                       unsigned long arg2,
+                       unsigned long arg3,
+                       unsigned long arg4) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, ret, args[0], args[1], args[2], args[3], args[4]);
+               break;
+       }
+       case 6:
+       {
+               void (*fptr)(void *__data,
+                       long ret,
+                       unsigned long arg0,
+                       unsigned long arg1,
+                       unsigned long arg2,
+                       unsigned long arg3,
+                       unsigned long arg4,
+                       unsigned long arg5) = entry->func;
+               unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+
+               lttng_syscall_get_arguments(current, regs, args);
+               fptr(event, ret, args[0], args[1], args[2],
+                       args[3], args[4], args[5]);
+               break;
+       }
+       default:
+               break;
+       }
+}
+
+/*
+ * noinline to diminish caller stack size.
+ * Should be called with sessions lock held.
+ */
+static
+int fill_table(const struct trace_syscall_entry *table, size_t table_len,
+       struct lttng_event **chan_table, struct lttng_channel *chan,
+       void *filter, enum sc_type type)
+{
+       const struct lttng_event_desc *desc;
+       unsigned int i;
+
+       /* Allocate an event for each system call; insert it into the table. */
+       for (i = 0; i < table_len; i++) {
+               struct lttng_kernel_event ev;
+               desc = table[i].desc;
+
+               if (!desc) {
+                       /* Unknown syscall */
+                       continue;
+               }
+               /*
+                * Skip entries already populated by a previous failed
+                * registration attempt for this channel.
+                */
+               if (chan_table[i])
+                       continue;
+               memset(&ev, 0, sizeof(ev));
+               switch (type) {
+               case SC_TYPE_ENTRY:
+                       strncpy(ev.name, SYSCALL_ENTRY_STR,
+                               LTTNG_KERNEL_SYM_NAME_LEN);
+                       break;
+               case SC_TYPE_EXIT:
+                       strncpy(ev.name, SYSCALL_EXIT_STR,
+                               LTTNG_KERNEL_SYM_NAME_LEN);
+                       break;
+               case SC_TYPE_COMPAT_ENTRY:
+                       strncpy(ev.name, COMPAT_SYSCALL_ENTRY_STR,
+                               LTTNG_KERNEL_SYM_NAME_LEN);
+                       break;
+               case SC_TYPE_COMPAT_EXIT:
+                       strncpy(ev.name, COMPAT_SYSCALL_EXIT_STR,
+                               LTTNG_KERNEL_SYM_NAME_LEN);
+                       break;
+               default:
+                       BUG_ON(1);
+                       break;
+               }
+               strncat(ev.name, desc->name,
+                       LTTNG_KERNEL_SYM_NAME_LEN - strlen(ev.name) - 1);
+               ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+               ev.instrumentation = LTTNG_KERNEL_SYSCALL;
+               chan_table[i] = _lttng_event_create(chan, &ev, filter,
+                                               desc, ev.instrumentation);
+               WARN_ON_ONCE(!chan_table[i]);
+               if (IS_ERR(chan_table[i])) {
+                       /*
+                        * If something goes wrong in event registration
+                        * after the first one, we have no choice but to
+                        * leave the previous events in there, until
+                        * deleted by session teardown.
+                        */
+                       return PTR_ERR(chan_table[i]);
+               }
+       }
+       return 0;
+}
+
+/*
+ * Should be called with sessions lock held.
+ */
+int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
+{
+       struct lttng_kernel_event ev;
+       int ret;
+
+       wrapper_vmalloc_sync_mappings();
+
+       if (!chan->sc_table) {
+               /* create syscall table mapping syscalls to events */
+               chan->sc_table = kzalloc(sizeof(struct lttng_event *)
+                                       * ARRAY_SIZE(sc_table), GFP_KERNEL);
+               if (!chan->sc_table)
+                       return -ENOMEM;
+       }
+       if (!chan->sc_exit_table) {
+               /* create syscall table mapping syscalls to events */
+               chan->sc_exit_table = kzalloc(sizeof(struct lttng_event *)
+                                       * ARRAY_SIZE(sc_exit_table), GFP_KERNEL);
+               if (!chan->sc_exit_table)
+                       return -ENOMEM;
+       }
+
+#ifdef CONFIG_COMPAT
+       if (!chan->compat_sc_table) {
+               /* create syscall table mapping compat syscalls to events */
+               chan->compat_sc_table = kzalloc(sizeof(struct lttng_event *)
+                                       * ARRAY_SIZE(compat_sc_table), GFP_KERNEL);
+               if (!chan->compat_sc_table)
+                       return -ENOMEM;
+       }
+
+       if (!chan->compat_sc_exit_table) {
+               /* create syscall table mapping compat syscalls to events */
+               chan->compat_sc_exit_table = kzalloc(sizeof(struct lttng_event *)
+                                       * ARRAY_SIZE(compat_sc_exit_table), GFP_KERNEL);
+               if (!chan->compat_sc_exit_table)
+                       return -ENOMEM;
+       }
+#endif
+       if (!chan->sc_unknown) {
+               const struct lttng_event_desc *desc =
+                       &__event_desc___syscall_entry_unknown;
+
+               memset(&ev, 0, sizeof(ev));
+               strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
+               ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+               ev.instrumentation = LTTNG_KERNEL_SYSCALL;
+               chan->sc_unknown = _lttng_event_create(chan, &ev, filter,
+                                               desc,
+                                               ev.instrumentation);
+               WARN_ON_ONCE(!chan->sc_unknown);
+               if (IS_ERR(chan->sc_unknown)) {
+                       return PTR_ERR(chan->sc_unknown);
+               }
+       }
+
+       if (!chan->sc_compat_unknown) {
+               const struct lttng_event_desc *desc =
+                       &__event_desc___compat_syscall_entry_unknown;
+
+               memset(&ev, 0, sizeof(ev));
+               strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
+               ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+               ev.instrumentation = LTTNG_KERNEL_SYSCALL;
+               chan->sc_compat_unknown = _lttng_event_create(chan, &ev, filter,
+                                               desc,
+                                               ev.instrumentation);
+               WARN_ON_ONCE(!chan->sc_compat_unknown);
+               if (IS_ERR(chan->sc_compat_unknown)) {
+                       return PTR_ERR(chan->sc_compat_unknown);
+               }
+       }
+
+       if (!chan->compat_sc_exit_unknown) {
+               const struct lttng_event_desc *desc =
+                       &__event_desc___compat_syscall_exit_unknown;
+
+               memset(&ev, 0, sizeof(ev));
+               strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
+               ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+               ev.instrumentation = LTTNG_KERNEL_SYSCALL;
+               chan->compat_sc_exit_unknown = _lttng_event_create(chan, &ev,
+                                               filter, desc,
+                                               ev.instrumentation);
+               WARN_ON_ONCE(!chan->compat_sc_exit_unknown);
+               if (IS_ERR(chan->compat_sc_exit_unknown)) {
+                       return PTR_ERR(chan->compat_sc_exit_unknown);
+               }
+       }
+
+       if (!chan->sc_exit_unknown) {
+               const struct lttng_event_desc *desc =
+                       &__event_desc___syscall_exit_unknown;
+
+               memset(&ev, 0, sizeof(ev));
+               strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
+               ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+               ev.instrumentation = LTTNG_KERNEL_SYSCALL;
+               chan->sc_exit_unknown = _lttng_event_create(chan, &ev, filter,
+                                               desc, ev.instrumentation);
+               WARN_ON_ONCE(!chan->sc_exit_unknown);
+               if (IS_ERR(chan->sc_exit_unknown)) {
+                       return PTR_ERR(chan->sc_exit_unknown);
+               }
+       }
+
+       ret = fill_table(sc_table, ARRAY_SIZE(sc_table),
+                       chan->sc_table, chan, filter, SC_TYPE_ENTRY);
+       if (ret)
+               return ret;
+       ret = fill_table(sc_exit_table, ARRAY_SIZE(sc_exit_table),
+                       chan->sc_exit_table, chan, filter, SC_TYPE_EXIT);
+       if (ret)
+               return ret;
+
+#ifdef CONFIG_COMPAT
+       ret = fill_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
+                       chan->compat_sc_table, chan, filter,
+                       SC_TYPE_COMPAT_ENTRY);
+       if (ret)
+               return ret;
+       ret = fill_table(compat_sc_exit_table, ARRAY_SIZE(compat_sc_exit_table),
+                       chan->compat_sc_exit_table, chan, filter,
+                       SC_TYPE_COMPAT_EXIT);
+       if (ret)
+               return ret;
+#endif
+       if (!chan->sys_enter_registered) {
+               ret = lttng_wrapper_tracepoint_probe_register("sys_enter",
+                               (void *) syscall_entry_probe, chan);
+               if (ret)
+                       return ret;
+               chan->sys_enter_registered = 1;
+       }
+       /*
+        * We change the name of the sys_exit tracepoint due to a
+        * namespace conflict with the sys_exit system call entry.
+        */
+       if (!chan->sys_exit_registered) {
+               ret = lttng_wrapper_tracepoint_probe_register("sys_exit",
+                               (void *) syscall_exit_probe, chan);
+               if (ret) {
+                       WARN_ON_ONCE(lttng_wrapper_tracepoint_probe_unregister("sys_enter",
+                               (void *) syscall_entry_probe, chan));
+                       return ret;
+               }
+               chan->sys_exit_registered = 1;
+       }
+       return ret;
+}
+
+/*
+ * Only called at session destruction.
+ */
+int lttng_syscalls_unregister(struct lttng_channel *chan)
+{
+       int ret;
+
+       if (!chan->sc_table)
+               return 0;
+       if (chan->sys_enter_registered) {
+               ret = lttng_wrapper_tracepoint_probe_unregister("sys_enter",
+                               (void *) syscall_entry_probe, chan);
+               if (ret)
+                       return ret;
+               chan->sys_enter_registered = 0;
+       }
+       if (chan->sys_exit_registered) {
+               ret = lttng_wrapper_tracepoint_probe_unregister("sys_exit",
+                               (void *) syscall_exit_probe, chan);
+               if (ret)
+                       return ret;
+               chan->sys_exit_registered = 0;
+       }
+       /* lttng_event destruction will be performed by lttng_session_destroy(). */
+       kfree(chan->sc_table);
+       kfree(chan->sc_exit_table);
+#ifdef CONFIG_COMPAT
+       kfree(chan->compat_sc_table);
+       kfree(chan->compat_sc_exit_table);
+#endif
+       kfree(chan->sc_filter);
+       return 0;
+}
+
+static
+int get_syscall_nr(const char *syscall_name)
+{
+       int syscall_nr = -1;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(sc_table); i++) {
+               const struct trace_syscall_entry *entry;
+               const char *it_name;
+
+               entry = &sc_table[i];
+               if (!entry->desc)
+                       continue;
+               it_name = entry->desc->name;
+               it_name += strlen(SYSCALL_ENTRY_STR);
+               if (!strcmp(syscall_name, it_name)) {
+                       syscall_nr = i;
+                       break;
+               }
+       }
+       return syscall_nr;
+}
+
+static
+int get_compat_syscall_nr(const char *syscall_name)
+{
+       int syscall_nr = -1;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(compat_sc_table); i++) {
+               const struct trace_syscall_entry *entry;
+               const char *it_name;
+
+               entry = &compat_sc_table[i];
+               if (!entry->desc)
+                       continue;
+               it_name = entry->desc->name;
+               it_name += strlen(COMPAT_SYSCALL_ENTRY_STR);
+               if (!strcmp(syscall_name, it_name)) {
+                       syscall_nr = i;
+                       break;
+               }
+       }
+       return syscall_nr;
+}
+
+static
+uint32_t get_sc_tables_len(void)
+{
+       return ARRAY_SIZE(sc_table) + ARRAY_SIZE(compat_sc_table);
+}
+
+int lttng_syscall_filter_enable(struct lttng_channel *chan,
+               const char *name)
+{
+       int syscall_nr, compat_syscall_nr, ret;
+       struct lttng_syscall_filter *filter;
+
+       WARN_ON_ONCE(!chan->sc_table);
+
+       if (!name) {
+               /* Enable all system calls by removing the filter. */
+               if (chan->sc_filter) {
+                       filter = chan->sc_filter;
+                       rcu_assign_pointer(chan->sc_filter, NULL);
+                       synchronize_trace();
+                       kfree(filter);
+               }
+               chan->syscall_all = 1;
+               return 0;
+       }
+
+       if (!chan->sc_filter) {
+               if (chan->syscall_all) {
+                       /*
+                        * All syscalls are already enabled.
+                        */
+                       return -EEXIST;
+               }
+               filter = kzalloc(sizeof(struct lttng_syscall_filter),
+                               GFP_KERNEL);
+               if (!filter)
+                       return -ENOMEM;
+       } else {
+               filter = chan->sc_filter;
+       }
+       syscall_nr = get_syscall_nr(name);
+       compat_syscall_nr = get_compat_syscall_nr(name);
+       if (syscall_nr < 0 && compat_syscall_nr < 0) {
+               ret = -ENOENT;
+               goto error;
+       }
+       if (syscall_nr >= 0) {
+               if (test_bit(syscall_nr, filter->sc)) {
+                       ret = -EEXIST;
+                       goto error;
+               }
+               bitmap_set(filter->sc, syscall_nr, 1);
+       }
+       if (compat_syscall_nr >= 0) {
+               if (test_bit(compat_syscall_nr, filter->sc_compat)) {
+                       ret = -EEXIST;
+                       goto error;
+               }
+               bitmap_set(filter->sc_compat, compat_syscall_nr, 1);
+       }
+       if (!chan->sc_filter)
+               rcu_assign_pointer(chan->sc_filter, filter);
+       return 0;
+
+error:
+       if (!chan->sc_filter)
+               kfree(filter);
+       return ret;
+}
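+
+/*
+ * Publication note: the filter is made visible with rcu_assign_pointer()
+ * only once initialized, and the probes read it with
+ * lttng_rcu_dereference(). Later updates flip bits in the already
+ * published bitmap in place; a concurrent test_bit() in the probe simply
+ * observes the bit either before or after the update, which is the
+ * intended semantic.
+ */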
+
+int lttng_syscall_filter_disable(struct lttng_channel *chan,
+               const char *name)
+{
+       int syscall_nr, compat_syscall_nr, ret;
+       struct lttng_syscall_filter *filter;
+
+       WARN_ON_ONCE(!chan->sc_table);
+
+       if (!chan->sc_filter) {
+               if (!chan->syscall_all)
+                       return -EEXIST;
+               filter = kzalloc(sizeof(struct lttng_syscall_filter),
+                               GFP_KERNEL);
+               if (!filter)
+                       return -ENOMEM;
+               /* Start with all system calls traced, then apply the disable request. */
+               bitmap_set(filter->sc, 0, NR_syscalls);
+               bitmap_set(filter->sc_compat, 0, NR_compat_syscalls);
+       } else {
+               filter = chan->sc_filter;
+       }
+
+       if (!name) {
+               /* Fail if all syscalls are already disabled. */
+               if (bitmap_empty(filter->sc, NR_syscalls)
+                       && bitmap_empty(filter->sc_compat,
+                               NR_compat_syscalls)) {
+                       ret = -EEXIST;
+                       goto error;
+               }
+
+               /* Disable all system calls */
+               bitmap_clear(filter->sc, 0, NR_syscalls);
+               bitmap_clear(filter->sc_compat, 0, NR_compat_syscalls);
+               goto apply_filter;
+       }
+       syscall_nr = get_syscall_nr(name);
+       compat_syscall_nr = get_compat_syscall_nr(name);
+       if (syscall_nr < 0 && compat_syscall_nr < 0) {
+               ret = -ENOENT;
+               goto error;
+       }
+       if (syscall_nr >= 0) {
+               if (!test_bit(syscall_nr, filter->sc)) {
+                       ret = -EEXIST;
+                       goto error;
+               }
+               bitmap_clear(filter->sc, syscall_nr, 1);
+       }
+       if (compat_syscall_nr >= 0) {
+               if (!test_bit(compat_syscall_nr, filter->sc_compat)) {
+                       ret = -EEXIST;
+                       goto error;
+               }
+               bitmap_clear(filter->sc_compat, compat_syscall_nr, 1);
+       }
+apply_filter:
+       if (!chan->sc_filter)
+               rcu_assign_pointer(chan->sc_filter, filter);
+       chan->syscall_all = 0;
+       return 0;
+
+error:
+       if (!chan->sc_filter)
+               kfree(filter);
+       return ret;
+}
+
+static
+const struct trace_syscall_entry *syscall_list_get_entry(loff_t *pos)
+{
+       const struct trace_syscall_entry *entry;
+       int iter = 0;
+
+       for (entry = sc_table;
+                       entry < sc_table + ARRAY_SIZE(sc_table);
+                        entry++) {
+               if (iter++ >= *pos)
+                       return entry;
+       }
+       for (entry = compat_sc_table;
+                       entry < compat_sc_table + ARRAY_SIZE(compat_sc_table);
+                        entry++) {
+               if (iter++ >= *pos)
+                       return entry;
+       }
+       /* End of list */
+       return NULL;
+}
+
+static
+void *syscall_list_start(struct seq_file *m, loff_t *pos)
+{
+       return (void *) syscall_list_get_entry(pos);
+}
+
+static
+void *syscall_list_next(struct seq_file *m, void *p, loff_t *ppos)
+{
+       (*ppos)++;
+       return (void *) syscall_list_get_entry(ppos);
+}
+
+static
+void syscall_list_stop(struct seq_file *m, void *p)
+{
+}
+
+static
+int get_sc_table(const struct trace_syscall_entry *entry,
+               const struct trace_syscall_entry **table,
+               unsigned int *bitness)
+{
+       if (entry >= sc_table && entry < sc_table + ARRAY_SIZE(sc_table)) {
+               if (bitness)
+                       *bitness = BITS_PER_LONG;
+               if (table)
+                       *table = sc_table;
+               return 0;
+       }
+       if (!(entry >= compat_sc_table
+                       && entry < compat_sc_table + ARRAY_SIZE(compat_sc_table))) {
+               return -EINVAL;
+       }
+       if (bitness)
+               *bitness = 32;
+       if (table)
+               *table = compat_sc_table;
+       return 0;
+}
+
+static
+int syscall_list_show(struct seq_file *m, void *p)
+{
+       const struct trace_syscall_entry *table, *entry = p;
+       unsigned int bitness;
+       unsigned long index;
+       int ret;
+       const char *name;
+
+       ret = get_sc_table(entry, &table, &bitness);
+       if (ret)
+               return ret;
+       if (!entry->desc)
+               return 0;
+       if (table == sc_table) {
+               index = entry - table;
+               name = &entry->desc->name[strlen(SYSCALL_ENTRY_STR)];
+       } else {
+               index = (entry - table) + ARRAY_SIZE(sc_table);
+               name = &entry->desc->name[strlen(COMPAT_SYSCALL_ENTRY_STR)];
+       }
+       seq_printf(m,   "syscall { index = %lu; name = %s; bitness = %u; };\n",
+               index, name, bitness);
+       return 0;
+}
+
+static
+const struct seq_operations lttng_syscall_list_seq_ops = {
+       .start = syscall_list_start,
+       .next = syscall_list_next,
+       .stop = syscall_list_stop,
+       .show = syscall_list_show,
+};
+
+static
+int lttng_syscall_list_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &lttng_syscall_list_seq_ops);
+}
+
+const struct file_operations lttng_syscall_list_fops = {
+       .owner = THIS_MODULE,
+       .open = lttng_syscall_list_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
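+
+/*
+ * Standard seq_file iteration: on a read(), the kernel calls start() to
+ * position the cursor, show() to format the current entry, next() to
+ * advance, and stop() at the end of a batch. Each entry prints as one
+ * line of text, e.g. (illustrative output):
+ *
+ *     syscall { index = 0; name = read; bitness = 64; };
+ */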
+
+long lttng_channel_syscall_mask(struct lttng_channel *channel,
+               struct lttng_kernel_syscall_mask __user *usyscall_mask)
+{
+       uint32_t len, sc_tables_len, bitmask_len;
+       int ret = 0, bit;
+       char *tmp_mask;
+       struct lttng_syscall_filter *filter;
+
+       ret = get_user(len, &usyscall_mask->len);
+       if (ret)
+               return ret;
+       sc_tables_len = get_sc_tables_len();
+       bitmask_len = ALIGN(sc_tables_len, 8) >> 3;
+       if (len < sc_tables_len) {
+               return put_user(sc_tables_len, &usyscall_mask->len);
+       }
+       /* Array is large enough; we can copy it to user-space. */
+       tmp_mask = kzalloc(bitmask_len, GFP_KERNEL);
+       if (!tmp_mask)
+               return -ENOMEM;
+       filter = channel->sc_filter;
+
+       for (bit = 0; bit < ARRAY_SIZE(sc_table); bit++) {
+               char state;
+
+               if (channel->sc_table) {
+                       if (filter)
+                               state = test_bit(bit, filter->sc);
+                       else
+                               state = 1;
+               } else {
+                       state = 0;
+               }
+               bt_bitfield_write_be(tmp_mask, char, bit, 1, state);
+       }
+       for (; bit < sc_tables_len; bit++) {
+               char state;
+
+               if (channel->compat_sc_table) {
+                       if (filter)
+                               state = test_bit(bit - ARRAY_SIZE(sc_table),
+                                               filter->sc_compat);
+                       else
+                               state = 1;
+               } else {
+                       state = 0;
+               }
+               bt_bitfield_write_be(tmp_mask, char, bit, 1, state);
+       }
+       if (copy_to_user(usyscall_mask->mask, tmp_mask, bitmask_len))
+               ret = -EFAULT;
+       kfree(tmp_mask);
+       return ret;
+}
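+
+/*
+ * Bitmask sizing, worked example: with, say, 450 native + compat
+ * syscalls, ALIGN(450, 8) >> 3 = 456 / 8 = 57 bytes, one bit per
+ * syscall, written big-endian by bt_bitfield_write_be(), with compat
+ * syscalls starting at bit ARRAY_SIZE(sc_table).
+ */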
+
+int lttng_abi_syscall_list(void)
+{
+       struct file *syscall_list_file;
+       int file_fd, ret;
+
+       file_fd = lttng_get_unused_fd();
+       if (file_fd < 0) {
+               ret = file_fd;
+               goto fd_error;
+       }
+
+       syscall_list_file = anon_inode_getfile("[lttng_syscall_list]",
+                                         &lttng_syscall_list_fops,
+                                         NULL, O_RDWR);
+       if (IS_ERR(syscall_list_file)) {
+               ret = PTR_ERR(syscall_list_file);
+               goto file_error;
+       }
+       ret = lttng_syscall_list_fops.open(NULL, syscall_list_file);
+       if (ret < 0)
+               goto open_error;
+       fd_install(file_fd, syscall_list_file);
+       return file_fd;
+
+open_error:
+       fput(syscall_list_file);
+file_error:
+       put_unused_fd(file_fd);
+fd_error:
+       return ret;
+}
diff --git a/src/lttng-tp-mempool.c b/src/lttng-tp-mempool.c
new file mode 100644 (file)
index 0000000..70ee5cc
--- /dev/null
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-tp-mempool.c
+ *
+ * Copyright (C) 2018 Julien Desfossez <jdesfossez@efficios.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/percpu.h>
+
+#include <lttng/tp-mempool.h>
+
+struct lttng_tp_buf_entry {
+       int cpu; /* To make sure we return the entry to the right pool. */
+       char buf[LTTNG_TP_MEMPOOL_BUF_SIZE];
+       struct list_head list;
+};
+
+/*
+ * No exclusive access strategy for now: this memory pool is currently only
+ * used from a non-preemptible context, and the interrupt tracepoint probes do
+ * not use this facility.
+ */
+struct per_cpu_buf {
+       struct list_head free_list; /* Free struct lttng_tp_buf_entry. */
+};
+
+static struct per_cpu_buf __percpu *pool; /* Per-cpu buffer. */
+
+int lttng_tp_mempool_init(void)
+{
+       int ret, cpu;
+
+       /* The pool is only supposed to be allocated once. */
+       if (pool) {
+               WARN_ON_ONCE(1);
+               ret = -1;
+               goto end;
+       }
+
+       pool = alloc_percpu(struct per_cpu_buf);
+       if (!pool) {
+               ret = -ENOMEM;
+               goto end;
+       }
+
+       for_each_possible_cpu(cpu) {
+               struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
+
+               INIT_LIST_HEAD(&cpu_buf->free_list);
+       }
+
+       for_each_possible_cpu(cpu) {
+               int i;
+               struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
+
+               for (i = 0; i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU; i++) {
+                       struct lttng_tp_buf_entry *entry;
+
+                       entry = kzalloc_node(sizeof(struct lttng_tp_buf_entry),
+                                       GFP_KERNEL, cpu_to_node(cpu));
+                       if (!entry) {
+                               ret = -ENOMEM;
+                               goto error_free_pool;
+                       }
+                       entry->cpu = cpu;
+                       list_add_tail(&entry->list, &cpu_buf->free_list);
+               }
+       }
+
+       ret = 0;
+       goto end;
+
+error_free_pool:
+       lttng_tp_mempool_destroy();
+end:
+       return ret;
+}
+
+void lttng_tp_mempool_destroy(void)
+{
+       int cpu;
+
+       if (!pool) {
+               return;
+       }
+
+       for_each_possible_cpu(cpu) {
+               struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
+               struct lttng_tp_buf_entry *entry, *tmp;
+               int i = 0;
+
+               list_for_each_entry_safe(entry, tmp, &cpu_buf->free_list, list) {
+                       list_del(&entry->list);
+                       kfree(entry);
+                       i++;
+               }
+               if (i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU) {
+                       printk(KERN_WARNING "Leak detected in tp-mempool\n");
+               }
+       }
+       free_percpu(pool);
+       pool = NULL;
+}
+
+void *lttng_tp_mempool_alloc(size_t size)
+{
+       void *ret;
+       struct lttng_tp_buf_entry *entry;
+       struct per_cpu_buf *cpu_buf;
+       int cpu = smp_processor_id();
+
+       if (size > LTTNG_TP_MEMPOOL_BUF_SIZE) {
+               ret = NULL;
+               goto end;
+       }
+
+       cpu_buf = per_cpu_ptr(pool, cpu);
+       if (list_empty(&cpu_buf->free_list)) {
+               ret = NULL;
+               goto end;
+       }
+
+       entry = list_first_entry(&cpu_buf->free_list, struct lttng_tp_buf_entry, list);
+       /* Remove the entry from the free list. */
+       list_del(&entry->list);
+
+       memset(entry->buf, 0, LTTNG_TP_MEMPOOL_BUF_SIZE);
+
+       ret = (void *) entry->buf;
+
+end:
+       return ret;
+}
+
+void lttng_tp_mempool_free(void *ptr)
+{
+       struct lttng_tp_buf_entry *entry;
+       struct per_cpu_buf *cpu_buf;
+
+       if (!ptr)
+               goto end;
+       entry = container_of(ptr, struct lttng_tp_buf_entry, buf);
+       cpu_buf = per_cpu_ptr(pool, entry->cpu);
+       if (!cpu_buf)
+               goto end;
+       /* Add it to the free list. */
+       list_add_tail(&entry->list, &cpu_buf->free_list);
+
+end:
+       return;
+}
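+
+/*
+ * Typical usage from a probe, as a sketch (assuming the caller runs in
+ * a non-preemptible context, as required by the pool design above):
+ *
+ *     char *buf = lttng_tp_mempool_alloc(LTTNG_TP_MEMPOOL_BUF_SIZE);
+ *
+ *     if (!buf)
+ *             return;
+ *     ... fill buf ...
+ *     lttng_tp_mempool_free(buf);
+ *
+ * lttng_tp_mempool_free() relies on container_of() to locate the owning
+ * entry, so it must only be passed pointers returned by
+ * lttng_tp_mempool_alloc().
+ */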
diff --git a/src/lttng-tracepoint.c b/src/lttng-tracepoint.c
new file mode 100644 (file)
index 0000000..ed78a17
--- /dev/null
@@ -0,0 +1,439 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-tracepoint.c
+ *
+ * LTTng adaptation layer for Linux kernel 3.15+ tracepoints.
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/notifier.h>
+#include <linux/tracepoint.h>
+#include <linux/slab.h>
+#include <linux/jhash.h>
+#include <linux/module.h>
+
+#include <lttng/tracepoint.h>
+#include <wrapper/list.h>
+#include <wrapper/tracepoint.h>
+
+/*
+ * Protect the tracepoint table. Lock nesting, outermost first:
+ * tp_modlist_mutex (kernel/tracepoint.c), lttng_tracepoint_mutex,
+ * then tracepoint_mutex (kernel/tracepoint.c).
+ */
+static
+DEFINE_MUTEX(lttng_tracepoint_mutex);
+
+#define TRACEPOINT_HASH_BITS 6
+#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
+static
+struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
+
+/*
+ * The tracepoint entry is the node contained within the hash table. It
+ * is a mapping from the "string" key to the struct tracepoint pointer.
+ */
+struct tracepoint_entry {
+       struct hlist_node hlist;
+       struct tracepoint *tp;
+       int refcount;
+       struct list_head probes;
+       char name[0];
+};
+
+struct lttng_tp_probe {
+       struct tracepoint_func tp_func;
+       struct list_head list;
+};
+
+static
+int add_probe(struct tracepoint_entry *e, void *probe, void *data)
+{
+       struct lttng_tp_probe *p;
+       int found = 0;
+
+       list_for_each_entry(p, &e->probes, list) {
+               if (p->tp_func.func == probe && p->tp_func.data == data) {
+                       found = 1;
+                       break;
+               }
+       }
+       if (found)
+               return -EEXIST;
+       p = kmalloc(sizeof(struct lttng_tp_probe), GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+       p->tp_func.func = probe;
+       p->tp_func.data = data;
+       list_add(&p->list, &e->probes);
+       return 0;
+}
+
+static
+int remove_probe(struct tracepoint_entry *e, void *probe, void *data)
+{
+       struct lttng_tp_probe *p;
+       int found = 0;
+
+       list_for_each_entry(p, &e->probes, list) {
+               if (p->tp_func.func == probe && p->tp_func.data == data) {
+                       found = 1;
+                       break;
+               }
+       }
+       if (found) {
+               list_del(&p->list);
+               kfree(p);
+               return 0;
+       } else {
+               WARN_ON(1);
+               return -ENOENT;
+       }
+}
+
+/*
+ * Get the tracepoint entry if present in the tracepoint hash table.
+ * Must be called with lttng_tracepoint_mutex held.
+ * Returns NULL if not present.
+ */
+static
+struct tracepoint_entry *get_tracepoint(const char *name)
+{
+       struct hlist_head *head;
+       struct tracepoint_entry *e;
+       u32 hash = jhash(name, strlen(name), 0);
+
+       head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
+       lttng_hlist_for_each_entry(e, head, hlist) {
+               if (!strcmp(name, e->name))
+                       return e;
+       }
+       return NULL;
+}
+
+/*
+ * Add the tracepoint to the tracepoint hash table. Must be called with
+ * lttng_tracepoint_mutex held.
+ */
+static
+struct tracepoint_entry *add_tracepoint(const char *name)
+{
+       struct hlist_head *head;
+       struct tracepoint_entry *e;
+       size_t name_len = strlen(name) + 1;
+       u32 hash = jhash(name, name_len - 1, 0);
+
+       head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
+       lttng_hlist_for_each_entry(e, head, hlist) {
+               if (!strcmp(name, e->name)) {
+                       printk(KERN_NOTICE
+                               "tracepoint %s busy\n", name);
+                       return ERR_PTR(-EEXIST);        /* Already there */
+               }
+       }
+       /*
+        * Using kmalloc here to allocate a variable length element. Could
+        * cause some memory fragmentation if overused.
+        */
+       e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
+       if (!e)
+               return ERR_PTR(-ENOMEM);
+       memcpy(&e->name[0], name, name_len);
+       e->tp = NULL;
+       e->refcount = 0;
+       INIT_LIST_HEAD(&e->probes);
+       hlist_add_head(&e->hlist, head);
+       return e;
+}
+
+/*
+ * Remove the tracepoint from the tracepoint hash table. Must be called
+ * with lttng_tracepoint_mutex held.
+ */
+static
+void remove_tracepoint(struct tracepoint_entry *e)
+{
+       hlist_del(&e->hlist);
+       kfree(e);
+}
+
+int lttng_tracepoint_probe_register(const char *name, void *probe, void *data)
+{
+       struct tracepoint_entry *e;
+       int ret = 0;
+
+       mutex_lock(&lttng_tracepoint_mutex);
+       e = get_tracepoint(name);
+       if (!e) {
+               e = add_tracepoint(name);
+               if (IS_ERR(e)) {
+                       ret = PTR_ERR(e);
+                       goto end;
+               }
+       }
+       /* add (probe, data) to entry */
+       ret = add_probe(e, probe, data);
+       if (ret)
+               goto end;
+       e->refcount++;
+       if (e->tp) {
+               ret = tracepoint_probe_register(e->tp, probe, data);
+               WARN_ON_ONCE(ret);
+               ret = 0;
+       }
+end:
+       mutex_unlock(&lttng_tracepoint_mutex);
+       return ret;
+}
+
+int lttng_tracepoint_probe_unregister(const char *name, void *probe, void *data)
+{
+       struct tracepoint_entry *e;
+       int ret = 0;
+
+       mutex_lock(&lttng_tracepoint_mutex);
+       e = get_tracepoint(name);
+       if (!e) {
+               ret = -ENOENT;
+               goto end;
+       }
+       /* remove (probe, data) from entry */
+       ret = remove_probe(e, probe, data);
+       if (ret)
+               goto end;
+       if (e->tp) {
+               ret = tracepoint_probe_unregister(e->tp, probe, data);
+               WARN_ON_ONCE(ret);
+               ret = 0;
+       }
+       if (!--e->refcount)
+               remove_tracepoint(e);
+end:
+       mutex_unlock(&lttng_tracepoint_mutex);
+       return ret;
+}
+
+#ifdef CONFIG_MODULES
+
+static
+int lttng_tracepoint_coming(struct tp_module *tp_mod)
+{
+       int i;
+
+       mutex_lock(&lttng_tracepoint_mutex);
+       for (i = 0; i < tp_mod->mod->num_tracepoints; i++) {
+               struct tracepoint *tp;
+               struct tracepoint_entry *e;
+               struct lttng_tp_probe *p;
+
+               tp = lttng_tracepoint_ptr_deref(&tp_mod->mod->tracepoints_ptrs[i]);
+               e = get_tracepoint(tp->name);
+               if (!e) {
+                       e = add_tracepoint(tp->name);
+                       if (IS_ERR(e)) {
+                               pr_warn("LTTng: error (%ld) adding tracepoint\n",
+                                       PTR_ERR(e));
+                               continue;
+                       }
+               }
+               /* If already enabled, just check consistency */
+               if (e->tp) {
+                       WARN_ON(e->tp != tp);
+                       continue;
+               }
+               e->tp = tp;
+               e->refcount++;
+               /* register each (probe, data) */
+               list_for_each_entry(p, &e->probes, list) {
+                       int ret;
+
+                       ret = tracepoint_probe_register(e->tp,
+                                       p->tp_func.func, p->tp_func.data);
+                       WARN_ON_ONCE(ret);
+               }
+       }
+       mutex_unlock(&lttng_tracepoint_mutex);
+       return NOTIFY_OK;
+}
+
+static
+int lttng_tracepoint_going(struct tp_module *tp_mod)
+{
+       int i;
+
+       mutex_lock(&lttng_tracepoint_mutex);
+       for (i = 0; i < tp_mod->mod->num_tracepoints; i++) {
+               struct tracepoint *tp;
+               struct tracepoint_entry *e;
+               struct lttng_tp_probe *p;
+
+               tp = lttng_tracepoint_ptr_deref(&tp_mod->mod->tracepoints_ptrs[i]);
+               e = get_tracepoint(tp->name);
+               if (!e || !e->tp)
+                       continue;
+               /* unregister each (probe, data) */
+               list_for_each_entry(p, &e->probes, list) {
+                       int ret;
+
+                       ret = tracepoint_probe_unregister(e->tp,
+                                       p->tp_func.func, p->tp_func.data);
+                       WARN_ON_ONCE(ret);
+               }
+               e->tp = NULL;
+               if (!--e->refcount)
+                       remove_tracepoint(e);
+       }
+       mutex_unlock(&lttng_tracepoint_mutex);
+       return 0;
+}
+
+static
+int lttng_tracepoint_notify(struct notifier_block *self,
+               unsigned long val, void *data)
+{
+       struct tp_module *tp_mod = data;
+       int ret = 0;
+
+       switch (val) {
+       case MODULE_STATE_COMING:
+               ret = lttng_tracepoint_coming(tp_mod);
+               break;
+       case MODULE_STATE_GOING:
+               ret = lttng_tracepoint_going(tp_mod);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+static
+struct notifier_block lttng_tracepoint_notifier = {
+       .notifier_call = lttng_tracepoint_notify,
+       .priority = 0,
+};
+
+static
+int lttng_tracepoint_module_init(void)
+{
+       return register_tracepoint_module_notifier(&lttng_tracepoint_notifier);
+}
+
+static
+void lttng_tracepoint_module_exit(void)
+{
+       WARN_ON(unregister_tracepoint_module_notifier(&lttng_tracepoint_notifier));
+}
+
+#else /* #ifdef CONFIG_MODULES */
+
+static
+int lttng_tracepoint_module_init(void)
+{
+       return 0;
+}
+
+static
+void lttng_tracepoint_module_exit(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_MODULES */
+
+static
+void lttng_kernel_tracepoint_add(struct tracepoint *tp, void *priv)
+{
+       struct tracepoint_entry *e;
+       struct lttng_tp_probe *p;
+       int *ret = priv;
+
+       mutex_lock(&lttng_tracepoint_mutex);
+       e = get_tracepoint(tp->name);
+       if (!e) {
+               e = add_tracepoint(tp->name);
+               if (IS_ERR(e)) {
+                       pr_warn("LTTng: error (%ld) adding tracepoint\n",
+                               PTR_ERR(e));
+                       *ret = (int) PTR_ERR(e);
+                       goto end;
+               }
+       }
+       /* If already enabled, just check consistency */
+       if (e->tp) {
+               WARN_ON(e->tp != tp);
+               goto end;
+       }
+       e->tp = tp;
+       e->refcount++;
+       /* register each (probe, data) */
+       list_for_each_entry(p, &e->probes, list) {
+               int ret;
+
+               ret = tracepoint_probe_register(e->tp,
+                               p->tp_func.func, p->tp_func.data);
+               WARN_ON_ONCE(ret);
+       }
+end:
+       mutex_unlock(&lttng_tracepoint_mutex);
+}
+
+static
+void lttng_kernel_tracepoint_remove(struct tracepoint *tp, void *priv)
+{
+       struct tracepoint_entry *e;
+       int *ret = priv;
+
+       mutex_lock(&lttng_tracepoint_mutex);
+       e = get_tracepoint(tp->name);
+       if (!e || e->refcount != 1 || !list_empty(&e->probes)) {
+               *ret = -EINVAL;
+               goto end;
+       }
+       remove_tracepoint(e);
+end:
+       mutex_unlock(&lttng_tracepoint_mutex);
+}
+
+int __init lttng_tracepoint_init(void)
+{
+       int ret = 0;
+
+       for_each_kernel_tracepoint(lttng_kernel_tracepoint_add, &ret);
+       if (ret)
+               goto error;
+       ret = lttng_tracepoint_module_init();
+       if (ret)
+               goto error_module;
+       return 0;
+
+error_module:
+       {
+               int error_ret = 0;
+
+               for_each_kernel_tracepoint(lttng_kernel_tracepoint_remove,
+                               &error_ret);
+               WARN_ON(error_ret);
+       }
+error:
+       return ret;
+}
+
+void lttng_tracepoint_exit(void)
+{
+       int i, ret = 0;
+
+       lttng_tracepoint_module_exit();
+       for_each_kernel_tracepoint(lttng_kernel_tracepoint_remove, &ret);
+       WARN_ON(ret);
+       mutex_lock(&lttng_tracepoint_mutex);
+       for (i = 0; i < TRACEPOINT_TABLE_SIZE; i++) {
+               struct hlist_head *head = &tracepoint_table[i];
+
+               /* All tracepoints should be removed */
+               WARN_ON(!hlist_empty(head));
+       }
+       mutex_unlock(&lttng_tracepoint_mutex);
+}
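
The API above keys probes by tracepoint name rather than by struct tracepoint pointer, so a probe can be registered before the module providing the tracepoint is loaded. A minimal sketch of a caller (hypothetical probe; the sched_switch prototype shown here varies across kernel versions):

        static void probe_sched_switch(void *data, bool preempt,
                        struct task_struct *prev, struct task_struct *next)
        {
                /* ... record the scheduling event ... */
        }

        static int example_attach(void)
        {
                return lttng_tracepoint_probe_register("sched_switch",
                                (void *) probe_sched_switch, NULL);
        }

        static void example_detach(void)
        {
                WARN_ON(lttng_tracepoint_probe_unregister("sched_switch",
                                (void *) probe_sched_switch, NULL));
        }

If the tracepoint is not loaded at registration time, the (probe, data) pair is simply queued in the hash table; the module coming/going notifier above attaches and detaches it as the providing module appears and disappears.
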
diff --git a/src/lttng-tracker-id.c b/src/lttng-tracker-id.c
new file mode 100644 (file)
index 0000000..205c4af
--- /dev/null
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-tracker-id.c
+ *
+ * LTTng ID tracking.
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/stringify.h>
+#include <linux/hash.h>
+#include <linux/rcupdate.h>
+
+#include <wrapper/tracepoint.h>
+#include <wrapper/rcu.h>
+#include <wrapper/list.h>
+#include <lttng/events.h>
+
+/*
+ * Hash table is allocated and freed when there are no possible
+ * concurrent lookups (ensured by the alloc/free caller). However,
+ * there can be concurrent RCU lookups vs add/del operations.
+ *
+ * Concurrent updates of the ID hash table are forbidden: the caller
+ * must ensure mutual exclusion. This is currently done by holding the
+ * sessions_mutex across calls to create, destroy, add, and del
+ * functions of this API.
+ */
+int lttng_id_tracker_get_node_id(const struct lttng_id_hash_node *node)
+{
+       return node->id;
+}
+
+/*
+ * Lookup performed from RCU read-side critical section (RCU sched),
+ * protected by preemption off at the tracepoint call site.
+ * Return true if found, false if not found.
+ */
+bool lttng_id_tracker_lookup(struct lttng_id_tracker_rcu *p, int id)
+{
+       struct hlist_head *head;
+       struct lttng_id_hash_node *e;
+       uint32_t hash = hash_32(id, 32);
+
+       head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
+       lttng_hlist_for_each_entry_rcu(e, head, hlist) {
+               if (id == e->id)
+                       return true;    /* Found */
+       }
+       return false;
+}
+EXPORT_SYMBOL_GPL(lttng_id_tracker_lookup);
+
+static struct lttng_id_tracker_rcu *lttng_id_tracker_rcu_create(void)
+{
+       struct lttng_id_tracker_rcu *tracker;
+
+       tracker = kzalloc(sizeof(struct lttng_id_tracker_rcu), GFP_KERNEL);
+       if (!tracker)
+               return NULL;
+       return tracker;
+}
+
+/*
+ * Tracker add and del operations support concurrent RCU lookups.
+ */
+int lttng_id_tracker_add(struct lttng_id_tracker *lf, int id)
+{
+       struct hlist_head *head;
+       struct lttng_id_hash_node *e;
+       struct lttng_id_tracker_rcu *p = lf->p;
+       uint32_t hash = hash_32(id, 32);
+       bool allocated = false;
+
+       if (!p) {
+               p = lttng_id_tracker_rcu_create();
+               if (!p)
+                       return -ENOMEM;
+               allocated = true;
+       }
+       head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
+       lttng_hlist_for_each_entry(e, head, hlist) {
+               if (id == e->id)
+                       return -EEXIST;
+       }
+       e = kmalloc(sizeof(struct lttng_id_hash_node), GFP_KERNEL);
+       if (!e) {
+               if (allocated)
+                       kfree(p);       /* Don't leak the tracker created above. */
+               return -ENOMEM;
+       }
+       e->id = id;
+       hlist_add_head_rcu(&e->hlist, head);
+       if (allocated) {
+               rcu_assign_pointer(lf->p, p);
+       }
+       return 0;
+}
+
+static
+void id_tracker_del_node_rcu(struct lttng_id_hash_node *e)
+{
+       hlist_del_rcu(&e->hlist);
+       /*
+        * We choose to use a heavyweight synchronize on removal here,
+        * since removal of an ID from the tracker mask is a rare
+        * operation, and we don't want to use more cache lines than
+        * strictly needed for the ID lookups, so we cannot afford to
+        * add an rcu_head field to the ID hash nodes.
+        */
+       synchronize_trace();
+       kfree(e);
+}
+
+/*
+ * This removal is only used on destroy, so it does not need to support
+ * concurrent RCU lookups.
+ */
+static
+void id_tracker_del_node(struct lttng_id_hash_node *e)
+{
+       hlist_del(&e->hlist);
+       kfree(e);
+}
+
+int lttng_id_tracker_del(struct lttng_id_tracker *lf, int id)
+{
+       struct hlist_head *head;
+       struct lttng_id_hash_node *e;
+       struct lttng_id_tracker_rcu *p = lf->p;
+       uint32_t hash = hash_32(id, 32);
+
+       if (!p)
+               return -ENOENT;
+       head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
+       /*
+        * No need for _safe iteration: we stop traversal as soon
+        * as we remove the entry.
+        */
+       lttng_hlist_for_each_entry(e, head, hlist) {
+               if (id == e->id) {
+                       id_tracker_del_node_rcu(e);
+                       return 0;
+               }
+       }
+       return -ENOENT; /* Not found */
+}
+
+static void lttng_id_tracker_rcu_destroy(struct lttng_id_tracker_rcu *p)
+{
+       int i;
+
+       if (!p)
+               return;
+       for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
+               struct hlist_head *head = &p->id_hash[i];
+               struct lttng_id_hash_node *e;
+               struct hlist_node *tmp;
+
+               lttng_hlist_for_each_entry_safe(e, tmp, head, hlist)
+                       id_tracker_del_node(e);
+       }
+       kfree(p);
+}
+
+int lttng_id_tracker_empty_set(struct lttng_id_tracker *lf)
+{
+       struct lttng_id_tracker_rcu *p, *oldp;
+
+       p = lttng_id_tracker_rcu_create();
+       if (!p)
+               return -ENOMEM;
+       oldp = lf->p;
+       rcu_assign_pointer(lf->p, p);
+       synchronize_trace();
+       lttng_id_tracker_rcu_destroy(oldp);
+       return 0;
+}
+
+void lttng_id_tracker_destroy(struct lttng_id_tracker *lf, bool rcu)
+{
+       struct lttng_id_tracker_rcu *p = lf->p;
+
+       if (!lf->p)
+               return;
+       rcu_assign_pointer(lf->p, NULL);
+       if (rcu)
+               synchronize_trace();
+       lttng_id_tracker_rcu_destroy(p);
+}
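
A sketch of the intended fast path (hypothetical helper; lttng_rcu_dereference from wrapper/rcu.h and the "no tracker installed means track everything" convention are assumptions):

        /* Called from tracing context, with preemption disabled. */
        static bool example_id_is_tracked(struct lttng_id_tracker *tracker, int id)
        {
                struct lttng_id_tracker_rcu *p = lttng_rcu_dereference(tracker->p);

                if (!p)
                        return true;    /* No tracker installed: track all IDs. */
                return lttng_id_tracker_lookup(p, id);
        }

Add, del, and destroy are serialized by the caller (holding sessions_mutex), while lookups like this one run concurrently under RCU.
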
diff --git a/src/lttng-wrapper-impl.c b/src/lttng-wrapper-impl.c
new file mode 100644 (file)
index 0000000..e7f5660
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-wrapper-impl.c
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+static int __init lttng_wrapper_init(void)
+{
+       return 0;
+}
+
+module_init(lttng_wrapper_init);
+
+static void __exit lttng_wrapper_exit(void)
+{
+}
+
+module_exit(lttng_wrapper_exit);
+
+#include <generated/patches.i>
+#ifdef LTTNG_EXTRA_VERSION_GIT
+MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
+#endif
+#ifdef LTTNG_EXTRA_VERSION_NAME
+MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
+#endif
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng wrapper");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/Kbuild b/src/probes/Kbuild
new file mode 100644 (file)
index 0000000..0fc7e9c
--- /dev/null
@@ -0,0 +1,259 @@
+# SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+
+TOP_LTTNG_MODULES_DIR := $(shell dirname $(lastword $(MAKEFILE_LIST)))/../..
+
+include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common
+
+ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)/include
+
+obj-$(CONFIG_LTTNG) += lttng-probe-sched.o
+obj-$(CONFIG_LTTNG) += lttng-probe-irq.o
+obj-$(CONFIG_LTTNG) += lttng-probe-timer.o
+obj-$(CONFIG_LTTNG) += lttng-probe-kmem.o
+obj-$(CONFIG_LTTNG) += lttng-probe-module.o
+obj-$(CONFIG_LTTNG) += lttng-probe-power.o
+obj-$(CONFIG_LTTNG) += lttng-probe-statedump.o
+
+ifneq ($(CONFIG_NET_9P),)
+  obj-$(CONFIG_LTTNG) +=  $(shell \
+    if [ $(VERSION) -ge 4 \
+      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 10 \) ] ; then \
+      echo "lttng-probe-9p.o" ; fi;)
+endif # CONFIG_NET_9P
+
+i2c_dep = $(srctree)/include/trace/events/i2c.h
+ifneq ($(wildcard $(i2c_dep)),)
+  obj-$(CONFIG_LTTNG) += lttng-probe-i2c.o
+endif
+
+ifneq ($(CONFIG_KVM),)
+  obj-$(CONFIG_LTTNG) += lttng-probe-kvm.o
+  ifneq ($(CONFIG_X86),)
+    kvm_dep_lapic = $(srctree)/arch/x86/kvm/lapic.h
+    kvm_dep_lapic_check = $(wildcard $(kvm_dep_lapic))
+    ifneq ($(kvm_dep_lapic_check),)
+      # search for iodev.h in any of its known locations
+      kvm_dep_iodev = $(srctree)/virt/kvm/iodev.h $(srctree)/include/kvm/iodev.h
+      kvm_dep_iodev_check = $(wildcard $(kvm_dep_iodev))
+      ifneq ($(kvm_dep_iodev_check),)
+        kvm_dep_emulate = $(srctree)/arch/x86/kvm/kvm_emulate.h
+        kvm_dep_emulate_wildcard = $(wildcard $(kvm_dep_emulate))
+        kvm_dep_emulate_check = $(shell \
+        if [ \( $(VERSION) -ge 6 \
+           -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 7 \) \) -a \
+           -z "$(kvm_dep_emulate_wildcard)" ] ; then \
+          echo "warn" ; \
+        else \
+          echo "ok" ; \
+        fi ;)
+        ifeq ($(kvm_dep_emulate_check),ok)
+          CFLAGS_lttng-probe-kvm-x86.o += -I$(srctree)/virt/kvm \
+                     -I$(srctree)/arch/x86/kvm
+          CFLAGS_lttng-probe-kvm-x86-mmu.o += -I$(srctree)/virt/kvm
+          obj-$(CONFIG_LTTNG) += lttng-probe-kvm-x86.o
+          obj-$(CONFIG_LTTNG) += lttng-probe-kvm-x86-mmu.o
+        else # ($(kvm_dep_emulate_check),ok)
+          $(warning File $(kvm_dep_emulate) not found. Probe "kvm" x86-specific is disabled. Use full kernel source tree to enable it.)
+        endif # ($(kvm_dep_emulate_check),ok)
+      else # $(kvm_dep_iodev_check)
+        $(warning File $(kvm_dep_iodev) not found. Probe "kvm" x86-specific is disabled. Use full kernel source tree to enable it.)
+      endif # $(kvm_dep_iodev_check)
+    else # $(kvm_dep_lapic_check)
+      $(warning File $(kvm_dep_lapic) not found. Probe "kvm" x86-specific is disabled. Use full kernel source tree to enable it.)
+    endif # $(kvm_dep_lapic_check)
+  endif # CONFIG_X86
+endif # CONFIG_KVM
+
+ifneq ($(CONFIG_X86),)
+  x86_irq_vectors_dep = $(srctree)/arch/x86/include/asm/trace/irq_vectors.h
+
+  ifneq ($(wildcard $(x86_irq_vectors_dep)),)
+    obj-$(CONFIG_LTTNG) += lttng-probe-x86-irq-vectors.o
+  endif # $(wildcard $(x86_irq_vectors_dep))
+
+  x86_exceptions_dep = $(srctree)/arch/x86/include/asm/trace/exceptions.h
+
+  ifneq ($(wildcard $(x86_exceptions_dep)),)
+    obj-$(CONFIG_LTTNG) += lttng-probe-x86-exceptions.o
+  endif # $(wildcard $(x86_exceptions_dep))
+endif # CONFIG_X86
+
+obj-$(CONFIG_LTTNG) += lttng-probe-signal.o
+
+ifneq ($(CONFIG_BLOCK),)
+  # need blk_cmd_buf_len
+  ifneq ($(CONFIG_EVENT_TRACING),)
+    obj-$(CONFIG_LTTNG) += lttng-probe-block.o
+  endif # CONFIG_EVENT_TRACING
+endif # CONFIG_BLOCK
+
+ifneq ($(CONFIG_NET),)
+  obj-$(CONFIG_LTTNG) += lttng-probe-napi.o
+  obj-$(CONFIG_LTTNG) += lttng-probe-skb.o
+  obj-$(CONFIG_LTTNG) += lttng-probe-net.o
+  obj-$(CONFIG_LTTNG) +=  $(shell \
+    if [ $(VERSION) -ge 4 \
+      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 1 \) ] ; then \
+      echo "lttng-probe-sock.o" ; fi;)
+  obj-$(CONFIG_LTTNG) +=  $(shell \
+    if [ $(VERSION) -ge 4 \
+      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 1 \) ] ; then \
+      echo "lttng-probe-udp.o" ; fi;)
+endif # CONFIG_NET
+
+ifneq ($(CONFIG_SND_SOC),)
+  obj-$(CONFIG_LTTNG) += lttng-probe-asoc.o
+endif # CONFIG_SND_SOC
+
+ifneq ($(CONFIG_EXT3_FS),)
+  ext3_dep = $(srctree)/fs/ext3/*.h
+  ext3_dep_check = $(wildcard $(ext3_dep))
+  ext3 = $(shell \
+    if [ $(VERSION) -lt 4 -o \( $(VERSION) -eq 4 -a $(PATCHLEVEL) -lt 3 \) ] ; then \
+      if [ $(VERSION) -ge 4 -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 1 \) ] ; then \
+        if [ \( $(VERSION) -ge 4 -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 4 \) \) -a \
+          -z "$(ext3_dep_check)" ] ; then \
+          echo "warn" ; \
+          exit ; \
+        fi; \
+        echo "lttng-probe-ext3.o" ; \
+      fi; \
+    fi;)
+  ifeq ($(ext3),warn)
+    $(warning Files $(ext3_dep) not found. Probe "ext3" is disabled. Use full kernel source tree to enable it.)
+    ext3 =
+  endif # $(ext3),warn
+  obj-$(CONFIG_LTTNG) += $(ext3)
+endif # CONFIG_EXT3_FS
+
+ifneq ($(CONFIG_GPIOLIB),)
+  obj-$(CONFIG_LTTNG) +=  $(shell \
+    if [ $(VERSION) -ge 3 ] ; then \
+      echo "lttng-probe-gpio.o" ; fi;)
+endif # CONFIG_GPIOLIB
+
+ifneq ($(CONFIG_JBD2),)
+  obj-$(CONFIG_LTTNG) += lttng-probe-jbd2.o
+endif # CONFIG_JBD2
+
+ifneq ($(CONFIG_JBD),)
+  obj-$(CONFIG_LTTNG) +=  $(shell \
+    if [ $(VERSION) -ge 4 \
+      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 1 \) ] ; then \
+      echo "lttng-probe-jbd.o" ; fi;)
+endif # CONFIG_JBD
+
+ifneq ($(CONFIG_REGULATOR),)
+  obj-$(CONFIG_LTTNG) += lttng-probe-regulator.o
+endif # CONFIG_REGULATOR
+
+ifneq ($(CONFIG_SCSI),)
+  obj-$(CONFIG_LTTNG) += lttng-probe-scsi.o
+endif # CONFIG_SCSI
+
+obj-$(CONFIG_LTTNG) += lttng-probe-vmscan.o
+
+# lock probe does not work, so disabling it for now
+#ifneq ($(CONFIG_LOCKDEP),)
+#  obj-$(CONFIG_LTTNG) += lttng-probe-lock.o
+#endif # CONFIG_LOCKDEP
+
+ifneq ($(CONFIG_BTRFS_FS),)
+  btrfs_dep = $(srctree)/fs/btrfs/*.h
+  ifneq ($(wildcard $(btrfs_dep)),)
+    obj-$(CONFIG_LTTNG) += lttng-probe-btrfs.o
+  else
+    $(warning Files $(btrfs_dep) not found. Probe "btrfs" is disabled. Use full kernel source tree to enable it.)
+  endif # $(wildcard $(btrfs_dep))
+endif # CONFIG_BTRFS_FS
+
+obj-$(CONFIG_LTTNG) += lttng-probe-compaction.o
+
+ifneq ($(CONFIG_EXT4_FS),)
+  ext4_dep = $(srctree)/fs/ext4/*.h
+  ifneq ($(wildcard $(ext4_dep)),)
+    obj-$(CONFIG_LTTNG) += lttng-probe-ext4.o
+  else
+    $(warning Files $(ext4_dep) not found. Probe "ext4" is disabled. Use full kernel source tree to enable it.)
+  endif # $(wildcard $(ext4_dep))
+endif # CONFIG_EXT4_FS
+
+obj-$(CONFIG_LTTNG) +=  $(shell \
+  if [ $(VERSION) -ge 4 \
+    -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 4 \) ] ; then \
+    echo "lttng-probe-printk.o" ; fi;)
+
+ifneq ($(CONFIG_FRAME_WARN),0)
+  CFLAGS_lttng-probe-printk.o += -Wframe-larger-than=2200
+endif
+
+obj-$(CONFIG_LTTNG) +=  $(shell \
+    if [ $(VERSION) -ge 4 \
+      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
+      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 5 -a $(SUBLEVEL) -ge 2 \) \
+      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 4 -a $(SUBLEVEL) -ge 9 \) \
+      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 0 -a $(SUBLEVEL) -ge 41 \) ] ; then \
+      echo "lttng-probe-random.o" ; fi;)
+
+obj-$(CONFIG_LTTNG) +=  $(shell \
+  if [ $(VERSION) -ge 4 \
+    -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 2 \) ] ; then \
+    echo "lttng-probe-rcu.o" ; fi;)
+
+ifneq ($(CONFIG_REGMAP),)
+  regmap_dep_4_1 = $(srctree)/drivers/base/regmap/trace.h
+  ifneq ($(wildcard $(regmap_dep_4_1)),)
+    obj-$(CONFIG_LTTNG) += lttng-probe-regmap.o
+  else
+    $(warning File $(regmap_dep_4_1) not found. Probe "regmap" is disabled. Need Linux 4.1+ kernel source tree to enable it.)
+  endif # $(wildcard $(regmap_dep_4_1)),
+endif # CONFIG_REGMAP
+
+ifneq ($(CONFIG_PM_RUNTIME),)
+  obj-$(CONFIG_LTTNG) +=  $(shell \
+    if [ $(VERSION) -ge 4 \
+      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 2 \) ] ; then \
+      echo "lttng-probe-rpm.o" ; fi;)
+endif # CONFIG_PM_RUNTIME
+
+ifneq ($(CONFIG_SUNRPC),)
+  obj-$(CONFIG_LTTNG) +=  $(shell \
+    if [ $(VERSION) -ge 4 \
+      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 4 \) ] ; then \
+      echo "lttng-probe-sunrpc.o" ; fi;)
+endif # CONFIG_SUNRPC
+
+ifneq ($(CONFIG_VIDEO_V4L2),)
+  obj-$(CONFIG_LTTNG) += $(shell \
+    if [ $(VERSION) -ge 4 \
+      -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 14 \) ] ; then \
+      echo "lttng-probe-v4l2.o" ; fi;)
+endif # CONFIG_VIDEO_V4L2
+
+obj-$(CONFIG_LTTNG) += lttng-probe-workqueue.o
+
+ifneq ($(CONFIG_KALLSYMS_ALL),)
+  obj-$(CONFIG_LTTNG) += lttng-probe-writeback.o
+else
+  ifdef CONFIG_LOCALVERSION # Check if dot-config is included.
+    $(warning CONFIG_KALLSYMS_ALL is disabled, therefore probe "writeback" is disabled. Rebuild your kernel with this configuration option enabled in order to trace this subsystem.)
+  endif
+endif # CONFIG_KALLSYMS_ALL
+
+ifneq ($(CONFIG_KPROBES),)
+  obj-$(CONFIG_LTTNG) += lttng-kprobes.o
+endif # CONFIG_KPROBES
+
+ifneq ($(CONFIG_UPROBES),)
+  obj-$(CONFIG_LTTNG) += lttng-uprobes.o
+endif # CONFIG_UPROBES
+
+ifneq ($(CONFIG_KRETPROBES),)
+  obj-$(CONFIG_LTTNG) += lttng-kretprobes.o
+endif # CONFIG_KRETPROBES
+
+ifneq ($(CONFIG_PREEMPTIRQ_EVENTS),)
+  obj-$(CONFIG_LTTNG) += lttng-probe-preemptirq.o
+endif # CONFIG_PREEMPTIRQ_EVENTS
+
+# vim:syntax=make
diff --git a/src/probes/lttng-kprobes.c b/src/probes/lttng-kprobes.c
new file mode 100644 (file)
index 0000000..a2474d0
--- /dev/null
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-kprobes.c
+ *
+ * LTTng kprobes integration module.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/irqflags.h>
+#include <lttng/tracer.h>
+#include <blacklist/kprobes.h>
+
+static
+int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs)
+{
+       struct lttng_event *event =
+               container_of(p, struct lttng_event, u.kprobe.kp);
+       struct lttng_probe_ctx lttng_probe_ctx = {
+               .event = event,
+               .interruptible = !lttng_regs_irqs_disabled(regs),
+       };
+       struct lttng_channel *chan = event->chan;
+       struct lib_ring_buffer_ctx ctx;
+       int ret;
+       unsigned long data = (unsigned long) p->addr;
+
+       if (unlikely(!READ_ONCE(chan->session->active)))
+               return 0;
+       if (unlikely(!READ_ONCE(chan->enabled)))
+               return 0;
+       if (unlikely(!READ_ONCE(event->enabled)))
+               return 0;
+
+       lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx, sizeof(data),
+                                lttng_alignof(data), -1);
+       ret = chan->ops->event_reserve(&ctx, event->id);
+       if (ret < 0)
+               return 0;
+       lib_ring_buffer_align_ctx(&ctx, lttng_alignof(data));
+       chan->ops->event_write(&ctx, &data, sizeof(data));
+       chan->ops->event_commit(&ctx);
+       return 0;
+}
+
+/*
+ * Create event description
+ */
+static
+int lttng_create_kprobe_event(const char *name, struct lttng_event *event)
+{
+       struct lttng_event_field *field;
+       struct lttng_event_desc *desc;
+       int ret;
+
+       desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+       desc->name = kstrdup(name, GFP_KERNEL);
+       if (!desc->name) {
+               ret = -ENOMEM;
+               goto error_str;
+       }
+       desc->nr_fields = 1;
+       desc->fields = field =
+               kzalloc(1 * sizeof(struct lttng_event_field), GFP_KERNEL);
+       if (!field) {
+               ret = -ENOMEM;
+               goto error_field;
+       }
+       field->name = "ip";
+       field->type.atype = atype_integer;
+       field->type.u.integer.size = sizeof(unsigned long) * CHAR_BIT;
+       field->type.u.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
+       field->type.u.integer.signedness = lttng_is_signed_type(unsigned long);
+       field->type.u.integer.reverse_byte_order = 0;
+       field->type.u.integer.base = 16;
+       field->type.u.integer.encoding = lttng_encode_none;
+       desc->owner = THIS_MODULE;
+       event->desc = desc;
+
+       return 0;
+
+error_field:
+       kfree(desc->name);
+error_str:
+       kfree(desc);
+       return ret;
+}
+
+int lttng_kprobes_register(const char *name,
+                          const char *symbol_name,
+                          uint64_t offset,
+                          uint64_t addr,
+                          struct lttng_event *event)
+{
+       int ret;
+
+       /* Kprobes expects a NULL symbol name if unused */
+       if (symbol_name[0] == '\0')
+               symbol_name = NULL;
+
+       ret = lttng_create_kprobe_event(name, event);
+       if (ret)
+               goto error;
+       memset(&event->u.kprobe.kp, 0, sizeof(event->u.kprobe.kp));
+       event->u.kprobe.kp.pre_handler = lttng_kprobes_handler_pre;
+       if (symbol_name) {
+               event->u.kprobe.symbol_name =
+                       kzalloc(LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char),
+                               GFP_KERNEL);
+               if (!event->u.kprobe.symbol_name) {
+                       ret = -ENOMEM;
+                       goto name_error;
+               }
+               memcpy(event->u.kprobe.symbol_name, symbol_name,
+                      LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char));
+               event->u.kprobe.kp.symbol_name =
+                       event->u.kprobe.symbol_name;
+       }
+       event->u.kprobe.kp.offset = offset;
+       event->u.kprobe.kp.addr = (void *) (unsigned long) addr;
+
+       /*
+        * Ensure the memory we just allocated doesn't trigger page faults.
+        * Kprobes itself puts the page fault handler on the blacklist,
+        * but we can never be too careful.
+        */
+       wrapper_vmalloc_sync_mappings();
+
+       ret = register_kprobe(&event->u.kprobe.kp);
+       if (ret)
+               goto register_error;
+       return 0;
+
+register_error:
+       kfree(event->u.kprobe.symbol_name);
+name_error:
+       kfree(event->desc->fields);
+       kfree(event->desc->name);
+       kfree(event->desc);
+error:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_register);
+
+void lttng_kprobes_unregister(struct lttng_event *event)
+{
+       unregister_kprobe(&event->u.kprobe.kp);
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_unregister);
+
+void lttng_kprobes_destroy_private(struct lttng_event *event)
+{
+       kfree(event->u.kprobe.symbol_name);
+       kfree(event->desc->fields);
+       kfree(event->desc->name);
+       kfree(event->desc);
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_private);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng kprobes probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
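
A hypothetical call sequence for this API (the struct lttng_event is normally allocated by the session ABI layer; the event name and symbol below are placeholders):

        /* Probe do_sys_open() by symbol; an empty symbol name with a
         * non-zero addr would select addressing by address instead. */
        ret = lttng_kprobes_register("open_entry", "do_sys_open", 0, 0, event);
        if (ret)
                return ret;
        /* ... trace ... */
        lttng_kprobes_unregister(event);
        lttng_kprobes_destroy_private(event);

Each hit records a single "ip" field carrying the probed address, as defined by lttng_create_kprobe_event() above.
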
diff --git a/src/probes/lttng-kretprobes.c b/src/probes/lttng-kretprobes.c
new file mode 100644 (file)
index 0000000..0067593
--- /dev/null
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-kretprobes.c
+ *
+ * LTTng kretprobes integration module.
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <lttng/events.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/irqflags.h>
+#include <lttng/tracer.h>
+#include <blacklist/kprobes.h>
+
+enum lttng_kretprobe_type {
+       EVENT_ENTRY = 0,
+       EVENT_RETURN = 1,
+};
+
+struct lttng_krp {
+       struct kretprobe krp;
+       struct lttng_event *event[2];   /* ENTRY and RETURN */
+       struct kref kref_register;
+       struct kref kref_alloc;
+};
+
+static
+int _lttng_kretprobes_handler(struct kretprobe_instance *krpi,
+                             struct pt_regs *regs,
+                             enum lttng_kretprobe_type type)
+{
+       struct lttng_krp *lttng_krp =
+               container_of(krpi->rp, struct lttng_krp, krp);
+       struct lttng_event *event =
+               lttng_krp->event[type];
+       struct lttng_probe_ctx lttng_probe_ctx = {
+               .event = event,
+               .interruptible = !lttng_regs_irqs_disabled(regs),
+       };
+       struct lttng_channel *chan = event->chan;
+       struct lib_ring_buffer_ctx ctx;
+       int ret;
+       struct {
+               unsigned long ip;
+               unsigned long parent_ip;
+       } payload;
+
+       if (unlikely(!READ_ONCE(chan->session->active)))
+               return 0;
+       if (unlikely(!READ_ONCE(chan->enabled)))
+               return 0;
+       if (unlikely(!READ_ONCE(event->enabled)))
+               return 0;
+
+       payload.ip = (unsigned long) krpi->rp->kp.addr;
+       payload.parent_ip = (unsigned long) krpi->ret_addr;
+
+       lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx, sizeof(payload),
+                                lttng_alignof(payload), -1);
+       ret = chan->ops->event_reserve(&ctx, event->id);
+       if (ret < 0)
+               return 0;
+       lib_ring_buffer_align_ctx(&ctx, lttng_alignof(payload));
+       chan->ops->event_write(&ctx, &payload, sizeof(payload));
+       chan->ops->event_commit(&ctx);
+       return 0;
+}
+
+static
+int lttng_kretprobes_handler_entry(struct kretprobe_instance *krpi,
+                                  struct pt_regs *regs)
+{
+       return _lttng_kretprobes_handler(krpi, regs, EVENT_ENTRY);
+}
+
+static
+int lttng_kretprobes_handler_return(struct kretprobe_instance *krpi,
+                                   struct pt_regs *regs)
+{
+       return _lttng_kretprobes_handler(krpi, regs, EVENT_RETURN);
+}
+
+/*
+ * Create event description
+ */
+static
+int lttng_create_kprobe_event(const char *name, struct lttng_event *event,
+                             enum lttng_kretprobe_type type)
+{
+       struct lttng_event_field *fields;
+       struct lttng_event_desc *desc;
+       int ret;
+       char *alloc_name;
+       size_t name_len;
+       const char *suffix = NULL;
+
+       desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+       name_len = strlen(name);
+       switch (type) {
+       case EVENT_ENTRY:
+               suffix = "_entry";
+               break;
+       case EVENT_RETURN:
+               suffix = "_return";
+               break;
+       }
+       name_len += strlen(suffix);
+       alloc_name = kmalloc(name_len + 1, GFP_KERNEL);
+       if (!alloc_name) {
+               ret = -ENOMEM;
+               goto error_str;
+       }
+       strcpy(alloc_name, name);
+       strcat(alloc_name, suffix);
+       desc->name = alloc_name;
+       desc->nr_fields = 2;
+       desc->fields = fields =
+               kzalloc(2 * sizeof(struct lttng_event_field), GFP_KERNEL);
+       if (!desc->fields) {
+               ret = -ENOMEM;
+               goto error_fields;
+       }
+       fields[0].name = "ip";
+       fields[0].type.atype = atype_integer;
+       fields[0].type.u.integer.size = sizeof(unsigned long) * CHAR_BIT;
+       fields[0].type.u.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
+       fields[0].type.u.integer.signedness = lttng_is_signed_type(unsigned long);
+       fields[0].type.u.integer.reverse_byte_order = 0;
+       fields[0].type.u.integer.base = 16;
+       fields[0].type.u.integer.encoding = lttng_encode_none;
+
+       fields[1].name = "parent_ip";
+       fields[1].type.atype = atype_integer;
+       fields[1].type.u.integer.size = sizeof(unsigned long) * CHAR_BIT;
+       fields[1].type.u.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
+       fields[1].type.u.integer.signedness = lttng_is_signed_type(unsigned long);
+       fields[1].type.u.integer.reverse_byte_order = 0;
+       fields[1].type.u.integer.base = 16;
+       fields[1].type.u.integer.encoding = lttng_encode_none;
+
+       desc->owner = THIS_MODULE;
+       event->desc = desc;
+
+       return 0;
+
+error_fields:
+       kfree(desc->name);
+error_str:
+       kfree(desc);
+       return ret;
+}
+
+int lttng_kretprobes_register(const char *name,
+                          const char *symbol_name,
+                          uint64_t offset,
+                          uint64_t addr,
+                          struct lttng_event *event_entry,
+                          struct lttng_event *event_return)
+{
+       int ret;
+       struct lttng_krp *lttng_krp;
+
+       /* Kprobes expects a NULL symbol name if unused */
+       if (symbol_name[0] == '\0')
+               symbol_name = NULL;
+
+       ret = lttng_create_kprobe_event(name, event_entry, EVENT_ENTRY);
+       if (ret)
+               goto error;
+       ret = lttng_create_kprobe_event(name, event_return, EVENT_RETURN);
+       if (ret)
+               goto event_return_error;
+       lttng_krp = kzalloc(sizeof(*lttng_krp), GFP_KERNEL);
+       if (!lttng_krp) {
+               ret = -ENOMEM;
+               goto krp_error;
+       }
+       lttng_krp->krp.entry_handler = lttng_kretprobes_handler_entry;
+       lttng_krp->krp.handler = lttng_kretprobes_handler_return;
+       if (symbol_name) {
+               char *alloc_symbol;
+
+               alloc_symbol = kstrdup(symbol_name, GFP_KERNEL);
+               if (!alloc_symbol) {
+                       ret = -ENOMEM;
+                       goto name_error;
+               }
+               lttng_krp->krp.kp.symbol_name =
+                       alloc_symbol;
+               event_entry->u.kretprobe.symbol_name =
+                       alloc_symbol;
+               event_return->u.kretprobe.symbol_name =
+                       alloc_symbol;
+       }
+       lttng_krp->krp.kp.offset = offset;
+       lttng_krp->krp.kp.addr = (void *) (unsigned long) addr;
+
+       /* Allow probe handler to find event structures */
+       lttng_krp->event[EVENT_ENTRY] = event_entry;
+       lttng_krp->event[EVENT_RETURN] = event_return;
+       event_entry->u.kretprobe.lttng_krp = lttng_krp;
+       event_return->u.kretprobe.lttng_krp = lttng_krp;
+
+       /*
+        * Both events must be unregistered before the kretprobe is
+        * unregistered. Same for memory allocation.
+        */
+       kref_init(&lttng_krp->kref_alloc);
+       kref_get(&lttng_krp->kref_alloc);       /* inc refcount to 2, no overflow. */
+       kref_init(&lttng_krp->kref_register);
+       kref_get(&lttng_krp->kref_register);    /* inc refcount to 2, no overflow. */
+
+       /*
+        * Ensure the memory we just allocated doesn't trigger page faults.
+        * Kprobes itself puts the page fault handler on the blacklist,
+        * but we can never be too careful.
+        */
+       wrapper_vmalloc_sync_mappings();
+
+       ret = register_kretprobe(&lttng_krp->krp);
+       if (ret)
+               goto register_error;
+       return 0;
+
+register_error:
+       kfree(lttng_krp->krp.kp.symbol_name);
+name_error:
+       kfree(lttng_krp);
+krp_error:
+       kfree(event_return->desc->fields);
+       kfree(event_return->desc->name);
+       kfree(event_return->desc);
+event_return_error:
+       kfree(event_entry->desc->fields);
+       kfree(event_entry->desc->name);
+       kfree(event_entry->desc);
+error:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_kretprobes_register);
+
+static
+void _lttng_kretprobes_unregister_release(struct kref *kref)
+{
+       struct lttng_krp *lttng_krp =
+               container_of(kref, struct lttng_krp, kref_register);
+       unregister_kretprobe(&lttng_krp->krp);
+}
+
+void lttng_kretprobes_unregister(struct lttng_event *event)
+{
+       kref_put(&event->u.kretprobe.lttng_krp->kref_register,
+               _lttng_kretprobes_unregister_release);
+}
+EXPORT_SYMBOL_GPL(lttng_kretprobes_unregister);
+
+static
+void _lttng_kretprobes_release(struct kref *kref)
+{
+       struct lttng_krp *lttng_krp =
+               container_of(kref, struct lttng_krp, kref_alloc);
+       kfree(lttng_krp->krp.kp.symbol_name);
+}
+
+void lttng_kretprobes_destroy_private(struct lttng_event *event)
+{
+       kfree(event->desc->fields);
+       kfree(event->desc->name);
+       kfree(event->desc);
+       kref_put(&event->u.kretprobe.lttng_krp->kref_alloc,
+               _lttng_kretprobes_release);
+}
+EXPORT_SYMBOL_GPL(lttng_kretprobes_destroy_private);
+
+int lttng_kretprobes_event_enable_state(struct lttng_event *event,
+               int enable)
+{
+       struct lttng_event *event_return;
+       struct lttng_krp *lttng_krp;
+
+       if (event->instrumentation != LTTNG_KERNEL_KRETPROBE) {
+               return -EINVAL;
+       }
+       if (event->enabled == enable) {
+               return -EBUSY;
+       }
+       lttng_krp = event->u.kretprobe.lttng_krp;
+       event_return = lttng_krp->event[EVENT_RETURN];
+       WRITE_ONCE(event->enabled, enable);
+       WRITE_ONCE(event_return->enabled, enable);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_kretprobes_event_enable_state);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng kretprobes probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
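
The double-kref scheme above enforces a teardown order shared by the entry/return event pair. A sketch of the expected sequence (hypothetical caller; both events come from a single lttng_kretprobes_register() call):

        /*
         * Each event drops one reference: the second kref_register put
         * runs unregister_kretprobe(), the second kref_alloc put frees
         * the shared symbol name.
         */
        lttng_kretprobes_unregister(event_entry);
        lttng_kretprobes_unregister(event_return);
        lttng_kretprobes_destroy_private(event_entry);
        lttng_kretprobes_destroy_private(event_return);

This guarantees the kretprobe is fully unregistered before either event releases the shared allocation, whichever event is torn down first.
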
diff --git a/src/probes/lttng-probe-9p.c b/src/probes/lttng-probe-9p.c
new file mode 100644 (file)
index 0000000..ec588e4
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-9p.c
+ *
+ * LTTng 9p probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2018 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/9p.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/9p.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Michael Jeanson <mjeanson@efficios.com>");
+MODULE_DESCRIPTION("LTTng 9p probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-asoc.c b/src/probes/lttng-probe-asoc.c
new file mode 100644 (file)
index 0000000..6cac9c6
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-asoc.c
+ *
+ * LTTng asoc probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <sound/jack.h>
+#include <sound/soc.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/asoc.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/asoc.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_AUTHOR("Paul Woegerer <paul_woegerer@mentor.com>");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng asoc probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-block.c b/src/probes/lttng-probe-block.c
new file mode 100644 (file)
index 0000000..5f8e830
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-block.c
+ *
+ * LTTng block probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/blktrace_api.h>
+#include <lttng/tracer.h>
+#include <lttng/kernel-version.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/block.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/block.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng block probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-btrfs.c b/src/probes/lttng-probe-btrfs.c
new file mode 100644 (file)
index 0000000..4461c99
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-btrfs.c
+ *
+ * LTTng btrfs probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <../fs/btrfs/ctree.h>
+#include <../fs/btrfs/transaction.h>
+#include <../fs/btrfs/volumes.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0))
+#include <../fs/btrfs/block-group.h>
+#endif
+#include <linux/dcache.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/btrfs.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/btrfs.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng btrfs probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-compaction.c b/src/probes/lttng-probe-compaction.c
new file mode 100644 (file)
index 0000000..f8ddf38
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-compaction.c
+ *
+ * LTTng compaction probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/compaction.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/compaction.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng compaction probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-ext3.c b/src/probes/lttng-probe-ext3.c
new file mode 100644 (file)
index 0000000..70adb56
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-ext3.c
+ *
+ * LTTng ext3 probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/version.h>
+#include <lttng/tracer.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+#include <../fs/ext3/ext3.h>
+#else
+#include <linux/ext3_fs_i.h>
+#endif
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/ext3.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/ext3.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>, Paul Woegerer <paul_woegerer@mentor.com>, and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng ext3 probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-ext4.c b/src/probes/lttng-probe-ext4.c
new file mode 100644 (file)
index 0000000..0d0e3a8
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-ext4.c
+ *
+ * LTTng ext4 probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <../fs/ext4/ext4.h>
+#include <../fs/ext4/mballoc.h>
+#include <../fs/ext4/ext4_extents.h>
+#include <linux/dcache.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/ext4.h>
+
+#include <lttng/kernel-version.h>
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/ext4.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng ext4 probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-gpio.c b/src/probes/lttng-probe-gpio.c
new file mode 100644 (file)
index 0000000..42b9b13
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-gpio.c
+ *
+ * LTTng gpio probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/gpio.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/gpio.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_DESCRIPTION("LTTng gpio probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-i2c.c b/src/probes/lttng-probe-i2c.c
new file mode 100644 (file)
index 0000000..9dc1c79
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-i2c.c
+ *
+ * LTTng i2c probes.
+ *
+ * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2016 Simon Marchi <simon.marchi@ericsson.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/i2c.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+static int extract_sensitive_payload;
+module_param(extract_sensitive_payload, int, 0644);
+MODULE_PARM_DESC(extract_sensitive_payload,
+               "Whether to extract possibly sensitive data from events (i2c "
+               "buffer contents) or not (1 or 0, default: 0).");
+
+#include <instrumentation/events/i2c.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Simon Marchi <simon.marchi@ericsson.com>");
+MODULE_DESCRIPTION("LTTng i2c probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-irq.c b/src/probes/lttng-probe-irq.c
new file mode 100644 (file)
index 0000000..f88093b
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-irq.c
+ *
+ * LTTng irq probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/irq.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/irq.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng irq probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-jbd.c b/src/probes/lttng-probe-jbd.c
new file mode 100644 (file)
index 0000000..21c0798
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-jbd.c
+ *
+ * LTTng jbd probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/jbd.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/jbd.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_AUTHOR("Paul Woegerer <paul_woegerer@mentor.com>");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng jbd probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-jbd2.c b/src/probes/lttng-probe-jbd2.c
new file mode 100644 (file)
index 0000000..ac3ac93
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-jbd2.c
+ *
+ * LTTng jbd2 probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/jbd2.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/jbd2.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng jbd2 probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-kmem.c b/src/probes/lttng-probe-kmem.c
new file mode 100644 (file)
index 0000000..1b120ab
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-kmem.c
+ *
+ * LTTng kmem probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+/*
+ * This page_alloc.h wrapper needs to be included before gfpflags.h because it
+ * overrides a function with a define.
+ */
+#include <wrapper/page_alloc.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/kmem.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/kmem.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng kmem probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-kvm-x86-mmu.c b/src/probes/lttng-probe-kvm-x86-mmu.c
new file mode 100644 (file)
index 0000000..9ccc242
--- /dev/null
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-kvm-x86-mmu.c
+ *
+ * LTTng kvm mmu probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+#include <lttng/tracer.h>
+#include <lttng/kernel-version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0))
+#include <kvm/iodev.h>
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)) */
+#include <../../virt/kvm/iodev.h>
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)) */
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <wrapper/tracepoint.h>
+
+#include <../../arch/x86/kvm/mmutrace.h>
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+
+#define TRACE_INCLUDE_PATH instrumentation/events/arch/x86/kvm
+#include <instrumentation/events/arch/x86/kvm/mmutrace.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng kvm mmu probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-kvm-x86.c b/src/probes/lttng-probe-kvm-x86.c
new file mode 100644 (file)
index 0000000..4e4f5c8
--- /dev/null
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-kvm-x86.c
+ *
+ * LTTng kvm probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+#include <lttng/tracer.h>
+#include <lttng/kernel-version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
+#include <kvm_emulate.h>
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0))
+#include <kvm/iodev.h>
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)) */
+#include <../../virt/kvm/iodev.h>
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)) */
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/kvm.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+
+#define TRACE_INCLUDE_PATH instrumentation/events/arch/x86/kvm
+#include <instrumentation/events/arch/x86/kvm/trace.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng kvm probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-kvm.c b/src/probes/lttng-probe-kvm.c
new file mode 100644 (file)
index 0000000..8b30f26
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-kvm.c
+ *
+ * LTTng kvm probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/kvm.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/kvm.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng kvm probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-lock.c b/src/probes/lttng-probe-lock.c
new file mode 100644 (file)
index 0000000..2ab9138
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-lock.c
+ *
+ * LTTng lock probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/lock.h>
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/lock.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng lock probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-module.c b/src/probes/lttng-probe-module.c
new file mode 100644 (file)
index 0000000..4f4f4a5
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-module.c
+ *
+ * LTTng module probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/module.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/module.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng module probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-napi.c b/src/probes/lttng-probe-napi.c
new file mode 100644 (file)
index 0000000..ce2bf8c
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-napi.c
+ *
+ * LTTng napi probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/napi.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/napi.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_DESCRIPTION("LTTng napi probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-net.c b/src/probes/lttng-probe-net.c
new file mode 100644 (file)
index 0000000..a0ef450
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-net.c
+ *
+ * LTTng net probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/net.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/net.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_DESCRIPTION("LTTng net probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-power.c b/src/probes/lttng-probe-power.c
new file mode 100644 (file)
index 0000000..d5ac38f
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-power.c
+ *
+ * LTTng power probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/power.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/power.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng power probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-preemptirq.c b/src/probes/lttng-probe-preemptirq.c
new file mode 100644 (file)
index 0000000..497b2de
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-preemptirq.c
+ *
+ * LTTng preemptirq probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2017 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/preemptirq.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/preemptirq.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Michael Jeanson <mjeanson@efficios.com>");
+MODULE_DESCRIPTION("LTTng preemptirq probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-printk.c b/src/probes/lttng-probe-printk.c
new file mode 100644 (file)
index 0000000..3a37826
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-printk.c
+ *
+ * LTTng printk probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/printk.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/printk.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng printk probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-random.c b/src/probes/lttng-probe-random.c
new file mode 100644 (file)
index 0000000..4cf6ce8
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-random.c
+ *
+ * LTTng random probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/random.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/random.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng random probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-rcu.c b/src/probes/lttng-probe-rcu.c
new file mode 100644 (file)
index 0000000..89c7213
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-rcu.c
+ *
+ * LTTng rcu probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/rcupdate.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/rcu.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/rcu.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng rcu probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-regmap.c b/src/probes/lttng-probe-regmap.c
new file mode 100644 (file)
index 0000000..f3eaef8
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-regmap.c
+ *
+ * LTTng regmap probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <lttng/kernel-version.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <../../drivers/base/regmap/trace.h>
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/regmap.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng regmap probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-regulator.c b/src/probes/lttng-probe-regulator.c
new file mode 100644 (file)
index 0000000..8f45771
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-regulator.c
+ *
+ * LTTng regulator probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/regulator.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/regulator.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_DESCRIPTION("LTTng regulator probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-rpm.c b/src/probes/lttng-probe-rpm.c
new file mode 100644 (file)
index 0000000..eea7bc3
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-rpm.c
+ *
+ * LTTng rpm probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/rpm.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/rpm.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng rpm probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-sched.c b/src/probes/lttng-probe-sched.c
new file mode 100644 (file)
index 0000000..ba1b3f7
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-sched.c
+ *
+ * LTTng sched probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/sched.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/sched.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng sched probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-scsi.c b/src/probes/lttng-probe-scsi.c
new file mode 100644 (file)
index 0000000..a367c51
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-scsi.c
+ *
+ * LTTng scsi probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <scsi/scsi_device.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/scsi.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/scsi.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng scsi probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-signal.c b/src/probes/lttng-probe-signal.c
new file mode 100644 (file)
index 0000000..aee9468
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-signal.c
+ *
+ * LTTng signal probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/signal.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/signal.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng signal probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-skb.c b/src/probes/lttng-probe-skb.c
new file mode 100644 (file)
index 0000000..682a9f0
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-skb.c
+ *
+ * LTTng skb probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/skb.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/skb.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng skb probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-sock.c b/src/probes/lttng-probe-sock.c
new file mode 100644 (file)
index 0000000..f3e1ebf
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-sock.c
+ *
+ * LTTng sock probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/sock.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/sock.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_DESCRIPTION("LTTng sock probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-statedump.c b/src/probes/lttng-probe-statedump.c
new file mode 100644 (file)
index 0000000..81e0613
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-statedump.c
+ *
+ * LTTng statedump probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netlink.h>
+#include <linux/inet.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/sched.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TP_SESSION_CHECK
+#define TRACE_INCLUDE_PATH instrumentation/events
+#define TRACE_INCLUDE_FILE lttng-statedump
+
+#include <instrumentation/events/lttng-statedump.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng statedump probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-sunrpc.c b/src/probes/lttng-probe-sunrpc.c
new file mode 100644 (file)
index 0000000..2244a57
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-sunrpc.c
+ *
+ * LTTng sunrpc probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/sunrpc.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/rpc.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng sunrpc probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-timer.c b/src/probes/lttng-probe-timer.c
new file mode 100644 (file)
index 0000000..149fcbc
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-timer.c
+ *
+ * LTTng timer probes.
+ *
+ * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+
+#include <linux/sched.h>
+#include <trace/events/timer.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/timer.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng timer probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-udp.c b/src/probes/lttng-probe-udp.c
new file mode 100644 (file)
index 0000000..ad7707b
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-udp.c
+ *
+ * LTTng udp probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/udp.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/udp.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_DESCRIPTION("LTTng udp probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-user.c b/src/probes/lttng-probe-user.c
new file mode 100644 (file)
index 0000000..009cfed
--- /dev/null
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-user.c
+ *
+ * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <wrapper/uaccess.h>
+#include <lttng/probe-user.h>
+
+/*
+ * Calculate the string length. The count includes the final null terminating
+ * character if there is one; otherwise the scan stops at the first fault.
+ * Disabling page faults ensures that we can safely call this from pretty much
+ * any context, including those where the caller holds mmap_sem, or any lock
+ * which nests in mmap_sem.
+ */
+long lttng_strlen_user_inatomic(const char *addr)
+{
+       long count = 0;
+       mm_segment_t old_fs;
+
+       if (!addr)
+               return 0;
+
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+       pagefault_disable();
+       for (;;) {
+               char v;
+               unsigned long ret;
+
+               if (unlikely(!lttng_access_ok(VERIFY_READ,
+                               (__force const char __user *) addr,
+                               sizeof(v))))
+                       break;
+               ret = __copy_from_user_inatomic(&v,
+                       (__force const char __user *)(addr),
+                       sizeof(v));
+               if (unlikely(ret > 0))
+                       break;
+               count++;
+               if (unlikely(!v))
+                       break;
+               addr++;
+       }
+       pagefault_enable();
+       set_fs(old_fs);
+       return count;
+}
+EXPORT_SYMBOL_GPL(lttng_strlen_user_inatomic);
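+
+/*
+ * Hypothetical caller sketch (illustration only): measuring a string passed
+ * in from userspace before copying it into an event payload:
+ *
+ *   long len = lttng_strlen_user_inatomic(user_str);
+ *
+ * len includes the terminating null character when it is readable; a NULL
+ * pointer yields 0, and a fault mid-string yields the readable prefix length.
+ */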
diff --git a/src/probes/lttng-probe-v4l2.c b/src/probes/lttng-probe-v4l2.c
new file mode 100644 (file)
index 0000000..0c86da0
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-v4l2.c
+ *
+ * LTTng v4l2 probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012,2013 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/videodev2.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-common.h>
+#include <lttng/tracer.h>
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/v4l2.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/v4l2.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_DESCRIPTION("LTTng v4l2 probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-vmscan.c b/src/probes/lttng-probe-vmscan.c
new file mode 100644 (file)
index 0000000..8e1f605
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-vmscan.c
+ *
+ * LTTng vmscan probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/vmscan.h>
+
+#include <lttng/kernel-version.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/mm_vmscan.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
+MODULE_AUTHOR("Paul Woegerer <paul_woegerer@mentor.com>");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng vmscan probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-workqueue.c b/src/probes/lttng-probe-workqueue.c
new file mode 100644 (file)
index 0000000..57cd560
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-workqueue.c
+ *
+ * LTTng workqueue probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <lttng/tracer.h>
+
+struct cpu_workqueue_struct;
+struct pool_workqueue;
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/workqueue.h>
+
+#include <wrapper/tracepoint.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/workqueue.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng workqueue probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-writeback.c b/src/probes/lttng-probe-writeback.c
new file mode 100644 (file)
index 0000000..727f2b7
--- /dev/null
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-writeback.c
+ *
+ * LTTng writeback probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/writeback.h>
+
+#include <lttng/kernel-version.h>
+#include <wrapper/writeback.h>
+
+/* #if <check kernel version here once global_dirty_limit is exported> */
+
+#define global_dirty_limit wrapper_global_dirty_limit()
+
+/* #endif <check version number> */
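+/*
+ * Illustrative note (assumption about the wrapper, not original text): with
+ * the define above, references to global_dirty_limit inside the trace event
+ * definitions included below expand to wrapper_global_dirty_limit() calls,
+ * e.g.
+ *
+ *   __entry->dirty_limit = global_dirty_limit;
+ *   // expands to: __entry->dirty_limit = wrapper_global_dirty_limit();
+ *
+ * letting the wrapper resolve the kernel symbol (e.g. through kallsyms)
+ * instead of requiring the variable to be exported to modules.
+ */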
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+
+#include <instrumentation/events/writeback.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng writeback probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-x86-exceptions.c b/src/probes/lttng-probe-x86-exceptions.c
new file mode 100644 (file)
index 0000000..4a7d4e4
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-x86-exceptions.c
+ *
+ * LTTng x86 exceptions probes.
+ *
+ * Copyright (C) 2010-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <asm/trace/exceptions.h>
+
+#include <wrapper/tracepoint.h>
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events/arch/x86
+
+#include <instrumentation/events/arch/x86/exceptions.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng x86 exceptions probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-probe-x86-irq-vectors.c b/src/probes/lttng-probe-x86-irq-vectors.c
new file mode 100644 (file)
index 0000000..1f64406
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * probes/lttng-probe-x86-irq-vectors.c
+ *
+ * LTTng x86 irq vectors probes.
+ *
+ * Copyright (C) 2010-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <asm/trace/irq_vectors.h>
+
+#include <wrapper/tracepoint.h>
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events/arch/x86
+
+#include <instrumentation/events/arch/x86/irq_vectors.h>
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng x86 irq vectors probes");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/probes/lttng-uprobes.c b/src/probes/lttng-uprobes.c
new file mode 100644 (file)
index 0000000..c0f6e7c
--- /dev/null
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * probes/lttng-uprobes.c
+ *
+ * LTTng uprobes integration module.
+ *
+ * Copyright (C) 2013 Yannick Brosseau <yannick.brosseau@gmail.com>
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ */
+
+#include <linux/fdtable.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+#include <wrapper/irqflags.h>
+#include <ringbuffer/frontend_types.h>
+#include <wrapper/uprobes.h>
+#include <wrapper/vmalloc.h>
+
+static
+int lttng_uprobes_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
+{
+       struct lttng_uprobe_handler *uprobe_handler =
+               container_of(uc, struct lttng_uprobe_handler, up_consumer);
+       struct lttng_event *event = uprobe_handler->event;
+       struct lttng_probe_ctx lttng_probe_ctx = {
+               .event = event,
+               .interruptible = !lttng_regs_irqs_disabled(regs),
+       };
+       struct lttng_channel *chan = event->chan;
+       struct lib_ring_buffer_ctx ctx;
+       int ret;
+
+       struct {
+               unsigned long ip;
+       } payload;
+
+       if (unlikely(!READ_ONCE(chan->session->active)))
+               return 0;
+       if (unlikely(!READ_ONCE(chan->enabled)))
+               return 0;
+       if (unlikely(!READ_ONCE(event->enabled)))
+               return 0;
+
+       lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx,
+               sizeof(payload), lttng_alignof(payload), -1);
+
+       ret = chan->ops->event_reserve(&ctx, event->id);
+       if (ret < 0)
+               return 0;
+
+       /* Event payload. */
+       payload.ip = (unsigned long)instruction_pointer(regs);
+
+       lib_ring_buffer_align_ctx(&ctx, lttng_alignof(payload));
+       chan->ops->event_write(&ctx, &payload, sizeof(payload));
+       chan->ops->event_commit(&ctx);
+       return 0;
+}
+
+/*
+ * Create event description.
+ */
+static
+int lttng_create_uprobe_event(const char *name, struct lttng_event *event)
+{
+       struct lttng_event_desc *desc;
+       struct lttng_event_field *fields;
+       int ret;
+
+       desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+       desc->name = kstrdup(name, GFP_KERNEL);
+       if (!desc->name) {
+               ret = -ENOMEM;
+               goto error_str;
+       }
+
+       desc->nr_fields = 1;
+       desc->fields = fields =
+               kzalloc(1 * sizeof(struct lttng_event_field), GFP_KERNEL);
+
+       if (!desc->fields) {
+               ret = -ENOMEM;
+               goto error_fields;
+       }
+       fields[0].name = "ip";
+       fields[0].type.atype = atype_integer;
+       fields[0].type.u.integer.size = sizeof(unsigned long) * CHAR_BIT;
+       fields[0].type.u.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
+       fields[0].type.u.integer.signedness = lttng_is_signed_type(unsigned long);
+       fields[0].type.u.integer.reverse_byte_order = 0;
+       fields[0].type.u.integer.base = 16;
+       fields[0].type.u.integer.encoding = lttng_encode_none;
+
+       desc->owner = THIS_MODULE;
+       event->desc = desc;
+
+       return 0;
+
+error_fields:
+       kfree(desc->name);
+error_str:
+       kfree(desc);
+       return ret;
+}
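+
+/*
+ * Illustrative note (assumption about the resulting metadata): on a 64-bit
+ * kernel, the single "ip" field declared above would show up in the CTF
+ * metadata roughly as
+ *
+ *   integer { size = 64; align = 64; signed = false; base = 16; } _ip;
+ *
+ * i.e. the instruction pointer at which the uprobe fired, printed in hex.
+ */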
+
+/*
+ * Returns the inode struct from the current task and an fd. The inode is
+ * grabbed by this function and must be put once we are done with it using
+ * iput().
+ */
+static struct inode *get_inode_from_fd(int fd)
+{
+       struct file *file;
+       struct inode *inode;
+
+       rcu_read_lock();
+       /*
+        * Returns the file backing the given fd. Needs to be done inside an RCU
+        * critical section.
+        */
+       file = fcheck(fd);
+       if (file == NULL) {
+               printk(KERN_WARNING "Cannot access file backing the fd(%d)\n", fd);
+               inode = NULL;
+               goto error;
+       }
+
+       /* Grab a reference on the inode. */
+       inode = igrab(file->f_path.dentry->d_inode);
+       if (inode == NULL)
+               printk(KERN_WARNING "Cannot grab a reference on the inode.\n");
+error:
+       rcu_read_unlock();
+       return inode;
+}
+
+int lttng_uprobes_add_callsite(struct lttng_event *event,
+       struct lttng_kernel_event_callsite __user *callsite)
+{
+       int ret = 0;
+       struct lttng_uprobe_handler *uprobe_handler;
+
+       if (!event) {
+               ret = -EINVAL;
+               goto end;
+       }
+
+       uprobe_handler = kzalloc(sizeof(struct lttng_uprobe_handler), GFP_KERNEL);
+       if (!uprobe_handler) {
+               printk(KERN_WARNING "Error allocating uprobe_handler\n");
+               ret = -ENOMEM;
+               goto end;
+       }
+
+       /* Ensure the memory we just allocated doesn't trigger page faults. */
+       wrapper_vmalloc_sync_mappings();
+
+       uprobe_handler->event = event;
+       uprobe_handler->up_consumer.handler = lttng_uprobes_handler_pre;
+
+       ret = copy_from_user(&uprobe_handler->offset, &callsite->u.uprobe.offset, sizeof(uint64_t));
+       if (ret) {
+               goto register_error;
+       }
+
+       ret = wrapper_uprobe_register(event->u.uprobe.inode,
+                     uprobe_handler->offset, &uprobe_handler->up_consumer);
+       if (ret) {
+               printk(KERN_WARNING "Error registering probe on inode %lu "
+                      "and offset 0x%llx\n", event->u.uprobe.inode->i_ino,
+                      uprobe_handler->offset);
+               ret = -1;
+               goto register_error;
+       }
+
+       list_add(&uprobe_handler->node, &event->u.uprobe.head);
+
+       return ret;
+
+register_error:
+       kfree(uprobe_handler);
+end:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_add_callsite);
+
+int lttng_uprobes_register(const char *name, int fd, struct lttng_event *event)
+{
+       int ret = 0;
+       struct inode *inode;
+
+       ret = lttng_create_uprobe_event(name, event);
+       if (ret)
+               goto error;
+
+       inode = get_inode_from_fd(fd);
+       if (!inode) {
+               printk(KERN_WARNING "Cannot get inode from fd\n");
+               ret = -EBADF;
+               goto inode_error;
+       }
+       event->u.uprobe.inode = inode;
+       INIT_LIST_HEAD(&event->u.uprobe.head);
+
+       return 0;
+
+inode_error:
+       kfree(event->desc->name);
+       kfree(event->desc);
+error:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_register);
+
+void lttng_uprobes_unregister(struct lttng_event *event)
+{
+       struct lttng_uprobe_handler *iter, *tmp;
+
+       /*
+        * Iterate over the list of handlers, remove each handler from the list
+        * and free the struct.
+        */
+       list_for_each_entry_safe(iter, tmp, &event->u.uprobe.head, node) {
+               wrapper_uprobe_unregister(event->u.uprobe.inode, iter->offset,
+                       &iter->up_consumer);
+               list_del(&iter->node);
+               kfree(iter);
+       }
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_unregister);
+
+void lttng_uprobes_destroy_private(struct lttng_event *event)
+{
+       iput(event->u.uprobe.inode);
+       kfree(event->desc->name);
+       kfree(event->desc);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_destroy_private);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Yannick Brosseau");
+MODULE_DESCRIPTION("Linux Trace Toolkit Uprobes Support");
diff --git a/src/probes/lttng.c b/src/probes/lttng.c
new file mode 100644 (file)
index 0000000..8a0dd4b
--- /dev/null
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng.c
+ *
+ * LTTng logger ABI
+ *
+ * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/tracepoint.h>
+#include <linux/uaccess.h>
+#include <linux/gfp.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <wrapper/vmalloc.h>
+#include <lttng/events.h>
+
+#define TP_MODULE_NOAUTOLOAD
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH instrumentation/events
+#define TRACE_INCLUDE_FILE lttng
+#define LTTNG_INSTRUMENTATION
+
+#include <instrumentation/events/lttng.h>
+
+/* Events written through logger are truncated at 1024 bytes */
+#define LTTNG_LOGGER_COUNT_MAX 1024
+#define LTTNG_LOGGER_FILE      "lttng-logger"
+
+DEFINE_TRACE(lttng_logger);
+
+static struct proc_dir_entry *lttng_logger_dentry;
+
+/**
+ * lttng_logger_write - write a userspace string into the trace system
+ * @file: file pointer
+ * @user_buf: user string
+ * @count: length to copy
+ * @ppos: file position
+ *
+ * Copy a userspace string into a trace event named "lttng:logger".
+ * Copies at most @count bytes into the event "msg" dynamic array.
+ * Truncates the count at LTTNG_LOGGER_COUNT_MAX. Returns the number of
+ * bytes copied from the source.
+ * Return -1 on error, with EFAULT errno.
+ */
+static
+ssize_t lttng_logger_write(struct file *file, const char __user *user_buf,
+                   size_t count, loff_t *ppos)
+{
+       int nr_pages = 1, i;
+       unsigned long uaddr = (unsigned long) user_buf;
+       struct page *pages[2];
+       ssize_t written;
+       int ret;
+
+       /* Truncate count */
+       if (unlikely(count > LTTNG_LOGGER_COUNT_MAX))
+               count = LTTNG_LOGGER_COUNT_MAX;
+
+       /* How many pages are we dealing with? */
+       if (unlikely((uaddr & PAGE_MASK) != ((uaddr + count) & PAGE_MASK)))
+               nr_pages = 2;
+
+       /* Pin userspace pages */
+       ret = get_user_pages_fast(uaddr, nr_pages, 0, pages);
+       if (unlikely(ret < nr_pages)) {
+               if (ret > 0) {
+                       BUG_ON(ret != 1);
+                       put_page(pages[0]);
+               }
+               written = -EFAULT;
+               goto end;
+       }
+
+       /* Trace the event */
+       trace_lttng_logger(user_buf, count);
+       written = count;
+       *ppos += written;
+
+       for (i = 0; i < nr_pages; i++)
+               put_page(pages[i]);
+end:
+       return written;
+}
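+
+/*
+ * Illustrative userspace usage (not part of this module): once the module is
+ * loaded, any write to the logger files below emits an "lttng_logger" event,
+ * truncated at LTTNG_LOGGER_COUNT_MAX (1024) bytes, e.g.
+ *
+ *   echo "hello from userspace" > /proc/lttng-logger
+ *   echo "hello again" > /dev/lttng-logger
+ */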
+
+static const struct file_operations lttng_logger_operations = {
+       .write = lttng_logger_write,
+};
+
+/*
+ * Linux 5.6 introduced a separate proc_ops struct for /proc operations
+ * to decouple it from the vfs.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
+static const struct proc_ops lttng_logger_proc_ops = {
+       .proc_write = lttng_logger_write,
+};
+#else
+#define lttng_logger_proc_ops lttng_logger_operations
+#endif
+
+static struct miscdevice logger_dev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "lttng-logger",
+       .mode = 0666,
+       .fops = &lttng_logger_operations
+};
+
+int __init lttng_logger_init(void)
+{
+       int ret = 0;
+
+       wrapper_vmalloc_sync_mappings();
+
+       /* /dev/lttng-logger */
+       ret = misc_register(&logger_dev);
+       if (ret) {
+               printk(KERN_ERR "Error creating LTTng logger device\n");
+               goto error;
+       }
+
+       /* /proc/lttng-logger */
+       lttng_logger_dentry = proc_create_data(LTTNG_LOGGER_FILE,
+                               S_IRUGO | S_IWUGO, NULL,
+                               &lttng_logger_proc_ops, NULL);
+       if (!lttng_logger_dentry) {
+               printk(KERN_ERR "Error creating LTTng logger proc file\n");
+               ret = -ENOMEM;
+               goto error_proc;
+       }
+
+       /* Init */
+       ret = __lttng_events_init__lttng();
+       if (ret)
+               goto error_events;
+       return ret;
+
+error_events:
+       remove_proc_entry(LTTNG_LOGGER_FILE, NULL);
+error_proc:
+       misc_deregister(&logger_dev);
+error:
+       return ret;
+}
+
+void lttng_logger_exit(void)
+{
+       __lttng_events_exit__lttng();
+       if (lttng_logger_dentry)
+               remove_proc_entry(LTTNG_LOGGER_FILE, NULL);
+       misc_deregister(&logger_dev);
+}
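The logger accepts plain write(2) calls from userspace; a minimal usage
sketch (assuming the module is loaded so that /dev/lttng-logger exists)
could look like the following. Writing the same payload to
/proc/lttng-logger goes through the identical lttng_logger_write() path.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char msg[] = "hello from userspace";
        int fd;

        fd = open("/dev/lttng-logger", O_WRONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Each write becomes one lttng:logger event; payloads beyond
         * LTTNG_LOGGER_COUNT_MAX (1024 bytes) are silently truncated. */
        if (write(fd, msg, strlen(msg)) < 0)
                perror("write");
        close(fd);
        return 0;
}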
diff --git a/src/wrapper/fdtable.c b/src/wrapper/fdtable.c
new file mode 100644 (file)
index 0000000..26a8367
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ * wrapper/fdtable.c
+ *
+ * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <wrapper/fdtable.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+
+/*
+ * Reimplementation of iterate_fd() for kernels between 2.6.32 and 3.6
+ * (inclusive).
+ */
+int lttng_iterate_fd(struct files_struct *files,
+               unsigned int first,
+               int (*cb)(const void *, struct file *, unsigned int),
+               const void *ctx)
+{
+       struct fdtable *fdt;
+       struct file *filp;
+       unsigned int i;
+       int res = 0;
+
+       if (!files)
+               return 0;
+       spin_lock(&files->file_lock);
+       fdt = files_fdtable(files);
+       for (i = first; i < fdt->max_fds; i++) {
+               filp = fcheck_files(files, i);
+               if (!filp)
+                       continue;
+               res = cb(ctx, filp, i);
+               if (res)
+                       break;
+       }
+       spin_unlock(&files->file_lock);
+       return res;
+}
+
+#endif
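Because lttng_iterate_fd() invokes the callback under files->file_lock,
the callback must not sleep. A hedged caller sketch (the helper names
below are hypothetical):

#include <linux/fdtable.h>
#include <wrapper/fdtable.h>

static int count_fd_cb(const void *ctx, struct file *filp, unsigned int fd)
{
        unsigned int *count = (unsigned int *) ctx;

        (*count)++;
        return 0;       /* a non-zero return stops the iteration */
}

static unsigned int count_open_files(struct files_struct *files)
{
        unsigned int count = 0;

        /* Walk every open descriptor, starting at fd 0. */
        lttng_iterate_fd(files, 0, count_fd_cb, &count);
        return count;
}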
diff --git a/src/wrapper/irqdesc.c b/src/wrapper/irqdesc.c
new file mode 100644 (file)
index 0000000..397624b
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/irqdesc.c
+ *
+ * Wrapper around irq_to_desc. Uses KALLSYMS to get its address when
+ * available; otherwise the kernel must export this function to GPL
+ * modules. This export was added in kernel 3.4.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <lttng/kernel-version.h>
+
+#if (defined(CONFIG_KALLSYMS) \
+       && (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)))
+
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+#include <linux/irqnr.h>
+#include <wrapper/kallsyms.h>
+#include <wrapper/irqdesc.h>
+
+static
+struct irq_desc *(*irq_to_desc_sym)(unsigned int irq);
+
+struct irq_desc *wrapper_irq_to_desc(unsigned int irq)
+{
+       if (!irq_to_desc_sym)
+               irq_to_desc_sym = (void *) kallsyms_lookup_funcptr("irq_to_desc");
+       if (irq_to_desc_sym) {
+               return irq_to_desc_sym(irq);
+       } else {
+               printk_once(KERN_WARNING "LTTng: irq_to_desc symbol lookup failed.\n");
+               return NULL;
+       }
+}
+EXPORT_SYMBOL_GPL(wrapper_irq_to_desc);
+
+#else
+
+#include <linux/interrupt.h>
+#include <linux/irqnr.h>
+
+struct irq_desc *wrapper_irq_to_desc(unsigned int irq)
+{
+       return irq_to_desc(irq);
+}
+EXPORT_SYMBOL_GPL(wrapper_irq_to_desc);
+
+#endif
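A hedged usage sketch (the helper is hypothetical). Callers must treat a
NULL return as "no descriptor", which on pre-3.4 kernels can also mean
the kallsyms lookup itself failed:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqnr.h>
#include <wrapper/irqdesc.h>

static int count_wired_irqs(void)
{
        int irq, active = 0;

        for (irq = 0; irq < nr_irqs; irq++) {
                struct irq_desc *desc = wrapper_irq_to_desc(irq);

                /* Count lines that have at least one handler attached. */
                if (desc && desc->action)
                        active++;
        }
        return active;
}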
diff --git a/src/wrapper/kallsyms.c b/src/wrapper/kallsyms.c
new file mode 100644 (file)
index 0000000..6af77f5
--- /dev/null
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/kallsyms.c
+ *
+ * Wrapper around kallsyms_lookup_name. Uses a kprobe to resolve its address.
+ *
+ * Can we mainline LTTng already so we don't have to waste our time doing this
+ * kind of hack?
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <wrapper/kallsyms.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
+
+#ifndef CONFIG_KPROBES
+# error "LTTng-modules requires CONFIG_KPROBES on kernels >= 5.7.0"
+#endif
+
+static
+unsigned long (*kallsyms_lookup_name_sym)(const char *name);
+
+static
+int dummy_kprobe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+       return 0;
+}
+
+static
+unsigned long do_get_kallsyms(void)
+{
+       struct kprobe probe;
+       int ret;
+       unsigned long addr;
+
+       memset(&probe, 0, sizeof(probe));
+       probe.pre_handler = dummy_kprobe_handler;
+       probe.symbol_name = "kallsyms_lookup_name";
+       ret = register_kprobe(&probe);
+       if (ret)
+               return 0;
+       addr = (unsigned long)probe.addr;
+#ifdef CONFIG_ARM
+#ifdef CONFIG_THUMB2_KERNEL
+       if (addr)
+               addr |= 1; /* set bit 0 in address for thumb mode */
+#endif
+#endif
+       unregister_kprobe(&probe);
+       return addr;
+}
+
+unsigned long wrapper_kallsyms_lookup_name(const char *name)
+{
+       if (!kallsyms_lookup_name_sym) {
+               kallsyms_lookup_name_sym = (void *)do_get_kallsyms();
+       }
+       if (kallsyms_lookup_name_sym) {
+               return kallsyms_lookup_name_sym(name);
+       } else {
+               printk_once(KERN_WARNING "LTTng requires kallsyms_lookup_name\n");
+               return 0;
+       }
+}
+EXPORT_SYMBOL_GPL(wrapper_kallsyms_lookup_name);
+
+#endif
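A hedged sketch of the intended consumption pattern (the looked-up
symbol name is purely illustrative): resolve once, cache the function
pointer, and degrade gracefully when the lookup fails.

#include <linux/errno.h>
#include <wrapper/kallsyms.h>

static void (*resolved_helper)(void);

static int resolve_helper(void)
{
        /* "some_unexported_helper" stands in for any non-exported symbol. */
        resolved_helper = (void *)
                wrapper_kallsyms_lookup_name("some_unexported_helper");
        if (!resolved_helper)
                return -ENOENT; /* kprobe registration or lookup failed */
        return 0;
}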
diff --git a/src/wrapper/page_alloc.c b/src/wrapper/page_alloc.c
new file mode 100644 (file)
index 0000000..93504c9
--- /dev/null
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ * wrapper/page_alloc.c
+ *
+ * Wrapper around get_pfnblock_flags_mask and the Ubuntu-specific
+ * get_pageblock_flags_mask. Uses KALLSYMS to get their addresses when
+ * available; otherwise the kernel must export these functions
+ * to GPL modules.
+ *
+ * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <lttng/kernel-version.h>
+
+#if (defined(CONFIG_KALLSYMS) \
+       && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,2)        \
+               || LTTNG_KERNEL_RANGE(3,14,36, 3,15,0)          \
+               || LTTNG_KERNEL_RANGE(3,18,10, 3,19,0)          \
+               || LTTNG_DEBIAN_KERNEL_RANGE(3,16,7,9,0,0, 3,17,0,0,0,0) \
+               || LTTNG_UBUNTU_KERNEL_RANGE(3,16,7,34, 3,17,0,0)))
+
+#include <linux/kallsyms.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <wrapper/kallsyms.h>
+#include <wrapper/page_alloc.h>
+
+static
+unsigned long (*get_pfnblock_flags_mask_sym)(struct page *page,
+               unsigned long pfn,
+               unsigned long end_bitidx,
+               unsigned long mask);
+
+unsigned long wrapper_get_pfnblock_flags_mask(struct page *page,
+               unsigned long pfn,
+               unsigned long end_bitidx,
+               unsigned long mask)
+{
+       WARN_ON_ONCE(!get_pfnblock_flags_mask_sym);
+       if (get_pfnblock_flags_mask_sym) {
+               return get_pfnblock_flags_mask_sym(page, pfn, end_bitidx, mask);
+       } else {
+               return -ENOSYS;
+       }
+}
+EXPORT_SYMBOL_GPL(wrapper_get_pfnblock_flags_mask);
+
+int wrapper_get_pfnblock_flags_mask_init(void)
+{
+       get_pfnblock_flags_mask_sym =
+               (void *) kallsyms_lookup_funcptr("get_pfnblock_flags_mask");
+       if (!get_pfnblock_flags_mask_sym)
+               return -1;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(wrapper_get_pfnblock_flags_mask_init);
+
+#else
+
+#include <linux/pageblock-flags.h>
+
+#endif
+
+#if (defined(CONFIG_KALLSYMS) \
+       && LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,50, 3,14,0,0))
+
+#include <linux/kallsyms.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <wrapper/kallsyms.h>
+#include <wrapper/page_alloc.h>
+
+static
+unsigned long (*get_pageblock_flags_mask_sym)(struct page *page,
+               unsigned long end_bitidx,
+               unsigned long mask);
+
+unsigned long wrapper_get_pageblock_flags_mask(struct page *page,
+               unsigned long end_bitidx,
+               unsigned long mask)
+{
+       WARN_ON_ONCE(!get_pageblock_flags_mask_sym);
+       if (get_pageblock_flags_mask_sym) {
+               return get_pageblock_flags_mask_sym(page, end_bitidx, mask);
+       } else {
+               return -ENOSYS;
+       }
+}
+EXPORT_SYMBOL_GPL(wrapper_get_pageblock_flags_mask);
+
+int wrapper_get_pageblock_flags_mask_init(void)
+{
+       get_pageblock_flags_mask_sym =
+               (void *) kallsyms_lookup_funcptr("get_pageblock_flags_mask");
+       if (!get_pageblock_flags_mask_sym)
+               return -1;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(wrapper_get_pageblock_flags_mask_init);
+
+#else
+
+#include <linux/pageblock-flags.h>
+
+#endif
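Note the two-step contract: wrapper_get_pfnblock_flags_mask_init() must
run (typically from module init) before the wrapper is first used, since
the wrapper itself only warns when the symbol was never resolved. A
minimal sketch, with a hypothetical init function:

#include <linux/errno.h>
#include <wrapper/page_alloc.h>

static int example_module_init(void)
{
        /* Resolve get_pfnblock_flags_mask up front and refuse to load
         * if the symbol cannot be found through kallsyms. */
        if (wrapper_get_pfnblock_flags_mask_init())
                return -ENOSYS;
        return 0;
}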
diff --git a/src/wrapper/random.c b/src/wrapper/random.c
new file mode 100644 (file)
index 0000000..7b91fc7
--- /dev/null
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/random.c
+ *
+ * Wrapper around boot_id read. Reads the boot id through the /proc filesystem.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/errno.h>
+
+/* boot_id depends on sysctl */
+#if defined(CONFIG_SYSCTL)
+
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <wrapper/random.h>
+
+/*
+ * Reads the boot id into @bootid as a string. Returns 0 on success.
+ */
+int wrapper_get_bootid(char *bootid)
+{
+       struct file *file;
+       int ret;
+       ssize_t len;
+       mm_segment_t old_fs;
+
+       file = filp_open("/proc/sys/kernel/random/boot_id", O_RDONLY, 0);
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+
+       if (!file->f_op || !file->f_op->read) {
+               ret = -EINVAL;
+               goto end;
+       }
+
+       len = file->f_op->read(file, bootid, BOOT_ID_LEN - 1, &file->f_pos);
+       if (len != BOOT_ID_LEN - 1) {
+               ret = -EINVAL;
+               goto end;
+       }
+
+       bootid[BOOT_ID_LEN - 1] = '\0';
+       ret = 0;
+end:
+       set_fs(old_fs);
+       filp_close(file, current->files);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(wrapper_get_bootid);
+
+#else
+
+int wrapper_get_bootid(char *bootid)
+{
+       return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(wrapper_get_bootid);
+
+#endif
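A hedged caller sketch (the helper is hypothetical; BOOT_ID_LEN is
expected to come from wrapper/random.h and includes room for the
trailing NUL):

#include <linux/kernel.h>
#include <wrapper/random.h>

static void show_bootid(void)
{
        char bootid[BOOT_ID_LEN];

        if (wrapper_get_bootid(bootid))
                printk(KERN_WARNING "LTTng: boot_id unavailable\n");
        else
                printk(KERN_INFO "LTTng: boot_id is %s\n", bootid);
}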
diff --git a/src/wrapper/splice.c b/src/wrapper/splice.c
new file mode 100644 (file)
index 0000000..33e3aaa
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/splice.c
+ *
+ * Wrapper around splice_to_pipe. Uses KALLSYMS to get its address when
+ * available; otherwise the kernel must export this function to GPL
+ * modules. The export was introduced in kernel 4.2.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <lttng/kernel-version.h>
+
+#if (defined(CONFIG_KALLSYMS) \
+       && (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)))
+
+#include <linux/kallsyms.h>
+#include <linux/fs.h>
+#include <linux/splice.h>
+#include <wrapper/kallsyms.h>
+
+static
+ssize_t (*splice_to_pipe_sym)(struct pipe_inode_info *pipe,
+                             struct splice_pipe_desc *spd);
+
+ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
+                              struct splice_pipe_desc *spd)
+{
+       if (!splice_to_pipe_sym)
+               splice_to_pipe_sym = (void *) kallsyms_lookup_funcptr("splice_to_pipe");
+       if (splice_to_pipe_sym) {
+               return splice_to_pipe_sym(pipe, spd);
+       } else {
+               printk_once(KERN_WARNING "LTTng: splice_to_pipe symbol lookup failed.\n");
+               return -ENOSYS;
+       }
+}
+
+#else
+
+#include <linux/fs.h>
+#include <linux/splice.h>
+
+ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
+                              struct splice_pipe_desc *spd)
+{
+       return splice_to_pipe(pipe, spd);
+}
+
+#endif
diff --git a/src/wrapper/trace-clock.c b/src/wrapper/trace-clock.c
new file mode 100644 (file)
index 0000000..74995fe
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/trace-clock.c
+ *
+ * Maps the LTTng trace clock to either a trace clock plugin or the mainline
+ * monotonic clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <wrapper/trace-clock.h>
+
+#ifdef LTTNG_USE_NMI_SAFE_CLOCK
+DEFINE_PER_CPU(u64, lttng_last_tsc);
+EXPORT_PER_CPU_SYMBOL(lttng_last_tsc);
+#endif /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
+
+#ifdef LTTNG_CLOCK_NMI_SAFE_BROKEN
+#warning "Your kernel implements a bogus nmi-safe clock source. Falling back to the non-nmi-safe clock source, which discards events traced from NMI context. Upgrade your kernel to resolve this situation."
+#endif
diff --git a/wrapper/fdtable.c b/wrapper/fdtable.c
deleted file mode 100644 (file)
index 26a8367..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- * wrapper/fdtable.c
- *
- * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/version.h>
-#include <linux/spinlock.h>
-#include <wrapper/fdtable.h>
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
-
-/*
- * Reimplementation of iterate_fd() for kernels between 2.6.32 and 3.6
- * (inclusive).
- */
-int lttng_iterate_fd(struct files_struct *files,
-               unsigned int first,
-               int (*cb)(const void *, struct file *, unsigned int),
-               const void *ctx)
-{
-       struct fdtable *fdt;
-       struct file *filp;
-       unsigned int i;
-       int res = 0;
-
-       if (!files)
-               return 0;
-       spin_lock(&files->file_lock);
-       fdt = files_fdtable(files);
-       for (i = 0; i < fdt->max_fds; i++) {
-               filp = fcheck_files(files, i);
-               if (!filp)
-                       continue;
-               res = cb(ctx, filp, i);
-               if (res)
-                       break;
-       }
-       spin_unlock(&files->file_lock);
-       return res;
-}
-
-#endif
diff --git a/wrapper/irqdesc.c b/wrapper/irqdesc.c
deleted file mode 100644 (file)
index 397624b..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * wrapper/irqdesc.c
- *
- * wrapper around irq_to_desc. Using KALLSYMS to get its address when
- * available, else we need to have a kernel that exports this function to GPL
- * modules. This export was added to the 3.4 kernels.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <lttng/kernel-version.h>
-
-#if (defined(CONFIG_KALLSYMS) \
-       && (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)))
-
-#include <linux/kallsyms.h>
-#include <linux/interrupt.h>
-#include <linux/irqnr.h>
-#include <wrapper/kallsyms.h>
-#include <wrapper/irqdesc.h>
-
-static
-struct irq_desc *(*irq_to_desc_sym)(unsigned int irq);
-
-struct irq_desc *wrapper_irq_to_desc(unsigned int irq)
-{
-       if (!irq_to_desc_sym)
-               irq_to_desc_sym = (void *) kallsyms_lookup_funcptr("irq_to_desc");
-       if (irq_to_desc_sym) {
-               return irq_to_desc_sym(irq);
-       } else {
-               printk_once(KERN_WARNING "LTTng: irq_to_desc symbol lookup failed.\n");
-               return NULL;
-       }
-}
-EXPORT_SYMBOL_GPL(wrapper_irq_to_desc);
-
-#else
-
-#include <linux/interrupt.h>
-#include <linux/irqnr.h>
-
-struct irq_desc *wrapper_irq_to_desc(unsigned int irq)
-{
-       return irq_to_desc(irq);
-}
-EXPORT_SYMBOL_GPL(wrapper_irq_to_desc);
-
-#endif
diff --git a/wrapper/kallsyms.c b/wrapper/kallsyms.c
deleted file mode 100644 (file)
index 6af77f5..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * wrapper/kallsyms.c
- *
- * Wrapper around kallsyms. Using kprobes to get its address when available.
- *
- * Can we mainline LTTng already so we don't have to waste our time doing this
- * kind of hack ?
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/kprobes.h>
-#include <linux/module.h>
-#include <wrapper/kallsyms.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
-
-#ifndef CONFIG_KPROBES
-# error "LTTng-modules requires CONFIG_KPROBES on kernels >= 5.7.0"
-#endif
-
-static
-unsigned long (*kallsyms_lookup_name_sym)(const char *name);
-
-static
-int dummy_kprobe_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       return 0;
-}
-
-static
-unsigned long do_get_kallsyms(void)
-{
-       struct kprobe probe;
-       int ret;
-       unsigned long addr;
-
-       memset(&probe, 0, sizeof(probe));
-       probe.pre_handler = dummy_kprobe_handler;
-       probe.symbol_name = "kallsyms_lookup_name";
-       ret = register_kprobe(&probe);
-       if (ret)
-               return 0;
-       addr = (unsigned long)probe.addr;
-#ifdef CONFIG_ARM
-#ifdef CONFIG_THUMB2_KERNEL
-       if (addr)
-               addr |= 1; /* set bit 0 in address for thumb mode */
-#endif
-#endif
-       unregister_kprobe(&probe);
-       return addr;
-}
-
-unsigned long wrapper_kallsyms_lookup_name(const char *name)
-{
-       if (!kallsyms_lookup_name_sym) {
-               kallsyms_lookup_name_sym = (void *)do_get_kallsyms();
-       }
-       if (kallsyms_lookup_name_sym)
-               return kallsyms_lookup_name_sym(name);
-       else {
-               printk_once(KERN_WARNING "LTTng requires kallsyms_lookup_name\n");
-               return 0;
-       }
-}
-EXPORT_SYMBOL_GPL(wrapper_kallsyms_lookup_name);
-
-#endif
diff --git a/wrapper/page_alloc.c b/wrapper/page_alloc.c
deleted file mode 100644 (file)
index 93504c9..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- * wrapper/page_alloc.c
- *
- * wrapper around get_pfnblock_flags_mask and Ubuntu
- * get_pageblock_flags_mask. Using KALLSYMS to get their address when
- * available, else we need to have a kernel that exports this function
- * to GPL modules.
- *
- * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <lttng/kernel-version.h>
-
-#if (defined(CONFIG_KALLSYMS) \
-       && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,2)        \
-               || LTTNG_KERNEL_RANGE(3,14,36, 3,15,0)          \
-               || LTTNG_KERNEL_RANGE(3,18,10, 3,19,0)          \
-               || LTTNG_DEBIAN_KERNEL_RANGE(3,16,7,9,0,0, 3,17,0,0,0,0) \
-               || LTTNG_UBUNTU_KERNEL_RANGE(3,16,7,34, 3,17,0,0)))
-
-#include <linux/kallsyms.h>
-#include <linux/mm_types.h>
-#include <linux/module.h>
-#include <wrapper/kallsyms.h>
-#include <wrapper/page_alloc.h>
-
-static
-unsigned long (*get_pfnblock_flags_mask_sym)(struct page *page,
-               unsigned long pfn,
-               unsigned long end_bitidx,
-               unsigned long mask);
-
-unsigned long wrapper_get_pfnblock_flags_mask(struct page *page,
-               unsigned long pfn,
-               unsigned long end_bitidx,
-               unsigned long mask)
-{
-       WARN_ON_ONCE(!get_pfnblock_flags_mask_sym);
-       if (get_pfnblock_flags_mask_sym) {
-               return get_pfnblock_flags_mask_sym(page, pfn, end_bitidx, mask);
-       } else {
-               return -ENOSYS;
-       }
-}
-EXPORT_SYMBOL_GPL(wrapper_get_pfnblock_flags_mask);
-
-int wrapper_get_pfnblock_flags_mask_init(void)
-{
-       get_pfnblock_flags_mask_sym =
-               (void *) kallsyms_lookup_funcptr("get_pfnblock_flags_mask");
-       if (!get_pfnblock_flags_mask_sym)
-               return -1;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(wrapper_get_pfnblock_flags_mask_init);
-
-#else
-
-#include <linux/pageblock-flags.h>
-
-#endif
-
-#if (defined(CONFIG_KALLSYMS) \
-       && LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,50, 3,14,0,0))
-
-#include <linux/kallsyms.h>
-#include <linux/mm_types.h>
-#include <linux/module.h>
-#include <wrapper/kallsyms.h>
-#include <wrapper/page_alloc.h>
-
-static
-unsigned long (*get_pageblock_flags_mask_sym)(struct page *page,
-               unsigned long end_bitidx,
-               unsigned long mask);
-
-unsigned long wrapper_get_pageblock_flags_mask(struct page *page,
-               unsigned long end_bitidx,
-               unsigned long mask)
-{
-       WARN_ON_ONCE(!get_pageblock_flags_mask_sym);
-       if (get_pageblock_flags_mask_sym) {
-               return get_pageblock_flags_mask_sym(page, end_bitidx, mask);
-       } else {
-               return -ENOSYS;
-       }
-}
-EXPORT_SYMBOL_GPL(wrapper_get_pageblock_flags_mask);
-
-int wrapper_get_pageblock_flags_mask_init(void)
-{
-       get_pageblock_flags_mask_sym =
-               (void *) kallsyms_lookup_funcptr("get_pageblock_flags_mask");
-       if (!get_pageblock_flags_mask_sym)
-               return -1;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(wrapper_get_pfnblock_flags_mask_init);
-
-#else
-
-#include <linux/pageblock-flags.h>
-
-#endif
diff --git a/wrapper/random.c b/wrapper/random.c
deleted file mode 100644 (file)
index 7b91fc7..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/random.c
- *
- * wrapper around bootid read. Read the boot id through the /proc filesystem.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/errno.h>
-
-/* boot_id depends on sysctl */
-#if defined(CONFIG_SYSCTL)
-
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <wrapper/random.h>
-
-/*
- * Returns string boot id.
- */
-int wrapper_get_bootid(char *bootid)
-{
-       struct file *file;
-       int ret;
-       ssize_t len;
-       mm_segment_t old_fs;
-
-       file = filp_open("/proc/sys/kernel/random/boot_id", O_RDONLY, 0);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-
-       if (!file->f_op || !file->f_op->read) {
-               ret = -EINVAL;
-               goto end;
-       }
-
-       len = file->f_op->read(file, bootid, BOOT_ID_LEN - 1, &file->f_pos);
-       if (len != BOOT_ID_LEN - 1) {
-               ret = -EINVAL;
-               goto end;
-       }
-
-       bootid[BOOT_ID_LEN - 1] = '\0';
-       ret = 0;
-end:
-       set_fs(old_fs);
-       filp_close(file, current->files);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(wrapper_get_bootid);
-
-#else
-
-int wrapper_get_bootid(char *bootid)
-{
-       return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(wrapper_get_bootid);
-
-#endif
diff --git a/wrapper/splice.c b/wrapper/splice.c
deleted file mode 100644 (file)
index 33e3aaa..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/splice.c
- *
- * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
- * available, else we need to have a kernel that exports this function to GPL
- * modules. The export was introduced in kernel 4.2.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <lttng/kernel-version.h>
-
-#if (defined(CONFIG_KALLSYMS) \
-       && (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)))
-
-#include <linux/kallsyms.h>
-#include <linux/fs.h>
-#include <linux/splice.h>
-#include <wrapper/kallsyms.h>
-
-static
-ssize_t (*splice_to_pipe_sym)(struct pipe_inode_info *pipe,
-                             struct splice_pipe_desc *spd);
-
-ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
-                              struct splice_pipe_desc *spd)
-{
-       if (!splice_to_pipe_sym)
-               splice_to_pipe_sym = (void *) kallsyms_lookup_funcptr("splice_to_pipe");
-       if (splice_to_pipe_sym) {
-               return splice_to_pipe_sym(pipe, spd);
-       } else {
-               printk_once(KERN_WARNING "LTTng: splice_to_pipe symbol lookup failed.\n");
-               return -ENOSYS;
-       }
-}
-
-#else
-
-#include <linux/fs.h>
-#include <linux/splice.h>
-
-ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
-                              struct splice_pipe_desc *spd)
-{
-       return splice_to_pipe(pipe, spd);
-}
-
-#endif
diff --git a/wrapper/trace-clock.c b/wrapper/trace-clock.c
deleted file mode 100644 (file)
index 74995fe..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/trace-clock.c
- *
- * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
- * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <wrapper/trace-clock.h>
-
-#ifdef LTTNG_USE_NMI_SAFE_CLOCK
-DEFINE_PER_CPU(u64, lttng_last_tsc);
-EXPORT_PER_CPU_SYMBOL(lttng_last_tsc);
-#endif /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
-
-#ifdef LTTNG_CLOCK_NMI_SAFE_BROKEN
-#warning "Your kernel implements a bogus nmi-safe clock source. Falling back to the non-nmi-safe clock source, which discards events traced from NMI context. Upgrade your kernel to resolve this situation."
-#endif