Cleanup: Move lib/ringbuffer/ headers to include/ringbuffer/
author    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Wed, 6 May 2020 13:21:00 +0000 (09:21 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Wed, 6 May 2020 14:33:17 +0000 (10:33 -0400)
Remove the <wrapper/ringbuffer/...> proxy include files, and add the
include/ directory to the preprocessor include search path.

Adapt all includes accordingly.
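
For example, where a client previously went through a proxy header, it now
includes the header directly (an illustrative before/after, not a hunk taken
from this commit):

-#include <wrapper/ringbuffer/frontend.h>
+#include <ringbuffer/frontend.h>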

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
94 files changed:
Makefile
include/ringbuffer/api.h [new file with mode: 0644]
include/ringbuffer/backend.h [new file with mode: 0644]
include/ringbuffer/backend_internal.h [new file with mode: 0644]
include/ringbuffer/backend_types.h [new file with mode: 0644]
include/ringbuffer/config.h [new file with mode: 0644]
include/ringbuffer/frontend.h [new file with mode: 0644]
include/ringbuffer/frontend_api.h [new file with mode: 0644]
include/ringbuffer/frontend_internal.h [new file with mode: 0644]
include/ringbuffer/frontend_types.h [new file with mode: 0644]
include/ringbuffer/iterator.h [new file with mode: 0644]
include/ringbuffer/nohz.h [new file with mode: 0644]
include/ringbuffer/vatomic.h [new file with mode: 0644]
include/ringbuffer/vfs.h [new file with mode: 0644]
lib/Kbuild
lib/ringbuffer/api.h [deleted file]
lib/ringbuffer/backend.h [deleted file]
lib/ringbuffer/backend_internal.h [deleted file]
lib/ringbuffer/backend_types.h [deleted file]
lib/ringbuffer/config.h [deleted file]
lib/ringbuffer/frontend.h [deleted file]
lib/ringbuffer/frontend_api.h [deleted file]
lib/ringbuffer/frontend_internal.h [deleted file]
lib/ringbuffer/frontend_types.h [deleted file]
lib/ringbuffer/iterator.h [deleted file]
lib/ringbuffer/nohz.h [deleted file]
lib/ringbuffer/ring_buffer_backend.c
lib/ringbuffer/ring_buffer_frontend.c
lib/ringbuffer/ring_buffer_iterator.c
lib/ringbuffer/ring_buffer_mmap.c
lib/ringbuffer/ring_buffer_splice.c
lib/ringbuffer/ring_buffer_vfs.c
lib/ringbuffer/vatomic.h [deleted file]
lib/ringbuffer/vfs.h [deleted file]
lttng-abi.c
lttng-context-callstack.c
lttng-context-cgroup-ns.c
lttng-context-cpu-id.c
lttng-context-egid.c
lttng-context-euid.c
lttng-context-gid.c
lttng-context-hostname.c
lttng-context-interruptible.c
lttng-context-ipc-ns.c
lttng-context-migratable.c
lttng-context-mnt-ns.c
lttng-context-need-reschedule.c
lttng-context-net-ns.c
lttng-context-nice.c
lttng-context-perf-counters.c
lttng-context-pid-ns.c
lttng-context-pid.c
lttng-context-ppid.c
lttng-context-preemptible.c
lttng-context-prio.c
lttng-context-procname.c
lttng-context-sgid.c
lttng-context-suid.c
lttng-context-tid.c
lttng-context-uid.c
lttng-context-user-ns.c
lttng-context-uts-ns.c
lttng-context-vegid.c
lttng-context-veuid.c
lttng-context-vgid.c
lttng-context-vpid.c
lttng-context-vppid.c
lttng-context-vsgid.c
lttng-context-vsuid.c
lttng-context-vtid.c
lttng-context-vuid.c
lttng-events.c
lttng-ring-buffer-client.h
lttng-ring-buffer-metadata-client.h
lttng-tracer-core.h
probes/Kbuild
probes/lttng-kprobes.c
probes/lttng-kretprobes.c
probes/lttng-tracepoint-event-impl.h
probes/lttng-uprobes.c
tests/Kbuild
wrapper/ringbuffer/api.h [deleted file]
wrapper/ringbuffer/backend.h [deleted file]
wrapper/ringbuffer/backend_internal.h [deleted file]
wrapper/ringbuffer/backend_types.h [deleted file]
wrapper/ringbuffer/config.h [deleted file]
wrapper/ringbuffer/frontend.h [deleted file]
wrapper/ringbuffer/frontend_api.h [deleted file]
wrapper/ringbuffer/frontend_internal.h [deleted file]
wrapper/ringbuffer/frontend_types.h [deleted file]
wrapper/ringbuffer/iterator.h [deleted file]
wrapper/ringbuffer/nohz.h [deleted file]
wrapper/ringbuffer/vatomic.h [deleted file]
wrapper/ringbuffer/vfs.h [deleted file]

index 6e6c2fe380b1341529f81b7e61b30f971ed1bd4d..07c21ea4f12603318c9886f220632fbd68d9cff3 100644
--- a/Makefile
+++ b/Makefile
@@ -29,7 +29,7 @@ ifneq ($(KERNELRELEASE),)
 
   include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common
 
-  ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)
+  ccflags-y += -I$(TOP_LTTNG_MODULES_DIR) -I$(TOP_LTTNG_MODULES_DIR)/include
 
   obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-discard.o
   obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-overwrite.o
diff --git a/include/ringbuffer/api.h b/include/ringbuffer/api.h
new file mode 100644
index 0000000..3ad7725
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/api.h
+ *
+ * Ring Buffer API.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIB_RING_BUFFER_API_H
+#define _LIB_RING_BUFFER_API_H
+
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <ringbuffer/vfs.h>
+
+/*
+ * ring_buffer_frontend_api.h contains static inline functions that depend on
+ * client static inlines. Hence the inclusion of this "api" header only
+ * within the client.
+ */
+#include <ringbuffer/frontend_api.h>
+
+#endif /* _LIB_RING_BUFFER_API_H */
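
The comment in api.h above encodes an ordering constraint for clients: the
client static inlines must be visible before api.h (and therefore
frontend_api.h) is pulled in. A minimal sketch of a client translation unit
honoring this, with an assumed clock source (the real clients are
lttng-ring-buffer-client.h and lttng-ring-buffer-metadata-client.h from the
file list above):

    /* Client static inlines first: frontend_api.h calls into them. */
    static inline u64 lib_ring_buffer_clock_read(struct channel *chan)
    {
            return trace_clock_read64();    /* assumed clock source */
    }

    /* ... remaining client static inlines (record_header_size, ...) ... */

    #include <ringbuffer/api.h>     /* only after the client inlines */
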
diff --git a/include/ringbuffer/backend.h b/include/ringbuffer/backend.h
new file mode 100644
index 0000000..1f499c9
--- /dev/null
@@ -0,0 +1,463 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/backend.h
+ *
+ * Ring buffer backend (API).
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
+ * the reader in flight recorder mode.
+ */
+
+#ifndef _LIB_RING_BUFFER_BACKEND_H
+#define _LIB_RING_BUFFER_BACKEND_H
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <wrapper/uaccess.h>
+
+/* Internal helpers */
+#include <ringbuffer/backend_internal.h>
+#include <ringbuffer/frontend_internal.h>
+
+/* Ring buffer backend API */
+
+/* Ring buffer backend access (read/write) */
+
+extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
+                                  size_t offset, void *dest, size_t len);
+
+extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
+                                         size_t offset, void __user *dest,
+                                         size_t len);
+
+extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
+                                    size_t offset, void *dest, size_t len);
+
+extern unsigned long *
+lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb, size_t offset,
+                             void ***virt);
+
+/*
+ * Return the address where a given offset is located.
+ * Should be used to get the current subbuffer header pointer. Given we know
+ * it's never on a page boundary, it's safe to write directly to this address,
+ * as long as the write is never bigger than a page size.
+ */
+extern void *
+lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
+                              size_t offset);
+extern void *
+lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
+                                   size_t offset);
+
+/**
+ * lib_ring_buffer_write - write data to a buffer backend
+ * @config : ring buffer instance configuration
+ * @ctx: ring buffer context. (input arguments only)
+ * @src : source pointer to copy from
+ * @len : length of data to copy
+ *
+ * This function copies "len" bytes of data from a source pointer to a buffer
+ * backend, at the current context offset. This is more or less a buffer
+ * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
+ * if copy is crossing a page boundary.
+ */
+static inline __attribute__((always_inline))
+void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
+                          struct lib_ring_buffer_ctx *ctx,
+                          const void *src, size_t len)
+{
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t index, pagecpy;
+       size_t offset = ctx->buf_offset;
+       struct lib_ring_buffer_backend_pages *backend_pages;
+
+       if (unlikely(!len))
+               return;
+       backend_pages =
+               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+       offset &= chanb->buf_size - 1;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+       if (likely(pagecpy == len))
+               lib_ring_buffer_do_copy(config,
+                                       backend_pages->p[index].virt
+                                           + (offset & ~PAGE_MASK),
+                                       src, len);
+       else
+               _lib_ring_buffer_write(bufb, offset, src, len, 0);
+       ctx->buf_offset += len;
+}
+
+/**
+ * lib_ring_buffer_memset - write len bytes of c to a buffer backend
+ * @config : ring buffer instance configuration
+ * @bufb : ring buffer backend
+ * @offset : offset within the buffer
+ * @c : the byte to copy
+ * @len : number of bytes to copy
+ *
+ * This function writes "len" bytes of "c" to a buffer backend, at a specific
+ * offset. This is more or less a buffer backend-specific memset() operation.
+ * Calls the slow path (_ring_buffer_memset) if write is crossing a page
+ * boundary.
+ */
+static inline
+void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
+                           struct lib_ring_buffer_ctx *ctx, int c, size_t len)
+{
+
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t index, pagecpy;
+       size_t offset = ctx->buf_offset;
+       struct lib_ring_buffer_backend_pages *backend_pages;
+
+       if (unlikely(!len))
+               return;
+       backend_pages =
+               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+       offset &= chanb->buf_size - 1;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+       if (likely(pagecpy == len))
+               lib_ring_buffer_do_memset(backend_pages->p[index].virt
+                                         + (offset & ~PAGE_MASK),
+                                         c, len);
+       else
+               _lib_ring_buffer_memset(bufb, offset, c, len, 0);
+       ctx->buf_offset += len;
+}
+
+/*
+ * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
+ * terminating character is found in @src. Returns the number of bytes
+ * copied. Does *not* terminate @dest with NULL terminating character.
+ */
+static inline __attribute__((always_inline))
+size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config,
+               char *dest, const char *src, size_t len)
+{
+       size_t count;
+
+       for (count = 0; count < len; count++) {
+               char c;
+
+               /*
+                * Only read source character once, in case it is
+                * modified concurrently.
+                */
+               c = READ_ONCE(src[count]);
+               if (!c)
+                       break;
+               lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
+       }
+       return count;
+}
+
+/*
+ * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
+ * terminating character is found in @src, or when a fault occurs.
+ * Returns the number of bytes copied. Does *not* terminate @dest with
+ * NULL terminating character.
+ *
+ * This function deals with userspace pointers, it should never be called
+ * directly without having the src pointer checked with access_ok()
+ * previously.
+ */
+static inline __attribute__((always_inline))
+size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
+               char *dest, const char __user *src, size_t len)
+{
+       size_t count;
+
+       for (count = 0; count < len; count++) {
+               int ret;
+               char c;
+
+               ret = __copy_from_user_inatomic(&c, src + count, 1);
+               if (ret || !c)
+                       break;
+               lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
+       }
+       return count;
+}
+
+/**
+ * lib_ring_buffer_strcpy - write string data to a buffer backend
+ * @config : ring buffer instance configuration
+ * @ctx: ring buffer context. (input arguments only)
+ * @src : source pointer to copy from
+ * @len : length of data to copy
+ * @pad : character to use for padding
+ *
+ * This function copies @len - 1 bytes of string data from a source
+ * pointer to a buffer backend, followed by a terminating '\0'
+ * character, at the current context offset. This is more or less a
+ * buffer backend-specific strncpy() operation. If a terminating '\0'
+ * character is found in @src before @len - 1 characters are copied, pad
+ * the buffer with @pad characters (e.g. '#'). Calls the slow path
+ * (_ring_buffer_strcpy) if copy is crossing a page boundary.
+ */
+static inline
+void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
+                          struct lib_ring_buffer_ctx *ctx,
+                          const char *src, size_t len, int pad)
+{
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t index, pagecpy;
+       size_t offset = ctx->buf_offset;
+       struct lib_ring_buffer_backend_pages *backend_pages;
+
+       if (unlikely(!len))
+               return;
+       backend_pages =
+               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+       offset &= chanb->buf_size - 1;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+       if (likely(pagecpy == len)) {
+               size_t count;
+
+               count = lib_ring_buffer_do_strcpy(config,
+                                       backend_pages->p[index].virt
+                                           + (offset & ~PAGE_MASK),
+                                       src, len - 1);
+               offset += count;
+               /* Padding */
+               if (unlikely(count < len - 1)) {
+                       size_t pad_len = len - 1 - count;
+
+                       lib_ring_buffer_do_memset(backend_pages->p[index].virt
+                                               + (offset & ~PAGE_MASK),
+                                       pad, pad_len);
+                       offset += pad_len;
+               }
+               /* Ending '\0' */
+               lib_ring_buffer_do_memset(backend_pages->p[index].virt
+                                       + (offset & ~PAGE_MASK),
+                               '\0', 1);
+       } else {
+               _lib_ring_buffer_strcpy(bufb, offset, src, len, 0, pad);
+       }
+       ctx->buf_offset += len;
+}
+
+/**
+ * lib_ring_buffer_copy_from_user_inatomic - write userspace data to a buffer backend
+ * @config : ring buffer instance configuration
+ * @ctx: ring buffer context. (input arguments only)
+ * @src : userspace source pointer to copy from
+ * @len : length of data to copy
+ *
+ * This function copies "len" bytes of data from a userspace pointer to a
+ * buffer backend, at the current context offset. This is more or less a buffer
+ * backend-specific memcpy() operation. Calls the slow path
+ * (_ring_buffer_write_from_user_inatomic) if copy is crossing a page boundary.
+ * Disable the page fault handler to ensure we never try to take the mmap_sem.
+ */
+static inline __attribute__((always_inline))
+void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config *config,
+                                   struct lib_ring_buffer_ctx *ctx,
+                                   const void __user *src, size_t len)
+{
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t index, pagecpy;
+       size_t offset = ctx->buf_offset;
+       struct lib_ring_buffer_backend_pages *backend_pages;
+       unsigned long ret;
+       mm_segment_t old_fs = get_fs();
+
+       if (unlikely(!len))
+               return;
+       backend_pages =
+               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+       offset &= chanb->buf_size - 1;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+
+       set_fs(KERNEL_DS);
+       pagefault_disable();
+       if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
+               goto fill_buffer;
+
+       if (likely(pagecpy == len)) {
+               ret = lib_ring_buffer_do_copy_from_user_inatomic(
+                       backend_pages->p[index].virt + (offset & ~PAGE_MASK),
+                       src, len);
+               if (unlikely(ret > 0)) {
+                       /* Copy failed. */
+                       goto fill_buffer;
+               }
+       } else {
+               _lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
+       }
+       pagefault_enable();
+       set_fs(old_fs);
+       ctx->buf_offset += len;
+
+       return;
+
+fill_buffer:
+       pagefault_enable();
+       set_fs(old_fs);
+       /*
+        * In the error path we call the slow path version to avoid
+        * the pollution of static inline code.
+        */
+       _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
+}
+
+/**
+ * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a buffer backend
+ * @config : ring buffer instance configuration
+ * @ctx: ring buffer context (input arguments only)
+ * @src : userspace source pointer to copy from
+ * @len : length of data to copy
+ * @pad : character to use for padding
+ *
+ * This function copies @len - 1 bytes of string data from a userspace
+ * source pointer to a buffer backend, followed by a terminating '\0'
+ * character, at the current context offset. This is more or less a
+ * buffer backend-specific strncpy() operation. If a terminating '\0'
+ * character is found in @src before @len - 1 characters are copied, pad
+ * the buffer with @pad characters (e.g. '#'). Calls the slow path
+ * (_ring_buffer_strcpy_from_user_inatomic) if copy is crossing a page
+ * boundary. Disable the page fault handler to ensure we never try to
+ * take the mmap_sem.
+ */
+static inline
+void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer_ctx *ctx,
+               const void __user *src, size_t len, int pad)
+{
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t index, pagecpy;
+       size_t offset = ctx->buf_offset;
+       struct lib_ring_buffer_backend_pages *backend_pages;
+       mm_segment_t old_fs = get_fs();
+
+       if (unlikely(!len))
+               return;
+       backend_pages =
+               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+       offset &= chanb->buf_size - 1;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+
+       set_fs(KERNEL_DS);
+       pagefault_disable();
+       if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
+               goto fill_buffer;
+
+       if (likely(pagecpy == len)) {
+               size_t count;
+
+               count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
+                                       backend_pages->p[index].virt
+                                           + (offset & ~PAGE_MASK),
+                                       src, len - 1);
+               offset += count;
+               /* Padding */
+               if (unlikely(count < len - 1)) {
+                       size_t pad_len = len - 1 - count;
+
+                       lib_ring_buffer_do_memset(backend_pages->p[index].virt
+                                               + (offset & ~PAGE_MASK),
+                                       pad, pad_len);
+                       offset += pad_len;
+               }
+               /* Ending '\0' */
+               lib_ring_buffer_do_memset(backend_pages->p[index].virt
+                                       + (offset & ~PAGE_MASK),
+                               '\0', 1);
+       } else {
+               _lib_ring_buffer_strcpy_from_user_inatomic(bufb, offset, src,
+                                       len, 0, pad);
+       }
+       pagefault_enable();
+       set_fs(old_fs);
+       ctx->buf_offset += len;
+
+       return;
+
+fill_buffer:
+       pagefault_enable();
+       set_fs(old_fs);
+       /*
+        * In the error path we call the slow path version to avoid
+        * the pollution of static inline code.
+        */
+       _lib_ring_buffer_memset(bufb, offset, pad, len - 1, 0);
+       offset += len - 1;
+       _lib_ring_buffer_memset(bufb, offset, '\0', 1, 0);
+}
+
+/*
+ * This accessor counts the number of unread records in a buffer.
+ * It only provides a consistent value if no reads nor writes are performed
+ * concurrently.
+ */
+static inline
+unsigned long lib_ring_buffer_get_records_unread(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer *buf)
+{
+       struct lib_ring_buffer_backend *bufb = &buf->backend;
+       struct lib_ring_buffer_backend_pages *pages;
+       unsigned long records_unread = 0, sb_bindex, id;
+       unsigned int i;
+
+       for (i = 0; i < bufb->chan->backend.num_subbuf; i++) {
+               id = bufb->buf_wsb[i].id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               pages = bufb->array[sb_bindex];
+               records_unread += v_read(config, &pages->records_unread);
+       }
+       if (config->mode == RING_BUFFER_OVERWRITE) {
+               id = bufb->buf_rsb.id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               pages = bufb->array[sb_bindex];
+               records_unread += v_read(config, &pages->records_unread);
+       }
+       return records_unread;
+}
+
+/*
+ * We use __copy_from_user_inatomic to copy userspace data after
+ * checking with access_ok() and disabling page faults.
+ *
+ * Return 0 if OK, nonzero on error.
+ */
+static inline
+unsigned long lib_ring_buffer_copy_from_user_check_nofault(void *dest,
+                                               const void __user *src,
+                                               unsigned long len)
+{
+       unsigned long ret;
+       mm_segment_t old_fs;
+
+       if (!lttng_access_ok(VERIFY_READ, src, len))
+               return 1;
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+       pagefault_disable();
+       ret = __copy_from_user_inatomic(dest, src, len);
+       pagefault_enable();
+       set_fs(old_fs);
+       return ret;
+}
+
+#endif /* _LIB_RING_BUFFER_BACKEND_H */
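
As a concrete illustration of the lib_ring_buffer_strcpy() semantics
documented above (a sketch: it assumes @config and @ctx were set up by a
prior reservation through the frontend API, which is not part of this
header):

    char payload[] = "ab";

    /* len = 5: copy at most len - 1 = 4 string bytes, pad, terminate. */
    lib_ring_buffer_strcpy(config, ctx, payload, 5, '#');
    /*
     * The reserved slot now holds 'a' 'b' '#' '#' '\0': two string
     * bytes, two '#' padding bytes and the terminating '\0', with
     * ctx->buf_offset advanced by exactly len = 5 bytes.
     */
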
diff --git a/include/ringbuffer/backend_internal.h b/include/ringbuffer/backend_internal.h
new file mode 100644
index 0000000..aab408f
--- /dev/null
@@ -0,0 +1,533 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/backend_internal.h
+ *
+ * Ring buffer backend (internal helpers).
+ *
+ * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIB_RING_BUFFER_BACKEND_INTERNAL_H
+#define _LIB_RING_BUFFER_BACKEND_INTERNAL_H
+
+#include <wrapper/compiler.h>
+#include <wrapper/inline_memcpy.h>
+#include <ringbuffer/config.h>
+#include <ringbuffer/backend_types.h>
+#include <ringbuffer/frontend_types.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+/* Ring buffer backend API presented to the frontend */
+
+/* Ring buffer and channel backend create/free */
+
+int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
+                                  struct channel_backend *chan, int cpu);
+void channel_backend_unregister_notifiers(struct channel_backend *chanb);
+void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
+int channel_backend_init(struct channel_backend *chanb,
+                        const char *name,
+                        const struct lib_ring_buffer_config *config,
+                        void *priv, size_t subbuf_size,
+                        size_t num_subbuf);
+void channel_backend_free(struct channel_backend *chanb);
+
+void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
+void channel_backend_reset(struct channel_backend *chanb);
+
+int lib_ring_buffer_backend_init(void);
+void lib_ring_buffer_backend_exit(void);
+
+extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
+                                  size_t offset, const void *src, size_t len,
+                                  size_t pagecpy);
+extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
+                                   size_t offset, int c, size_t len,
+                                   size_t pagecpy);
+extern void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
+                                  size_t offset, const char *src, size_t len,
+                                  size_t pagecpy, int pad);
+extern void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+                                           size_t offset, const void *src,
+                                           size_t len, size_t pagecpy);
+extern void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+               size_t offset, const char __user *src, size_t len,
+               size_t pagecpy, int pad);
+
+/*
+ * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
+ * exchanged atomically.
+ *
+ * Top half word, except lowest bit, belongs to "offset", which is used to
+ * keep count of the produced buffers.  For overwrite mode, this provides the
+ * consumer with the capacity to read subbuffers in order, handling the
+ * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
+ * systems) concurrently with a single execution of get_subbuf (between offset
+ * sampling and subbuffer ID exchange).
+ */
+
+#define HALF_ULONG_BITS                (BITS_PER_LONG >> 1)
+
+#define SB_ID_OFFSET_SHIFT     (HALF_ULONG_BITS + 1)
+#define SB_ID_OFFSET_COUNT     (1UL << SB_ID_OFFSET_SHIFT)
+#define SB_ID_OFFSET_MASK      (~(SB_ID_OFFSET_COUNT - 1))
+/*
+ * Lowest bit of top half word belongs to noref. Used only for overwrite mode.
+ */
+#define SB_ID_NOREF_SHIFT      (SB_ID_OFFSET_SHIFT - 1)
+#define SB_ID_NOREF_COUNT      (1UL << SB_ID_NOREF_SHIFT)
+#define SB_ID_NOREF_MASK       SB_ID_NOREF_COUNT
+/*
+ * In overwrite mode: lowest half of word is used for index.
+ * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
+ * In producer-consumer mode: whole word used for index.
+ */
+#define SB_ID_INDEX_SHIFT      0
+#define SB_ID_INDEX_COUNT      (1UL << SB_ID_INDEX_SHIFT)
+#define SB_ID_INDEX_MASK       (SB_ID_NOREF_COUNT - 1)
+
+/*
+ * Construct the subbuffer id from offset, index and noref. Use only the index
+ * for producer-consumer mode (offset and noref are only used in overwrite
+ * mode).
+ */
+static inline
+unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
+                          unsigned long offset, unsigned long noref,
+                          unsigned long index)
+{
+       if (config->mode == RING_BUFFER_OVERWRITE)
+               return (offset << SB_ID_OFFSET_SHIFT)
+                      | (noref << SB_ID_NOREF_SHIFT)
+                      | index;
+       else
+               return index;
+}
+
+/*
+ * Compare offset with the offset contained within id. Return 1 if the offset
+ * bits are identical, else 0.
+ */
+static inline
+int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
+                               unsigned long id, unsigned long offset)
+{
+       return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
+}
+
+static inline
+unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
+                                    unsigned long id)
+{
+       if (config->mode == RING_BUFFER_OVERWRITE)
+               return id & SB_ID_INDEX_MASK;
+       else
+               return id;
+}
+
+static inline
+unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
+                                   unsigned long id)
+{
+       if (config->mode == RING_BUFFER_OVERWRITE)
+               return !!(id & SB_ID_NOREF_MASK);
+       else
+               return 1;
+}
+
+/*
+ * Only used by reader on subbuffer ID it has exclusive access to. No volatile
+ * needed.
+ */
+static inline
+void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
+                           unsigned long *id)
+{
+       if (config->mode == RING_BUFFER_OVERWRITE)
+               *id |= SB_ID_NOREF_MASK;
+}
+
+static inline
+void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
+                                  unsigned long *id, unsigned long offset)
+{
+       unsigned long tmp;
+
+       if (config->mode == RING_BUFFER_OVERWRITE) {
+               tmp = *id;
+               tmp &= ~SB_ID_OFFSET_MASK;
+               tmp |= offset << SB_ID_OFFSET_SHIFT;
+               tmp |= SB_ID_NOREF_MASK;
+               /* Volatile store, read concurrently by readers. */
+               WRITE_ONCE(*id, tmp);
+       }
+}
+
+/* No volatile access, since already used locally */
+static inline
+void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
+                             unsigned long *id)
+{
+       if (config->mode == RING_BUFFER_OVERWRITE)
+               *id &= ~SB_ID_NOREF_MASK;
+}
+
+/*
+ * For overwrite mode, cap the number of subbuffers per buffer to:
+ * 2^16 on 32-bit architectures
+ * 2^32 on 64-bit architectures
+ * This is required to fit in the index part of the ID. Return 0 on success,
+ * -EPERM on failure.
+ */
+static inline
+int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
+                            unsigned long num_subbuf)
+{
+       if (config->mode == RING_BUFFER_OVERWRITE)
+               return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
+       else
+               return 0;
+}
+
+static inline
+void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lib_ring_buffer_backend_pages **backend_pages)
+{
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t sbidx, offset = ctx->buf_offset;
+       unsigned long sb_bindex, id;
+       struct lib_ring_buffer_backend_pages *rpages;
+
+       offset &= chanb->buf_size - 1;
+       sbidx = offset >> chanb->subbuf_size_order;
+       id = bufb->buf_wsb[sbidx].id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       rpages = bufb->array[sb_bindex];
+       CHAN_WARN_ON(ctx->chan,
+                    config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, id));
+       *backend_pages = rpages;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lib_ring_buffer_backend_pages *
+       lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer_ctx *ctx)
+{
+       return ctx->backend_pages;
+}
+
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but it is disabled by default due to its performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
+static inline
+void subbuffer_count_record(const struct lib_ring_buffer_config *config,
+                           struct lib_ring_buffer_backend *bufb,
+                           unsigned long idx)
+{
+       unsigned long sb_bindex;
+
+       sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
+       v_inc(config, &bufb->array[sb_bindex]->records_commit);
+}
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static inline
+void subbuffer_count_record(const struct lib_ring_buffer_config *config,
+                           struct lib_ring_buffer_backend *bufb,
+                           unsigned long idx)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
+
+/*
+ * Reader has exclusive subbuffer access for record consumption. No need to
+ * perform the decrement atomically.
+ */
+static inline
+void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
+                             struct lib_ring_buffer_backend *bufb)
+{
+       unsigned long sb_bindex;
+
+       sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+       CHAN_WARN_ON(bufb->chan,
+                    !v_read(config, &bufb->array[sb_bindex]->records_unread));
+       /* Non-atomic decrement protected by exclusive subbuffer access */
+       _v_dec(config, &bufb->array[sb_bindex]->records_unread);
+       v_inc(config, &bufb->records_read);
+}
+
+static inline
+unsigned long subbuffer_get_records_count(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer_backend *bufb,
+                               unsigned long idx)
+{
+       unsigned long sb_bindex;
+
+       sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
+       return v_read(config, &bufb->array[sb_bindex]->records_commit);
+}
+
+/*
+ * Must be executed at subbuffer delivery when the writer has _exclusive_
+ * subbuffer access. See lib_ring_buffer_check_deliver() for details.
+ * lib_ring_buffer_get_records_count() must be called to get the records
+ * count before this function, because it resets the records_commit
+ * count.
+ */
+static inline
+unsigned long subbuffer_count_records_overrun(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer_backend *bufb,
+                               unsigned long idx)
+{
+       struct lib_ring_buffer_backend_pages *pages;
+       unsigned long overruns, sb_bindex;
+
+       sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
+       pages = bufb->array[sb_bindex];
+       overruns = v_read(config, &pages->records_unread);
+       v_set(config, &pages->records_unread,
+             v_read(config, &pages->records_commit));
+       v_set(config, &pages->records_commit, 0);
+
+       return overruns;
+}
+
+static inline
+void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
+                            struct lib_ring_buffer_backend *bufb,
+                            unsigned long idx,
+                            unsigned long data_size)
+{
+       struct lib_ring_buffer_backend_pages *pages;
+       unsigned long sb_bindex;
+
+       sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
+       pages = bufb->array[sb_bindex];
+       pages->data_size = data_size;
+}
+
+static inline
+unsigned long subbuffer_get_read_data_size(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer_backend *bufb)
+{
+       struct lib_ring_buffer_backend_pages *pages;
+       unsigned long sb_bindex;
+
+       sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+       pages = bufb->array[sb_bindex];
+       return pages->data_size;
+}
+
+static inline
+unsigned long subbuffer_get_data_size(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer_backend *bufb,
+                               unsigned long idx)
+{
+       struct lib_ring_buffer_backend_pages *pages;
+       unsigned long sb_bindex;
+
+       sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
+       pages = bufb->array[sb_bindex];
+       return pages->data_size;
+}
+
+static inline
+void subbuffer_inc_packet_count(const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer_backend *bufb,
+                               unsigned long idx)
+{
+       bufb->buf_cnt[idx].seq_cnt++;
+}
+
+/**
+ * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
+ *                               writer.
+ */
+static inline
+void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
+                                struct lib_ring_buffer_backend *bufb,
+                                unsigned long idx)
+{
+       unsigned long id, new_id;
+
+       if (config->mode != RING_BUFFER_OVERWRITE)
+               return;
+
+       /*
+        * Performing a volatile access to read the sb_pages, because we want to
+        * read a coherent version of the pointer and the associated noref flag.
+        */
+       id = READ_ONCE(bufb->buf_wsb[idx].id);
+       for (;;) {
+               /* This check is called on the fast path for each record. */
+               if (likely(!subbuffer_id_is_noref(config, id))) {
+                       /*
+                        * Store after load dependency ordering the writes to
+                        * the subbuffer after load and test of the noref flag
+                        * matches the memory barrier implied by the cmpxchg()
+                        * in update_read_sb_index().
+                        */
+                       return; /* Already writing to this buffer */
+               }
+               new_id = id;
+               subbuffer_id_clear_noref(config, &new_id);
+               new_id = cmpxchg(&bufb->buf_wsb[idx].id, id, new_id);
+               if (likely(new_id == id))
+                       break;
+               id = new_id;
+       }
+}
+
+/**
+ * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
+ *                                    called by writer.
+ */
+static inline
+void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
+                                     struct lib_ring_buffer_backend *bufb,
+                                     unsigned long idx, unsigned long offset)
+{
+       if (config->mode != RING_BUFFER_OVERWRITE)
+               return;
+
+       /*
+        * Because ring_buffer_set_noref() is only called by a single thread
+        * (the one which updated the cc_sb value), there are no concurrent
+        * updates to take care of: other writers have not updated cc_sb, so
+        * they cannot set the noref flag, and concurrent readers cannot modify
+        * the pointer because the noref flag is not set yet.
+        * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
+        * to the subbuffer before this set noref operation.
+        * subbuffer_set_noref() uses a volatile store to deal with concurrent
+        * readers of the noref flag.
+        */
+       CHAN_WARN_ON(bufb->chan,
+                    subbuffer_id_is_noref(config, bufb->buf_wsb[idx].id));
+       /*
+        * Memory barrier that ensures counter stores are ordered before set
+        * noref and offset.
+        */
+       smp_mb();
+       subbuffer_id_set_noref_offset(config, &bufb->buf_wsb[idx].id, offset);
+}
+
+/**
+ * update_read_sb_index - Read-side subbuffer index update.
+ */
+static inline
+int update_read_sb_index(const struct lib_ring_buffer_config *config,
+                        struct lib_ring_buffer_backend *bufb,
+                        struct channel_backend *chanb,
+                        unsigned long consumed_idx,
+                        unsigned long consumed_count)
+{
+       unsigned long old_id, new_id;
+
+       if (config->mode == RING_BUFFER_OVERWRITE) {
+               /*
+                * Exchange the target writer subbuffer with our own unused
+                * subbuffer. No need to use READ_ONCE() here to read the
+                * old_wpage, because the value read will be confirmed by the
+                * following cmpxchg().
+                */
+               old_id = bufb->buf_wsb[consumed_idx].id;
+               if (unlikely(!subbuffer_id_is_noref(config, old_id)))
+                       return -EAGAIN;
+               /*
+                * Make sure the offset count we are expecting matches the one
+                * indicated by the writer.
+                */
+               if (unlikely(!subbuffer_id_compare_offset(config, old_id,
+                                                         consumed_count)))
+                       return -EAGAIN;
+               CHAN_WARN_ON(bufb->chan,
+                            !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
+               subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
+                                             consumed_count);
+               new_id = cmpxchg(&bufb->buf_wsb[consumed_idx].id, old_id,
+                                bufb->buf_rsb.id);
+               if (unlikely(old_id != new_id))
+                       return -EAGAIN;
+               bufb->buf_rsb.id = new_id;
+       } else {
+               /* No page exchange, use the writer page directly */
+               bufb->buf_rsb.id = bufb->buf_wsb[consumed_idx].id;
+       }
+       return 0;
+}
+
+static inline __attribute__((always_inline))
+void lttng_inline_memcpy(void *dest, const void *src,
+               unsigned long len)
+{
+       switch (len) {
+       case 1:
+               *(uint8_t *) dest = *(const uint8_t *) src;
+               break;
+       case 2:
+               *(uint16_t *) dest = *(const uint16_t *) src;
+               break;
+       case 4:
+               *(uint32_t *) dest = *(const uint32_t *) src;
+               break;
+       case 8:
+               *(uint64_t *) dest = *(const uint64_t *) src;
+               break;
+       default:
+               inline_memcpy(dest, src, len);
+       }
+}
+
+/*
+ * Use the architecture-specific memcpy implementation for constant-sized
+ * inputs, but rely on an inline memcpy for lengths not known at compile time.
+ * The function call to memcpy is just way too expensive for a fast path.
+ */
+#define lib_ring_buffer_do_copy(config, dest, src, len)                \
+do {                                                           \
+       size_t __len = (len);                                   \
+       if (__builtin_constant_p(len))                          \
+               memcpy(dest, src, __len);                       \
+       else                                                    \
+               lttng_inline_memcpy(dest, src, __len);          \
+} while (0)
+
+/*
+ * We use __copy_from_user_inatomic to copy userspace data since we already
+ * did the access_ok for the whole range.
+ *
+ * Return 0 if OK, nonzero on error.
+ */
+static inline
+unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
+                                               const void __user *src,
+                                               unsigned long len)
+{
+       return __copy_from_user_inatomic(dest, src, len);
+}
+
+/*
+ * Write @len bytes of value @c to @dest.
+ */
+static inline
+void lib_ring_buffer_do_memset(char *dest, int c,
+       unsigned long len)
+{
+       unsigned long i;
+
+       for (i = 0; i < len; i++)
+               dest[i] = c;
+}
+
+#endif /* _LIB_RING_BUFFER_BACKEND_INTERNAL_H */
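
To make the subbuffer ID layout concrete, this is how the bits fall out on a
64-bit system (BITS_PER_LONG == 64), derived from the defines above:

    /*
     * HALF_ULONG_BITS    = 32
     * SB_ID_OFFSET_SHIFT = 33            -> bits 63..33: offset count
     * SB_ID_NOREF_SHIFT  = 32            -> bit 32: noref flag
     * SB_ID_INDEX_MASK   = 0xffffffffUL  -> bits 31..0: subbuffer index
     */
    unsigned long id;

    id = subbuffer_id(config, 5, 1, 3);         /* overwrite mode */
    /* id == (5UL << 33) | (1UL << 32) | 3 */
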
diff --git a/include/ringbuffer/backend_types.h b/include/ringbuffer/backend_types.h
new file mode 100644
index 0000000..cfeecf4
--- /dev/null
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/backend_types.h
+ *
+ * Ring buffer backend (types).
+ *
+ * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIB_RING_BUFFER_BACKEND_TYPES_H
+#define _LIB_RING_BUFFER_BACKEND_TYPES_H
+
+#include <linux/cpumask.h>
+#include <linux/types.h>
+#include <lttng-kernel-version.h>
+#include <lttng-cpuhotplug.h>
+
+struct lib_ring_buffer_backend_page {
+       void *virt;                     /* page virtual address (cached) */
+       unsigned long pfn;              /* page frame number */
+};
+
+struct lib_ring_buffer_backend_pages {
+       unsigned long mmap_offset;      /* offset of the subbuffer in mmap */
+       union v_atomic records_commit;  /* current records committed count */
+       union v_atomic records_unread;  /* records to read */
+       unsigned long data_size;        /* Amount of data to read from subbuf */
+       struct lib_ring_buffer_backend_page p[];
+};
+
+struct lib_ring_buffer_backend_subbuffer {
+       /* Identifier for subbuf backend pages. Exchanged atomically. */
+       unsigned long id;               /* backend subbuffer identifier */
+};
+
+struct lib_ring_buffer_backend_counts {
+       /*
+        * Counter specific to the sub-buffer location within the ring buffer.
+        * The actual sequence number of the packet within the entire ring
+        * buffer can be derived from the formula nr_subbuffers * seq_cnt +
+        * subbuf_idx.
+        */
+       uint64_t seq_cnt;               /* packet sequence number */
+};
+
+/*
+ * Forward declaration of frontend-specific channel and ring_buffer.
+ */
+struct channel;
+struct lib_ring_buffer;
+
+struct lib_ring_buffer_backend {
+       /* Array of ring_buffer_backend_subbuffer for writer */
+       struct lib_ring_buffer_backend_subbuffer *buf_wsb;
+       /* ring_buffer_backend_subbuffer for reader */
+       struct lib_ring_buffer_backend_subbuffer buf_rsb;
+       /* Array of lib_ring_buffer_backend_counts for the packet counter */
+       struct lib_ring_buffer_backend_counts *buf_cnt;
+       /*
+        * Pointer array of backend pages, for whole buffer.
+        * Indexed by ring_buffer_backend_subbuffer identifier (id) index.
+        */
+       struct lib_ring_buffer_backend_pages **array;
+       unsigned int num_pages_per_subbuf;
+
+       struct channel *chan;           /* Associated channel */
+       int cpu;                        /* This buffer's cpu. -1 if global. */
+       union v_atomic records_read;    /* Number of records read */
+       unsigned int allocated:1;       /* is buffer allocated ? */
+};
+
+struct channel_backend {
+       unsigned long buf_size;         /* Size of the buffer */
+       unsigned long subbuf_size;      /* Sub-buffer size */
+       unsigned int subbuf_size_order; /* Order of sub-buffer size */
+       unsigned int num_subbuf_order;  /*
+                                        * Order of number of sub-buffers/buffer
+                                        * for writer.
+                                        */
+       unsigned int buf_size_order;    /* Order of buffer size */
+       unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? */
+       struct lib_ring_buffer *buf;    /* Channel per-cpu buffers */
+
+       unsigned long num_subbuf;       /* Number of sub-buffers for writer */
+       u64 start_tsc;                  /* Channel creation TSC value */
+       void *priv;                     /* Client-specific information */
+       void *priv_ops;                 /* Client-specific ops pointer */
+       void (*release_priv_ops)(void *priv_ops);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+       struct lttng_cpuhp_node cpuhp_prepare;  /* CPU hotplug prepare */
+#else
+       struct notifier_block cpu_hp_notifier;   /* CPU hotplug notifier */
+#endif
+       /*
+        * We need to copy config because the module containing the
+        * source config can vanish before the last reference to this
+        * channel's streams is released.
+        */
+       struct lib_ring_buffer_config config; /* Ring buffer configuration */
+       cpumask_var_t cpumask;          /* Allocated per-cpu buffers cpumask */
+       char name[NAME_MAX];            /* Channel name */
+};
+
+#endif /* _LIB_RING_BUFFER_BACKEND_TYPES_H */
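
The packet sequence number formula documented in
lib_ring_buffer_backend_counts can be spelled out as code (a sketch;
packet_seq_num() is a hypothetical helper, not part of this commit):

    /*
     * With num_subbuf = 4, the packet written during the third use
     * (seq_cnt = 2) of sub-buffer index 1 gets sequence number
     * 4 * 2 + 1 == 9.
     */
    static inline uint64_t packet_seq_num(uint64_t seq_cnt,
                    unsigned long num_subbuf, unsigned long subbuf_idx)
    {
            return (uint64_t) num_subbuf * seq_cnt + subbuf_idx;
    }
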
diff --git a/include/ringbuffer/config.h b/include/ringbuffer/config.h
new file mode 100644
index 0000000..1b01935
--- /dev/null
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/config.h
+ *
+ * Ring buffer configuration header. Note: after declaring the standard inline
+ * functions, clients should also include ringbuffer/api.h.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIB_RING_BUFFER_CONFIG_H
+#define _LIB_RING_BUFFER_CONFIG_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <lib/align.h>
+#include <lttng-tracer-core.h>
+
+struct lib_ring_buffer;
+struct channel;
+struct lib_ring_buffer_config;
+struct lib_ring_buffer_ctx;
+
+/*
+ * Ring buffer client callbacks. Only used by slow path, never on fast path.
+ * For the fast path, record_header_size(), ring_buffer_clock_read() should be
+ * provided as inline functions too.  These may simply return 0 if not used by
+ * the client.
+ */
+struct lib_ring_buffer_client_cb {
+       /* Mandatory callbacks */
+
+       /* A static inline version is also required for fast path */
+       u64 (*ring_buffer_clock_read) (struct channel *chan);
+       size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
+                                     struct channel *chan, size_t offset,
+                                     size_t *pre_header_padding,
+                                     struct lib_ring_buffer_ctx *ctx,
+                                     void *client_ctx);
+
+       /* Slow path only, at subbuffer switch */
+       size_t (*subbuffer_header_size) (void);
+       void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
+                             unsigned int subbuf_idx);
+       void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
+                           unsigned int subbuf_idx, unsigned long data_size);
+
+       /* Optional callbacks (can be set to NULL) */
+
+       /* Called at buffer creation/finalize */
+       int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
+                             int cpu, const char *name);
+       /*
+        * Clients should guarantee that no new reader handle can be opened
+        * after finalize.
+        */
+       void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);
+
+       /*
+        * Extract header length, payload length and timestamp from event
+        * record. Used by buffer iterators. Timestamp is only used by channel
+        * iterator.
+        */
+       void (*record_get) (const struct lib_ring_buffer_config *config,
+                           struct channel *chan, struct lib_ring_buffer *buf,
+                           size_t offset, size_t *header_len,
+                           size_t *payload_len, u64 *timestamp);
+};
+
+/*
+ * Ring buffer instance configuration.
+ *
+ * Declare as "static const" within the client object to ensure the inline fast
+ * paths can be optimized.
+ *
+ * alloc/sync pairs:
+ *
+ * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
+ *   Per-cpu buffers with per-cpu synchronization. Tracing must be performed
+ *   with preemption disabled (lib_ring_buffer_get_cpu() and
+ *   lib_ring_buffer_put_cpu()).
+ *
+ * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
+ *   Per-cpu buffer with global synchronization. Tracing can be performed with
+ *   preemption enabled, statistically stays on the local buffers.
+ *
+ * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
+ *   Should only be used for buffers belonging to a single thread or protected
+ *   by mutual exclusion by the client. Note that periodical sub-buffer switch
+ *   should be disabled in this kind of configuration.
+ *
+ * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
+ *   Global shared buffer with global synchronization.
+ *
+ * wakeup:
+ *
+ * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu timers to poll the
+ * buffers and wake up readers if data is ready. Mainly useful for tracers which
+ * don't want to call into the wakeup code on the tracing path. Use in
+ * combination with "read_timer_interval" channel_create() argument.
+ *
+ * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
+ * ready to read. Lower latencies before the reader is woken up. Mainly suitable
+ * for drivers.
+ *
+ * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
+ * has the responsibility to perform wakeups.
+ */
+struct lib_ring_buffer_config {
+       enum {
+               RING_BUFFER_ALLOC_PER_CPU,
+               RING_BUFFER_ALLOC_GLOBAL,
+       } alloc;
+       enum {
+               RING_BUFFER_SYNC_PER_CPU,       /* Wait-free */
+               RING_BUFFER_SYNC_GLOBAL,        /* Lock-free */
+       } sync;
+       enum {
+               RING_BUFFER_OVERWRITE,          /* Overwrite when buffer full */
+               RING_BUFFER_DISCARD,            /* Discard when buffer full */
+       } mode;
+       enum {
+               RING_BUFFER_SPLICE,
+               RING_BUFFER_MMAP,
+               RING_BUFFER_READ,               /* TODO */
+               RING_BUFFER_ITERATOR,
+               RING_BUFFER_NONE,
+       } output;
+       enum {
+               RING_BUFFER_PAGE,
+               RING_BUFFER_VMAP,               /* TODO */
+               RING_BUFFER_STATIC,             /* TODO */
+       } backend;
+       enum {
+               RING_BUFFER_NO_OOPS_CONSISTENCY,
+               RING_BUFFER_OOPS_CONSISTENCY,
+       } oops;
+       enum {
+               RING_BUFFER_IPI_BARRIER,
+               RING_BUFFER_NO_IPI_BARRIER,
+       } ipi;
+       enum {
+               RING_BUFFER_WAKEUP_BY_TIMER,    /* wake up performed by timer */
+               RING_BUFFER_WAKEUP_BY_WRITER,   /*
+                                                * writer wakes up reader,
+                                                * not lock-free
+                                                * (takes spinlock).
+                                                */
+       } wakeup;
+       /*
+        * tsc_bits: timestamp bits saved at each record.
+        *   0 and 64 disable the timestamp compression scheme.
+        */
+       unsigned int tsc_bits;
+       struct lib_ring_buffer_client_cb cb;
+};
+
+/*
+ * ring buffer context
+ *
+ * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
+ * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
+ * lib_ring_buffer_write().
+ */
+struct lib_ring_buffer_ctx {
+       /* input received by lib_ring_buffer_reserve(), saved here. */
+       struct channel *chan;           /* channel */
+       void *priv;                     /* client private data */
+       size_t data_size;               /* size of payload */
+       int largest_align;              /*
+                                        * alignment of the largest element
+                                        * in the payload
+                                        */
+       int cpu;                        /* processor id */
+
+       /* output from lib_ring_buffer_reserve() */
+       struct lib_ring_buffer *buf;    /*
+                                        * buffer corresponding to processor id
+                                        * for this channel
+                                        */
+       size_t slot_size;               /* size of the reserved slot */
+       unsigned long buf_offset;       /* offset following the record header */
+       unsigned long pre_offset;       /*
+                                        * Initial offset position _before_
+                                        * the record is written. Positioned
+                                        * prior to record header alignment
+                                        * padding.
+                                        */
+       u64 tsc;                        /* time-stamp counter value */
+       unsigned int rflags;            /* reservation flags */
+       /* Cache backend pages pointer chasing. */
+       struct lib_ring_buffer_backend_pages *backend_pages;
+};
+
+/**
+ * lib_ring_buffer_ctx_init - initialize ring buffer context
+ * @ctx: ring buffer context to initialize
+ * @chan: channel
+ * @priv: client private data
+ * @data_size: size of record data payload. It must be greater than 0.
+ * @largest_align: largest alignment within data payload types
+ * @cpu: processor id
+ */
+static inline
+void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
+                             struct channel *chan, void *priv,
+                             size_t data_size, int largest_align,
+                             int cpu)
+{
+       ctx->chan = chan;
+       ctx->priv = priv;
+       ctx->data_size = data_size;
+       ctx->largest_align = largest_align;
+       ctx->cpu = cpu;
+       ctx->rflags = 0;
+       ctx->backend_pages = NULL;
+}
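+
+/*
+ * Illustrative sketch only (not part of this API): a client reserving
+ * space for a 32-bit payload could initialize the context as follows,
+ * where "cpu" was returned by lib_ring_buffer_get_cpu() and "my_priv"
+ * is hypothetical client private data:
+ *
+ *	struct lib_ring_buffer_ctx ctx;
+ *
+ *	lib_ring_buffer_ctx_init(&ctx, chan, my_priv, sizeof(uint32_t),
+ *				 sizeof(uint32_t), cpu);
+ */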
+
+/*
+ * Reservation flags.
+ *
+ * RING_BUFFER_RFLAG_FULL_TSC
+ *
+ * This flag is passed to record_header_size() and to the primitive used to
+ * write the record header. It indicates that the full 64-bit time value is
+ * needed in the record header. If this flag is not set, the record header
+ * needs only to contain "tsc_bits" bits of the time value.
+ *
+ * Reservation flags can be added by the client, starting from
+ * "(RING_BUFFER_RFLAG_END << 0)". They can be used to pass information from
+ * record_header_size() to lib_ring_buffer_write_record_header().
+ */
+#define        RING_BUFFER_RFLAG_FULL_TSC              (1U << 0)
+#define RING_BUFFER_RFLAG_END                  (1U << 1)
+
+#ifndef LTTNG_TRACER_CORE_H
+#error "lttng-tracer-core.h is needed for RING_BUFFER_ALIGN define"
+#endif
+
+/*
+ * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
+ * compile-time. We have to duplicate the "config->align" information and the
+ * definition here because config->align is used both in the slow and fast
+ * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
+ */
+#ifdef RING_BUFFER_ALIGN
+
+# define RING_BUFFER_ALIGN_ATTR                /* Default arch alignment */
+
+/*
+ * Calculate the offset needed to align the type.
+ * size_of_type must be non-zero.
+ */
+static inline
+unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
+{
+       return offset_align(align_drift, size_of_type);
+}
+
+#else
+
+# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
+
+/*
+ * Calculate the offset needed to align the type.
+ * size_of_type must be non-zero.
+ */
+static inline
+unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
+{
+       return 0;
+}
+
+#endif
+
+/**
+ * lib_ring_buffer_align_ctx - Align context offset on "alignment"
+ * @ctx: ring buffer context.
+ * @alignment: alignment (in bytes) to apply to the context buffer offset.
+ */
+static inline
+void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
+                          size_t alignment)
+{
+       ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
+                                                alignment);
+}
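+
+/*
+ * Sketch (assuming a payload made of a u8 followed by a u64, and a client
+ * built with RING_BUFFER_ALIGN): the context offset is re-aligned before
+ * writing the second field, so the u64 lands on a naturally aligned offset:
+ *
+ *	lib_ring_buffer_write(config, ctx, &u8_val, sizeof(u8_val));
+ *	lib_ring_buffer_align_ctx(ctx, __alignof__(u64_val));
+ *	lib_ring_buffer_write(config, ctx, &u64_val, sizeof(u64_val));
+ */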
+
+/*
+ * lib_ring_buffer_check_config() returns 0 on success.
+ * Used internally to check for valid configurations at channel creation.
+ */
+static inline
+int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
+                            unsigned int switch_timer_interval,
+                            unsigned int read_timer_interval)
+{
+       if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
+           && config->sync == RING_BUFFER_SYNC_PER_CPU
+           && switch_timer_interval)
+               return -EINVAL;
+       return 0;
+}
+
+#include <ringbuffer/vatomic.h>
+
+#endif /* _LIB_RING_BUFFER_CONFIG_H */
diff --git a/include/ringbuffer/frontend.h b/include/ringbuffer/frontend.h
new file mode 100644 (file)
index 0000000..41fec2a
--- /dev/null
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/frontend.h
+ *
+ * Ring Buffer Library Synchronization Header (API).
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * See ring_buffer_frontend.c for more information on wait-free algorithms.
+ */
+
+#ifndef _LIB_RING_BUFFER_FRONTEND_H
+#define _LIB_RING_BUFFER_FRONTEND_H
+
+#include <linux/pipe_fs_i.h>
+#include <linux/rcupdate.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/splice.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/cache.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/cpu.h>
+#include <linux/fs.h>
+
+#include <asm/atomic.h>
+#include <asm/local.h>
+
+/* Internal helpers */
+#include <ringbuffer/frontend_internal.h>
+
+/* Max ring buffer nesting count, see lib_ring_buffer_get_cpu(). */
+#define RING_BUFFER_MAX_NESTING 4
+
+/* Buffer creation/removal and setup operations */
+
+/*
+ * switch_timer_interval is the time interval (in us) to fill sub-buffers with
+ * padding to let readers get those sub-buffers.  Used for live streaming.
+ *
+ * read_timer_interval is the time interval (in us) to wake up pending readers.
+ *
+ * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
+ * address mapping. It is used only by the RING_BUFFER_STATIC configuration. It
+ * can be set to NULL for other backends.
+ */
+
+extern
+struct channel *channel_create(const struct lib_ring_buffer_config *config,
+                              const char *name, void *priv,
+                              void *buf_addr,
+                              size_t subbuf_size, size_t num_subbuf,
+                              unsigned int switch_timer_interval,
+                              unsigned int read_timer_interval);
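+
+/*
+ * Example call (sketch; the sizes, timer values, "client_config" and
+ * "my_priv" below are arbitrary illustrations): create a channel of
+ * 8 sub-buffers of 64 kB each, with a 200 us read timer, no switch
+ * timer, and the page backend (buf_addr == NULL):
+ *
+ *	struct channel *chan;
+ *
+ *	chan = channel_create(&client_config, "my-chan", my_priv, NULL,
+ *			      65536, 8, 0, 200);
+ *	if (!chan)
+ *		return -ENOMEM;
+ */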
+
+/*
+ * channel_destroy returns the private data pointer. It finalizes all of the
+ * channel's buffers, waits for readers to release all references, and destroys
+ * the channel.
+ */
+extern
+void *channel_destroy(struct channel *chan);
+
+
+/* Buffer read operations */
+
+/*
+ * Iteration on the channel cpumask needs to issue a read barrier to match the
+ * write barrier in CPU hotplug. It orders the cpumask read before the read of
+ * per-cpu buffer data. The per-cpu buffer is never removed by CPU hotplug;
+ * teardown is only performed at channel destruction.
+ */
+#define for_each_channel_cpu(cpu, chan)                                        \
+       for ((cpu) = -1;                                                \
+               ({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask);  \
+                  smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });)
+
+extern struct lib_ring_buffer *channel_get_ring_buffer(
+                               const struct lib_ring_buffer_config *config,
+                               struct channel *chan, int cpu);
+extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
+extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);
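+
+/*
+ * Example (sketch): iterating over all per-cpu buffers of a channel and
+ * opening each of them for reading:
+ *
+ *	int cpu;
+ *
+ *	for_each_channel_cpu(cpu, chan) {
+ *		struct lib_ring_buffer *buf =
+ *			channel_get_ring_buffer(config, chan, cpu);
+ *
+ *		if (!lib_ring_buffer_open_read(buf)) {
+ *			... use buf, then lib_ring_buffer_release_read() ...
+ *		}
+ *	}
+ */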
+
+/*
+ * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
+ */
+extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
+                                   unsigned long *consumed,
+                                   unsigned long *produced);
+extern int lib_ring_buffer_snapshot_sample_positions(
+                                   struct lib_ring_buffer *buf,
+                                   unsigned long *consumed,
+                                   unsigned long *produced);
+extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
+                                         unsigned long consumed_new);
+
+extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
+                                     unsigned long consumed);
+extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);
+
+void lib_ring_buffer_set_quiescent_channel(struct channel *chan);
+void lib_ring_buffer_clear_quiescent_channel(struct channel *chan);
+
+/*
+ * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
+ * to read sub-buffers sequentially.
+ */
+static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
+{
+       int ret;
+
+       ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
+                                      &buf->prod_snapshot);
+       if (ret)
+               return ret;
+       ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot);
+       return ret;
+}
+
+static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
+{
+       lib_ring_buffer_put_subbuf(buf);
+       lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
+                                                   buf->backend.chan));
+}
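+
+/*
+ * Typical sequential read loop (sketch, error handling elided): grab the
+ * next sub-buffer, consume its contents through the backend, release it,
+ * and stop when no more data is available:
+ *
+ *	for (;;) {
+ *		if (lib_ring_buffer_get_next_subbuf(buf))
+ *			break;
+ *		... read the sub-buffer through the backend ...
+ *		lib_ring_buffer_put_next_subbuf(buf);
+ *	}
+ */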
+
+extern void channel_reset(struct channel *chan);
+extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);
+
+static inline
+unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
+                                        struct lib_ring_buffer *buf)
+{
+       return v_read(config, &buf->offset);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
+                                          struct lib_ring_buffer *buf)
+{
+       return atomic_long_read(&buf->consumed);
+}
+
+/*
+ * Must call lib_ring_buffer_is_finalized before reading counters (memory
+ * ordering enforced with respect to trace teardown).
+ */
+static inline
+int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
+                                struct lib_ring_buffer *buf)
+{
+       int finalized = READ_ONCE(buf->finalized);
+       /*
+        * Read finalized before counters.
+        */
+       smp_rmb();
+       return finalized;
+}
+
+static inline
+int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
+{
+       return chan->finalized;
+}
+
+static inline
+int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
+{
+       return atomic_read(&chan->record_disabled);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_read_data_size(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer *buf)
+{
+       return subbuffer_get_read_data_size(config, &buf->backend);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_count(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer *buf)
+{
+       return v_read(config, &buf->records_count);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_overrun(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer *buf)
+{
+       return v_read(config, &buf->records_overrun);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_lost_full(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer *buf)
+{
+       return v_read(config, &buf->records_lost_full);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_lost_wrap(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer *buf)
+{
+       return v_read(config, &buf->records_lost_wrap);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_lost_big(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer *buf)
+{
+       return v_read(config, &buf->records_lost_big);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_read(
+                               const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer *buf)
+{
+       return v_read(config, &buf->backend.records_read);
+}
+
+#endif /* _LIB_RING_BUFFER_FRONTEND_H */
diff --git a/include/ringbuffer/frontend_api.h b/include/ringbuffer/frontend_api.h
new file mode 100644 (file)
index 0000000..3fa6c82
--- /dev/null
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/frontend_api.h
+ *
+ * Ring Buffer Library Synchronization Header (buffer write API).
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * See ring_buffer_frontend.c for more information on wait-free algorithms.
+ * See ringbuffer/frontend.h for channel allocation and read-side API.
+ */
+
+#ifndef _LIB_RING_BUFFER_FRONTEND_API_H
+#define _LIB_RING_BUFFER_FRONTEND_API_H
+
+#include <ringbuffer/frontend.h>
+#include <wrapper/percpu-defs.h>
+#include <linux/errno.h>
+#include <linux/prefetch.h>
+
+/**
+ * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
+ *
+ * Disables preemption (acts as an RCU read-side critical section) and keeps a
+ * ring buffer nesting count as a supplementary safety net to ensure tracer
+ * client code will never trigger an endless recursion. Returns the processor
+ * ID on success, -EPERM on failure (nesting count too high).
+ *
+ * asm volatile and "memory" clobber prevent the compiler from moving
+ * instructions out of the ring buffer nesting count. This is required to ensure
+ * that probe side-effects which can cause recursion (e.g. unforeseen traps,
+ * divisions by 0, ...) are triggered within the incremented nesting count
+ * section.
+ */
+static inline
+int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
+{
+       int cpu, nesting;
+
+       rcu_read_lock_sched_notrace();
+       cpu = smp_processor_id();
+       nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
+       barrier();
+
+       if (unlikely(nesting > RING_BUFFER_MAX_NESTING)) {
+               WARN_ON_ONCE(1);
+               per_cpu(lib_ring_buffer_nesting, cpu)--;
+               rcu_read_unlock_sched_notrace();
+               return -EPERM;
+       } else
+               return cpu;
+}
+
+/**
+ * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
+ */
+static inline
+void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
+{
+       barrier();
+       (*lttng_this_cpu_ptr(&lib_ring_buffer_nesting))--;
+       rcu_read_unlock_sched_notrace();
+}
+
+/*
+ * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
+ * part of the API per se.
+ *
+ * Returns 0 if the reservation succeeds, or 1 if the slow path must be taken.
+ */
+static inline
+int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer_ctx *ctx,
+                               void *client_ctx,
+                               unsigned long *o_begin, unsigned long *o_end,
+                               unsigned long *o_old, size_t *before_hdr_pad)
+{
+       struct channel *chan = ctx->chan;
+       struct lib_ring_buffer *buf = ctx->buf;
+
+       *o_begin = v_read(config, &buf->offset);
+       *o_old = *o_begin;
+
+       ctx->tsc = lib_ring_buffer_clock_read(chan);
+       if ((int64_t) ctx->tsc == -EIO)
+               return 1;
+
+       /*
+        * Prefetch the cacheline for reading, because we have to read the
+        * previous commit counter to increment it, and the commit seq value to
+        * compare it to the commit counter.
+        */
+       prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
+
+       if (last_tsc_overflow(config, buf, ctx->tsc))
+               ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+
+       if (unlikely(subbuf_offset(*o_begin, chan) == 0))
+               return 1;
+
+       ctx->slot_size = record_header_size(config, chan, *o_begin,
+                                           before_hdr_pad, ctx, client_ctx);
+       ctx->slot_size +=
+               lib_ring_buffer_align(*o_begin + ctx->slot_size,
+                                     ctx->largest_align) + ctx->data_size;
+       if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+                    > chan->backend.subbuf_size))
+               return 1;
+
+       /*
+        * Record fits in the current buffer and we are not on a switch
+        * boundary. It's safe to write.
+        */
+       *o_end = *o_begin + ctx->slot_size;
+
+       if (unlikely((subbuf_offset(*o_end, chan)) == 0))
+               /*
+                * The offset_end will fall at the very beginning of the next
+                * subbuffer.
+                */
+               return 1;
+
+       return 0;
+}
+
+/**
+ * lib_ring_buffer_reserve - Reserve space in a ring buffer.
+ * @config: ring buffer instance configuration.
+ * @ctx: ring buffer context (input and output). Must already be initialized.
+ *
+ * Atomic wait-free slot reservation. The reserved space starts at the context
+ * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
+ *
+ * Return:
+ *  0 on success.
+ * -EAGAIN if channel is disabled.
+ * -ENOSPC if event size is too large for packet.
+ * -ENOBUFS if there is currently not enough space in buffer for the event.
+ * -EIO if data cannot be written into the buffer for any other reason.
+ */
+
+static inline
+int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
+                           struct lib_ring_buffer_ctx *ctx,
+                           void *client_ctx)
+{
+       struct channel *chan = ctx->chan;
+       struct lib_ring_buffer *buf;
+       unsigned long o_begin, o_end, o_old;
+       size_t before_hdr_pad = 0;
+
+       if (unlikely(atomic_read(&chan->record_disabled)))
+               return -EAGAIN;
+
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
+       else
+               buf = chan->backend.buf;
+       if (unlikely(atomic_read(&buf->record_disabled)))
+               return -EAGAIN;
+       ctx->buf = buf;
+
+       /*
+        * Perform retryable operations.
+        */
+       if (unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
+                                                &o_end, &o_old, &before_hdr_pad)))
+               goto slow_path;
+
+       if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+                    != o_old))
+               goto slow_path;
+
+       /*
+        * Atomically update last_tsc. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary full TSC
+        * record headers, never the opposite (missing a full TSC record header
+        * when it would be needed).
+        */
+       save_last_tsc(config, ctx->buf, ctx->tsc);
+
+       /*
+        * Push the reader if necessary
+        */
+       lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);
+
+       /*
+        * Clear noref flag for this subbuffer.
+        */
+       lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
+                               subbuf_index(o_end - 1, chan));
+
+       ctx->pre_offset = o_begin;
+       ctx->buf_offset = o_begin + before_hdr_pad;
+       return 0;
+slow_path:
+       return lib_ring_buffer_reserve_slow(ctx, client_ctx);
+}
+
+/**
+ * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
+ * @config: ring buffer instance configuration.
+ * @buf: buffer
+ * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
+ *
+ * This operation is completely reentrant: it can be called while tracing is
+ * active with absolutely no lock held.
+ *
+ * Note, however, that since a v_cmpxchg is used for some atomic operations and
+ * must be executed locally for per-CPU buffers, this function must be called
+ * from the CPU which owns the buffer for an ACTIVE flush, with preemption
+ * disabled, in the RING_BUFFER_SYNC_PER_CPU configuration.
+ */
+static inline
+void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
+                           struct lib_ring_buffer *buf, enum switch_mode mode)
+{
+       lib_ring_buffer_switch_slow(buf, mode);
+}
+
+/**
+ * lib_ring_buffer_commit - Commit a record.
+ * @config: ring buffer instance configuration.
+ * @ctx: ring buffer context (input arguments only).
+ *
+ * Atomic unordered slot commit. Increments the commit count in the
+ * specified sub-buffer, and delivers it if necessary.
+ */
+static inline
+void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
+                           const struct lib_ring_buffer_ctx *ctx)
+{
+       struct channel *chan = ctx->chan;
+       struct lib_ring_buffer *buf = ctx->buf;
+       unsigned long offset_end = ctx->buf_offset;
+       unsigned long endidx = subbuf_index(offset_end - 1, chan);
+       unsigned long commit_count;
+       struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];
+
+       /*
+        * Must count the record before incrementing the commit count.
+        */
+       subbuffer_count_record(config, &buf->backend, endidx);
+
+       /*
+        * Order all writes to buffer before the commit count update that will
+        * determine that the subbuffer is full.
+        */
+       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
+               /*
+                * Must write slot data before incrementing commit count.  This
+                * compiler barrier is upgraded into a smp_mb() by the IPI sent
+                * by get_subbuf().
+                */
+               barrier();
+       } else
+               smp_wmb();
+
+       v_add(config, ctx->slot_size, &cc_hot->cc);
+
+       /*
+        * The commit count read can race with concurrent OOO commit count
+        * updates. This is only needed for lib_ring_buffer_check_deliver (for
+        * non-polling delivery only) and for
+        * lib_ring_buffer_write_commit_counter. The race can only cause the
+        * counter to be read with the same value more than once, which could
+        * cause:
+        * - Multiple delivery for the same sub-buffer (which is handled
+        *   gracefully by the reader code) if the value is for a full
+        *   sub-buffer. It's important that we can never miss a sub-buffer
+        *   delivery. Re-reading the value after the v_add ensures this.
+        * - Reading a commit_count with a higher value than what was actually
+        *   added to it for the lib_ring_buffer_write_commit_counter call
+        *   (again caused by a concurrent committer). It does not matter,
+        *   because this function is interested in the fact that the commit
+        *   count reaches back the reserve offset for a specific sub-buffer,
+        *   which is completely independent of the order.
+        */
+       commit_count = v_read(config, &cc_hot->cc);
+
+       lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
+                                     commit_count, endidx, ctx->tsc);
+       /*
+        * Update the used size at each commit. It's needed only for extracting
+        * ring_buffer buffers from a vmcore, after a crash.
+        */
+       lib_ring_buffer_write_commit_counter(config, buf, chan,
+                       offset_end, commit_count, cc_hot);
+}
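+
+/*
+ * Putting reserve and commit together (illustrative sketch; "payload" and
+ * "client_config" are hypothetical, and a real client would also write its
+ * record header between reserve and payload write):
+ *
+ *	struct lib_ring_buffer_ctx ctx;
+ *	int cpu, ret;
+ *
+ *	cpu = lib_ring_buffer_get_cpu(&client_config);
+ *	if (cpu < 0)
+ *		return;
+ *	lib_ring_buffer_ctx_init(&ctx, chan, NULL, sizeof(payload),
+ *				 __alignof__(payload), cpu);
+ *	ret = lib_ring_buffer_reserve(&client_config, &ctx, NULL);
+ *	if (ret)
+ *		goto put;
+ *	lib_ring_buffer_write(&client_config, &ctx, &payload,
+ *			      sizeof(payload));
+ *	lib_ring_buffer_commit(&client_config, &ctx);
+ * put:
+ *	lib_ring_buffer_put_cpu(&client_config);
+ */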
+
+/**
+ * lib_ring_buffer_try_discard_reserve - Try discarding a record.
+ * @config: ring buffer instance configuration.
+ * @ctx: ring buffer context (input arguments only).
+ *
+ * Only succeeds if no other record has been written after the record to
+ * discard. If discard fails, the record must be committed to the buffer.
+ *
+ * Returns 0 upon success, -EPERM if the record cannot be discarded.
+ */
+static inline
+int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
+                                       const struct lib_ring_buffer_ctx *ctx)
+{
+       struct lib_ring_buffer *buf = ctx->buf;
+       unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
+
+       /*
+        * We need to ensure that if the cmpxchg succeeds and discards the
+        * record, the next record will record a full TSC, because it cannot
+        * rely on the last_tsc associated with the discarded record to detect
+        * overflows. The only way to ensure this is to set the last_tsc to 0
+        * (assuming no 64-bit TSC overflow), which forces writing a 64-bit
+        * timestamp in the next record.
+        *
+        * Note: if discard fails, we must leave the TSC in the record header.
+        * It is needed to keep track of TSC overflows for the following
+        * records.
+        */
+       save_last_tsc(config, buf, 0ULL);
+
+       if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+                  != end_offset))
+               return -EPERM;
+       else
+               return 0;
+}
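+
+/*
+ * Discard pattern sketch (the "filtered_out" predicate is hypothetical):
+ * a client which decides after reservation that the record should be
+ * dropped attempts the discard, and must commit when the discard fails:
+ *
+ *	if (!filtered_out ||
+ *	    lib_ring_buffer_try_discard_reserve(&client_config, &ctx))
+ *		lib_ring_buffer_commit(&client_config, &ctx);
+ *	lib_ring_buffer_put_cpu(&client_config);
+ */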
+
+static inline
+void channel_record_disable(const struct lib_ring_buffer_config *config,
+                           struct channel *chan)
+{
+       atomic_inc(&chan->record_disabled);
+}
+
+static inline
+void channel_record_enable(const struct lib_ring_buffer_config *config,
+                          struct channel *chan)
+{
+       atomic_dec(&chan->record_disabled);
+}
+
+static inline
+void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
+                                   struct lib_ring_buffer *buf)
+{
+       atomic_inc(&buf->record_disabled);
+}
+
+static inline
+void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
+                                  struct lib_ring_buffer *buf)
+{
+       atomic_dec(&buf->record_disabled);
+}
+
+#endif /* _LIB_RING_BUFFER_FRONTEND_API_H */
diff --git a/include/ringbuffer/frontend_internal.h b/include/ringbuffer/frontend_internal.h
new file mode 100644 (file)
index 0000000..723656b
--- /dev/null
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/frontend_internal.h
+ *
+ * Ring Buffer Library Synchronization Header (internal helpers).
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * See ring_buffer_frontend.c for more information on wait-free algorithms.
+ */
+
+#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
+#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
+
+#include <ringbuffer/config.h>
+#include <ringbuffer/backend_types.h>
+#include <ringbuffer/frontend_types.h>
+#include <lib/prio_heap/lttng_prio_heap.h>     /* For per-CPU read-side iterator */
+
+/* Buffer offset macros */
+
+/* buf_trunc mask selects only the buffer number. */
+static inline
+unsigned long buf_trunc(unsigned long offset, struct channel *chan)
+{
+       return offset & ~(chan->backend.buf_size - 1);
+}
+
+/* Select the buffer number value (counter). */
+static inline
+unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
+{
+       return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
+}
+
+/* buf_offset mask selects only the offset within the current buffer. */
+static inline
+unsigned long buf_offset(unsigned long offset, struct channel *chan)
+{
+       return offset & (chan->backend.buf_size - 1);
+}
+
+/* subbuf_offset mask selects the offset within the current subbuffer. */
+static inline
+unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
+{
+       return offset & (chan->backend.subbuf_size - 1);
+}
+
+/* subbuf_trunc mask selects the subbuffer number. */
+static inline
+unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
+{
+       return offset & ~(chan->backend.subbuf_size - 1);
+}
+
+/* subbuf_align aligns the offset to the next subbuffer. */
+static inline
+unsigned long subbuf_align(unsigned long offset, struct channel *chan)
+{
+       return (offset + chan->backend.subbuf_size)
+              & ~(chan->backend.subbuf_size - 1);
+}
+
+/* subbuf_index returns the index of the current subbuffer within the buffer. */
+static inline
+unsigned long subbuf_index(unsigned long offset, struct channel *chan)
+{
+       return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
+}
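+
+/*
+ * Worked example of the offset helpers above, assuming subbuf_size = 4096
+ * and num_subbuf = 4 (hence buf_size = 16384), for offset = 20500:
+ *
+ *	buf_trunc()     = 16384  (20500 rounded down to a buf_size multiple)
+ *	buf_trunc_val() =     1  (16384 >> 14)
+ *	buf_offset()    =  4116  (20500 mod 16384)
+ *	subbuf_offset() =    20  (20500 mod 4096)
+ *	subbuf_trunc()  = 20480  (20500 rounded down to a subbuf_size multiple)
+ *	subbuf_align()  = 24576  (start of the next sub-buffer)
+ *	subbuf_index()  =     1  (4116 >> 12)
+ */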
+
+/*
+ * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
+ * bits from the last TSC read. When overflows are detected, the full 64-bit
+ * timestamp counter should be written in the record header. Reads and writes
+ * last_tsc atomically.
+ */
+
+#if (BITS_PER_LONG == 32)
+static inline
+void save_last_tsc(const struct lib_ring_buffer_config *config,
+                  struct lib_ring_buffer *buf, u64 tsc)
+{
+       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+               return;
+
+       /*
+        * Ensure the compiler performs this update in a single instruction.
+        */
+       v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
+}
+
+static inline
+int last_tsc_overflow(const struct lib_ring_buffer_config *config,
+                     struct lib_ring_buffer *buf, u64 tsc)
+{
+       unsigned long tsc_shifted;
+
+       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+               return 0;
+
+       tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
+       if (unlikely(tsc_shifted
+                    - (unsigned long)v_read(config, &buf->last_tsc)))
+               return 1;
+       else
+               return 0;
+}
+#else
+static inline
+void save_last_tsc(const struct lib_ring_buffer_config *config,
+                  struct lib_ring_buffer *buf, u64 tsc)
+{
+       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+               return;
+
+       v_set(config, &buf->last_tsc, (unsigned long)tsc);
+}
+
+static inline
+int last_tsc_overflow(const struct lib_ring_buffer_config *config,
+                     struct lib_ring_buffer *buf, u64 tsc)
+{
+       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+               return 0;
+
+       if (unlikely((tsc - v_read(config, &buf->last_tsc))
+                    >> config->tsc_bits))
+               return 1;
+       else
+               return 0;
+}
+#endif
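+
+/*
+ * Worked example (illustrative value, not mandated by this header): with
+ * config->tsc_bits = 27, a record header normally stores only the low
+ * 27 bits of the timestamp. last_tsc tracks the high-order bits (shifted
+ * by tsc_bits on 32-bit architectures, stored whole on 64-bit). Whenever
+ * two consecutive records differ in those high-order bits,
+ * last_tsc_overflow() returns 1 and the writer sets
+ * RING_BUFFER_RFLAG_FULL_TSC so the full 64-bit timestamp is written in
+ * the record header.
+ */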
+
+extern
+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+               void *client_ctx);
+
+extern
+void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
+                                enum switch_mode mode);
+
+extern
+void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
+                                  struct lib_ring_buffer *buf,
+                                  struct channel *chan,
+                                  unsigned long offset,
+                                  unsigned long commit_count,
+                                  unsigned long idx,
+                                  u64 tsc);
+
+extern
+void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
+extern
+void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);
+extern
+void lib_ring_buffer_clear(struct lib_ring_buffer *buf);
+
+/* Buffer write helpers */
+
+static inline
+void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
+                                        struct channel *chan,
+                                        unsigned long offset)
+{
+       unsigned long consumed_old, consumed_new;
+
+       do {
+               consumed_old = atomic_long_read(&buf->consumed);
+               /*
+                * If buffer is in overwrite mode, push the reader consumed
+                * count if the write position has reached it and we are not
+                * at the first iteration (don't push the reader farther than
+                * the writer). This operation can be done concurrently by many
+                * writers in the same buffer; the writer at the farthest write
+                * position in the buffer is the one which wins this loop.
+                */
+               if (unlikely(subbuf_trunc(offset, chan)
+                             - subbuf_trunc(consumed_old, chan)
+                            >= chan->backend.buf_size))
+                       consumed_new = subbuf_align(consumed_old, chan);
+               else
+                       return;
+       } while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
+                                             consumed_new) != consumed_old));
+}
+
+/*
+ * Move consumed position to the beginning of subbuffer in which the
+ * write offset is. Should only be used on ring buffers that are not
+ * actively being written into, because clear_reader does not take into
+ * account the commit counters when moving the consumed position, which
+ * can make concurrent trace producers or consumers observe a consumed
+ * position beyond the write offset, which breaks ring buffer
+ * algorithm guarantees.
+ */
+static inline
+void lib_ring_buffer_clear_reader(struct lib_ring_buffer *buf,
+                                 struct channel *chan)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long offset, consumed_old, consumed_new;
+
+       do {
+               offset = v_read(config, &buf->offset);
+               consumed_old = atomic_long_read(&buf->consumed);
+               CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
+                               - subbuf_trunc(consumed_old, chan))
+                               < 0);
+               consumed_new = subbuf_trunc(offset, chan);
+       } while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
+                                             consumed_new) != consumed_old));
+}
+
+static inline
+int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
+                                struct lib_ring_buffer *buf,
+                                struct channel *chan)
+{
+       return !!subbuf_offset(v_read(config, &buf->offset), chan);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
+                                           struct lib_ring_buffer *buf,
+                                           unsigned long idx)
+{
+       return subbuffer_get_data_size(config, &buf->backend, idx);
+}
+
+/*
+ * Check if all space reservations in a buffer have been committed. This helps
+ * knowing whether an execution context is nested (for per-cpu buffers only).
+ * This is a very specific ftrace use-case, so we keep this as an "internal" API.
+ */
+static inline
+int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
+                                     struct lib_ring_buffer *buf,
+                                     struct channel *chan)
+{
+       unsigned long offset, idx, commit_count;
+
+       CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
+       CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
+
+       /*
+        * Read offset and commit count in a loop so they are both read
+        * atomically wrt interrupts. We deal with interrupt concurrency by
+        * restarting both reads if the offset has been pushed. Note that given
+        * we only have to deal with interrupt concurrency here, an interrupt
+        * modifying the commit count will also modify "offset", so it is safe
+        * to only check for offset modifications.
+        */
+       do {
+               offset = v_read(config, &buf->offset);
+               idx = subbuf_index(offset, chan);
+               commit_count = v_read(config, &buf->commit_hot[idx].cc);
+       } while (offset != v_read(config, &buf->offset));
+
+       return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
+                    - (commit_count & chan->commit_count_mask) == 0);
+}
+
+/*
+ * Receive the end-of-subbuffer TSC as a parameter. It has been read in the
+ * space reservation loop of either reserve or switch, which ensures it
+ * progresses monotonically with event records in the buffer. Therefore,
+ * it ensures that the end timestamp of a sub-buffer is <= the begin
+ * timestamp of the following sub-buffers.
+ */
+static inline
+void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
+                                  struct lib_ring_buffer *buf,
+                                  struct channel *chan,
+                                  unsigned long offset,
+                                  unsigned long commit_count,
+                                  unsigned long idx,
+                                  u64 tsc)
+{
+       unsigned long old_commit_count = commit_count
+                                        - chan->backend.subbuf_size;
+
+       /* Check if all commits have been done */
+       if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
+                    - (old_commit_count & chan->commit_count_mask) == 0))
+               lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
+                       commit_count, idx, tsc);
+}
+
+/*
+ * lib_ring_buffer_write_commit_counter
+ *
+ * For flight recording. Must be called after the commit.
+ * This function increments the sub-buffer's commit_seq counter each time the
+ * commit count reaches back the reserve offset (modulo subbuffer size). It is
+ * useful for crash dumps.
+ */
+static inline
+void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
+                                         struct lib_ring_buffer *buf,
+                                         struct channel *chan,
+                                         unsigned long buf_offset,
+                                         unsigned long commit_count,
+                                         struct commit_counters_hot *cc_hot)
+{
+       unsigned long commit_seq_old;
+
+       if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
+               return;
+
+       /*
+        * subbuf_offset includes commit_count_mask. We can simply
+        * compare the offsets within the subbuffer without caring about
+        * buffer full/empty mismatch because offset is never zero here
+        * (subbuffer header and record headers have non-zero length).
+        */
+       if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
+               return;
+
+       commit_seq_old = v_read(config, &cc_hot->seq);
+       if (likely((long) (commit_seq_old - commit_count) < 0))
+               v_set(config, &cc_hot->seq, commit_count);
+}
+
+extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+                                 struct channel_backend *chanb, int cpu);
+extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
+
+/* Keep track of trap nesting inside ring buffer code */
+DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
+
+#endif /* _LIB_RING_BUFFER_FRONTEND_INTERNAL_H */
diff --git a/include/ringbuffer/frontend_types.h b/include/ringbuffer/frontend_types.h
new file mode 100644 (file)
index 0000000..b04c085
--- /dev/null
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/frontend_types.h
+ *
+ * Ring Buffer Library Synchronization Header (types).
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * See ring_buffer_frontend.c for more information on wait-free algorithms.
+ */
+
+#ifndef _LIB_RING_BUFFER_FRONTEND_TYPES_H
+#define _LIB_RING_BUFFER_FRONTEND_TYPES_H
+
+#include <linux/kref.h>
+#include <ringbuffer/config.h>
+#include <ringbuffer/backend_types.h>
+#include <lib/prio_heap/lttng_prio_heap.h>     /* For per-CPU read-side iterator */
+#include <lttng-cpuhotplug.h>
+
+/*
+ * A switch is done during tracing or as a final flush after tracing (so it
+ * won't write in the new sub-buffer).
+ */
+enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
+
+/* channel-level read-side iterator */
+struct channel_iter {
+       /* Prio heap of buffers. Lowest timestamps at the top. */
+       struct lttng_ptr_heap heap;     /* Heap of struct lib_ring_buffer ptrs */
+       struct list_head empty_head;    /* Empty buffers linked-list head */
+       int read_open;                  /* Opened for reading? */
+       u64 last_qs;                    /* Last quiescent state timestamp */
+       u64 last_timestamp;             /* Last timestamp (for WARN_ON) */
+       int last_cpu;                   /* Last timestamp cpu */
+       /*
+        * read() file operation state.
+        */
+       unsigned long len_left;
+};
+
+/* channel: collection of per-cpu ring buffers. */
+struct channel {
+       atomic_t record_disabled;
+       unsigned long commit_count_mask;        /*
+                                                * Commit count mask, removing
+                                                * the MSBs corresponding to
+                                                * bits used to represent the
+                                                * subbuffer index.
+                                                */
+
+       struct channel_backend backend;         /* Associated backend */
+
+       unsigned long switch_timer_interval;    /* Buffer flush (jiffies) */
+       unsigned long read_timer_interval;      /* Reader wakeup (jiffies) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+       struct lttng_cpuhp_node cpuhp_prepare;
+       struct lttng_cpuhp_node cpuhp_online;
+       struct lttng_cpuhp_node cpuhp_iter_online;
+#else
+       struct notifier_block cpu_hp_notifier;  /* CPU hotplug notifier */
+       struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */
+       unsigned int cpu_hp_enable:1;           /* Enable CPU hotplug notif. */
+       unsigned int hp_iter_enable:1;          /* Enable hp iter notif. */
+#endif
+       struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
+       wait_queue_head_t read_wait;            /* reader wait queue */
+       wait_queue_head_t hp_wait;              /* CPU hotplug wait queue */
+       int finalized;                          /* Has channel been finalized */
+       struct channel_iter iter;               /* Channel read-side iterator */
+       struct kref ref;                        /* Reference count */
+};
+
+/* Per-subbuffer commit counters used on the hot path */
+struct commit_counters_hot {
+       union v_atomic cc;              /* Commit counter */
+       union v_atomic seq;             /* Consecutive commits */
+};
+
+/* Per-subbuffer commit counters used only on cold paths */
+struct commit_counters_cold {
+       union v_atomic cc_sb;           /* Incremented _once_ at sb switch */
+};
+
+/* Per-buffer read iterator */
+struct lib_ring_buffer_iter {
+       u64 timestamp;                  /* Current record timestamp */
+       size_t header_len;              /* Current record header length */
+       size_t payload_len;             /* Current record payload length */
+
+       struct list_head empty_node;    /* Linked list of empty buffers */
+       unsigned long consumed, read_offset, data_size;
+       enum {
+               ITER_GET_SUBBUF = 0,
+               ITER_TEST_RECORD,
+               ITER_NEXT_RECORD,
+               ITER_PUT_SUBBUF,
+       } state;
+       unsigned int allocated:1;
+       unsigned int read_open:1;       /* Opened for reading? */
+};
+
+/* ring buffer state */
+struct lib_ring_buffer {
+       /* First 32 bytes cache-hot cacheline */
+       union v_atomic offset;          /* Current offset in the buffer */
+       struct commit_counters_hot *commit_hot;
+                                       /* Commit count per sub-buffer */
+       atomic_long_t consumed;         /*
+                                        * Consumed offset in the buffer,
+                                        * standard atomic access (shared)
+                                        */
+       atomic_t record_disabled;
+       /* End of first 32 bytes cacheline */
+       union v_atomic last_tsc;        /*
+                                        * Last timestamp written in the buffer.
+                                        */
+
+       struct lib_ring_buffer_backend backend; /* Associated backend */
+
+       struct commit_counters_cold *commit_cold;
+                                       /* Commit count per sub-buffer */
+       u64 *ts_end;                    /*
+                                        * timestamp_end per sub-buffer.
+                                        * Time is sampled by the
+                                        * switch_*_end() callbacks which
+                                        * are the last space reservation
+                                        * performed in the sub-buffer
+                                        * before it can be fully
+                                        * committed and delivered. This
+                                        * time value is then read by
+                                        * the deliver callback,
+                                        * performed by the last commit
+                                        * before the buffer becomes
+                                        * readable.
+                                        */
+       atomic_long_t active_readers;   /*
+                                        * Active readers count
+                                        * standard atomic access (shared)
+                                        */
+                                       /* Dropped records */
+       union v_atomic records_lost_full;       /* Buffer full */
+       union v_atomic records_lost_wrap;       /* Nested wrap-around */
+       union v_atomic records_lost_big;        /* Events too big */
+       union v_atomic records_count;   /* Number of records written */
+       union v_atomic records_overrun; /* Number of overwritten records */
+       wait_queue_head_t read_wait;    /* reader buffer-level wait queue */
+       wait_queue_head_t write_wait;   /* writer buffer-level wait queue (for metadata only) */
+       int finalized;                  /* buffer has been finalized */
+       struct timer_list switch_timer; /* timer for periodical switch */
+       struct timer_list read_timer;   /* timer for read poll */
+       raw_spinlock_t raw_tick_nohz_spinlock;  /* nohz entry lock/trylock */
+       struct lib_ring_buffer_iter iter;       /* read-side iterator */
+       unsigned long get_subbuf_consumed;      /* Read-side consumed */
+       unsigned long prod_snapshot;    /* Producer count snapshot */
+       unsigned long cons_snapshot;    /* Consumer count snapshot */
+       unsigned int get_subbuf:1,      /* Sub-buffer being held by reader */
+               switch_timer_enabled:1, /* Protected by ring_buffer_nohz_lock */
+               read_timer_enabled:1,   /* Protected by ring_buffer_nohz_lock */
+               quiescent:1;
+};
+
+static inline
+void *channel_get_private(struct channel *chan)
+{
+       return chan->backend.priv;
+}
+
+void lib_ring_buffer_lost_event_too_big(struct channel *chan);
+
+/*
+ * Issue warnings and disable channels upon internal error.
+ * Can receive struct lib_ring_buffer or struct lib_ring_buffer_backend
+ * parameters.
+ */
+#define CHAN_WARN_ON(c, cond)                                          \
+       ({                                                              \
+               struct channel *__chan;                                 \
+               int _____ret = unlikely(cond);                          \
+               if (_____ret) {                                         \
+                       if (__same_type(*(c), struct channel_backend))  \
+                               __chan = container_of((void *) (c),     \
+                                                       struct channel, \
+                                                       backend);       \
+                       else if (__same_type(*(c), struct channel))     \
+                               __chan = (void *) (c);                  \
+                       else                                            \
+                               BUG_ON(1);                              \
+                       atomic_inc(&__chan->record_disabled);           \
+                       WARN_ON(1);                                     \
+               }                                                       \
+               _____ret;                                               \
+       })
+
+#endif /* _LIB_RING_BUFFER_FRONTEND_TYPES_H */
diff --git a/include/ringbuffer/iterator.h b/include/ringbuffer/iterator.h
new file mode 100644 (file)
index 0000000..a006ed0
--- /dev/null
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/iterator.h
+ *
+ * Ring buffer and channel iterators.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIB_RING_BUFFER_ITERATOR_H
+#define _LIB_RING_BUFFER_ITERATOR_H
+
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <ringbuffer/vfs.h>
+
+/*
+ * lib_ring_buffer_get_next_record advances the buffer read position to the next
+ * record. It returns either the size of the next record, -EAGAIN if there is
+ * currently no data available, or -ENODATA if no data is available and the
+ * buffer is finalized.
+ */
+extern ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
+                                              struct lib_ring_buffer *buf);
+
+/*
+ * channel_get_next_record advances the buffer read position to the next record.
+ * It returns either the size of the next record, -EAGAIN if there is currently
+ * no data available, or -ENODATA if no data is available and the buffer is
+ * finalized.
+ * Returns the current buffer in ret_buf.
+ */
+extern ssize_t channel_get_next_record(struct channel *chan,
+                                      struct lib_ring_buffer **ret_buf);
+
+/**
+ * read_current_record - copy the buffer's current record into dest.
+ * @buf: ring buffer
+ * @dest: destination where the record should be copied
+ *
+ * dest should be large enough to contain the record. Returns the number of
+ * bytes copied.
+ */
+static inline size_t read_current_record(struct lib_ring_buffer *buf, void *dest)
+{
+       return lib_ring_buffer_read(&buf->backend, buf->iter.read_offset,
+                                   dest, buf->iter.payload_len);
+}
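+
+/*
+ * Record iteration sketch (error handling elided; "dest" is a hypothetical
+ * buffer at least as large as the largest record):
+ *
+ *	ssize_t len;
+ *
+ *	while ((len = lib_ring_buffer_get_next_record(chan, buf)) >= 0)
+ *		read_current_record(buf, dest);
+ *	... len is -EAGAIN when no data is available, -ENODATA when the
+ *	buffer is finalized ...
+ */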
+
+extern int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf);
+extern void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf);
+extern int channel_iterator_open(struct channel *chan);
+extern void channel_iterator_release(struct channel *chan);
+
+extern const struct file_operations channel_payload_file_operations;
+extern const struct file_operations lib_ring_buffer_payload_file_operations;
+
+/*
+ * Used internally.
+ */
+int channel_iterator_init(struct channel *chan);
+void channel_iterator_unregister_notifiers(struct channel *chan);
+void channel_iterator_free(struct channel *chan);
+void channel_iterator_reset(struct channel *chan);
+void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf);
+
+#endif /* _LIB_RING_BUFFER_ITERATOR_H */
diff --git a/include/ringbuffer/nohz.h b/include/ringbuffer/nohz.h
new file mode 100644 (file)
index 0000000..eba2541
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/nohz.h
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIB_RING_BUFFER_NOHZ_H
+#define _LIB_RING_BUFFER_NOHZ_H
+
+#ifdef CONFIG_LIB_RING_BUFFER
+void lib_ring_buffer_tick_nohz_flush(void);
+void lib_ring_buffer_tick_nohz_stop(void);
+void lib_ring_buffer_tick_nohz_restart(void);
+#else
+static inline void lib_ring_buffer_tick_nohz_flush(void)
+{
+}
+
+static inline void lib_ring_buffer_tick_nohz_stop(void)
+{
+}
+
+static inline void lib_ring_buffer_tick_nohz_restart(void)
+{
+}
+#endif
+
+#endif /* _LIB_RING_BUFFER_NOHZ_H */
diff --git a/include/ringbuffer/vatomic.h b/include/ringbuffer/vatomic.h
new file mode 100644 (file)
index 0000000..bc142f7
--- /dev/null
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/vatomic.h
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIB_RING_BUFFER_VATOMIC_H
+#define _LIB_RING_BUFFER_VATOMIC_H
+
+#include <asm/atomic.h>
+#include <asm/local.h>
+
+/*
+ * Same data type (long) accessed differently depending on the configuration.
+ * The v field is for non-atomic access (protected by mutual exclusion).
+ * In the fast path, the ring_buffer_config structure is constant, so the
+ * compiler can statically select the appropriate branch.
+ * local_t is used for per-cpu and per-thread buffers.
+ * atomic_long_t is used for globally shared buffers.
+ */
+union v_atomic {
+       local_t l;
+       atomic_long_t a;
+       long v;
+};
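+
+/*
+ * Usage sketch: counters are always manipulated through the v_* accessors
+ * below, so the same code path works for both synchronization modes, e.g.
+ * the commit hot path does:
+ *
+ *	v_add(config, ctx->slot_size, &cc_hot->cc);
+ *	commit_count = v_read(config, &cc_hot->cc);
+ */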
+
+static inline
+long v_read(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+{
+       if (config->sync == RING_BUFFER_SYNC_PER_CPU)
+               return local_read(&v_a->l);
+       else
+               return atomic_long_read(&v_a->a);
+}
+
+static inline
+void v_set(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
+          long v)
+{
+       if (config->sync == RING_BUFFER_SYNC_PER_CPU)
+               local_set(&v_a->l, v);
+       else
+               atomic_long_set(&v_a->a, v);
+}
+
+static inline
+void v_add(const struct lib_ring_buffer_config *config, long v, union v_atomic *v_a)
+{
+       if (config->sync == RING_BUFFER_SYNC_PER_CPU)
+               local_add(v, &v_a->l);
+       else
+               atomic_long_add(v, &v_a->a);
+}
+
+static inline
+void v_inc(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+{
+       if (config->sync == RING_BUFFER_SYNC_PER_CPU)
+               local_inc(&v_a->l);
+       else
+               atomic_long_inc(&v_a->a);
+}
+
+/*
+ * Non-atomic decrement. Only used by the reader; applies to a reader-owned
+ * subbuffer.
+ */
+static inline
+void _v_dec(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+{
+       --v_a->v;
+}
+
+static inline
+long v_cmpxchg(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
+              long old, long _new)
+{
+       if (config->sync == RING_BUFFER_SYNC_PER_CPU)
+               return local_cmpxchg(&v_a->l, old, _new);
+       else
+               return atomic_long_cmpxchg(&v_a->a, old, _new);
+}
+
+#endif /* _LIB_RING_BUFFER_VATOMIC_H */
diff --git a/include/ringbuffer/vfs.h b/include/ringbuffer/vfs.h
new file mode 100644 (file)
index 0000000..5e13068
--- /dev/null
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * ringbuffer/vfs.h
+ *
+ * Wait-free ring buffer VFS file operations.
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIB_RING_BUFFER_VFS_H
+#define _LIB_RING_BUFFER_VFS_H
+
+#include <linux/fs.h>
+#include <linux/poll.h>
+
+/* VFS API */
+
+extern const struct file_operations lib_ring_buffer_file_operations;
+
+/*
+ * Internal file operations.
+ */
+
+struct lib_ring_buffer;
+
+int lib_ring_buffer_open(struct inode *inode, struct file *file,
+               struct lib_ring_buffer *buf);
+int lib_ring_buffer_release(struct inode *inode, struct file *file,
+               struct lib_ring_buffer *buf);
+unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
+               struct lib_ring_buffer *buf);
+ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
+               struct pipe_inode_info *pipe, size_t len,
+               unsigned int flags, struct lib_ring_buffer *buf);
+int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma,
+               struct lib_ring_buffer *buf);
+
+/* Ring Buffer ioctl() and ioctl numbers */
+long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
+               unsigned long arg, struct lib_ring_buffer *buf);
+#ifdef CONFIG_COMPAT
+long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
+               unsigned long arg, struct lib_ring_buffer *buf);
+#endif
+
+ssize_t vfs_lib_ring_buffer_file_splice_read(struct file *in, loff_t *ppos,
+               struct pipe_inode_info *pipe, size_t len, unsigned int flags);
+loff_t vfs_lib_ring_buffer_no_llseek(struct file *file, loff_t offset,
+               int origin);
+int vfs_lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma);
+ssize_t vfs_lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
+               struct pipe_inode_info *pipe, size_t len,
+               unsigned int flags);
+
+/*
+ * Use RING_BUFFER_GET_NEXT_SUBBUF / RING_BUFFER_PUT_NEXT_SUBBUF to read and
+ * consume sub-buffers sequentially.
+ *
+ * Reading sub-buffers without consuming them can be performed with:
+ *
+ * RING_BUFFER_SNAPSHOT
+ * RING_BUFFER_SNAPSHOT_GET_CONSUMED
+ * RING_BUFFER_SNAPSHOT_GET_PRODUCED
+ *
+ * to get the offset range to consume, and then by passing each sub-buffer
+ * offset to RING_BUFFER_GET_SUBBUF, read the sub-buffer, and then release it
+ * with RING_BUFFER_PUT_SUBBUF.
+ *
+ * Note that the "snapshot" API can be used to read the sub-buffer in reverse
+ * order, which is useful for flight recorder snapshots.
+ */
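+
+/*
+ * Sketch of a user-space consumer loop built on these ioctls (mmap output;
+ * "fd" is a stream file descriptor, error handling elided):
+ *
+ *	unsigned long len, off;
+ *
+ *	while (!ioctl(fd, RING_BUFFER_GET_NEXT_SUBBUF)) {
+ *		ioctl(fd, RING_BUFFER_GET_MMAP_READ_OFFSET, &off);
+ *		ioctl(fd, RING_BUFFER_GET_SUBBUF_SIZE, &len);
+ *		... consume "len" bytes at offset "off" of the mapping ...
+ *		ioctl(fd, RING_BUFFER_PUT_NEXT_SUBBUF);
+ *	}
+ */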
+
+/* Get a snapshot of the current ring buffer producer and consumer positions */
+#define RING_BUFFER_SNAPSHOT                   _IO(0xF6, 0x00)
+/* Get the consumer position (iteration start) */
+#define RING_BUFFER_SNAPSHOT_GET_CONSUMED      _IOR(0xF6, 0x01, unsigned long)
+/* Get the producer position (iteration end) */
+#define RING_BUFFER_SNAPSHOT_GET_PRODUCED      _IOR(0xF6, 0x02, unsigned long)
+/* Get exclusive read access to the specified sub-buffer position */
+#define RING_BUFFER_GET_SUBBUF                 _IOW(0xF6, 0x03, unsigned long)
+/* Release exclusive sub-buffer access */
+#define RING_BUFFER_PUT_SUBBUF                 _IO(0xF6, 0x04)
+
+/* Get exclusive read access to the next sub-buffer that can be read. */
+#define RING_BUFFER_GET_NEXT_SUBBUF            _IO(0xF6, 0x05)
+/* Release exclusive sub-buffer access, move consumer forward. */
+#define RING_BUFFER_PUT_NEXT_SUBBUF            _IO(0xF6, 0x06)
+/* Returns the size of the current sub-buffer, without padding (for mmap). */
+#define RING_BUFFER_GET_SUBBUF_SIZE            _IOR(0xF6, 0x07, unsigned long)
+/* Returns the size of the current sub-buffer, with padding (for splice). */
+#define RING_BUFFER_GET_PADDED_SUBBUF_SIZE     _IOR(0xF6, 0x08, unsigned long)
+/* Returns the maximum size for sub-buffers. */
+#define RING_BUFFER_GET_MAX_SUBBUF_SIZE                _IOR(0xF6, 0x09, unsigned long)
+/* Returns the length to mmap. */
+#define RING_BUFFER_GET_MMAP_LEN               _IOR(0xF6, 0x0A, unsigned long)
+/* Returns the offset of the sub-buffer belonging to the mmap reader. */
+#define RING_BUFFER_GET_MMAP_READ_OFFSET       _IOR(0xF6, 0x0B, unsigned long)
+/* Flush the current sub-buffer, if non-empty. */
+#define RING_BUFFER_FLUSH                      _IO(0xF6, 0x0C)
+/* Get the current version of the metadata cache (after a get_next). */
+#define RING_BUFFER_GET_METADATA_VERSION       _IOR(0xF6, 0x0D, uint64_t)
+/*
+ * Get a snapshot of the current ring buffer producer and consumer positions,
+ * regardless of whether or not the two positions are contained within the same
+ * sub-buffer.
+ */
+#define RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS  _IO(0xF6, 0x0E)
+/* Flush the current sub-buffer, even if empty. */
+#define RING_BUFFER_FLUSH_EMPTY                        _IO(0xF6, 0x0F)
+/*
+ * Reset the position of what has been consumed from the metadata cache to 0
+ * so it can be read again.
+ */
+#define RING_BUFFER_METADATA_CACHE_DUMP                _IO(0xF6, 0x10)
+/* Clear ring buffer content. */
+#define RING_BUFFER_CLEAR                      _IO(0xF6, 0x11)
+
+#ifdef CONFIG_COMPAT
+/* Get a snapshot of the current ring buffer producer and consumer positions */
+#define RING_BUFFER_COMPAT_SNAPSHOT            RING_BUFFER_SNAPSHOT
+/* Get the consumer position (iteration start) */
+#define RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED \
+       _IOR(0xF6, 0x01, compat_ulong_t)
+/* Get the producer position (iteration end) */
+#define RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED \
+       _IOR(0xF6, 0x02, compat_ulong_t)
+/* Get exclusive read access to the specified sub-buffer position */
+#define RING_BUFFER_COMPAT_GET_SUBBUF          _IOW(0xF6, 0x03, compat_ulong_t)
+/* Release exclusive sub-buffer access */
+#define RING_BUFFER_COMPAT_PUT_SUBBUF          RING_BUFFER_PUT_SUBBUF
+
+/* Get exclusive read access to the next sub-buffer that can be read. */
+#define RING_BUFFER_COMPAT_GET_NEXT_SUBBUF     RING_BUFFER_GET_NEXT_SUBBUF
+/* Release exclusive sub-buffer access, move consumer forward. */
+#define RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF     RING_BUFFER_PUT_NEXT_SUBBUF
+/* Returns the size of the current sub-buffer, without padding (for mmap). */
+#define RING_BUFFER_COMPAT_GET_SUBBUF_SIZE     _IOR(0xF6, 0x07, compat_ulong_t)
+/* Returns the size of the current sub-buffer, with padding (for splice). */
+#define RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE \
+       _IOR(0xF6, 0x08, compat_ulong_t)
+/* Returns the maximum size for sub-buffers. */
+#define RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE _IOR(0xF6, 0x09, compat_ulong_t)
+/* Returns the length to mmap. */
+#define RING_BUFFER_COMPAT_GET_MMAP_LEN                _IOR(0xF6, 0x0A, compat_ulong_t)
+/* Returns the offset of the sub-buffer belonging to the mmap reader. */
+#define RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET        _IOR(0xF6, 0x0B, compat_ulong_t)
+/* Flush the current sub-buffer, if non-empty. */
+#define RING_BUFFER_COMPAT_FLUSH               RING_BUFFER_FLUSH
+/* Get the current version of the metadata cache (after a get_next). */
+#define RING_BUFFER_COMPAT_GET_METADATA_VERSION        RING_BUFFER_GET_METADATA_VERSION
+/*
+ * Get a snapshot of the current ring buffer producer and consumer positions,
+ * regardless of whether or not the two positions are contained within the same
+ * sub-buffer.
+ */
+#define RING_BUFFER_COMPAT_SNAPSHOT_SAMPLE_POSITIONS   \
+       RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS
+/* Flush the current sub-buffer, even if empty. */
+#define RING_BUFFER_COMPAT_FLUSH_EMPTY                 \
+       RING_BUFFER_FLUSH_EMPTY
+/* Clear ring buffer content. */
+#define RING_BUFFER_COMPAT_CLEAR                       \
+       RING_BUFFER_CLEAR
+#endif /* CONFIG_COMPAT */
+
+#endif /* _LIB_RING_BUFFER_VFS_H */
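
To make the sequential consumption protocol documented in this header
concrete, here is a rough userspace sketch of an mmap reader loop. It assumes
the stream file descriptor and mapping were set up elsewhere; buf_fd,
mmap_base and process_packet() are illustrative names, and error handling is
trimmed to the essentials:

#include <sys/ioctl.h>

/* ioctl numbers as defined in ringbuffer/vfs.h above. */
#define RING_BUFFER_GET_NEXT_SUBBUF		_IO(0xF6, 0x05)
#define RING_BUFFER_PUT_NEXT_SUBBUF		_IO(0xF6, 0x06)
#define RING_BUFFER_GET_SUBBUF_SIZE		_IOR(0xF6, 0x07, unsigned long)
#define RING_BUFFER_GET_MMAP_READ_OFFSET	_IOR(0xF6, 0x0B, unsigned long)

extern void process_packet(const char *data, unsigned long len); /* user code */

static int consume_stream(int buf_fd, const char *mmap_base)
{
	for (;;) {
		unsigned long read_offset, len;

		/* Take exclusive read access to the next readable sub-buffer. */
		if (ioctl(buf_fd, RING_BUFFER_GET_NEXT_SUBBUF) < 0)
			break;	/* typically EAGAIN: nothing to read yet */
		if (ioctl(buf_fd, RING_BUFFER_GET_MMAP_READ_OFFSET, &read_offset) < 0
		    || ioctl(buf_fd, RING_BUFFER_GET_SUBBUF_SIZE, &len) < 0)
			return -1;
		process_packet(mmap_base + read_offset, len);
		/* Release the sub-buffer and move the consumer forward. */
		if (ioctl(buf_fd, RING_BUFFER_PUT_NEXT_SUBBUF) < 0)
			return -1;
	}
	return 0;
}

For flight-recorder style reads, the SNAPSHOT ioctls above replace
GET_NEXT_SUBBUF: sample the consumed/produced range once, then walk it with
RING_BUFFER_GET_SUBBUF / RING_BUFFER_PUT_SUBBUF without consuming.
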
diff --git a/lib/Kbuild b/lib/Kbuild
index eb420800b9785bed80aaf900e6a62ee006a9a6a3..6818cd950b2878badb1f8c245f61bbdbb59fdb02 100644 (file)
@@ -4,7 +4,7 @@ TOP_LTTNG_MODULES_DIR := $(shell dirname $(lastword $(MAKEFILE_LIST)))/..
 
 include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common
 
-ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)
+ccflags-y += -I$(TOP_LTTNG_MODULES_DIR) -I$(TOP_LTTNG_MODULES_DIR)/include
 
 obj-$(CONFIG_LTTNG) += lttng-lib-ring-buffer.o
 
diff --git a/lib/ringbuffer/api.h b/lib/ringbuffer/api.h
deleted file mode 100644 (file)
index e469d46..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/api.h
- *
- * Ring Buffer API.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIB_RING_BUFFER_API_H
-#define _LIB_RING_BUFFER_API_H
-
-#include <wrapper/ringbuffer/backend.h>
-#include <wrapper/ringbuffer/frontend.h>
-#include <wrapper/ringbuffer/vfs.h>
-
-/*
- * ring_buffer_frontend_api.h contains static inline functions that depend on
- * client static inlines. Hence the inclusion of this "api" header only
- * within the client.
- */
-#include <wrapper/ringbuffer/frontend_api.h>
-
-#endif /* _LIB_RING_BUFFER_API_H */
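
The inclusion-order constraint described above means a client defines its
fast-path static inlines and its configuration first, and pulls in the API
header last. A hedged sketch; the client names and the clock source below are
illustrative, not lifted from this tree:

/* Client compilation unit (sketch). */
static inline u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();	/* assumed clock source */
}

static const struct lib_ring_buffer_config client_config = {
	/* ... callbacks wired to the client inlines above ... */
};

/*
 * Included last, so the static inline fast paths it defines are
 * instantiated together with the client's own inlines and config.
 */
#include <ringbuffer/api.h>
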
diff --git a/lib/ringbuffer/backend.h b/lib/ringbuffer/backend.h
deleted file mode 100644 (file)
index da4e92b..0000000
+++ /dev/null
@@ -1,463 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/backend.h
- *
- * Ring buffer backend (API).
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
- * the reader in flight recorder mode.
- */
-
-#ifndef _LIB_RING_BUFFER_BACKEND_H
-#define _LIB_RING_BUFFER_BACKEND_H
-
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/wait.h>
-#include <linux/poll.h>
-#include <linux/list.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <wrapper/uaccess.h>
-
-/* Internal helpers */
-#include <wrapper/ringbuffer/backend_internal.h>
-#include <wrapper/ringbuffer/frontend_internal.h>
-
-/* Ring buffer backend API */
-
-/* Ring buffer backend access (read/write) */
-
-extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
-                                  size_t offset, void *dest, size_t len);
-
-extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
-                                         size_t offset, void __user *dest,
-                                         size_t len);
-
-extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
-                                    size_t offset, void *dest, size_t len);
-
-extern unsigned long *
-lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb, size_t offset,
-                             void ***virt);
-
-/*
- * Return the address where a given offset is located.
- * Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to write directly to this address,
- * as long as the write is never bigger than a page size.
- */
-extern void *
-lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
-                              size_t offset);
-extern void *
-lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
-                                   size_t offset);
-
-/**
- * lib_ring_buffer_write - write data to a buffer backend
- * @config : ring buffer instance configuration
- * @ctx: ring buffer context. (input arguments only)
- * @src : source pointer to copy from
- * @len : length of data to copy
- *
- * This function copies "len" bytes of data from a source pointer to a buffer
- * backend, at the current context offset. This is more or less a buffer
- * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
- * if the copy crosses a page boundary.
- */
-static inline __attribute__((always_inline))
-void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
-                          struct lib_ring_buffer_ctx *ctx,
-                          const void *src, size_t len)
-{
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
-       size_t index, pagecpy;
-       size_t offset = ctx->buf_offset;
-       struct lib_ring_buffer_backend_pages *backend_pages;
-
-       if (unlikely(!len))
-               return;
-       backend_pages =
-               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
-       offset &= chanb->buf_size - 1;
-       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-       if (likely(pagecpy == len))
-               lib_ring_buffer_do_copy(config,
-                                       backend_pages->p[index].virt
-                                           + (offset & ~PAGE_MASK),
-                                       src, len);
-       else
-               _lib_ring_buffer_write(bufb, offset, src, len, 0);
-       ctx->buf_offset += len;
-}
-
-/**
- * lib_ring_buffer_memset - write len bytes of c to a buffer backend
- * @config : ring buffer instance configuration
- * @ctx : ring buffer context (input arguments only)
- * @c : the byte to copy
- * @len : number of bytes to copy
- *
- * This function writes "len" bytes of "c" to a buffer backend, at the current
- * context offset. This is more or less a buffer backend-specific memset()
- * operation. Calls the slow path (_ring_buffer_memset) if the write crosses a
- * page boundary.
- */
-static inline
-void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer_ctx *ctx, int c, size_t len)
-{
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
-       size_t index, pagecpy;
-       size_t offset = ctx->buf_offset;
-       struct lib_ring_buffer_backend_pages *backend_pages;
-
-       if (unlikely(!len))
-               return;
-       backend_pages =
-               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
-       offset &= chanb->buf_size - 1;
-       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-       if (likely(pagecpy == len))
-               lib_ring_buffer_do_memset(backend_pages->p[index].virt
-                                         + (offset & ~PAGE_MASK),
-                                         c, len);
-       else
-               _lib_ring_buffer_memset(bufb, offset, c, len, 0);
-       ctx->buf_offset += len;
-}
-
-/*
- * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
- * terminating character is found in @src. Returns the number of bytes
- * copied. Does *not* terminate @dest with NULL terminating character.
- */
-static inline __attribute__((always_inline))
-size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config,
-               char *dest, const char *src, size_t len)
-{
-       size_t count;
-
-       for (count = 0; count < len; count++) {
-               char c;
-
-               /*
-                * Only read source character once, in case it is
-                * modified concurrently.
-                */
-               c = READ_ONCE(src[count]);
-               if (!c)
-                       break;
-               lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
-       }
-       return count;
-}
-
-/*
- * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
- * terminating character is found in @src, or when a fault occurs.
- * Returns the number of bytes copied. Does *not* terminate @dest with
- * NULL terminating character.
- *
- * This function deals with userspace pointers; it should never be called
- * directly without having the src pointer checked with access_ok()
- * previously.
- */
-static inline __attribute__((always_inline))
-size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
-               char *dest, const char __user *src, size_t len)
-{
-       size_t count;
-
-       for (count = 0; count < len; count++) {
-               int ret;
-               char c;
-
-               ret = __copy_from_user_inatomic(&c, src + count, 1);
-               if (ret || !c)
-                       break;
-               lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
-       }
-       return count;
-}
-
-/**
- * lib_ring_buffer_strcpy - write string data to a buffer backend
- * @config : ring buffer instance configuration
- * @ctx: ring buffer context. (input arguments only)
- * @src : source pointer to copy from
- * @len : length of data to copy
- * @pad : character to use for padding
- *
- * This function copies @len - 1 bytes of string data from a source
- * pointer to a buffer backend, followed by a terminating '\0'
- * character, at the current context offset. This is more or less a
- * buffer backend-specific strncpy() operation. If a terminating '\0'
- * character is found in @src before @len - 1 characters are copied, pad
- * the buffer with @pad characters (e.g. '#'). Calls the slow path
- * (_ring_buffer_strcpy) if the copy crosses a page boundary.
- */
-static inline
-void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
-                          struct lib_ring_buffer_ctx *ctx,
-                          const char *src, size_t len, int pad)
-{
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
-       size_t index, pagecpy;
-       size_t offset = ctx->buf_offset;
-       struct lib_ring_buffer_backend_pages *backend_pages;
-
-       if (unlikely(!len))
-               return;
-       backend_pages =
-               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
-       offset &= chanb->buf_size - 1;
-       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-       if (likely(pagecpy == len)) {
-               size_t count;
-
-               count = lib_ring_buffer_do_strcpy(config,
-                                       backend_pages->p[index].virt
-                                           + (offset & ~PAGE_MASK),
-                                       src, len - 1);
-               offset += count;
-               /* Padding */
-               if (unlikely(count < len - 1)) {
-                       size_t pad_len = len - 1 - count;
-
-                       lib_ring_buffer_do_memset(backend_pages->p[index].virt
-                                               + (offset & ~PAGE_MASK),
-                                       pad, pad_len);
-                       offset += pad_len;
-               }
-               /* Ending '\0' */
-               lib_ring_buffer_do_memset(backend_pages->p[index].virt
-                                       + (offset & ~PAGE_MASK),
-                               '\0', 1);
-       } else {
-               _lib_ring_buffer_strcpy(bufb, offset, src, len, 0, pad);
-       }
-       ctx->buf_offset += len;
-}
-
-/**
- * lib_ring_buffer_copy_from_user_inatomic - write userspace data to a buffer backend
- * @config : ring buffer instance configuration
- * @ctx: ring buffer context. (input arguments only)
- * @src : userspace source pointer to copy from
- * @len : length of data to copy
- *
- * This function copies "len" bytes of data from a userspace pointer to a
- * buffer backend, at the current context offset. This is more or less a buffer
- * backend-specific memcpy() operation. Calls the slow path
- * (_ring_buffer_write_from_user_inatomic) if the copy crosses a page boundary.
- * Disable the page fault handler to ensure we never try to take the mmap_sem.
- */
-static inline __attribute__((always_inline))
-void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config *config,
-                                   struct lib_ring_buffer_ctx *ctx,
-                                   const void __user *src, size_t len)
-{
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
-       size_t index, pagecpy;
-       size_t offset = ctx->buf_offset;
-       struct lib_ring_buffer_backend_pages *backend_pages;
-       unsigned long ret;
-       mm_segment_t old_fs = get_fs();
-
-       if (unlikely(!len))
-               return;
-       backend_pages =
-               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
-       offset &= chanb->buf_size - 1;
-       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-
-       set_fs(KERNEL_DS);
-       pagefault_disable();
-       if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
-               goto fill_buffer;
-
-       if (likely(pagecpy == len)) {
-               ret = lib_ring_buffer_do_copy_from_user_inatomic(
-                       backend_pages->p[index].virt + (offset & ~PAGE_MASK),
-                       src, len);
-               if (unlikely(ret > 0)) {
-                       /* Copy failed. */
-                       goto fill_buffer;
-               }
-       } else {
-               _lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
-       }
-       pagefault_enable();
-       set_fs(old_fs);
-       ctx->buf_offset += len;
-
-       return;
-
-fill_buffer:
-       pagefault_enable();
-       set_fs(old_fs);
-       /*
-        * In the error path we call the slow path version to avoid
-        * the pollution of static inline code.
-        */
-       _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
-}
-
-/**
- * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a buffer backend
- * @config : ring buffer instance configuration
- * @ctx: ring buffer context (input arguments only)
- * @src : userspace source pointer to copy from
- * @len : length of data to copy
- * @pad : character to use for padding
- *
- * This function copies @len - 1 bytes of string data from a userspace
- * source pointer to a buffer backend, followed by a terminating '\0'
- * character, at the current context offset. This is more or less a
- * buffer backend-specific strncpy() operation. If a terminating '\0'
- * character is found in @src before @len - 1 characters are copied, pad
- * the buffer with @pad characters (e.g. '#'). Calls the slow path
- * (_ring_buffer_strcpy_from_user_inatomic) if the copy crosses a page
- * boundary. Disable the page fault handler to ensure we never try to
- * take the mmap_sem.
- */
-static inline
-void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
-               struct lib_ring_buffer_ctx *ctx,
-               const void __user *src, size_t len, int pad)
-{
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
-       size_t index, pagecpy;
-       size_t offset = ctx->buf_offset;
-       struct lib_ring_buffer_backend_pages *backend_pages;
-       mm_segment_t old_fs = get_fs();
-
-       if (unlikely(!len))
-               return;
-       backend_pages =
-               lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
-       offset &= chanb->buf_size - 1;
-       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-
-       set_fs(KERNEL_DS);
-       pagefault_disable();
-       if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
-               goto fill_buffer;
-
-       if (likely(pagecpy == len)) {
-               size_t count;
-
-               count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
-                                       backend_pages->p[index].virt
-                                           + (offset & ~PAGE_MASK),
-                                       src, len - 1);
-               offset += count;
-               /* Padding */
-               if (unlikely(count < len - 1)) {
-                       size_t pad_len = len - 1 - count;
-
-                       lib_ring_buffer_do_memset(backend_pages->p[index].virt
-                                               + (offset & ~PAGE_MASK),
-                                       pad, pad_len);
-                       offset += pad_len;
-               }
-               /* Ending '\0' */
-               lib_ring_buffer_do_memset(backend_pages->p[index].virt
-                                       + (offset & ~PAGE_MASK),
-                               '\0', 1);
-       } else {
-               _lib_ring_buffer_strcpy_from_user_inatomic(bufb, offset, src,
-                                       len, 0, pad);
-       }
-       pagefault_enable();
-       set_fs(old_fs);
-       ctx->buf_offset += len;
-
-       return;
-
-fill_buffer:
-       pagefault_enable();
-       set_fs(old_fs);
-       /*
-        * In the error path we call the slow path version to avoid
-        * the pollution of static inline code.
-        */
-       _lib_ring_buffer_memset(bufb, offset, pad, len - 1, 0);
-       offset += len - 1;
-       _lib_ring_buffer_memset(bufb, offset, '\0', 1, 0);
-}
-
-/*
- * This accessor counts the number of unread records in a buffer.
- * It only provides a consistent value if no reads or writes are performed
- * concurrently.
- */
-static inline
-unsigned long lib_ring_buffer_get_records_unread(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer *buf)
-{
-       struct lib_ring_buffer_backend *bufb = &buf->backend;
-       struct lib_ring_buffer_backend_pages *pages;
-       unsigned long records_unread = 0, sb_bindex, id;
-       unsigned int i;
-
-       for (i = 0; i < bufb->chan->backend.num_subbuf; i++) {
-               id = bufb->buf_wsb[i].id;
-               sb_bindex = subbuffer_id_get_index(config, id);
-               pages = bufb->array[sb_bindex];
-               records_unread += v_read(config, &pages->records_unread);
-       }
-       if (config->mode == RING_BUFFER_OVERWRITE) {
-               id = bufb->buf_rsb.id;
-               sb_bindex = subbuffer_id_get_index(config, id);
-               pages = bufb->array[sb_bindex];
-               records_unread += v_read(config, &pages->records_unread);
-       }
-       return records_unread;
-}
-
-/*
- * We use __copy_from_user_inatomic to copy userspace data after
- * checking with access_ok() and disabling page faults.
- *
- * Return 0 if OK, nonzero on error.
- */
-static inline
-unsigned long lib_ring_buffer_copy_from_user_check_nofault(void *dest,
-                                               const void __user *src,
-                                               unsigned long len)
-{
-       unsigned long ret;
-       mm_segment_t old_fs;
-
-       if (!lttng_access_ok(VERIFY_READ, src, len))
-               return 1;
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       pagefault_disable();
-       ret = __copy_from_user_inatomic(dest, src, len);
-       pagefault_enable();
-       set_fs(old_fs);
-       return ret;
-}
-
-#endif /* _LIB_RING_BUFFER_BACKEND_H */
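
Putting the write-side accessors above in context, a client record write is
typically a reserve/write/commit sequence built on frontend_api.h. This is a
sketch under assumptions: client_config, the payload variables and the exact
lib_ring_buffer_reserve() signature are illustrative for this tree:

static void client_write_event(struct channel *chan, const void *payload,
			       size_t len)
{
	struct lib_ring_buffer_ctx ctx;
	int cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);	/* disables preemption */
	if (cpu < 0)
		return;
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, len, lttng_alignof(char), cpu);
	if (lib_ring_buffer_reserve(&client_config, &ctx, NULL))
		goto put;	/* e.g. buffer full in discard mode */
	/* Single-page fast path; crosses to _lib_ring_buffer_write() otherwise. */
	lib_ring_buffer_write(&client_config, &ctx, payload, len);
	lib_ring_buffer_commit(&client_config, &ctx);
put:
	lib_ring_buffer_put_cpu(&client_config);
}
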
diff --git a/lib/ringbuffer/backend_internal.h b/lib/ringbuffer/backend_internal.h
deleted file mode 100644 (file)
index 5ad6fac..0000000
+++ /dev/null
@@ -1,532 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/backend_internal.h
- *
- * Ring buffer backend (internal helpers).
- *
- * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIB_RING_BUFFER_BACKEND_INTERNAL_H
-#define _LIB_RING_BUFFER_BACKEND_INTERNAL_H
-
-#include <wrapper/compiler.h>
-#include <wrapper/ringbuffer/config.h>
-#include <wrapper/ringbuffer/backend_types.h>
-#include <wrapper/ringbuffer/frontend_types.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-
-/* Ring buffer backend API presented to the frontend */
-
-/* Ring buffer and channel backend create/free */
-
-int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
-                                  struct channel_backend *chan, int cpu);
-void channel_backend_unregister_notifiers(struct channel_backend *chanb);
-void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
-int channel_backend_init(struct channel_backend *chanb,
-                        const char *name,
-                        const struct lib_ring_buffer_config *config,
-                        void *priv, size_t subbuf_size,
-                        size_t num_subbuf);
-void channel_backend_free(struct channel_backend *chanb);
-
-void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
-void channel_backend_reset(struct channel_backend *chanb);
-
-int lib_ring_buffer_backend_init(void);
-void lib_ring_buffer_backend_exit(void);
-
-extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
-                                  size_t offset, const void *src, size_t len,
-                                  size_t pagecpy);
-extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
-                                   size_t offset, int c, size_t len,
-                                   size_t pagecpy);
-extern void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
-                                  size_t offset, const char *src, size_t len,
-                                  size_t pagecpy, int pad);
-extern void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
-                                           size_t offset, const void *src,
-                                           size_t len, size_t pagecpy);
-extern void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
-               size_t offset, const char __user *src, size_t len,
-               size_t pagecpy, int pad);
-
-/*
- * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
- * exchanged atomically.
- *
- * Top half word, except lowest bit, belongs to "offset", which is used to
- * keep count of the produced buffers. For overwrite mode, this provides the
- * consumer with the capacity to read subbuffers in order, handling the
- * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
- * systems) concurrently with a single execution of get_subbuf (between offset
- * sampling and subbuffer ID exchange).
- */
-
-#define HALF_ULONG_BITS                (BITS_PER_LONG >> 1)
-
-#define SB_ID_OFFSET_SHIFT     (HALF_ULONG_BITS + 1)
-#define SB_ID_OFFSET_COUNT     (1UL << SB_ID_OFFSET_SHIFT)
-#define SB_ID_OFFSET_MASK      (~(SB_ID_OFFSET_COUNT - 1))
-/*
- * Lowest bit of the top half word belongs to noref. Used only for overwrite mode.
- */
-#define SB_ID_NOREF_SHIFT      (SB_ID_OFFSET_SHIFT - 1)
-#define SB_ID_NOREF_COUNT      (1UL << SB_ID_NOREF_SHIFT)
-#define SB_ID_NOREF_MASK       SB_ID_NOREF_COUNT
-/*
- * In overwrite mode: lowest half of word is used for index.
- * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
- * In producer-consumer mode: whole word used for index.
- */
-#define SB_ID_INDEX_SHIFT      0
-#define SB_ID_INDEX_COUNT      (1UL << SB_ID_INDEX_SHIFT)
-#define SB_ID_INDEX_MASK       (SB_ID_NOREF_COUNT - 1)
-
-/*
- * Construct the subbuffer id from offset, index and noref. Use only the index
- * for producer-consumer mode (offset and noref are only used in overwrite
- * mode).
- */
-static inline
-unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
-                          unsigned long offset, unsigned long noref,
-                          unsigned long index)
-{
-       if (config->mode == RING_BUFFER_OVERWRITE)
-               return (offset << SB_ID_OFFSET_SHIFT)
-                      | (noref << SB_ID_NOREF_SHIFT)
-                      | index;
-       else
-               return index;
-}
-
-/*
- * Compare offset with the offset contained within id. Return 1 if the offset
- * bits are identical, else 0.
- */
-static inline
-int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
-                               unsigned long id, unsigned long offset)
-{
-       return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
-}
-
-static inline
-unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
-                                    unsigned long id)
-{
-       if (config->mode == RING_BUFFER_OVERWRITE)
-               return id & SB_ID_INDEX_MASK;
-       else
-               return id;
-}
-
-static inline
-unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
-                                   unsigned long id)
-{
-       if (config->mode == RING_BUFFER_OVERWRITE)
-               return !!(id & SB_ID_NOREF_MASK);
-       else
-               return 1;
-}
-
-/*
- * Only used by reader on subbuffer ID it has exclusive access to. No volatile
- * needed.
- */
-static inline
-void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
-                           unsigned long *id)
-{
-       if (config->mode == RING_BUFFER_OVERWRITE)
-               *id |= SB_ID_NOREF_MASK;
-}
-
-static inline
-void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
-                                  unsigned long *id, unsigned long offset)
-{
-       unsigned long tmp;
-
-       if (config->mode == RING_BUFFER_OVERWRITE) {
-               tmp = *id;
-               tmp &= ~SB_ID_OFFSET_MASK;
-               tmp |= offset << SB_ID_OFFSET_SHIFT;
-               tmp |= SB_ID_NOREF_MASK;
-               /* Volatile store, read concurrently by readers. */
-               WRITE_ONCE(*id, tmp);
-       }
-}
-
-/* No volatile access, since already used locally */
-static inline
-void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
-                             unsigned long *id)
-{
-       if (config->mode == RING_BUFFER_OVERWRITE)
-               *id &= ~SB_ID_NOREF_MASK;
-}
-
-/*
- * For overwrite mode, cap the number of subbuffers per buffer to:
- * 2^16 on 32-bit architectures
- * 2^32 on 64-bit architectures
- * This is required to fit in the index part of the ID. Return 0 on success,
- * -EPERM on failure.
- */
-static inline
-int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
-                            unsigned long num_subbuf)
-{
-       if (config->mode == RING_BUFFER_OVERWRITE)
-               return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
-       else
-               return 0;
-}
-
-static inline
-void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
-                       struct lib_ring_buffer_ctx *ctx,
-                       struct lib_ring_buffer_backend_pages **backend_pages)
-{
-       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-       struct channel_backend *chanb = &ctx->chan->backend;
-       size_t sbidx, offset = ctx->buf_offset;
-       unsigned long sb_bindex, id;
-       struct lib_ring_buffer_backend_pages *rpages;
-
-       offset &= chanb->buf_size - 1;
-       sbidx = offset >> chanb->subbuf_size_order;
-       id = bufb->buf_wsb[sbidx].id;
-       sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = bufb->array[sb_bindex];
-       CHAN_WARN_ON(ctx->chan,
-                    config->mode == RING_BUFFER_OVERWRITE
-                    && subbuffer_id_is_noref(config, id));
-       *backend_pages = rpages;
-}
-
-/* Get backend pages from cache. */
-static inline
-struct lib_ring_buffer_backend_pages *
-       lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
-               struct lib_ring_buffer_ctx *ctx)
-{
-       return ctx->backend_pages;
-}
-
-/*
- * The ring buffer can count events recorded and overwritten per buffer,
- * but this counting is disabled by default due to its performance overhead.
- */
-#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
-static inline
-void subbuffer_count_record(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer_backend *bufb,
-                           unsigned long idx)
-{
-       unsigned long sb_bindex;
-
-       sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
-       v_inc(config, &bufb->array[sb_bindex]->records_commit);
-}
-#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
-static inline
-void subbuffer_count_record(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer_backend *bufb,
-                           unsigned long idx)
-{
-}
-#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
-
-/*
- * Reader has exclusive subbuffer access for record consumption. No need to
- * perform the decrement atomically.
- */
-static inline
-void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
-                             struct lib_ring_buffer_backend *bufb)
-{
-       unsigned long sb_bindex;
-
-       sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
-       CHAN_WARN_ON(bufb->chan,
-                    !v_read(config, &bufb->array[sb_bindex]->records_unread));
-       /* Non-atomic decrement protected by exclusive subbuffer access */
-       _v_dec(config, &bufb->array[sb_bindex]->records_unread);
-       v_inc(config, &bufb->records_read);
-}
-
-static inline
-unsigned long subbuffer_get_records_count(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer_backend *bufb,
-                               unsigned long idx)
-{
-       unsigned long sb_bindex;
-
-       sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
-       return v_read(config, &bufb->array[sb_bindex]->records_commit);
-}
-
-/*
- * Must be executed at subbuffer delivery when the writer has _exclusive_
- * subbuffer access. See lib_ring_buffer_check_deliver() for details.
- * subbuffer_get_records_count() must be called to get the records
- * count before this function, because it resets the records_commit
- * count.
- */
-static inline
-unsigned long subbuffer_count_records_overrun(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer_backend *bufb,
-                               unsigned long idx)
-{
-       struct lib_ring_buffer_backend_pages *pages;
-       unsigned long overruns, sb_bindex;
-
-       sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
-       pages = bufb->array[sb_bindex];
-       overruns = v_read(config, &pages->records_unread);
-       v_set(config, &pages->records_unread,
-             v_read(config, &pages->records_commit));
-       v_set(config, &pages->records_commit, 0);
-
-       return overruns;
-}
-
-static inline
-void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
-                            struct lib_ring_buffer_backend *bufb,
-                            unsigned long idx,
-                            unsigned long data_size)
-{
-       struct lib_ring_buffer_backend_pages *pages;
-       unsigned long sb_bindex;
-
-       sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
-       pages = bufb->array[sb_bindex];
-       pages->data_size = data_size;
-}
-
-static inline
-unsigned long subbuffer_get_read_data_size(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer_backend *bufb)
-{
-       struct lib_ring_buffer_backend_pages *pages;
-       unsigned long sb_bindex;
-
-       sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
-       pages = bufb->array[sb_bindex];
-       return pages->data_size;
-}
-
-static inline
-unsigned long subbuffer_get_data_size(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer_backend *bufb,
-                               unsigned long idx)
-{
-       struct lib_ring_buffer_backend_pages *pages;
-       unsigned long sb_bindex;
-
-       sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
-       pages = bufb->array[sb_bindex];
-       return pages->data_size;
-}
-
-static inline
-void subbuffer_inc_packet_count(const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer_backend *bufb,
-                               unsigned long idx)
-{
-       bufb->buf_cnt[idx].seq_cnt++;
-}
-
-/**
- * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
- *                               writer.
- */
-static inline
-void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
-                                struct lib_ring_buffer_backend *bufb,
-                                unsigned long idx)
-{
-       unsigned long id, new_id;
-
-       if (config->mode != RING_BUFFER_OVERWRITE)
-               return;
-
-       /*
- * Perform a volatile read of the subbuffer id, because we want a coherent
- * view of the id and of its associated noref flag.
-        */
-       id = READ_ONCE(bufb->buf_wsb[idx].id);
-       for (;;) {
-               /* This check is called on the fast path for each record. */
-               if (likely(!subbuffer_id_is_noref(config, id))) {
-                       /*
-                        * Store after load dependency ordering the writes to
-                        * the subbuffer after load and test of the noref flag
-                        * matches the memory barrier implied by the cmpxchg()
-                        * in update_read_sb_index().
-                        */
-                       return; /* Already writing to this buffer */
-               }
-               new_id = id;
-               subbuffer_id_clear_noref(config, &new_id);
-               new_id = cmpxchg(&bufb->buf_wsb[idx].id, id, new_id);
-               if (likely(new_id == id))
-                       break;
-               id = new_id;
-       }
-}
-
-/**
- * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
- *                                    called by writer.
- */
-static inline
-void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
-                                     struct lib_ring_buffer_backend *bufb,
-                                     unsigned long idx, unsigned long offset)
-{
-       if (config->mode != RING_BUFFER_OVERWRITE)
-               return;
-
-       /*
-        * Because ring_buffer_set_noref() is only called by a single thread
-        * (the one which updated the cc_sb value), there are no concurrent
-        * updates to take care of: other writers have not updated cc_sb, so
-        * they cannot set the noref flag, and concurrent readers cannot modify
-        * the pointer because the noref flag is not set yet.
-        * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
-        * to the subbuffer before this set noref operation.
-        * subbuffer_set_noref() uses a volatile store to deal with concurrent
-        * readers of the noref flag.
-        */
-       CHAN_WARN_ON(bufb->chan,
-                    subbuffer_id_is_noref(config, bufb->buf_wsb[idx].id));
-       /*
-        * Memory barrier that ensures counter stores are ordered before set
-        * noref and offset.
-        */
-       smp_mb();
-       subbuffer_id_set_noref_offset(config, &bufb->buf_wsb[idx].id, offset);
-}
-
-/**
- * update_read_sb_index - Read-side subbuffer index update.
- */
-static inline
-int update_read_sb_index(const struct lib_ring_buffer_config *config,
-                        struct lib_ring_buffer_backend *bufb,
-                        struct channel_backend *chanb,
-                        unsigned long consumed_idx,
-                        unsigned long consumed_count)
-{
-       unsigned long old_id, new_id;
-
-       if (config->mode == RING_BUFFER_OVERWRITE) {
-               /*
-                * Exchange the target writer subbuffer with our own unused
-                * subbuffer. No need to use READ_ONCE() here to read the
- * old id, because the value read will be confirmed by the
-                * following cmpxchg().
-                */
-               old_id = bufb->buf_wsb[consumed_idx].id;
-               if (unlikely(!subbuffer_id_is_noref(config, old_id)))
-                       return -EAGAIN;
-               /*
-                * Make sure the offset count we are expecting matches the one
-                * indicated by the writer.
-                */
-               if (unlikely(!subbuffer_id_compare_offset(config, old_id,
-                                                         consumed_count)))
-                       return -EAGAIN;
-               CHAN_WARN_ON(bufb->chan,
-                            !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
-               subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
-                                             consumed_count);
-               new_id = cmpxchg(&bufb->buf_wsb[consumed_idx].id, old_id,
-                                bufb->buf_rsb.id);
-               if (unlikely(old_id != new_id))
-                       return -EAGAIN;
-               bufb->buf_rsb.id = new_id;
-       } else {
-               /* No page exchange, use the writer page directly */
-               bufb->buf_rsb.id = bufb->buf_wsb[consumed_idx].id;
-       }
-       return 0;
-}
-
-static inline __attribute__((always_inline))
-void lttng_inline_memcpy(void *dest, const void *src,
-               unsigned long len)
-{
-       switch (len) {
-       case 1:
-               *(uint8_t *) dest = *(const uint8_t *) src;
-               break;
-       case 2:
-               *(uint16_t *) dest = *(const uint16_t *) src;
-               break;
-       case 4:
-               *(uint32_t *) dest = *(const uint32_t *) src;
-               break;
-       case 8:
-               *(uint64_t *) dest = *(const uint64_t *) src;
-               break;
-       default:
-               inline_memcpy(dest, src, len);
-       }
-}
-
-/*
- * Use the architecture-specific memcpy implementation for constant-sized
- * inputs, but rely on an inline memcpy when the length is not statically
- * known: a function call to memcpy is simply too expensive for a fast path.
- */
-#define lib_ring_buffer_do_copy(config, dest, src, len)                \
-do {                                                           \
-       size_t __len = (len);                                   \
-       if (__builtin_constant_p(len))                          \
-               memcpy(dest, src, __len);                       \
-       else                                                    \
-               lttng_inline_memcpy(dest, src, __len);          \
-} while (0)
-
-/*
- * We use __copy_from_user_inatomic to copy userspace data since we already
- * did the access_ok for the whole range.
- *
- * Return 0 if OK, nonzero on error.
- */
-static inline
-unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
-                                               const void __user *src,
-                                               unsigned long len)
-{
-       return __copy_from_user_inatomic(dest, src, len);
-}
-
-/*
- * Write len bytes of value c to dest.
- */
-static inline
-void lib_ring_buffer_do_memset(char *dest, int c,
-       unsigned long len)
-{
-       unsigned long i;
-
-       for (i = 0; i < len; i++)
-               dest[i] = c;
-}
-
-#endif /* _LIB_RING_BUFFER_BACKEND_INTERNAL_H */
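
As a worked illustration of the SB_ID_* layout above, assume a 64-bit system
(BITS_PER_LONG == 64), so HALF_ULONG_BITS == 32, SB_ID_OFFSET_SHIFT == 33 and
SB_ID_NOREF_SHIFT == 32; config below stands for any client config in
overwrite mode:

/*
 * bits 63..33: offset count | bit 32: noref | bits 31..0: subbuffer index
 */
unsigned long id = subbuffer_id(&config, 5 /* offset */, 1 /* noref */,
				7 /* index */);
/* In overwrite mode: id == (5UL << 33) | (1UL << 32) | 7. */
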
diff --git a/lib/ringbuffer/backend_types.h b/lib/ringbuffer/backend_types.h
deleted file mode 100644 (file)
index fa6baaa..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/backend_types.h
- *
- * Ring buffer backend (types).
- *
- * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIB_RING_BUFFER_BACKEND_TYPES_H
-#define _LIB_RING_BUFFER_BACKEND_TYPES_H
-
-#include <linux/cpumask.h>
-#include <linux/types.h>
-#include <lttng-kernel-version.h>
-#include <lttng-cpuhotplug.h>
-
-struct lib_ring_buffer_backend_page {
-       void *virt;                     /* page virtual address (cached) */
-       unsigned long pfn;              /* page frame number */
-};
-
-struct lib_ring_buffer_backend_pages {
-       unsigned long mmap_offset;      /* offset of the subbuffer in mmap */
-       union v_atomic records_commit;  /* current records committed count */
-       union v_atomic records_unread;  /* records to read */
-       unsigned long data_size;        /* Amount of data to read from subbuf */
-       struct lib_ring_buffer_backend_page p[];
-};
-
-struct lib_ring_buffer_backend_subbuffer {
-       /* Identifier for subbuf backend pages. Exchanged atomically. */
-       unsigned long id;               /* backend subbuffer identifier */
-};
-
-struct lib_ring_buffer_backend_counts {
-       /*
-        * Counter specific to the sub-buffer location within the ring buffer.
-        * The actual sequence number of the packet within the entire ring
-        * buffer can be derived from the formula nr_subbuffers * seq_cnt +
-        * subbuf_idx.
-        */
-       uint64_t seq_cnt;               /* packet sequence number */
-};
-
-/*
- * Forward declaration of frontend-specific channel and ring_buffer.
- */
-struct channel;
-struct lib_ring_buffer;
-
-struct lib_ring_buffer_backend {
-       /* Array of ring_buffer_backend_subbuffer for writer */
-       struct lib_ring_buffer_backend_subbuffer *buf_wsb;
-       /* ring_buffer_backend_subbuffer for reader */
-       struct lib_ring_buffer_backend_subbuffer buf_rsb;
-       /* Array of lib_ring_buffer_backend_counts for the packet counter */
-       struct lib_ring_buffer_backend_counts *buf_cnt;
-       /*
-        * Pointer array of backend pages, for whole buffer.
-        * Indexed by ring_buffer_backend_subbuffer identifier (id) index.
-        */
-       struct lib_ring_buffer_backend_pages **array;
-       unsigned int num_pages_per_subbuf;
-
-       struct channel *chan;           /* Associated channel */
-       int cpu;                        /* This buffer's cpu. -1 if global. */
-       union v_atomic records_read;    /* Number of records read */
-       unsigned int allocated:1;       /* is buffer allocated? */
-};
-
-struct channel_backend {
-       unsigned long buf_size;         /* Size of the buffer */
-       unsigned long subbuf_size;      /* Sub-buffer size */
-       unsigned int subbuf_size_order; /* Order of sub-buffer size */
-       unsigned int num_subbuf_order;  /*
-                                        * Order of number of sub-buffers/buffer
-                                        * for writer.
-                                        */
-       unsigned int buf_size_order;    /* Order of buffer size */
-       unsigned int extra_reader_sb:1; /* has extra reader subbuffer? */
-       struct lib_ring_buffer *buf;    /* Channel per-cpu buffers */
-
-       unsigned long num_subbuf;       /* Number of sub-buffers for writer */
-       u64 start_tsc;                  /* Channel creation TSC value */
-       void *priv;                     /* Client-specific information */
-       void *priv_ops;                 /* Client-specific ops pointer */
-       void (*release_priv_ops)(void *priv_ops);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-       struct lttng_cpuhp_node cpuhp_prepare;  /* CPU hotplug prepare */
-#else
-       struct notifier_block cpu_hp_notifier;   /* CPU hotplug notifier */
-#endif
-       /*
-        * We need to copy config because the module containing the
-        * source config can vanish before the last reference to this
-        * channel's streams is released.
-        */
-       struct lib_ring_buffer_config config; /* Ring buffer configuration */
-       cpumask_var_t cpumask;          /* Allocated per-cpu buffers cpumask */
-       char name[NAME_MAX];            /* Channel name */
-};
-
-#endif /* _LIB_RING_BUFFER_BACKEND_TYPES_H */
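
As a quick worked example of the seq_cnt formula documented in
lib_ring_buffer_backend_counts: the variable names below stand in for the
corresponding channel_backend and per-slot values.

/* Global packet number, per the formula in the comment above: */
uint64_t packet_nr = num_subbuf * seq_cnt + subbuf_idx;
/* e.g. num_subbuf == 4, seq_cnt == 10, subbuf_idx == 2  =>  packet 42 */
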
diff --git a/lib/ringbuffer/config.h b/lib/ringbuffer/config.h
deleted file mode 100644 (file)
index dd53c4b..0000000
+++ /dev/null
@@ -1,306 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/config.h
- *
- * Ring buffer configuration header. Note: after declaring the standard inline
- * functions, clients should also include linux/ringbuffer/api.h.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIB_RING_BUFFER_CONFIG_H
-#define _LIB_RING_BUFFER_CONFIG_H
-
-#include <linux/types.h>
-#include <linux/percpu.h>
-#include <lib/align.h>
-#include <lttng-tracer-core.h>
-
-struct lib_ring_buffer;
-struct channel;
-struct lib_ring_buffer_config;
-struct lib_ring_buffer_ctx;
-
-/*
- * Ring buffer client callbacks. Only used by slow path, never on fast path.
- * For the fast path, record_header_size() and ring_buffer_clock_read() should
- * also be provided as inline functions. These may simply return 0 if not used
- * by the client.
- */
-struct lib_ring_buffer_client_cb {
-       /* Mandatory callbacks */
-
-       /* A static inline version is also required for fast path */
-       u64 (*ring_buffer_clock_read) (struct channel *chan);
-       size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
-                                     struct channel *chan, size_t offset,
-                                     size_t *pre_header_padding,
-                                     struct lib_ring_buffer_ctx *ctx,
-                                     void *client_ctx);
-
-       /* Slow path only, at subbuffer switch */
-       size_t (*subbuffer_header_size) (void);
-       void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
-                             unsigned int subbuf_idx);
-       void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
-                           unsigned int subbuf_idx, unsigned long data_size);
-
-       /* Optional callbacks (can be set to NULL) */
-
-       /* Called at buffer creation/finalize */
-       int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
-                             int cpu, const char *name);
-       /*
-        * Clients should guarantee that no new reader handle can be opened
-        * after finalize.
-        */
-       void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);
-
-       /*
-        * Extract header length, payload length and timestamp from event
-        * record. Used by buffer iterators. Timestamp is only used by channel
-        * iterator.
-        */
-       void (*record_get) (const struct lib_ring_buffer_config *config,
-                           struct channel *chan, struct lib_ring_buffer *buf,
-                           size_t offset, size_t *header_len,
-                           size_t *payload_len, u64 *timestamp);
-};
-
-/*
- * Ring buffer instance configuration.
- *
- * Declare as "static const" within the client object to ensure the inline fast
- * paths can be optimized.
- *
- * alloc/sync pairs:
- *
- * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
- *   Per-cpu buffers with per-cpu synchronization. Tracing must be performed
- *   with preemption disabled (lib_ring_buffer_get_cpu() and
- *   lib_ring_buffer_put_cpu()).
- *
- * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
- *   Per-cpu buffer with global synchronization. Tracing can be performed with
- *   preemption enabled, statistically stays on the local buffers.
- *
- * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
- *   Should only be used for buffers belonging to a single thread or protected
- *   by mutual exclusion by the client. Note that periodical sub-buffer switch
- *   should be disabled in this kind of configuration.
- *
- * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
- *   Global shared buffer with global synchronization.
- *
- * wakeup:
- *
- * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu timers to poll the
- * buffers and wake up readers if data is ready. Mainly useful for tracers which
- * don't want to call into the wakeup code on the tracing path. Use in
- * combination with "read_timer_interval" channel_create() argument.
- *
- * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
- * ready to read. Lower latencies before the reader is woken up. Mainly suitable
- * for drivers.
- *
- * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
- * has the responsibility to perform wakeups.
- */
-struct lib_ring_buffer_config {
-       enum {
-               RING_BUFFER_ALLOC_PER_CPU,
-               RING_BUFFER_ALLOC_GLOBAL,
-       } alloc;
-       enum {
-               RING_BUFFER_SYNC_PER_CPU,       /* Wait-free */
-               RING_BUFFER_SYNC_GLOBAL,        /* Lock-free */
-       } sync;
-       enum {
-               RING_BUFFER_OVERWRITE,          /* Overwrite when buffer full */
-               RING_BUFFER_DISCARD,            /* Discard when buffer full */
-       } mode;
-       enum {
-               RING_BUFFER_SPLICE,
-               RING_BUFFER_MMAP,
-               RING_BUFFER_READ,               /* TODO */
-               RING_BUFFER_ITERATOR,
-               RING_BUFFER_NONE,
-       } output;
-       enum {
-               RING_BUFFER_PAGE,
-               RING_BUFFER_VMAP,               /* TODO */
-               RING_BUFFER_STATIC,             /* TODO */
-       } backend;
-       enum {
-               RING_BUFFER_NO_OOPS_CONSISTENCY,
-               RING_BUFFER_OOPS_CONSISTENCY,
-       } oops;
-       enum {
-               RING_BUFFER_IPI_BARRIER,
-               RING_BUFFER_NO_IPI_BARRIER,
-       } ipi;
-       enum {
-               RING_BUFFER_WAKEUP_BY_TIMER,    /* wake up performed by timer */
-               RING_BUFFER_WAKEUP_BY_WRITER,   /*
-                                                * writer wakes up reader,
-                                                * not lock-free
-                                                * (takes spinlock).
-                                                */
-       } wakeup;
-       /*
-        * tsc_bits: timestamp bits saved at each record.
-        *   0 and 64 disable the timestamp compression scheme.
-        */
-       unsigned int tsc_bits;
-       struct lib_ring_buffer_client_cb cb;
-};
-
-/*
- * ring buffer context
- *
- * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
- * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
- * lib_ring_buffer_write().
- */
-struct lib_ring_buffer_ctx {
-       /* input received by lib_ring_buffer_reserve(), saved here. */
-       struct channel *chan;           /* channel */
-       void *priv;                     /* client private data */
-       size_t data_size;               /* size of payload */
-       int largest_align;              /*
-                                        * alignment of the largest element
-                                        * in the payload
-                                        */
-       int cpu;                        /* processor id */
-
-       /* output from lib_ring_buffer_reserve() */
-       struct lib_ring_buffer *buf;    /*
-                                        * buffer corresponding to processor id
-                                        * for this channel
-                                        */
-       size_t slot_size;               /* size of the reserved slot */
-       unsigned long buf_offset;       /* offset following the record header */
-       unsigned long pre_offset;       /*
-                                        * Initial offset position _before_
-                                        * the record is written. Positioned
-                                        * prior to record header alignment
-                                        * padding.
-                                        */
-       u64 tsc;                        /* time-stamp counter value */
-       unsigned int rflags;            /* reservation flags */
-       /* Cache backend pages pointer chasing. */
-       struct lib_ring_buffer_backend_pages *backend_pages;
-};
-
-/**
- * lib_ring_buffer_ctx_init - initialize ring buffer context
- * @ctx: ring buffer context to initialize
- * @chan: channel
- * @priv: client private data
- * @data_size: size of record data payload. It must be greater than 0.
- * @largest_align: largest alignment within data payload types
- * @cpu: processor id
- */
-static inline
-void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
-                             struct channel *chan, void *priv,
-                             size_t data_size, int largest_align,
-                             int cpu)
-{
-       ctx->chan = chan;
-       ctx->priv = priv;
-       ctx->data_size = data_size;
-       ctx->largest_align = largest_align;
-       ctx->cpu = cpu;
-       ctx->rflags = 0;
-       ctx->backend_pages = NULL;
-}
-
-/*
- * Reservation flags.
- *
- * RING_BUFFER_RFLAG_FULL_TSC
- *
- * This flag is passed to record_header_size() and to the primitive used to
- * write the record header. It indicates that the full 64-bit time value is
- * needed in the record header. If this flag is not set, the record header needs
- * only to contain "tsc_bits" bits of time value.
- *
- * Reservation flags can be added by the client, starting from
- * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
- * record_header_size() to lib_ring_buffer_write_record_header().
- */
-#define        RING_BUFFER_RFLAG_FULL_TSC              (1U << 0)
-#define RING_BUFFER_RFLAG_END                  (1U << 1)
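
For instance, a hypothetical client-defined flag would be allocated right
after the library's last flag, as prescribed above:

	/* Sketch: hypothetical client-defined reservation flags. */
	#define MY_CLIENT_RFLAG_COMPACT		(RING_BUFFER_RFLAG_END << 0)
	#define MY_CLIENT_RFLAG_END		(RING_BUFFER_RFLAG_END << 1)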
-
-#ifndef LTTNG_TRACER_CORE_H
-#error "lttng-tracer-core.h is needed for RING_BUFFER_ALIGN define"
-#endif
-
-/*
- * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
- * compile-time. We have to duplicate the "config->align" information and the
- * definition here because config->align is used both in the slow and fast
- * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
- */
-#ifdef RING_BUFFER_ALIGN
-
-# define RING_BUFFER_ALIGN_ATTR                /* Default arch alignment */
-
-/*
- * Calculate the offset needed to align the type.
- * size_of_type must be non-zero.
- */
-static inline
-unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
-{
-       return offset_align(align_drift, size_of_type);
-}
-
-#else
-
-# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
-
-/*
- * Calculate the offset needed to align the type.
- * size_of_type must be non-zero.
- */
-static inline
-unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
-{
-       return 0;
-}
-
-#endif
-
-/**
- * lib_ring_buffer_align_ctx - Align context offset on "alignment"
- * @ctx: ring buffer context.
- */
-static inline
-void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
-                          size_t alignment)
-{
-       ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
-                                                alignment);
-}
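
A typical serialization sequence aligns the context on each field's
natural alignment before writing it. A sketch, assuming the backend's
lib_ring_buffer_write() and an already-reserved context:

	static inline
	void client_serialize(const struct lib_ring_buffer_config *config,
			      struct lib_ring_buffer_ctx *ctx, u32 a, u64 b)
	{
		lib_ring_buffer_align_ctx(ctx, sizeof(a));
		lib_ring_buffer_write(config, ctx, &a, sizeof(a));
		lib_ring_buffer_align_ctx(ctx, sizeof(b));
		lib_ring_buffer_write(config, ctx, &b, sizeof(b));
	}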
-
-/*
- * lib_ring_buffer_check_config() returns 0 on success.
- * Used internally to check for valid configurations at channel creation.
- */
-static inline
-int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
-                            unsigned int switch_timer_interval,
-                            unsigned int read_timer_interval)
-{
-       if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
-           && config->sync == RING_BUFFER_SYNC_PER_CPU
-           && switch_timer_interval)
-               return -EINVAL;
-       return 0;
-}
-
-#include <wrapper/ringbuffer/vatomic.h>
-
-#endif /* _LIB_RING_BUFFER_CONFIG_H */
diff --git a/lib/ringbuffer/frontend.h b/lib/ringbuffer/frontend.h
deleted file mode 100644 (file)
index f67edc0..0000000
+++ /dev/null
@@ -1,234 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/frontend.h
- *
- * Ring Buffer Library Synchronization Header (API).
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- */
-
-#ifndef _LIB_RING_BUFFER_FRONTEND_H
-#define _LIB_RING_BUFFER_FRONTEND_H
-
-#include <linux/pipe_fs_i.h>
-#include <linux/rcupdate.h>
-#include <linux/cpumask.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/splice.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/cache.h>
-#include <linux/time.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/stat.h>
-#include <linux/cpu.h>
-#include <linux/fs.h>
-
-#include <asm/atomic.h>
-#include <asm/local.h>
-
-/* Internal helpers */
-#include <wrapper/ringbuffer/frontend_internal.h>
-
-/* Max ring buffer nesting count, see lib_ring_buffer_get_cpu(). */
-#define RING_BUFFER_MAX_NESTING 4
-
-/* Buffer creation/removal and setup operations */
-
-/*
- * switch_timer_interval is the time interval (in us) to fill sub-buffers with
- * padding to let readers get those sub-buffers.  Used for live streaming.
- *
- * read_timer_interval is the time interval (in us) to wake up pending readers.
- *
- * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
- * address mapping. It is used only by the RING_BUFFER_STATIC configuration. It
- * can be set to NULL for other backends.
- */
-
-extern
-struct channel *channel_create(const struct lib_ring_buffer_config *config,
-                              const char *name, void *priv,
-                              void *buf_addr,
-                              size_t subbuf_size, size_t num_subbuf,
-                              unsigned int switch_timer_interval,
-                              unsigned int read_timer_interval);
-
-/*
- * channel_destroy returns the private data pointer. It finalizes all channel's
- * buffers, waits for readers to release all references, and destroys the
- * channel.
- */
-extern
-void *channel_destroy(struct channel *chan);
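
Putting the two together, a client's channel lifetime looks roughly as
follows. This is a sketch: client_config, priv, and the sizing and timer
values are illustrative assumptions.

	static void *client_session(void *priv)
	{
		struct channel *chan;

		/* 8 sub-buffers of 64 kB, 100 ms switch timer, 200 ms read timer. */
		chan = channel_create(&client_config, "my-chan", priv, NULL,
				      65536, 8, 100000, 200000);
		if (!chan)
			return NULL;
		/* ... tracing runs against chan ... */
		return channel_destroy(chan);	/* returns priv */
	}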
-
-
-/* Buffer read operations */
-
-/*
- * Iteration on channel cpumask needs to issue a read barrier to match the write
- * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
- * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
- * only performed at channel destruction.
- */
-#define for_each_channel_cpu(cpu, chan)                                        \
-       for ((cpu) = -1;                                                \
-               ({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask);  \
-                  smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });)
-
-extern struct lib_ring_buffer *channel_get_ring_buffer(
-                               const struct lib_ring_buffer_config *config,
-                               struct channel *chan, int cpu);
-extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
-extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);
-
-/*
- * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
- */
-extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
-                                   unsigned long *consumed,
-                                   unsigned long *produced);
-extern int lib_ring_buffer_snapshot_sample_positions(
-                                   struct lib_ring_buffer *buf,
-                                   unsigned long *consumed,
-                                   unsigned long *produced);
-extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
-                                         unsigned long consumed_new);
-
-extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
-                                     unsigned long consumed);
-extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);
-
-void lib_ring_buffer_set_quiescent_channel(struct channel *chan);
-void lib_ring_buffer_clear_quiescent_channel(struct channel *chan);
-
-/*
- * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
- * to read sub-buffers sequentially.
- */
-static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
-{
-       int ret;
-
-       ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
-                                      &buf->prod_snapshot);
-       if (ret)
-               return ret;
-       ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot);
-       return ret;
-}
-
-static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
-{
-       lib_ring_buffer_put_subbuf(buf);
-       lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
-                                                   buf->backend.chan));
-}
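
A sequential reader loops on these helpers. A sketch (error paths
trimmed; consuming the sub-buffer contents through the backend read API
is client-specific):

	static int client_drain(struct lib_ring_buffer *buf)
	{
		int ret;

		ret = lib_ring_buffer_open_read(buf);
		if (ret)
			return ret;
		while (!lib_ring_buffer_get_next_subbuf(buf)) {
			/* ... consume the sub-buffer via buf->backend ... */
			lib_ring_buffer_put_next_subbuf(buf);
		}
		lib_ring_buffer_release_read(buf);
		return 0;
	}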
-
-extern void channel_reset(struct channel *chan);
-extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);
-
-static inline
-unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
-                                        struct lib_ring_buffer *buf)
-{
-       return v_read(config, &buf->offset);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
-                                          struct lib_ring_buffer *buf)
-{
-       return atomic_long_read(&buf->consumed);
-}
-
-/*
- * Must call lib_ring_buffer_is_finalized before reading counters (memory
- * ordering enforced with respect to trace teardown).
- */
-static inline
-int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
-                                struct lib_ring_buffer *buf)
-{
-       int finalized = READ_ONCE(buf->finalized);
-       /*
-        * Read finalized before counters.
-        */
-       smp_rmb();
-       return finalized;
-}
-
-static inline
-int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
-{
-       return chan->finalized;
-}
-
-static inline
-int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
-{
-       return atomic_read(&chan->record_disabled);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_read_data_size(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer *buf)
-{
-       return subbuffer_get_read_data_size(config, &buf->backend);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_count(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer *buf)
-{
-       return v_read(config, &buf->records_count);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_overrun(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer *buf)
-{
-       return v_read(config, &buf->records_overrun);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_lost_full(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer *buf)
-{
-       return v_read(config, &buf->records_lost_full);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_lost_wrap(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer *buf)
-{
-       return v_read(config, &buf->records_lost_wrap);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_lost_big(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer *buf)
-{
-       return v_read(config, &buf->records_lost_big);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_read(
-                               const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer *buf)
-{
-       return v_read(config, &buf->backend.records_read);
-}
-
-#endif /* _LIB_RING_BUFFER_FRONTEND_H */
diff --git a/lib/ringbuffer/frontend_api.h b/lib/ringbuffer/frontend_api.h
deleted file mode 100644 (file)
index 3c46a17..0000000
+++ /dev/null
@@ -1,358 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/frontend_api.h
- *
- * Ring Buffer Library Synchronization Header (buffer write API).
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- * See lib/ringbuffer/frontend.h for channel allocation and the read-side API.
- */
-
-#ifndef _LIB_RING_BUFFER_FRONTEND_API_H
-#define _LIB_RING_BUFFER_FRONTEND_API_H
-
-#include <wrapper/ringbuffer/frontend.h>
-#include <wrapper/percpu-defs.h>
-#include <linux/errno.h>
-#include <linux/prefetch.h>
-
-/**
- * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
- *
- * Disables preemption (acts as an RCU read-side critical section) and keeps a
- * ring buffer nesting count as a supplementary safety net to ensure tracer
- * client code will never trigger an endless recursion. Returns the processor
- * ID on success, -EPERM on failure (nesting count too high).
- *
- * asm volatile and "memory" clobber prevent the compiler from moving
- * instructions out of the ring buffer nesting count. This is required to ensure
- * that probe side-effects which can cause recursion (e.g. unforeseen traps,
- * divisions by 0, ...) are triggered within the incremented nesting count
- * section.
- */
-static inline
-int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
-{
-       int cpu, nesting;
-
-       rcu_read_lock_sched_notrace();
-       cpu = smp_processor_id();
-       nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
-       barrier();
-
-       if (unlikely(nesting > RING_BUFFER_MAX_NESTING)) {
-               WARN_ON_ONCE(1);
-               per_cpu(lib_ring_buffer_nesting, cpu)--;
-               rcu_read_unlock_sched_notrace();
-               return -EPERM;
-       } else
-               return cpu;
-}
-
-/**
- * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
- */
-static inline
-void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
-{
-       barrier();
-       (*lttng_this_cpu_ptr(&lib_ring_buffer_nesting))--;
-       rcu_read_unlock_sched_notrace();
-}
-
-/*
- * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
- * part of the API per se.
- *
- * Returns 0 if the reservation succeeded, or 1 if the slow path must be taken.
- */
-static inline
-int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer_ctx *ctx,
-                               void *client_ctx,
-                               unsigned long *o_begin, unsigned long *o_end,
-                               unsigned long *o_old, size_t *before_hdr_pad)
-{
-       struct channel *chan = ctx->chan;
-       struct lib_ring_buffer *buf = ctx->buf;
-       *o_begin = v_read(config, &buf->offset);
-       *o_old = *o_begin;
-
-       ctx->tsc = lib_ring_buffer_clock_read(chan);
-       if ((int64_t) ctx->tsc == -EIO)
-               return 1;
-
-       /*
-        * Prefetch cacheline for read because we have to read the previous
-        * commit counter to increment it and commit seq value to compare it to
-        * the commit counter.
-        */
-       prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
-
-       if (last_tsc_overflow(config, buf, ctx->tsc))
-               ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
-
-       if (unlikely(subbuf_offset(*o_begin, chan) == 0))
-               return 1;
-
-       ctx->slot_size = record_header_size(config, chan, *o_begin,
-                                           before_hdr_pad, ctx, client_ctx);
-       ctx->slot_size +=
-               lib_ring_buffer_align(*o_begin + ctx->slot_size,
-                                     ctx->largest_align) + ctx->data_size;
-       if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
-                    > chan->backend.subbuf_size))
-               return 1;
-
-       /*
-        * Record fits in the current buffer and we are not on a switch
-        * boundary. It's safe to write.
-        */
-       *o_end = *o_begin + ctx->slot_size;
-
-       if (unlikely((subbuf_offset(*o_end, chan)) == 0))
-               /*
-                * The offset_end will fall at the very beginning of the next
-                * subbuffer.
-                */
-               return 1;
-
-       return 0;
-}
-
-/**
- * lib_ring_buffer_reserve - Reserve space in a ring buffer.
- * @config: ring buffer instance configuration.
- * @ctx: ring buffer context. (input and output) Must be already initialized.
- *
- * Atomic wait-free slot reservation. The reserved space starts at the context
- * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
- *
- * Return :
- *  0 on success.
- * -EAGAIN if channel is disabled.
- * -ENOSPC if event size is too large for packet.
- * -ENOBUFS if there is currently not enough space in buffer for the event.
- * -EIO if data cannot be written into the buffer for any other reason.
- */
-
-static inline
-int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer_ctx *ctx,
-                           void *client_ctx)
-{
-       struct channel *chan = ctx->chan;
-       struct lib_ring_buffer *buf;
-       unsigned long o_begin, o_end, o_old;
-       size_t before_hdr_pad = 0;
-
-       if (unlikely(atomic_read(&chan->record_disabled)))
-               return -EAGAIN;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
-       else
-               buf = chan->backend.buf;
-       if (unlikely(atomic_read(&buf->record_disabled)))
-               return -EAGAIN;
-       ctx->buf = buf;
-
-       /*
-        * Perform retryable operations.
-        */
-       if (unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
-                                                &o_end, &o_old, &before_hdr_pad)))
-               goto slow_path;
-
-       if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
-                    != o_old))
-               goto slow_path;
-
-       /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * record headers, never the opposite (missing a full TSC record header
-        * when it would be needed).
-        */
-       save_last_tsc(config, ctx->buf, ctx->tsc);
-
-       /*
-        * Push the reader if necessary
-        */
-       lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);
-
-       /*
-        * Clear noref flag for this subbuffer.
-        */
-       lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
-                               subbuf_index(o_end - 1, chan));
-
-       ctx->pre_offset = o_begin;
-       ctx->buf_offset = o_begin + before_hdr_pad;
-       return 0;
-slow_path:
-       return lib_ring_buffer_reserve_slow(ctx, client_ctx);
-}
-
-/**
- * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
- * @config: ring buffer instance configuration.
- * @buf: buffer
- * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
- *
- * This operation is completely reentrant: it can be called while tracing is
- * active with absolutely no lock held.
- *
- * Note, however, that as a v_cmpxchg is used for some atomic operations and
- * must be executed locally for per-CPU buffers, this function must be called
- * from the CPU which owns the buffer for an ACTIVE flush, with preemption
- * disabled, for the RING_BUFFER_SYNC_PER_CPU configuration.
- */
-static inline
-void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer *buf, enum switch_mode mode)
-{
-       lib_ring_buffer_switch_slow(buf, mode);
-}
-
-/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
-
-/**
- * lib_ring_buffer_commit - Commit a record.
- * @config: ring buffer instance configuration.
- * @ctx: ring buffer context. (input arguments only)
- *
- * Atomic unordered slot commit. Increments the commit count in the
- * specified sub-buffer, and delivers it if necessary.
- */
-static inline
-void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
-                           const struct lib_ring_buffer_ctx *ctx)
-{
-       struct channel *chan = ctx->chan;
-       struct lib_ring_buffer *buf = ctx->buf;
-       unsigned long offset_end = ctx->buf_offset;
-       unsigned long endidx = subbuf_index(offset_end - 1, chan);
-       unsigned long commit_count;
-       struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];
-
-       /*
-        * Must count record before incrementing the commit count.
-        */
-       subbuffer_count_record(config, &buf->backend, endidx);
-
-       /*
-        * Order all writes to buffer before the commit count update that will
-        * determine that the subbuffer is full.
-        */
-       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-               /*
-                * Must write slot data before incrementing commit count.  This
-                * compiler barrier is upgraded into a smp_mb() by the IPI sent
-                * by get_subbuf().
-                */
-               barrier();
-       } else
-               smp_wmb();
-
-       v_add(config, ctx->slot_size, &cc_hot->cc);
-
-       /*
-        * The commit count read can race with concurrent out-of-order (OOO)
-        * commit count updates. This is only needed for
-        * lib_ring_buffer_check_deliver (for non-polling delivery only) and for
-        * lib_ring_buffer_write_commit_counter. The race can only cause the
-        * counter to be read with the same value more than once, which could
-        * cause:
-        * - Multiple delivery for the same sub-buffer (which is handled
-        *   gracefully by the reader code) if the value is for a full
-        *   sub-buffer. It's important that we can never miss a sub-buffer
-        *   delivery. Re-reading the value after the v_add ensures this.
-        * - Reading a commit_count with a higher value than what was actually
-        *   added to it for the lib_ring_buffer_write_commit_counter call
-        *   (again caused by a concurrent committer). It does not matter,
-        *   because this function is interested in the fact that the commit
-        *   count reaches back the reserve offset for a specific sub-buffer,
-        *   which is completely independent of the order.
-        */
-       commit_count = v_read(config, &cc_hot->cc);
-
-       lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-                                     commit_count, endidx, ctx->tsc);
-       /*
-        * Update used size at each commit. It's needed only for extracting
-        * ring_buffer buffers from vmcore, after crash.
-        */
-       lib_ring_buffer_write_commit_counter(config, buf, chan,
-                       offset_end, commit_count, cc_hot);
-}
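
End to end, the write fast path chains the primitives above. A sketch
for a per-cpu channel, reusing the client_config sketched earlier; the
u64 payload and its alignment are illustrative:

	static int client_write_event(struct channel *chan, u64 payload)
	{
		const struct lib_ring_buffer_config *config = &client_config;
		struct lib_ring_buffer_ctx ctx;
		int ret, cpu;

		cpu = lib_ring_buffer_get_cpu(config);
		if (cpu < 0)
			return -EPERM;
		lib_ring_buffer_ctx_init(&ctx, chan, NULL, sizeof(payload),
					 sizeof(payload), cpu);
		ret = lib_ring_buffer_reserve(config, &ctx, NULL);
		if (ret)
			goto put;
		lib_ring_buffer_write(config, &ctx, &payload, sizeof(payload));
		lib_ring_buffer_commit(config, &ctx);
	put:
		lib_ring_buffer_put_cpu(config);
		return ret;
	}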
-
-/**
- * lib_ring_buffer_try_discard_reserve - Try discarding a record.
- * @config: ring buffer instance configuration.
- * @ctx: ring buffer context. (input arguments only)
- *
- * Only succeeds if no other record has been written after the record to
- * discard. If discard fails, the record must be committed to the buffer.
- *
- * Returns 0 upon success, -EPERM if the record cannot be discarded.
- */
-static inline
-int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
-                                       const struct lib_ring_buffer_ctx *ctx)
-{
-       struct lib_ring_buffer *buf = ctx->buf;
-       unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
-
-       /*
-        * We need to ensure that if the cmpxchg succeeds and discards the
-        * record, the next record will record a full TSC, because it cannot
-        * rely on the last_tsc associated with the discarded record to detect
-        * overflows. The only way to ensure this is to set the last_tsc to 0
-        * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
-        * timestamp in the next record.
-        *
-        * Note: if discard fails, we must leave the TSC in the record header.
-        * It is needed to keep track of TSC overflows for the following
-        * records.
-        */
-       save_last_tsc(config, buf, 0ULL);
-
-       if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
-                  != end_offset))
-               return -EPERM;
-       else
-               return 0;
-}
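
This enables a "reserve first, filter later" pattern. A sketch;
record_is_wanted() is hypothetical, and config/ctx come from a
successful reserve as shown above:

	if (!record_is_wanted(ctx.priv)) {
		/* Nonzero return: the discard lost the race, must commit. */
		if (lib_ring_buffer_try_discard_reserve(config, &ctx))
			lib_ring_buffer_commit(config, &ctx);
	} else {
		lib_ring_buffer_commit(config, &ctx);
	}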
-
-static inline
-void channel_record_disable(const struct lib_ring_buffer_config *config,
-                           struct channel *chan)
-{
-       atomic_inc(&chan->record_disabled);
-}
-
-static inline
-void channel_record_enable(const struct lib_ring_buffer_config *config,
-                          struct channel *chan)
-{
-       atomic_dec(&chan->record_disabled);
-}
-
-static inline
-void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
-                                   struct lib_ring_buffer *buf)
-{
-       atomic_inc(&buf->record_disabled);
-}
-
-static inline
-void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
-                                  struct lib_ring_buffer *buf)
-{
-       atomic_dec(&buf->record_disabled);
-}
-
-#endif /* _LIB_RING_BUFFER_FRONTEND_API_H */
diff --git a/lib/ringbuffer/frontend_internal.h b/lib/ringbuffer/frontend_internal.h
deleted file mode 100644 (file)
index 39f2b77..0000000
+++ /dev/null
@@ -1,334 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/frontend_internal.h
- *
- * Ring Buffer Library Synchronization Header (internal helpers).
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- */
-
-#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
-#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
-
-#include <wrapper/ringbuffer/config.h>
-#include <wrapper/ringbuffer/backend_types.h>
-#include <wrapper/ringbuffer/frontend_types.h>
-#include <lib/prio_heap/lttng_prio_heap.h>     /* For per-CPU read-side iterator */
-
-/* Buffer offset macros */
-
-/* buf_trunc mask selects only the buffer number. */
-static inline
-unsigned long buf_trunc(unsigned long offset, struct channel *chan)
-{
-       return offset & ~(chan->backend.buf_size - 1);
-}
-
-/* Select the buffer number value (counter). */
-static inline
-unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
-{
-       return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
-}
-
-/* buf_offset mask selects only the offset within the current buffer. */
-static inline
-unsigned long buf_offset(unsigned long offset, struct channel *chan)
-{
-       return offset & (chan->backend.buf_size - 1);
-}
-
-/* subbuf_offset mask selects the offset within the current subbuffer. */
-static inline
-unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
-{
-       return offset & (chan->backend.subbuf_size - 1);
-}
-
-/* subbuf_trunc mask selects the subbuffer number. */
-static inline
-unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
-{
-       return offset & ~(chan->backend.subbuf_size - 1);
-}
-
-/* subbuf_align aligns the offset to the next subbuffer. */
-static inline
-unsigned long subbuf_align(unsigned long offset, struct channel *chan)
-{
-       return (offset + chan->backend.subbuf_size)
-              & ~(chan->backend.subbuf_size - 1);
-}
-
-/* subbuf_index returns the index of the current subbuffer within the buffer. */
-static inline
-unsigned long subbuf_index(unsigned long offset, struct channel *chan)
-{
-       return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
-}
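
A worked example with a hypothetical geometry makes these masks concrete:

	/*
	 * Hypothetical geometry: buf_size = 64 kB holding 16 sub-buffers
	 * of 4 kB (subbuf_size_order = 12), offset = 0x5b20:
	 *
	 *   buf_offset(0x5b20)    = 0x5b20  (offset & 0xffff)
	 *   subbuf_offset(0x5b20) = 0xb20   (offset & 0xfff)
	 *   subbuf_trunc(0x5b20)  = 0x5000  (offset & ~0xfff)
	 *   subbuf_align(0x5b20)  = 0x6000  (start of next sub-buffer)
	 *   subbuf_index(0x5b20)  = 5       (6th sub-buffer in the buffer)
	 */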
-
-/*
- * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
- * bits from the last TSC read. When overflows are detected, the full 64-bit
- * timestamp counter should be written in the record header. Reads and writes
- * last_tsc atomically.
- */
-
-#if (BITS_PER_LONG == 32)
-static inline
-void save_last_tsc(const struct lib_ring_buffer_config *config,
-                  struct lib_ring_buffer *buf, u64 tsc)
-{
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
-               return;
-
-       /*
-        * Ensure the compiler performs this update in a single instruction.
-        */
-       v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
-}
-
-static inline
-int last_tsc_overflow(const struct lib_ring_buffer_config *config,
-                     struct lib_ring_buffer *buf, u64 tsc)
-{
-       unsigned long tsc_shifted;
-
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
-               return 0;
-
-       tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
-       if (unlikely(tsc_shifted
-                    - (unsigned long)v_read(config, &buf->last_tsc)))
-               return 1;
-       else
-               return 0;
-}
-#else
-static inline
-void save_last_tsc(const struct lib_ring_buffer_config *config,
-                  struct lib_ring_buffer *buf, u64 tsc)
-{
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
-               return;
-
-       v_set(config, &buf->last_tsc, (unsigned long)tsc);
-}
-
-static inline
-int last_tsc_overflow(const struct lib_ring_buffer_config *config,
-                     struct lib_ring_buffer *buf, u64 tsc)
-{
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
-               return 0;
-
-       if (unlikely((tsc - v_read(config, &buf->last_tsc))
-                    >> config->tsc_bits))
-               return 1;
-       else
-               return 0;
-}
-#endif
-
-extern
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
-               void *client_ctx);
-
-extern
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
-                                enum switch_mode mode);
-
-extern
-void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
-                                  struct lib_ring_buffer *buf,
-                                  struct channel *chan,
-                                  unsigned long offset,
-                                  unsigned long commit_count,
-                                  unsigned long idx,
-                                  u64 tsc);
-
-extern
-void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
-extern
-void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);
-extern
-void lib_ring_buffer_clear(struct lib_ring_buffer *buf);
-
-/* Buffer write helpers */
-
-static inline
-void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
-                                        struct channel *chan,
-                                        unsigned long offset)
-{
-       unsigned long consumed_old, consumed_new;
-
-       do {
-               consumed_old = atomic_long_read(&buf->consumed);
-               /*
-                * If buffer is in overwrite mode, push the reader consumed
-                * count if the write position has reached it and we are not
-                * at the first iteration (don't push the reader farther than
-                * the writer). This operation can be done concurrently by many
-                * writers in the same buffer, the writer being at the farthest
-                * write position sub-buffer index in the buffer being the one
-                * which will win this loop.
-                */
-               if (unlikely(subbuf_trunc(offset, chan)
-                             - subbuf_trunc(consumed_old, chan)
-                            >= chan->backend.buf_size))
-                       consumed_new = subbuf_align(consumed_old, chan);
-               else
-                       return;
-       } while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
-                                             consumed_new) != consumed_old));
-}
-
-/*
- * Move consumed position to the beginning of subbuffer in which the
- * write offset is. Should only be used on ring buffers that are not
- * actively being written into, because clear_reader does not take into
- * account the commit counters when moving the consumed position, which
- * can make concurrent trace producers or consumers observe consumed
- * position further than the write offset, which breaks ring buffer
- * algorithm guarantees.
- */
-static inline
-void lib_ring_buffer_clear_reader(struct lib_ring_buffer *buf,
-                                 struct channel *chan)
-{
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long offset, consumed_old, consumed_new;
-
-       do {
-               offset = v_read(config, &buf->offset);
-               consumed_old = atomic_long_read(&buf->consumed);
-               CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
-                               - subbuf_trunc(consumed_old, chan))
-                               < 0);
-               consumed_new = subbuf_trunc(offset, chan);
-       } while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
-                                             consumed_new) != consumed_old));
-}
-
-static inline
-int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
-                                struct lib_ring_buffer *buf,
-                                struct channel *chan)
-{
-       return !!subbuf_offset(v_read(config, &buf->offset), chan);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
-                                           struct lib_ring_buffer *buf,
-                                           unsigned long idx)
-{
-       return subbuffer_get_data_size(config, &buf->backend, idx);
-}
-
-/*
- * Check if all space reservations in a buffer have been committed. This helps
- * to know whether an execution context is nested (for per-cpu buffers only).
- * This is a very specific ftrace use-case, so we keep this as an "internal" API.
- */
-static inline
-int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
-                                     struct lib_ring_buffer *buf,
-                                     struct channel *chan)
-{
-       unsigned long offset, idx, commit_count;
-
-       CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
-       CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
-
-       /*
-        * Read the offset and the commit count in a loop so they are both read
-        * atomically wrt interrupts. We deal with interrupt concurrency by
-        * restarting both reads if the offset has been pushed. Note that, given
-        * we only have to deal with interrupt concurrency here, an interrupt
-        * modifying the commit count will also modify "offset", so it is safe
-        * to only check for offset modifications.
-        */
-       do {
-               offset = v_read(config, &buf->offset);
-               idx = subbuf_index(offset, chan);
-               commit_count = v_read(config, &buf->commit_hot[idx].cc);
-       } while (offset != v_read(config, &buf->offset));
-
-       return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
-                    - (commit_count & chan->commit_count_mask) == 0);
-}
-
-/*
- * Receive end of subbuffer TSC as parameter. It has been read in the
- * space reservation loop of either reserve or switch, which ensures it
- * progresses monotonically with event records in the buffer. Therefore,
- * it ensures that the end timestamp of a subbuffer is <= begin
- * timestamp of the following subbuffers.
- */
-static inline
-void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
-                                  struct lib_ring_buffer *buf,
-                                  struct channel *chan,
-                                  unsigned long offset,
-                                  unsigned long commit_count,
-                                  unsigned long idx,
-                                  u64 tsc)
-{
-       unsigned long old_commit_count = commit_count
-                                        - chan->backend.subbuf_size;
-
-       /* Check if all commits have been done */
-       if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
-                    - (old_commit_count & chan->commit_count_mask) == 0))
-               lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
-                       commit_count, idx, tsc);
-}
-
-/*
- * lib_ring_buffer_write_commit_counter
- *
- * For flight recording. Must be called after commit.
- * This function increments the subbuffer's commit_seq counter each time the
- * commit count reaches back the reserve offset (modulo subbuffer size). It is
- * useful for crash dumps.
- */
-static inline
-void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
-                                         struct lib_ring_buffer *buf,
-                                         struct channel *chan,
-                                         unsigned long buf_offset,
-                                         unsigned long commit_count,
-                                         struct commit_counters_hot *cc_hot)
-{
-       unsigned long commit_seq_old;
-
-       if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
-               return;
-
-       /*
-        * subbuf_offset includes commit_count_mask. We can simply
-        * compare the offsets within the subbuffer without caring about
-        * buffer full/empty mismatch because offset is never zero here
-        * (subbuffer header and record headers have non-zero length).
-        */
-       if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
-               return;
-
-       commit_seq_old = v_read(config, &cc_hot->seq);
-       if (likely((long) (commit_seq_old - commit_count) < 0))
-               v_set(config, &cc_hot->seq, commit_count);
-}
-
-extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
-                                 struct channel_backend *chanb, int cpu);
-extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
-
-/* Keep track of trap nesting inside ring buffer code */
-DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
-
-#endif /* _LIB_RING_BUFFER_FRONTEND_INTERNAL_H */
diff --git a/lib/ringbuffer/frontend_types.h b/lib/ringbuffer/frontend_types.h
deleted file mode 100644 (file)
index 3fd6345..0000000
+++ /dev/null
@@ -1,195 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/frontend_types.h
- *
- * Ring Buffer Library Synchronization Header (types).
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- */
-
-#ifndef _LIB_RING_BUFFER_FRONTEND_TYPES_H
-#define _LIB_RING_BUFFER_FRONTEND_TYPES_H
-
-#include <linux/kref.h>
-#include <wrapper/ringbuffer/config.h>
-#include <wrapper/ringbuffer/backend_types.h>
-#include <lib/prio_heap/lttng_prio_heap.h>     /* For per-CPU read-side iterator */
-#include <lttng-cpuhotplug.h>
-
-/*
- * A switch is done during tracing or as a final flush after tracing (so it
- * won't write in the new sub-buffer).
- */
-enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
-
-/* channel-level read-side iterator */
-struct channel_iter {
-       /* Prio heap of buffers. Lowest timestamps at the top. */
-       struct lttng_ptr_heap heap;     /* Heap of struct lib_ring_buffer ptrs */
-       struct list_head empty_head;    /* Empty buffers linked-list head */
-       int read_open;                  /* Opened for reading ? */
-       u64 last_qs;                    /* Last quiescent state timestamp */
-       u64 last_timestamp;             /* Last timestamp (for WARN_ON) */
-       int last_cpu;                   /* Last timestamp cpu */
-       /*
-        * read() file operation state.
-        */
-       unsigned long len_left;
-};
-
-/* channel: collection of per-cpu ring buffers. */
-struct channel {
-       atomic_t record_disabled;
-       unsigned long commit_count_mask;        /*
-                                                * Commit count mask, removing
-                                                * the MSBs corresponding to
-                                                * bits used to represent the
-                                                * subbuffer index.
-                                                */
-
-       struct channel_backend backend;         /* Associated backend */
-
-       unsigned long switch_timer_interval;    /* Buffer flush (jiffies) */
-       unsigned long read_timer_interval;      /* Reader wakeup (jiffies) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-       struct lttng_cpuhp_node cpuhp_prepare;
-       struct lttng_cpuhp_node cpuhp_online;
-       struct lttng_cpuhp_node cpuhp_iter_online;
-#else
-       struct notifier_block cpu_hp_notifier;  /* CPU hotplug notifier */
-       struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */
-       unsigned int cpu_hp_enable:1;           /* Enable CPU hotplug notif. */
-       unsigned int hp_iter_enable:1;          /* Enable hp iter notif. */
-#endif
-       struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
-       wait_queue_head_t read_wait;            /* reader wait queue */
-       wait_queue_head_t hp_wait;              /* CPU hotplug wait queue */
-       int finalized;                          /* Has channel been finalized */
-       struct channel_iter iter;               /* Channel read-side iterator */
-       struct kref ref;                        /* Reference count */
-};
-
-/* Per-subbuffer commit counters used on the hot path */
-struct commit_counters_hot {
-       union v_atomic cc;              /* Commit counter */
-       union v_atomic seq;             /* Consecutive commits */
-};
-
-/* Per-subbuffer commit counters used only on cold paths */
-struct commit_counters_cold {
-       union v_atomic cc_sb;           /* Incremented _once_ at sb switch */
-};
-
-/* Per-buffer read iterator */
-struct lib_ring_buffer_iter {
-       u64 timestamp;                  /* Current record timestamp */
-       size_t header_len;              /* Current record header length */
-       size_t payload_len;             /* Current record payload length */
-
-       struct list_head empty_node;    /* Linked list of empty buffers */
-       unsigned long consumed, read_offset, data_size;
-       enum {
-               ITER_GET_SUBBUF = 0,
-               ITER_TEST_RECORD,
-               ITER_NEXT_RECORD,
-               ITER_PUT_SUBBUF,
-       } state;
-       unsigned int allocated:1;
-       unsigned int read_open:1;       /* Opened for reading ? */
-};
-
-/* ring buffer state */
-struct lib_ring_buffer {
-       /* First 32 bytes cache-hot cacheline */
-       union v_atomic offset;          /* Current offset in the buffer */
-       struct commit_counters_hot *commit_hot;
-                                       /* Commit count per sub-buffer */
-       atomic_long_t consumed;         /*
-                                        * Current offset in the buffer
-                                        * standard atomic access (shared)
-                                        */
-       atomic_t record_disabled;
-       /* End of first 32 bytes cacheline */
-       union v_atomic last_tsc;        /*
-                                        * Last timestamp written in the buffer.
-                                        */
-
-       struct lib_ring_buffer_backend backend; /* Associated backend */
-
-       struct commit_counters_cold *commit_cold;
-                                       /* Commit count per sub-buffer */
-       u64 *ts_end;                    /*
-                                        * timestamp_end per sub-buffer.
-                                        * Time is sampled by the
-                                        * switch_*_end() callbacks which
-                                        * are the last space reservation
-                                        * performed in the sub-buffer
-                                        * before it can be fully
-                                        * committed and delivered. This
-                                        * time value is then read by
-                                        * the deliver callback,
-                                        * performed by the last commit
-                                        * before the buffer becomes
-                                        * readable.
-                                        */
-       atomic_long_t active_readers;   /*
-                                        * Active readers count
-                                        * standard atomic access (shared)
-                                        */
-                                       /* Dropped records */
-       union v_atomic records_lost_full;       /* Buffer full */
-       union v_atomic records_lost_wrap;       /* Nested wrap-around */
-       union v_atomic records_lost_big;        /* Events too big */
-       union v_atomic records_count;   /* Number of records written */
-       union v_atomic records_overrun; /* Number of overwritten records */
-       wait_queue_head_t read_wait;    /* reader buffer-level wait queue */
-       wait_queue_head_t write_wait;   /* writer buffer-level wait queue (for metadata only) */
-       int finalized;                  /* buffer has been finalized */
-       struct timer_list switch_timer; /* timer for periodical switch */
-       struct timer_list read_timer;   /* timer for read poll */
-       raw_spinlock_t raw_tick_nohz_spinlock;  /* nohz entry lock/trylock */
-       struct lib_ring_buffer_iter iter;       /* read-side iterator */
-       unsigned long get_subbuf_consumed;      /* Read-side consumed */
-       unsigned long prod_snapshot;    /* Producer count snapshot */
-       unsigned long cons_snapshot;    /* Consumer count snapshot */
-       unsigned int get_subbuf:1,      /* Sub-buffer being held by reader */
-               switch_timer_enabled:1, /* Protected by ring_buffer_nohz_lock */
-               read_timer_enabled:1,   /* Protected by ring_buffer_nohz_lock */
-               quiescent:1;
-};
-
-static inline
-void *channel_get_private(struct channel *chan)
-{
-       return chan->backend.priv;
-}
-
-void lib_ring_buffer_lost_event_too_big(struct channel *chan);
-
-/*
- * Issue warnings and disable channels upon internal error.
- * Can receive struct lib_ring_buffer or struct lib_ring_buffer_backend
- * parameters.
- */
-#define CHAN_WARN_ON(c, cond)                                          \
-       ({                                                              \
-               struct channel *__chan;                                 \
-               int _____ret = unlikely(cond);                          \
-               if (_____ret) {                                         \
-                       if (__same_type(*(c), struct channel_backend))  \
-                               __chan = container_of((void *) (c),     \
-                                                       struct channel, \
-                                                       backend);       \
-                       else if (__same_type(*(c), struct channel))     \
-                               __chan = (void *) (c);                  \
-                       else                                            \
-                               BUG_ON(1);                              \
-                       atomic_inc(&__chan->record_disabled);           \
-                       WARN_ON(1);                                     \
-               }                                                       \
-               _____ret;                                               \
-       })
-
-#endif /* _LIB_RING_BUFFER_FRONTEND_TYPES_H */
diff --git a/lib/ringbuffer/iterator.h b/lib/ringbuffer/iterator.h
deleted file mode 100644 (file)
index 06fbcd1..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/iterator.h
- *
- * Ring buffer and channel iterators.
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIB_RING_BUFFER_ITERATOR_H
-#define _LIB_RING_BUFFER_ITERATOR_H
-
-#include <wrapper/ringbuffer/backend.h>
-#include <wrapper/ringbuffer/frontend.h>
-#include <wrapper/ringbuffer/vfs.h>
-
-/*
- * lib_ring_buffer_get_next_record advances the buffer read position to the next
- * record. It returns either the size of the next record, -EAGAIN if there is
- * currently no data available, or -ENODATA if no data is available and buffer
- * is finalized.
- */
-extern ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
-                                              struct lib_ring_buffer *buf);
-
-/*
- * channel_get_next_record advances the buffer read position to the next record.
- * It returns either the size of the next record, -EAGAIN if there is currently
- * no data available, or -ENODATA if no data is available and buffer is
- * finalized.
- * Returns the current buffer in ret_buf.
- */
-extern ssize_t channel_get_next_record(struct channel *chan,
-                                      struct lib_ring_buffer **ret_buf);
-
-/**
- * read_current_record - copy the buffer current record into dest.
- * @buf: ring buffer
- * @dest: destination where the record should be copied
- *
- * dest should be large enough to contain the record. Returns the number of
- * bytes copied.
- */
-static inline size_t read_current_record(struct lib_ring_buffer *buf, void *dest)
-{
-       return lib_ring_buffer_read(&buf->backend, buf->iter.read_offset,
-                                   dest, buf->iter.payload_len);
-}
-
-extern int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf);
-extern void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf);
-extern int channel_iterator_open(struct channel *chan);
-extern void channel_iterator_release(struct channel *chan);
-
-extern const struct file_operations channel_payload_file_operations;
-extern const struct file_operations lib_ring_buffer_payload_file_operations;
-
-/*
- * Used internally.
- */
-int channel_iterator_init(struct channel *chan);
-void channel_iterator_unregister_notifiers(struct channel *chan);
-void channel_iterator_free(struct channel *chan);
-void channel_iterator_reset(struct channel *chan);
-void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf);
-
-#endif /* _LIB_RING_BUFFER_ITERATOR_H */
diff --git a/lib/ringbuffer/nohz.h b/lib/ringbuffer/nohz.h
deleted file mode 100644 (file)
index 1d28d27..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/nohz.h
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIB_RING_BUFFER_NOHZ_H
-#define _LIB_RING_BUFFER_NOHZ_H
-
-#ifdef CONFIG_LIB_RING_BUFFER
-void lib_ring_buffer_tick_nohz_flush(void);
-void lib_ring_buffer_tick_nohz_stop(void);
-void lib_ring_buffer_tick_nohz_restart(void);
-#else
-static inline void lib_ring_buffer_tick_nohz_flush(void)
-{
-}
-
-static inline void lib_ring_buffer_tick_nohz_stop(void)
-{
-}
-
-static inline void lib_ring_buffer_tick_nohz_restart(void)
-{
-}
-#endif
-
-#endif /* _LIB_RING_BUFFER_NOHZ_H */
index 2a47948e3efa6597d12018364e58c0c7e739d9d9..d6547d7de9c9cef6f45a6fb426c864a1b8ee3347 100644 (file)
@@ -18,9 +18,9 @@
 
 #include <wrapper/mm.h>
 #include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
-#include <wrapper/ringbuffer/config.h>
-#include <wrapper/ringbuffer/backend.h>
-#include <wrapper/ringbuffer/frontend.h>
+#include <ringbuffer/config.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
 
 /**
  * lib_ring_buffer_backend_allocate - allocate a channel buffer
index 91cf0ae5ee0042e4cc920d3652242cbf6a5b89f1..fca37fbc3a5b143a08cb55b94f762bf664fb7187 100644 (file)
 #include <linux/percpu.h>
 #include <asm/cacheflush.h>
 
-#include <wrapper/ringbuffer/config.h>
-#include <wrapper/ringbuffer/backend.h>
-#include <wrapper/ringbuffer/frontend.h>
-#include <wrapper/ringbuffer/iterator.h>
-#include <wrapper/ringbuffer/nohz.h>
+#include <ringbuffer/config.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <ringbuffer/iterator.h>
+#include <ringbuffer/nohz.h>
 #include <wrapper/atomic.h>
 #include <wrapper/kref.h>
 #include <wrapper/percpu-defs.h>
index 00faa729aa2507a661ccc29e804fdcce823a66d2..15d7c75c15862b8ec4f47771dd98dc1eb8291c30 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  */
 
-#include <wrapper/ringbuffer/iterator.h>
+#include <ringbuffer/iterator.h>
 #include <wrapper/file.h>
 #include <wrapper/uaccess.h>
 #include <linux/jiffies.h>
index c5abe84778d4a1d8c26adaf08cb19731df0959c4..34174a587ab4af1a8d601d752a89ae31e92a4b29 100644 (file)
@@ -13,9 +13,9 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 
-#include <wrapper/ringbuffer/backend.h>
-#include <wrapper/ringbuffer/frontend.h>
-#include <wrapper/ringbuffer/vfs.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <ringbuffer/vfs.h>
 
 /*
  * fault() vm_op implementation for ring buffer file mapping.
index f0b5479a8b8bfb993b29da4d3c0db3e708d76385..cd803a70805e29abada097e5933170ce453804ae 100644 (file)
@@ -15,9 +15,9 @@
 #include <linux/version.h>
 
 #include <wrapper/splice.h>
-#include <wrapper/ringbuffer/backend.h>
-#include <wrapper/ringbuffer/frontend.h>
-#include <wrapper/ringbuffer/vfs.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <ringbuffer/vfs.h>
 
 #if 0
 #define printk_dbg(fmt, args...) printk(fmt, args)
index af753b9bc21a15d807d9eb882d39173406582717..2be550c41ddaa8a662bc6c0ac32f5aa35bd37e20 100644 (file)
@@ -11,9 +11,9 @@
 #include <linux/fs.h>
 #include <linux/compat.h>
 
-#include <wrapper/ringbuffer/backend.h>
-#include <wrapper/ringbuffer/frontend.h>
-#include <wrapper/ringbuffer/vfs.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
+#include <ringbuffer/vfs.h>
 #include <wrapper/poll.h>
 #include <lttng-tracer.h>
 
diff --git a/lib/ringbuffer/vatomic.h b/lib/ringbuffer/vatomic.h
deleted file mode 100644 (file)
index 6fdc4d1..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/vatomic.h
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIB_RING_BUFFER_VATOMIC_H
-#define _LIB_RING_BUFFER_VATOMIC_H
-
-#include <asm/atomic.h>
-#include <asm/local.h>
-
-/*
- * Same data type (long) accessed differently depending on configuration.
- * v field is for non-atomic access (protected by mutual exclusion).
- * In the fast-path, the ring_buffer_config structure is constant, so the
- * compiler can statically select the appropriate branch.
- * local_t is used for per-cpu and per-thread buffers.
- * atomic_long_t is used for globally shared buffers.
- */
-union v_atomic {
-       local_t l;
-       atomic_long_t a;
-       long v;
-};
-
-static inline
-long v_read(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
-{
-       if (config->sync == RING_BUFFER_SYNC_PER_CPU)
-               return local_read(&v_a->l);
-       else
-               return atomic_long_read(&v_a->a);
-}
-
-static inline
-void v_set(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
-          long v)
-{
-       if (config->sync == RING_BUFFER_SYNC_PER_CPU)
-               local_set(&v_a->l, v);
-       else
-               atomic_long_set(&v_a->a, v);
-}
-
-static inline
-void v_add(const struct lib_ring_buffer_config *config, long v, union v_atomic *v_a)
-{
-       if (config->sync == RING_BUFFER_SYNC_PER_CPU)
-               local_add(v, &v_a->l);
-       else
-               atomic_long_add(v, &v_a->a);
-}
-
-static inline
-void v_inc(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
-{
-       if (config->sync == RING_BUFFER_SYNC_PER_CPU)
-               local_inc(&v_a->l);
-       else
-               atomic_long_inc(&v_a->a);
-}
-
-/*
- * Non-atomic decrement. Only used by reader, apply to reader-owned subbuffer.
- */
-static inline
-void _v_dec(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
-{
-       --v_a->v;
-}
-
-static inline
-long v_cmpxchg(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
-              long old, long _new)
-{
-       if (config->sync == RING_BUFFER_SYNC_PER_CPU)
-               return local_cmpxchg(&v_a->l, old, _new);
-       else
-               return atomic_long_cmpxchg(&v_a->a, old, _new);
-}
-
-#endif /* _LIB_RING_BUFFER_VATOMIC_H */
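Editor's note: the v_atomic API removed above depends on the ring buffer client declaring its configuration as a compile-time constant, which is what lets the config->sync test fold away. A minimal sketch of a caller, for illustration only (demo_config and demo_commit are hypothetical names, and all lib_ring_buffer_config fields other than .sync are elided):

/* Illustrative sketch -- not part of this patch. */
#include <ringbuffer/config.h>  /* struct lib_ring_buffer_config */
#include <ringbuffer/vatomic.h> /* union v_atomic, v_add() */

static const struct lib_ring_buffer_config demo_config = {
	.sync = RING_BUFFER_SYNC_PER_CPU,
	/* remaining fields elided */
};

static void demo_commit(union v_atomic *commit_count, long bytes)
{
	/*
	 * demo_config is constant, so the compiler statically selects
	 * the local_add() branch inside v_add(); the atomic_long_add()
	 * path would be emitted only for a global-sync configuration.
	 */
	v_add(&demo_config, bytes, commit_count);
}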
diff --git a/lib/ringbuffer/vfs.h b/lib/ringbuffer/vfs.h
deleted file mode 100644 (file)
index ee23a62..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * lib/ringbuffer/vfs.h
- *
- * Wait-free ring buffer VFS file operations.
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIB_RING_BUFFER_VFS_H
-#define _LIB_RING_BUFFER_VFS_H
-
-#include <linux/fs.h>
-#include <linux/poll.h>
-
-/* VFS API */
-
-extern const struct file_operations lib_ring_buffer_file_operations;
-
-/*
- * Internal file operations.
- */
-
-struct lib_ring_buffer;
-
-int lib_ring_buffer_open(struct inode *inode, struct file *file,
-               struct lib_ring_buffer *buf);
-int lib_ring_buffer_release(struct inode *inode, struct file *file,
-               struct lib_ring_buffer *buf);
-unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
-               struct lib_ring_buffer *buf);
-ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
-               struct pipe_inode_info *pipe, size_t len,
-               unsigned int flags, struct lib_ring_buffer *buf);
-int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma,
-               struct lib_ring_buffer *buf);
-
-/* Ring Buffer ioctl() and ioctl numbers */
-long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
-               unsigned long arg, struct lib_ring_buffer *buf);
-#ifdef CONFIG_COMPAT
-long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
-               unsigned long arg, struct lib_ring_buffer *buf);
-#endif
-
-ssize_t vfs_lib_ring_buffer_file_splice_read(struct file *in, loff_t *ppos,
-               struct pipe_inode_info *pipe, size_t len, unsigned int flags);
-loff_t vfs_lib_ring_buffer_no_llseek(struct file *file, loff_t offset,
-               int origin);
-int vfs_lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma);
-ssize_t vfs_lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
-               struct pipe_inode_info *pipe, size_t len,
-               unsigned int flags);
-
-/*
- * Use RING_BUFFER_GET_NEXT_SUBBUF / RING_BUFFER_PUT_NEXT_SUBBUF to read and
- * consume sub-buffers sequentially.
- *
- * Reading sub-buffers without consuming them can be performed with:
- *
- * RING_BUFFER_SNAPSHOT
- * RING_BUFFER_SNAPSHOT_GET_CONSUMED
- * RING_BUFFER_SNAPSHOT_GET_PRODUCED
- *
- * to get the offset range to consume, and then by passing each sub-buffer
- * offset to RING_BUFFER_GET_SUBBUF, read the sub-buffer, and then release it
- * with RING_BUFFER_PUT_SUBBUF.
- *
- * Note that the "snapshot" API can be used to read the sub-buffer in reverse
- * order, which is useful for flight recorder snapshots.
- */
-
-/* Get a snapshot of the current ring buffer producer and consumer positions */
-#define RING_BUFFER_SNAPSHOT                   _IO(0xF6, 0x00)
-/* Get the consumer position (iteration start) */
-#define RING_BUFFER_SNAPSHOT_GET_CONSUMED      _IOR(0xF6, 0x01, unsigned long)
-/* Get the producer position (iteration end) */
-#define RING_BUFFER_SNAPSHOT_GET_PRODUCED      _IOR(0xF6, 0x02, unsigned long)
-/* Get exclusive read access to the specified sub-buffer position */
-#define RING_BUFFER_GET_SUBBUF                 _IOW(0xF6, 0x03, unsigned long)
-/* Release exclusive sub-buffer access */
-#define RING_BUFFER_PUT_SUBBUF                 _IO(0xF6, 0x04)
-
-/* Get exclusive read access to the next sub-buffer that can be read. */
-#define RING_BUFFER_GET_NEXT_SUBBUF            _IO(0xF6, 0x05)
-/* Release exclusive sub-buffer access, move consumer forward. */
-#define RING_BUFFER_PUT_NEXT_SUBBUF            _IO(0xF6, 0x06)
-/* returns the size of the current sub-buffer, without padding (for mmap). */
-#define RING_BUFFER_GET_SUBBUF_SIZE            _IOR(0xF6, 0x07, unsigned long)
-/* returns the size of the current sub-buffer, with padding (for splice). */
-#define RING_BUFFER_GET_PADDED_SUBBUF_SIZE     _IOR(0xF6, 0x08, unsigned long)
-/* returns the maximum size for sub-buffers. */
-#define RING_BUFFER_GET_MAX_SUBBUF_SIZE                _IOR(0xF6, 0x09, unsigned long)
-/* returns the length to mmap. */
-#define RING_BUFFER_GET_MMAP_LEN               _IOR(0xF6, 0x0A, unsigned long)
-/* returns the offset of the subbuffer belonging to the mmap reader. */
-#define RING_BUFFER_GET_MMAP_READ_OFFSET       _IOR(0xF6, 0x0B, unsigned long)
-/* Flush the current sub-buffer, if non-empty. */
-#define RING_BUFFER_FLUSH                      _IO(0xF6, 0x0C)
-/* Get the current version of the metadata cache (after a get_next). */
-#define RING_BUFFER_GET_METADATA_VERSION       _IOR(0xF6, 0x0D, uint64_t)
-/*
- * Get a snapshot of the current ring buffer producer and consumer positions,
- * regardless of whether or not the two positions are contained within the same
- * sub-buffer.
- */
-#define RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS  _IO(0xF6, 0x0E)
-/* Flush the current sub-buffer, even if empty. */
-#define RING_BUFFER_FLUSH_EMPTY                        _IO(0xF6, 0x0F)
-/*
- * Reset the position of what has been consumed from the metadata cache to 0
- * so it can be read again.
- */
-#define RING_BUFFER_METADATA_CACHE_DUMP                _IO(0xF6, 0x10)
-/* Clear ring buffer content. */
-#define RING_BUFFER_CLEAR                      _IO(0xF6, 0x11)
-
-#ifdef CONFIG_COMPAT
-/* Get a snapshot of the current ring buffer producer and consumer positions */
-#define RING_BUFFER_COMPAT_SNAPSHOT            RING_BUFFER_SNAPSHOT
-/* Get the consumer position (iteration start) */
-#define RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED \
-       _IOR(0xF6, 0x01, compat_ulong_t)
-/* Get the producer position (iteration end) */
-#define RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED \
-       _IOR(0xF6, 0x02, compat_ulong_t)
-/* Get exclusive read access to the specified sub-buffer position */
-#define RING_BUFFER_COMPAT_GET_SUBBUF          _IOW(0xF6, 0x03, compat_ulong_t)
-/* Release exclusive sub-buffer access */
-#define RING_BUFFER_COMPAT_PUT_SUBBUF          RING_BUFFER_PUT_SUBBUF
-
-/* Get exclusive read access to the next sub-buffer that can be read. */
-#define RING_BUFFER_COMPAT_GET_NEXT_SUBBUF     RING_BUFFER_GET_NEXT_SUBBUF
-/* Release exclusive sub-buffer access, move consumer forward. */
-#define RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF     RING_BUFFER_PUT_NEXT_SUBBUF
-/* returns the size of the current sub-buffer, without padding (for mmap). */
-#define RING_BUFFER_COMPAT_GET_SUBBUF_SIZE     _IOR(0xF6, 0x07, compat_ulong_t)
-/* returns the size of the current sub-buffer, with padding (for splice). */
-#define RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE \
-       _IOR(0xF6, 0x08, compat_ulong_t)
-/* returns the maximum size for sub-buffers. */
-#define RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE _IOR(0xF6, 0x09, compat_ulong_t)
-/* returns the length to mmap. */
-#define RING_BUFFER_COMPAT_GET_MMAP_LEN                _IOR(0xF6, 0x0A, compat_ulong_t)
-/* returns the offset of the subbuffer belonging to the mmap reader. */
-#define RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET        _IOR(0xF6, 0x0B, compat_ulong_t)
-/* Flush the current sub-buffer, if non-empty. */
-#define RING_BUFFER_COMPAT_FLUSH               RING_BUFFER_FLUSH
-/* Get the current version of the metadata cache (after a get_next). */
-#define RING_BUFFER_COMPAT_GET_METADATA_VERSION        RING_BUFFER_GET_METADATA_VERSION
-/*
- * Get a snapshot of the current ring buffer producer and consumer positions,
- * regardless of whether or not the two positions are contained within the same
- * sub-buffer.
- */
-#define RING_BUFFER_COMPAT_SNAPSHOT_SAMPLE_POSITIONS   \
-       RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS
-/* Flush the current sub-buffer, even if empty. */
-#define RING_BUFFER_COMPAT_FLUSH_EMPTY                 \
-       RING_BUFFER_FLUSH_EMPTY
-/* Clear ring buffer content. */
-#define RING_BUFFER_COMPAT_CLEAR                       \
-       RING_BUFFER_CLEAR
-#endif /* CONFIG_COMPAT */
-
-#endif /* _LIB_RING_BUFFER_VFS_H */
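Editor's note: the sequential-consumption flow documented in the header above (RING_BUFFER_GET_NEXT_SUBBUF / RING_BUFFER_PUT_NEXT_SUBBUF, with the size and mmap-offset queries in between) maps to a simple user-space loop. A minimal sketch, assuming stream_fd is an open ring buffer stream file descriptor whose buffer is already mmap()ed at base, and process_subbuf() is a hypothetical callback; the RING_BUFFER_* ioctl numbers are the ones defined above:

/* Illustrative sketch -- not part of this patch. */
#include <sys/ioctl.h>

static int consume_stream(int stream_fd, char *base,
		void (*process_subbuf)(const char *data, unsigned long len))
{
	for (;;) {
		unsigned long len, off;

		/* Gain exclusive read access to the next readable sub-buffer. */
		if (ioctl(stream_fd, RING_BUFFER_GET_NEXT_SUBBUF) < 0)
			return -1;	/* nothing ready, or error */
		if (ioctl(stream_fd, RING_BUFFER_GET_SUBBUF_SIZE, &len) == 0 &&
		    ioctl(stream_fd, RING_BUFFER_GET_MMAP_READ_OFFSET, &off) == 0)
			process_subbuf(base + off, len);
		/* Release the sub-buffer and move the consumer forward. */
		if (ioctl(stream_fd, RING_BUFFER_PUT_NEXT_SUBBUF) < 0)
			return -1;
	}
}

The snapshot ioctls (RING_BUFFER_SNAPSHOT and the GET_CONSUMED/GET_PRODUCED queries) support the non-consuming, flight-recorder style of reading instead of this consuming loop.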
index 4051264e5e908fddd8c98e1ec13607dec63dc67f..1b5239e51164112932c87bbc47637db9d54d23d3 100644 (file)
@@ -31,9 +31,9 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
-#include <wrapper/ringbuffer/vfs.h>
-#include <wrapper/ringbuffer/backend.h>
-#include <wrapper/ringbuffer/frontend.h>
+#include <ringbuffer/vfs.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
 #include <wrapper/poll.h>
 #include <wrapper/file.h>
 #include <wrapper/kref.h>
@@ -43,7 +43,7 @@
 #include <lttng-events.h>
 #include <lttng-tracer.h>
 #include <lttng-tp-mempool.h>
-#include <lib/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 
 /*
  * This is LTTng's own personal way to create a system call as an external
index 1e791f9c5b24eaa62bad40fe079d5432be5c095c..e8be78faab37c97c25d4cfbd8a300837d909d581 100644 (file)
@@ -44,8 +44,8 @@
 #include <linux/stacktrace.h>
 #include <linux/spinlock.h>
 #include "lttng-events.h"
-#include "wrapper/ringbuffer/backend.h"
-#include "wrapper/ringbuffer/frontend.h"
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
 #include "wrapper/vmalloc.h"
 #include "lttng-tracer.h"
 #include "lttng-endian.h"
index ced8c1246528e0d3ea8c0f347b0e2b61020b4e82..eebedeb5ef04291a5a85e8956e2c8d25f58f4371 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/sched.h>
 #include <linux/cgroup.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/namespace.h>
 #include <lttng-tracer.h>
index 20c186a43bd54c63967e2039b9286182ff0c26dc..1f79a5e98fab0282603df04c209fe1b57702eb4e 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index 6ec3922443e6a296cd3ce5a17abdab84754d546b..535402a2c923bd8d7fd3184314a6a30cf4ebb6d8 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index 2895cef94186eb023b62149394bde503553a71b2..3e6d03f988535422b52eecbc711fcfcb5d4e4839 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index ebdbc737ec5f5aec2eb5cf0b33e46b35fb1ae721..0508c9ba38bad7a46fb9da78faa064908429b694 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index a16da3507de719e7a8468f96aa268018103de916..b8fa5149993c7a69f52b75b2b77c869cc4e3a0f7 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/sched.h>
 #include <linux/utsname.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index c9faaa7f66330a1f9d0c4f330cb38936470bdf41..62e8583d31d412e8c8da73ce1b62c67d1a4eca76 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/sched.h>
 #include <linux/irqflags.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index 7171928c706138ace4f8f4f8fd77250225ad3acc..61c193ca7cad3384af1d075944fd2f2584b86c4e 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/sched.h>
 #include <linux/ipc_namespace.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/namespace.h>
 #include <lttng-tracer.h>
index 08a55f4b59b7de14345d377a4595c03819477b86..1e7cdeab8b5a657c59b4b926f7ebbb09813577c3 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/sched.h>
 #include <linux/irqflags.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index 539b043bd37e3d4d486caa4efb640e02a5e131eb..5d396aef614fb43d79f2e84e3dde730221b2d6a6 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <linux/nsproxy.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/namespace.h>
 #include <lttng-tracer.h>
index dc404076452d490b45477d7fdd1904e7aa20b72d..2b16c270ce6eecee80a5434aea2b3e92895bac65 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/sched.h>
 #include <linux/irqflags.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index 06088a51e94c725662088ae54272e79c6c77344a..74cf8f82418b7e334b5a7c1dac9ff4ebc7d1b464 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/nsproxy.h>
 #include <net/net_namespace.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/namespace.h>
 #include <lttng-tracer.h>
index 271f2049ab2452a4d0aaced774c5a0b4988fcc27..095462ebdbef2698844d7073924e3fd92a1fbbca 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index be26e4955fef58b533a16ff335669b205ea2d28f..05a28ab55077506b96505be242a70a5389cc9bc5 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/string.h>
 #include <linux/cpu.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/perf.h>
 #include <lttng-tracer.h>
index 90c21dead4bea48513f8204e3bb0202a11467e97..e0f5636ab72a5ca3a7c7b02b9350adad10ddf09a 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/sched.h>
 #include <linux/pid_namespace.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/namespace.h>
 #include <lttng-tracer.h>
index b8155ea8dbb1456300fa8645a026bd5eae60ab33..a84a284c36ef174f103c86c4698db003a9bbcff7 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index 13a521bdfc3a1efcfecb2647480b9501e6caa88f..44956c7e1979c6de39d1d2c2ea93d5c2d454781a 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/sched.h>
 #include <linux/syscalls.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index 95a385478c759faf4b9010c56203901423f8298f..0bd8e9127513cd914e1775c28a0576477eaa9c8e 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/sched.h>
 #include <linux/irqflags.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index 6441b5f1f4c38726bdd25ce57ae09b8b24271c5a..84d5c5bf06ec917f624f6afb274a6ec4760da410 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/kallsyms.h>
 #include <lttng-tracer.h>
index fc40f6cf8f55f798d4452c231562724786a67c18..3fd8236914e9b66b7d8a4447461e20c57d857189 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 #include <lttng-endian.h>
index 81fa3f1070fd2f38fdf0c47b53a440a48554be74..a729d0bbbb884d54b2cc22684c03ea69fd32bf35 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index 5f1ce3cd175daf26befe7730a09567ffb4334035..790874717da19f616f08b96bcef16c80ce41178c 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index efeca439010c603256dc5cc8e2c82ef94c5d2bc7..144b081bc2bc7b1a6817b429a936cd79256aace5 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index 3942e94afa1636c048118de82662452e2e74643e..c639dc186a15b5d0f026e53d67b79c6c2568a801 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index 45db066d9025f9ccfcee5ded0223373eb368de2c..3bee6d6510f55593e00b40295d71c5154a7d0379 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/sched.h>
 #include <linux/user_namespace.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/namespace.h>
 #include <lttng-tracer.h>
index 26bdfa8354300a11a31d4a6d8f0f3fd655f62296..02192e7fb7fde703cd09b5f28dc7c9b192c3918e 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/sched.h>
 #include <linux/utsname.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/namespace.h>
 #include <lttng-tracer.h>
index 029658b6465e0ddbe4a138efbc860005a7fbc6dd..abd81aa385101a52a3145ffe9e013802564112d7 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index 211a16514e53d000357107ba2a9c3d5c721b62fa..f032b3a6d47495f0cceb0139c45061c9b4aacbd3 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index a7b70d847192a983a15d407f466710a3ecbf3429..5c70003c0e204119143800a1d68a3c79e9c581d4 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index 544215cb9f1f575cc64cb7f07cde41b7f24c3d5a..979d52aa899b381d6464add54a31266f7e344359 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index 87dd3d0d6e1745e5c6548cbfcc7c1fa43dbdbae3..cedf68043250791b14b6a9a4b2fda0e4804d302f 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/sched.h>
 #include <linux/syscalls.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index b82e4eab6d5b5b71d13e413a352e36ab1c73e433..82ada63745b1774dfe410890f6f6bc13ebb8e1fb 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index 71a40d59b10f54762c3cb48da75512e30db0f23f..fab50726dc0253d6251a715d07af129885ea67fe 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index a2d90bb9fc7737de67f31b298929f2e9dff7df56..74198d02ba62ddecb424105ee213914ef6eb5db3 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <lttng-tracer.h>
 
index dd8eab89aaaf0da9d6d1aa431bc734ccfc5747bc..13fb70ef85514c3d16554f308ebcead6aa21f80c 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/user_namespace.h>
 
index 49a181b6a5d09aba4c5279ae8fb73c59d5f25049..d22bf327ca2cba79a38da92067344427a07abb6d 100644 (file)
@@ -41,8 +41,8 @@
 #include <lttng-abi-old.h>
 #include <lttng-endian.h>
 #include <lttng-string-utils.h>
-#include <wrapper/ringbuffer/backend.h>
-#include <wrapper/ringbuffer/frontend.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
 #include <wrapper/time.h>
 
 #define METADATA_CACHE_DEFAULT_SIZE 4096
index ecdc4adf60ede2e9138f8deac4679d0398d3c05b..9e204caa24546a9ece56a97034ad5e2c5a68ce32 100644 (file)
@@ -14,7 +14,7 @@
 #include <wrapper/trace-clock.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 
 #define LTTNG_COMPACT_EVENT_BITS       5
 #define LTTNG_COMPACT_TSC_BITS         27
@@ -191,7 +191,7 @@ size_t record_header_size(const struct lib_ring_buffer_config *config,
        return offset - orig_offset;
 }
 
-#include <wrapper/ringbuffer/api.h>
+#include <ringbuffer/api.h>
 
 static
 void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
index fb71ad2248d7dcf70f07f6bed24865f135223207..9b9ac2c2ae0d39fca6a2b534c3ada6bc3766b9b0 100644 (file)
@@ -51,7 +51,7 @@ size_t record_header_size(const struct lib_ring_buffer_config *config,
        return 0;
 }
 
-#include <wrapper/ringbuffer/api.h>
+#include <ringbuffer/api.h>
 
 static u64 client_ring_buffer_clock_read(struct channel *chan)
 {
index 514ae3f8ca5f9186becd85d70cb37f7765ade78d..18716738fd4e77657cf182f5eceb7fb79d2e53c6 100644 (file)
@@ -19,7 +19,7 @@
 #define RING_BUFFER_ALIGN
 #endif
 
-#include <wrapper/ringbuffer/config.h>
+#include <ringbuffer/config.h>
 
 struct lttng_session;
 struct lttng_channel;
index a871a4b14fff733e641ef259d057ce74b25328f7..effd6f95ca0cbd96e1226c147682b6e9f601798f 100644 (file)
@@ -4,7 +4,7 @@ TOP_LTTNG_MODULES_DIR := $(shell dirname $(lastword $(MAKEFILE_LIST)))/..
 
 include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common
 
-ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)
+ccflags-y += -I$(TOP_LTTNG_MODULES_DIR) -I$(TOP_LTTNG_MODULES_DIR)/include
 
 obj-$(CONFIG_LTTNG) += lttng-probe-sched.o
 obj-$(CONFIG_LTTNG) += lttng-probe-irq.o
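Editor's note: with include/ added to ccflags-y as above, sources name the relocated headers directly; for example (illustrative):

/* Resolves to include/ringbuffer/frontend_types.h via -I.../include */
#include <ringbuffer/frontend_types.h>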
index d0a48d2894db0e321af1b4b8ea32240a0f729471..6a1347321fed1f05d531b201b8ab1b3b3e010305 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/kprobes.h>
 #include <linux/slab.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/irqflags.h>
 #include <lttng-tracer.h>
index 133158fe44f0bdc12bd6d4c7bfcf5af793d83b04..e95ef7d5fb76f1a4ab8d91f773815577a23253c4 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/slab.h>
 #include <linux/kref.h>
 #include <lttng-events.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/irqflags.h>
 #include <lttng-tracer.h>
index 48639152a3e1cc02839c85f348e4cbb3f78529c7..5dee7fbe3036974338ce1abc25fd6df748f7e7cc 100644 (file)
@@ -16,8 +16,8 @@
 #include <probes/lttng-types.h>
 #include <probes/lttng-probe-user.h>
 #include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
-#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/ringbuffer/backend.h>
+#include <ringbuffer/frontend_types.h>
+#include <ringbuffer/backend.h>
 #include <wrapper/rcu.h>
 #include <wrapper/user_namespace.h>
 #include <lttng-events.h>
index 82bd4fd4991ba08925d0437f347937815836b804..c29e18144d181381ec05ffd11a3547f9f0880e8a 100644 (file)
@@ -18,7 +18,7 @@
 #include <lttng-events.h>
 #include <lttng-tracer.h>
 #include <wrapper/irqflags.h>
-#include <wrapper/ringbuffer/frontend_types.h>
+#include <ringbuffer/frontend_types.h>
 #include <wrapper/uprobes.h>
 #include <wrapper/vmalloc.h>
 
index 3e2edf5bafb5767f6c84ccdd2886ab0d8b2165f4..fa6740803a43905f0f5d3fa8c0c8a17f25b1f440 100644 (file)
@@ -4,7 +4,7 @@ TOP_LTTNG_MODULES_DIR := $(shell dirname $(lastword $(MAKEFILE_LIST)))/..
 
 include $(TOP_LTTNG_MODULES_DIR)/Kbuild.common
 
-ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)
+ccflags-y += -I$(TOP_LTTNG_MODULES_DIR) -I$(TOP_LTTNG_MODULES_DIR)/include
 
 obj-$(CONFIG_LTTNG) += lttng-test.o
 lttng-test-objs := probes/lttng-test.o
diff --git a/wrapper/ringbuffer/api.h b/wrapper/ringbuffer/api.h
deleted file mode 100644 (file)
index 73aa874..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/api.h>
diff --git a/wrapper/ringbuffer/backend.h b/wrapper/ringbuffer/backend.h
deleted file mode 100644 (file)
index 4864160..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/backend.h>
diff --git a/wrapper/ringbuffer/backend_internal.h b/wrapper/ringbuffer/backend_internal.h
deleted file mode 100644 (file)
index d48c231..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <wrapper/inline_memcpy.h>
-#include <lib/ringbuffer/backend_internal.h>
diff --git a/wrapper/ringbuffer/backend_types.h b/wrapper/ringbuffer/backend_types.h
deleted file mode 100644 (file)
index ff237c0..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/backend_types.h>
diff --git a/wrapper/ringbuffer/config.h b/wrapper/ringbuffer/config.h
deleted file mode 100644 (file)
index e1f0a96..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/config.h>
diff --git a/wrapper/ringbuffer/frontend.h b/wrapper/ringbuffer/frontend.h
deleted file mode 100644 (file)
index 3542158..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/frontend.h>
diff --git a/wrapper/ringbuffer/frontend_api.h b/wrapper/ringbuffer/frontend_api.h
deleted file mode 100644 (file)
index 53187bf..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/frontend_api.h>
diff --git a/wrapper/ringbuffer/frontend_internal.h b/wrapper/ringbuffer/frontend_internal.h
deleted file mode 100644 (file)
index c14757a..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/frontend_internal.h>
diff --git a/wrapper/ringbuffer/frontend_types.h b/wrapper/ringbuffer/frontend_types.h
deleted file mode 100644 (file)
index 8f6b72f..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/frontend_types.h>
diff --git a/wrapper/ringbuffer/iterator.h b/wrapper/ringbuffer/iterator.h
deleted file mode 100644 (file)
index 08586d3..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/iterator.h>
diff --git a/wrapper/ringbuffer/nohz.h b/wrapper/ringbuffer/nohz.h
deleted file mode 100644 (file)
index 33568ed..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/nohz.h>
diff --git a/wrapper/ringbuffer/vatomic.h b/wrapper/ringbuffer/vatomic.h
deleted file mode 100644 (file)
index a5e8c76..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/vatomic.h>
diff --git a/wrapper/ringbuffer/vfs.h b/wrapper/ringbuffer/vfs.h
deleted file mode 100644 (file)
index 6591840..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
-#include <lib/ringbuffer/vfs.h>