fix: cpu/hotplug: Remove deprecated CPU-hotplug functions. (v5.15)
author     Michael Jeanson <mjeanson@efficios.com>
           Mon, 13 Sep 2021 16:00:38 +0000 (12:00 -0400)
committer  Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Tue, 14 Sep 2021 14:49:18 +0000 (10:49 -0400)
The CPU-hotplug functions get|put_online_cpus() were deprecated in v4.13
and removed in v5.15.
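
For callers, the change is a straight rename of the read-side lock/unlock
pair. A minimal sketch of the pattern on v4.13+ kernels (the callback is a
placeholder, not code from this tree):

    #include <linux/cpu.h>      /* cpus_read_lock()/cpus_read_unlock() */
    #include <linux/cpumask.h>  /* for_each_online_cpu() */

    /* Iterate online CPUs with CPU hotplug excluded. */
    static void walk_online_cpus(void (*fn)(int cpu))
    {
            int cpu;

            cpus_read_lock();   /* was: get_online_cpus() */
            for_each_online_cpu(cpu)
                    fn(cpu);
            cpus_read_unlock(); /* was: put_online_cpus() */
    }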

See upstream commits:

commit 8c854303ce0e38e5bbedd725ff39da7e235865d8
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date:   Tue Aug 3 16:16:21 2021 +0200

    cpu/hotplug: Remove deprecated CPU-hotplug functions.

    No users in tree use the deprecated CPU-hotplug functions anymore.

    Remove them.

The replacement functions were introduced in v4.13:

  commit 8f553c498e1772cccb39a114da4a498d22992758
  Author: Thomas Gleixner <tglx@linutronix.de>
  Date:   Wed May 24 10:15:12 2017 +0200

    cpu/hotplug: Provide cpus_read|write_[un]lock()

    The counting 'rwsem' hackery of get|put_online_cpus() is going to be
    replaced by percpu rwsem.

    Rename the functions to make it clear that it's locking and not some
    refcount style interface. These new functions will be used for the
    preparatory patches which make the code ready for the percpu rwsem
    conversion.

    Rename all instances in the cpu hotplug code while at it.

Change-Id: I5a37cf5afc075a402b7347989fac637dfa60a1ed
Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
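
The wrapper added below keeps the call sites identical across kernel
versions. A minimal sketch of the call-site pattern, with setup_cpu() as a
hypothetical per-CPU helper (not part of this patch):

    #include <wrapper/cpu.h>    /* lttng_cpus_read_lock()/lttng_cpus_read_unlock() */

    static int setup_cpu(int cpu);      /* hypothetical per-CPU setup helper */

    /* Per-CPU setup under the CPU-hotplug read lock; the error path unlocks too. */
    static int setup_all_online_cpus(void)
    {
            int cpu, ret = 0;

            lttng_cpus_read_lock();
            for_each_online_cpu(cpu) {
                    ret = setup_cpu(cpu);
                    if (ret)
                            break;      /* still under the CPU read lock */
            }
            lttng_cpus_read_unlock();
            return ret;
    }
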
include/wrapper/cpu.h [new file with mode: 0644]
src/lib/ringbuffer/ring_buffer_backend.c
src/lib/ringbuffer/ring_buffer_frontend.c
src/lib/ringbuffer/ring_buffer_iterator.c
src/lttng-context-perf-counters.c
src/lttng-statedump-impl.c

diff --git a/include/wrapper/cpu.h b/include/wrapper/cpu.h
new file mode 100644
index 0000000..cbee196
--- /dev/null
+++ b/include/wrapper/cpu.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * wrapper/cpu.h
+ *
+ * Copyright (C) 2021 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_CPU_H
+#define _LTTNG_WRAPPER_CPU_H
+
+#include <linux/cpu.h>
+#include <lttng/kernel-version.h>
+
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,13,0))
+
+static inline
+void lttng_cpus_read_lock(void)
+{
+       cpus_read_lock();
+}
+
+static inline
+void lttng_cpus_read_unlock(void)
+{
+       cpus_read_unlock();
+}
+
+#else /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,13,0) */
+
+static inline
+void lttng_cpus_read_lock(void)
+{
+       get_online_cpus();
+}
+
+static inline
+void lttng_cpus_read_unlock(void)
+{
+       put_online_cpus();
+}
+
+#endif /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,13,0) */
+
+#endif /* _LTTNG_WRAPPER_CPU_H */
diff --git a/src/lib/ringbuffer/ring_buffer_backend.c b/src/lib/ringbuffer/ring_buffer_backend.c
index 26efb2bcd70dac30d468e21ea38ed869617c2bb2..9a339be0a6e9b7040abce39b9585a2f2e5d95f48 100644
--- a/src/lib/ringbuffer/ring_buffer_backend.c
+++ b/src/lib/ringbuffer/ring_buffer_backend.c
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/cpu.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 
+#include <wrapper/cpu.h>
 #include <wrapper/mm.h>
 #include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
 #include <ringbuffer/config.h>
@@ -445,14 +445,14 @@ int channel_backend_init(struct channel_backend *chanb,
                        chanb->cpu_hp_notifier.priority = 5;
                        register_hotcpu_notifier(&chanb->cpu_hp_notifier);
 
-                       get_online_cpus();
+                       lttng_cpus_read_lock();
                        for_each_online_cpu(i) {
                                ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
                                                         chanb, i);
                                if (ret)
                                        goto free_bufs; /* cpu hotplug locked */
                        }
-                       put_online_cpus();
+                       lttng_cpus_read_unlock();
 #else
                        for_each_possible_cpu(i) {
                                ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
@@ -485,7 +485,7 @@ free_bufs:
                 */
 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 #ifdef CONFIG_HOTPLUG_CPU
-               put_online_cpus();
+               lttng_cpus_read_unlock();
                unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
 #endif
 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
diff --git a/src/lib/ringbuffer/ring_buffer_frontend.c b/src/lib/ringbuffer/ring_buffer_frontend.c
index e9056118c5a5cd59fdbc16ca82274683dfd80cf9..87a575d0bbf384bfcd9c731a55fca837ee91636d 100644
--- a/src/lib/ringbuffer/ring_buffer_frontend.c
+++ b/src/lib/ringbuffer/ring_buffer_frontend.c
@@ -48,6 +48,7 @@
 #include <ringbuffer/iterator.h>
 #include <ringbuffer/nohz.h>
 #include <wrapper/atomic.h>
+#include <wrapper/cpu.h>
 #include <wrapper/kref.h>
 #include <wrapper/percpu-defs.h>
 #include <wrapper/timer.h>
@@ -724,7 +725,7 @@ static void channel_unregister_notifiers(struct lttng_kernel_ring_buffer_channel
                        int cpu;
 
 #ifdef CONFIG_HOTPLUG_CPU
-                       get_online_cpus();
+                       lttng_cpus_read_lock();
                        chan->cpu_hp_enable = 0;
                        for_each_online_cpu(cpu) {
                                struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
@@ -732,7 +733,7 @@ static void channel_unregister_notifiers(struct lttng_kernel_ring_buffer_channel
                                lib_ring_buffer_stop_switch_timer(buf);
                                lib_ring_buffer_stop_read_timer(buf);
                        }
-                       put_online_cpus();
+                       lttng_cpus_read_unlock();
                        unregister_cpu_notifier(&chan->cpu_hp_notifier);
 #else
                        for_each_possible_cpu(cpu) {
@@ -772,14 +773,14 @@ void lib_ring_buffer_set_quiescent_channel(struct lttng_kernel_ring_buffer_chann
        const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               get_online_cpus();
+               lttng_cpus_read_lock();
                for_each_channel_cpu(cpu, chan) {
                        struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
                                                              cpu);
 
                        lib_ring_buffer_set_quiescent(buf);
                }
-               put_online_cpus();
+               lttng_cpus_read_unlock();
        } else {
                struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
 
@@ -794,14 +795,14 @@ void lib_ring_buffer_clear_quiescent_channel(struct lttng_kernel_ring_buffer_cha
        const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               get_online_cpus();
+               lttng_cpus_read_lock();
                for_each_channel_cpu(cpu, chan) {
                        struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
                                                              cpu);
 
                        lib_ring_buffer_clear_quiescent(buf);
                }
-               put_online_cpus();
+               lttng_cpus_read_unlock();
        } else {
                struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
 
@@ -899,7 +900,7 @@ struct lttng_kernel_ring_buffer_channel *channel_create(const struct lttng_kerne
                        chan->cpu_hp_notifier.priority = 6;
                        register_cpu_notifier(&chan->cpu_hp_notifier);
 
-                       get_online_cpus();
+                       lttng_cpus_read_lock();
                        for_each_online_cpu(cpu) {
                                struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
                                                                       cpu);
@@ -909,7 +910,7 @@ struct lttng_kernel_ring_buffer_channel *channel_create(const struct lttng_kerne
                                spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
                        }
                        chan->cpu_hp_enable = 1;
-                       put_online_cpus();
+                       lttng_cpus_read_unlock();
 #else
                        for_each_possible_cpu(cpu) {
                                struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
diff --git a/src/lib/ringbuffer/ring_buffer_iterator.c b/src/lib/ringbuffer/ring_buffer_iterator.c
index 25839af631351c8c05af3745a7b9b05c034886d7..60c95ca6128d44bd44855c62352e6d464e2cf3e2 100644
--- a/src/lib/ringbuffer/ring_buffer_iterator.c
+++ b/src/lib/ringbuffer/ring_buffer_iterator.c
@@ -10,6 +10,7 @@
  */
 
 #include <ringbuffer/iterator.h>
+#include <wrapper/cpu.h>
 #include <wrapper/file.h>
 #include <wrapper/uaccess.h>
 #include <linux/jiffies.h>
@@ -440,13 +441,13 @@ int channel_iterator_init(struct lttng_kernel_ring_buffer_channel *chan)
                        chan->hp_iter_notifier.priority = 10;
                        register_cpu_notifier(&chan->hp_iter_notifier);
 
-                       get_online_cpus();
+                       lttng_cpus_read_lock();
                        for_each_online_cpu(cpu) {
                                buf = per_cpu_ptr(chan->backend.buf, cpu);
                                lib_ring_buffer_iterator_init(chan, buf);
                        }
                        chan->hp_iter_enable = 1;
-                       put_online_cpus();
+                       lttng_cpus_read_unlock();
 #else
                        for_each_possible_cpu(cpu) {
                                buf = per_cpu_ptr(chan->backend.buf, cpu);
@@ -519,7 +520,7 @@ int channel_iterator_open(struct lttng_kernel_ring_buffer_channel *chan)
        CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               get_online_cpus();
+               lttng_cpus_read_lock();
                /* Allow CPU hotplug to keep track of opened reader */
                chan->iter.read_open = 1;
                for_each_channel_cpu(cpu, chan) {
@@ -529,7 +530,7 @@ int channel_iterator_open(struct lttng_kernel_ring_buffer_channel *chan)
                                goto error;
                        buf->iter.read_open = 1;
                }
-               put_online_cpus();
+               lttng_cpus_read_unlock();
        } else {
                buf = channel_get_ring_buffer(config, chan, 0);
                ret = lib_ring_buffer_iterator_open(buf);
@@ -538,7 +539,7 @@ int channel_iterator_open(struct lttng_kernel_ring_buffer_channel *chan)
 error:
        /* Error should always happen on CPU 0, hence no close is required. */
        CHAN_WARN_ON(chan, cpu != 0);
-       put_online_cpus();
+       lttng_cpus_read_unlock();
        return ret;
 }
 EXPORT_SYMBOL_GPL(channel_iterator_open);
@@ -550,7 +551,7 @@ void channel_iterator_release(struct lttng_kernel_ring_buffer_channel *chan)
        int cpu;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               get_online_cpus();
+               lttng_cpus_read_lock();
                for_each_channel_cpu(cpu, chan) {
                        buf = channel_get_ring_buffer(config, chan, cpu);
                        if (buf->iter.read_open) {
@@ -559,7 +560,7 @@ void channel_iterator_release(struct lttng_kernel_ring_buffer_channel *chan)
                        }
                }
                chan->iter.read_open = 0;
-               put_online_cpus();
+               lttng_cpus_read_unlock();
        } else {
                buf = channel_get_ring_buffer(config, chan, 0);
                lib_ring_buffer_iterator_release(buf);
diff --git a/src/lttng-context-perf-counters.c b/src/lttng-context-perf-counters.c
index b0227d474e4d5388138bf3e6ae36bc33eaf75fec..372f05e0cc902c311611604875d6922372c1dc48 100644
--- a/src/lttng-context-perf-counters.c
+++ b/src/lttng-context-perf-counters.c
@@ -16,6 +16,7 @@
 #include <lttng/events.h>
 #include <lttng/events-internal.h>
 #include <ringbuffer/frontend_types.h>
+#include <wrapper/cpu.h>
 #include <wrapper/vmalloc.h>
 #include <wrapper/perf.h>
 #include <lttng/tracer.h>
@@ -97,10 +98,10 @@ void lttng_destroy_perf_counter_ctx_field(void *priv)
        {
                int cpu;
 
-               get_online_cpus();
+               lttng_cpus_read_lock();
                for_each_online_cpu(cpu)
                        perf_event_release_kernel(events[cpu]);
-               put_online_cpus();
+               lttng_cpus_read_unlock();
 #ifdef CONFIG_HOTPLUG_CPU
                unregister_cpu_notifier(&perf_field->nb);
 #endif
@@ -304,7 +305,7 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
                perf_field->nb.priority = 0;
                register_cpu_notifier(&perf_field->nb);
 #endif
-               get_online_cpus();
+               lttng_cpus_read_lock();
                for_each_online_cpu(cpu) {
                        events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
                                                cpu, NULL, overflow_callback);
@@ -317,7 +318,7 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
                                goto counter_busy;
                        }
                }
-               put_online_cpus();
+               lttng_cpus_read_unlock();
                perf_field->hp_enable = 1;
        }
 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
@@ -351,7 +352,7 @@ counter_error:
                        if (events[cpu] && !IS_ERR(events[cpu]))
                                perf_event_release_kernel(events[cpu]);
                }
-               put_online_cpus();
+               lttng_cpus_read_unlock();
 #ifdef CONFIG_HOTPLUG_CPU
                unregister_cpu_notifier(&perf_field->nb);
 #endif
diff --git a/src/lttng-statedump-impl.c b/src/lttng-statedump-impl.c
index 4dfbca0b29dcc8b11b81099cd43f2ab97376c573..2b42783a2c0617ae93b74a894c488a3f0f16fe80 100644
--- a/src/lttng-statedump-impl.c
+++ b/src/lttng-statedump-impl.c
@@ -23,7 +23,6 @@
 #include <linux/file.h>
 #include <linux/interrupt.h>
 #include <linux/irqnr.h>
-#include <linux/cpu.h>
 #include <linux/netdevice.h>
 #include <linux/inetdevice.h>
 #include <linux/mm.h>
@@ -34,6 +33,7 @@
 
 #include <lttng/events.h>
 #include <lttng/tracer.h>
+#include <wrapper/cpu.h>
 #include <wrapper/irqdesc.h>
 #include <wrapper/fdtable.h>
 #include <wrapper/namespace.h>
@@ -770,7 +770,7 @@ int do_lttng_statedump(struct lttng_kernel_session *session)
         * is to guarantee that each CPU has been in a state where is was in
         * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
         */
-       get_online_cpus();
+       lttng_cpus_read_lock();
        atomic_set(&kernel_threads_to_run, num_online_cpus());
        for_each_online_cpu(cpu) {
                INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
@@ -778,7 +778,7 @@ int do_lttng_statedump(struct lttng_kernel_session *session)
        }
        /* Wait for all threads to run */
        __wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
-       put_online_cpus();
+       lttng_cpus_read_unlock();
        /* Our work is done */
        trace_lttng_statedump_end(session);
        return 0;