The CPU-hotplug functions get|put_online_cpus() were deprecated in v4.13
and removed in v5.15.
See upstream commits:
commit 8c854303ce0e38e5bbedd725ff39da7e235865d8
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue Aug 3 16:16:21 2021 +0200
cpu/hotplug: Remove deprecated CPU-hotplug functions.
No users in tree use the deprecated CPU-hotplug functions anymore.
Remove them.
Introduced in v4.13:
commit 8f553c498e1772cccb39a114da4a498d22992758
Author: Thomas Gleixner <tglx@linutronix.de>
Date: Wed May 24 10:15:12 2017 +0200
cpu/hotplug: Provide cpus_read|write_[un]lock()
The counting 'rwsem' hackery of get|put_online_cpus() is going to be
replaced by percpu rwsem.
Rename the functions to make it clear that it's locking and not some
refcount style interface. These new functions will be used for the
preparatory patches which make the code ready for the percpu rwsem
conversion.
Rename all instances in the cpu hotplug code while at it.
Change-Id: I5a37cf5afc075a402b7347989fac637dfa60a1ed
Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
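
The wrapper keeps the call pattern uniform across kernel versions; as a
minimal sketch of a caller (the loop body is illustrative, not code
taken from this patch):

    int cpu;

    lttng_cpus_read_lock();       /* cpus_read_lock() on >= v4.13 */
    for_each_online_cpu(cpu) {
        /* Per-CPU work; CPU hotplug is held off in this section. */
    }
    lttng_cpus_read_unlock();     /* cpus_read_unlock() on >= v4.13 */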
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * wrapper/cpu.h
+ *
+ * Copyright (C) 2021 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_CPU_H
+#define _LTTNG_WRAPPER_CPU_H
+
+#include <linux/cpu.h>
+#include <lttng/kernel-version.h>
+
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,13,0))
+
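+/* Kernel >= v4.13 provides cpus_read_lock/unlock(); map the wrappers directly. */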
+static inline
+void lttng_cpus_read_lock(void)
+{
+ cpus_read_lock();
+}
+
+static inline
+void lttng_cpus_read_unlock(void)
+{
+ cpus_read_unlock();
+}
+
+#else /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,13,0) */
+
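+/*
+ * Older kernels still provide get/put_online_cpus() (deprecated in
+ * v4.13, removed upstream in v5.15); fall back to them here.
+ */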
+static inline
+void lttng_cpus_read_lock(void)
+{
+ get_online_cpus();
+}
+
+static inline
+void lttng_cpus_read_unlock(void)
+{
+ put_online_cpus();
+}
+
+#endif /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,13,0) */
+
+#endif /* _LTTNG_WRAPPER_CPU_H */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <wrapper/cpu.h>
#include <wrapper/mm.h>
#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
#include <ringbuffer/config.h>
chanb->cpu_hp_notifier.priority = 5;
register_hotcpu_notifier(&chanb->cpu_hp_notifier);
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_online_cpu(i) {
ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
chanb, i);
if (ret)
goto free_bufs; /* cpu hotplug locked */
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
#else
for_each_possible_cpu(i) {
ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
*/
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
- put_online_cpus();
+ lttng_cpus_read_unlock();
unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
#include <ringbuffer/iterator.h>
#include <ringbuffer/nohz.h>
#include <wrapper/atomic.h>
+#include <wrapper/cpu.h>
#include <wrapper/kref.h>
#include <wrapper/percpu-defs.h>
#include <wrapper/timer.h>
int cpu;
#ifdef CONFIG_HOTPLUG_CPU
- get_online_cpus();
+ lttng_cpus_read_lock();
chan->cpu_hp_enable = 0;
for_each_online_cpu(cpu) {
struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
lib_ring_buffer_stop_switch_timer(buf);
lib_ring_buffer_stop_read_timer(buf);
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
unregister_cpu_notifier(&chan->cpu_hp_notifier);
#else
for_each_possible_cpu(cpu) {
const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_channel_cpu(cpu, chan) {
struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
lib_ring_buffer_set_quiescent(buf);
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
} else {
struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_channel_cpu(cpu, chan) {
struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
lib_ring_buffer_clear_quiescent(buf);
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
} else {
struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
chan->cpu_hp_notifier.priority = 6;
register_cpu_notifier(&chan->cpu_hp_notifier);
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_online_cpu(cpu) {
struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
}
chan->cpu_hp_enable = 1;
- put_online_cpus();
+ lttng_cpus_read_unlock();
#else
for_each_possible_cpu(cpu) {
struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
*/
#include <ringbuffer/iterator.h>
+#include <wrapper/cpu.h>
#include <wrapper/file.h>
#include <wrapper/uaccess.h>
#include <linux/jiffies.h>
chan->hp_iter_notifier.priority = 10;
register_cpu_notifier(&chan->hp_iter_notifier);
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_online_cpu(cpu) {
buf = per_cpu_ptr(chan->backend.buf, cpu);
lib_ring_buffer_iterator_init(chan, buf);
}
chan->hp_iter_enable = 1;
- put_online_cpus();
+ lttng_cpus_read_unlock();
#else
for_each_possible_cpu(cpu) {
buf = per_cpu_ptr(chan->backend.buf, cpu);
CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- get_online_cpus();
+ lttng_cpus_read_lock();
/* Allow CPU hotplug to keep track of opened reader */
chan->iter.read_open = 1;
for_each_channel_cpu(cpu, chan) {
goto error;
buf->iter.read_open = 1;
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
} else {
buf = channel_get_ring_buffer(config, chan, 0);
ret = lib_ring_buffer_iterator_open(buf);
error:
/* Error should always happen on CPU 0, hence no close is required. */
CHAN_WARN_ON(chan, cpu != 0);
- put_online_cpus();
+ lttng_cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(channel_iterator_open);
int cpu;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_channel_cpu(cpu, chan) {
buf = channel_get_ring_buffer(config, chan, cpu);
if (buf->iter.read_open) {
}
}
chan->iter.read_open = 0;
- put_online_cpus();
+ lttng_cpus_read_unlock();
} else {
buf = channel_get_ring_buffer(config, chan, 0);
lib_ring_buffer_iterator_release(buf);
#include <lttng/events.h>
#include <lttng/events-internal.h>
#include <ringbuffer/frontend_types.h>
+#include <wrapper/cpu.h>
#include <wrapper/vmalloc.h>
#include <wrapper/perf.h>
#include <lttng/tracer.h>
{
int cpu;
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_online_cpu(cpu)
perf_event_release_kernel(events[cpu]);
- put_online_cpus();
+ lttng_cpus_read_unlock();
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&perf_field->nb);
#endif
perf_field->nb.priority = 0;
register_cpu_notifier(&perf_field->nb);
#endif
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_online_cpu(cpu) {
events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
cpu, NULL, overflow_callback);
goto counter_busy;
}
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
perf_field->hp_enable = 1;
}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
if (events[cpu] && !IS_ERR(events[cpu]))
perf_event_release_kernel(events[cpu]);
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&perf_field->nb);
#endif
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
-#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/mm.h>
#include <lttng/events.h>
#include <lttng/tracer.h>
+#include <wrapper/cpu.h>
#include <wrapper/irqdesc.h>
#include <wrapper/fdtable.h>
#include <wrapper/namespace.h>
* is to guarantee that each CPU has been in a state where it was in
* syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
*/
- get_online_cpus();
+ lttng_cpus_read_lock();
atomic_set(&kernel_threads_to_run, num_online_cpus());
for_each_online_cpu(cpu) {
INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
}
/* Wait for all threads to run */
__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
- put_online_cpus();
+ lttng_cpus_read_unlock();
/* Our work is done */
trace_lttng_statedump_end(session);
return 0;