#include <linux/cpumask.h>
#include <linux/types.h>
+#include <lttng-kernel-version.h>
+#include <lttng-cpuhotplug.h>
struct lib_ring_buffer_backend_page {
void *virt; /* page virtual address (cached) */
void *priv; /* Client-specific information */
void *priv_ops; /* Client-specific ops pointer */
void (*release_priv_ops)(void *priv_ops);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ struct lttng_cpuhp_node cpuhp_prepare; /* CPU hotplug prepare */
+#else
struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
+#endif
/*
* We need to copy config because the module containing the
* source config can vanish before the last reference to this
#include <wrapper/ringbuffer/backend_types.h>
#include <wrapper/spinlock.h>
#include <lib/prio_heap/lttng_prio_heap.h> /* For per-CPU read-side iterator */
+#include <lttng-cpuhotplug.h>
/*
* A switch is done during tracing or as a final flush after tracing (so it
unsigned long switch_timer_interval; /* Buffer flush (jiffies) */
unsigned long read_timer_interval; /* Reader wakeup (jiffies) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ struct lttng_cpuhp_node cpuhp_prepare;
+ struct lttng_cpuhp_node cpuhp_online;
+ struct lttng_cpuhp_node cpuhp_iter_online;
+#else
struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
- struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */
unsigned int cpu_hp_enable:1; /* Enable CPU hotplug notif. */
unsigned int hp_iter_enable:1; /* Enable hp iter notif. */
+#endif
+ struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
wait_queue_head_t read_wait; /* reader wait queue */
wait_queue_head_t hp_wait; /* CPU hotplug wait queue */
int finalized; /* Has channel been finalized */
chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+/*
+ * No need to implement a "dead" callback to do a buffer switch here,
+ * because it will happen when tracing is stopped, or will be done by
+ * the switch timer CPU DEAD callback.
+ * We don't free buffers when a CPU goes away, because that would make
+ * trace data vanish, which is unwanted.
+ */
+int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ struct channel_backend *chanb = container_of(node,
+ struct channel_backend, cpuhp_prepare);
+ const struct lib_ring_buffer_config *config = &chanb->config;
+ struct lib_ring_buffer *buf;
+ int ret;
+
+ CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+ buf = per_cpu_ptr(chanb->buf, cpu);
+ ret = lib_ring_buffer_create(buf, chanb, cpu);
+ if (ret) {
+ printk(KERN_ERR
+ "lttng_cpuhp_rb_backend_prepare: cpu %d "
+ "buffer creation failed\n", cpu);
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
#ifdef CONFIG_HOTPLUG_CPU
+
/**
* lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
* @nb: notifier block
}
return NOTIFY_OK;
}
+
#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
/**
* channel_backend_init - initialize a channel backend
* @chanb: channel backend
if (!chanb->buf)
goto free_cpumask;
- /*
- * In case of non-hotplug cpu, if the ring-buffer is allocated
- * in early initcall, it will not be notified of secondary cpus.
- * In that off case, we need to allocate for all possible cpus.
- */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
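+ /*
+ * Adding the instance invokes the prepare callback for each CPU that
+ * is already online, so the per-cpu buffers are created here without
+ * an explicit loop; CPUs hotplugged later get their buffer from the
+ * same callback.
+ */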
+ chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
+ ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
+ &chanb->cpuhp_prepare.node);
+ if (ret)
+ goto free_bufs;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+ {
+ /*
+ * In case of non-hotplug cpu, if the ring-buffer is allocated
+ * in early initcall, it will not be notified of secondary cpus.
+ * In that case, we need to allocate for all possible cpus.
+ */
#ifdef CONFIG_HOTPLUG_CPU
- /*
- * buf->backend.allocated test takes care of concurrent CPU
- * hotplug.
- * Priority higher than frontend, so we create the ring buffer
- * before we start the timer.
- */
- chanb->cpu_hp_notifier.notifier_call =
- lib_ring_buffer_cpu_hp_callback;
- chanb->cpu_hp_notifier.priority = 5;
- register_hotcpu_notifier(&chanb->cpu_hp_notifier);
-
- get_online_cpus();
- for_each_online_cpu(i) {
- ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
- chanb, i);
- if (ret)
- goto free_bufs; /* cpu hotplug locked */
- }
- put_online_cpus();
+ /*
+ * buf->backend.allocated test takes care of concurrent CPU
+ * hotplug.
+ * Priority higher than frontend, so we create the ring buffer
+ * before we start the timer.
+ */
+ chanb->cpu_hp_notifier.notifier_call =
+ lib_ring_buffer_cpu_hp_callback;
+ chanb->cpu_hp_notifier.priority = 5;
+ register_hotcpu_notifier(&chanb->cpu_hp_notifier);
+
+ get_online_cpus();
+ for_each_online_cpu(i) {
+ ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+ chanb, i);
+ if (ret)
+ goto free_bufs; /* cpu hotplug locked */
+ }
+ put_online_cpus();
#else
- for_each_possible_cpu(i) {
- ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
- chanb, i);
- if (ret)
- goto free_bufs; /* cpu hotplug locked */
- }
+ for_each_possible_cpu(i) {
+ ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+ chanb, i);
+ if (ret)
+ goto free_bufs;
+ }
#endif
+ }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
} else {
chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
if (!chanb->buf)
free_bufs:
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
+ &chanb->cpuhp_prepare.node);
+ WARN_ON(ret);
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#ifdef CONFIG_HOTPLUG_CPU
+ put_online_cpus();
+ unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
+#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
for_each_possible_cpu(i) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
+ struct lib_ring_buffer *buf =
+ per_cpu_ptr(chanb->buf, i);
if (!buf->backend.allocated)
continue;
lib_ring_buffer_free(buf);
}
-#ifdef CONFIG_HOTPLUG_CPU
- put_online_cpus();
- unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif
free_percpu(chanb->buf);
} else
kfree(chanb->buf);
{
const struct lib_ring_buffer_config *config = &chanb->config;
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ int ret;
+
+ ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
+ &chanb->cpuhp_prepare.node);
+ WARN_ON(ret);
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+ }
}
/**
buf->read_timer_enabled = 0;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
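+/*
+ * Dynamic cpuhp states used by the ring-buffer library. They are
+ * allocated by lttng-events at module load and handed over through the
+ * setters below, because the ring buffer is a separate library.
+ */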
+enum cpuhp_state lttng_rb_hp_prepare;
+enum cpuhp_state lttng_rb_hp_online;
+
+void lttng_rb_set_hp_prepare(enum cpuhp_state val)
+{
+ lttng_rb_hp_prepare = val;
+}
+EXPORT_SYMBOL_GPL(lttng_rb_set_hp_prepare);
+
+void lttng_rb_set_hp_online(enum cpuhp_state val)
+{
+ lttng_rb_hp_online = val;
+}
+EXPORT_SYMBOL_GPL(lttng_rb_set_hp_online);
+
+int lttng_cpuhp_rb_frontend_dead(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ struct channel *chan = container_of(node, struct channel,
+ cpuhp_prepare);
+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+ CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+ /*
+ * Perform a buffer switch on the remote CPU. This runs on the CPU
+ * handling the hotunplug, after the target CPU has stopped running
+ * completely, and ensures that all data from the dead CPU is
+ * flushed.
+ */
+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_dead);
+
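+/*
+ * CPU coming online (or instance being added): wake up readers waiting
+ * on the hotplug wait queue and start the switch/read timers for that
+ * CPU's buffer.
+ */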
+int lttng_cpuhp_rb_frontend_online(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ struct channel *chan = container_of(node, struct channel,
+ cpuhp_online);
+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+ CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+ wake_up_interruptible(&chan->hp_wait);
+ lib_ring_buffer_start_switch_timer(buf);
+ lib_ring_buffer_start_read_timer(buf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_online);
+
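+/*
+ * CPU going offline: stop the per-buffer timers. The final flush of
+ * the buffer is done by the DEAD callback once the CPU has stopped
+ * running completely.
+ */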
+int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ struct channel *chan = container_of(node, struct channel,
+ cpuhp_online);
+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+ CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+ lib_ring_buffer_stop_switch_timer(buf);
+ lib_ring_buffer_stop_read_timer(buf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_offline);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
#ifdef CONFIG_HOTPLUG_CPU
+
/**
* lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
* @nb: notifier block
return NOTIFY_DONE;
}
}
+
#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
/*
* For per-cpu buffers, call the reader wakeups before switching the buffer, so
static void channel_unregister_notifiers(struct channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
- int cpu;
channel_iterator_unregister_notifiers(chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
* concurrency.
*/
#endif /* CONFIG_NO_HZ */
-#ifdef CONFIG_HOTPLUG_CPU
- get_online_cpus();
- chan->cpu_hp_enable = 0;
- for_each_online_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
- cpu);
- lib_ring_buffer_stop_switch_timer(buf);
- lib_ring_buffer_stop_read_timer(buf);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ {
+ int ret;
+
+ ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
+ &chan->cpuhp_online.node);
+ WARN_ON(ret);
+ ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare,
+ &chan->cpuhp_prepare.node);
+ WARN_ON(ret);
}
- put_online_cpus();
- unregister_cpu_notifier(&chan->cpu_hp_notifier);
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+ {
+ int cpu;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ get_online_cpus();
+ chan->cpu_hp_enable = 0;
+ for_each_online_cpu(cpu) {
+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ cpu);
+ lib_ring_buffer_stop_switch_timer(buf);
+ lib_ring_buffer_stop_read_timer(buf);
+ }
+ put_online_cpus();
+ unregister_cpu_notifier(&chan->cpu_hp_notifier);
#else
- for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
- cpu);
- lib_ring_buffer_stop_switch_timer(buf);
- lib_ring_buffer_stop_read_timer(buf);
- }
+ for_each_possible_cpu(cpu) {
+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ cpu);
+ lib_ring_buffer_stop_switch_timer(buf);
+ lib_ring_buffer_stop_read_timer(buf);
+ }
#endif
+ }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
} else {
struct lib_ring_buffer *buf = chan->backend.buf;
size_t num_subbuf, unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
- int ret, cpu;
+ int ret;
struct channel *chan;
if (lib_ring_buffer_check_config(config, switch_timer_interval,
init_waitqueue_head(&chan->hp_wait);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
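+ /*
+ * The prepare instance is added without invoking callbacks: the
+ * frontend only needs its DEAD callback for the final buffer
+ * flush. Adding the online instance starts the timers on every
+ * CPU that is already online.
+ */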
+ chan->cpuhp_prepare.component = LTTNG_RING_BUFFER_FRONTEND;
+ ret = cpuhp_state_add_instance_nocalls(lttng_rb_hp_prepare,
+ &chan->cpuhp_prepare.node);
+ if (ret)
+ goto cpuhp_prepare_error;
+
+ chan->cpuhp_online.component = LTTNG_RING_BUFFER_FRONTEND;
+ ret = cpuhp_state_add_instance(lttng_rb_hp_online,
+ &chan->cpuhp_online.node);
+ if (ret)
+ goto cpuhp_online_error;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+ {
+ int cpu;
+ /*
+ * In case of non-hotplug cpu, if the ring-buffer is allocated
+ * in early initcall, it will not be notified of secondary cpus.
+ * In that case, we need to allocate for all possible cpus.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+ chan->cpu_hp_notifier.notifier_call =
+ lib_ring_buffer_cpu_hp_callback;
+ chan->cpu_hp_notifier.priority = 6;
+ register_cpu_notifier(&chan->cpu_hp_notifier);
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ cpu);
+ spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
+ lib_ring_buffer_start_switch_timer(buf);
+ lib_ring_buffer_start_read_timer(buf);
+ spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
+ }
+ chan->cpu_hp_enable = 1;
+ put_online_cpus();
+#else
+ for_each_possible_cpu(cpu) {
+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ cpu);
+ spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
+ lib_ring_buffer_start_switch_timer(buf);
+ lib_ring_buffer_start_read_timer(buf);
+ spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
+ }
+#endif
+ }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
/* Only benefit from NO_HZ idle with per-cpu buffers for now. */
chan->tick_nohz_notifier.notifier_call =
&chan->tick_nohz_notifier);
#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
- /*
- * In case of non-hotplug cpu, if the ring-buffer is allocated
- * in early initcall, it will not be notified of secondary cpus.
- * In that off case, we need to allocate for all possible cpus.
- */
-#ifdef CONFIG_HOTPLUG_CPU
- chan->cpu_hp_notifier.notifier_call =
- lib_ring_buffer_cpu_hp_callback;
- chan->cpu_hp_notifier.priority = 6;
- register_cpu_notifier(&chan->cpu_hp_notifier);
-
- get_online_cpus();
- for_each_online_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
- cpu);
- spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
- lib_ring_buffer_start_switch_timer(buf);
- lib_ring_buffer_start_read_timer(buf);
- spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
- }
- chan->cpu_hp_enable = 1;
- put_online_cpus();
-#else
- for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
- cpu);
- spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
- lib_ring_buffer_start_switch_timer(buf);
- lib_ring_buffer_start_read_timer(buf);
- spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
- }
-#endif
} else {
struct lib_ring_buffer *buf = chan->backend.buf;
return chan;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+cpuhp_online_error:
+ ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare,
+ &chan->cpuhp_prepare.node);
+ WARN_ON(ret);
+cpuhp_prepare_error:
+#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
error_free_backend:
channel_backend_free(&chan->backend);
error:
list_add(&buf->iter.empty_node, &chan->iter.empty_head);
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
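+/*
+ * Initialize the read-side iterator state of the per-cpu buffer when a
+ * CPU comes online (also run for already-online CPUs when the instance
+ * is added at channel creation).
+ */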
+int lttng_cpuhp_rb_iter_online(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ struct channel *chan = container_of(node, struct channel,
+ cpuhp_iter_online);
+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+ CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+ lib_ring_buffer_iterator_init(chan, buf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
#ifdef CONFIG_HOTPLUG_CPU
static
int channel_iterator_cpu_hotplug(struct notifier_block *nb,
}
#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
int channel_iterator_init(struct channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- int cpu, ret;
+ int ret;
INIT_LIST_HEAD(&chan->iter.empty_head);
ret = lttng_heap_init(&chan->iter.heap,
GFP_KERNEL, buf_is_higher);
if (ret)
return ret;
- /*
- * In case of non-hotplug cpu, if the ring-buffer is allocated
- * in early initcall, it will not be notified of secondary cpus.
- * In that off case, we need to allocate for all possible cpus.
- */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
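+ /*
+ * Adding the instance runs the iterator "online" callback on each
+ * CPU that is already online, replacing the explicit per-cpu
+ * initialization loop needed on older kernels.
+ */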
+ chan->cpuhp_iter_online.component = LTTNG_RING_BUFFER_ITER;
+ ret = cpuhp_state_add_instance(lttng_rb_hp_online,
+ &chan->cpuhp_iter_online.node);
+ if (ret)
+ return ret;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+ {
+ int cpu;
+
+ /*
+ * In case of non-hotplug cpu, if the ring-buffer is allocated
+ * in early initcall, it will not be notified of secondary cpus.
+ * In that case, we need to allocate for all possible cpus.
+ */
#ifdef CONFIG_HOTPLUG_CPU
- chan->hp_iter_notifier.notifier_call =
- channel_iterator_cpu_hotplug;
- chan->hp_iter_notifier.priority = 10;
- register_cpu_notifier(&chan->hp_iter_notifier);
- get_online_cpus();
- for_each_online_cpu(cpu) {
- buf = per_cpu_ptr(chan->backend.buf, cpu);
- lib_ring_buffer_iterator_init(chan, buf);
- }
- chan->hp_iter_enable = 1;
- put_online_cpus();
+ chan->hp_iter_notifier.notifier_call =
+ channel_iterator_cpu_hotplug;
+ chan->hp_iter_notifier.priority = 10;
+ register_cpu_notifier(&chan->hp_iter_notifier);
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ buf = per_cpu_ptr(chan->backend.buf, cpu);
+ lib_ring_buffer_iterator_init(chan, buf);
+ }
+ chan->hp_iter_enable = 1;
+ put_online_cpus();
#else
- for_each_possible_cpu(cpu) {
- buf = per_cpu_ptr(chan->backend.buf, cpu);
- lib_ring_buffer_iterator_init(chan, buf);
- }
+ for_each_possible_cpu(cpu) {
+ buf = per_cpu_ptr(chan->backend.buf, cpu);
+ lib_ring_buffer_iterator_init(chan, buf);
+ }
#endif
+ }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
} else {
buf = channel_get_ring_buffer(config, chan, 0);
lib_ring_buffer_iterator_init(chan, buf);
const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ {
+ int ret;
+
+ ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
+ &chan->cpuhp_iter_online.node);
+ WARN_ON(ret);
+ }
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
chan->hp_iter_enable = 0;
unregister_cpu_notifier(&chan->hp_iter_notifier);
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
}
}
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
struct perf_event **events = field->u.perf_counter->e;
- int cpu;
- get_online_cpus();
- for_each_online_cpu(cpu)
- perf_event_release_kernel(events[cpu]);
- put_online_cpus();
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ {
+ int ret;
+
+ ret = cpuhp_state_remove_instance(lttng_hp_online,
+ &field->u.perf_counter->cpuhp_online.node);
+ WARN_ON(ret);
+ ret = cpuhp_state_remove_instance(lttng_hp_prepare,
+ &field->u.perf_counter->cpuhp_prepare.node);
+ WARN_ON(ret);
+ }
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+ {
+ int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ perf_event_release_kernel(events[cpu]);
+ put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
- unregister_cpu_notifier(&field->u.perf_counter->nb);
+ unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
+ }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
kfree(field->event_field.name);
kfree(field->u.perf_counter->attr);
kfree(events);
kfree(field->u.perf_counter);
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
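+/*
+ * CPU coming online (or instance being added): create the per-CPU perf
+ * counter and publish it in the per-cpu events array once it is known
+ * to be usable.
+ */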
+int lttng_cpuhp_perf_counter_online(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ struct lttng_perf_counter_field *perf_field =
+ container_of(node, struct lttng_perf_counter_field,
+ cpuhp_online);
+ struct perf_event **events = perf_field->e;
+ struct perf_event_attr *attr = perf_field->attr;
+ struct perf_event *pevent;
+
+ pevent = wrapper_perf_event_create_kernel_counter(attr,
+ cpu, NULL, overflow_callback);
+ if (!pevent || IS_ERR(pevent))
+ return -EINVAL;
+ if (pevent->state == PERF_EVENT_STATE_ERROR) {
+ perf_event_release_kernel(pevent);
+ return -EBUSY;
+ }
+ barrier(); /* Create perf counter before setting event */
+ events[cpu] = pevent;
+ return 0;
+}
+
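+/*
+ * CPU dead teardown: unpublish the per-CPU event before releasing it.
+ * Also invoked for each CPU when the prepare instance is removed at
+ * context destruction.
+ */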
+int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ struct lttng_perf_counter_field *perf_field =
+ container_of(node, struct lttng_perf_counter_field,
+ cpuhp_prepare);
+ struct perf_event **events = perf_field->e;
+ struct perf_event *pevent;
+
+ pevent = events[cpu];
+ events[cpu] = NULL;
+ barrier(); /* NULLify event before perf counter teardown */
+ perf_event_release_kernel(pevent);
+ return 0;
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
#ifdef CONFIG_HOTPLUG_CPU
/**
#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
int lttng_add_perf_counter_to_ctx(uint32_t type,
uint64_t config,
const char *name,
struct perf_event **events;
struct perf_event_attr *attr;
int ret;
- int cpu;
char *name_alloc;
events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
goto find_error;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
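+ /*
+ * Register the prepare instance first so the DEAD callback can
+ * release counters, then add the online instance, which creates a
+ * counter on every CPU that is already online.
+ */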
+ perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
+ ret = cpuhp_state_add_instance(lttng_hp_prepare,
+ &perf_field->cpuhp_prepare.node);
+ if (ret)
+ goto cpuhp_prepare_error;
+
+ perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
+ ret = cpuhp_state_add_instance(lttng_hp_online,
+ &perf_field->cpuhp_online.node);
+ if (ret)
+ goto cpuhp_online_error;
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+ {
+ int cpu;
+
#ifdef CONFIG_HOTPLUG_CPU
- perf_field->nb.notifier_call =
- lttng_perf_counter_cpu_hp_callback;
- perf_field->nb.priority = 0;
- register_cpu_notifier(&perf_field->nb);
+ perf_field->nb.notifier_call =
+ lttng_perf_counter_cpu_hp_callback;
+ perf_field->nb.priority = 0;
+ register_cpu_notifier(&perf_field->nb);
#endif
-
- get_online_cpus();
- for_each_online_cpu(cpu) {
- events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
- cpu, NULL, overflow_callback);
- if (!events[cpu] || IS_ERR(events[cpu])) {
- ret = -EINVAL;
- goto counter_error;
- }
- if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
- ret = -EBUSY;
- goto counter_busy;
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
+ cpu, NULL, overflow_callback);
+ if (!events[cpu] || IS_ERR(events[cpu])) {
+ ret = -EINVAL;
+ goto counter_error;
+ }
+ if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
+ ret = -EBUSY;
+ goto counter_busy;
+ }
}
+ put_online_cpus();
+ perf_field->hp_enable = 1;
}
- put_online_cpus();
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
field->destroy = lttng_destroy_perf_counter_field;
field->get_size = perf_counter_get_size;
field->record = perf_counter_record;
field->u.perf_counter = perf_field;
- perf_field->hp_enable = 1;
lttng_context_update(*ctx);
wrapper_vmalloc_sync_all();
return 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+cpuhp_online_error:
+ {
+ int remove_ret;
+
+ remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
+ &perf_field->cpuhp_prepare.node);
+ WARN_ON(remove_ret);
+ }
+cpuhp_prepare_error:
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
counter_busy:
counter_error:
for_each_online_cpu(cpu) {
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&perf_field->nb);
#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
find_error:
lttng_remove_context_field(ctx, field);
append_context_error:
--- /dev/null
+#ifndef LTTNG_CPUHOTPLUG_H
+#define LTTNG_CPUHOTPLUG_H
+
+/*
+ * lttng-cpuhotplug.h
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <lttng-kernel-version.h>
+
+struct lttng_cpuhp_node;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+#include <linux/cpuhotplug.h>
+
+enum lttng_cpuhp_component {
+ LTTNG_RING_BUFFER_FRONTEND,
+ LTTNG_RING_BUFFER_BACKEND,
+ LTTNG_RING_BUFFER_ITER,
+ LTTNG_CONTEXT_PERF_COUNTERS,
+};
+
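+/*
+ * Embedded in each structure that registers with the LTTng cpuhp
+ * states. The component tag lets the shared dispatch callbacks in
+ * lttng-events route the notification to the right handler via
+ * container_of().
+ */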
+struct lttng_cpuhp_node {
+ enum lttng_cpuhp_component component;
+ struct hlist_node node;
+};
+
+extern enum cpuhp_state lttng_hp_prepare;
+extern enum cpuhp_state lttng_hp_online;
+
+int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
+ struct lttng_cpuhp_node *node);
+int lttng_cpuhp_rb_frontend_dead(unsigned int cpu,
+ struct lttng_cpuhp_node *node);
+int lttng_cpuhp_rb_frontend_online(unsigned int cpu,
+ struct lttng_cpuhp_node *node);
+int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
+ struct lttng_cpuhp_node *node);
+int lttng_cpuhp_rb_iter_online(unsigned int cpu,
+ struct lttng_cpuhp_node *node);
+
+/* Ring buffer is a separate library. */
+void lttng_rb_set_hp_prepare(enum cpuhp_state val);
+void lttng_rb_set_hp_online(enum cpuhp_state val);
+
+extern enum cpuhp_state lttng_rb_hp_prepare;
+extern enum cpuhp_state lttng_rb_hp_online;
+
+#endif
+
+#endif /* LTTNG_CPUHOTPLUG_H */
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+enum cpuhp_state lttng_hp_prepare;
+enum cpuhp_state lttng_hp_online;
+
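+/*
+ * A single pair of dynamic multi-instance cpuhp states is shared by
+ * all LTTng components. Each registered instance embeds a
+ * struct lttng_cpuhp_node, and these dispatch functions route the
+ * callback to the handler matching the instance's component.
+ */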
+static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
+{
+ struct lttng_cpuhp_node *lttng_node;
+
+ lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+ switch (lttng_node->component) {
+ case LTTNG_RING_BUFFER_FRONTEND:
+ return 0;
+ case LTTNG_RING_BUFFER_BACKEND:
+ return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
+ case LTTNG_RING_BUFFER_ITER:
+ return 0;
+ case LTTNG_CONTEXT_PERF_COUNTERS:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct lttng_cpuhp_node *lttng_node;
+
+ lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+ switch (lttng_node->component) {
+ case LTTNG_RING_BUFFER_FRONTEND:
+ return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
+ case LTTNG_RING_BUFFER_BACKEND:
+ return 0;
+ case LTTNG_RING_BUFFER_ITER:
+ return 0;
+ case LTTNG_CONTEXT_PERF_COUNTERS:
+ return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct lttng_cpuhp_node *lttng_node;
+
+ lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+ switch (lttng_node->component) {
+ case LTTNG_RING_BUFFER_FRONTEND:
+ return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
+ case LTTNG_RING_BUFFER_BACKEND:
+ return 0;
+ case LTTNG_RING_BUFFER_ITER:
+ return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
+ case LTTNG_CONTEXT_PERF_COUNTERS:
+ return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct lttng_cpuhp_node *lttng_node;
+
+ lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+ switch (lttng_node->component) {
+ case LTTNG_RING_BUFFER_FRONTEND:
+ return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
+ case LTTNG_RING_BUFFER_BACKEND:
+ return 0;
+ case LTTNG_RING_BUFFER_ITER:
+ return 0;
+ case LTTNG_CONTEXT_PERF_COUNTERS:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
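+/*
+ * Reserve one dynamic PREPARE-stage state and one dynamic ONLINE-stage
+ * state. For *_DYN requests, cpuhp_setup_state_multi() returns the
+ * allocated state value; it is recorded here and also handed to the
+ * ring-buffer library through the setters, since it is a separate
+ * module.
+ */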
+static int __init lttng_init_cpu_hotplug(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
+ lttng_hotplug_prepare,
+ lttng_hotplug_dead);
+ if (ret < 0)
+ return ret;
+ lttng_hp_prepare = ret;
+ lttng_rb_set_hp_prepare(ret);
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
+ lttng_hotplug_online,
+ lttng_hotplug_offline);
+ if (ret < 0) {
+ cpuhp_remove_multi_state(lttng_hp_prepare);
+ lttng_hp_prepare = 0;
+ return ret;
+ }
+ lttng_hp_online = ret;
+ lttng_rb_set_hp_online(ret);
+
+ return 0;
+}
+
+static void __exit lttng_exit_cpu_hotplug(void)
+{
+ lttng_rb_set_hp_online(0);
+ cpuhp_remove_multi_state(lttng_hp_online);
+ lttng_rb_set_hp_prepare(0);
+ cpuhp_remove_multi_state(lttng_hp_prepare);
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+static int lttng_init_cpu_hotplug(void)
+{
+ return 0;
+}
+static void lttng_exit_cpu_hotplug(void)
+{
+}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
static int __init lttng_events_init(void)
{
int ret;
ret = lttng_logger_init();
if (ret)
goto error_logger;
+ ret = lttng_init_cpu_hotplug();
+ if (ret)
+ goto error_hotplug;
printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)\n",
__stringify(LTTNG_MODULES_MAJOR_VERSION),
__stringify(LTTNG_MODULES_MINOR_VERSION),
LTTNG_VERSION_NAME);
return 0;
+error_hotplug:
+ lttng_logger_exit();
error_logger:
lttng_abi_exit();
error_abi:
{
struct lttng_session *session, *tmpsession;
+ lttng_exit_cpu_hotplug();
lttng_logger_exit();
lttng_abi_exit();
list_for_each_entry_safe(session, tmpsession, &sessions, list)
#include <linux/list.h>
#include <linux/kprobes.h>
#include <linux/kref.h>
+#include <lttng-cpuhotplug.h>
#include <wrapper/uuid.h>
#include <lttng-tracer.h>
#include <lttng-abi.h>
* lttng_ctx_field because cpu hotplug needs fixed-location addresses.
*/
struct lttng_perf_counter_field {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ struct lttng_cpuhp_node cpuhp_prepare;
+ struct lttng_cpuhp_node cpuhp_online;
+#else
struct notifier_block nb;
int hp_enable;
+#endif
struct perf_event_attr *attr;
struct perf_event **e; /* per-cpu array */
};
uint64_t config,
const char *name,
struct lttng_ctx **ctx);
+int lttng_cpuhp_perf_counter_online(unsigned int cpu,
+ struct lttng_cpuhp_node *node);
+int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
+ struct lttng_cpuhp_node *node);
#else
static inline
int lttng_add_perf_counter_to_ctx(uint32_t type,
{
return -ENOSYS;
}
+static inline
+int lttng_cpuhp_perf_counter_online(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ return 0;
+}
+static inline
+int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ return 0;
+}
#endif
int lttng_logger_init(void);