From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Date: Tue, 10 Jan 2017 16:19:51 +0000 (-0500)
Subject: Adapt lttng-modules to Linux 4.10 cpu hotplug state machine
X-Git-Tag: v2.8.5~10
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=8a30119bb20ac97d5372de5b5a067703fd40e97f;p=lttng-modules.git

Adapt lttng-modules to Linux 4.10 cpu hotplug state machine

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
---
diff --git a/lib/ringbuffer/backend_types.h b/lib/ringbuffer/backend_types.h
index c1d8be18..01ed63b9 100644
--- a/lib/ringbuffer/backend_types.h
+++ b/lib/ringbuffer/backend_types.h
@@ -25,6 +25,8 @@
 #include 
 #include 
+#include 
+#include 
 
 struct lib_ring_buffer_backend_page {
 	void *virt;			/* page virtual address (cached) */
@@ -97,7 +99,11 @@ struct channel_backend {
 	void *priv;			/* Client-specific information */
 	void *priv_ops;			/* Client-specific ops pointer */
 	void (*release_priv_ops)(void *priv_ops);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+	struct lttng_cpuhp_node cpuhp_prepare;	/* CPU hotplug prepare */
+#else
 	struct notifier_block cpu_hp_notifier;	/* CPU hotplug notifier */
+#endif
 	/*
 	 * We need to copy config because the module containing the
 	 * source config can vanish before the last reference to this
diff --git a/lib/ringbuffer/frontend_types.h b/lib/ringbuffer/frontend_types.h
index 9d7c23c9..cf64f2a1 100644
--- a/lib/ringbuffer/frontend_types.h
+++ b/lib/ringbuffer/frontend_types.h
@@ -33,6 +33,7 @@
 #include 
 #include 
 #include 		/* For per-CPU read-side iterator */
+#include 
 
 /*
  * A switch is done during tracing or as a final flush after tracing (so it
@@ -69,11 +70,17 @@ struct channel {
 	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
 	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+	struct lttng_cpuhp_node cpuhp_prepare;
+	struct lttng_cpuhp_node cpuhp_online;
+	struct lttng_cpuhp_node cpuhp_iter_online;
+#else
 	struct notifier_block cpu_hp_notifier;	/* CPU hotplug notifier */
-	struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
 	struct notifier_block hp_iter_notifier;	/* hotplug iterator notifier */
 	unsigned int cpu_hp_enable:1;		/* Enable CPU hotplug notif. */
 	unsigned int hp_iter_enable:1;		/* Enable hp iter notif. */
+#endif
+	struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
 	wait_queue_head_t read_wait;		/* reader wait queue */
 	wait_queue_head_t hp_wait;		/* CPU hotplug wait queue */
 	int finalized;				/* Has channel been finalized */
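Background for the hunks that follow: before Linux 4.10, each channel registered its own struct notifier_block and decoded CPU_UP_PREPARE/CPU_ONLINE style action codes; the 4.10 state machine instead has a subsystem register one callback pair per state and attach one instance per object. A minimal sketch of the instance side of that API (struct my_obj, my_state and the my_* functions are illustrative, not part of this patch):

#include <linux/cpuhotplug.h>
#include <linux/kernel.h>
#include <linux/printk.h>

struct my_obj {
	struct hlist_node node;		/* handed back to the callbacks */
	/* ... per-object data ... */
};

/* Dynamic state allocated elsewhere with cpuhp_setup_state_multi(). */
static enum cpuhp_state my_state;

static int my_online(unsigned int cpu, struct hlist_node *node)
{
	struct my_obj *obj = container_of(node, struct my_obj, node);

	pr_debug("obj %p: cpu %u coming online\n", obj, cpu);
	return 0;	/* a nonzero return aborts the hotplug transition */
}

static int my_attach(struct my_obj *obj)
{
	/*
	 * Runs my_online() for every CPU already online, then keeps the
	 * instance linked for future hotplug transitions.
	 */
	return cpuhp_state_add_instance(my_state, &obj->node);
}

This is why the structures above gain a struct lttng_cpuhp_node, which wraps such an hlist_node, in their >= 4.10 branches.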
diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
index 27554a1e..f760836c 100644
--- a/lib/ringbuffer/ring_buffer_backend.c
+++ b/lib/ringbuffer/ring_buffer_backend.c
@@ -249,7 +249,42 @@ void channel_backend_reset(struct channel_backend *chanb)
 	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
 }
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+/*
+ * No need to implement a "dead" callback to do a buffer switch here,
+ * because it will happen when tracing is stopped, or will be done by
+ * switch timer CPU DEAD callback.
+ * We don't free buffers when CPUs go away, because it would make trace
+ * data vanish, which is unwanted.
+ */
+int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	struct channel_backend *chanb = container_of(node,
+			struct channel_backend, cpuhp_prepare);
+	const struct lib_ring_buffer_config *config = &chanb->config;
+	struct lib_ring_buffer *buf;
+	int ret;
+
+	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+	buf = per_cpu_ptr(chanb->buf, cpu);
+	ret = lib_ring_buffer_create(buf, chanb, cpu);
+	if (ret) {
+		printk(KERN_ERR
+		  "ring_buffer_cpu_hp_callback: cpu %d "
+		  "buffer creation failed\n", cpu);
+		return ret;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 #ifdef CONFIG_HOTPLUG_CPU
+
 /**
  * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
  * @nb: notifier block
@@ -293,8 +328,11 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 	}
 	return NOTIFY_OK;
 }
+
 #endif
 
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 /**
  * channel_backend_init - initialize a channel backend
  * @chanb: channel backend
@@ -371,39 +409,50 @@ int channel_backend_init(struct channel_backend *chanb,
 		if (!chanb->buf)
 			goto free_cpumask;
-		/*
-		 * In case of non-hotplug cpu, if the ring-buffer is allocated
-		 * in early initcall, it will not be notified of secondary cpus.
-		 * In that off case, we need to allocate for all possible cpus.
-		 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
+		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
+			&chanb->cpuhp_prepare.node);
+		if (ret)
+			goto free_bufs;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+		{
+			/*
+			 * In case of non-hotplug cpu, if the ring-buffer is allocated
+			 * in early initcall, it will not be notified of secondary cpus.
+			 * In that off case, we need to allocate for all possible cpus.
+			 */
#ifdef CONFIG_HOTPLUG_CPU
-		/*
-		 * buf->backend.allocated test takes care of concurrent CPU
-		 * hotplug.
-		 * Priority higher than frontend, so we create the ring buffer
-		 * before we start the timer.
-		 */
-		chanb->cpu_hp_notifier.notifier_call =
-				lib_ring_buffer_cpu_hp_callback;
-		chanb->cpu_hp_notifier.priority = 5;
-		register_hotcpu_notifier(&chanb->cpu_hp_notifier);
-
-		get_online_cpus();
-		for_each_online_cpu(i) {
-			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
-						 chanb, i);
-			if (ret)
-				goto free_bufs;	/* cpu hotplug locked */
-		}
-		put_online_cpus();
+			/*
+			 * buf->backend.allocated test takes care of concurrent CPU
+			 * hotplug.
+			 * Priority higher than frontend, so we create the ring buffer
+			 * before we start the timer.
+			 */
+			chanb->cpu_hp_notifier.notifier_call =
+					lib_ring_buffer_cpu_hp_callback;
+			chanb->cpu_hp_notifier.priority = 5;
+			register_hotcpu_notifier(&chanb->cpu_hp_notifier);
+
+			get_online_cpus();
+			for_each_online_cpu(i) {
+				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+							 chanb, i);
+				if (ret)
+					goto free_bufs;	/* cpu hotplug locked */
+			}
+			put_online_cpus();
 #else
-		for_each_possible_cpu(i) {
-			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
-						 chanb, i);
-			if (ret)
-				goto free_bufs;	/* cpu hotplug locked */
-		}
+			for_each_possible_cpu(i) {
+				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+							 chanb, i);
+				if (ret)
+					goto free_bufs;
+			}
 #endif
+		}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 	} else {
 		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
 		if (!chanb->buf)
@@ -418,17 +467,24 @@ int channel_backend_init(struct channel_backend *chanb,
 
 free_bufs:
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
+				&chanb->cpuhp_prepare.node);
+		WARN_ON(ret);
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#ifdef CONFIG_HOTPLUG_CPU
+		put_online_cpus();
+		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
+#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 		for_each_possible_cpu(i) {
-			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
+			struct lib_ring_buffer *buf =
+				per_cpu_ptr(chanb->buf, i);
 
 			if (!buf->backend.allocated)
 				continue;
 			lib_ring_buffer_free(buf);
 		}
-#ifdef CONFIG_HOTPLUG_CPU
-		put_online_cpus();
-		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif
 		free_percpu(chanb->buf);
 	} else
 		kfree(chanb->buf);
@@ -448,8 +504,17 @@ void channel_backend_unregister_notifiers(struct channel_backend *chanb)
 {
 	const struct lib_ring_buffer_config *config = &chanb->config;
 
-	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+		int ret;
+
+		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
+				&chanb->cpuhp_prepare.node);
+		WARN_ON(ret);
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+	}
 }
 
 /**
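The net effect of the backend conversion above, reduced to its skeleton (assumed 4.10 semantics; my_backend and my_prepare_state are illustrative): one add call replaces notifier registration plus the hotplug-locked creation loop, and one remove call replaces the unregister.

#include <linux/bug.h>
#include <linux/cpuhotplug.h>

struct my_backend {
	struct hlist_node node;
};

static enum cpuhp_state my_prepare_state;	/* set up elsewhere */

static int my_backend_register(struct my_backend *b)
{
	/*
	 * Replaces register_hotcpu_notifier() plus the
	 * get_online_cpus()/for_each_online_cpu() creation loop: the
	 * core invokes the prepare callback for each CPU that already
	 * reached the state, under the hotplug lock, and (as I read
	 * the 4.10 code) unwinds the calls it made if one fails.
	 */
	return cpuhp_state_add_instance(my_prepare_state, &b->node);
}

static void my_backend_unregister(struct my_backend *b)
{
	/* Symmetric removal; also runs the teardown callback, if any. */
	WARN_ON(cpuhp_state_remove_instance(my_prepare_state, &b->node));
}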
diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c
index 2ffe991b..ea76cf8b 100644
--- a/lib/ringbuffer/ring_buffer_frontend.c
+++ b/lib/ringbuffer/ring_buffer_frontend.c
@@ -410,7 +410,81 @@ static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
 	buf->read_timer_enabled = 0;
 }
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+enum cpuhp_state lttng_rb_hp_prepare;
+enum cpuhp_state lttng_rb_hp_online;
+
+void lttng_rb_set_hp_prepare(enum cpuhp_state val)
+{
+	lttng_rb_hp_prepare = val;
+}
+EXPORT_SYMBOL_GPL(lttng_rb_set_hp_prepare);
+
+void lttng_rb_set_hp_online(enum cpuhp_state val)
+{
+	lttng_rb_hp_online = val;
+}
+EXPORT_SYMBOL_GPL(lttng_rb_set_hp_online);
+
+int lttng_cpuhp_rb_frontend_dead(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	struct channel *chan = container_of(node, struct channel,
+					    cpuhp_prepare);
+	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+	/*
+	 * Performing a buffer switch on a remote CPU. Performed by
+	 * the CPU responsible for doing the hotunplug after the target
+	 * CPU stopped running completely. Ensures that all data
+	 * from that remote CPU is flushed.
+	 */
+	lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_dead);
+
+int lttng_cpuhp_rb_frontend_online(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	struct channel *chan = container_of(node, struct channel,
+					    cpuhp_online);
+	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+	wake_up_interruptible(&chan->hp_wait);
+	lib_ring_buffer_start_switch_timer(buf);
+	lib_ring_buffer_start_read_timer(buf);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_online);
+
+int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	struct channel *chan = container_of(node, struct channel,
+					    cpuhp_online);
+	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+	lib_ring_buffer_stop_switch_timer(buf);
+	lib_ring_buffer_stop_read_timer(buf);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_offline);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 #ifdef CONFIG_HOTPLUG_CPU
+
 /**
  * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
  * @nb: notifier block
@@ -466,8 +540,11 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 		return NOTIFY_DONE;
 	}
 }
+
 #endif
 
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 #if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
 /*
  * For per-cpu buffers, call the reader wakeups before switching the buffer, so
@@ -556,7 +633,6 @@ void notrace lib_ring_buffer_tick_nohz_restart(void)
 static void channel_unregister_notifiers(struct channel *chan)
 {
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
-	int cpu;
 
 	channel_iterator_unregister_notifiers(chan);
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
@@ -573,25 +649,42 @@ static void channel_unregister_notifiers(struct channel *chan)
 		 * concurrency.
 		 */
 #endif /* CONFIG_NO_HZ */
-#ifdef CONFIG_HOTPLUG_CPU
-		get_online_cpus();
-		chan->cpu_hp_enable = 0;
-		for_each_online_cpu(cpu) {
-			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-							      cpu);
-			lib_ring_buffer_stop_switch_timer(buf);
-			lib_ring_buffer_stop_read_timer(buf);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+		{
+			int ret;
+
+			ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
+				&chan->cpuhp_online.node);
+			WARN_ON(ret);
+			ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare,
+				&chan->cpuhp_prepare.node);
+			WARN_ON(ret);
 		}
-		put_online_cpus();
-		unregister_cpu_notifier(&chan->cpu_hp_notifier);
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+		{
+			int cpu;
+
+#ifdef CONFIG_HOTPLUG_CPU
+			get_online_cpus();
+			chan->cpu_hp_enable = 0;
+			for_each_online_cpu(cpu) {
+				struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+								      cpu);
+				lib_ring_buffer_stop_switch_timer(buf);
+				lib_ring_buffer_stop_read_timer(buf);
+			}
+			put_online_cpus();
+			unregister_cpu_notifier(&chan->cpu_hp_notifier);
 #else
-		for_each_possible_cpu(cpu) {
-			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-							      cpu);
-			lib_ring_buffer_stop_switch_timer(buf);
-			lib_ring_buffer_stop_read_timer(buf);
-		}
+			for_each_possible_cpu(cpu) {
+				struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+								      cpu);
+				lib_ring_buffer_stop_switch_timer(buf);
+				lib_ring_buffer_stop_read_timer(buf);
+			}
 #endif
+		}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 	} else {
 		struct lib_ring_buffer *buf = chan->backend.buf;
 
@@ -692,7 +785,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
 		   size_t num_subbuf, unsigned int switch_timer_interval,
 		   unsigned int read_timer_interval)
 {
-	int ret, cpu;
+	int ret;
 	struct channel *chan;
 
 	if (lib_ring_buffer_check_config(config, switch_timer_interval,
@@ -720,6 +813,56 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
 	init_waitqueue_head(&chan->hp_wait);
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+		chan->cpuhp_prepare.component = LTTNG_RING_BUFFER_FRONTEND;
+		ret = cpuhp_state_add_instance_nocalls(lttng_rb_hp_prepare,
+			&chan->cpuhp_prepare.node);
+		if (ret)
+			goto cpuhp_prepare_error;
+
+		chan->cpuhp_online.component = LTTNG_RING_BUFFER_FRONTEND;
+		ret = cpuhp_state_add_instance(lttng_rb_hp_online,
+			&chan->cpuhp_online.node);
+		if (ret)
+			goto cpuhp_online_error;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+		{
+			int cpu;
+			/*
+			 * In case of non-hotplug cpu, if the ring-buffer is allocated
+			 * in early initcall, it will not be notified of secondary cpus.
+			 * In that off case, we need to allocate for all possible cpus.
+			 */
+#ifdef CONFIG_HOTPLUG_CPU
+			chan->cpu_hp_notifier.notifier_call =
+					lib_ring_buffer_cpu_hp_callback;
+			chan->cpu_hp_notifier.priority = 6;
+			register_cpu_notifier(&chan->cpu_hp_notifier);
+
+			get_online_cpus();
+			for_each_online_cpu(cpu) {
+				struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+								       cpu);
+				spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
+				lib_ring_buffer_start_switch_timer(buf);
+				lib_ring_buffer_start_read_timer(buf);
+				spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
+			}
+			chan->cpu_hp_enable = 1;
+			put_online_cpus();
+#else
+			for_each_possible_cpu(cpu) {
+				struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+									  cpu);
+				spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
+				lib_ring_buffer_start_switch_timer(buf);
+				lib_ring_buffer_start_read_timer(buf);
+				spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
+			}
+#endif
+		}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 #if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
 		/* Only benefit from NO_HZ idle with per-cpu buffers for now. */
 		chan->tick_nohz_notifier.notifier_call =
@@ -729,38 +872,6 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
 					       &chan->tick_nohz_notifier);
 #endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
 
-		/*
-		 * In case of non-hotplug cpu, if the ring-buffer is allocated
-		 * in early initcall, it will not be notified of secondary cpus.
-		 * In that off case, we need to allocate for all possible cpus.
-		 */
-#ifdef CONFIG_HOTPLUG_CPU
-		chan->cpu_hp_notifier.notifier_call =
-				lib_ring_buffer_cpu_hp_callback;
-		chan->cpu_hp_notifier.priority = 6;
-		register_cpu_notifier(&chan->cpu_hp_notifier);
-
-		get_online_cpus();
-		for_each_online_cpu(cpu) {
-			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-							       cpu);
-			spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
-			lib_ring_buffer_start_switch_timer(buf);
-			lib_ring_buffer_start_read_timer(buf);
-			spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
-		}
-		chan->cpu_hp_enable = 1;
-		put_online_cpus();
-#else
-		for_each_possible_cpu(cpu) {
-			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-								  cpu);
-			spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
-			lib_ring_buffer_start_switch_timer(buf);
-			lib_ring_buffer_start_read_timer(buf);
-			spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
-		}
-#endif
 	} else {
 		struct lib_ring_buffer *buf = chan->backend.buf;
 
@@ -770,6 +881,13 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
 
 	return chan;
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+cpuhp_online_error:
+	ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare,
+			&chan->cpuhp_prepare.node);
+	WARN_ON(ret);
+cpuhp_prepare_error:
+#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 error_free_backend:
 	channel_backend_free(&chan->backend);
 error:
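channel_create() above uses both flavours of instance registration. A condensed sketch of that pattern (my_chan and the two my_*_state variables are illustrative; the states are assumed to have been set up with cpuhp_setup_state_multi()):

#include <linux/bug.h>
#include <linux/cpuhotplug.h>

struct my_chan {
	struct hlist_node prepare_node;
	struct hlist_node online_node;
};

static enum cpuhp_state my_prepare_state, my_online_state;

static int my_chan_register(struct my_chan *chan)
{
	int ret;

	/*
	 * _nocalls: link the instance only; no callbacks are invoked.
	 * Matches the frontend, whose PREPARE state has nothing to do
	 * at registration time (only its DEAD callback matters).
	 */
	ret = cpuhp_state_add_instance_nocalls(my_prepare_state,
					       &chan->prepare_node);
	if (ret)
		return ret;
	/*
	 * Calling variant: runs the online callback (which starts the
	 * timers) on every CPU that is already online.
	 */
	ret = cpuhp_state_add_instance(my_online_state, &chan->online_node);
	if (ret)
		goto online_error;
	return 0;

online_error:
	WARN_ON(cpuhp_state_remove_instance_nocalls(my_prepare_state,
						    &chan->prepare_node));
	return ret;
}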
diff --git a/lib/ringbuffer/ring_buffer_iterator.c b/lib/ringbuffer/ring_buffer_iterator.c
index 47e151bf..b6bec48d 100644
--- a/lib/ringbuffer/ring_buffer_iterator.c
+++ b/lib/ringbuffer/ring_buffer_iterator.c
@@ -350,6 +350,25 @@ void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer
 	list_add(&buf->iter.empty_node, &chan->iter.empty_head);
 }
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+int lttng_cpuhp_rb_iter_online(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	struct channel *chan = container_of(node, struct channel,
+					    cpuhp_iter_online);
+	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+	lib_ring_buffer_iterator_init(chan, buf);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 #ifdef CONFIG_HOTPLUG_CPU
 static
 int channel_iterator_cpu_hotplug(struct notifier_block *nb,
@@ -380,13 +399,15 @@ int channel_iterator_cpu_hotplug(struct notifier_block *nb,
 }
 #endif
 
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 int channel_iterator_init(struct channel *chan)
 {
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	struct lib_ring_buffer *buf;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-		int cpu, ret;
+		int ret;
 
 		INIT_LIST_HEAD(&chan->iter.empty_head);
 		ret = lttng_heap_init(&chan->iter.heap,
@@ -394,29 +415,43 @@ int channel_iterator_init(struct channel *chan)
 				GFP_KERNEL, buf_is_higher);
 		if (ret)
 			return ret;
-		/*
-		 * In case of non-hotplug cpu, if the ring-buffer is allocated
-		 * in early initcall, it will not be notified of secondary cpus.
-		 * In that off case, we need to allocate for all possible cpus.
-		 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+		chan->cpuhp_iter_online.component = LTTNG_RING_BUFFER_ITER;
+		ret = cpuhp_state_add_instance(lttng_rb_hp_online,
+			&chan->cpuhp_iter_online.node);
+		if (ret)
+			return ret;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+		{
+			int cpu;
+
+			/*
+			 * In case of non-hotplug cpu, if the ring-buffer is allocated
+			 * in early initcall, it will not be notified of secondary cpus.
+			 * In that off case, we need to allocate for all possible cpus.
+			 */
#ifdef CONFIG_HOTPLUG_CPU
-		chan->hp_iter_notifier.notifier_call =
-			channel_iterator_cpu_hotplug;
-		chan->hp_iter_notifier.priority = 10;
-		register_cpu_notifier(&chan->hp_iter_notifier);
-		get_online_cpus();
-		for_each_online_cpu(cpu) {
-			buf = per_cpu_ptr(chan->backend.buf, cpu);
-			lib_ring_buffer_iterator_init(chan, buf);
-		}
-		chan->hp_iter_enable = 1;
-		put_online_cpus();
+			chan->hp_iter_notifier.notifier_call =
+				channel_iterator_cpu_hotplug;
+			chan->hp_iter_notifier.priority = 10;
+			register_cpu_notifier(&chan->hp_iter_notifier);
+
+			get_online_cpus();
+			for_each_online_cpu(cpu) {
+				buf = per_cpu_ptr(chan->backend.buf, cpu);
+				lib_ring_buffer_iterator_init(chan, buf);
+			}
+			chan->hp_iter_enable = 1;
+			put_online_cpus();
 #else
-		for_each_possible_cpu(cpu) {
-			buf = per_cpu_ptr(chan->backend.buf, cpu);
-			lib_ring_buffer_iterator_init(chan, buf);
-		}
+			for_each_possible_cpu(cpu) {
+				buf = per_cpu_ptr(chan->backend.buf, cpu);
+				lib_ring_buffer_iterator_init(chan, buf);
+			}
 #endif
+		}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 	} else {
 		buf = channel_get_ring_buffer(config, chan, 0);
 		lib_ring_buffer_iterator_init(chan, buf);
@@ -429,8 +464,18 @@ void channel_iterator_unregister_notifiers(struct channel *chan)
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+		{
+			int ret;
+
+			ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
+				&chan->cpuhp_iter_online.node);
+			WARN_ON(ret);
+		}
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 		chan->hp_iter_enable = 0;
 		unregister_cpu_notifier(&chan->hp_iter_notifier);
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 	}
 }
diff --git a/lttng-context-perf-counters.c b/lttng-context-perf-counters.c
index 239cf0cc..843cda6e 100644
--- a/lttng-context-perf-counters.c
+++ b/lttng-context-perf-counters.c
@@ -92,21 +92,80 @@ static
 void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
 {
 	struct perf_event **events = field->u.perf_counter->e;
-	int cpu;
 
-	get_online_cpus();
-	for_each_online_cpu(cpu)
-		perf_event_release_kernel(events[cpu]);
-	put_online_cpus();
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+	{
+		int ret;
+
+		ret = cpuhp_state_remove_instance(lttng_hp_online,
+			&field->u.perf_counter->cpuhp_online.node);
+		WARN_ON(ret);
+		ret = cpuhp_state_remove_instance(lttng_hp_prepare,
+			&field->u.perf_counter->cpuhp_prepare.node);
+		WARN_ON(ret);
+	}
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+	{
+		int cpu;
+
+		get_online_cpus();
+		for_each_online_cpu(cpu)
+			perf_event_release_kernel(events[cpu]);
+		put_online_cpus();
 #ifdef CONFIG_HOTPLUG_CPU
-	unregister_cpu_notifier(&field->u.perf_counter->nb);
+		unregister_cpu_notifier(&field->u.perf_counter->nb);
 #endif
+	}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 	kfree(field->event_field.name);
 	kfree(field->u.perf_counter->attr);
 	kfree(events);
 	kfree(field->u.perf_counter);
 }
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+int lttng_cpuhp_perf_counter_online(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	struct lttng_perf_counter_field *perf_field =
+		container_of(node, struct lttng_perf_counter_field,
+				cpuhp_online);
+	struct perf_event **events = perf_field->e;
+	struct perf_event_attr *attr = perf_field->attr;
+	struct perf_event *pevent;
+
+	pevent = wrapper_perf_event_create_kernel_counter(attr,
+			cpu, NULL, overflow_callback);
+	if (!pevent || IS_ERR(pevent))
+		return -EINVAL;
+	if (pevent->state == PERF_EVENT_STATE_ERROR) {
+		perf_event_release_kernel(pevent);
+		return -EINVAL;
+	}
+	barrier();	/* Create perf counter before setting event */
+	events[cpu] = pevent;
+	return 0;
+}
+
+int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	struct lttng_perf_counter_field *perf_field =
+		container_of(node, struct lttng_perf_counter_field,
+				cpuhp_prepare);
+	struct perf_event **events = perf_field->e;
+	struct perf_event *pevent;
+
+	pevent = events[cpu];
+	events[cpu] = NULL;
+	barrier();	/* NULLify event before perf counter teardown */
+	perf_event_release_kernel(pevent);
+	return 0;
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 /**
@@ -164,6 +223,8 @@ int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
 
 #endif
 
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 int lttng_add_perf_counter_to_ctx(uint32_t type,
 				  uint64_t config,
 				  const char *name,
@@ -174,7 +235,6 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 	struct perf_event **events;
 	struct perf_event_attr *attr;
 	int ret;
-	int cpu;
 	char *name_alloc;
 
 	events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
@@ -217,27 +277,47 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 		goto find_error;
 	}
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+	perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
+	ret = cpuhp_state_add_instance(lttng_hp_prepare,
+		&perf_field->cpuhp_prepare.node);
+	if (ret)
+		goto cpuhp_prepare_error;
+
+	perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
+	ret = cpuhp_state_add_instance(lttng_hp_online,
+		&perf_field->cpuhp_online.node);
+	if (ret)
+		goto cpuhp_online_error;
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+	{
+		int cpu;
+
 #ifdef CONFIG_HOTPLUG_CPU
-	perf_field->nb.notifier_call =
-		lttng_perf_counter_cpu_hp_callback;
-	perf_field->nb.priority = 0;
-	register_cpu_notifier(&perf_field->nb);
+		perf_field->nb.notifier_call =
+			lttng_perf_counter_cpu_hp_callback;
+		perf_field->nb.priority = 0;
+		register_cpu_notifier(&perf_field->nb);
 #endif
-
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
-					cpu, NULL, overflow_callback);
-		if (!events[cpu] || IS_ERR(events[cpu])) {
-			ret = -EINVAL;
-			goto counter_error;
-		}
-		if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
-			ret = -EBUSY;
-			goto counter_busy;
+		get_online_cpus();
+		for_each_online_cpu(cpu) {
+			events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
+						cpu, NULL, overflow_callback);
+			if (!events[cpu] || IS_ERR(events[cpu])) {
+				ret = -EINVAL;
+				goto counter_error;
+			}
+			if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
+				ret = -EBUSY;
+				goto counter_busy;
+			}
 		}
+		put_online_cpus();
+		perf_field->hp_enable = 1;
 	}
-	put_online_cpus();
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 
 	field->destroy = lttng_destroy_perf_counter_field;
 
@@ -252,12 +332,22 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 	field->get_size = perf_counter_get_size;
 	field->record = perf_counter_record;
 	field->u.perf_counter = perf_field;
-	perf_field->hp_enable = 1;
 	lttng_context_update(*ctx);
 
 	wrapper_vmalloc_sync_all();
 	return 0;
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+cpuhp_online_error:
+	{
+		int remove_ret;
+
+		remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
+				&perf_field->cpuhp_prepare.node);
+		WARN_ON(remove_ret);
+	}
+cpuhp_prepare_error:
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 counter_busy:
 counter_error:
 	for_each_online_cpu(cpu) {
@@ -268,6 +358,7 @@ counter_error:
 #ifdef CONFIG_HOTPLUG_CPU
 	unregister_cpu_notifier(&perf_field->nb);
 #endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 find_error:
 	lttng_remove_context_field(ctx, field);
 append_context_error:
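The new perf-counter callbacks rely on a publish/unpublish ordering around barrier(): fully construct the event before other code can see its pointer, and hide the pointer before tearing the event down. The same pattern in isolation (struct foo and slots[] are illustrative; note barrier() is only a compiler barrier, the same assumption the pre-4.10 notifier code made):

#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/threads.h>

struct foo {
	int cpu;
};

static struct foo *slots[NR_CPUS];

static int foo_online(unsigned int cpu)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->cpu = cpu;
	barrier();	/* fully construct f before publishing it */
	slots[cpu] = f;
	return 0;
}

static int foo_dead(unsigned int cpu)
{
	struct foo *f = slots[cpu];

	slots[cpu] = NULL;
	barrier();	/* unpublish the slot before teardown */
	kfree(f);
	return 0;
}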
diff --git a/lttng-cpuhotplug.h b/lttng-cpuhotplug.h
new file mode 100644
index 00000000..da965d2f
--- /dev/null
+++ b/lttng-cpuhotplug.h
@@ -0,0 +1,64 @@
+#ifndef LTTNG_CPUHOTPLUG_H
+#define LTTNG_CPUHOTPLUG_H
+
+/*
+ * lttng-cpuhotplug.h
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include 
+
+struct lttng_cpuhp_node;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+enum lttng_cpuhp_component {
+	LTTNG_RING_BUFFER_FRONTEND,
+	LTTNG_RING_BUFFER_BACKEND,
+	LTTNG_RING_BUFFER_ITER,
+	LTTNG_CONTEXT_PERF_COUNTERS,
+};
+
+struct lttng_cpuhp_node {
+	enum lttng_cpuhp_component component;
+	struct hlist_node node;
+};
+
+extern enum cpuhp_state lttng_hp_prepare;
+extern enum cpuhp_state lttng_hp_online;
+
+int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
+		struct lttng_cpuhp_node *node);
+int lttng_cpuhp_rb_frontend_dead(unsigned int cpu,
+		struct lttng_cpuhp_node *node);
+int lttng_cpuhp_rb_frontend_online(unsigned int cpu,
+		struct lttng_cpuhp_node *node);
+int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
+		struct lttng_cpuhp_node *node);
+int lttng_cpuhp_rb_iter_online(unsigned int cpu,
+		struct lttng_cpuhp_node *node);
+
+/* Ring buffer is a separate library. */
+void lttng_rb_set_hp_prepare(enum cpuhp_state val);
+void lttng_rb_set_hp_online(enum cpuhp_state val);
+
+extern enum cpuhp_state lttng_rb_hp_prepare;
+extern enum cpuhp_state lttng_rb_hp_online;
+
+#endif
+
+#endif /* LTTNG_CPUHOTPLUG_H */
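The setter declarations at the end of the header exist because the ring buffer is built as a separate module: only lttng-events.c allocates the dynamic state numbers, then pushes them into the library so both modules attach instances to the same states. A sketch of that handoff, with both sides shown in one snippet (all lib_*/demo_*/core_* names are illustrative):

#include <linux/cpuhotplug.h>
#include <linux/export.h>
#include <linux/init.h>

/* In the library module: the state number is received, not allocated. */
static enum cpuhp_state lib_hp_online;

void lib_set_hp_online(enum cpuhp_state val)
{
	lib_hp_online = val;
}
EXPORT_SYMBOL_GPL(lib_set_hp_online);

/* In the core module: allocate the state once, then share it. */
static int demo_online_cb(unsigned int cpu, struct hlist_node *node)
{
	return 0;
}

static int demo_offline_cb(unsigned int cpu, struct hlist_node *node)
{
	return 0;
}

static int __init core_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "demo:online",
				      demo_online_cb, demo_offline_cb);
	if (ret < 0)
		return ret;
	lib_set_hp_online(ret);	/* hand the allocated state to the library */
	return 0;
}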
diff --git a/lttng-events.c b/lttng-events.c
index 400c5eca..e91caf9f 100644
--- a/lttng-events.c
+++ b/lttng-events.c
@@ -2277,6 +2277,133 @@ void lttng_transport_unregister(struct lttng_transport *transport)
 }
 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
 
+#if (defined(CONFIG_HOTPLUG_CPU) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)))
+
+enum cpuhp_state lttng_hp_prepare;
+enum cpuhp_state lttng_hp_online;
+
+static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
+{
+	struct lttng_cpuhp_node *lttng_node;
+
+	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+	switch (lttng_node->component) {
+	case LTTNG_RING_BUFFER_FRONTEND:
+		return 0;
+	case LTTNG_RING_BUFFER_BACKEND:
+		return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
+	case LTTNG_RING_BUFFER_ITER:
+		return 0;
+	case LTTNG_CONTEXT_PERF_COUNTERS:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
+{
+	struct lttng_cpuhp_node *lttng_node;
+
+	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+	switch (lttng_node->component) {
+	case LTTNG_RING_BUFFER_FRONTEND:
+		return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
+	case LTTNG_RING_BUFFER_BACKEND:
+		return 0;
+	case LTTNG_RING_BUFFER_ITER:
+		return 0;
+	case LTTNG_CONTEXT_PERF_COUNTERS:
+		return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
+{
+	struct lttng_cpuhp_node *lttng_node;
+
+	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+	switch (lttng_node->component) {
+	case LTTNG_RING_BUFFER_FRONTEND:
+		return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
+	case LTTNG_RING_BUFFER_BACKEND:
+		return 0;
+	case LTTNG_RING_BUFFER_ITER:
+		return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
+	case LTTNG_CONTEXT_PERF_COUNTERS:
+		return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
+{
+	struct lttng_cpuhp_node *lttng_node;
+
+	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
+	switch (lttng_node->component) {
+	case LTTNG_RING_BUFFER_FRONTEND:
+		return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
+	case LTTNG_RING_BUFFER_BACKEND:
+		return 0;
+	case LTTNG_RING_BUFFER_ITER:
+		return 0;
+	case LTTNG_CONTEXT_PERF_COUNTERS:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int __init lttng_init_cpu_hotplug(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
+			lttng_hotplug_prepare,
+			lttng_hotplug_dead);
+	if (ret < 0) {
+		return ret;
+	}
+	lttng_hp_prepare = ret;
+	lttng_rb_set_hp_prepare(ret);
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
+			lttng_hotplug_online,
+			lttng_hotplug_offline);
+	if (ret < 0) {
+		cpuhp_remove_multi_state(lttng_hp_prepare);
+		lttng_hp_prepare = 0;
+		return ret;
+	}
+	lttng_hp_online = ret;
+	lttng_rb_set_hp_online(ret);
+
+	return 0;
+}
+
+static void __exit lttng_exit_cpu_hotplug(void)
+{
+	lttng_rb_set_hp_online(0);
+	cpuhp_remove_multi_state(lttng_hp_online);
+	lttng_rb_set_hp_prepare(0);
+	cpuhp_remove_multi_state(lttng_hp_prepare);
+}
+
+#else /* #if (CONFIG_HOTPLUG_CPU && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))) */
+static int lttng_init_cpu_hotplug(void)
+{
+	return 0;
+}
+static void lttng_exit_cpu_hotplug(void)
+{
+}
+#endif /* #else #if (CONFIG_HOTPLUG_CPU && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))) */
+
+
 static int __init lttng_events_init(void)
 {
 	int ret;
@@ -2307,8 +2434,13 @@ static int __init lttng_events_init(void)
 	ret = lttng_logger_init();
 	if (ret)
 		goto error_logger;
+	ret = lttng_init_cpu_hotplug();
+	if (ret)
+		goto error_hotplug;
 	return 0;
 
+error_hotplug:
+	lttng_logger_exit();
 error_logger:
 	lttng_abi_exit();
 error_abi:
@@ -2326,6 +2458,7 @@ static void __exit lttng_events_exit(void)
 {
 	struct lttng_session *session, *tmpsession;
 
+	lttng_exit_cpu_hotplug();
 	lttng_logger_exit();
 	lttng_abi_exit();
 	list_for_each_entry_safe(session, tmpsession, &sessions, list)
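The init/exit pair above encodes two ordering rules worth making explicit (this is my reading of the 4.10 API, not something stated in the patch): on cpu-up, PREPARE-stage callbacks run before ONLINE-stage ones, with teardown in the reverse order on cpu-down; and cpuhp_remove_multi_state() must be called in reverse setup order, after every instance has been removed. Condensed (demo_* names illustrative):

#include <linux/cpuhotplug.h>

/*
 * Assumed transition order for a PREPARE + ONLINE state pair:
 *
 *   cpu up:   prepare callback (control CPU) -> online callback (new CPU)
 *   cpu down: offline callback               -> dead callback (control CPU)
 */
static enum cpuhp_state demo_prepare, demo_online;

static void demo_cleanup(void)
{
	/* Reverse order of setup, mirroring lttng_exit_cpu_hotplug(). */
	cpuhp_remove_multi_state(demo_online);
	cpuhp_remove_multi_state(demo_prepare);
}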
diff --git a/lttng-events.h b/lttng-events.h
index c246860d..2185571f 100644
--- a/lttng-events.h
+++ b/lttng-events.h
@@ -27,6 +27,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -154,8 +155,13 @@ union lttng_ctx_value {
  * lttng_ctx_field because cpu hotplug needs fixed-location addresses.
  */
 struct lttng_perf_counter_field {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+	struct lttng_cpuhp_node cpuhp_prepare;
+	struct lttng_cpuhp_node cpuhp_online;
+#else
 	struct notifier_block nb;
 	int hp_enable;
+#endif
 	struct perf_event_attr *attr;
 	struct perf_event **e;	/* per-cpu array */
 };
@@ -660,6 +666,10 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 			uint64_t config,
 			const char *name,
 			struct lttng_ctx **ctx);
+int lttng_cpuhp_perf_counter_online(unsigned int cpu,
+		struct lttng_cpuhp_node *node);
+int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
+		struct lttng_cpuhp_node *node);
 #else
 static inline
 int lttng_add_perf_counter_to_ctx(uint32_t type,
@@ -669,6 +679,18 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 {
 	return -ENOSYS;
 }
+static inline
+int lttng_cpuhp_perf_counter_online(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	return 0;
+}
+static inline
+int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	return 0;
+}
 #endif
 
 int lttng_logger_init(void);
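Finally, the stubs at the tail of lttng-events.h show the pattern that keeps the hotplug dispatch in lttng-events.c compiling when perf support is configured out: the handlers are replaced by static inline no-ops that report success. The shape of it, reduced to one handler (CONFIG_MY_FEATURE and the my_* names are illustrative):

struct my_node;		/* stand-in for struct lttng_cpuhp_node */

#ifdef CONFIG_MY_FEATURE
int my_feature_cpuhp_online(unsigned int cpu, struct my_node *node);
#else
static inline
int my_feature_cpuhp_online(unsigned int cpu, struct my_node *node)
{
	return 0;	/* nothing to do; report success to the state machine */
}
#endif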