*
* RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
* ready to read. Lower latencies before the reader is woken up. Mainly suitable
- * for drivers.
+ * for drivers. Going through an "irq_work" allows triggering this type of wakeup
+ * even from NMI context: the wakeup will be slightly delayed until the next
+ * interrupt is handled.
*
* RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
* has the responsibility to perform wakeups.
enum {
RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
RING_BUFFER_WAKEUP_BY_WRITER, /*
- * writer wakes up reader,
- * not lock-free
- * (takes spinlock).
+ * writer wakes up reader through
+ * irq_work.
*/
} wakeup;
/*
#define _LIB_RING_BUFFER_FRONTEND_TYPES_H
#include <linux/kref.h>
+#include <linux/irq_work.h>
#include <ringbuffer/config.h>
#include <ringbuffer/backend_types.h>
#include <lttng/prio_heap.h> /* For per-CPU read-side iterator */
struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
wait_queue_head_t read_wait; /* reader wait queue */
wait_queue_head_t hp_wait; /* CPU hotplug wait queue */
+ struct irq_work wakeup_pending; /* Pending wakeup irq work */
int finalized; /* Has channel been finalized */
struct channel_iter iter; /* Channel read-side iterator */
struct kref ref; /* Reference count */
union v_atomic records_overrun; /* Number of overwritten records */
wait_queue_head_t read_wait; /* reader buffer-level wait queue */
wait_queue_head_t write_wait; /* writer buffer-level wait queue (for metadata only) */
+ struct irq_work wakeup_pending; /* Pending wakeup irq work */
int finalized; /* buffer has been finalized */
struct timer_list switch_timer; /* timer for periodical switch */
struct timer_list read_timer; /* timer for read poll */
{
struct channel *chan = buf->backend.chan;
+ irq_work_sync(&buf->wakeup_pending);
+
lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
lttng_kvfree(buf->commit_hot);
lttng_kvfree(buf->commit_cold);
}
EXPORT_SYMBOL_GPL(channel_reset);
+/*
+ * irq_work handler: deferred wakeup of readers waiting on a single
+ * ring buffer. Runs in interrupt context, so the wakeup can be
+ * triggered safely even when the writer delivered from NMI context.
+ */
+static void lib_ring_buffer_pending_wakeup_buf(struct irq_work *entry)
+{
+	struct lib_ring_buffer *buf = container_of(entry, struct lib_ring_buffer,
+						   wakeup_pending);
+	wake_up_interruptible(&buf->read_wait);
+}
+
+/*
+ * irq_work handler: deferred wakeup of readers waiting at the channel
+ * level. Runs in interrupt context, so the wakeup can be triggered
+ * safely even when the writer delivered from NMI context.
+ */
+static void lib_ring_buffer_pending_wakeup_chan(struct irq_work *entry)
+{
+	struct channel *chan = container_of(entry, struct channel, wakeup_pending);
+	wake_up_interruptible(&chan->read_wait);
+}
+
/*
* Must be called under cpu hotplug protection.
*/
init_waitqueue_head(&buf->read_wait);
init_waitqueue_head(&buf->write_wait);
+ init_irq_work(&buf->wakeup_pending, lib_ring_buffer_pending_wakeup_buf);
raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
/*
kref_init(&chan->ref);
init_waitqueue_head(&chan->read_wait);
init_waitqueue_head(&chan->hp_wait);
+ init_irq_work(&chan->wakeup_pending, lib_ring_buffer_pending_wakeup_chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
const struct lib_ring_buffer_config *config = &chan->backend.config;
void *priv;
+ irq_work_sync(&chan->wakeup_pending);
+
channel_unregister_notifiers(chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
commit_count, idx);
/*
- * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
+ * RING_BUFFER_WAKEUP_BY_WRITER uses an irq_work to issue
+ * the wakeups.
*/
if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
&& atomic_long_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf, chan)) {
- wake_up_interruptible(&buf->read_wait);
- wake_up_interruptible(&chan->read_wait);
+ irq_work_queue(&buf->wakeup_pending);
+ irq_work_queue(&chan->wakeup_pending);
}
}