{
struct channel *chan = buf->backend.chan;
+ irq_work_sync(&buf->wakeup_pending);
+
lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
lttng_kvfree(buf->commit_hot);
lttng_kvfree(buf->commit_cold);
}
EXPORT_SYMBOL_GPL(channel_reset);
+/*
+ * irq_work handler: perform the deferred wakeup of readers blocked on a
+ * per-buffer read_wait queue. Used by RING_BUFFER_WAKEUP_BY_WRITER, which
+ * queues this work instead of calling wake_up_interruptible() directly,
+ * because the direct wakeup is not lock-free and thus unsafe from the
+ * writer's context (see the wakeup site queuing buf->wakeup_pending).
+ */
+static void lib_ring_buffer_pending_wakeup_buf(struct irq_work *entry)
+{
+	struct lib_ring_buffer *buf = container_of(entry, struct lib_ring_buffer,
+						   wakeup_pending);
+	wake_up_interruptible(&buf->read_wait);
+}
+
+/*
+ * irq_work handler: perform the deferred wakeup of readers blocked on the
+ * channel-wide read_wait queue. Counterpart of
+ * lib_ring_buffer_pending_wakeup_buf for the channel; queued via
+ * chan->wakeup_pending so the writer path never takes the waitqueue lock
+ * itself. Must be synced (irq_work_sync) before the channel is torn down.
+ */
+static void lib_ring_buffer_pending_wakeup_chan(struct irq_work *entry)
+{
+	struct channel *chan = container_of(entry, struct channel, wakeup_pending);
+	wake_up_interruptible(&chan->read_wait);
+}
+
/*
* Must be called under cpu hotplug protection.
*/
init_waitqueue_head(&buf->read_wait);
init_waitqueue_head(&buf->write_wait);
+ init_irq_work(&buf->wakeup_pending, lib_ring_buffer_pending_wakeup_buf);
raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
/*
kref_init(&chan->ref);
init_waitqueue_head(&chan->read_wait);
init_waitqueue_head(&chan->hp_wait);
+ init_irq_work(&chan->wakeup_pending, lib_ring_buffer_pending_wakeup_chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
const struct lib_ring_buffer_config *config = &chan->backend.config;
void *priv;
+ irq_work_sync(&chan->wakeup_pending);
+
channel_unregister_notifiers(chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
int finalized;
retry:
- finalized = READ_ONCE(buf->finalized);
+ finalized = LTTNG_READ_ONCE(buf->finalized);
/*
* Read finalized before counters.
*/
return -EBUSY;
}
retry:
- finalized = READ_ONCE(buf->finalized);
+ finalized = LTTNG_READ_ONCE(buf->finalized);
/*
* Read finalized before counters.
*/
if (subbuf_offset(commit_count, chan) != 0)
printk(KERN_WARNING
- "ring buffer %s, cpu %d: "
+ "LTTng: ring buffer %s, cpu %d: "
"commit count in subbuffer %lu,\n"
"expecting multiples of %lu bytes\n"
" [ %lu bytes committed, %lu bytes reader-visible ]\n",
chan->backend.subbuf_size,
commit_count, commit_count_sb);
- printk(KERN_DEBUG "ring buffer: %s, cpu %d: %lu bytes committed\n",
+ printk(KERN_DEBUG "LTTng: ring buffer: %s, cpu %d: %lu bytes committed\n",
chan->backend.name, cpu, commit_count);
}
cons_offset = atomic_long_read(&buf->consumed);
if (write_offset != cons_offset)
printk(KERN_DEBUG
- "ring buffer %s, cpu %d: "
+ "LTTng: ring buffer %s, cpu %d: "
"non-consumed data\n"
" [ %lu bytes written, %lu bytes read ]\n",
chan->backend.name, cpu, write_offset, cons_offset);
const struct lib_ring_buffer_config *config = &chan->backend.config;
if (!strcmp(chan->backend.name, "relay-metadata")) {
- printk(KERN_DEBUG "ring buffer %s: %lu records written, "
+ printk(KERN_DEBUG "LTTng: ring buffer %s: %lu records written, "
"%lu records overrun\n",
chan->backend.name,
v_read(config, &buf->records_count),
v_read(config, &buf->records_overrun));
} else {
- printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
+ printk(KERN_DEBUG "LTTng: ring buffer %s, cpu %d: %lu records written, "
"%lu records overrun\n",
chan->backend.name, cpu,
v_read(config, &buf->records_count),
|| v_read(config, &buf->records_lost_wrap)
|| v_read(config, &buf->records_lost_big))
printk(KERN_WARNING
- "ring buffer %s, cpu %d: records were lost. Caused by:\n"
+ "LTTng: ring buffer %s, cpu %d: records were lost. Caused by:\n"
" [ %lu buffer full, %lu nest buffer wrap-around, "
"%lu event too big ]\n",
chan->backend.name, cpu,
commit_count, idx);
/*
- * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
+ * RING_BUFFER_WAKEUP_BY_WRITER uses an irq_work to issue
+ * the wakeups.
*/
if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
&& atomic_long_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf, chan)) {
- wake_up_interruptible(&buf->read_wait);
- wake_up_interruptible(&chan->read_wait);
+ irq_work_queue(&buf->wakeup_pending);
+ irq_work_queue(&chan->wakeup_pending);
}
}