Don't wait on the readers from the session teardown anymore, because if
the only thread that can complete reading the buffer is the same waiting
on this condition, it deadlocks.
If the session daemon needs to wait on consumers, it can always do that
in user-space through some other mechanism.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Dual LGPL v2.1/GPL v2 license.
*/
#include "../../wrapper/ringbuffer/config.h"
#include "../../wrapper/ringbuffer/backend_types.h"
#include "../../wrapper/prio_heap.h" /* For per-CPU read-side iterator */
int hp_iter_enable:1; /* Enable hp iter notif. */
wait_queue_head_t read_wait; /* reader wait queue */
struct channel_iter iter; /* Channel read-side iterator */
- atomic_long_t read_ref; /* Reader reference count */
+ struct kref ref; /* Reference count */
};
/* Per-subbuffer commit counters used on the hot path */
chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
init_waitqueue_head(&chan->read_wait);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
}
EXPORT_SYMBOL_GPL(channel_create);
+/* kref release callback: frees the channel once the last reference
+ * (teardown's initial ref plus any active readers) has been dropped. */
+static
+void channel_release(struct kref *kref)
+{
+ struct channel *chan = container_of(kref, struct channel, ref);
+ channel_free(chan);
+}
+
/**
 * channel_destroy - Finalize, wait for q.s. and destroy channel.
 * @chan: channel to destroy
wake_up_interruptible(&buf->read_wait);
}
wake_up_interruptible(&chan->read_wait);
-
- while (atomic_long_read(&chan->read_ref) > 0)
- msleep(100);
- /* Finish waiting for refcount before free */
- smp_mb();
+ kref_put(&chan->ref, channel_release);
priv = chan->backend.priv;
return priv;
}
EXPORT_SYMBOL_GPL(channel_destroy);
if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
return -EBUSY;
- atomic_long_inc(&chan->read_ref);
smp_mb__after_atomic_inc();
return 0;
}
CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
smp_mb__before_atomic_dec();
- atomic_long_dec(&chan->read_ref);
atomic_long_dec(&buf->active_readers);
+ kref_put(&chan->ref, channel_release);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_release_read);
return 0;
if (session->metadata_dumped)
goto skip_session;
+ if (!session->metadata) {
+ printk(KERN_WARNING "LTTng: tracing is starting, but metadata channel is not found\n");
+ return -EPERM;
+ }
skip_session:
list_for_each_entry(chan, &session->chan, list) {