ltt-relay-objs := ltt-events.o ltt-debugfs-abi.o \
ltt-probes.o ltt-core.o ltt-context.o \
lttng-context-pid.o lttng-context-comm.o \
- lttng-context-prio.o lttng-context-nice.o
+ lttng-context-prio.o lttng-context-nice.o \
+ wrapper/poll.o
ifneq ($(CONFIG_PERF_EVENTS),)
ltt-relay-objs += lttng-context-perf-counters.o
ringbuffer/ring_buffer_vfs.o \
ringbuffer/ring_buffer_splice.o \
ringbuffer/ring_buffer_mmap.o \
- prio_heap/lttng_prio_heap.o
+ prio_heap/lttng_prio_heap.o \
+ ../wrapper/poll.o
return finalized;
}
+/*
+ * Report whether the ring buffer channel has been finalized (chan->finalized
+ * is set at channel finalization, just before waking hp_wait/read_wait).
+ * Used by clients' poll() implementations to return POLLHUP.
+ */
+static inline
+int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
+{
+	return chan->finalized;
+}
+
static inline
unsigned long lib_ring_buffer_get_read_data_size(
const struct lib_ring_buffer_config *config,
int cpu_hp_enable:1; /* Enable CPU hotplug notif. */
int hp_iter_enable:1; /* Enable hp iter notif. */
wait_queue_head_t read_wait; /* reader wait queue */
+ wait_queue_head_t hp_wait; /* CPU hotplug wait queue */
+ int finalized; /* Has channel been finalized */
struct channel_iter iter; /* Channel read-side iterator */
struct kref ref; /* Reference count */
};
case CPU_DOWN_FAILED_FROZEN:
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
+ wake_up_interruptible(&chan->hp_wait);
lib_ring_buffer_start_switch_timer(buf);
lib_ring_buffer_start_read_timer(buf);
return NOTIFY_OK;
chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
kref_init(&chan->ref);
init_waitqueue_head(&chan->read_wait);
+ init_waitqueue_head(&chan->hp_wait);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
ACCESS_ONCE(buf->finalized) = 1;
wake_up_interruptible(&buf->read_wait);
}
+ ACCESS_ONCE(chan->finalized) = 1;
+ wake_up_interruptible(&chan->hp_wait);
wake_up_interruptible(&chan->read_wait);
kref_put(&chan->ref, channel_release);
priv = chan->backend.priv;
int finalized;
if (filp->f_mode & FMODE_READ) {
- poll_wait_set_exclusive(wait);
+ init_poll_funcptr(wait, wrapper_pollwait_exclusive);
poll_wait(filp, &buf->read_wait, wait);
finalized = lib_ring_buffer_is_finalized(config, buf);
#include <linux/slab.h>
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "wrapper/ringbuffer/vfs.h"
+#include "wrapper/poll.h"
#include "ltt-debugfs-abi.h"
#include "ltt-events.h"
#include "ltt-tracer.h"
}
}
-/* TODO: poll */
-#if 0
/**
* lttng_channel_poll - lttng stream addition/removal monitoring
*
unsigned int mask = 0;
if (file->f_mode & FMODE_READ) {
- poll_wait_set_exclusive(wait);
- poll_wait(file, &channel->notify_wait, wait);
+ init_poll_funcptr(wait, wrapper_pollwait_exclusive);
+ poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
+ wait);
- /* TODO: identify when the channel is being finalized. */
- if (finalized)
+ if (channel->ops->is_finalized(channel->chan))
return POLLHUP;
else
return POLLIN | POLLRDNORM;
return mask;
}
-#endif //0
static
int lttng_channel_release(struct inode *inode, struct file *file)
static const struct file_operations lttng_channel_fops = {
.release = lttng_channel_release,
-/* TODO */
-#if 0
.poll = lttng_channel_poll,
-#endif //0
.unlocked_ioctl = lttng_channel_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = lttng_channel_ioctl,
if (!chan)
goto nomem;
chan->session = session;
- init_waitqueue_head(&chan->notify_wait);
chan->id = session->free_chan_id++;
/*
* Note: the channel creation op already writes into the packet
* we need to bail out after timeout or being
* interrupted.
*/
- waitret = wait_event_interruptible_timeout(*chan->ops->get_reader_wait_queue(chan),
+ waitret = wait_event_interruptible_timeout(*chan->ops->get_reader_wait_queue(chan->chan),
({
ret = chan->ops->event_reserve(&ctx, 0);
ret != -ENOBUFS || !ret;
* may change due to concurrent writes.
*/
size_t (*packet_avail_size)(struct channel *chan);
- wait_queue_head_t *(*get_reader_wait_queue)(struct ltt_channel *chan);
+ wait_queue_head_t *(*get_reader_wait_queue)(struct channel *chan);
+ wait_queue_head_t *(*get_hp_wait_queue)(struct channel *chan);
+ int (*is_finalized)(struct channel *chan);
};
struct ltt_channel {
struct file *file; /* File associated to channel */
unsigned int free_event_id; /* Next event ID to allocate */
struct list_head list; /* Channel list */
- wait_queue_head_t notify_wait; /* Channel addition notif. waitqueue */
struct ltt_channel_ops *ops;
int header_type; /* 0: unset, 1: compact, 2: large */
int metadata_dumped:1;
}
static
-wait_queue_head_t *ltt_get_reader_wait_queue(struct ltt_channel *chan)
+wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
{
-	return &chan->chan->read_wait;
+	return &chan->read_wait;
+}
+
+/* Wait queue woken on CPU hotplug events and at channel finalization. */
+static
+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
+{
+	return &chan->hp_wait;
+}
+
+/* Non-zero once the underlying ring buffer channel has been finalized. */
+static
+int ltt_is_finalized(struct channel *chan)
+{
+	return lib_ring_buffer_channel_is_finalized(chan);
}
static struct ltt_transport ltt_relay_transport = {
.event_write = ltt_event_write,
.packet_avail_size = NULL, /* Would be racy anyway */
.get_reader_wait_queue = ltt_get_reader_wait_queue,
+ .get_hp_wait_queue = ltt_get_hp_wait_queue,
+ .is_finalized = ltt_is_finalized,
},
};
}
static
-wait_queue_head_t *ltt_get_reader_wait_queue(struct ltt_channel *chan)
+wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
{
-	return &chan->chan->read_wait;
+	return &chan->read_wait;
+}
+
+/* Wait queue woken on CPU hotplug events and at channel finalization. */
+static
+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
+{
+	return &chan->hp_wait;
+}
+
+/* Non-zero once the underlying ring buffer channel has been finalized. */
+static
+int ltt_is_finalized(struct channel *chan)
+{
+	return lib_ring_buffer_channel_is_finalized(chan);
}
static struct ltt_transport ltt_relay_transport = {
.event_write = ltt_event_write,
.packet_avail_size = ltt_packet_avail_size,
.get_reader_wait_queue = ltt_get_reader_wait_queue,
+ .get_hp_wait_queue = ltt_get_hp_wait_queue,
+ .is_finalized = ltt_is_finalized,
},
};
--- /dev/null
+/*
+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
+ *
+ * wrapper around poll __pollwait and poll_get_entry. Using KALLSYMS to get
+ * their addresses when available, else we need to have a kernel that exports
+ * these functions to GPL modules.
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+
+#ifdef CONFIG_KALLSYMS
+
+#include <linux/kallsyms.h>
+#include <linux/poll.h>
+
+struct poll_table_entry;
+
+/*
+ * Both symbols are static within fs/select.c; resolve them lazily through
+ * kallsyms on first use and cache the resulting pointers.
+ */
+static
+void (*__pollwait_sym)(struct file *filp, wait_queue_head_t *wait_address,
+		poll_table *p);
+static
+struct poll_table_entry *(*poll_get_entry_sym)(struct poll_wqueues *p);
+
+/*
+ * Exclusive poll wait callback, meant to be installed with
+ * init_poll_funcptr(). On symbol lookup failure the wait is skipped
+ * (poll() then cannot sleep on this wait queue; a warning is logged).
+ */
+void wrapper_pollwait_exclusive(struct file *filp,
+		wait_queue_head_t *wait_address,
+		poll_table *p)
+{
+	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
+	struct poll_table_entry *entry;
+
+	if (!poll_get_entry_sym)
+		poll_get_entry_sym = (void *) kallsyms_lookup_name("poll_get_entry");
+	if (!poll_get_entry_sym) {
+		printk(KERN_WARNING "LTTng: poll_get_entry_sym symbol lookup failed.\n");
+		return;
+	}
+	/*
+	 * NOTE(review): the entry fetched here is neither initialized nor
+	 * added to the wait queue with add_wait_queue_exclusive();
+	 * __pollwait() below performs a regular (non-exclusive) wait.
+	 * Confirm the intended "exclusive" semantics.
+	 */
+	entry = poll_get_entry_sym(pwq);
+	(void) entry;
+
+	if (!__pollwait_sym)
+		__pollwait_sym = (void *) kallsyms_lookup_name("__pollwait");
+	if (!__pollwait_sym) {
+		printk(KERN_WARNING "LTTng: __pollwait symbol lookup failed.\n");
+		return;
+	}
+	__pollwait_sym(filp, wait_address, p);
+}
+
+#else
+
+#include <linux/poll.h>
+
+/*
+ * Without KALLSYMS, we need a kernel tree that exports poll_get_entry()
+ * and __pollwait() to GPL modules (see header comment above).
+ */
+extern struct poll_table_entry *poll_get_entry(struct poll_wqueues *p);
+extern void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
+		poll_table *p);
+
+/*
+ * Must match the void prototype declared in wrapper/poll.h; mirrors the
+ * KALLSYMS implementation above using the directly-exported symbols.
+ */
+void wrapper_pollwait_exclusive(struct file *filp,
+		wait_queue_head_t *wait_address,
+		poll_table *p)
+{
+	(void) poll_get_entry(container_of(p, struct poll_wqueues, pt));
+	__pollwait(filp, wait_address, p);
+}
+
+#endif
+#ifndef _LTTNG_WRAPPER_POLL_H
+#define _LTTNG_WRAPPER_POLL_H
+
/*
- * wrapper/poll.h
+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
 *
- * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * wrapper around poll __pollwait and poll_get_entry. Using KALLSYMS to get
+ * their addresses when available, else we need to have a kernel that exports
+ * these functions to GPL modules.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
-#ifndef CONFIG_LIB_RING_BUFFER
#include <linux/poll.h>
-#warning "poll_wait_set_exclusive() is defined as no-op. Will increase LTTng overhead. Please consider using the LTTng kernel tree for better results."
-
-/*
- * Will cause higher overhead when signalling all possible reader threads when a
- * buffer is ready to be consumed.
- */
-#define poll_wait_set_exclusive(poll_table)
+/*
+ * Exclusive poll wait callback; install with init_poll_funcptr() to avoid
+ * waking every reader thread when a buffer becomes ready. Implemented in
+ * wrapper/poll.c.
+ */
+void wrapper_pollwait_exclusive(struct file *filp,
+		wait_queue_head_t *wait_address,
+		poll_table *p);
-#endif
+#endif /* _LTTNG_WRAPPER_POLL_H */