if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
&& uatomic_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
- //wake_up_interruptible(&buf->read_wait);
- //wake_up_interruptible(&chan->read_wait);
+ int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
+
+ if (wakeup_fd >= 0) {
+ ssize_t ret;
+ /*
+ * Wake up the other end by writing a null
+ * byte into the pipe (non-blocking). If the
+ * pipe is full (EAGAIN), a wakeup is already
+ * pending for the reader, so losing this
+ * byte is harmless.
+ */
+ do {
+ ret = write(wakeup_fd, "", 1);
+ } while (ret == -1 && errno == EINTR);
+ }
}
}
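
On the consumer side, the counterpart of this hunk is to consume the notification bytes once the pipe becomes readable, otherwise the pipe eventually fills up. A minimal sketch of that consumption step, assuming it is called only after poll() reported POLLIN on the wait fd (the read end is left blocking by this patch); the helper name and buffer size are illustrative, not part of the patch:

#include <errno.h>
#include <unistd.h>

/* Sketch only: consume pending wakeup bytes after poll() saw POLLIN.
 * Reads at most sizeof(tmp) bytes; if more are pending, the next
 * poll() returns immediately and this gets called again. */
static void eat_wakeup_bytes(int wait_fd)
{
    char tmp[16];
    ssize_t ret;

    do {
        ret = read(wait_fd, tmp, sizeof(tmp));
    } while (ret == -1 && errno == EINTR);
}
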
int get_subbuf:1; /* Sub-buffer being held by reader */
int switch_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
int read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
+ /* shmp reference to self, used to look up this buffer's wakeup fd */
+ DECLARE_SHMP(struct lib_ring_buffer, self);
} ____cacheline_aligned;
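
The self reference is needed because buf is only ever reached through shmp() translation: a raw struct lib_ring_buffer pointer is only valid inside the process that mapped it, so the buffer instead stores the table-relative reference it can later hand to shm_get_wakeup_fd() in the writer path above. As a rough illustration of the idea only (the actual libringbuffer definitions may differ), a shmp reference can be pictured as an (object index, offset) pair resolved against each process's own object table, which matches the ref->index use in the shm_get_*_fd() helpers added further down:

#include <sys/types.h>    /* ssize_t */

/* Illustrative layout only, not the real libringbuffer definitions. */
struct shm_ref {
    volatile ssize_t index;     /* entry in the shm object table */
    volatile ssize_t offset;    /* byte offset within that object */
};

/* A shmp field carries such a reference rather than a raw pointer. */
#define DECLARE_SHMP(type, name)    union { struct shm_ref _ref; } name
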
static inline
buf = shmp(handle, chanb->buf[i].shmp);
if (!buf)
goto end;
+ set_shmp(buf->self, chanb->buf[i].shmp._ref);
ret = lib_ring_buffer_create(buf, chanb, i,
handle, shmobj);
if (ret)
goto error_fcntl;
}
}
+ /* The write end of the pipe needs to be non-blocking */
+ ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
+ if (ret < 0) {
+ PERROR("fcntl");
+ goto error_fcntl;
+ }
- *obj->wait_fd = *waitfd;
+ /* Copy both pipe ends: the new wakeup path reads obj->wait_fd[1] */
+ obj->wait_fd[0] = waitfd[0];
+ obj->wait_fd[1] = waitfd[1];
/* shm_fd: create shm */
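
Taken together, the setup in this hunk is: create the pipe, make only the write end non-blocking so the tracer's fast path can never sleep inside write(), and record both ends in the shm object. A standalone sketch of that pattern (not code from the tree; error handling is illustrative):

#include <fcntl.h>
#include <unistd.h>

/* Sketch only: fd[0] is the consumer wait end, fd[1] the producer
 * wakeup end, which must never block the writer. */
static int create_wakeup_pipe(int fd[2])
{
    int ret;

    ret = pipe(fd);
    if (ret < 0)
        return ret;
    ret = fcntl(fd[1], F_SETFL, O_NONBLOCK);
    if (ret < 0) {
        close(fd[0]);
        close(fd[1]);
        return ret;
    }
    return 0;
}
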
struct shm_ref zalloc_shm(struct shm_object *obj, size_t len);
void align_shm(struct shm_object *obj, size_t align);
+static inline
+int shm_get_wakeup_fd(struct shm_handle *handle, struct shm_ref *ref)
+{
+ struct shm_object_table *table = handle->table;
+ struct shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ return obj->wait_fd[1];
+}
+
+static inline
+int shm_get_wait_fd(struct shm_handle *handle, struct shm_ref *ref)
+{
+ struct shm_object_table *table = handle->table;
+ struct shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ return obj->wait_fd[0];
+}
+
#endif /* _LIBRINGBUFFER_SHM_H */
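
The split between the two accessors mirrors the two ends of the pipe: the producer resolves wait_fd[1] through shm_get_wakeup_fd() (as in the first hunk of this patch), while a consumer resolves wait_fd[0] through shm_get_wait_fd() and sleeps on it. A sketch of such a consumer wait, using the self reference introduced above; the function itself and its error handling are illustrative, not part of the patch:

#include <errno.h>
#include <poll.h>
#include <unistd.h>

/* Sketch only: block until the producer writes a wakeup byte, then
 * consume one byte so the pipe does not fill up. */
static int wait_for_wakeup(struct shm_handle *handle, struct lib_ring_buffer *buf)
{
    struct pollfd fds;
    char tmp;
    int ret;

    fds.fd = shm_get_wait_fd(handle, &buf->self._ref);
    if (fds.fd < 0)
        return fds.fd;
    fds.events = POLLIN;
    do {
        ret = poll(&fds, 1, -1);
    } while (ret == -1 && errno == EINTR);
    if (ret <= 0)
        return -1;
    (void) read(fds.fd, &tmp, 1);
    return 0;
}
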
.tsc_bits = 32,
.alloc = RING_BUFFER_ALLOC_PER_CPU,
- .sync = RING_BUFFER_SYNC_PER_CPU,
+ .sync = RING_BUFFER_SYNC_GLOBAL,
.mode = RING_BUFFER_MODE_TEMPLATE,
.backend = RING_BUFFER_PAGE,
- .output = RING_BUFFER_SPLICE,
+ .output = RING_BUFFER_MMAP,
.oops = RING_BUFFER_OOPS_CONSISTENCY,
- .ipi = RING_BUFFER_IPI_BARRIER,
- .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
+ .ipi = RING_BUFFER_NO_IPI_BARRIER,
+ .wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
};
static
.sync = RING_BUFFER_SYNC_GLOBAL,
.mode = RING_BUFFER_MODE_TEMPLATE,
.backend = RING_BUFFER_PAGE,
- .output = RING_BUFFER_SPLICE,
+ .output = RING_BUFFER_MMAP,
.oops = RING_BUFFER_OOPS_CONSISTENCY,
- .ipi = RING_BUFFER_IPI_BARRIER,
- .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
+ .ipi = RING_BUFFER_NO_IPI_BARRIER,
+ .wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
};
static
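
These template changes follow from running in user space: the consumer maps the buffers with mmap() rather than splicing them, a process cannot send IPIs or disable preemption (so no CPU-local writer exclusivity and no IPI barrier), and there is no kernel read timer, which is exactly what the pipe-based writer wakeup added by this patch replaces. A hypothetical sanity check tying those choices together; the function and the struct lib_ring_buffer_config spelling are assumptions, only the RING_BUFFER_* enumerators come from the templates above:

#include <assert.h>

/* Illustrative only: verify a UST client config uses the combination
 * the templates above select. */
static void check_ust_client_config(const struct lib_ring_buffer_config *config)
{
    assert(config->sync == RING_BUFFER_SYNC_GLOBAL);
    assert(config->output == RING_BUFFER_MMAP);
    assert(config->ipi == RING_BUFFER_NO_IPI_BARRIER);
    assert(config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER);
}
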