/*
* Only flush buffers periodically if readers are active.
*/
- if (uatomic_read(&buf->active_readers))
+ if (uatomic_read(&buf->active_readers)
+     || uatomic_read(&buf->active_shadow_readers))
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);
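/*
 * Shadow readers (readers attached through a separate shadow shm mapping)
 * keep the periodic flush active as well.
 */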
//TODO timers
CHAN_WARN_ON(chan, !buf->backend.allocated);
- if (uatomic_read(&buf->active_readers)
+ if ((uatomic_read(&buf->active_readers)
+       || uatomic_read(&buf->active_shadow_readers))
&& lib_ring_buffer_poll_deliver(config, buf, chan)) {
//TODO
//wake_up_interruptible(&buf->read_wait);
//channel_backend_unregister_notifiers(&chan->backend);
}
-static void channel_free(struct channel *chan, struct shm_handle *handle)
+static void channel_free(struct channel *chan, struct shm_handle *handle,
+ int shadow)
{
int ret;
- channel_backend_free(&chan->backend, handle);
+ if (!shadow)
+ channel_backend_free(&chan->backend, handle);
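/*
 * A shadow handle does not own the channel backend; only the local
 * shm mapping table and the handle itself are torn down below.
 */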
/* chan is freed by shm teardown */
shm_object_table_destroy(handle->table);
free(handle);
}
static
-void channel_release(struct channel *chan, struct shm_handle *handle)
+void channel_release(struct channel *chan, struct shm_handle *handle,
+ int shadow)
{
- channel_free(chan, handle);
+ channel_free(chan, handle, shadow);
}
/**
* They should release their handle at that point. Returns the private
* data pointer.
*/
-void *channel_destroy(struct channel *chan, struct shm_handle *handle)
+void *channel_destroy(struct channel *chan, struct shm_handle *handle,
+ int shadow)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
void *priv;
int cpu;
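/*
 * For a shadow handle, skip the teardown below: only the local
 * mapping is released, and there is no private data to return.
 */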
+ if (shadow) {
+ channel_release(chan, handle, shadow);
+ return NULL;
+ }
+
channel_unregister_notifiers(chan, handle);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
* descriptor directly. No need to refcount.
*/
priv = chan->backend.priv;
- channel_release(chan, handle);
+ channel_release(chan, handle, shadow);
return priv;
}
memory_map_size);
return shmp(handle, chan->backend.buf[0].shmp);
} else {
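/* Guard against an out-of-range cpu index before looking up the per-cpu buffer. */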
+ if (cpu >= num_possible_cpus())
+ return NULL;
ref = &chan->backend.buf[cpu].shmp._ref;
shm_get_object_data(handle, ref, shm_fd, wait_fd,
memory_map_size);
}
int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+ struct shm_handle *handle,
+ int shadow)
{
struct channel *chan = shmp(handle, buf->backend.chan);
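/*
 * Shadow readers are tracked separately from regular readers, so each
 * kind of handle gets its own exclusive-access flag.
 */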
+ if (shadow) {
+ if (uatomic_cmpxchg(&buf->active_shadow_readers, 0, 1) != 0)
+ return -EBUSY;
+ cmm_smp_mb();
+ return 0;
+ }
if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
return -EBUSY;
cmm_smp_mb();
}
void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+ struct shm_handle *handle,
+ int shadow)
{
struct channel *chan = shmp(handle, buf->backend.chan);
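/* Releasing a shadow reader only drops the shadow flag. */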
+ if (shadow) {
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_shadow_readers) != 1);
+ cmm_smp_mb();
+ uatomic_dec(&buf->active_shadow_readers);
+ return;
+ }
CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
cmm_smp_mb();
uatomic_dec(&buf->active_readers);
struct channel *chan = shmp(handle, bufb->chan);
unsigned long consumed;
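/* Either a regular reader or a shadow reader must hold the buffer here. */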
- CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+ && uatomic_read(&buf->active_shadow_readers) != 1);
/*
* Only push the consumed value forward.
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long read_sb_bindex, consumed_idx, consumed;
- CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+ && uatomic_read(&buf->active_shadow_readers) != 1);
if (!buf->get_subbuf) {
/*