Add array offsets before checking the target pointer range.

Introduce shmp_index(), which adds the array index (scaled by the
element size) to the shm reference offset before the bounds check
against the object's memory_map_size, and convert the
shmp(handle, ref)[idx] array accesses in the ring buffer code to use
it. shmp(handle, ref) is now shorthand for shmp_index(handle, ref, 0).
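The idea, as a minimal stand-alone sketch (simplified types; the names
shm_obj_sketch and shm_index_checked are hypothetical, not code from
this tree), is to fold base + index * element size together and compare
the result against the mapping size before the pointer is formed, so an
out-of-range index yields NULL instead of a pointer past the mapping:

	#include <stddef.h>

	/* Simplified stand-in for a shm object: a mapping and its size. */
	struct shm_obj_sketch {
		char *memory_map;
		size_t memory_map_size;
	};

	/*
	 * Add the element offset to the base offset *before* the range
	 * check, mirroring what _shmp_offset()/shmp_index() do below.
	 */
	static inline char *shm_index_checked(struct shm_obj_sketch *obj,
					      size_t base, size_t idx,
					      size_t elem_size)
	{
		size_t off = base + idx * elem_size;

		if (off >= obj->memory_map_size)
			return NULL;	/* out-of-range index caught here */
		return &obj->memory_map[off];
	}

shmp_index() performs the sizeof(*ptr) scaling in the macro itself, so
callers pass plain array indexes.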
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
offset &= chanb->buf_size - 1;
sbidx = offset >> chanb->subbuf_size_order;
- id = shmp(handle, bufb->buf_wsb)[sbidx].id;
+ id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = &shmp(handle, bufb->array)[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
CHAN_WARN_ON(ctx->chan,
config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
*/
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
lib_ring_buffer_do_copy(config,
- shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1)),
+ shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1)),
src, len);
ctx->buf_offset += len;
}
unsigned int i;
for (i = 0; i < shmp(handle, bufb->chan)->backend.num_subbuf; i++) {
- id = shmp(handle, bufb->buf_wsb)[i].id;
+ id = shmp_index(handle, bufb->buf_wsb, i)->id;
sb_bindex = subbuffer_id_get_index(config, id);
- pages = &shmp(handle, bufb->array)[sb_bindex];
+ pages = shmp_index(handle, bufb->array, sb_bindex);
records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
}
if (config->mode == RING_BUFFER_OVERWRITE) {
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
- pages = &shmp(handle, bufb->array)[sb_bindex];
+ pages = shmp_index(handle, bufb->array, sb_bindex);
records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
}
return records_unread;
{
unsigned long sb_bindex;
- sb_bindex = subbuffer_id_get_index(config, shmp(handle, bufb->buf_wsb)[idx].id);
- v_inc(config, &shmp(handle, (shmp(handle, bufb->array)[sb_bindex]).shmp)->records_commit);
+ sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+ v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}
/*
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
CHAN_WARN_ON(shmp(handle, bufb->chan),
- !v_read(config, &shmp(handle, (shmp(handle, bufb->array)[sb_bindex]).shmp)->records_unread));
+ !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread));
/* Non-atomic decrement protected by exclusive subbuffer access */
- _v_dec(config, &shmp(handle, (shmp(handle, bufb->array)[sb_bindex]).shmp)->records_unread);
+ _v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread);
v_inc(config, &bufb->records_read);
}
{
unsigned long sb_bindex;
- sb_bindex = subbuffer_id_get_index(config, shmp(handle, bufb->buf_wsb)[idx].id);
- return v_read(config, &shmp(handle, (shmp(handle, bufb->array)[sb_bindex]).shmp)->records_commit);
+ sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+ return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}
/*
struct lib_ring_buffer_backend_pages_shmp *pages;
unsigned long overruns, sb_bindex;
- sb_bindex = subbuffer_id_get_index(config, shmp(handle, bufb->buf_wsb)[idx].id);
- pages = &shmp(handle, bufb->array)[sb_bindex];
+ sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+ pages = shmp_index(handle, bufb->array, sb_bindex);
overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread);
v_set(config, &shmp(handle, pages->shmp)->records_unread,
v_read(config, &shmp(handle, pages->shmp)->records_commit));
struct lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
- sb_bindex = subbuffer_id_get_index(config, shmp(handle, bufb->buf_wsb)[idx].id);
- pages = &shmp(handle, bufb->array)[sb_bindex];
+ sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+ pages = shmp_index(handle, bufb->array, sb_bindex);
shmp(handle, pages->shmp)->data_size = data_size;
}
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- pages = &shmp(handle, bufb->array)[sb_bindex];
+ pages = shmp_index(handle, bufb->array, sb_bindex);
return shmp(handle, pages->shmp)->data_size;
}
struct lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
- sb_bindex = subbuffer_id_get_index(config, shmp(handle, bufb->buf_wsb)[idx].id);
- pages = &shmp(handle, bufb->array)[sb_bindex];
+ sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+ pages = shmp_index(handle, bufb->array, sb_bindex);
return shmp(handle, pages->shmp)->data_size;
}
* Performing a volatile access to read the sb_pages, because we want to
* read a coherent version of the pointer and the associated noref flag.
*/
- id = CMM_ACCESS_ONCE(shmp(handle, bufb->buf_wsb)[idx].id);
+ id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
for (;;) {
/* This check is called on the fast path for each record. */
if (likely(!subbuffer_id_is_noref(config, id))) {
}
new_id = id;
subbuffer_id_clear_noref(config, &new_id);
- new_id = uatomic_cmpxchg(&shmp(handle, bufb->buf_wsb)[idx].id, id, new_id);
+ new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
if (likely(new_id == id))
break;
id = new_id;
* readers of the noref flag.
*/
CHAN_WARN_ON(shmp(handle, bufb->chan),
- subbuffer_id_is_noref(config, shmp(handle, bufb->buf_wsb)[idx].id));
+ subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id));
/*
* Memory barrier that ensures counter stores are ordered before set
* noref and offset.
*/
cmm_smp_mb();
- subbuffer_id_set_noref_offset(config, &shmp(handle, bufb->buf_wsb)[idx].id, offset);
+ subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset);
}
/**
* old_wpage, because the value read will be confirmed by the
* following cmpxchg().
*/
- old_id = shmp(handle, bufb->buf_wsb)[consumed_idx].id;
+ old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
if (unlikely(!subbuffer_id_is_noref(config, old_id)))
return -EAGAIN;
/*
!subbuffer_id_is_noref(config, bufb->buf_rsb.id));
subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
consumed_count);
- new_id = uatomic_cmpxchg(&shmp(handle, bufb->buf_wsb)[consumed_idx].id, old_id,
+ new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
bufb->buf_rsb.id);
if (unlikely(old_id != new_id))
return -EAGAIN;
bufb->buf_rsb.id = new_id;
} else {
/* No page exchange, use the writer page directly */
- bufb->buf_rsb.id = shmp(handle, bufb->buf_wsb)[consumed_idx].id;
+ bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
}
return 0;
}
*/
cmm_smp_wmb();
- v_add(config, ctx->slot_size, &shmp(handle, buf->commit_hot)[endidx].cc);
+ v_add(config, ctx->slot_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
/*
* commit count read can race with concurrent OOO commit count updates.
* count reaches back the reserve offset for a specific sub-buffer,
* which is completely independent of the order.
*/
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[endidx].cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
commit_count, endidx, handle);
struct shm_handle *handle)
{
if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
- v_set(config, &shmp(handle, buf->commit_hot)[idx].seq, commit_count);
+ v_set(config, &shmp_index(handle, buf->commit_hot, idx)->seq, commit_count);
}
static inline
consumed_old = uatomic_read(&buf->consumed);
consumed_idx = subbuf_index(consumed_old, chan);
- commit_count = v_read(config, &shmp(handle, buf->commit_cold)[consumed_idx].cc_sb);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
/*
* No memory barrier here, since we are only interested
* in a statistically correct polling result. The next poll will
do {
offset = v_read(config, &buf->offset);
idx = subbuf_index(offset, chan);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[idx].cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->cc);
} while (offset != v_read(config, &buf->offset));
return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
* The subbuffer size is at least 2 bytes (minimum size: 1 page).
* This guarantees that old_commit_count + 1 != commit_count.
*/
- if (likely(v_cmpxchg(config, &shmp(handle, buf->commit_cold)[idx].cc_sb,
+ if (likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
old_commit_count, old_commit_count + 1)
== old_commit_count)) {
/*
*/
cmm_smp_mb();
/* End of exclusive subbuffer access */
- v_set(config, &shmp(handle, buf->commit_cold)[idx].cc_sb,
+ v_set(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
commit_count);
lib_ring_buffer_vmcore_check_deliver(config, buf,
commit_count, idx, handle);
if (unlikely(subbuf_offset(offset - commit_count, chan)))
return;
- commit_seq_old = v_read(config, &shmp(handle, buf->commit_hot)[idx].seq);
+ commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
while ((long) (commit_seq_old - commit_count) < 0)
- commit_seq_old = v_cmpxchg(config, &shmp(handle, buf->commit_hot)[idx].seq,
+ commit_seq_old = v_cmpxchg(config, &shmp_index(handle, buf->commit_hot, idx)->seq,
commit_seq_old, commit_count);
}
set_shmp(shmp(handle, bufb->array)[i].shmp,
zalloc_shm(shmobj,
sizeof(struct lib_ring_buffer_backend_pages)));
- if (!shmp(handle, shmp(handle, bufb->array)[i].shmp))
+ if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
goto free_array;
}
goto free_array;
for (i = 0; i < num_subbuf; i++)
- shmp(handle, bufb->buf_wsb)[i].id = subbuffer_id(config, 0, 1, i);
+ shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
/* Assign read-side subbuffer table */
if (extra_reader_sb)
ref.offset = bufb->memory_map._ref.offset;
ref.offset += i * subbuf_size;
- set_shmp(shmp(handle, shmp(handle, bufb->array)[i].shmp)->p,
+ set_shmp(shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->p,
ref);
if (config->output == RING_BUFFER_MMAP) {
- shmp(handle, shmp(handle, bufb->array)[i].shmp)->mmap_offset = mmap_offset;
+ shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->mmap_offset = mmap_offset;
mmap_offset += subbuf_size;
}
}
num_subbuf_alloc++;
for (i = 0; i < chanb->num_subbuf; i++)
- shmp(handle, bufb->buf_wsb)[i].id = subbuffer_id(config, 0, 1, i);
+ shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
if (chanb->extra_reader_sb)
bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
num_subbuf_alloc - 1);
for (i = 0; i < num_subbuf_alloc; i++) {
/* Don't reset mmap_offset */
- v_set(config, &shmp(handle, shmp(handle, bufb->array)[i].shmp)->records_commit, 0);
- v_set(config, &shmp(handle, shmp(handle, bufb->array)[i].shmp)->records_unread, 0);
- shmp(handle, shmp(handle, bufb->array)[i].shmp)->data_size = 0;
+ v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_commit, 0);
+ v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_unread, 0);
+ shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->data_size = 0;
/* Don't reset backend page and virt addresses */
}
/* Don't reset num_pages_per_subbuf, cpu, allocated */
return 0;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = &shmp(handle, bufb->array)[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
/*
* Underlying layer should never ask for reads across
* subbuffers.
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- memcpy(dest, shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1)), len);
+ memcpy(dest, shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1)), len);
return orig_len;
}
orig_offset = offset;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = &shmp(handle, bufb->array)[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
/*
* Underlying layer should never ask for reads across
* subbuffers.
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- str = (char *)shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+ str = shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1));
string_len = strnlen(str, len);
if (dest && len) {
memcpy(dest, str, string_len);
offset &= chanb->buf_size - 1;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = &shmp(handle, bufb->array)[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- return shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+ return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1));
}
/**
offset &= chanb->buf_size - 1;
sbidx = offset >> chanb->subbuf_size_order;
- id = shmp(handle, bufb->buf_wsb)[sbidx].id;
+ id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = &shmp(handle, bufb->array)[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- return shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+ return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1));
}
*/
v_set(config, &buf->offset, 0);
for (i = 0; i < chan->backend.num_subbuf; i++) {
- v_set(config, &shmp(handle, buf->commit_hot)[i].cc, 0);
- v_set(config, &shmp(handle, buf->commit_hot)[i].seq, 0);
- v_set(config, &shmp(handle, buf->commit_cold)[i].cc_sb, 0);
+ v_set(config, &shmp_index(handle, buf->commit_hot, i)->cc, 0);
+ v_set(config, &shmp_index(handle, buf->commit_hot, i)->seq, 0);
+ v_set(config, &shmp_index(handle, buf->commit_cold, i)->cc_sb, 0);
}
uatomic_set(&buf->consumed, 0);
uatomic_set(&buf->record_disabled, 0);
*/
subbuf_header_size = config->cb.subbuffer_header_size();
v_set(config, &buf->offset, subbuf_header_size);
- subbuffer_id_clear_noref(config, &shmp(handle, buf->backend.buf_wsb)[0].id);
+ subbuffer_id_clear_noref(config, &shmp_index(handle, buf->backend.buf_wsb, 0)->id);
tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
config->cb.buffer_begin(buf, tsc, 0, handle);
- v_add(config, subbuf_header_size, &shmp(handle, buf->commit_hot)[0].cc);
+ v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
if (config->cb.buffer_create) {
ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
cmm_smp_rmb();
consumed_cur = uatomic_read(&buf->consumed);
consumed_idx = subbuf_index(consumed, chan);
- commit_count = v_read(config, &shmp(handle, buf->commit_cold)[consumed_idx].cc_sb);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
/*
* Make sure we read the commit count before reading the buffer
* data and the write offset. Correct consumed offset ordering
*/
read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
v_add(config, v_read(config,
- &shmp(handle, shmp(handle, bufb->array)[read_sb_bindex].shmp)->records_unread),
+ &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread),
&bufb->records_read);
- v_set(config, &shmp(handle, shmp(handle, bufb->array)[read_sb_bindex].shmp)->records_unread, 0);
+ v_set(config, &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread, 0);
CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, bufb->buf_rsb.id));
subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
unsigned long cons_idx, commit_count, commit_count_sb;
cons_idx = subbuf_index(cons_offset, chan);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[cons_idx].cc);
- commit_count_sb = v_read(config, &shmp(handle, buf->commit_cold)[cons_idx].cc_sb);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, cons_idx)->cc);
+ commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb);
if (subbuf_offset(commit_count, chan) != 0)
ERRMSG("ring buffer %s, cpu %d: "
*/
cmm_smp_wmb();
v_add(config, config->cb.subbuffer_header_size(),
- &shmp(handle, buf->commit_hot)[oldidx].cc);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[oldidx].cc);
+ &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
commit_count, oldidx, handle);
* determine that the subbuffer is full.
*/
cmm_smp_wmb();
- v_add(config, padding_size, &shmp(handle, buf->commit_hot)[oldidx].cc);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[oldidx].cc);
+ v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
commit_count, oldidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
*/
cmm_smp_wmb();
v_add(config, config->cb.subbuffer_header_size(),
- &shmp(handle, buf->commit_hot)[beginidx].cc);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[beginidx].cc);
+ &shmp_index(handle, buf->commit_hot, beginidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
commit_count, beginidx, handle);
* determine that the subbuffer is full.
*/
cmm_smp_wmb();
- v_add(config, padding_size, &shmp(handle, buf->commit_hot)[endidx].cc);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[endidx].cc);
+ v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
commit_count, endidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
(buf_trunc(offsets->begin, chan)
>> chan->backend.num_subbuf_order)
- ((unsigned long) v_read(config,
- &shmp(handle, buf->commit_cold)[sb_index].cc_sb)
+ &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
& chan->commit_count_mask);
if (likely(reserve_commit_diff == 0)) {
/* Next subbuffer not being written to. */
* both the index and offset with known boundaries.
*/
static inline
-char *_shmp(struct shm_object_table *table, struct shm_ref *ref)
+char *_shmp_offset(struct shm_object_table *table, struct shm_ref *ref,
+ size_t offset)
{
struct shm_object *obj;
- size_t index, offset;
+ size_t index, ref_offset;
index = (size_t) ref->index;
if (unlikely(index >= table->allocated_len))
return NULL;
obj = &table->objects[index];
- offset = (size_t) ref->offset;
- if (unlikely(offset >= obj->memory_map_size))
+ ref_offset = (size_t) ref->offset;
+ ref_offset += offset;
+ if (unlikely(ref_offset >= obj->memory_map_size))
return NULL;
- return &obj->memory_map[offset];
+ return &obj->memory_map[ref_offset];
}
-#define shmp(handle, ref) \
+#define shmp_index(handle, ref, offset) \
({ \
__typeof__((ref)._type) ____ptr_ret; \
- ____ptr_ret = (__typeof__(____ptr_ret)) _shmp((handle)->table, &(ref)._ref); \
+ ____ptr_ret = (__typeof__(____ptr_ret)) _shmp_offset((handle)->table, &(ref)._ref, ((offset) * sizeof(*____ptr_ret))); \
____ptr_ret; \
})
+#define shmp(handle, ref) shmp_index(handle, ref, 0)
+
static inline
void _set_shmp(struct shm_ref *ref, struct shm_ref src)
{