size_t offset = ctx->buf_offset;
struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
unsigned long sb_bindex, id;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ void *p;
if (caa_unlikely(!len))
return;
id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = shmp_index(handle, bufb->array, sb_bindex);
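+ /* Give up on the record write if the backend pages cannot be mapped. */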
+ if (caa_unlikely(!rpages))
+ return;
CHAN_WARN_ON(ctx->chan,
config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
* subbuffers.
*/
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
- lib_ring_buffer_do_copy(config,
- shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1)),
- src, len);
+ backend_pages = shmp(handle, rpages->shmp);
+ if (caa_unlikely(!backend_pages))
+ return;
+ p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+ if (caa_unlikely(!p))
+ return;
+ lib_ring_buffer_do_copy(config, p, src, len);
ctx->buf_offset += len;
}
struct lttng_ust_shm_handle *handle)
{
struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
- unsigned long records_unread = 0, sb_bindex, id;
+ unsigned long records_unread = 0, sb_bindex;
unsigned int i;
+ struct channel *chan;
- for (i = 0; i < shmp(handle, bufb->chan)->backend.num_subbuf; i++) {
- id = shmp_index(handle, bufb->buf_wsb, i)->id;
- sb_bindex = subbuffer_id_get_index(config, id);
- pages = shmp_index(handle, bufb->array, sb_bindex);
- records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return 0;
+ for (i = 0; i < chan->backend.num_subbuf; i++) {
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ wsb = shmp_index(handle, bufb->buf_wsb, i);
+ if (!wsb)
+ return 0;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ records_unread += v_read(config, &backend_pages->records_unread);
}
if (config->mode == RING_BUFFER_OVERWRITE) {
- id = bufb->buf_rsb.id;
- sb_bindex = subbuffer_id_get_index(config, id);
- pages = shmp_index(handle, bufb->array, sb_bindex);
- records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ records_unread += v_read(config, &backend_pages->records_unread);
}
return records_unread;
}
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct lttng_ust_lib_ring_buffer_backend *bufb,
unsigned long idx, struct lttng_ust_shm_handle *handle)
{
- unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
- v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
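+ /*
+ * Use the backend pages pointer cached in the reservation context when
+ * available; otherwise look it up from the ring buffer backend.
+ */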
+ backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+ if (caa_unlikely(!backend_pages)) {
+ if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+ return;
+ }
+ v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct lttng_ust_lib_ring_buffer_backend *bufb,
unsigned long idx, struct lttng_ust_shm_handle *handle)
{
struct lttng_ust_shm_handle *handle)
{
unsigned long sb_bindex;
+ struct channel *chan;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- CHAN_WARN_ON(shmp(handle, bufb->chan),
- !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread));
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return;
+ pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
+ if (!pages_shmp)
+ return;
+ backend_pages = shmp(handle, pages_shmp->shmp);
+ if (!backend_pages)
+ return;
+ CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
/* Non-atomic decrement protected by exclusive subbuffer access */
- _v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread);
+ _v_dec(config, &backend_pages->records_unread);
v_inc(config, &bufb->records_read);
}
struct lttng_ust_shm_handle *handle)
{
unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
- return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return 0;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ return v_read(config, &backend_pages->records_commit);
}
/*
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long overruns, sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
- pages = shmp_index(handle, bufb->array, sb_bindex);
- overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread);
- v_set(config, &shmp(handle, pages->shmp)->records_unread,
- v_read(config, &shmp(handle, pages->shmp)->records_commit));
- v_set(config, &shmp(handle, pages->shmp)->records_commit, 0);
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return 0;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ overruns = v_read(config, &backend_pages->records_unread);
+ v_set(config, &backend_pages->records_unread,
+ v_read(config, &backend_pages->records_commit));
+ v_set(config, &backend_pages->records_commit, 0);
return overruns;
}
unsigned long data_size,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
- pages = shmp_index(handle, bufb->array, sb_bindex);
- shmp(handle, pages->shmp)->data_size = data_size;
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return;
+ backend_pages->data_size = data_size;
}
static inline
struct lttng_ust_lib_ring_buffer_backend *bufb,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- pages = shmp_index(handle, bufb->array, sb_bindex);
- return shmp(handle, pages->shmp)->data_size;
+ pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
+ if (!pages_shmp)
+ return 0;
+ backend_pages = shmp(handle, pages_shmp->shmp);
+ if (!backend_pages)
+ return 0;
+ return backend_pages->data_size;
}
static inline
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
- pages = shmp_index(handle, bufb->array, sb_bindex);
- return shmp(handle, pages->shmp)->data_size;
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return 0;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ return backend_pages->data_size;
}
static inline
struct lttng_ust_lib_ring_buffer_backend *bufb,
unsigned long idx, struct lttng_ust_shm_handle *handle)
{
- shmp_index(handle, bufb->buf_cnt, idx)->seq_cnt++;
+ struct lttng_ust_lib_ring_buffer_backend_counts *counts;
+
+ counts = shmp_index(handle, bufb->buf_cnt, idx);
+ if (!counts)
+ return;
+ counts->seq_cnt++;
}
/**
struct lttng_ust_shm_handle *handle)
{
unsigned long id, new_id;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
if (config->mode != RING_BUFFER_OVERWRITE)
return;
* Performing a volatile access to read the sb_pages, because we want to
* read a coherent version of the pointer and the associated noref flag.
*/
- id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return;
+ id = CMM_ACCESS_ONCE(wsb->id);
for (;;) {
/* This check is called on the fast path for each record. */
if (caa_likely(!subbuffer_id_is_noref(config, id))) {
}
new_id = id;
subbuffer_id_clear_noref(config, &new_id);
- new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
+ new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
if (caa_likely(new_id == id))
break;
id = new_id;
unsigned long idx, unsigned long offset,
struct lttng_ust_shm_handle *handle)
{
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct channel *chan;
+
if (config->mode != RING_BUFFER_OVERWRITE)
return;
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return;
/*
* Because ring_buffer_set_noref() is only called by a single thread
* (the one which updated the cc_sb value), there are no concurrent
* subbuffer_set_noref() uses a volatile store to deal with concurrent
* readers of the noref flag.
*/
- CHAN_WARN_ON(shmp(handle, bufb->chan),
- subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id));
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return;
+ CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
/*
* Memory barrier that ensures counter stores are ordered before set
* noref and offset.
*/
cmm_smp_mb();
- subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset);
+ subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}
/**
unsigned long consumed_count,
struct lttng_ust_shm_handle *handle)
{
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
unsigned long old_id, new_id;
+ wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
+ if (caa_unlikely(!wsb))
+ return -EPERM;
+
if (config->mode == RING_BUFFER_OVERWRITE) {
+ struct channel *chan;
+
/*
* Exchange the target writer subbuffer with our own unused
* subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
* old_wpage, because the value read will be confirmed by the
* following cmpxchg().
*/
- old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
+ old_id = wsb->id;
if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
return -EAGAIN;
/*
if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
consumed_count)))
return -EAGAIN;
- CHAN_WARN_ON(shmp(handle, bufb->chan),
- !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
+ chan = shmp(handle, bufb->chan);
+ if (caa_unlikely(!chan))
+ return -EPERM;
+ CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
consumed_count);
- new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
- bufb->buf_rsb.id);
+ new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
if (caa_unlikely(old_id != new_id))
return -EAGAIN;
bufb->buf_rsb.id = new_id;
} else {
/* No page exchange, use the writer page directly */
- bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
+ bufb->buf_rsb.id = wsb->id;
}
return 0;
}
void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
+ struct channel *chan;
+
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return;
lib_ring_buffer_put_subbuf(buf, handle);
- lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
- shmp(handle, buf->backend.chan)), handle);
+ lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot, chan),
+ handle);
}
extern void channel_reset(struct channel *chan);
buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
else
buf = shmp(handle, chan->backend.buf[0].shmp);
+ if (caa_unlikely(!buf))
+ return -EIO;
if (caa_unlikely(uatomic_read(&buf->record_disabled)))
return -EAGAIN;
ctx->buf = buf;
struct commit_counters_hot *cc_hot = shmp_index(handle,
buf->commit_hot, endidx);
+ if (caa_unlikely(!cc_hot))
+ return;
+
/*
* Must count record before incrementing the commit count.
*/
- subbuffer_count_record(config, &buf->backend, endidx, handle);
+ subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
/*
* Order all writes to buffer before the commit count update that will
struct lttng_ust_shm_handle *handle)
{
unsigned long offset, idx, commit_count;
+ struct commit_counters_hot *cc_hot;
CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
/*
* Read offset and commit count in a loop so they are both read
* atomically wrt interrupts. We deal with interrupt concurrency by
do {
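+ /* cc_hot must be re-looked up each iteration: idx depends on the offset read below. */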
offset = v_read(config, &buf->offset);
idx = subbuf_index(offset, chan);
- commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->cc);
+ cc_hot = shmp_index(handle, buf->commit_hot, idx);
+ if (caa_unlikely(!cc_hot))
+ return 0;
+ commit_count = v_read(config, &cc_hot->cc);
} while (offset != v_read(config, &buf->offset));
return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
chanb = &shmp(handle, bufb->chan)->backend;
if (!chanb)
- abort();
+ return;
config = &chanb->config;
num_subbuf_alloc = chanb->num_subbuf;
sb = shmp_index(handle, bufb->buf_wsb, i);
if (!sb)
- abort();
+ return;
sb->id = subbuffer_id(config, 0, 1, i);
}
if (chanb->extra_reader_sb)
sbp = shmp_index(handle, bufb->array, i);
if (!sbp)
- abort();
+ return;
pages = shmp(handle, sbp->shmp);
if (!pages)
- abort();
+ return;
/* Don't reset mmap_offset */
v_set(config, &pages->records_commit, 0);
v_set(config, &pages->records_unread, 0);
const struct lttng_ust_lib_ring_buffer_config *config;
ssize_t orig_len;
struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
unsigned long sb_bindex, id;
void *src;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = shmp_index(handle, bufb->array, sb_bindex);
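+ /* A subbuffer whose pages cannot be mapped reads as zero bytes. */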
+ if (!rpages)
+ return 0;
/*
* Underlying layer should never ask for reads across
* subbuffers.
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- src = shmp_index(handle, shmp(handle, rpages->shmp)->p,
- offset & (chanb->subbuf_size - 1));
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ src = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
if (caa_unlikely(!src))
return 0;
memcpy(dest, src, len);
ssize_t string_len, orig_offset;
char *str;
struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
unsigned long sb_bindex, id;
chanb = &shmp(handle, bufb->chan)->backend;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return -EINVAL;
/*
* Underlying layer should never ask for reads across
* subbuffers.
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- str = shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return -EINVAL;
+ str = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
if (caa_unlikely(!str))
return -EINVAL;
string_len = strnlen(str, len);
struct lttng_ust_shm_handle *handle)
{
struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
struct channel_backend *chanb;
const struct lttng_ust_lib_ring_buffer_config *config;
unsigned long sb_bindex, id;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return NULL;
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return NULL;
+ return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
}
/**
{
size_t sbidx;
struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
struct channel_backend *chanb;
const struct lttng_ust_lib_ring_buffer_config *config;
unsigned long sb_bindex, id;
id = sb->id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return NULL;
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return NULL;
+ return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
}
chan = shmp(handle, buf->backend.chan);
if (!chan)
- abort();
+ return;
config = &chan->backend.config;
/*
* Reset iterator first. It will put the subbuffer if it currently holds
*/
v_set(config, &buf->offset, 0);
for (i = 0; i < chan->backend.num_subbuf; i++) {
- v_set(config, &shmp_index(handle, buf->commit_hot, i)->cc, 0);
- v_set(config, &shmp_index(handle, buf->commit_hot, i)->seq, 0);
- v_set(config, &shmp_index(handle, buf->commit_cold, i)->cc_sb, 0);
+ struct commit_counters_hot *cc_hot;
+ struct commit_counters_cold *cc_cold;
+
+ cc_hot = shmp_index(handle, buf->commit_hot, i);
+ if (!cc_hot)
+ return;
+ cc_cold = shmp_index(handle, buf->commit_cold, i);
+ if (!cc_cold)
+ return;
+ v_set(config, &cc_hot->cc, 0);
+ v_set(config, &cc_hot->seq, 0);
+ v_set(config, &cc_cold->cc_sb, 0);
}
uatomic_set(&buf->consumed, 0);
uatomic_set(&buf->record_disabled, 0);
{
const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
struct channel *chan = caa_container_of(chanb, struct channel, backend);
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct channel *shmp_chan;
+ struct commit_counters_hot *cc_hot;
void *priv = channel_get_private(chan);
size_t subbuf_header_size;
uint64_t tsc;
*/
subbuf_header_size = config->cb.subbuffer_header_size();
v_set(config, &buf->offset, subbuf_header_size);
- subbuffer_id_clear_noref(config, &shmp_index(handle, buf->backend.buf_wsb, 0)->id);
- tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
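+ /* A missing shm mapping fails buffer creation through the existing cleanup path. */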
+ wsb = shmp_index(handle, buf->backend.buf_wsb, 0);
+ if (!wsb) {
+ ret = -EPERM;
+ goto free_chanbuf;
+ }
+ subbuffer_id_clear_noref(config, &wsb->id);
+ shmp_chan = shmp(handle, buf->backend.chan);
+ if (!shmp_chan) {
+ ret = -EPERM;
+ goto free_chanbuf;
+ }
+ tsc = config->cb.ring_buffer_clock_read(shmp_chan);
config->cb.buffer_begin(buf, tsc, 0, handle);
- v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
- v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->seq);
+ cc_hot = shmp_index(handle, buf->commit_hot, 0);
+ if (!cc_hot) {
+ ret = -EPERM;
+ goto free_chanbuf;
+ }
+ v_add(config, subbuf_header_size, &cc_hot->cc);
+ v_add(config, subbuf_header_size, &cc_hot->seq);
if (config->cb.buffer_create) {
ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
shmp(handle, chan->backend.buf[cpu].shmp);
if (!buf)
- abort();
+ goto end;
if (uatomic_read(&buf->active_readers))
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
chan->handle);
shmp(handle, chan->backend.buf[0].shmp);
if (!buf)
- abort();
+ goto end;
if (uatomic_read(&buf->active_readers))
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
chan->handle);
}
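+ /* The early-exit path must still release wakeup_fd_mutex. */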
+end:
pthread_mutex_unlock(&wakeup_fd_mutex);
return;
}
struct lttng_ust_shm_handle *handle)
{
unsigned long consumed_old, consumed_idx, commit_count, write_offset;
+ struct commit_counters_cold *cc_cold;
consumed_old = uatomic_read(&buf->consumed);
consumed_idx = subbuf_index(consumed_old, chan);
- commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
+ cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx);
+ if (!cc_cold)
+ return 0;
+ commit_count = v_read(config, &cc_cold->cc_sb);
/*
* No memory barrier here, since we are only interested
* in a statistically correct polling result. The next poll will
shmp(handle, chan->backend.buf[cpu].shmp);
if (!buf)
- abort();
+ goto end;
if (uatomic_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf,
chan, handle)) {
shmp(handle, chan->backend.buf[0].shmp);
if (!buf)
- abort();
+ goto end;
if (uatomic_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf,
chan, handle)) {
lib_ring_buffer_wakeup(buf, handle);
}
}
+end:
pthread_mutex_unlock(&wakeup_fd_mutex);
}
for_each_possible_cpu(cpu) {
struct lttng_ust_lib_ring_buffer *buf =
shmp(handle, chan->backend.buf[cpu].shmp);
- lib_ring_buffer_print_errors(chan, buf, cpu, handle);
+ if (buf)
+ lib_ring_buffer_print_errors(chan, buf, cpu, handle);
}
} else {
struct lttng_ust_lib_ring_buffer *buf =
shmp(handle, chan->backend.buf[0].shmp);
- lib_ring_buffer_print_errors(chan, buf, -1, handle);
+ if (buf)
+ lib_ring_buffer_print_errors(chan, buf, -1, handle);
}
}
{
struct channel *chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return;
CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
cmm_smp_mb();
uatomic_dec(&buf->active_readers);
unsigned long *consumed, unsigned long *produced,
struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
unsigned long consumed_cur, write_offset;
int finalized;
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return -EPERM;
+ config = &chan->backend.config;
finalized = CMM_ACCESS_ONCE(buf->finalized);
/*
* Read finalized before counters.
struct lttng_ust_shm_handle *handle)
{
struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = shmp(handle, bufb->chan);
+ struct channel *chan;
unsigned long consumed;
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return;
CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
/*
unsigned long consumed,
struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY;
+ struct commit_counters_cold *cc_cold;
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return -EPERM;
+ config = &chan->backend.config;
retry:
finalized = CMM_ACCESS_ONCE(buf->finalized);
/*
cmm_smp_rmb();
consumed_cur = uatomic_read(&buf->consumed);
consumed_idx = subbuf_index(consumed, chan);
- commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
+ cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx);
+ if (!cc_cold)
+ return -EPERM;
+ commit_count = v_read(config, &cc_cold->cc_sb);
/*
* Make sure we read the commit count before reading the buffer
* data and the write offset. Correct consumed offset ordering
struct lttng_ust_shm_handle *handle)
{
struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = shmp(handle, bufb->chan);
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long read_sb_bindex, consumed_idx, consumed;
+ struct channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ unsigned long sb_bindex, consumed_idx, consumed;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return;
+ config = &chan->backend.config;
CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
if (!buf->get_subbuf) {
* Can be below zero if an iterator is used on a snapshot more than
* once.
*/
- read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- v_add(config, v_read(config,
- &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread),
- &bufb->records_read);
- v_set(config, &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread, 0);
+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return;
+ v_add(config, v_read(config, &backend_pages->records_unread),
+ &bufb->records_read);
+ v_set(config, &backend_pages->records_unread, 0);
CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, bufb->buf_rsb.id));
subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
{
const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long cons_idx, commit_count, commit_count_sb;
+ struct commit_counters_hot *cc_hot;
+ struct commit_counters_cold *cc_cold;
cons_idx = subbuf_index(cons_offset, chan);
- commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, cons_idx)->cc);
- commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb);
+ cc_hot = shmp_index(handle, buf->commit_hot, cons_idx);
+ if (!cc_hot)
+ return;
+ cc_cold = shmp_index(handle, buf->commit_cold, cons_idx);
+ if (!cc_cold)
+ return;
+ commit_count = v_read(config, &cc_hot->cc);
+ commit_count_sb = v_read(config, &cc_cold->cc_sb);
if (subbuf_offset(commit_count, chan) != 0)
DBG("ring buffer %s, cpu %d: "
*/
cmm_smp_wmb();
cc_hot = shmp_index(handle, buf->commit_hot, oldidx);
+ if (!cc_hot)
+ return;
v_add(config, config->cb.subbuffer_header_size(),
&cc_hot->cc);
commit_count = v_read(config, &cc_hot->cc);
*/
cmm_smp_wmb();
cc_hot = shmp_index(handle, buf->commit_hot, oldidx);
+ if (!cc_hot)
+ return;
v_add(config, padding_size, &cc_hot->cc);
commit_count = v_read(config, &cc_hot->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
* determine that the subbuffer is full.
*/
cmm_smp_wmb();
- v_add(config, config->cb.subbuffer_header_size(),
- &shmp_index(handle, buf->commit_hot, beginidx)->cc);
cc_hot = shmp_index(handle, buf->commit_hot, beginidx);
+ if (!cc_hot)
+ return;
+ v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
commit_count = v_read(config, &cc_hot->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
if (caa_unlikely(off == 0)) {
unsigned long sb_index, commit_count;
+ struct commit_counters_cold *cc_cold;
/*
* We are performing a SWITCH_FLUSH. There may be concurrent
/* Test new buffer integrity */
sb_index = subbuf_index(offsets->begin, chan);
- commit_count = v_read(config,
- &shmp_index(handle, buf->commit_cold,
- sb_index)->cc_sb);
+ cc_cold = shmp_index(handle, buf->commit_cold, sb_index);
+ if (!cc_cold)
+ return -1;
+ commit_count = v_read(config, &cc_cold->cc_sb);
reserve_commit_diff =
(buf_trunc(offsets->begin, chan)
>> chan->backend.num_subbuf_order)
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
struct switch_offsets offsets;
unsigned long oldidx;
uint64_t tsc;
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return;
+ config = &chan->backend.config;
+
offsets.size = 0;
/*
}
if (caa_unlikely(offsets->switch_new_start)) {
unsigned long sb_index, commit_count;
+ struct commit_counters_cold *cc_cold;
/*
* We are typically not filling the previous buffer completely.
* are not seen reordered when updated by another CPU.
*/
cmm_smp_rmb();
- commit_count = v_read(config,
- &shmp_index(handle, buf->commit_cold,
- sb_index)->cc_sb);
+ cc_cold = shmp_index(handle, buf->commit_cold, sb_index);
+ if (!cc_cold)
+ return -1;
+ commit_count = v_read(config, &cc_cold->cc_sb);
/* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
cmm_smp_rmb();
if (caa_unlikely(offset_cmp != v_read(config, &buf->offset))) {
buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
else
buf = shmp(handle, chan->backend.buf[0].shmp);
+ if (!buf)
+ return -EIO;
ctx->buf = buf;
offsets.size = 0;
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
- if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
- v_set(config, &shmp_index(handle, buf->commit_hot, idx)->seq, commit_count);
+ struct commit_counters_hot *cc_hot;
+
+ if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
+ return;
+ cc_hot = shmp_index(handle, buf->commit_hot, idx);
+ if (!cc_hot)
+ return;
+ v_set(config, &cc_hot->seq, commit_count);
}
/*
{
unsigned long old_commit_count = commit_count - chan->backend.subbuf_size;
+ struct commit_counters_cold *cc_cold;
/*
* If we succeeded at updating cc_sb below, we are the subbuffer
* commit_cold cc_sb update.
*/
cmm_smp_wmb();
- if (caa_likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
+ cc_cold = shmp_index(handle, buf->commit_cold, idx);
+ if (!cc_cold)
+ return;
+ if (caa_likely(v_cmpxchg(config, &cc_cold->cc_sb,
old_commit_count, old_commit_count + 1)
== old_commit_count)) {
/*
*/
cmm_smp_mb();
/* End of exclusive subbuffer access */
- v_set(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
- commit_count);
+ v_set(config, &cc_cold->cc_sb, commit_count);
/*
* Order later updates to reserve count after
* the commit cold cc_sb update.