/*
* lib_ring_buffer_switch_new_end: finish switching current subbuffer
*
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves.
+ * Calls subbuffer_set_data_size() to set the data size of the current
+ * sub-buffer. We do not need to perform check_deliver nor commit here,
+ * since this task will be done by the "commit" of the event for which
+ * we are currently doing the space reservation.
*/
static
void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
				    struct channel *chan,
				    struct switch_offsets *offsets,
				    u64 tsc)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long endidx = subbuf_index(offsets->end - 1, chan);
- unsigned long commit_count, padding_size, data_size;
+	unsigned long endidx, data_size;
+
+	/* offsets->end - 1 is the last byte written to this sub-buffer. */
+	endidx = subbuf_index(offsets->end - 1, chan);
data_size = subbuf_offset(offsets->end - 1, chan) + 1;
- padding_size = chan->backend.subbuf_size - data_size;
subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
-
- /*
- * Order all writes to buffer before the commit count update that will
- * determine that the subbuffer is full.
- */
- if (config->ipi == RING_BUFFER_IPI_BARRIER) {
- /*
- * Must write slot data before incrementing commit count. This
- * compiler barrier is upgraded into a smp_mb() by the IPI sent
- * by get_subbuf().
- */
- barrier();
- } else
- smp_wmb();
- v_add(config, padding_size, &buf->commit_hot[endidx].cc);
- commit_count = v_read(config, &buf->commit_hot[endidx].cc);
- lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
- commit_count, endidx);
- lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
- offsets->end, commit_count,
- padding_size);
}
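/*
 * Illustrative sketch, not part of the patch: a stand-alone toy model of
 * the delegated delivery described in the comment above. switch_new_end()
 * only records the data size; the commit of the event whose reservation
 * triggered the switch brings the commit count up to the sub-buffer size
 * and performs the delivery check. All toy_* names are hypothetical, not
 * LTTng APIs, and the sizes are made up for the example.
 */
#include <stdio.h>

#define TOY_SUBBUF_SIZE 64UL

struct toy_subbuf {
	unsigned long commit_count;	/* bytes committed so far */
	unsigned long data_size;	/* payload bytes, stamped at switch time */
};

/* Deliver to readers once every reserved byte has been committed. */
static void toy_check_deliver(struct toy_subbuf *sb)
{
	if (sb->commit_count == TOY_SUBBUF_SIZE)
		printf("delivered: %lu bytes of data\n", sb->data_size);
}

/* Counterpart of lib_ring_buffer_switch_new_end(): bookkeeping only. */
static void toy_switch_new_end(struct toy_subbuf *sb, unsigned long data_size)
{
	sb->data_size = data_size;	/* no commit, no delivery check here */
}

/* The event's own commit performs the check the removed code duplicated. */
static void toy_commit(struct toy_subbuf *sb, unsigned long len)
{
	sb->commit_count += len;
	toy_check_deliver(sb);
}

int main(void)
{
	struct toy_subbuf sb = { 0, 0 };

	toy_commit(&sb, 40);		/* earlier events in this sub-buffer */
	toy_switch_new_end(&sb, 64);	/* event ends exactly at the boundary */
	toy_commit(&sb, 24);		/* reserving event's commit delivers */
	return 0;
}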
/*