* ring_buffer buffers from vmcore, after crash.
*/
lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
- ctx->buf_offset, commit_count,
- ctx->slot_size, handle);
+ offset_end, commit_count, handle);
}
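/*
 * Illustrative note, not part of the patch: with slot_size dropped from
 * lib_ring_buffer_write_commit_counter(), the caller is assumed to have
 * already computed the end offset of the committed slot, presumably as
 *
 *	offset_end = ctx->buf_offset + ctx->slot_size;
 *
 * i.e. the same value the callee used to derive internally from
 * buf_offset + slot_size.
 */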
/**
unsigned long idx,
unsigned long buf_offset,
unsigned long commit_count,
- size_t slot_size,
struct lttng_ust_shm_handle *handle)
{
- unsigned long offset, commit_seq_old;
+ unsigned long commit_seq_old;
if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
return;
- offset = buf_offset + slot_size;
-
/*
* subbuf_offset includes commit_count_mask. We can simply
* compare the offsets within the subbuffer without caring about
* buffer full/empty mismatch because offset is never zero here
* (subbuffer header and record headers have non-zero length).
*/
- if (caa_unlikely(subbuf_offset(offset - commit_count, chan)))
+ if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
return;
commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
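/*
 * Worked example (assumed values, not from the patch): with a 4096-byte
 * sub-buffer and subbuf_offset() masking with (subbuf_size - 1), consider a
 * record ending at buf_offset = 4096 + 128 on the first pass through
 * sub-buffer index 1. Once every byte up to that point has been committed,
 * the hot commit counter reads commit_count = 128, so
 *
 *	subbuf_offset(buf_offset - commit_count, chan)
 *		== (4224 - 128) & 4095 == 0
 *
 * and the commit_seq update above proceeds. If a concurrent writer still
 * holds uncommitted space, commit_count lags behind, the masked difference
 * is non-zero, and the early return skips the update.
 */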
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
commit_count, oldidx, handle, tsc);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
- offsets->old, commit_count,
- config->cb.subbuffer_header_size(),
- handle);
+ offsets->old + config->cb.subbuffer_header_size(),
+ commit_count, handle);
}
/*
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
commit_count, oldidx, handle, tsc);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
- offsets->old, commit_count,
- padding_size, handle);
+ offsets->old + padding_size, commit_count, handle);
}
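/*
 * Illustrative note, not part of the patch: here offsets->old is presumably
 * the end of the data in the old sub-buffer (where the padding starts) and
 * padding_size runs to the sub-buffer boundary, so
 * offsets->old + padding_size is the end offset that the callee previously
 * reconstructed from the separate padding_size argument.
 */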
/*
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
commit_count, beginidx, handle, tsc);
lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
- offsets->begin, commit_count,
- config->cb.subbuffer_header_size(),
- handle);
+ offsets->begin + config->cb.subbuffer_header_size(),
+ commit_count, handle);
}
/*