- /* Iterate on subbuffers to recover. */
- for(i_subbuf = first_subbuf % buf->n_subbufs; ; i_subbuf++, i_subbuf %= buf->n_subbufs) {
-         /* commit_seq is the offset in the buffer of the end of the last sequential commit.
-          * Bytes beyond this limit cannot be recovered. This is a free-running counter. */
-         long commit_seq = uatomic_read(&ustbuf->commit_seq[i_subbuf]);
-
-         unsigned long valid_length = buf->subbuf_size;
-         long n_subbufs_order = get_count_order(buf->n_subbufs);
-         long commit_seq_mask = (~0UL >> n_subbufs_order);
-
-         struct ltt_subbuffer_header *header = (struct ltt_subbuffer_header *)((char *)buf->mem+i_subbuf*buf->subbuf_size);
+ /*
+  * Iterate on the subbuffers to recover, including the one the writer
+  * just wrote data into. Use write position - 1, because the write
+  * position points at the next location to be written, not at the
+  * last byte written.
+  */
+ for (cons_off = uatomic_read(&ust_buf->consumed);
+      (long) (SUBBUF_TRUNC(uatomic_read(&ust_buf->offset) - 1, buf)
+              - cons_off) >= 0;
+      cons_off = SUBBUF_ALIGN(cons_off, buf)) {
+         /*
+          * commit_seq is the offset in the buffer of the end of the last
+          * sequential commit. Bytes beyond this limit cannot be recovered.
+          * This is a free-running counter.
+          */
+         unsigned long commit_seq =
+                 uatomic_read(&ust_buf->commit_seq[SUBBUF_INDEX(cons_off, buf)]);
+         struct ltt_subbuffer_header *header =
+                 (struct ltt_subbuffer_header *)((char *) buf->mem
+                         + SUBBUF_INDEX(cons_off, buf) * buf->subbuf_size);
+         unsigned long valid_length;
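For reference, here is a minimal, self-contained sketch of the pattern the new loop relies on. The macro bodies and the buffer geometry below are assumptions for illustration (the real SUBBUF_TRUNC, SUBBUF_ALIGN and SUBBUF_INDEX helpers live in the UST headers and take a buffer argument); what carries over is the loop-bound idiom: the consumed and offset counters are free-running, so casting their difference to a signed long keeps the termination test correct across counter wraparound, and truncating offset - 1 makes the scan include the subbuffer the writer was in the middle of filling.

    #include <stdio.h>

    /*
     * Hypothetical geometry for illustration; both values must be
     * powers of two, as the real ring buffer requires.
     */
    #define SUBBUF_SIZE 4096UL
    #define N_SUBBUFS   8UL

    /* Truncate a free-running offset down to the start of its subbuffer. */
    #define SUBBUF_TRUNC(off) ((off) & ~(SUBBUF_SIZE - 1))
    /* Advance a free-running offset to the start of the next subbuffer. */
    #define SUBBUF_ALIGN(off) (((off) + SUBBUF_SIZE) & ~(SUBBUF_SIZE - 1))
    /* Map a free-running offset to an index within the ring of subbuffers. */
    #define SUBBUF_INDEX(off) (((off) / SUBBUF_SIZE) & (N_SUBBUFS - 1))

    int main(void)
    {
            /*
             * Park the consumed counter just below the wrap point to show
             * that the signed-difference test still terminates correctly:
             * it is valid as long as the two free-running counters are
             * less than LONG_MAX apart.
             */
            unsigned long consumed = -SUBBUF_SIZE;
            unsigned long offset = consumed + 3 * SUBBUF_SIZE + 100; /* mid-write */
            unsigned long cons_off;

            for (cons_off = consumed;
                 (long) (SUBBUF_TRUNC(offset - 1) - cons_off) >= 0;
                 cons_off = SUBBUF_ALIGN(cons_off)) {
                    printf("recover subbuffer index %lu (cons_off = %lu)\n",
                           SUBBUF_INDEX(cons_off), cons_off);
            }
            return 0;
    }

With these assumed values, the loop prints four subbuffer indices (7, 0, 1, 2) even though cons_off wraps past zero partway through; the last subbuffer visited is the partially filled one, matching the "including the one the writer just wrote data into" comment above.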