/* Truncate an offset down to a whole-buffer boundary.
 *
 * The free-running offset counts bytes since tracing started; masking with
 * ~(total_buffer_size - 1) clears the offset-within-buffer bits, leaving the
 * byte count of complete buffer wraps.
 *
 * Assumes subbuf_size * n_subbufs is a power of two (required for the mask
 * trick to be equivalent to rounding down to a multiple of the buffer size).
 * NOTE: evaluates bufinfo twice — pass side-effect-free arguments only.
 */
#define USTD_BUFFER_TRUNC(offset, bufinfo) \
	((offset) & (~(((bufinfo)->subbuf_size*(bufinfo)->n_subbufs)-1)))
11 void finish_consuming_dead_subbuffer(struct buffer_info
*buf
)
13 struct ltt_channel_buf_struct
*ltt_buf
= buf
->bufstruct_mem
;
15 long write_offset
= local_read(<t_buf
->offset
);
16 long consumed_offset
= atomic_long_read(<t_buf
->consumed
);
20 DBG("processing died buffer");
21 DBG("consumed offset is %ld", consumed_offset
);
22 DBG("write offset is %ld", write_offset
);
24 /* First subbuf that we need to consume now. It is not modulo'd.
25 * Consumed_offset is the next byte to consume. */
26 long first_subbuf
= consumed_offset
/ buf
->subbuf_size
;
27 /* Last subbuf that we need to consume now. It is not modulo'd.
28 * Write_offset is the next place to write so write_offset-1 is the
29 * last place written. */
30 long last_subbuf
= (write_offset
- 1) / buf
->subbuf_size
;
32 DBG("first_subbuf=%d", first_subbuf
);
33 DBG("last_subbuf=%d", last_subbuf
);
35 if(last_subbuf
- first_subbuf
>= buf
->n_subbufs
) {
36 DBG("an overflow has occurred, nothing can be recovered");
40 /* Iterate on subbuffers to recover. */
41 for(i_subbuf
=first_subbuf
; ; i_subbuf
++, i_subbuf
%= buf
->n_subbufs
) {
43 /* commit_seq is the offset in the buffer of the end of the last sequential commit.
44 * Bytes beyond this limit cannot be recovered. This is a free-running counter. */
45 long commit_seq
= local_read(<t_buf
->commit_seq
[i_subbuf
]);
47 unsigned long valid_length
= buf
->subbuf_size
;
48 long n_subbufs_order
= get_count_order(buf
->n_subbufs
);
49 long commit_seq_mask
= (~0UL >> n_subbufs_order
);
51 struct ltt_subbuffer_header
*header
= (struct ltt_subbuffer_header
*)((char *)buf
->mem
+i_subbuf
*buf
->subbuf_size
);
53 if((commit_seq
& commit_seq_mask
) == 0) {
54 /* There is nothing to do. */
55 /* FIXME: is this needed? */
59 /* Check if subbuf was fully written. This is from Mathieu's algorithm/paper. */
60 if (((commit_seq
- buf
->subbuf_size
) & commit_seq_mask
)
61 - (USTD_BUFFER_TRUNC(consumed_offset
, buf
) >> n_subbufs_order
)
63 /* If it was, we only check the lost_size. This is the lost padding at the end of
65 valid_length
= (unsigned long)buf
->subbuf_size
- header
->lost_size
;
68 /* If the subbuffer was not fully written, then we don't check lost_size because
69 * it hasn't been written yet. Instead we check commit_seq and use it to choose
70 * a value for lost_size. The viewer will need this value when parsing.
73 valid_length
= commit_seq
& (buf
->subbuf_size
-1);
74 header
->lost_size
= buf
->subbuf_size
-valid_length
;
75 assert(i_subbuf
== (last_subbuf
% buf
->n_subbufs
));
79 patient_write(buf
->file_fd
, buf
->mem
+ i_subbuf
* buf
->subbuf_size
, valid_length
);
81 /* pad with empty bytes */
82 tmp
= malloc(buf
->subbuf_size
-valid_length
);
83 memset(tmp
, 0, buf
->subbuf_size
-valid_length
);
84 patient_write(buf
->file_fd
, tmp
, buf
->subbuf_size
-valid_length
);
87 if(i_subbuf
== last_subbuf
% buf
->n_subbufs
)
/* NOTE(review): source truncated here — the remainder of
 * finish_consuming_dead_subbuffer() was not captured by the scrape. */