int n_subbufs;
/* size of each subbuffer */
int subbuf_size;
+ /* subbuf size count order */
+ int subbuf_size_order;
+ /* alloc size of all subbuf */
+ int alloc_size;
/* the buffer information struct */
void *bufstruct_mem;
return -1;
}
-static void ltt_relay_print_buffer_errors(struct ust_channel *chan, int cpu);
-
static void close_buf(struct ust_buffer *buf)
{
- struct ust_channel *chan = buf->chan;
- int cpu = buf->cpu;
int result;
result = shmdt(buf->buf_data);
PERROR("shmdt");
}
- free(buf->commit_count);
-
result = close(buf->data_ready_fd_read);
if (result < 0) {
PERROR("close");
if (result < 0 && errno != EBADF) {
PERROR("close");
}
-
- /* FIXME: This spews out errors, are they real?:
- * ltt_relay_print_buffer_errors(chan, cpu); */
}
return 0;
}
-static void ltt_relay_print_subbuffer_errors(
- struct ust_channel *channel,
- long cons_off, int cpu)
-{
- struct ust_buffer *ltt_buf = channel->buf[cpu];
- long cons_idx, commit_count, commit_count_sb, write_offset;
-
- cons_idx = SUBBUF_INDEX(cons_off, channel);
- commit_count = uatomic_read(&ltt_buf->commit_count[cons_idx].cc);
- commit_count_sb = uatomic_read(&ltt_buf->commit_count[cons_idx].cc_sb);
-
- /*
- * No need to order commit_count and write_offset reads because we
- * execute after trace is stopped when there are no readers left.
- */
- write_offset = uatomic_read(&ltt_buf->offset);
- WARN( "LTT : unread channel %s offset is %ld "
- "and cons_off : %ld (cpu %d)\n",
- channel->channel_name, write_offset, cons_off, cpu);
- /* Check each sub-buffer for non filled commit count */
- if (((commit_count - channel->subbuf_size) & channel->commit_count_mask)
- - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) {
- ERR("LTT : %s : subbuffer %lu has non filled "
- "commit count [cc, cc_sb] [%lu,%lu].\n",
- channel->channel_name, cons_idx, commit_count, commit_count_sb);
- }
- ERR("LTT : %s : commit count : %lu, subbuf size %zd\n",
- channel->channel_name, commit_count,
- channel->subbuf_size);
-}
-
-static void ltt_relay_print_errors(struct ust_trace *trace,
- struct ust_channel *channel, int cpu)
-{
- struct ust_buffer *ltt_buf = channel->buf[cpu];
- long cons_off;
-
- /*
- * Can be called in the error path of allocation when
- * trans_channel_data is not yet set.
- */
- if (!channel)
- return;
-
-//ust// for (cons_off = 0; cons_off < rchan->alloc_size;
-//ust// cons_off = SUBBUF_ALIGN(cons_off, rchan))
-//ust// ust_buffers_print_written(ltt_chan, cons_off, cpu);
- for (cons_off = uatomic_read(&ltt_buf->consumed);
- (SUBBUF_TRUNC(uatomic_read(&ltt_buf->offset),
- channel)
- - cons_off) > 0;
- cons_off = SUBBUF_ALIGN(cons_off, channel))
- ltt_relay_print_subbuffer_errors(channel, cons_off, cpu);
-}
-
-static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
-{
- struct ust_trace *trace = channel->trace;
- struct ust_buffer *ltt_buf = channel->buf[cpu];
-
- if (uatomic_read(&ltt_buf->events_lost))
- ERR("channel %s: %ld events lost (cpu %d)",
- channel->channel_name,
- uatomic_read(&ltt_buf->events_lost), cpu);
- if (uatomic_read(&ltt_buf->corrupted_subbuffers))
- ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
- channel->channel_name,
- uatomic_read(&ltt_buf->corrupted_subbuffers), cpu);
-
- ltt_relay_print_errors(trace, channel, cpu);
-}
-
static int map_buf_structs(struct ust_channel *chan)
{
void *ptr;
free(chan->buf_struct_shmids);
free(chan->buf);
-
}
static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
goto close_fifo;
}
+ /* Set subbuffer's information */
+ buf->subbuf_size_order = get_count_order(buf->subbuf_size);
+ buf->alloc_size = buf->subbuf_size * buf->n_subbufs;
+
/* attach memory */
buf->mem = shmat(buf->shmid, NULL, 0);
if(buf->mem == (void *) 0) {
#define LTT_MAGIC_NUMBER 0x00D6B7ED
#define LTT_REV_MAGIC_NUMBER 0xEDB7D600
+
+static void ltt_relay_print_subbuffer_errors(
+ struct buffer_info *buf,
+ long cons_off, int cpu)
+{ /* Diagnostic: dump the commit/offset state of the subbuffer at cons_off. */
+ struct ust_buffer *ust_buf = buf->bufstruct_mem; /* shared control struct (see buffer_info.bufstruct_mem) */
+ long cons_idx, commit_count, commit_count_mask, write_offset;
+
+ cons_idx = SUBBUF_INDEX(cons_off, buf);
+ commit_count = uatomic_read(&ust_buf->commit_seq[cons_idx]); /* per-subbuffer commit sequence counter */
+ commit_count_mask = (~0UL >> get_count_order(buf->n_subbufs)); /* mask off the high-order subbuffer-index bits */
+
+ /*
+ * No need to order commit_count and write_offset reads because we
+ * execute after trace is stopped when there are no readers left.
+ */
+ write_offset = uatomic_read(&ust_buf->offset);
+ WARN( "LTT : unread channel %s offset is %ld "
+ "and cons_off : %ld (cpu %d)\n",
+ buf->channel, write_offset, cons_off, cpu); /* NOTE(review): assumes buf->channel is a name string — confirm against buffer_info */
+ /* Check each sub-buffer for non filled commit count */
+ if (((commit_count - buf->subbuf_size) & commit_count_mask)
+ - (BUFFER_TRUNC(cons_off, buf) >> get_count_order(buf->n_subbufs)) != 0) {
+ ERR("LTT : %s : subbuffer %lu has non filled "
+ "commit count [seq] [%lu].\n",
+ buf->channel, cons_idx, commit_count);
+ }
+ ERR("LTT : %s : commit count : %lu, subbuf size %d\n",
+ buf->channel, commit_count,
+ buf->subbuf_size);
+}
+
+static void ltt_relay_print_errors(struct buffer_info *buf, int cpu)
+{ /* Walk every still-unread subbuffer and report its state. */
+ struct ust_buffer *ust_buf = buf->bufstruct_mem;
+ long cons_off;
+
+ for (cons_off = uatomic_read(&ust_buf->consumed); /* start at the consumer position */
+ (SUBBUF_TRUNC(uatomic_read(&ust_buf->offset), buf)
+ - cons_off) > 0; /* stop at the producer's current subbuffer */
+ cons_off = SUBBUF_ALIGN(cons_off, buf)) /* advance one subbuffer per iteration */
+ ltt_relay_print_subbuffer_errors(buf, cons_off, cpu);
+}
+
+static void ltt_relay_print_buffer_errors(struct buffer_info *buf, int cpu)
+{ /* Report lost-event and corruption counters, then per-subbuffer details. */
+ struct ust_buffer *ust_buf = buf->bufstruct_mem;
+
+ if (uatomic_read(&ust_buf->events_lost)) /* events dropped by the tracer on this buffer */
+ ERR("channel %s: %ld events lost (cpu %d)",
+ buf->channel,
+ uatomic_read(&ust_buf->events_lost), cpu);
+ if (uatomic_read(&ust_buf->corrupted_subbuffers)) /* subbuffers detected as inconsistent */
+ ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
+ buf->channel,
+ uatomic_read(&ust_buf->corrupted_subbuffers), cpu);
+
+ ltt_relay_print_errors(buf, cpu); /* per-subbuffer breakdown of unread data */
+}
+
/* Returns the size of a subbuffer size. This is the size that
* will need to be written to disk.
*
struct ltt_subbuffer_header *header = (struct ltt_subbuffer_header *)((char *)buf->mem+i_subbuf*buf->subbuf_size);
- if((commit_seq & commit_seq_mask) == 0) {
- /* There is nothing to do. */
- /* FIXME: is this needed? */
- break;
- }
-
/* Check if subbuf was fully written. This is from Mathieu's algorithm/paper. */
/* FIXME: not sure data_size = 0xffffffff when the buffer is not full. It might
* take the value of the header size initially */
if(callbacks->on_read_partial_subbuffer)
callbacks->on_read_partial_subbuffer(callbacks, buf, i_subbuf, valid_length);
+ /* Manually increment the consumed offset */
+ /* TODO ybrosseau 2011-03-02: Should only be done if the previous read was successful */
+ uatomic_add(&ustbuf->consumed, buf->subbuf_size);
+
if(i_subbuf == last_subbuf % buf->n_subbufs)
break;
}
+
+ ltt_relay_print_buffer_errors(buf, buf->channel_cpu);
}