/* Copyright (C) 2009 Pierre-Marc Fournier
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>
#include <byteswap.h>
#include <stdlib.h>
#include <string.h>

#include "buffers.h"
#include "tracer.h"
#include "ustd.h"
#include "usterr.h"

/* Rounds a free-running offset down to a multiple of the total buffer size
 * (subbuf_size * n_subbufs), i.e. strips the position within the buffer.
 * Only works if the total buffer size is a power of two. */
#define USTD_BUFFER_TRUNC(offset, bufinfo) \
	((offset) & (~(((bufinfo)->subbuf_size*(bufinfo)->n_subbufs)-1)))
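
/*
 * Example: with subbuf_size == 4096 and n_subbufs == 4 (16384 bytes total),
 * USTD_BUFFER_TRUNC(20000, bufinfo) == 16384, i.e. the free-running offset
 * is rounded down to the start of its current pass over the buffer.
 */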
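/* A subbuffer header begins with LTT_MAGIC_NUMBER; LTT_REV_MAGIC_NUMBER is
 * its byte-swapped form, seen when the trace was produced on a machine of
 * the opposite endianness. */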
#define LTT_MAGIC_NUMBER 0x00D6B7ED
#define LTT_REV_MAGIC_NUMBER 0xEDB7D600

/* Returns the on-disk size of a subbuffer, in bytes (the header's sb_size
 * field, byte-swapped if necessary). This is the number of bytes that must
 * be written to disk for this subbuffer.
 *
 * @subbuf: pointer to the beginning of the subbuffer (i.e. the beginning of
 * its header)
 *
 * Returns (size_t)-1 if the header does not contain a known magic number.
 */
size_t subbuffer_data_size(void *subbuf)
{
	struct ltt_subbuffer_header *header = subbuf;
	int reverse;
	u32 data_size;

	if(header->magic_number == LTT_MAGIC_NUMBER) {
		reverse = 0;
	}
	else if(header->magic_number == LTT_REV_MAGIC_NUMBER) {
		reverse = 1;
	}
	else {
		/* Unknown magic number: this is not a valid subbuffer header. */
		return (size_t)-1;
	}

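	/* sb_size is the on-disk size of the subbuffer (valid data plus padding),
	 * stored in the header in the producer's byte order. */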
	data_size = header->sb_size;
	if(reverse)
		data_size = bswap_32(data_size);

	return data_size;
}
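
/*
 * Usage sketch (hypothetical caller): a consumer that wants to copy one
 * subbuffer to disk could do something like
 *
 *	size_t size = subbuffer_data_size(subbuf_start);
 *	if(size == (size_t)-1)
 *		return;		// not a recognizable subbuffer header
 *	patient_write(outfd, subbuf_start, size);
 *
 * where subbuf_start and outfd are hypothetical names for the start of the
 * subbuffer and the output file descriptor.
 */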

void finish_consuming_dead_subbuffer(struct buffer_info *buf)
{
	struct ust_buffer *ustbuf = buf->bufstruct_mem;

	long write_offset = local_read(&ustbuf->offset);
	long consumed_offset = atomic_long_read(&ustbuf->consumed);

	long i_subbuf;

	DBG("processing dead buffer");
	DBG("consumed offset is %ld", consumed_offset);
	DBG("write offset is %ld", write_offset);

	/* First subbuffer to consume now. This index is free-running (not taken
	 * modulo n_subbufs). consumed_offset is the offset of the next byte to
	 * consume. */
	long first_subbuf = consumed_offset / buf->subbuf_size;
	/* Last subbuffer to consume now, also free-running. write_offset is the
	 * next place that will be written, so write_offset - 1 is the last byte
	 * written. */
	long last_subbuf = (write_offset - 1) / buf->subbuf_size;
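	/* Example: with subbuf_size == 4096, consumed_offset == 8192 and
	 * write_offset == 13000, first_subbuf == 2 and last_subbuf == 3. */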

	DBG("first_subbuf=%ld", first_subbuf);
	DBG("last_subbuf=%ld", last_subbuf);

	if(last_subbuf - first_subbuf >= buf->n_subbufs) {
		DBG("an overflow has occurred, nothing can be recovered");
		return;
	}

	/* Iterate over the subbuffers to recover, from first_subbuf to
	 * last_subbuf inclusive, wrapping around the ring of n_subbufs. */
	for(i_subbuf = first_subbuf % buf->n_subbufs; ; i_subbuf++, i_subbuf %= buf->n_subbufs) {
		void *tmp;
		/* commit_seq is the offset in the buffer of the end of the last
		 * sequential commit. Bytes beyond this limit cannot be recovered.
		 * This is a free-running counter. */
		long commit_seq = local_read(&ustbuf->commit_seq[i_subbuf]);

		unsigned long valid_length = buf->subbuf_size;
		long n_subbufs_order = get_count_order(buf->n_subbufs);
		long commit_seq_mask = (~0UL >> n_subbufs_order);

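		/* header points at the start of subbuffer i_subbuf within the
		 * buffer memory. */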
		struct ltt_subbuffer_header *header = (struct ltt_subbuffer_header *)
			((char *)buf->mem + i_subbuf * buf->subbuf_size);

		int pad_size;

		if((commit_seq & commit_seq_mask) == 0) {
			/* There is nothing to do. FIXME: is this needed? */
			break;
		}

		/* Check if subbuf was fully written. This is from Mathieu's
		 * algorithm/paper. */
		if (((commit_seq - buf->subbuf_size) & commit_seq_mask)
		    - (USTD_BUFFER_TRUNC(consumed_offset, buf) >> n_subbufs_order) == 0
		    && header->data_size != 0xffffffff && header->sb_size != 0xffffffff) {
			/* The subbuffer was fully written: data_size is the amount of
			 * valid data at the beginning of the subbuffer. */
			valid_length = header->data_size;
		}
		else {
			/* The subbuffer was not fully written, so data_size has not
			 * been filled in yet. Derive the amount of valid data from
			 * commit_seq instead and store it in the header, because the
			 * viewer will need it when parsing. */

			valid_length = commit_seq & (buf->subbuf_size-1);
			header->data_size = valid_length;
			header->sb_size = PAGE_ALIGN(valid_length);
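			/* Only the last (in-progress) subbuffer can be partially written. */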
			assert(i_subbuf == (last_subbuf % buf->n_subbufs));
		}

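		/* Write the valid part of the subbuffer to the trace file. */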
		patient_write(buf->file_fd, buf->mem + i_subbuf * buf->subbuf_size, valid_length);

		/* Pad with zeros up to the next page boundary so the subbuffer keeps
		 * its aligned on-disk size. */
		pad_size = PAGE_ALIGN(valid_length) - valid_length;
		if(pad_size) {
			tmp = malloc(pad_size);
			if(tmp == NULL) {
				DBG("cannot allocate %d bytes for padding, stopping recovery", pad_size);
				break;
			}
			memset(tmp, 0, pad_size);
			patient_write(buf->file_fd, tmp, pad_size);
			free(tmp);
		}

		if(i_subbuf == last_subbuf % buf->n_subbufs)
			break;
	}
}
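
/*
 * Usage sketch (hypothetical caller): when the daemon notices that the traced
 * application has died, it could flush whatever is recoverable from each of
 * the application's buffers, e.g.
 *
 *	if(app_is_dead)		// hypothetical condition
 *		finish_consuming_dead_subbuffer(buf);
 *
 * after which the recoverable subbuffers have been appended to buf->file_fd.
 */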