2 // does not cause event loss
4 // e.g. 3 events, 1 switch, read 1 subbuffer
6 // causes event loss with some reader timings,
7 // e.g. 3 events, 1 switch, 1 event (lost, buffer full), read 1 subbuffer
13 #define SUBBUF_SIZE (BUFSIZE / NR_SUBBUFS)
17 byte commit_count[NR_SUBBUFS];
21 byte retrieve_count[NR_SUBBUFS];
28 // buffer slot in-use bit. Detects racy use (more than a single process
29 // accessing a slot at any given step).
30 bool buffer_use[BUFSIZE];
32 // Proceed to a sub-buffer switch if needed.
33 // Used in a periodical timer interrupt to fill and ship the current subbuffer
34 // to the reader so we can guarantee a steady flow.
35 // Also used as "finalize" operation to complete the last subbuffer after
36 // all writers have finished so the last subbuffer can be read by the reader.
39 byte prev_off, new_off, tmp_commit;
45 size = SUBBUF_SIZE - (prev_off % SUBBUF_SIZE);
46 new_off = prev_off + size;
48 :: new_off - read_off > BUFSIZE ->
49 refcount = refcount - 1;
56 :: prev_off != write_off -> goto cmpxchg_loop
57 :: else -> write_off = new_off;
62 tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
63 commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
65 :: tmp_commit % SUBBUF_SIZE == 0 -> deliver = 1
68 refcount = refcount - 1;
75 // Writes "size" bytes of information in the buffer at the current
76 // "write_off" position and then increment the commit_count of the sub-buffer
77 // the information has been written to.
78 proctype tracer(byte size)
80 byte prev_off, new_off, tmp_commit;
86 new_off = prev_off + size;
90 :: new_off - read_off > BUFSIZE -> goto lost
96 :: prev_off != write_off -> goto cmpxchg_loop
97 :: else -> write_off = new_off;
102 assert(buffer_use[(prev_off + i) % BUFSIZE] == 0);
103 buffer_use[(prev_off + i) % BUFSIZE] = 1;
105 :: i >= size -> break
109 // writing to buffer...
115 buffer_use[(prev_off + i) % BUFSIZE] = 0;
117 :: i >= size -> break
119 tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
120 commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
122 :: tmp_commit % SUBBUF_SIZE == 0 -> deliver = 1;
131 refcount = refcount - 1;
136 // Read the information sub-buffer per sub-buffer when available.
138 // Reads the information as soon as it is ready, or may be delayed by
139 // an asynchronous delivery. Being modeled as a process ensures all cases
140 // (scheduled very quickly or very late, causing event loss) are covered.
141 // Only one reader per buffer (normally ensured by a mutex). This is modeled
142 // by using a single reader process.
147 byte lwrite_off, lcommit_count;
150 :: (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) > 0
151 && commit_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
152 - retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
157 :: i < SUBBUF_SIZE ->
158 assert(buffer_use[(read_off + i) % BUFSIZE] == 0);
159 buffer_use[(read_off + i) % BUFSIZE] = 1;
161 :: i >= SUBBUF_SIZE -> break
164 // reading from buffer...
166 // Since there is only one reader per buffer at any given time,
167 // we don't care about retrieve_count vs read_off ordering :
168 // combined use of retrieve_count and read_off are made atomic by a
173 :: i < SUBBUF_SIZE ->
174 buffer_use[(read_off + i) % BUFSIZE] = 0;
176 :: i >= SUBBUF_SIZE -> break
178 tmp_retrieve = retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
180 retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE] = tmp_retrieve;
181 read_off = read_off + SUBBUF_SIZE;
183 :: read_off >= (NUMPROCS - events_lost) -> break;
187 // Waits for all tracer and switcher processes to finish before finalizing
188 // the buffer. Only after that will the reader be allowed to read the
195 refcount = refcount + 1;
196 run switcher(); // Finalize the last sub-buffer so it can be read.
213 retrieve_count[i] = 0;
215 :: i >= NR_SUBBUFS -> break
222 :: i >= BUFSIZE -> break
229 refcount = refcount + 1;
232 :: i >= NUMPROCS -> break
237 refcount = refcount + 1;
240 :: i >= NUMSWITCH -> break
245 // The writer head must always be greater than or equal to the reader head.
246 assert(write_off - read_off >= 0);
251 commit_sum = commit_sum + commit_count[j];
252 // The commit count of a particular subbuffer must always be higher
253 // or equal to the retrieve_count of this subbuffer.
254 assert(commit_count[j] >= retrieve_count[j]);
256 :: j >= NR_SUBBUFS -> break
258 // The sum of all subbuffer commit counts must always be lower or equal
259 // to the writer head, because space must be reserved before it is
260 // written to and then committed.
261 assert(commit_sum <= write_off);
264 // If we have fewer writers than the buffer space available, we should
266 assert(NUMPROCS + NUMSWITCH > BUFSIZE || events_lost == 0);