libringbuffer/ring_buffer_backend.c
/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <urcu/arch.h>

#include "ust/core.h"

#include <ust/ringbuffer-config.h>
#include "backend.h"
#include "frontend.h"
#include "smp.h"
#include "shm.h"

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 * @shm_header: shared memory header
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb,
				     struct shm_header *shm_header)
{
	struct channel_backend *chanb = &shmp(bufb->chan)->backend;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	unsigned long i;

	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb)
		num_subbuf_alloc++;

	/* Align the entire buffer backend data on PAGE_SIZE */
	align_shm(shm_header, PAGE_SIZE);
	set_shmp(bufb->array, zalloc_shm(shm_header,
			sizeof(*bufb->array) * num_subbuf_alloc));
	if (unlikely(!shmp(bufb->array)))
		goto array_error;

	/*
	 * This is the largest element (the buffer pages) which needs to
	 * be aligned on PAGE_SIZE.
	 */
	align_shm(shm_header, PAGE_SIZE);
	set_shmp(bufb->memory_map, zalloc_shm(shm_header,
			subbuf_size * num_subbuf_alloc));
	if (unlikely(!shmp(bufb->memory_map)))
		goto memory_map_error;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		align_shm(shm_header, __alignof__(struct lib_ring_buffer_backend_pages));
		set_shmp(shmp(bufb->array)[i],
			 zalloc_shm(shm_header,
				sizeof(struct lib_ring_buffer_backend_pages)));
		if (!shmp(bufb->array)[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	align_shm(shm_header, __alignof__(struct lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_wsb, zalloc_shm(shm_header,
			sizeof(struct lib_ring_buffer_backend_subbuffer)
			* num_subbuf));
	if (unlikely(!shmp(bufb->buf_wsb)))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		shmp(bufb->buf_wsb)[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		set_shmp(shmp(bufb->array)[i]->p,
			 &shmp(bufb->memory_map)[i * subbuf_size]);
		if (config->output == RING_BUFFER_MMAP) {
			shmp(bufb->array)[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}
	/*
	 * Align the end of each buffer backend data on PAGE_SIZE, to
	 * behave like an array which contains elements that need to be
	 * aligned on PAGE_SIZE.
	 */
	align_shm(shm_header, PAGE_SIZE);

	return 0;

free_array:
	/* bufb->array[i] will be freed by shm teardown */
memory_map_error:
	/* bufb->array will be freed by shm teardown */
array_error:
	return -ENOMEM;
}
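
/*
 * Layout example (illustrative arithmetic only): with subbuf_size = 4096,
 * num_subbuf = 4 and extra_reader_sb = 1, num_subbuf_alloc is 5. The shm
 * area then holds, in allocation order: a 5-entry backend pages array, the
 * 5 * 4096 = 20480-byte memory map (one region per allocated subbuffer,
 * including the reader's spare), five lib_ring_buffer_backend_pages
 * descriptors, and a 4-entry write-side subbuffer table, each aligned as
 * requested by the align_shm() calls above.
 */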

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu,
				   struct shm_header *shm_header)
{
	const struct lib_ring_buffer_config *config = chanb->config;

	set_shmp(&bufb->chan, caa_container_of(chanb, struct channel, backend));
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb,
						shm_header);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	/* bufb->buf_wsb will be freed by shm teardown */
	/* bufb->array[i] will be freed by shm teardown */
	/* bufb->array will be freed by shm teardown */
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &shmp(bufb->chan)->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		shmp(bufb->buf_wsb)[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &shmp(bufb->array)[i]->records_commit, 0);
		v_set(config, &shmp(bufb->array)[i]->records_unread, 0);
		shmp(bufb->array)[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset
 * on each buffer when it calls channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 * @shm_header: shared memory header
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu (or global, depending on the client configuration)
 * channel buffers in shared memory, using the sizes and attributes
 * specified.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf,
			 struct shm_header *shm_header)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	if (!(subbuf_size && num_subbuf))
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and the subbuffer size are
	 * powers of 2.
	 */
	CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
	CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
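	/*
	 * Overwrite (flight recorder) mode needs one extra subbuffer: the
	 * reader exchanges its spare subbuffer for the one it wants to
	 * read, so the writer never has to wait for the reader (design
	 * note; the exchange itself is implemented by the frontend).
	 */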
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strncpy(chanb->name, name, NAME_MAX);
	chanb->name[NAME_MAX - 1] = '\0';
	chanb->config = config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		struct lib_ring_buffer *buf;
		size_t alloc_size;

		/* Allocating the buffer per-cpu structures */
		align_shm(shm_header, __alignof__(struct lib_ring_buffer));
		alloc_size = sizeof(struct lib_ring_buffer);
		buf = zalloc_shm(shm_header, alloc_size * num_possible_cpus());
		if (!buf)
			goto end;
		set_shmp(chanb->buf, buf);

		/*
		 * We need to allocate for all possible cpus.
		 */
		for_each_possible_cpu(i) {
			ret = lib_ring_buffer_create(&shmp(chanb->buf)[i],
						     chanb, i, shm_header);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
	} else {
		struct lib_ring_buffer *buf;
		size_t alloc_size;

		align_shm(shm_header, __alignof__(struct lib_ring_buffer));
		alloc_size = sizeof(struct lib_ring_buffer);
		buf = zalloc_shm(shm_header, alloc_size);
		if (!buf)
			goto end;
		set_shmp(chanb->buf, buf);
		ret = lib_ring_buffer_create(shmp(chanb->buf), chanb, -1,
					     shm_header);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = &shmp(chanb->buf)[i];

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
	}
	/* We only free the buffer data upon shm teardown */
end:
	return -ENOMEM;
}
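
/*
 * Usage sketch (illustrative only: "chan", "client_config" and the
 * shm_header setup are assumed to come from the client and shm layers):
 *
 *	ret = channel_backend_init(&chan->backend, "my_chan",
 *				   &client_config, NULL, 4096, 4,
 *				   shm_header);
 *	if (ret)
 *		return ret;
 *
 * Both 4096 (subbuf_size, at least PAGE_SIZE) and 4 (num_subbuf) must be
 * powers of 2.
 */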

/**
 * channel_backend_free - destroy the channel backend
 * @chanb: the channel backend
 *
 * Destroys all channel buffers. The backing shared memory itself is only
 * released at shm teardown.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = &shmp(chanb->buf)[i];

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
	} else {
		struct lib_ring_buffer *buf = shmp(chanb->buf);

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
	}
	/* We only free the buffer data upon shm teardown */
}

/**
 * lib_ring_buffer_read - read data from the ring buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &shmp(bufb->chan)->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	ssize_t orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;

	if (unlikely(!len))
		return 0;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp(bufb->array)[sb_bindex];
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	/* Copy from the offset within the subbuffer's backing memory. */
	memcpy(dest, shmp(rpages->p) + (offset & (chanb->subbuf_size - 1)), len);
	return orig_len;
}
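
/*
 * Read-path example (illustrative arithmetic only): with buf_size = 16384
 * and subbuf_size = 4096, a read at offset = 9300 copies from the
 * reader-owned subbuffer (buf_rsb), starting at byte 9300 & 4095 = 1108
 * within that subbuffer's backing memory. The get_subbuf/put_subbuf
 * protection mentioned above is provided by the frontend.
 */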

/**
 * lib_ring_buffer_read_cstr - read a C-style string from the ring buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length (as found in the buffer, which can exceed
 * what fits in @dest). The copy is truncated and null-terminated to fit
 * @dest. Should be protected by get_subbuf/put_subbuf.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &shmp(bufb->chan)->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	ssize_t string_len;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp(bufb->array)[sb_bindex];
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	str = (char *)shmp(rpages->p) + (offset & (chanb->subbuf_size - 1));
	string_len = strnlen(str, len);
	if (dest && len) {
		size_t copy_len;

		/* Truncate the copy to leave room for the final NUL. */
		copy_len = ((size_t) string_len < len) ? string_len : len - 1;
		memcpy(dest, str, copy_len);
		((char *)dest)[copy_len] = '\0';
	}
	return string_len;
}
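
/*
 * Truncation example (illustrative only): for a 12-character string in the
 * buffer and len = 8, lib_ring_buffer_read_cstr() copies 7 characters plus
 * a terminating NUL into dest and returns 8 (strnlen() capped at len),
 * which lets the caller detect the truncation.
 */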

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it never crosses a page boundary, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &shmp(bufb->chan)->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp(bufb->array)[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return shmp(rpages->p) + (offset & (chanb->subbuf_size - 1));
}

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &shmp(bufb->chan)->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	id = shmp(bufb->buf_wsb)[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp(bufb->array)[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return shmp(rpages->p) + (offset & (chanb->subbuf_size - 1));
}
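
/*
 * Address-lookup example (illustrative arithmetic only): with
 * buf_size = 16384 and subbuf_size = 4096 (subbuf_size_order = 12),
 * offset = 9300 gives sbidx = 9300 >> 12 = 2, so the returned address is
 * byte 9300 & 4095 = 1108 within the backing memory of whichever
 * allocated subbuffer currently backs write-side slot 2.
 */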