/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#include <unistd.h>
#include <urcu/arch.h>
#include <limits.h>
#include <string.h>

#include <lttng/align.h>
#include <lttng/ringbuffer-config.h>

#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "smp.h"
#include "shm.h"

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
				     struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb,
				     struct lttng_ust_shm_handle *handle,
				     struct shm_object *shmobj)
{
	struct channel_backend *chanb;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	unsigned long i;
	long page_size;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return -EINVAL;

	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb)
		num_subbuf_alloc++;

	page_size = LTTNG_UST_PAGE_SIZE;
	if (page_size <= 0)
		goto page_size_error;

	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	set_shmp(bufb->array, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp)
			* num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->array)))
		goto array_error;

	/*
	 * This is the largest element (the buffer pages) which needs to
	 * be aligned on page size.
	 */
	align_shm(shmobj, page_size);
	set_shmp(bufb->memory_map, zalloc_shm(shmobj,
			subbuf_size * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->memory_map)))
		goto memory_map_error;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
		set_shmp(shmp_index(handle, bufb->array, i)->shmp,
			zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
		if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
			* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
		goto free_array;

	for (i = 0; i < num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

		sb = shmp_index(handle, bufb->buf_wsb, i);
		if (!sb)
			goto free_array;
		sb->id = subbuffer_id(config, 0, 1, i);
	}

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
					num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
	set_shmp(bufb->buf_cnt, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_counts)
			* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_cnt)))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
		struct lttng_ust_lib_ring_buffer_backend_pages *pages;
		struct shm_ref ref;

		ref.index = bufb->memory_map._ref.index;
		ref.offset = bufb->memory_map._ref.offset;
		ref.offset += i * subbuf_size;

		sbp = shmp_index(handle, bufb->array, i);
		if (!sbp)
			goto free_array;
		pages = shmp(handle, sbp->shmp);
		if (!pages)
			goto free_array;
		set_shmp(pages->p, ref);
		if (config->output == RING_BUFFER_MMAP) {
			pages->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}
	return 0;

free_wsb:
	/* bufb->buf_wsb will be freed by shm teardown */
free_array:
	/* bufb->array[i] will be freed by shm teardown */
memory_map_error:
	/* bufb->array will be freed by shm teardown */
array_error:
page_size_error:
	return -ENOMEM;
}

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	set_shmp(bufb->chan, handle->chan._ref);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
			chanb->num_subbuf,
			chanb->extra_reader_sb,
			handle, shmobj);
}

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long num_subbuf_alloc;
	unsigned long i;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return;
	config = &chanb->config;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

		sb = shmp_index(handle, bufb->buf_wsb, i);
		if (!sb)
			return;
		sb->id = subbuffer_id(config, 0, 1, i);
	}
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
					num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
		struct lttng_ust_lib_ring_buffer_backend_pages *pages;

		sbp = shmp_index(handle, bufb->array, i);
		if (!sbp)
			return;
		pages = shmp(handle, sbp->shmp);
		if (!pages)
			return;
		/* Don't reset mmap_offset */
		v_set(config, &pages->records_commit, 0);
		v_set(config, &pages->records_unread, 0);
		pages->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @subbuf_size: size of sub-buffers (> page size, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 * @handle: shared memory handle
 * @stream_fds: stream file descriptors.
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size, size_t num_subbuf,
			 struct lttng_ust_shm_handle *handle,
			 const int *stream_fds)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	int i;
	int ret;
	size_t shmsize = 0, num_subbuf_alloc;
	long page_size;

	if (!name)
		return -EPERM;

	page_size = LTTNG_UST_PAGE_SIZE;
	if (page_size <= 0) {
		return -EINVAL;
	}
	/* Check that the subbuffer size is larger than a page. */
	if (subbuf_size < page_size)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2, and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;

	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strncpy(chanb->name, name, NAME_MAX);
	chanb->name[NAME_MAX - 1] = '\0';
	memcpy(&chanb->config, config, sizeof(*config));

	/*
	 * Compute the per-buffer shm object size. The alignment/size pairs
	 * below must match the align_shm()/zalloc_shm() sequence performed
	 * at buffer creation time, so every object fits in the estimate.
	 */
	/* Per-cpu buffer size: control (prior to backend) */
	shmsize = lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer);
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_hot));
	shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_cold));
	shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
	/* Sampled timestamp end */
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(uint64_t));
	shmsize += sizeof(uint64_t) * num_subbuf;

	/* Per-cpu buffer size: backend */
	/* num_subbuf + 1 is the worst case */
	num_subbuf_alloc = num_subbuf + 1;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
	shmsize += lttng_ust_offset_align(shmsize, page_size);
	shmsize += subbuf_size * num_subbuf_alloc;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_counts) * num_subbuf;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		struct lttng_ust_lib_ring_buffer *buf;
		/*
		 * We need to allocate for all possible cpus.
		 */
		for_each_possible_cpu(i) {
			struct shm_object *shmobj;

			shmobj = shm_object_table_alloc(handle->table, shmsize,
					SHM_OBJECT_SHM, stream_fds[i], i);
			if (!shmobj)
				goto end;
			align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
			set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
			buf = shmp(handle, chanb->buf[i].shmp);
			if (!buf)
				goto end;
			set_shmp(buf->self, chanb->buf[i].shmp._ref);
			ret = lib_ring_buffer_create(buf, chanb, i,
					handle, shmobj);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
	} else {
		struct shm_object *shmobj;
		struct lttng_ust_lib_ring_buffer *buf;

		shmobj = shm_object_table_alloc(handle->table, shmsize,
				SHM_OBJECT_SHM, stream_fds[0], -1);
		if (!shmobj)
			goto end;
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
		set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
		buf = shmp(handle, chanb->buf[0].shmp);
		if (!buf)
			goto end;
		set_shmp(buf->self, chanb->buf[0].shmp._ref);
		ret = lib_ring_buffer_create(buf, chanb, -1,
				handle, shmobj);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	/* We only free the buffer data upon shm teardown */
end:
	return -ENOMEM;
}
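
/*
 * Example (illustrative sketch, not part of this file): channel_backend_init()
 * is normally reached through the frontend's channel creation path rather
 * than called directly. Assuming a caller that already owns a channel, a shm
 * handle and one stream fd per possible CPU (all hypothetical names here):
 *
 *	ret = channel_backend_init(&chan->backend, "my_channel",
 *			&client_config, subbuf_size, num_subbuf,
 *			handle, stream_fds);
 *	if (ret)
 *		return ret;
 *
 * subbuf_size and num_subbuf must be nonzero powers of 2, with subbuf_size
 * at least one page, or the call fails with -EINVAL.
 */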

/**
 * channel_backend_free - destroy the channel
 * @chanb: channel backend
 * @handle: shared memory handle
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle)
{
	/* SHM teardown takes care of everything */
}

/**
 * lib_ring_buffer_read - read data from the ring buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	ssize_t orig_len;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	unsigned long sb_bindex, id;
	void *src;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return 0;
	config = &chanb->config;
	orig_len = len;
	offset &= chanb->buf_size - 1;

	if (caa_unlikely(!len))
		return 0;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	src = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!src))
		return 0;
	memcpy(dest, src, len);
	return orig_len;
}
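
/*
 * Example (illustrative sketch): copying an event payload out of the
 * read-side subbuffer, between the frontend's get_subbuf()/put_subbuf().
 * `buf`, `read_offset` and `handle` are assumed to come from the consumer:
 *
 *	char payload[256];
 *
 *	if (lib_ring_buffer_read(&buf->backend, read_offset, payload,
 *			sizeof(payload), handle) != sizeof(payload))
 *		return -EINVAL;	(short read: offset/len unmapped)
 */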

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	ssize_t string_len, orig_offset;
	char *str;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	unsigned long sb_bindex, id;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return -EINVAL;
	config = &chanb->config;
	if (caa_unlikely(!len))
		return -EINVAL;
	offset &= chanb->buf_size - 1;
	orig_offset = offset;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return -EINVAL;
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return -EINVAL;
	str = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!str))
		return -EINVAL;
	string_len = strnlen(str, len);
	if (dest && len) {
		memcpy(dest, str, string_len);
		/* Terminate after the copied bytes, not at dest[0]. */
		((char *)dest)[string_len] = '\0';
	}
	offset += string_len;
	return offset - orig_offset;
}
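
/*
 * Example (illustrative sketch): extracting a nul-terminated string field.
 * The destination buffer must keep room for the trailing '\0', so pass one
 * byte less than its size:
 *
 *	char name[NAME_MAX + 1];
 *	int string_len;
 *
 *	string_len = lib_ring_buffer_read_cstr(&buf->backend, read_offset,
 *			name, sizeof(name) - 1, handle);
 *	if (string_len < 0)
 *		return string_len;	(-EINVAL on error)
 */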

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than
 * a page size.
 */
void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
					  size_t offset,
					  struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long sb_bindex, id;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return NULL;
	config = &chanb->config;
	offset &= chanb->buf_size - 1;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return NULL;
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return NULL;
	return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
}
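
/*
 * Example (illustrative sketch): a consumer-side client can map the current
 * read-side subbuffer header before parsing a packet. The header layout
 * (`struct packet_header` here) is client-defined, hence hypothetical:
 *
 *	struct packet_header *header;
 *
 *	header = lib_ring_buffer_read_offset_address(&buf->backend, 0, handle);
 *	if (!header)
 *		return -EINVAL;
 */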

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t offset,
				     struct lttng_ust_shm_handle *handle)
{
	size_t sbidx;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return NULL;
	config = &chanb->config;
	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	sb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (!sb)
		return NULL;
	id = sb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return NULL;
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return NULL;
	return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
}
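
/*
 * Example (illustrative sketch): the write-side counterpart is typically used
 * to fill in a freshly reserved subbuffer header, e.g. from a client's
 * buffer_begin callback. `write_offset` (a frontend-provided offset at the
 * start of the subbuffer) and `struct packet_header` are hypothetical:
 *
 *	struct packet_header *header;
 *
 *	header = lib_ring_buffer_offset_address(&buf->backend, write_offset,
 *			handle);
 *	if (header)
 *		header->content_size = 0;
 */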