/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#define _LGPL_SOURCE
#include <stddef.h>
#include <unistd.h>
#include <string.h>	/* memcpy, strncpy, strnlen */
#include <errno.h>	/* EINVAL, ENOMEM, EPERM */
#include <limits.h>	/* NAME_MAX */
#include <urcu/arch.h>

#include <lttng/align.h>
#include <lttng/ringbuffer-context.h>

#include "ringbuffer-config.h"
#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "smp.h"
#include "shm.h"
#include "ust-compat.h"

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 * @handle: shared memory handle
 * @shmobj: shared memory object backing this buffer
 */
static
int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
				     struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb,
				     struct lttng_ust_shm_handle *handle,
				     struct shm_object *shmobj)
{
	struct channel_backend *chanb;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	unsigned long i;
	long page_size;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return -EINVAL;

	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;
	if (extra_reader_sb)
		num_subbuf_alloc++;

	page_size = LTTNG_UST_PAGE_SIZE;
	if (page_size <= 0)
		goto page_size_error;

	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	set_shmp(bufb->array, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->array)))
		goto array_error;

	/*
	 * This is the largest element (the buffer pages) which needs to
	 * be aligned on page size.
	 */
	align_shm(shmobj, page_size);
	set_shmp(bufb->memory_map, zalloc_shm(shmobj,
			subbuf_size * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->memory_map)))
		goto memory_map_error;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
		set_shmp(shmp_index(handle, bufb->array, i)->shmp,
			zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
		if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
			* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
		goto free_array;

	for (i = 0; i < num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

		sb = shmp_index(handle, bufb->buf_wsb, i);
		if (!sb)
			goto free_array;
		sb->id = subbuffer_id(config, 0, 1, i);
	}

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
					num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
	set_shmp(bufb->buf_cnt, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_counts)
			* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_cnt)))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
		struct lttng_ust_lib_ring_buffer_backend_pages *pages;
		struct shm_ref ref;

		ref.index = bufb->memory_map._ref.index;
		ref.offset = bufb->memory_map._ref.offset;
		ref.offset += i * subbuf_size;

		sbp = shmp_index(handle, bufb->array, i);
		if (!sbp)
			goto free_array;
		pages = shmp(handle, sbp->shmp);
		if (!pages)
			goto free_array;
		set_shmp(pages->p, ref);
		if (config->output == RING_BUFFER_MMAP) {
			pages->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}
	return 0;

free_wsb:
	/* bufb->buf_wsb will be freed by shm teardown */
free_array:
	/* bufb->array[i] will be freed by shm teardown */
memory_map_error:
	/* bufb->array will be freed by shm teardown */
array_error:
page_size_error:
	return -ENOMEM;
}
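
/*
 * Layout sketch (illustration, not upstream code): within one shm_object,
 * a single buffer backend ends up roughly as
 *
 *	[ array: num_subbuf_alloc pages_shmp references ]
 *	[ memory_map: subbuf_size * num_subbuf_alloc bytes, page aligned ]
 *	[ num_subbuf_alloc backend_pages descriptors ]
 *	[ buf_wsb: num_subbuf write-side subbuffer ids ]
 *	[ buf_cnt: num_subbuf packet counter entries ]
 *
 * with array[i]->shmp->p pointing i * subbuf_size bytes into memory_map.
 * The extra subbuffer allocated when extra_reader_sb is set gives the
 * reader a subbuffer of its own to exchange against the writer's in
 * overwrite mode.
 */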

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	set_shmp(bufb->chan, handle->chan._ref);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
			chanb->num_subbuf,
			chanb->extra_reader_sb,
			handle, shmobj);
}
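
/*
 * Note (assumption): this is the backend half of buffer creation; the
 * frontend's lib_ring_buffer_create(), invoked from channel_backend_init()
 * below, is expected to call it once per stream buffer after the enclosing
 * shm_object has been sized for the layout sketched above.
 */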

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return;
	config = &chanb->config;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

		sb = shmp_index(handle, bufb->buf_wsb, i);
		if (!sb)
			return;
		sb->id = subbuffer_id(config, 0, 1, i);
	}
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
					num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
		struct lttng_ust_lib_ring_buffer_backend_pages *pages;

		sbp = shmp_index(handle, bufb->array, i);
		if (!sbp)
			return;
		pages = shmp(handle, sbp->shmp);
		if (!pages)
			return;
		/* Don't reset mmap_offset */
		v_set(config, &pages->records_commit, 0);
		v_set(config, &pages->records_unread, 0);
		pages->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}
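
/*
 * Note (assumption for illustration): subbuffer_id(config, offset, count,
 * index) packs a buffer offset, a reference count and a subbuffer index
 * into a single word, so resetting every write-side id to (0, 1, i)
 * restores the initial writer-owns-subbuffer-i mapping established at
 * allocation time.
 */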

/*
 * The frontend is responsible for also calling lib_ring_buffer_backend_reset
 * for each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
			struct lttng_ust_lib_ring_buffer_channel, backend);
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
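
/*
 * Sketch of the contract above (hypothetical frontend code, not part of
 * this file): a full channel reset pairs this call with a per-buffer
 * backend reset, e.g.
 *
 *	channel_backend_reset(&chan->backend);
 *	for each stream buffer buf of chan:
 *		lib_ring_buffer_backend_reset(&buf->backend, handle);
 */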

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @subbuf_size: size of sub-buffers (> page size, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 * @handle: shared memory handle
 * @stream_fds: stream file descriptors.
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size, size_t num_subbuf,
			 struct lttng_ust_shm_handle *handle,
			 const int *stream_fds)
{
	struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
			struct lttng_ust_lib_ring_buffer_channel, backend);
	int i, ret;
	size_t shmsize = 0, num_subbuf_alloc;
	long page_size;

	if (!name)
		return -EPERM;

	page_size = LTTNG_UST_PAGE_SIZE;
	if (page_size <= 0) {
		return -ENOMEM;
	}
	/* Check that the subbuffer size is larger than a page. */
	if (subbuf_size < page_size)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2, and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;
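
	/*
	 * The two bitmask tests above accept powers of two only: for
	 * example, 0x1000 & 0x0fff == 0 (4 KiB passes) while
	 * 0x1800 & 0x17ff != 0 (6 KiB is rejected).
	 */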

	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strncpy(chanb->name, name, NAME_MAX);
	chanb->name[NAME_MAX - 1] = '\0';
	memcpy(&chanb->config, config, sizeof(*config));

	/* Per-cpu buffer size: control (prior to backend) */
	shmsize = lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer);
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_hot));
	shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_cold));
	shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
	/* Sampled timestamp end */
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(uint64_t));
	shmsize += sizeof(uint64_t) * num_subbuf;

	/* Per-cpu buffer size: backend */
	/* num_subbuf + 1 is the worst case */
	num_subbuf_alloc = num_subbuf + 1;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
	shmsize += lttng_ust_offset_align(shmsize, page_size);
	shmsize += subbuf_size * num_subbuf_alloc;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_counts) * num_subbuf;
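
	/*
	 * Worked example (illustrative numbers): subbuf_size = 4096 and
	 * num_subbuf = 4 give num_subbuf_alloc = 5, so the backend part
	 * reserves 5 * 4096 = 20480 bytes of page-aligned buffer data,
	 * plus 5 pages_shmp references, 5 backend_pages descriptors,
	 * 4 write-side subbuffer ids and 4 packet counter entries, on
	 * top of the control data (commit counters, timestamps) above.
	 */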

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		struct lttng_ust_lib_ring_buffer *buf;

		/*
		 * We need to allocate for all possible cpus.
		 */
		for_each_possible_cpu(i) {
			struct shm_object *shmobj;

			shmobj = shm_object_table_alloc(handle->table, shmsize,
					SHM_OBJECT_SHM, stream_fds[i], i);
			if (!shmobj)
				goto end;
			align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
			set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
			buf = shmp(handle, chanb->buf[i].shmp);
			if (!buf)
				goto end;
			set_shmp(buf->self, chanb->buf[i].shmp._ref);
			ret = lib_ring_buffer_create(buf, chanb, i,
					handle, shmobj);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
	} else {
		struct shm_object *shmobj;
		struct lttng_ust_lib_ring_buffer *buf;

		shmobj = shm_object_table_alloc(handle->table, shmsize,
				SHM_OBJECT_SHM, stream_fds[0], -1);
		if (!shmobj)
			goto end;
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
		set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
		buf = shmp(handle, chanb->buf[0].shmp);
		if (!buf)
			goto end;
		set_shmp(buf->self, chanb->buf[0].shmp._ref);
		ret = lib_ring_buffer_create(buf, chanb, -1,
				handle, shmobj);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	/* We only free the buffer data upon shm teardown */
end:
	return -ENOMEM;
}
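
/*
 * Usage sketch (hypothetical values, not part of this file):
 *
 *	ret = channel_backend_init(&chan->backend, "my_chan", config,
 *			4096, 4, handle, stream_fds);
 *	if (ret)
 *		return ret;	(fails with -EINVAL for non-power-of-2 sizes)
 */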

/**
 * channel_backend_free - destroy the channel
 * @chanb: channel backend
 * @handle: shared memory handle
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb,
		struct lttng_ust_shm_handle *handle)
{
	/* SHM teardown takes care of everything */
}

/**
 * lib_ring_buffer_read - read data from the ring buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	ssize_t orig_len;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	unsigned long sb_bindex, id;
	void *src;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return 0;
	config = &chanb->config;
	orig_len = len;
	offset &= chanb->buf_size - 1;

	if (caa_unlikely(!len))
		return 0;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		&& subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	src = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!src))
		return 0;
	memcpy(dest, src, len);
	return orig_len;
}
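
/*
 * Usage sketch (hypothetical, signatures assumed from the frontend API):
 * reads are expected to be bracketed by subbuffer ownership, e.g.
 *
 *	if (!lib_ring_buffer_get_subbuf(buf, consumed, handle)) {
 *		lib_ring_buffer_read(&buf->backend, read_offset, dest, len,
 *				handle);
 *		lib_ring_buffer_put_subbuf(buf, consumed, handle);
 *	}
 */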

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	ssize_t string_len, orig_offset;
	char *str;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	unsigned long sb_bindex, id;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return -EINVAL;
	config = &chanb->config;
	if (caa_unlikely(!len))
		return -EINVAL;
	offset &= chanb->buf_size - 1;
	orig_offset = offset;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return -EINVAL;
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		&& subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return -EINVAL;
	str = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!str))
		return -EINVAL;
	string_len = strnlen(str, len);
	if (dest && len) {
		memcpy(dest, str, string_len);
		/* Null-terminate; strnlen() bounds string_len to len. */
		((char *)dest)[string_len] = '\0';
	}
	/* offset is unchanged here, so this returns the string's length. */
	return offset - orig_offset + string_len;
}
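
/*
 * Example (illustrative): with the bytes "abc\0" stored at read_offset
 * and a 16-byte destination, lib_ring_buffer_read_cstr(bufb, read_offset,
 * dest, 15, handle) copies "abc", stores dest[3] = '\0' and returns 3.
 */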

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than
 * a page size.
 */
void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
		size_t offset,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long sb_bindex, id;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return NULL;
	config = &chanb->config;
	offset &= chanb->buf_size - 1;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return NULL;
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		&& subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return NULL;
	return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
}

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
		size_t offset,
		struct lttng_ust_shm_handle *handle)
{
	size_t sbidx;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return NULL;
	config = &chanb->config;
	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	sb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (!sb)
		return NULL;
	id = sb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return NULL;
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		&& subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return NULL;
	return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
}
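
/*
 * Worked example (illustrative numbers): with subbuf_size = 4096
 * (subbuf_size_order = 12) and buf_size = 16384, offset = 9000 yields
 * sbidx = 9000 >> 12 = 2, and the returned address points
 * 9000 & 4095 = 808 bytes into whichever backend subbuffer buf_wsb[2]
 * currently references.
 */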