/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */

#define _GNU_SOURCE
#include <unistd.h>
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <urcu/arch.h>

#include <lttng/ringbuffer-config.h>

#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "smp.h"
#include "shm.h"
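
/*
 * Note on pointer access: buffers live in shared memory, so objects
 * reference each other through table-relative shm references rather
 * than raw pointers.  shmp() and shmp_index() (see shm.h) translate a
 * reference into a pointer valid in the current mapping, returning
 * NULL when the reference cannot be resolved; set_shmp() stores a
 * reference.  This is why every dereference below is followed by a
 * NULL check.
 */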

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
				     struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb,
				     struct lttng_ust_shm_handle *handle,
				     struct shm_object *shmobj)
{
	struct channel_backend *chanb;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	unsigned long i;
	long page_size;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return -EINVAL;

	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;
	if (extra_reader_sb)
		num_subbuf_alloc++;

	page_size = sysconf(_SC_PAGE_SIZE);
	if (page_size <= 0) {
		goto page_size_error;
	}

	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	set_shmp(bufb->array, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->array)))
		goto array_error;

	/*
	 * This is the largest element (the buffer pages) which needs to
	 * be aligned on page size.
	 */
	align_shm(shmobj, page_size);
	set_shmp(bufb->memory_map, zalloc_shm(shmobj,
			subbuf_size * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->memory_map)))
		goto memory_map_error;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
		set_shmp(shmp_index(handle, bufb->array, i)->shmp,
			zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
		if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
				* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
		goto free_array;

	for (i = 0; i < num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

		sb = shmp_index(handle, bufb->buf_wsb, i);
		if (!sb)
			goto free_array;
		sb->id = subbuffer_id(config, 0, 1, i);
	}

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
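
	/*
	 * subbuffer_id(config, offset, noref, index) packs a subbuffer
	 * reference into a single word: the buffer offset count, a
	 * "noref" flag and the backend pages index.  The writer table
	 * above maps logical subbuffer i to backend pages i; when an
	 * extra reader subbuffer is allocated, the reader initially
	 * owns the spare entry (num_subbuf_alloc - 1) and later
	 * exchanges ids with the writer instead of copying data.
	 */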

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
		struct lttng_ust_lib_ring_buffer_backend_pages *pages;
		struct shm_ref ref;

		ref.index = bufb->memory_map._ref.index;
		ref.offset = bufb->memory_map._ref.offset;
		ref.offset += i * subbuf_size;

		sbp = shmp_index(handle, bufb->array, i);
		if (!sbp)
			goto free_array;
		pages = shmp(handle, sbp->shmp);
		if (!pages)
			goto free_array;
		set_shmp(pages->p, ref);
		if (config->output == RING_BUFFER_MMAP) {
			pages->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	return 0;

free_array:
	/* bufb->array[i] will be freed by shm teardown */
memory_map_error:
	/* bufb->array will be freed by shm teardown */
array_error:
page_size_error:
	return -ENOMEM;
}
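
/*
 * Resulting layout within one stream's shm object (in allocation
 * order, each chunk aligned as requested above):
 *
 *   array          num_subbuf_alloc backend_pages_shmp references
 *   memory_map     num_subbuf_alloc * subbuf_size bytes, page aligned
 *   backend pages  num_subbuf_alloc backend_pages descriptors
 *   buf_wsb        num_subbuf writer-side subbuffer ids
 *
 * Each backend_pages descriptor points into memory_map at offset
 * i * subbuf_size.
 */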

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	set_shmp(bufb->chan, handle->chan._ref);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb,
						handle, shmobj);
}

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return;
	config = &chanb->config;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

		sb = shmp_index(handle, bufb->buf_wsb, i);
		if (!sb)
			return;
		sb->id = subbuffer_id(config, 0, 1, i);
	}
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
		struct lttng_ust_lib_ring_buffer_backend_pages *pages;

		sbp = shmp_index(handle, bufb->array, i);
		if (!sbp)
			return;
		pages = shmp(handle, sbp->shmp);
		if (!pages)
			return;
		/* Don't reset mmap_offset */
		v_set(config, &pages->records_commit, 0);
		v_set(config, &pages->records_unread, 0);
		pages->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @subbuf_size: size of sub-buffers (> page size, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 * @handle: shared memory handle
 * @stream_fds: stream file descriptors.
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified.  The created channel buffer files will be named
 * name_0...name_N-1.  File permissions will be %S_IRUSR.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size, size_t num_subbuf,
			 struct lttng_ust_shm_handle *handle,
			 const int *stream_fds)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;
	size_t shmsize = 0, num_subbuf_alloc;
	long page_size;

	if (!name)
		return -EPERM;

	page_size = sysconf(_SC_PAGE_SIZE);
	if (page_size <= 0) {
		return -ENOMEM;
	}
	/* Check that the subbuffer size is larger than a page. */
	if (subbuf_size < page_size)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2, and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;

	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strncpy(chanb->name, name, NAME_MAX);
	chanb->name[NAME_MAX - 1] = '\0';
	memcpy(&chanb->config, config, sizeof(*config));

	/* Per-cpu buffer size: control (prior to backend) */
	shmsize = offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer);

	/* Per-cpu buffer size: backend */
	/* num_subbuf + 1 is the worst case */
	num_subbuf_alloc = num_subbuf + 1;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, page_size);
	shmsize += subbuf_size * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
	/* Per-cpu buffer size: control (after backend) */
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
	shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_cold));
	shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
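
	/*
	 * Example, with hypothetical sizes: page_size = 4096,
	 * subbuf_size = 16384, num_subbuf = 4.  The dominant term is
	 * the page-aligned data area: subbuf_size * (num_subbuf + 1) =
	 * 81920 bytes (the "+ 1" pays for the possible extra reader
	 * subbuffer).  Everything else is a few hundred bytes of
	 * control structures plus alignment padding, so shmsize ends
	 * up a little over 80 KiB per stream here.
	 */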

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		struct lttng_ust_lib_ring_buffer *buf;
		/*
		 * We need to allocate for all possible cpus.
		 */
		for_each_possible_cpu(i) {
			struct shm_object *shmobj;

			shmobj = shm_object_table_alloc(handle->table, shmsize,
					SHM_OBJECT_SHM, stream_fds[i]);
			if (!shmobj)
				goto end;
			align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
			set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
			buf = shmp(handle, chanb->buf[i].shmp);
			if (!buf)
				goto end;
			set_shmp(buf->self, chanb->buf[i].shmp._ref);
			ret = lib_ring_buffer_create(buf, chanb, i,
					handle, shmobj);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
	} else {
		struct shm_object *shmobj;
		struct lttng_ust_lib_ring_buffer *buf;

		shmobj = shm_object_table_alloc(handle->table, shmsize,
					SHM_OBJECT_SHM, stream_fds[0]);
		if (!shmobj)
			goto end;
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
		set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
		buf = shmp(handle, chanb->buf[0].shmp);
		if (!buf)
			goto end;
		set_shmp(buf->self, chanb->buf[0].shmp._ref);
		ret = lib_ring_buffer_create(buf, chanb, -1,
					handle, shmobj);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	/* We only free the buffer data upon shm teardown */
end:
	return -ENOMEM;
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 * @handle: shared memory handle
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle)
{
	/* SHM teardown takes care of everything */
}

/**
 * lib_ring_buffer_read - read data from the ring buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 * @handle : shared memory handle
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	ssize_t orig_len;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	void *src;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return 0;
	config = &chanb->config;
	orig_len = len;
	offset &= chanb->buf_size - 1;

	if (caa_unlikely(!len))
		return 0;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	src = shmp_index(handle, shmp(handle, rpages->shmp)->p,
			offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!src))
		return 0;
	memcpy(dest, src, len);
	return orig_len;
}

/**
 * lib_ring_buffer_read_cstr - read a C-style string from the ring buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 * @handle : shared memory handle
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	ssize_t string_len, orig_offset;
	char *str;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return -EINVAL;
	config = &chanb->config;
	if (caa_unlikely(!len))
		return -EINVAL;
	offset &= chanb->buf_size - 1;
	orig_offset = offset;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return -EINVAL;
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	str = shmp_index(handle, shmp(handle, rpages->shmp)->p,
			offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!str))
		return -EINVAL;
	string_len = strnlen(str, len);
	if (dest && len) {
		memcpy(dest, str, string_len);
		((char *)dest)[string_len] = '\0';
	}
	offset += string_len;
	return offset - orig_offset;
}
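
/*
 * E.g. reading "hello\0" at @offset with len = 16 stores "hello" plus
 * a terminating NUL into @dest and returns 5, the string's length; a
 * longer string is truncated to at most @len non-NUL characters.
 */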

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 * @handle : shared memory handle
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than
 * a page size.
 */
void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
					  size_t offset,
					  struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long sb_bindex, id;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return NULL;
	config = &chanb->config;
	offset &= chanb->buf_size - 1;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return NULL;
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return shmp_index(handle, shmp(handle, rpages->shmp)->p,
			offset & (chanb->subbuf_size - 1));
}

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 * @handle : shared memory handle
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t offset,
				     struct lttng_ust_shm_handle *handle)
{
	size_t sbidx;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return NULL;
	config = &chanb->config;
	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	sb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (!sb)
		return NULL;
	id = sb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return NULL;
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return shmp_index(handle, shmp(handle, rpages->shmp)->p,
			offset & (chanb->subbuf_size - 1));
}
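
/*
 * Note the asymmetry with lib_ring_buffer_read_offset_address() above:
 * the read-side variant resolves through buf_rsb (the subbuffer
 * currently owned by the reader), whereas this write-side variant
 * looks the id up in buf_wsb by subbuffer index, since the writer may
 * touch any subbuffer of the buffer.
 */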