/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <limits.h>
#include <string.h>
#include <urcu/compiler.h>

#include "lttng/core.h"

#include <lttng/ringbuffer-config.h>
#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "smp.h"
#include "shm.h"
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 * @handle: shared memory handle
 * @shmobj: shared memory object backing this buffer
 */
static
int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
				     struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb,
				     struct lttng_ust_shm_handle *handle,
				     struct shm_object *shmobj)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	unsigned long i;

	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb)
		num_subbuf_alloc++;

	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	set_shmp(bufb->array, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp)
			* num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->array)))
		goto array_error;

	/*
	 * This is the largest element (the buffer pages) which needs to
	 * be aligned on PAGE_SIZE.
	 */
	align_shm(shmobj, PAGE_SIZE);
	set_shmp(bufb->memory_map, zalloc_shm(shmobj,
			subbuf_size * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->memory_map)))
		goto memory_map_error;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
		set_shmp(shmp_index(handle, bufb->array, i)->shmp,
			zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
		if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
			* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		struct shm_ref ref;

		ref.index = bufb->memory_map._ref.index;
		ref.offset = bufb->memory_map._ref.offset;
		ref.offset += i * subbuf_size;

		set_shmp(shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->p,
			 ref);
		if (config->output == RING_BUFFER_MMAP) {
			shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	return 0;

free_array:
	/* bufb->array[i] will be freed by shm teardown */
memory_map_error:
	/* bufb->array will be freed by shm teardown */
array_error:
	return -ENOMEM;
}
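/*
 * Layout sketch (illustrative numbers, not part of the original code):
 * with num_subbuf = 4 and extra_reader_sb = 1, num_subbuf_alloc is 5.
 * The write-side table buf_wsb[0..3] initially maps one-to-one onto
 * backend subbuffers 0..3, while the read-side entry buf_rsb owns the
 * spare subbuffer 4.  In overwrite mode, the reader can then exchange
 * its spare subbuffer for a full one instead of copying data out.
 */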
int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	set_shmp(bufb->chan, handle->chan._ref);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb,
						handle, shmobj);
}
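/*
 * Hypothetical caller sketch (the shmobj setup mirrors what
 * channel_backend_init does below; `buf`, `cpu` and `shmsize` are
 * assumed to come from the caller):
 *
 *	struct shm_object *shmobj;
 *
 *	shmobj = shm_object_table_append(handle->table, shmsize);
 *	if (!shmobj)
 *		return -ENOMEM;
 *	ret = lib_ring_buffer_backend_create(&buf->backend, chanb, cpu,
 *					     handle, shmobj);
 */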
void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_commit, 0);
		v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_unread, 0);
		shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}
/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 * @handle: shared memory handle
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size, size_t num_subbuf,
			 struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;
	size_t shmsize = 0, num_subbuf_alloc;

	if (!name)
		return -EPERM;

	if (!(subbuf_size && num_subbuf))
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are power of 2.
	 */
	CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
	CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strncpy(chanb->name, name, NAME_MAX);
	chanb->name[NAME_MAX - 1] = '\0';
	memcpy(&chanb->config, config, sizeof(*config));

	/* Per-cpu buffer size: control (prior to backend) */
	shmsize = offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer);

	/* Per-cpu buffer size: backend */
	/* num_subbuf + 1 is the worst case */
	num_subbuf_alloc = num_subbuf + 1;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, PAGE_SIZE);
	shmsize += subbuf_size * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
	/* Per-cpu buffer size: control (after backend) */
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
	shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_cold));
	shmsize += sizeof(struct commit_counters_cold) * num_subbuf;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		struct lttng_ust_lib_ring_buffer *buf;
		/*
		 * We need to allocate for all possible cpus.
		 */
		for_each_possible_cpu(i) {
			struct shm_object *shmobj;

			shmobj = shm_object_table_append(handle->table, shmsize);
			if (!shmobj)
				goto end;
			align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
			set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
			buf = shmp(handle, chanb->buf[i].shmp);
			if (!buf)
				goto end;
			set_shmp(buf->self, chanb->buf[i].shmp._ref);
			ret = lib_ring_buffer_create(buf, chanb, i,
						     handle, shmobj);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
	} else {
		struct shm_object *shmobj;
		struct lttng_ust_lib_ring_buffer *buf;

		shmobj = shm_object_table_append(handle->table, shmsize);
		if (!shmobj)
			goto end;
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
		set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
		buf = shmp(handle, chanb->buf[0].shmp);
		if (!buf)
			goto end;
		set_shmp(buf->self, chanb->buf[0].shmp._ref);
		ret = lib_ring_buffer_create(buf, chanb, -1,
					     handle, shmobj);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	/* We only free the buffer data upon shm teardown */
end:
	return -ENOMEM;
}
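/*
 * Worked shmsize example (illustrative): for subbuf_size = 4096 and
 * num_subbuf = 4, num_subbuf_alloc = 5, so each per-cpu shm object
 * holds one struct lttng_ust_lib_ring_buffer, 5 backend pages_shmp
 * slots, a page-aligned 5 * 4096 = 20480-byte data area, 5 backend
 * pages descriptors, 4 write-side subbuffer entries, and 4 hot/cold
 * commit counter pairs, plus the alignment padding inserted between
 * regions by offset_align().
 */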
/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 * @handle: shared memory handle
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle)
{
	/* SHM teardown takes care of everything */
}
/**
 * lib_ring_buffer_read - read data from the ring buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 * @handle : shared memory handle
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	ssize_t orig_len;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;

	if (caa_unlikely(!len))
		return 0;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	memcpy(dest, shmp_index(handle, shmp(handle, rpages->shmp)->p,
				offset & (chanb->subbuf_size - 1)), len);
	return orig_len;
}
/**
 * lib_ring_buffer_read_cstr - read a C-style string from the ring buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 * @handle : shared memory handle
 *
 * Returns the string's length. The copy into @dest is truncated and
 * NUL-terminated to fit within @len bytes.
 * Should be protected by get_subbuf/put_subbuf.
 */
int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	ssize_t string_len, orig_offset;
	char *str;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	orig_offset = offset;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	str = shmp_index(handle, shmp(handle, rpages->shmp)->p,
			 offset & (chanb->subbuf_size - 1));
	string_len = strnlen(str, len);
	if (dest && len) {
		ssize_t copylen;

		/*
		 * Truncate if needed so the destination is always
		 * NUL-terminated within its len bytes.
		 */
		copylen = string_len < (ssize_t) len ? string_len : (ssize_t) len - 1;
		memcpy(dest, str, copylen);
		((char *)dest)[copylen] = '\0';
	}
	offset += string_len;
	return offset - orig_offset;
}
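/*
 * Sketch (hypothetical): extracting a NUL-terminated field while the
 * subbuffer is held between get_subbuf and put_subbuf; `buf`,
 * `field_offset` and `handle` are assumptions:
 *
 *	char name[NAME_MAX];
 *	int string_len;
 *
 *	string_len = lib_ring_buffer_read_cstr(&buf->backend, field_offset,
 *					       name, sizeof(name), handle);
 */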
/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 * @handle : shared memory handle
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
					  size_t offset,
					  struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return shmp_index(handle, shmp(handle, rpages->shmp)->p,
			  offset & (chanb->subbuf_size - 1));
}
/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 * @handle : shared memory handle
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t offset,
				     struct lttng_ust_shm_handle *handle)
{
	size_t sbidx;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return shmp_index(handle, shmp(handle, rpages->shmp)->p,
			  offset & (chanb->subbuf_size - 1));
}
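/*
 * Index arithmetic example (illustrative): with subbuf_size = 4096
 * (subbuf_size_order = 12) and offset = 9000, sbidx = 9000 >> 12 = 2,
 * and the returned address points 9000 & 4095 = 808 bytes into the
 * pages currently backing write-side slot 2.
 */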