/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#include <errno.h>
#include "counter.h"
#include "counter-internal.h"
#include <urcu/system.h>
#include <urcu/compiler.h>
#include <stdbool.h>
#include "common/macros.h"
#include "common/align.h"
#include "common/bitmap.h"

#include "common/smp.h"
#include "shm.h"
static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}
static int lttng_counter_init_stride(
		const struct lib_counter_config *config __attribute__((unused)),
		struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* nr_elem should be minimum 1 for each dimension. */
		if (!nr_elem)
			return -EINVAL;
		stride *= nr_elem;
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
	}
	return 0;
}
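
/*
 * Illustrative note (not part of the original source): strides are
 * computed in row-major order, innermost dimension last. For
 * dimensions {4, 3}, the loop above assigns stride 1 to the last
 * dimension and stride 3 to the first, so the index helper is expected
 * to map indexes {i0, i1} to the linear slot i0 * 3 + i1.
 */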
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;
	size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
	struct lttng_counter_shm_object *shm_object;

	if (shm_fd < 0)
		return 0;	/* Skip, will be populated later. */

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = &counter->percpu_counters[cpu];
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->shm_fd = shm_fd;
	counters_offset = shm_length;
	shm_length += counter_size * nr_elem;
	overflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	underflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	layout->shm_len = shm_length;
	if (counter->is_daemon) {
		/* Allocate and clear shared memory. */
		shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
			shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
		if (!shm_object)
			return -ENOMEM;
	} else {
		/* Map pre-existing shared memory. */
		shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
			shm_fd, shm_length);
		if (!shm_object)
			return -ENOMEM;
	}
	layout->counters = shm_object->memory_map + counters_offset;
	layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
	layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
	return 0;
}
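
/*
 * Illustrative sizing example (hypothetical values): with
 * COUNTER_SIZE_32_BIT and nr_elem = 1000, the mapping holds 4000 bytes
 * of counter slots, followed by two bitmaps of
 * LTTNG_UST_ALIGN(1000, 8) / 8 = 125 bytes each for the overflow and
 * underflow flags, for a total shm_len of 4250 bytes.
 */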
int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
		return -EINVAL;
	layout = &counter->global_counters;
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, -1, fd);
}
int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (cpu < 0 || cpu >= num_possible_cpus())
		return -EINVAL;

	if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
		return -EINVAL;
	layout = &counter->percpu_counters[cpu];
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, cpu, fd);
}
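
/*
 * Note on the two setters above: lttng_counter_create() may run with
 * all shm fds set to -1, in which case lttng_counter_layout_init()
 * skips mapping ("will be populated later"). These entry points let
 * the owner hand over the global and per-CPU shm fds afterwards,
 * exactly once per layout: the -EBUSY check rejects re-registration.
 */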
static
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
		int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > INT8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > INT16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > INT32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
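
/*
 * Added note (interpretation; the counter-add fast path lives outside
 * this file): the global sum step is understood as the magnitude at
 * which a per-CPU counter's value is folded into the global counter,
 * which is why it must fit within the configured counter size, as the
 * range checks above enforce.
 */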
static
int validate_args(const struct lib_counter_config *config,
	size_t nr_dimensions __attribute__((unused)),
	const size_t *max_nr_elem,
	int64_t global_sum_step,
	int global_counter_fd,
	int nr_counter_cpu_fds,
	const int *counter_cpu_fds)
{
	int nr_cpus = num_possible_cpus();

	if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		WARN_ON_ONCE(1);
		return -1;
	}
	if (!max_nr_elem)
		return -1;
	/*
	 * global sum step is only useful with allocating both per-cpu
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
		return -1;
	if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
		return -1;
	return 0;
}
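
/*
 * Added rationale for the first check: 64-bit counters are rejected on
 * 32-bit userspace because CMM_LOAD_SHARED()/CMM_STORE_SHARED() on an
 * int64_t are only single-copy atomic when the native long is 64 bits
 * wide; the COUNTER_SIZE_64_BIT read and clear paths below are
 * likewise compiled in only under #if CAA_BITS_PER_LONG == 64.
 */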
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step,
		int global_counter_fd,
		int nr_counter_cpu_fds,
		const int *counter_cpu_fds,
		bool is_daemon)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;
	int nr_handles = 0;
	int nr_cpus = num_possible_cpus();

	if (validate_args(config, nr_dimensions, max_nr_elem,
			global_sum_step, global_counter_fd, nr_counter_cpu_fds,
			counter_cpu_fds))
		return NULL;
	counter = zmalloc(sizeof(struct lib_counter));
	if (!counter)
		return NULL;
	counter->global_counters.shm_fd = -1;
	counter->config = *config;
	counter->is_daemon = is_daemon;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
		for_each_possible_cpu(cpu)
			counter->percpu_counters[cpu].shm_fd = -1;
	}

	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	//TODO saturation values.
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;

	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		nr_handles++;
	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		nr_handles += nr_cpus;
	/* Allocate table for global and per-cpu counters. */
	counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
	if (!counter->object_table)
		goto error_alloc_object_table;

	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1, global_counter_fd);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
		for_each_possible_cpu(cpu) {
			ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
error_alloc_object_table:
error_init_stride:
	free(counter->percpu_counters);
error_alloc_percpu:
	free(counter->dimensions);
error_dimensions:
error_sum_step:
	free(counter);
	return NULL;
}
void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;

	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		free(counter->percpu_counters);
	lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
	free(counter->dimensions);
	free(counter);
}
int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
{
	int shm_fd;

	shm_fd = counter->global_counters.shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = counter->global_counters.shm_len;
	return 0;
}
int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
{
	struct lib_counter_layout *layout;
	int shm_fd;

	if (cpu >= num_possible_cpus())
		return -1;
	layout = &counter->percpu_counters[cpu];
	shm_fd = layout->shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = layout->shm_len;
	return 0;
}
int lttng_counter_read(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu, int64_t *value, bool *overflow,
		bool *underflow)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= num_possible_cpus())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= num_possible_cpus())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = CMM_LOAD_SHARED(*int_p);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	*overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
	*underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
	return 0;
}
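
/*
 * Note: cpu == -1 selects the global layout when a global allocation
 * exists. CMM_LOAD_SHARED() yields a racy-but-single-copy-atomic
 * snapshot of one counter slot while writers keep updating it without
 * locking; the overflow/underflow bitmap bits are sampled separately,
 * so the pair is not read atomically with the value.
 */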
int lttng_counter_aggregate(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int64_t *value, bool *overflow,
		bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
				-1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		for_each_possible_cpu(cpu) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
					cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}
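
/*
 * Worked example for the wrap detection above: if old == INT64_MAX and
 * v == 1, the addition is performed on uint64_t (signed overflow is
 * undefined behavior in C) and wraps to INT64_MIN after the cast back
 * to int64_t. Since v > 0 yet sum < old, the wrap is detected and
 * *overflow is latched; the symmetric test handles negative v.
 */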
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= num_possible_cpus())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= num_possible_cpus())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
	lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
	return 0;
}
int lttng_counter_clear(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		break;
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		for_each_possible_cpu(cpu) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}