/*
 * SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#include <urcu/system.h>
#include <urcu/compiler.h>

#include "counter-internal.h"
#include "common/macros.h"
#include "common/align.h"
#include "common/bitmap.h"
#include "common/smp.h"
#include "common/populate.h"
23 static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension
*dimension
)
25 return dimension
->max_nr_elem
;
28 static int lttng_counter_init_stride(
29 const struct lib_counter_config
*config
__attribute__((unused
)),
30 struct lib_counter
*counter
)
32 size_t nr_dimensions
= counter
->nr_dimensions
;
36 for (i
= nr_dimensions
- 1; i
>= 0; i
--) {
37 struct lib_counter_dimension
*dimension
= &counter
->dimensions
[i
];
40 nr_elem
= lttng_counter_get_dimension_nr_elements(dimension
);
41 dimension
->stride
= stride
;
42 /* nr_elem should be minimum 1 for each dimension. */
46 if (stride
> SIZE_MAX
/ nr_elem
)
52 static int lttng_counter_layout_init(struct lib_counter
*counter
, int cpu
, int shm_fd
)
54 struct lib_counter_layout
*layout
;
56 size_t nr_elem
= counter
->allocated_elem
;
57 size_t shm_length
= 0, counters_offset
, overflow_offset
, underflow_offset
;
58 struct lttng_counter_shm_object
*shm_object
;
61 return 0; /* Skip, will be populated later. */
64 layout
= &counter
->global_counters
;
66 layout
= &counter
->percpu_counters
[cpu
];
67 switch (counter
->config
.counter_size
) {
68 case COUNTER_SIZE_8_BIT
:
69 case COUNTER_SIZE_16_BIT
:
70 case COUNTER_SIZE_32_BIT
:
71 case COUNTER_SIZE_64_BIT
:
72 counter_size
= (size_t) counter
->config
.counter_size
;
77 layout
->shm_fd
= shm_fd
;
78 counters_offset
= shm_length
;
79 shm_length
+= counter_size
* nr_elem
;
80 overflow_offset
= shm_length
;
81 shm_length
+= LTTNG_UST_ALIGN(nr_elem
, 8) / 8;
82 underflow_offset
= shm_length
;
83 shm_length
+= LTTNG_UST_ALIGN(nr_elem
, 8) / 8;
84 layout
->shm_len
= shm_length
;
85 if (counter
->is_daemon
) {
86 /* Allocate and clear shared memory. */
87 shm_object
= lttng_counter_shm_object_table_alloc(counter
->object_table
,
88 shm_length
, LTTNG_COUNTER_SHM_OBJECT_SHM
, shm_fd
, cpu
,
89 lttng_ust_map_populate_cpu_is_enabled(cpu
));
93 /* Map pre-existing shared memory. */
94 shm_object
= lttng_counter_shm_object_table_append_shm(counter
->object_table
,
95 shm_fd
, shm_length
, lttng_ust_map_populate_cpu_is_enabled(cpu
));
99 layout
->counters
= shm_object
->memory_map
+ counters_offset
;
100 layout
->overflow_bitmap
= (unsigned long *)(shm_object
->memory_map
+ overflow_offset
);
101 layout
->underflow_bitmap
= (unsigned long *)(shm_object
->memory_map
+ underflow_offset
);
105 int lttng_counter_set_global_shm(struct lib_counter
*counter
, int fd
)
107 struct lib_counter_config
*config
= &counter
->config
;
108 struct lib_counter_layout
*layout
;
111 if (!(config
->alloc
& COUNTER_ALLOC_GLOBAL
))
113 layout
= &counter
->global_counters
;
114 if (layout
->shm_fd
>= 0)
116 ret
= lttng_counter_layout_init(counter
, -1, fd
);
118 counter
->received_shm
++;
122 int lttng_counter_set_cpu_shm(struct lib_counter
*counter
, int cpu
, int fd
)
124 struct lib_counter_config
*config
= &counter
->config
;
125 struct lib_counter_layout
*layout
;
128 if (cpu
< 0 || cpu
>= get_possible_cpus_array_len())
131 if (!(config
->alloc
& COUNTER_ALLOC_PER_CPU
))
133 layout
= &counter
->percpu_counters
[cpu
];
134 if (layout
->shm_fd
>= 0)
136 ret
= lttng_counter_layout_init(counter
, cpu
, fd
);
138 counter
->received_shm
++;
143 int lttng_counter_set_global_sum_step(struct lib_counter
*counter
,
144 int64_t global_sum_step
)
146 if (global_sum_step
< 0)
149 switch (counter
->config
.counter_size
) {
150 case COUNTER_SIZE_8_BIT
:
151 if (global_sum_step
> INT8_MAX
)
153 counter
->global_sum_step
.s8
= (int8_t) global_sum_step
;
155 case COUNTER_SIZE_16_BIT
:
156 if (global_sum_step
> INT16_MAX
)
158 counter
->global_sum_step
.s16
= (int16_t) global_sum_step
;
160 case COUNTER_SIZE_32_BIT
:
161 if (global_sum_step
> INT32_MAX
)
163 counter
->global_sum_step
.s32
= (int32_t) global_sum_step
;
165 case COUNTER_SIZE_64_BIT
:
166 counter
->global_sum_step
.s64
= global_sum_step
;
176 int validate_args(const struct lib_counter_config
*config
,
177 size_t nr_dimensions
__attribute__((unused
)),
178 const size_t *max_nr_elem
,
179 int64_t global_sum_step
,
180 int global_counter_fd
,
181 int nr_counter_cpu_fds
,
182 const int *counter_cpu_fds
)
184 int nr_cpus
= get_possible_cpus_array_len();
186 if (CAA_BITS_PER_LONG
!= 64 && config
->counter_size
== COUNTER_SIZE_64_BIT
) {
193 * global sum step is only useful with allocating both per-cpu
194 * and global counters.
196 if (global_sum_step
&& (!(config
->alloc
& COUNTER_ALLOC_GLOBAL
) ||
197 !(config
->alloc
& COUNTER_ALLOC_PER_CPU
)))
199 if (!(config
->alloc
& COUNTER_ALLOC_GLOBAL
) && global_counter_fd
>= 0)
201 if (!(config
->alloc
& COUNTER_ALLOC_PER_CPU
) && counter_cpu_fds
)
203 if (!(config
->alloc
& COUNTER_ALLOC_PER_CPU
) && nr_counter_cpu_fds
>= 0)
205 if (counter_cpu_fds
&& nr_cpus
!= nr_counter_cpu_fds
)
210 struct lib_counter
*lttng_counter_create(const struct lib_counter_config
*config
,
211 size_t nr_dimensions
,
212 const size_t *max_nr_elem
,
213 int64_t global_sum_step
,
214 int global_counter_fd
,
215 int nr_counter_cpu_fds
,
216 const int *counter_cpu_fds
,
219 struct lib_counter
*counter
;
220 size_t dimension
, nr_elem
= 1;
223 int nr_cpus
= get_possible_cpus_array_len();
224 bool populate
= lttng_ust_map_populate_is_enabled();
226 if (validate_args(config
, nr_dimensions
, max_nr_elem
,
227 global_sum_step
, global_counter_fd
, nr_counter_cpu_fds
,
230 counter
= zmalloc_populate(sizeof(struct lib_counter
), populate
);
233 counter
->global_counters
.shm_fd
= -1;
234 counter
->config
= *config
;
235 counter
->is_daemon
= is_daemon
;
236 if (lttng_counter_set_global_sum_step(counter
, global_sum_step
))
238 counter
->nr_dimensions
= nr_dimensions
;
239 counter
->dimensions
= zmalloc_populate(nr_dimensions
* sizeof(*counter
->dimensions
), populate
);
240 if (!counter
->dimensions
)
241 goto error_dimensions
;
242 for (dimension
= 0; dimension
< nr_dimensions
; dimension
++)
243 counter
->dimensions
[dimension
].max_nr_elem
= max_nr_elem
[dimension
];
244 if (config
->alloc
& COUNTER_ALLOC_PER_CPU
) {
245 counter
->percpu_counters
= zmalloc_populate(sizeof(struct lib_counter_layout
) * nr_cpus
, populate
);
246 if (!counter
->percpu_counters
)
247 goto error_alloc_percpu
;
248 for_each_possible_cpu(cpu
)
249 counter
->percpu_counters
[cpu
].shm_fd
= -1;
252 if (lttng_counter_init_stride(config
, counter
))
253 goto error_init_stride
;
254 //TODO saturation values.
255 for (dimension
= 0; dimension
< counter
->nr_dimensions
; dimension
++)
256 nr_elem
*= lttng_counter_get_dimension_nr_elements(&counter
->dimensions
[dimension
]);
257 counter
->allocated_elem
= nr_elem
;
259 if (config
->alloc
& COUNTER_ALLOC_GLOBAL
)
261 if (config
->alloc
& COUNTER_ALLOC_PER_CPU
)
262 nr_handles
+= nr_cpus
;
263 counter
->expected_shm
= nr_handles
;
264 /* Allocate table for global and per-cpu counters. */
265 counter
->object_table
= lttng_counter_shm_object_table_create(nr_handles
, populate
);
266 if (!counter
->object_table
)
267 goto error_alloc_object_table
;
269 if (config
->alloc
& COUNTER_ALLOC_GLOBAL
) {
270 ret
= lttng_counter_layout_init(counter
, -1, global_counter_fd
); /* global */
272 goto layout_init_error
;
274 if ((config
->alloc
& COUNTER_ALLOC_PER_CPU
) && counter_cpu_fds
) {
275 for_each_possible_cpu(cpu
) {
276 ret
= lttng_counter_layout_init(counter
, cpu
, counter_cpu_fds
[cpu
]);
278 goto layout_init_error
;
284 lttng_counter_shm_object_table_destroy(counter
->object_table
, is_daemon
);
285 error_alloc_object_table
:
287 free(counter
->percpu_counters
);
289 free(counter
->dimensions
);
296 void lttng_counter_destroy(struct lib_counter
*counter
)
298 struct lib_counter_config
*config
= &counter
->config
;
300 if (config
->alloc
& COUNTER_ALLOC_PER_CPU
)
301 free(counter
->percpu_counters
);
302 lttng_counter_shm_object_table_destroy(counter
->object_table
, counter
->is_daemon
);
303 free(counter
->dimensions
);
307 int lttng_counter_get_global_shm(struct lib_counter
*counter
, int *fd
, size_t *len
)
311 shm_fd
= counter
->global_counters
.shm_fd
;
315 *len
= counter
->global_counters
.shm_len
;
319 int lttng_counter_get_cpu_shm(struct lib_counter
*counter
, int cpu
, int *fd
, size_t *len
)
321 struct lib_counter_layout
*layout
;
324 if (cpu
>= get_possible_cpus_array_len())
326 layout
= &counter
->percpu_counters
[cpu
];
327 shm_fd
= layout
->shm_fd
;
331 *len
= layout
->shm_len
;
335 bool lttng_counter_ready(struct lib_counter
*counter
)
337 if (counter
->received_shm
== counter
->expected_shm
)
342 int lttng_counter_read(const struct lib_counter_config
*config
,
343 struct lib_counter
*counter
,
344 const size_t *dimension_indexes
,
345 int cpu
, int64_t *value
, bool *overflow
,
349 struct lib_counter_layout
*layout
;
351 if (caa_unlikely(lttng_counter_validate_indexes(config
, counter
, dimension_indexes
)))
353 index
= lttng_counter_get_index(config
, counter
, dimension_indexes
);
355 switch (config
->alloc
) {
356 case COUNTER_ALLOC_PER_CPU
:
357 if (cpu
< 0 || cpu
>= get_possible_cpus_array_len())
359 layout
= &counter
->percpu_counters
[cpu
];
361 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
363 if (cpu
>= get_possible_cpus_array_len())
365 layout
= &counter
->percpu_counters
[cpu
];
367 layout
= &counter
->global_counters
;
370 case COUNTER_ALLOC_GLOBAL
:
373 layout
= &counter
->global_counters
;
378 if (caa_unlikely(!layout
->counters
))
381 switch (config
->counter_size
) {
382 case COUNTER_SIZE_8_BIT
:
384 int8_t *int_p
= (int8_t *) layout
->counters
+ index
;
385 *value
= (int64_t) CMM_LOAD_SHARED(*int_p
);
388 case COUNTER_SIZE_16_BIT
:
390 int16_t *int_p
= (int16_t *) layout
->counters
+ index
;
391 *value
= (int64_t) CMM_LOAD_SHARED(*int_p
);
394 case COUNTER_SIZE_32_BIT
:
396 int32_t *int_p
= (int32_t *) layout
->counters
+ index
;
397 *value
= (int64_t) CMM_LOAD_SHARED(*int_p
);
400 #if CAA_BITS_PER_LONG == 64
401 case COUNTER_SIZE_64_BIT
:
403 int64_t *int_p
= (int64_t *) layout
->counters
+ index
;
404 *value
= CMM_LOAD_SHARED(*int_p
);
411 *overflow
= lttng_bitmap_test_bit(index
, layout
->overflow_bitmap
);
412 *underflow
= lttng_bitmap_test_bit(index
, layout
->underflow_bitmap
);
416 int lttng_counter_aggregate(const struct lib_counter_config
*config
,
417 struct lib_counter
*counter
,
418 const size_t *dimension_indexes
,
419 int64_t *value
, bool *overflow
,
429 switch (config
->alloc
) {
430 case COUNTER_ALLOC_GLOBAL
: /* Fallthrough */
431 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
432 /* Read global counter. */
433 ret
= lttng_counter_read(config
, counter
, dimension_indexes
,
441 case COUNTER_ALLOC_PER_CPU
:
447 switch (config
->alloc
) {
448 case COUNTER_ALLOC_GLOBAL
:
450 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
: /* Fallthrough */
451 case COUNTER_ALLOC_PER_CPU
:
452 for_each_possible_cpu(cpu
) {
455 ret
= lttng_counter_read(config
, counter
, dimension_indexes
,
461 /* Overflow is defined on unsigned types. */
462 sum
= (int64_t) ((uint64_t) old
+ (uint64_t) v
);
463 if (v
> 0 && sum
< old
)
465 else if (v
< 0 && sum
> old
)
472 switch (config
->counter_size
) {
473 case COUNTER_SIZE_8_BIT
:
478 sum
= (int8_t) sum
; /* Truncate sum. */
480 case COUNTER_SIZE_16_BIT
:
485 sum
= (int16_t) sum
; /* Truncate sum. */
487 case COUNTER_SIZE_32_BIT
:
492 sum
= (int32_t) sum
; /* Truncate sum. */
494 #if CAA_BITS_PER_LONG == 64
495 case COUNTER_SIZE_64_BIT
:
506 int lttng_counter_clear_cpu(const struct lib_counter_config
*config
,
507 struct lib_counter
*counter
,
508 const size_t *dimension_indexes
,
512 struct lib_counter_layout
*layout
;
514 if (caa_unlikely(lttng_counter_validate_indexes(config
, counter
, dimension_indexes
)))
516 index
= lttng_counter_get_index(config
, counter
, dimension_indexes
);
518 switch (config
->alloc
) {
519 case COUNTER_ALLOC_PER_CPU
:
520 if (cpu
< 0 || cpu
>= get_possible_cpus_array_len())
522 layout
= &counter
->percpu_counters
[cpu
];
524 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
526 if (cpu
>= get_possible_cpus_array_len())
528 layout
= &counter
->percpu_counters
[cpu
];
530 layout
= &counter
->global_counters
;
533 case COUNTER_ALLOC_GLOBAL
:
536 layout
= &counter
->global_counters
;
541 if (caa_unlikely(!layout
->counters
))
544 switch (config
->counter_size
) {
545 case COUNTER_SIZE_8_BIT
:
547 int8_t *int_p
= (int8_t *) layout
->counters
+ index
;
548 CMM_STORE_SHARED(*int_p
, 0);
551 case COUNTER_SIZE_16_BIT
:
553 int16_t *int_p
= (int16_t *) layout
->counters
+ index
;
554 CMM_STORE_SHARED(*int_p
, 0);
557 case COUNTER_SIZE_32_BIT
:
559 int32_t *int_p
= (int32_t *) layout
->counters
+ index
;
560 CMM_STORE_SHARED(*int_p
, 0);
563 #if CAA_BITS_PER_LONG == 64
564 case COUNTER_SIZE_64_BIT
:
566 int64_t *int_p
= (int64_t *) layout
->counters
+ index
;
567 CMM_STORE_SHARED(*int_p
, 0);
574 lttng_bitmap_clear_bit(index
, layout
->overflow_bitmap
);
575 lttng_bitmap_clear_bit(index
, layout
->underflow_bitmap
);
579 int lttng_counter_clear(const struct lib_counter_config
*config
,
580 struct lib_counter
*counter
,
581 const size_t *dimension_indexes
)
585 switch (config
->alloc
) {
586 case COUNTER_ALLOC_PER_CPU
:
588 case COUNTER_ALLOC_GLOBAL
: /* Fallthrough */
589 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
590 /* Clear global counter. */
591 ret
= lttng_counter_clear_cpu(config
, counter
, dimension_indexes
, -1);
599 switch (config
->alloc
) {
600 case COUNTER_ALLOC_PER_CPU
: /* Fallthrough */
601 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
602 for_each_possible_cpu(cpu
) {
603 ret
= lttng_counter_clear_cpu(config
, counter
, dimension_indexes
, cpu
);
608 case COUNTER_ALLOC_GLOBAL
: