/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <lttng/tracer.h>
11 #include <linux/cpumask.h>
12 #include <counter/counter.h>
13 #include <counter/counter-internal.h>
14 #include <wrapper/vmalloc.h>
15 #include <wrapper/limits.h>
17 static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension
*dimension
)
19 return dimension
->max_nr_elem
;
22 static int lttng_counter_init_stride(const struct lib_counter_config
*config
,
23 struct lib_counter
*counter
)
25 size_t nr_dimensions
= counter
->nr_dimensions
;
29 for (i
= nr_dimensions
- 1; i
>= 0; i
--) {
30 struct lib_counter_dimension
*dimension
= &counter
->dimensions
[i
];
33 nr_elem
= lttng_counter_get_dimension_nr_elements(dimension
);
34 dimension
->stride
= stride
;
35 /* nr_elem should be minimum 1 for each dimension. */
39 if (stride
> SIZE_MAX
/ nr_elem
)
45 static int lttng_counter_layout_init(struct lib_counter
*counter
, int cpu
)
47 struct lib_counter_layout
*layout
;
49 size_t nr_elem
= counter
->allocated_elem
;
52 layout
= &counter
->global_counters
;
54 layout
= per_cpu_ptr(counter
->percpu_counters
, cpu
);
55 switch (counter
->config
.counter_size
) {
56 case COUNTER_SIZE_8_BIT
:
57 case COUNTER_SIZE_16_BIT
:
58 case COUNTER_SIZE_32_BIT
:
59 case COUNTER_SIZE_64_BIT
:
60 counter_size
= (size_t) counter
->config
.counter_size
;
65 layout
->counters
= lttng_kvzalloc_node(ALIGN(counter_size
* nr_elem
,
66 1 << INTERNODE_CACHE_SHIFT
),
67 GFP_KERNEL
| __GFP_NOWARN
,
68 cpu_to_node(max(cpu
, 0)));
69 if (!layout
->counters
)
71 layout
->overflow_bitmap
= lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem
, 8) / 8,
72 1 << INTERNODE_CACHE_SHIFT
),
73 GFP_KERNEL
| __GFP_NOWARN
,
74 cpu_to_node(max(cpu
, 0)));
75 if (!layout
->overflow_bitmap
)
77 layout
->underflow_bitmap
= lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem
, 8) / 8,
78 1 << INTERNODE_CACHE_SHIFT
),
79 GFP_KERNEL
| __GFP_NOWARN
,
80 cpu_to_node(max(cpu
, 0)));
81 if (!layout
->underflow_bitmap
)
86 static void lttng_counter_layout_fini(struct lib_counter
*counter
, int cpu
)
88 struct lib_counter_layout
*layout
;
91 layout
= &counter
->global_counters
;
93 layout
= per_cpu_ptr(counter
->percpu_counters
, cpu
);
95 lttng_kvfree(layout
->counters
);
96 lttng_kvfree(layout
->overflow_bitmap
);
97 lttng_kvfree(layout
->underflow_bitmap
);
101 int lttng_counter_set_global_sum_step(struct lib_counter
*counter
,
102 int64_t global_sum_step
)
104 if (global_sum_step
< 0)
107 switch (counter
->config
.counter_size
) {
108 case COUNTER_SIZE_8_BIT
:
109 if (global_sum_step
> S8_MAX
)
111 counter
->global_sum_step
.s8
= (int8_t) global_sum_step
;
113 case COUNTER_SIZE_16_BIT
:
114 if (global_sum_step
> S16_MAX
)
116 counter
->global_sum_step
.s16
= (int16_t) global_sum_step
;
118 case COUNTER_SIZE_32_BIT
:
119 if (global_sum_step
> S32_MAX
)
121 counter
->global_sum_step
.s32
= (int32_t) global_sum_step
;
123 case COUNTER_SIZE_64_BIT
:
124 counter
->global_sum_step
.s64
= global_sum_step
;
134 int validate_args(const struct lib_counter_config
*config
,
135 size_t nr_dimensions
,
136 const size_t *max_nr_elem
,
137 int64_t global_sum_step
)
139 if (BITS_PER_LONG
!= 64 && config
->counter_size
== COUNTER_SIZE_64_BIT
) {
146 * global sum step is only useful with allocating both per-cpu
147 * and global counters.
149 if (global_sum_step
&& (!(config
->alloc
& COUNTER_ALLOC_GLOBAL
) ||
150 !(config
->alloc
& COUNTER_ALLOC_PER_CPU
)))
155 struct lib_counter
*lttng_counter_create(const struct lib_counter_config
*config
,
156 size_t nr_dimensions
,
157 const size_t *max_nr_elem
,
158 int64_t global_sum_step
)
160 struct lib_counter
*counter
;
161 size_t dimension
, nr_elem
= 1;
164 if (validate_args(config
, nr_dimensions
, max_nr_elem
, global_sum_step
))
166 counter
= kzalloc(sizeof(struct lib_counter
), GFP_KERNEL
);
169 counter
->config
= *config
;
170 if (lttng_counter_set_global_sum_step(counter
, global_sum_step
))
172 counter
->nr_dimensions
= nr_dimensions
;
173 counter
->dimensions
= kzalloc(nr_dimensions
* sizeof(*counter
->dimensions
), GFP_KERNEL
);
174 if (!counter
->dimensions
)
175 goto error_dimensions
;
176 for (dimension
= 0; dimension
< nr_dimensions
; dimension
++)
177 counter
->dimensions
[dimension
].max_nr_elem
= max_nr_elem
[dimension
];
178 if (config
->alloc
& COUNTER_ALLOC_PER_CPU
) {
179 counter
->percpu_counters
= alloc_percpu(struct lib_counter_layout
);
180 if (!counter
->percpu_counters
)
181 goto error_alloc_percpu
;
184 if (lttng_counter_init_stride(config
, counter
))
185 goto error_init_stride
;
186 //TODO saturation values.
187 for (dimension
= 0; dimension
< counter
->nr_dimensions
; dimension
++)
188 nr_elem
*= lttng_counter_get_dimension_nr_elements(&counter
->dimensions
[dimension
]);
189 counter
->allocated_elem
= nr_elem
;
190 if (config
->alloc
& COUNTER_ALLOC_GLOBAL
) {
191 ret
= lttng_counter_layout_init(counter
, -1); /* global */
193 goto layout_init_error
;
195 if (config
->alloc
& COUNTER_ALLOC_PER_CPU
) {
196 //TODO: integrate with CPU hotplug and online cpus
197 for (cpu
= 0; cpu
< num_possible_cpus(); cpu
++) {
198 ret
= lttng_counter_layout_init(counter
, cpu
);
200 goto layout_init_error
;
206 if (config
->alloc
& COUNTER_ALLOC_PER_CPU
) {
207 for (cpu
= 0; cpu
< num_possible_cpus(); cpu
++)
208 lttng_counter_layout_fini(counter
, cpu
);
210 if (config
->alloc
& COUNTER_ALLOC_GLOBAL
)
211 lttng_counter_layout_fini(counter
, -1);
213 free_percpu(counter
->percpu_counters
);
215 kfree(counter
->dimensions
);
221 EXPORT_SYMBOL_GPL(lttng_counter_create
);
223 void lttng_counter_destroy(struct lib_counter
*counter
)
225 struct lib_counter_config
*config
= &counter
->config
;
228 if (config
->alloc
& COUNTER_ALLOC_PER_CPU
) {
229 for (cpu
= 0; cpu
< num_possible_cpus(); cpu
++)
230 lttng_counter_layout_fini(counter
, cpu
);
231 free_percpu(counter
->percpu_counters
);
233 if (config
->alloc
& COUNTER_ALLOC_GLOBAL
)
234 lttng_counter_layout_fini(counter
, -1);
235 kfree(counter
->dimensions
);
238 EXPORT_SYMBOL_GPL(lttng_counter_destroy
);
240 int lttng_counter_read(const struct lib_counter_config
*config
,
241 struct lib_counter
*counter
,
242 const size_t *dimension_indexes
,
243 int cpu
, int64_t *value
, bool *overflow
,
246 struct lib_counter_layout
*layout
;
249 if (unlikely(lttng_counter_validate_indexes(config
, counter
, dimension_indexes
)))
251 index
= lttng_counter_get_index(config
, counter
, dimension_indexes
);
253 switch (config
->alloc
) {
254 case COUNTER_ALLOC_PER_CPU
:
255 if (cpu
< 0 || cpu
>= num_possible_cpus())
257 layout
= per_cpu_ptr(counter
->percpu_counters
, cpu
);
259 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
261 if (cpu
>= num_possible_cpus())
263 layout
= per_cpu_ptr(counter
->percpu_counters
, cpu
);
265 layout
= &counter
->global_counters
;
268 case COUNTER_ALLOC_GLOBAL
:
271 layout
= &counter
->global_counters
;
277 switch (config
->counter_size
) {
278 case COUNTER_SIZE_8_BIT
:
280 int8_t *int_p
= (int8_t *) layout
->counters
+ index
;
281 *value
= (int64_t) READ_ONCE(*int_p
);
284 case COUNTER_SIZE_16_BIT
:
286 int16_t *int_p
= (int16_t *) layout
->counters
+ index
;
287 *value
= (int64_t) READ_ONCE(*int_p
);
290 case COUNTER_SIZE_32_BIT
:
292 int32_t *int_p
= (int32_t *) layout
->counters
+ index
;
293 *value
= (int64_t) READ_ONCE(*int_p
);
296 #if BITS_PER_LONG == 64
297 case COUNTER_SIZE_64_BIT
:
299 int64_t *int_p
= (int64_t *) layout
->counters
+ index
;
300 *value
= READ_ONCE(*int_p
);
307 *overflow
= test_bit(index
, layout
->overflow_bitmap
);
308 *underflow
= test_bit(index
, layout
->underflow_bitmap
);
311 EXPORT_SYMBOL_GPL(lttng_counter_read
);
313 int lttng_counter_aggregate(const struct lib_counter_config
*config
,
314 struct lib_counter
*counter
,
315 const size_t *dimension_indexes
,
316 int64_t *value
, bool *overflow
,
326 switch (config
->alloc
) {
327 case COUNTER_ALLOC_GLOBAL
: /* Fallthrough */
328 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
329 /* Read global counter. */
330 ret
= lttng_counter_read(config
, counter
, dimension_indexes
,
338 case COUNTER_ALLOC_PER_CPU
:
342 switch (config
->alloc
) {
343 case COUNTER_ALLOC_GLOBAL
:
345 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
: /* Fallthrough */
346 case COUNTER_ALLOC_PER_CPU
:
347 //TODO: integrate with CPU hotplug and online cpus
348 for (cpu
= 0; cpu
< num_possible_cpus(); cpu
++) {
351 ret
= lttng_counter_read(config
, counter
, dimension_indexes
,
357 /* Overflow is defined on unsigned types. */
358 sum
= (int64_t) ((uint64_t) old
+ (uint64_t) v
);
359 if (v
> 0 && sum
< old
)
361 else if (v
< 0 && sum
> old
)
371 EXPORT_SYMBOL_GPL(lttng_counter_aggregate
);
374 int lttng_counter_clear_cpu(const struct lib_counter_config
*config
,
375 struct lib_counter
*counter
,
376 const size_t *dimension_indexes
,
379 struct lib_counter_layout
*layout
;
382 if (unlikely(lttng_counter_validate_indexes(config
, counter
, dimension_indexes
)))
384 index
= lttng_counter_get_index(config
, counter
, dimension_indexes
);
386 switch (config
->alloc
) {
387 case COUNTER_ALLOC_PER_CPU
:
388 if (cpu
< 0 || cpu
>= num_possible_cpus())
390 layout
= per_cpu_ptr(counter
->percpu_counters
, cpu
);
392 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
394 if (cpu
>= num_possible_cpus())
396 layout
= per_cpu_ptr(counter
->percpu_counters
, cpu
);
398 layout
= &counter
->global_counters
;
401 case COUNTER_ALLOC_GLOBAL
:
404 layout
= &counter
->global_counters
;
409 switch (config
->counter_size
) {
410 case COUNTER_SIZE_8_BIT
:
412 int8_t *int_p
= (int8_t *) layout
->counters
+ index
;
413 WRITE_ONCE(*int_p
, 0);
416 case COUNTER_SIZE_16_BIT
:
418 int16_t *int_p
= (int16_t *) layout
->counters
+ index
;
419 WRITE_ONCE(*int_p
, 0);
422 case COUNTER_SIZE_32_BIT
:
424 int32_t *int_p
= (int32_t *) layout
->counters
+ index
;
425 WRITE_ONCE(*int_p
, 0);
428 #if BITS_PER_LONG == 64
429 case COUNTER_SIZE_64_BIT
:
431 int64_t *int_p
= (int64_t *) layout
->counters
+ index
;
432 WRITE_ONCE(*int_p
, 0);
439 clear_bit(index
, layout
->overflow_bitmap
);
440 clear_bit(index
, layout
->underflow_bitmap
);
444 int lttng_counter_clear(const struct lib_counter_config
*config
,
445 struct lib_counter
*counter
,
446 const size_t *dimension_indexes
)
450 switch (config
->alloc
) {
451 case COUNTER_ALLOC_GLOBAL
: /* Fallthrough */
452 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
453 /* Clear global counter. */
454 ret
= lttng_counter_clear_cpu(config
, counter
, dimension_indexes
, -1);
458 case COUNTER_ALLOC_PER_CPU
:
462 switch (config
->alloc
) {
463 case COUNTER_ALLOC_GLOBAL
:
465 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
: /* Fallthrough */
466 case COUNTER_ALLOC_PER_CPU
:
467 //TODO: integrate with CPU hotplug and online cpus
468 for (cpu
= 0; cpu
< num_possible_cpus(); cpu
++) {
469 ret
= lttng_counter_clear_cpu(config
, counter
, dimension_indexes
, cpu
);
479 EXPORT_SYMBOL_GPL(lttng_counter_clear
);
481 int lttng_counter_get_nr_dimensions(const struct lib_counter_config
*config
,
482 struct lib_counter
*counter
,
483 size_t *nr_dimensions
)
485 *nr_dimensions
= counter
->nr_dimensions
;
488 EXPORT_SYMBOL_GPL(lttng_counter_get_nr_dimensions
);
490 int lttng_counter_get_max_nr_elem(const struct lib_counter_config
*config
,
491 struct lib_counter
*counter
,
492 size_t *max_nr_elem
) /* array of size nr_dimensions */
496 for (dimension
= 0; dimension
< counter
->nr_dimensions
; dimension
++)
497 max_nr_elem
[dimension
] = lttng_counter_get_dimension_nr_elements(&counter
->dimensions
[dimension
]);
500 EXPORT_SYMBOL_GPL(lttng_counter_get_max_nr_elem
);
502 MODULE_LICENSE("GPL and additional rights");
503 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
504 MODULE_DESCRIPTION("LTTng counter library");
505 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION
) "."
506 __stringify(LTTNG_MODULES_MINOR_VERSION
) "."
507 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION
)
508 LTTNG_MODULES_EXTRAVERSION
);