/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <lttng/tracer.h>
#include <linux/cpumask.h>
#include <counter/counter.h>
#include <counter/counter-internal.h>
#include <wrapper/compiler_attributes.h>
#include <wrapper/vmalloc.h>
#include <wrapper/limits.h>
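
/*
 * A lib_counter is a dense multi-dimensional array of 8/16/32/64-bit
 * counters, allocated globally, per-cpu, or both (see the
 * COUNTER_ALLOC_* flags), with one overflow bit and one underflow bit
 * per counter index.
 */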

static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}
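
/*
 * Compute each dimension's stride: the innermost (last) dimension has
 * stride 1 and each outer dimension's stride is the product of the
 * element counts of the dimensions after it, so that a flat index is
 * the sum of index * stride over all dimensions. For example, with
 * dimensions {3, 4}, the strides are {4, 1}.
 */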
static int lttng_counter_init_stride(const struct lib_counter_config *config,
				     struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* nr_elem should be minimum 1 for each dimension. */
		if (!nr_elem)
			return -EINVAL;
		stride *= nr_elem;
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
	}
	return 0;
}
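
/*
 * Allocate the counters array and the overflow/underflow bitmaps for
 * one layout, each rounded up to the internode cacheline size and
 * allocated on the target cpu's NUMA node. cpu == -1 initializes the
 * global layout.
 */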
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->counters = lttng_kvzalloc_node(ALIGN(counter_size * nr_elem,
						     1 << INTERNODE_CACHE_SHIFT),
					       GFP_KERNEL | __GFP_NOWARN,
					       cpu_to_node(max(cpu, 0)));
	if (!layout->counters)
		return -ENOMEM;
	layout->overflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
						     1 << INTERNODE_CACHE_SHIFT),
					       GFP_KERNEL | __GFP_NOWARN,
					       cpu_to_node(max(cpu, 0)));
	if (!layout->overflow_bitmap)
		return -ENOMEM;
	layout->underflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
						     1 << INTERNODE_CACHE_SHIFT),
					       GFP_KERNEL | __GFP_NOWARN,
					       cpu_to_node(max(cpu, 0)));
	if (!layout->underflow_bitmap)
		return -ENOMEM;
	return 0;
}
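
/* Free one layout's counters array and bitmaps (cpu == -1: global layout). */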
static void lttng_counter_layout_fini(struct lib_counter *counter, int cpu)
{
	struct lib_counter_layout *layout;

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
	lttng_kvfree(layout->counters);
	lttng_kvfree(layout->overflow_bitmap);
	lttng_kvfree(layout->underflow_bitmap);
}
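
/*
 * The global sum step must be non-negative and must fit within the
 * configured counter size.
 */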
static
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
				      int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > S8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > S16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > S32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
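
/*
 * Reject 64-bit counters on 32-bit architectures, a missing max_nr_elem
 * array, and a non-zero global sum step unless both per-cpu and global
 * counters are allocated, since the sum step is only useful when a
 * global counter aggregates per-cpu counters.
 */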
static
int validate_args(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step)
{
	if (BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		WARN_ON_ONCE(1);
		return -1;
	}
	if (!max_nr_elem)
		return -1;
	/*
	 * global sum step is only useful with allocating both per-cpu
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	return 0;
}
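
/*
 * Create a counter described by config, with nr_dimensions dimensions
 * whose sizes are given by the max_nr_elem array. A minimal usage
 * sketch (illustrative only; the config initializer below assumes the
 * lib_counter_config fields used in this file, and error handling is
 * elided):
 *
 *	static const struct lib_counter_config config = {
 *		.alloc = COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL,
 *		.counter_size = COUNTER_SIZE_32_BIT,
 *	};
 *	const size_t max_nr_elem[] = { 128 };
 *	struct lib_counter *c;
 *
 *	c = lttng_counter_create(&config, 1, max_nr_elem, 0);
 *	if (!c)
 *		return -ENOMEM;
 */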
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
					 size_t nr_dimensions,
					 const size_t *max_nr_elem,
					 int64_t global_sum_step)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;

	if (validate_args(config, nr_dimensions, max_nr_elem, global_sum_step))
		return NULL;
	counter = kzalloc(sizeof(struct lib_counter), GFP_KERNEL);
	if (!counter)
		return NULL;
	counter->config = *config;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = kzalloc(nr_dimensions * sizeof(*counter->dimensions), GFP_KERNEL);
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = alloc_percpu(struct lib_counter_layout);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
	}

	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	//TODO saturation values.
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;
	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			ret = lttng_counter_layout_init(counter, cpu);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		for (cpu = 0; cpu < num_possible_cpus(); cpu++)
			lttng_counter_layout_fini(counter, cpu);
	}
	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		lttng_counter_layout_fini(counter, -1);
error_init_stride:
	free_percpu(counter->percpu_counters);
error_alloc_percpu:
	kfree(counter->dimensions);
error_dimensions:
error_sum_step:
	kfree(counter);
	return NULL;
}
EXPORT_SYMBOL_GPL(lttng_counter_create);
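
/* Free all layouts, the per-cpu structure, the dimensions array and the counter. */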
void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;
	int cpu;

	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		for (cpu = 0; cpu < num_possible_cpus(); cpu++)
			lttng_counter_layout_fini(counter, cpu);
		free_percpu(counter->percpu_counters);
	}
	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		lttng_counter_layout_fini(counter, -1);
	kfree(counter->dimensions);
	kfree(counter);
}
EXPORT_SYMBOL_GPL(lttng_counter_destroy);
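
/*
 * Read one cell. For per-cpu allocation, cpu selects which per-cpu
 * counter to read; with a global layout allocated, cpu == -1 reads the
 * global counter. The cell's overflow and underflow bits are reported
 * through *overflow and *underflow.
 */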
int lttng_counter_read(const struct lib_counter_config *config,
		       struct lib_counter *counter,
		       const size_t *dimension_indexes,
		       int cpu, int64_t *value, bool *overflow,
		       bool *underflow)
{
	struct lib_counter_layout *layout;
	size_t index;

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= num_possible_cpus())
			return -EINVAL;
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= num_possible_cpus())
				return -EINVAL;
			layout = per_cpu_ptr(counter->percpu_counters, cpu);
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
#if BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = READ_ONCE(*int_p);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	*overflow = test_bit(index, layout->overflow_bitmap);
	*underflow = test_bit(index, layout->underflow_bitmap);
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_read);
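
/*
 * Aggregate one cell: read the global counter (when allocated) and each
 * possible cpu's counter, and sum them. The sum is performed on
 * unsigned types, where wraparound is well-defined; a positive term
 * that decreases the sum flags overflow, and a negative term that
 * increases it flags underflow.
 */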
int lttng_counter_aggregate(const struct lib_counter_config *config,
			    struct lib_counter *counter,
			    const size_t *dimension_indexes,
			    int64_t *value, bool *overflow,
			    bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		lttng_fallthrough;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
					 -1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		lttng_fallthrough;
	case COUNTER_ALLOC_PER_CPU:
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
						 cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_aggregate);
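
/* Reset one cell of one layout to 0 and clear its overflow/underflow bits. */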
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
			    struct lib_counter *counter,
			    const size_t *dimension_indexes,
			    int cpu)
{
	struct lib_counter_layout *layout;
	size_t index;

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= num_possible_cpus())
			return -EINVAL;
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= num_possible_cpus())
				return -EINVAL;
			layout = per_cpu_ptr(counter->percpu_counters, cpu);
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
#if BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	clear_bit(index, layout->overflow_bitmap);
	clear_bit(index, layout->underflow_bitmap);
	return 0;
}
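
/* Clear one cell in the global layout (if any) and on every possible cpu. */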
int lttng_counter_clear(const struct lib_counter_config *config,
			struct lib_counter *counter,
			const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		lttng_fallthrough;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		lttng_fallthrough;
	case COUNTER_ALLOC_PER_CPU:
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_clear);
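
/* Counter geometry accessors. */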
int lttng_counter_get_nr_dimensions(const struct lib_counter_config *config,
		struct lib_counter *counter,
		size_t *nr_dimensions)
{
	*nr_dimensions = counter->nr_dimensions;
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_get_nr_dimensions);

int lttng_counter_get_max_nr_elem(const struct lib_counter_config *config,
		struct lib_counter *counter,
		size_t *max_nr_elem)	/* array of size nr_dimensions */
{
	size_t dimension;

	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		max_nr_elem[dimension] = lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_get_max_nr_elem);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng counter library");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);