/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <errno.h>
#include "counter.h"
#include "counter-internal.h"
#include <urcu/system.h>
#include <urcu/compiler.h>
#include <stdbool.h>

#include "common/macros.h"
#include "common/align.h"
#include "common/bitmap.h"

#include "common/smp.h"
#include "common/populate.h"
#include "shm.h"

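/*
 * Overview: a lib_counter is a multi-dimensional array of 8-, 16-, 32-
 * or 64-bit counters backed by shared memory.  Depending on the
 * configuration, a counter has a single global layout, one layout per
 * possible CPU, or both.  Each layout maps the counter values followed
 * by per-element overflow and underflow bitmaps.
 */
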
static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}

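/*
 * Compute per-dimension strides in row-major order: the innermost
 * dimension has stride 1, and each outer dimension's stride is the
 * product of the element counts of all inner dimensions.
 */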
static int lttng_counter_init_stride(
		const struct lib_counter_config *config __attribute__((unused)),
		struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* nr_elem should be minimum 1 for each dimension. */
		if (!nr_elem)
			return -EINVAL;
		/* Reject stride overflow before it can occur. */
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
		stride *= nr_elem;
	}
	return 0;
}

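/*
 * Shared memory layout for one set of counters, in mapping order: the
 * counter array (counter_size * nr_elem bytes), then an overflow
 * bitmap and an underflow bitmap of one bit per element, each rounded
 * up to a whole number of bytes.
 */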
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;
	size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
	struct lttng_counter_shm_object *shm_object;

	if (shm_fd < 0)
		return 0;	/* Skip, will be populated later. */

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = &counter->percpu_counters[cpu];
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->shm_fd = shm_fd;
	counters_offset = shm_length;
	shm_length += counter_size * nr_elem;
	overflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	underflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	layout->shm_len = shm_length;
	if (counter->is_daemon) {
		/* Allocate and clear shared memory. */
		shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
			shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu,
			lttng_ust_map_populate_cpu_is_enabled(cpu));
		if (!shm_object)
			return -ENOMEM;
	} else {
		/* Map pre-existing shared memory. */
		shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
			shm_fd, shm_length, lttng_ust_map_populate_cpu_is_enabled(cpu));
		if (!shm_object)
			return -ENOMEM;
	}
	layout->counters = shm_object->memory_map + counters_offset;
	layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
	layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
	return 0;
}

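/*
 * Attach a shared memory file descriptor received after creation to
 * the global layout.  Returns -EBUSY if the layout already has one;
 * on success, received_shm is bumped towards the expected_shm count
 * checked by lttng_counter_ready().
 */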
int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;
	int ret;

	if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
		return -EINVAL;
	layout = &counter->global_counters;
	if (layout->shm_fd >= 0)
		return -EBUSY;
	ret = lttng_counter_layout_init(counter, -1, fd);
	if (!ret)
		counter->received_shm++;
	return ret;
}

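/* Same as lttng_counter_set_global_shm(), but for one per-CPU layout. */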
int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;
	int ret;

	if (cpu < 0 || cpu >= get_possible_cpus_array_len())
		return -EINVAL;

	if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
		return -EINVAL;
	layout = &counter->percpu_counters[cpu];
	if (layout->shm_fd >= 0)
		return -EBUSY;
	ret = lttng_counter_layout_init(counter, cpu, fd);
	if (!ret)
		counter->received_shm++;
	return ret;
}

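/*
 * Record the global sum step, rejecting negative values and values
 * that do not fit within the configured counter size.
 */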
static
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
		int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > INT8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > INT16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > INT32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

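/*
 * Sanity-check creation arguments: 64-bit counters require a 64-bit
 * host, a global sum step is only meaningful when both global and
 * per-CPU layouts are allocated, and file descriptors may only be
 * passed for allocation types the configuration actually requests.
 */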
static
int validate_args(const struct lib_counter_config *config,
		size_t nr_dimensions __attribute__((unused)),
		const size_t *max_nr_elem,
		int64_t global_sum_step,
		int global_counter_fd,
		int nr_counter_cpu_fds,
		const int *counter_cpu_fds)
{
	int nr_cpus = get_possible_cpus_array_len();

	if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		WARN_ON_ONCE(1);
		return -1;
	}
	if (!max_nr_elem)
		return -1;
	/*
	 * global sum step is only useful with allocating both per-cpu
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
		return -1;
	if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
		return -1;
	return 0;
}

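/*
 * Create a counter: validate arguments, allocate the dimension and
 * layout bookkeeping, compute strides and the total element count,
 * then initialize the global and/or per-CPU shm layouts for any file
 * descriptors provided up front (fds may also arrive later through
 * lttng_counter_set_global_shm()/lttng_counter_set_cpu_shm()).
 *
 * Illustrative call-site sketch (hypothetical values; fd setup and
 * error handling elided; only the `alloc' and `counter_size' fields
 * used in this file are shown):
 *
 *	struct lib_counter_config config = {
 *		.alloc = COUNTER_ALLOC_GLOBAL | COUNTER_ALLOC_PER_CPU,
 *		.counter_size = COUNTER_SIZE_32_BIT,
 *	};
 *	size_t max_nr_elem[] = { 128 };	// one dimension, 128 elements
 *
 *	struct lib_counter *c = lttng_counter_create(&config, 1,
 *		max_nr_elem, 0, global_fd,
 *		get_possible_cpus_array_len(), cpu_fds, false);
 */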
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step,
		int global_counter_fd,
		int nr_counter_cpu_fds,
		const int *counter_cpu_fds,
		bool is_daemon)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;
	int nr_handles = 0;
	int nr_cpus = get_possible_cpus_array_len();
	bool populate = lttng_ust_map_populate_is_enabled();

	if (validate_args(config, nr_dimensions, max_nr_elem,
			global_sum_step, global_counter_fd, nr_counter_cpu_fds,
			counter_cpu_fds))
		return NULL;
	counter = zmalloc_populate(sizeof(struct lib_counter), populate);
	if (!counter)
		return NULL;
	counter->global_counters.shm_fd = -1;
	counter->config = *config;
	counter->is_daemon = is_daemon;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = zmalloc_populate(nr_dimensions * sizeof(*counter->dimensions), populate);
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = zmalloc_populate(sizeof(struct lib_counter_layout) * nr_cpus, populate);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
		for_each_possible_cpu(cpu)
			counter->percpu_counters[cpu].shm_fd = -1;
	}

	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	/* TODO: saturation values. */
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;

	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		nr_handles++;
	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		nr_handles += nr_cpus;
	counter->expected_shm = nr_handles;
	/* Allocate table for global and per-cpu counters. */
	counter->object_table = lttng_counter_shm_object_table_create(nr_handles, populate);
	if (!counter->object_table)
		goto error_alloc_object_table;

	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1, global_counter_fd);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
		for_each_possible_cpu(cpu) {
			ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
error_alloc_object_table:
error_init_stride:
	free(counter->percpu_counters);
error_alloc_percpu:
	free(counter->dimensions);
error_dimensions:
error_sum_step:
	free(counter);
	return NULL;
}

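/*
 * Tear down a counter: release the per-CPU layout array, the shm
 * object table, the dimension descriptors, and the counter itself.
 */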
void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;

	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		free(counter->percpu_counters);
	lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
	free(counter->dimensions);
	free(counter);
}

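/*
 * Report the shm file descriptor and mapped length of the global
 * layout, or -1 if no shm has been set up for it yet.
 */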
int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
{
	int shm_fd;

	shm_fd = counter->global_counters.shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = counter->global_counters.shm_len;
	return 0;
}

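/* Per-CPU counterpart of lttng_counter_get_global_shm(). */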
int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
{
	struct lib_counter_layout *layout;
	int shm_fd;

	if (cpu < 0 || cpu >= get_possible_cpus_array_len())
		return -1;
	layout = &counter->percpu_counters[cpu];
	shm_fd = layout->shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = layout->shm_len;
	return 0;
}

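/*
 * A counter is ready for use once every expected shm mapping (global
 * and/or per-CPU) has been received.
 */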
bool lttng_counter_ready(struct lib_counter *counter)
{
	return counter->received_shm == counter->expected_shm;
}

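/*
 * Read a single counter element.  cpu >= 0 selects a per-CPU layout;
 * cpu == -1 selects the global layout when it is allocated.  Returns
 * -EOVERFLOW for out-of-range indexes and -ENODEV when the selected
 * layout has no shm mapped yet.
 */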
int lttng_counter_read(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu, int64_t *value, bool *overflow,
		bool *underflow)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= get_possible_cpus_array_len())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= get_possible_cpus_array_len())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = CMM_LOAD_SHARED(*int_p);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	*overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
	*underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
	return 0;
}

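/*
 * Aggregate one element across the global layout (if any) and all
 * per-CPU layouts.  Overflow/underflow are reported both from the
 * per-layout bitmaps and from the 64-bit summation itself, and the
 * sum is finally range-checked and truncated to the configured
 * counter size.
 */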
int lttng_counter_aggregate(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int64_t *value, bool *overflow,
		bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
				-1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		for_each_possible_cpu(cpu) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
					cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (sum > INT8_MAX)
			*overflow = true;
		if (sum < INT8_MIN)
			*underflow = true;
		sum = (int8_t) sum;	/* Truncate sum. */
		break;
	case COUNTER_SIZE_16_BIT:
		if (sum > INT16_MAX)
			*overflow = true;
		if (sum < INT16_MIN)
			*underflow = true;
		sum = (int16_t) sum;	/* Truncate sum. */
		break;
	case COUNTER_SIZE_32_BIT:
		if (sum > INT32_MAX)
			*overflow = true;
		if (sum < INT32_MIN)
			*underflow = true;
		sum = (int32_t) sum;	/* Truncate sum. */
		break;
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
		break;
#endif
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}

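/*
 * Clear one element in one layout (cpu == -1 for the global layout)
 * along with its overflow and underflow bits.
 */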
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= get_possible_cpus_array_len())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= get_possible_cpus_array_len())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
	lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
	return 0;
}

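/* Clear one element across the global and all per-CPU layouts. */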
int lttng_counter_clear(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		break;
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		for_each_possible_cpu(cpu) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}