/*
 * counter/counter-api.h
 *
 * LTTng Counters API, requiring counter/config.h
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _LTTNG_COUNTER_API_H
#define _LTTNG_COUNTER_API_H

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

#include "counter-internal.h"
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include <lttng/bitmap.h>
#include "../libringbuffer/getcpu.h"
/*
 * Using unsigned arithmetic because overflow is defined.
 */
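/*
 * Unsigned addition wraps modulo 2^N, which is well defined, whereas signed
 * integer overflow is undefined behaviour in C. The sums below are therefore
 * computed on the matching unsigned type and converted back to the signed
 * counter type: in the 8-bit case, for example, (uint8_t) 255 + (uint8_t) 1
 * wraps to 0 instead of invoking undefined behaviour.
 */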
static inline int __lttng_counter_add(const struct lib_counter_config *config,
				      enum lib_counter_config_alloc alloc,
				      enum lib_counter_config_sync sync,
				      struct lib_counter *counter,
				      const size_t *dimension_indexes, int64_t v,
				      int64_t *remainder)
{
	size_t index;
	bool overflow = false, underflow = false;
	struct lib_counter_layout *layout;
	int64_t move_sum = 0;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (alloc) {
	case COUNTER_ALLOC_PER_CPU:
		layout = &counter->percpu_counters[lttng_ust_get_cpu()];
		break;
	case COUNTER_ALLOC_GLOBAL:
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;
	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		int8_t old, n, res;
		int8_t global_sum_step = counter->global_sum_step.s8;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				if (caa_unlikely(n > (int8_t) global_sum_step))
					move_sum = (int8_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int8_t) global_sum_step))
					move_sum = -((int8_t) global_sum_step / 2);
				n -= (int8_t) move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		if (v > 0 && (v >= UINT8_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT8_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		int16_t old, n, res;
		int16_t global_sum_step = counter->global_sum_step.s16;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				if (caa_unlikely(n > (int16_t) global_sum_step))
					move_sum = (int16_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int16_t) global_sum_step))
					move_sum = -((int16_t) global_sum_step / 2);
				n -= (int16_t) move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		if (v > 0 && (v >= UINT16_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT16_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		int32_t old, n, res;
		int32_t global_sum_step = counter->global_sum_step.s32;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				if (caa_unlikely(n > (int32_t) global_sum_step))
					move_sum = (int32_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int32_t) global_sum_step))
					move_sum = -((int32_t) global_sum_step / 2);
				n -= (int32_t) move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		if (v > 0 && (v >= UINT32_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT32_MAX || n > old))
			underflow = true;
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		int64_t old, n, res;
		int64_t global_sum_step = counter->global_sum_step.s64;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				if (caa_unlikely(n > (int64_t) global_sum_step))
					move_sum = (int64_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int64_t) global_sum_step))
					move_sum = -((int64_t) global_sum_step / 2);
				n -= (int64_t) move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		if (v > 0 && n < old)
			overflow = true;
		else if (v < 0 && n > old)
			underflow = true;
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
		lttng_bitmap_set_bit(index, layout->overflow_bitmap);
	else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
		lttng_bitmap_set_bit(index, layout->underflow_bitmap);
	if (remainder)
		*remainder = move_sum;
	return 0;
}
static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
					     struct lib_counter *counter,
					     const size_t *dimension_indexes, int64_t v)
{
	int64_t move_sum;
	int ret;

	ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
				  counter, dimension_indexes, v, &move_sum);
	if (caa_unlikely(ret))
		return ret;
	if (caa_unlikely(move_sum))
		return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
					   counter, dimension_indexes, move_sum, NULL);
	return 0;
}
static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
					     struct lib_counter *counter,
					     const size_t *dimension_indexes, int64_t v)
{
	return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
				   dimension_indexes, v, NULL);
}
static inline int lttng_counter_add(const struct lib_counter_config *config,
				    struct lib_counter *counter,
				    const size_t *dimension_indexes, int64_t v)
{
	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
	case COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_global(config, counter, dimension_indexes, v);
	default:
		return -EINVAL;
	}
}
static inline int lttng_counter_inc(const struct lib_counter_config *config,
				    struct lib_counter *counter,
				    const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, 1);
}
static inline int lttng_counter_dec(const struct lib_counter_config *config,
				    struct lib_counter *counter,
				    const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, -1);
}
#endif /* _LTTNG_COUNTER_API_H */