/*
 * counter/counter-api.h
 *
 * LTTng Counters API, requiring counter/config.h
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
27 #ifndef _LTTNG_COUNTER_API_H
28 #define _LTTNG_COUNTER_API_H
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include <lttng/bitmap.h>

#include "counter-internal.h"
#include "../libringbuffer/getcpu.h"
/*
 * Using unsigned arithmetic because overflow is defined.
 */
42 static inline int __lttng_counter_add(const struct lib_counter_config
*config
,
43 enum lib_counter_config_alloc alloc
,
44 enum lib_counter_config_sync sync
,
45 struct lib_counter
*counter
,
46 const size_t *dimension_indexes
, int64_t v
,
50 bool overflow
= false, underflow
= false;
51 struct lib_counter_layout
*layout
;
54 if (caa_unlikely(lttng_counter_validate_indexes(config
, counter
, dimension_indexes
)))
56 index
= lttng_counter_get_index(config
, counter
, dimension_indexes
);
59 case COUNTER_ALLOC_PER_CPU
:
60 layout
= &counter
->percpu_counters
[lttng_ust_get_cpu()];
62 case COUNTER_ALLOC_GLOBAL
:
63 layout
= &counter
->global_counters
;
68 if (caa_unlikely(!layout
->counters
))
71 switch (config
->counter_size
) {
72 case COUNTER_SIZE_8_BIT
:
74 int8_t *int_p
= (int8_t *) layout
->counters
+ index
;
76 int8_t global_sum_step
= counter
->global_sum_step
.s8
;
80 case COUNTER_SYNC_PER_CPU
:
85 n
= (int8_t) ((uint8_t) old
+ (uint8_t) v
);
86 if (caa_unlikely(n
> (int8_t) global_sum_step
))
87 move_sum
= (int8_t) global_sum_step
/ 2;
88 else if (caa_unlikely(n
< -(int8_t) global_sum_step
))
89 move_sum
= -((int8_t) global_sum_step
/ 2);
91 res
= uatomic_cmpxchg(int_p
, old
, n
);
95 case COUNTER_SYNC_GLOBAL
:
99 n
= (int8_t) ((uint8_t) old
+ (uint8_t) v
);
100 res
= uatomic_cmpxchg(int_p
, old
, n
);
101 } while (old
!= res
);
107 if (v
> 0 && (v
>= UINT8_MAX
|| n
< old
))
109 else if (v
< 0 && (v
<= -(int64_t) UINT8_MAX
|| n
> old
))
113 case COUNTER_SIZE_16_BIT
:
115 int16_t *int_p
= (int16_t *) layout
->counters
+ index
;
117 int16_t global_sum_step
= counter
->global_sum_step
.s16
;
121 case COUNTER_SYNC_PER_CPU
:
126 n
= (int16_t) ((uint16_t) old
+ (uint16_t) v
);
127 if (caa_unlikely(n
> (int16_t) global_sum_step
))
128 move_sum
= (int16_t) global_sum_step
/ 2;
129 else if (caa_unlikely(n
< -(int16_t) global_sum_step
))
130 move_sum
= -((int16_t) global_sum_step
/ 2);
132 res
= uatomic_cmpxchg(int_p
, old
, n
);
133 } while (old
!= res
);
136 case COUNTER_SYNC_GLOBAL
:
140 n
= (int16_t) ((uint16_t) old
+ (uint16_t) v
);
141 res
= uatomic_cmpxchg(int_p
, old
, n
);
142 } while (old
!= res
);
148 if (v
> 0 && (v
>= UINT16_MAX
|| n
< old
))
150 else if (v
< 0 && (v
<= -(int64_t) UINT16_MAX
|| n
> old
))
154 case COUNTER_SIZE_32_BIT
:
156 int32_t *int_p
= (int32_t *) layout
->counters
+ index
;
158 int32_t global_sum_step
= counter
->global_sum_step
.s32
;
162 case COUNTER_SYNC_PER_CPU
:
167 n
= (int32_t) ((uint32_t) old
+ (uint32_t) v
);
168 if (caa_unlikely(n
> (int32_t) global_sum_step
))
169 move_sum
= (int32_t) global_sum_step
/ 2;
170 else if (caa_unlikely(n
< -(int32_t) global_sum_step
))
171 move_sum
= -((int32_t) global_sum_step
/ 2);
173 res
= uatomic_cmpxchg(int_p
, old
, n
);
174 } while (old
!= res
);
177 case COUNTER_SYNC_GLOBAL
:
181 n
= (int32_t) ((uint32_t) old
+ (uint32_t) v
);
182 res
= uatomic_cmpxchg(int_p
, old
, n
);
183 } while (old
!= res
);
189 if (v
> 0 && (v
>= UINT32_MAX
|| n
< old
))
191 else if (v
< 0 && (v
<= -(int64_t) UINT32_MAX
|| n
> old
))
195 #if CAA_BITS_PER_LONG == 64
196 case COUNTER_SIZE_64_BIT
:
198 int64_t *int_p
= (int64_t *) layout
->counters
+ index
;
200 int64_t global_sum_step
= counter
->global_sum_step
.s64
;
204 case COUNTER_SYNC_PER_CPU
:
209 n
= (int64_t) ((uint64_t) old
+ (uint64_t) v
);
210 if (caa_unlikely(n
> (int64_t) global_sum_step
))
211 move_sum
= (int64_t) global_sum_step
/ 2;
212 else if (caa_unlikely(n
< -(int64_t) global_sum_step
))
213 move_sum
= -((int64_t) global_sum_step
/ 2);
215 res
= uatomic_cmpxchg(int_p
, old
, n
);
216 } while (old
!= res
);
219 case COUNTER_SYNC_GLOBAL
:
223 n
= (int64_t) ((uint64_t) old
+ (uint64_t) v
);
224 res
= uatomic_cmpxchg(int_p
, old
, n
);
225 } while (old
!= res
);
231 if (v
> 0 && n
< old
)
233 else if (v
< 0 && n
> old
)
241 if (caa_unlikely(overflow
&& !lttng_bitmap_test_bit(index
, layout
->overflow_bitmap
)))
242 lttng_bitmap_set_bit(index
, layout
->overflow_bitmap
);
243 else if (caa_unlikely(underflow
&& !lttng_bitmap_test_bit(index
, layout
->underflow_bitmap
)))
244 lttng_bitmap_set_bit(index
, layout
->underflow_bitmap
);
246 *remainder
= move_sum
;
250 static inline int __lttng_counter_add_percpu(const struct lib_counter_config
*config
,
251 struct lib_counter
*counter
,
252 const size_t *dimension_indexes
, int64_t v
)
257 ret
= __lttng_counter_add(config
, COUNTER_ALLOC_PER_CPU
, config
->sync
,
258 counter
, dimension_indexes
, v
, &move_sum
);
259 if (caa_unlikely(ret
))
261 if (caa_unlikely(move_sum
))
262 return __lttng_counter_add(config
, COUNTER_ALLOC_GLOBAL
, COUNTER_SYNC_GLOBAL
,
263 counter
, dimension_indexes
, move_sum
, NULL
);
267 static inline int __lttng_counter_add_global(const struct lib_counter_config
*config
,
268 struct lib_counter
*counter
,
269 const size_t *dimension_indexes
, int64_t v
)
271 return __lttng_counter_add(config
, COUNTER_ALLOC_GLOBAL
, config
->sync
, counter
,
272 dimension_indexes
, v
, NULL
);
275 static inline int lttng_counter_add(const struct lib_counter_config
*config
,
276 struct lib_counter
*counter
,
277 const size_t *dimension_indexes
, int64_t v
)
279 switch (config
->alloc
) {
280 case COUNTER_ALLOC_PER_CPU
: /* Fallthrough */
281 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
282 return __lttng_counter_add_percpu(config
, counter
, dimension_indexes
, v
);
283 case COUNTER_ALLOC_GLOBAL
:
284 return __lttng_counter_add_global(config
, counter
, dimension_indexes
, v
);
290 static inline int lttng_counter_inc(const struct lib_counter_config
*config
,
291 struct lib_counter
*counter
,
292 const size_t *dimension_indexes
)
294 return lttng_counter_add(config
, counter
, dimension_indexes
, 1);
297 static inline int lttng_counter_dec(const struct lib_counter_config
*config
,
298 struct lib_counter
*counter
,
299 const size_t *dimension_indexes
)
301 return lttng_counter_add(config
, counter
, dimension_indexes
, -1);
304 #endif /* _LTTNG_COUNTER_API_H */