/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng Counters API, requiring counter/config.h
 */

#ifndef _LTTNG_COUNTER_API_H
#define _LTTNG_COUNTER_API_H

#include <stdint.h>
#include <stdbool.h>
#include <limits.h>
#include <errno.h>
#include "counter.h"
#include "counter-internal.h"
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include "common/bitmap.h"
#include "../libringbuffer/getcpu.h"

/*
 * Use unsigned arithmetic for the additions: unsigned overflow wraps
 * with behavior defined by the C standard, whereas signed integer
 * overflow is undefined.
 */
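/*
 * For instance (illustrative values, ignoring any global sum step):
 * adding v = 10 to an 8-bit counter holding 120 computes
 * (int8_t) ((uint8_t) 120 + (uint8_t) 10), which wraps to -126.
 * Observing n < old for a positive v is how the code below detects
 * the wrap and flags an overflow for that index.
 */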
static inline int __lttng_counter_add(const struct lib_counter_config *config,
                enum lib_counter_config_alloc alloc,
                enum lib_counter_config_sync sync,
                struct lib_counter *counter,
                const size_t *dimension_indexes, int64_t v,
                int64_t *remainder)
{
        size_t index;
        bool overflow = false, underflow = false;
        struct lib_counter_layout *layout;
        int64_t move_sum = 0;

        if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
                return -EOVERFLOW;
        index = lttng_counter_get_index(config, counter, dimension_indexes);

        switch (alloc) {
        case COUNTER_ALLOC_PER_CPU:
                layout = &counter->percpu_counters[lttng_ust_get_cpu()];
                break;
        case COUNTER_ALLOC_GLOBAL:
                layout = &counter->global_counters;
                break;
        default:
                return -EINVAL;
        }
        if (caa_unlikely(!layout->counters))
                return -ENODEV;

        switch (config->counter_size) {
        case COUNTER_SIZE_8_BIT:
        {
                int8_t *int_p = (int8_t *) layout->counters + index;
                int8_t old, n, res;
                int8_t global_sum_step = counter->global_sum_step.s8;

                res = *int_p;
                switch (sync) {
                case COUNTER_SYNC_PER_CPU:
                {
                        do {
                                move_sum = 0;
                                old = res;
                                n = (int8_t) ((uint8_t) old + (uint8_t) v);
                                if (caa_unlikely(n > (int8_t) global_sum_step))
                                        move_sum = (int8_t) global_sum_step / 2;
                                else if (caa_unlikely(n < -(int8_t) global_sum_step))
                                        move_sum = -((int8_t) global_sum_step / 2);
                                n -= move_sum;
                                res = uatomic_cmpxchg(int_p, old, n);
                        } while (old != res);
                        break;
                }
                case COUNTER_SYNC_GLOBAL:
                {
                        do {
                                old = res;
                                n = (int8_t) ((uint8_t) old + (uint8_t) v);
                                res = uatomic_cmpxchg(int_p, old, n);
                        } while (old != res);
                        break;
                }
                default:
                        return -EINVAL;
                }
                if (v > 0 && (v >= UINT8_MAX || n < old))
                        overflow = true;
                else if (v < 0 && (v <= -(int64_t) UINT8_MAX || n > old))
                        underflow = true;
                break;
        }
        case COUNTER_SIZE_16_BIT:
        {
                int16_t *int_p = (int16_t *) layout->counters + index;
                int16_t old, n, res;
                int16_t global_sum_step = counter->global_sum_step.s16;

                res = *int_p;
                switch (sync) {
                case COUNTER_SYNC_PER_CPU:
                {
                        do {
                                move_sum = 0;
                                old = res;
                                n = (int16_t) ((uint16_t) old + (uint16_t) v);
                                if (caa_unlikely(n > (int16_t) global_sum_step))
                                        move_sum = (int16_t) global_sum_step / 2;
                                else if (caa_unlikely(n < -(int16_t) global_sum_step))
                                        move_sum = -((int16_t) global_sum_step / 2);
                                n -= move_sum;
                                res = uatomic_cmpxchg(int_p, old, n);
                        } while (old != res);
                        break;
                }
                case COUNTER_SYNC_GLOBAL:
                {
                        do {
                                old = res;
                                n = (int16_t) ((uint16_t) old + (uint16_t) v);
                                res = uatomic_cmpxchg(int_p, old, n);
                        } while (old != res);
                        break;
                }
                default:
                        return -EINVAL;
                }
                if (v > 0 && (v >= UINT16_MAX || n < old))
                        overflow = true;
                else if (v < 0 && (v <= -(int64_t) UINT16_MAX || n > old))
                        underflow = true;
                break;
        }
        case COUNTER_SIZE_32_BIT:
        {
                int32_t *int_p = (int32_t *) layout->counters + index;
                int32_t old, n, res;
                int32_t global_sum_step = counter->global_sum_step.s32;

                res = *int_p;
                switch (sync) {
                case COUNTER_SYNC_PER_CPU:
                {
                        do {
                                move_sum = 0;
                                old = res;
                                n = (int32_t) ((uint32_t) old + (uint32_t) v);
                                if (caa_unlikely(n > (int32_t) global_sum_step))
                                        move_sum = (int32_t) global_sum_step / 2;
                                else if (caa_unlikely(n < -(int32_t) global_sum_step))
                                        move_sum = -((int32_t) global_sum_step / 2);
                                n -= move_sum;
                                res = uatomic_cmpxchg(int_p, old, n);
                        } while (old != res);
                        break;
                }
                case COUNTER_SYNC_GLOBAL:
                {
                        do {
                                old = res;
                                n = (int32_t) ((uint32_t) old + (uint32_t) v);
                                res = uatomic_cmpxchg(int_p, old, n);
                        } while (old != res);
                        break;
                }
                default:
                        return -EINVAL;
                }
                if (v > 0 && (v >= UINT32_MAX || n < old))
                        overflow = true;
                else if (v < 0 && (v <= -(int64_t) UINT32_MAX || n > old))
                        underflow = true;
                break;
        }
#if CAA_BITS_PER_LONG == 64
        case COUNTER_SIZE_64_BIT:
        {
                int64_t *int_p = (int64_t *) layout->counters + index;
                int64_t old, n, res;
                int64_t global_sum_step = counter->global_sum_step.s64;

                res = *int_p;
                switch (sync) {
                case COUNTER_SYNC_PER_CPU:
                {
                        do {
                                move_sum = 0;
                                old = res;
                                n = (int64_t) ((uint64_t) old + (uint64_t) v);
                                if (caa_unlikely(n > (int64_t) global_sum_step))
                                        move_sum = (int64_t) global_sum_step / 2;
                                else if (caa_unlikely(n < -(int64_t) global_sum_step))
                                        move_sum = -((int64_t) global_sum_step / 2);
                                n -= move_sum;
                                res = uatomic_cmpxchg(int_p, old, n);
                        } while (old != res);
                        break;
                }
                case COUNTER_SYNC_GLOBAL:
                {
                        do {
                                old = res;
                                n = (int64_t) ((uint64_t) old + (uint64_t) v);
                                res = uatomic_cmpxchg(int_p, old, n);
                        } while (old != res);
                        break;
                }
                default:
                        return -EINVAL;
                }
                if (v > 0 && n < old)
                        overflow = true;
                else if (v < 0 && n > old)
                        underflow = true;
                break;
        }
#endif
        default:
                return -EINVAL;
        }
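        /*
         * Record overflow/underflow state for this index in the
         * layout's bitmaps. Testing a bit before setting it skips the
         * store when the bit is already set.
         */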
        if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
                lttng_bitmap_set_bit(index, layout->overflow_bitmap);
        else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
                lttng_bitmap_set_bit(index, layout->underflow_bitmap);
        if (remainder)
                *remainder = move_sum;
        return 0;
}

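/*
 * Add to the per-CPU counter, spilling into the global counter when
 * the per-CPU value crosses the global sum step.
 *
 * Worked example (illustrative values): with global_sum_step = 100, a
 * per-CPU counter holding 60 and v = 50, n = 110 exceeds the step, so
 * move_sum = 100 / 2 = 50 is subtracted: the per-CPU counter ends up
 * at 60, and the remaining 50 is added to the global counter with
 * global synchronization, since any CPU may update that shared slot
 * concurrently.
 */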
static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
                struct lib_counter *counter,
                const size_t *dimension_indexes, int64_t v)
{
        int64_t move_sum;
        int ret;

        ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
                        counter, dimension_indexes, v, &move_sum);
        if (caa_unlikely(ret))
                return ret;
        if (caa_unlikely(move_sum))
                return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
                                counter, dimension_indexes, move_sum, NULL);
        return 0;
}

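/*
 * Add directly to the global counter, using the synchronization mode
 * requested by the configuration.
 */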
static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
                struct lib_counter *counter,
                const size_t *dimension_indexes, int64_t v)
{
        return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
                        dimension_indexes, v, NULL);
}

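/*
 * Main entry point: dispatch on the configured allocation scheme.
 * Counters allocated per-CPU (with or without a global mirror) take
 * the per-CPU path; purely global counters update the global layout
 * directly.
 */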
static inline int lttng_counter_add(const struct lib_counter_config *config,
                struct lib_counter *counter,
                const size_t *dimension_indexes, int64_t v)
{
        switch (config->alloc) {
        case COUNTER_ALLOC_PER_CPU:     /* Fallthrough */
        case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
                return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
        case COUNTER_ALLOC_GLOBAL:
                return __lttng_counter_add_global(config, counter, dimension_indexes, v);
        default:
                return -EINVAL;
        }
}

static inline int lttng_counter_inc(const struct lib_counter_config *config,
                struct lib_counter *counter,
                const size_t *dimension_indexes)
{
        return lttng_counter_add(config, counter, dimension_indexes, 1);
}

static inline int lttng_counter_dec(const struct lib_counter_config *config,
                struct lib_counter *counter,
                const size_t *dimension_indexes)
{
        return lttng_counter_add(config, counter, dimension_indexes, -1);
}

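/*
 * Example usage (a minimal sketch; "my_config", "my_counter",
 * "event_id" and "handle_error" are hypothetical, and assume a
 * counter previously created through the libcounter allocation API):
 *
 *      size_t dimension_indexes[1] = { event_id };
 *      int ret;
 *
 *      ret = lttng_counter_add(&my_config, my_counter, dimension_indexes, 42);
 *      if (ret)
 *              handle_error(ret);
 *      ret = lttng_counter_inc(&my_config, my_counter, dimension_indexes);
 *      if (ret)
 *              handle_error(ret);
 */
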
#endif /* _LTTNG_COUNTER_API_H */