// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LicenseRef-Boehm-GC

#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

#include <stdlib.h>	/* For abort(3). */

/*
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/arch.h>
#include <urcu/config.h>
#include <urcu/compiler.h>
#include <urcu/system.h>

#define UATOMIC_HAS_ATOMIC_BYTE
#define UATOMIC_HAS_ATOMIC_SHORT

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

/*
 * The __hp() macro casts the void pointer @x to a pointer to a structure
 * containing an array of char of the specified size. This allows passing the
 * @addr arguments of the following inline functions as "m" and "+m" operands
 * to the assembly. The @size parameter should be a constant to support
 * compilers such as clang which do not support VLA. Create typedefs because
 * C++ does not allow types to be defined in casts.
 */

typedef struct { char v[1]; } __hp_1;
typedef struct { char v[2]; } __hp_2;
typedef struct { char v[4]; } __hp_4;
typedef struct { char v[8]; } __hp_8;

#define __hp(size, x)	((__hp_##size *)(x))
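
/*
 * Example (editorial sketch, not part of the API): for a 4-byte object,
 * __hp(4, addr) expands to ((__hp_4 *)(addr)), so an operand written as
 * "+m"(*__hp(4, addr)) tells the compiler that exactly 4 bytes at @addr
 * are read and written by the assembly:
 *
 *	unsigned int x = 0;
 *	__asm__ __volatile__("lock; addl %1, %0"
 *		: "+m"(*__hp(4, &x))
 *		: "ir"(1U)
 *		: "memory");
 *
 * A plain (void *) could not be dereferenced as a memory operand of a
 * known width; the sized struct cast provides that width.
 */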

#define _uatomic_set(addr, v)	((void) CMM_STORE_SHARED(*(addr), (v)))

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(1, addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(2, addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(4, addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(8, addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr),		      \
						caa_cast_long_keep_sign(old), \
						caa_cast_long_keep_sign(_new),\
						sizeof(*(addr))))
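
/*
 * Usage sketch (editorial, assuming a hypothetical long counter): the
 * public uatomic_cmpxchg() built on this helper returns the value that
 * was in memory, which equals @old exactly when the swap succeeded, so
 * a retry loop looks like:
 *
 *	long old, new;
 *	do {
 *		old = uatomic_read(&counter);
 *		new = old + 1;
 *	} while (uatomic_cmpxchg(&counter, old, new) != old);
 */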
119 | ||
120 | /* xchg */ | |
0114ba7f | 121 | |
static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(1, addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(2, addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(4, addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(8, addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) __uatomic_exchange((addr),		      \
						caa_cast_long_keep_sign(v),   \
						sizeof(*(addr))))

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				 int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(1, addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(2, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(4, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(8, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_add_return(addr, v)					    \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))

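/*
 * Note (editorial): "xadd" leaves the *previous* value in the register
 * operand, and @val is added back before returning, so
 * uatomic_add_return() yields the *new* value. E.g. with a hypothetical
 * refcount:
 *
 *	if (uatomic_add_return(&refcount, 1) == 1)
 *		// ... we took the first reference ...
 */
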
/* uatomic_and */

static inline __attribute__((always_inline))
void __uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; andb %1, %0"
			: "=m"(*__hp(1, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; andw %1, %0"
			: "=m"(*__hp(2, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; andl %1, %0"
			: "=m"(*__hp(4, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; andq %1, %0"
			: "=m"(*__hp(8, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_and(addr, v)						   \
	(__uatomic_and((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))

/* uatomic_or */

static inline __attribute__((always_inline))
void __uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; orb %1, %0"
			: "=m"(*__hp(1, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; orw %1, %0"
			: "=m"(*__hp(2, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; orl %1, %0"
			: "=m"(*__hp(4, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; orq %1, %0"
			: "=m"(*__hp(8, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_or(addr, v)						   \
	(__uatomic_or((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))

/* uatomic_add */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(1, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(2, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(4, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(8, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_add(addr, v)						   \
	(__uatomic_add((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))


/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(1, addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(2, addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(4, addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(8, addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))

/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(1, addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(2, addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(4, addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(8, addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))

#ifdef URCU_ARCH_X86_NO_CAS

/* For backwards compat */
#define CONFIG_RCU_COMPAT_ARCH 1

extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

#define UATOMIC_COMPAT(insn)						      \
	((caa_likely(__rcu_cas_avail > 0))				      \
	? (_uatomic_##insn)						      \
		: ((caa_unlikely(__rcu_cas_avail < 0)			      \
			? ((__rcu_cas_init() > 0)			      \
				? (_uatomic_##insn)			      \
				: (compat_uatomic_##insn))		      \
			: (compat_uatomic_##insn))))

/*
 * We leave the return value so we don't break the ABI, but remove the
 * return value from the API.
 */
extern unsigned long _compat_uatomic_set(void *addr,
					 unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)					       \
	((void) _compat_uatomic_set((addr),				       \
				caa_cast_long_keep_sign(_new),		       \
				sizeof(*(addr))))

extern unsigned long _compat_uatomic_xchg(void *addr,
					  unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)					       \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),		       \
						caa_cast_long_keep_sign(_new), \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					     unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),		       \
						caa_cast_long_keep_sign(old),  \
						caa_cast_long_keep_sign(_new), \
						sizeof(*(addr))))

extern void _compat_uatomic_and(void *addr, unsigned long _new, int len);
#define compat_uatomic_and(addr, v)					       \
	(_compat_uatomic_and((addr),					       \
			caa_cast_long_keep_sign(v),			       \
			sizeof(*(addr))))

extern void _compat_uatomic_or(void *addr, unsigned long _new, int len);
#define compat_uatomic_or(addr, v)					       \
	(_compat_uatomic_or((addr),					       \
			caa_cast_long_keep_sign(v),			       \
			sizeof(*(addr))))

extern unsigned long _compat_uatomic_add_return(void *addr,
						unsigned long _new, int len);
#define compat_uatomic_add_return(addr, v)				       \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	       \
						caa_cast_long_keep_sign(v),    \
						sizeof(*(addr))))

#define compat_uatomic_add(addr, v)					       \
	((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_inc(addr)					       \
	(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)					       \
	(compat_uatomic_add((addr), -1))

#else
#define UATOMIC_COMPAT(insn)	(_uatomic_##insn)
#endif

/* Read is atomic even in compat mode */
#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))

#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))

#define uatomic_and(addr, v)			\
		UATOMIC_COMPAT(and(addr, v))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#define uatomic_or(addr, v)			\
		UATOMIC_COMPAT(or(addr, v))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))

#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))
#define cmm_smp_mb__before_uatomic_add()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_add()		cmm_barrier()

#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))
#define cmm_smp_mb__before_uatomic_inc()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_inc()		cmm_barrier()

#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))
#define cmm_smp_mb__before_uatomic_dec()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_dec()		cmm_barrier()

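/*
 * Note (editorial): on x86 every lock-prefixed instruction is already a
 * full memory barrier, so the cmm_smp_mb__before/after_uatomic_*()
 * helpers above only need to stop compiler reordering (cmm_barrier()),
 * not emit a fence instruction.
 */
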
static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo)
{
	/*
	 * An SMP barrier is not necessary for CMM_SEQ_CST because only a
	 * previous store can be reordered with the load. However, emitting the
	 * memory barrier after the store is sufficient to prevent reordering
	 * between the two. This follows toolchains' decision of emitting the
	 * memory fence on the stores instead of the loads.
	 *
	 * A compiler barrier is necessary because the underlying operation does
	 * not clobber the registers.
	 */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		cmm_barrier();
		break;
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	default:
		abort();
		break;
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo)
{
	/*
	 * An SMP barrier is not necessary for CMM_SEQ_CST because following
	 * loads and stores cannot be reordered with the load.
	 *
	 * An SMP barrier is however necessary for CMM_SEQ_CST_FENCE to respect
	 * the memory model, since the underlying operation does not have a lock
	 * prefix.
	 *
	 * A compiler barrier is necessary because the underlying operation does
	 * not clobber the registers.
	 */
	switch (mo) {
	case CMM_SEQ_CST_FENCE:
		cmm_smp_mb();
		break;
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_SEQ_CST:
		cmm_barrier();
		break;
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	default:
		abort();
		break;
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo)
{
	/*
	 * An SMP barrier is not necessary for CMM_SEQ_CST because the store
	 * can only be reordered with later loads.
	 *
	 * A compiler barrier is necessary because the underlying operation does
	 * not clobber the registers.
	 */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		cmm_barrier();
		break;
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	default:
		abort();
		break;
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo)
{
	/*
	 * An SMP barrier is necessary for CMM_SEQ_CST because the store can be
	 * reordered with later loads. Since no memory barrier is being emitted
	 * before loads, one has to be emitted after the store. This follows
	 * toolchains' decision of emitting the memory fence on the stores
	 * instead of the loads.
	 *
	 * An SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the
	 * memory model, since the underlying store does not have a lock prefix.
	 *
	 * A compiler barrier is necessary because the underlying operation does
	 * not clobber the registers.
	 */
	switch (mo) {
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		cmm_smp_mb();
		break;
	case CMM_RELAXED:	/* Fall-through */
	case CMM_RELEASE:
		cmm_barrier();
		break;
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	default:
		abort();
		break;
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_xchg has an implicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_xchg has an implicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_cmpxchg has an implicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_cmpxchg has an implicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_and has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_and has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_or has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_or has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_add has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_add has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_sub has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_sub has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_inc has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_inc has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_dec has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_dec has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_add_return has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_add_return has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_sub_return has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_sub_return has an explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

#define _cmm_compat_c11_smp_mb__before_mo(operation, mo)		\
	do {								\
		_cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \
	} while (0)

#define _cmm_compat_c11_smp_mb__after_mo(operation, mo)			\
	do {								\
		_cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \
	} while (0)

#ifdef __cplusplus
}
#endif

#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_X86_H */