#ifndef _URCU_ARCH_UATOMIC_PPC_H
#define _URCU_ARCH_UATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

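/*
 * Compilers targeting processors that lack the lightweight "lwsync"
 * barrier define __NO_LWSYNC__; fall back to a full "sync" there.
 */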
#ifdef __NO_LWSYNC__
#define LWSYNC_OPCODE	"sync\n"
#else
#define LWSYNC_OPCODE	"lwsync\n"
#endif

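/*
 * An invalid opcode, executed to trap at runtime when one of the atomic
 * primitives below is used on an unsupported type size.
 */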
#define ILLEGAL_INSTR	".long	0xd00d00"

/*
 * Providing sequentially consistent semantics with respect to other
 * instructions for the cmpxchg and add_return families of atomic primitives.
 *
 * This is achieved with:
 *	lwsync (prior stores can still be reordered after the following
 *	        lwarx load)
 *	lwarx
 *	stwcx.
 *	test if success (retry)
 *	sync
 *
 * Explanation of the sequential consistency provided by this scheme
 * from Paul E. McKenney:
 *
 * The reason we can get away with the lwsync before is that if a prior
 * store reorders with the lwarx, then you have to store to the atomic
 * variable from some other CPU to detect it.
 *
 * And if you do that, the lwarx will lose its reservation, so the stwcx.
 * will fail.  The atomic operation will retry, so that the caller won't be
 * able to see the misordering.
 */
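
/*
 * For illustration, this gives the primitives below full-barrier semantics:
 * in the (hypothetical) publication pattern
 *
 *	data = 42;
 *	(void) uatomic_xchg(&flag, 1);
 *
 * the store to "data" cannot be reordered after the store to "flag", so a
 * CPU reading both locations with suitably ordered loads observes them in
 * program order.
 */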

/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"stwcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"stdcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction: an unsupported size cannot be
	 * caught with linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_xchg(addr, v)						\
	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						 sizeof(*(addr))))
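
/*
 * Illustrative usage (hypothetical variables): atomically replace the value
 * and act on the previous one.
 *
 *	static unsigned long state;
 *	...
 *	unsigned long prev = uatomic_xchg(&state, 0UL);
 */
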
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned int)_new),
				  "r"((unsigned int)old)
				: "memory", "cc");

		return old_val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned long)_new),
				  "r"((unsigned long)old)
				: "memory", "cc");

		return old_val;
	}
#endif
	}
	/*
	 * Generate an illegal instruction: an unsupported size cannot be
	 * caught with linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)				\
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old), \
						(unsigned long)(_new),	\
						sizeof(*(addr))))
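
/*
 * Illustrative usage (hypothetical variables): the return value is the
 * previous content of *addr; the update took place iff it equals "old".
 *
 *	static unsigned long lock;
 *	...
 *	if (uatomic_cmpxchg(&lock, 0UL, 1UL) == 0UL)
 *		... the lock was acquired ...
 */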

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				  int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction: an unsupported size cannot be
	 * caught with linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						   (unsigned long)(v),	\
						   sizeof(*(addr))))
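
/*
 * Illustrative usage (hypothetical counter): the new value is returned,
 * which makes this suitable for reference counting.
 *
 *	static unsigned long refcount;
 *	...
 *	if (uatomic_add_return(&refcount, -1) == 0)
 *		... the last reference was dropped ...
 */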

#ifdef __cplusplus
}
#endif

#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_PPC_H */