int wait_loops = 0;
struct rcu_reader *index, *tmp;
-#if (BITS_PER_LONG < 64)
+#if (CAA_BITS_PER_LONG < 64)
/* Switch parity: 0 -> 1, 1 -> 0 */
CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
-#else /* !(BITS_PER_LONG < 64) */
+#else /* !(CAA_BITS_PER_LONG < 64) */
/* Increment current G.P. */
CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
-#endif /* !(BITS_PER_LONG < 64) */
+#endif /* !(CAA_BITS_PER_LONG < 64) */
/*
* Must commit rcu_gp_ctr update to memory before waiting for quiescent
* long-size to ensure we do not encounter an overflow bug.
*/
-#if (BITS_PER_LONG < 64)
+#if (CAA_BITS_PER_LONG < 64)
void synchronize_rcu(void)
{
unsigned long was_online;
_CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
cmm_smp_mb();
}
-#else /* !(BITS_PER_LONG < 64) */
+#else /* !(CAA_BITS_PER_LONG < 64) */
void synchronize_rcu(void)
{
unsigned long was_online;
_CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
cmm_smp_mb();
}
-#endif /* !(BITS_PER_LONG < 64) */
+#endif /* !(CAA_BITS_PER_LONG < 64) */
/*
* library wrappers to be used by non-LGPL compatible source code.
return result;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long result;
return old_val;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long old_val;
return result;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long result;
: "memory", "cc");
return old_val;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long old_val;
: "memory", "cc");
return old_val;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
__asm__ __volatile__(
return _new;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
__asm__ __volatile__ (
: "memory");
return result;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long result = old;
: "memory");
return result;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long result;
: "memory");
return result + (unsigned int)val;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long result = val;
: "memory");
return;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
__asm__ __volatile__(
: "memory");
return;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
__asm__ __volatile__(
: "memory");
return;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
__asm__ __volatile__(
#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
-#if ((BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);
#endif
case 4:
return __sync_val_compare_and_swap_4(addr, old, _new);
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
return __sync_val_compare_and_swap_8(addr, old, _new);
#endif
#endif
case 4:
return __sync_add_and_fetch_4(addr, val);
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
return __sync_add_and_fetch_8(addr, val);
#endif
return old;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long old;
return old + val;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long old, oldt;
return old;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long old, oldt;