// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_ARCH_PPC_H
#define _URCU_ARCH_PPC_H

/*
 * arch_ppc.h: trivial definitions for the powerpc architecture.
 */

#include <urcu/compiler.h>
#include <urcu/config.h>
#include <urcu/syscall-compat.h>
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Include size of POWER5+ L3 cache lines: 256 bytes */
#define CAA_CACHE_LINE_SIZE	256

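/*
 * Illustrative sketch (not part of this header): CAA_CACHE_LINE_SIZE is
 * typically used to align or pad per-thread data so that independently
 * updated fields do not share a cache line (false sharing). The struct
 * below is hypothetical, not something defined by liburcu:
 *
 *	struct per_thread_count {
 *		unsigned long count;
 *	} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 */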
#ifdef __NO_LWSYNC__
#define LWSYNC_OPCODE	"sync\n"
#else
#define LWSYNC_OPCODE	"lwsync\n"
#endif

/*
 * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
 * preserve ordering of cacheable vs. non-cacheable accesses, so it
 * should not be used to order with respect to MMIO operations. An
 * eieio+lwsync pair is also not enough for cmm_rmb, because it will
 * order cacheable and non-cacheable memory operations separately---i.e.
 * not the latter against the former.
 */
#define cmm_mb()         __asm__ __volatile__ ("sync":::"memory")
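/*
 * Illustrative sketch (not part of this header): cmm_mb() is the barrier
 * to use when cacheable stores must be ordered before an MMIO access,
 * e.g. publishing a descriptor before ringing a device doorbell. "desc"
 * and "doorbell_reg" below are hypothetical:
 *
 *	desc->len = len;			   (cacheable store)
 *	cmm_mb();				   (full "sync")
 *	*(volatile uint32_t *) doorbell_reg = 1;   (MMIO store)
 *
 * Per the comment above, an lwsync-based barrier would not be sufficient
 * to order these two stores against each other.
 */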

/*
 * lwsync orders loads in cacheable memory with respect to other loads,
 * and stores in cacheable memory with respect to other stores.
 * Therefore, use it for barriers ordering accesses to cacheable memory
 * only.
 */
#define cmm_smp_rmb()    __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
#define cmm_smp_wmb()    __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
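/*
 * Illustrative sketch (not part of this header): the classic
 * publish/subscribe pattern over cacheable shared memory, which is what
 * these lwsync-based SMP barriers are intended for. "data" and "ready"
 * are hypothetical shared variables; CMM_STORE_SHARED()/CMM_LOAD_SHARED()
 * come from <urcu/system.h>.
 *
 * Producer:
 *	data = 42;
 *	cmm_smp_wmb();			(data store ordered before flag store)
 *	CMM_STORE_SHARED(ready, 1);
 *
 * Consumer:
 *	if (CMM_LOAD_SHARED(ready)) {
 *		cmm_smp_rmb();		(flag load ordered before data load)
 *		use_value(data);
 *	}
 */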

#define mftbl()						\
	__extension__					\
	({						\
		unsigned long rval;			\
		__asm__ __volatile__ ("mftb %0" : "=r" (rval));	\
		rval;					\
	})

#define mftbu()						\
	__extension__					\
	({						\
		unsigned long rval;			\
		__asm__ __volatile__ ("mftbu %0" : "=r" (rval));	\
		rval;					\
	})

#define mftb()						\
	__extension__					\
	({						\
		unsigned long long rval;		\
		__asm__ __volatile__ ("mftb %0" : "=r" (rval));	\
		rval;					\
	})

#define HAS_CAA_GET_CYCLES

typedef uint64_t caa_cycles_t;

#ifdef __powerpc64__
static inline caa_cycles_t caa_get_cycles(void)
{
	return (caa_cycles_t) mftb();
}
#else
static inline caa_cycles_t caa_get_cycles(void)
{
	unsigned long h, l;

	for (;;) {
		h = mftbu();
		cmm_barrier();
		l = mftbl();
		cmm_barrier();
		if (mftbu() == h)
			return (((caa_cycles_t) h) << 32) + l;
	}
}
#endif
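/*
 * Illustrative sketch (not part of this header): timing a code section
 * with caa_get_cycles(). The result is in timebase ticks; converting to
 * seconds requires the platform timebase frequency, obtained elsewhere
 * (e.g. from the device tree). "do_work()" is hypothetical, and
 * <stdio.h>/<inttypes.h> are assumed for the printf below:
 *
 *	caa_cycles_t start, end;
 *
 *	start = caa_get_cycles();
 *	do_work();
 *	end = caa_get_cycles();
 *	printf("elapsed: %" PRIu64 " timebase ticks\n",
 *		(uint64_t) (end - start));
 *
 * On 32-bit PowerPC, caa_get_cycles() re-reads the upper timebase word
 * until it is stable, guarding against a carry from the lower word
 * between the two reads.
 */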

/*
 * On Linux, define the membarrier system call number if not yet available in
 * the system headers.
 */
#if (defined(__linux__) && !defined(__NR_membarrier))
#define __NR_membarrier		365
#endif
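/*
 * Illustrative sketch (not part of this header): invoking membarrier
 * directly. <urcu/syscall-compat.h>, included above, makes syscall()
 * and the __NR_* numbers available; MEMBARRIER_CMD_SHARED is assumed to
 * come from <linux/membarrier.h> on kernels that implement the call
 * (Linux >= 4.3):
 *
 *	if (syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0) < 0)
 *		perror("membarrier");
 *
 * liburcu's membarrier-based flavor relies on this call so that readers
 * can use compiler-only barriers while updaters pay for the heavier
 * system-wide barrier.
 */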

#ifdef __cplusplus
}
#endif

#include <urcu/arch/generic.h>

#endif /* _URCU_ARCH_PPC_H */