/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */
#include <urcu/arch.h>

#ifndef __always_inline
#define __always_inline inline
#endif /* #ifndef __always_inline */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
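/*
 * Illustrative sketch (not part of the original header): BUILD_BUG_ON()
 * turns a compile-time condition into an array whose size goes negative
 * when the condition is true, so compilation fails at the offending
 * line.  The struct and function names below are hypothetical.
 */
#if 0	/* example only */
struct ring_entry {
	unsigned long key;
	unsigned long val;
};

static void ring_check(void)
{
	/* Fails to compile if ring_entry ever outgrows a cache line. */
	BUILD_BUG_ON(sizeof(struct ring_entry) > CAA_CACHE_LINE_SIZE);
}
#endif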
#ifdef __ASSEMBLY__
# define stringify_in_c(...) __VA_ARGS__
# define ASM_CONST(x) x
#else
/* This version of stringify will deal with commas... */
# define __stringify_in_c(...) #__VA_ARGS__
# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
# define __ASM_CONST(x) x##UL
# define ASM_CONST(x) __ASM_CONST(x)
#endif /* #ifdef __ASSEMBLY__ */
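/*
 * Illustrative sketch (not part of the original header): in C code,
 * ASM_CONST() pastes a UL suffix onto a constant, and stringify_in_c()
 * turns arbitrary tokens, commas included, into a string suitable for
 * an asm() template.  EXAMPLE_MASK is a hypothetical name.
 */
#if 0	/* example only */
#define EXAMPLE_MASK ASM_CONST(0xffff)	/* expands to 0xffffUL */
/* stringify_in_c(li 3, 1) expands to the string "li 3, 1 " */
#endif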
/*
 * arch-i386.h: Expose x86 atomic instructions.  80486 and better only.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, but version 2 only due to inclusion
 * of Linux-kernel code.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */
/* #define CAA_CACHE_LINE_SIZE 64 */
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
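/*
 * Illustrative sketch (not part of the original header): aligning each
 * field to its own cache line prevents false sharing between CPUs that
 * update adjacent fields.  The struct below is hypothetical.
 */
#if 0	/* example only */
struct counter_pair {
	long a ____cacheline_internodealigned_in_smp;
	long b ____cacheline_internodealigned_in_smp;
};
#endif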
/*
 * api_pthreads.h: API mapping to pthreads environment.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.  However, please note that much
 * of the code in this file derives from the Linux kernel, and that such
 * code may not be available except under GPLv2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 */
#define _GNU_SOURCE	/* for sched_setaffinity(). */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/types.h>
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
#include <sys/time.h>
/* #include "atomic.h" */
/*
 * Default machine parameters.
 */

#ifndef CAA_CACHE_LINE_SIZE
/* #define CAA_CACHE_LINE_SIZE 128 */
#endif /* #ifndef CAA_CACHE_LINE_SIZE */
/*
 * Exclusive locking primitives.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
static void spin_lock_init(spinlock_t *sp)
{
	if (pthread_mutex_init(sp, NULL) != 0) {
		perror("spin_lock_init:pthread_mutex_init");
		exit(-1);
	}
}
static void spin_lock(spinlock_t *sp)
{
	if (pthread_mutex_lock(sp) != 0) {
		perror("spin_lock:pthread_mutex_lock");
		exit(-1);
	}
}
static void spin_unlock(spinlock_t *sp)
{
	if (pthread_mutex_unlock(sp) != 0) {
		perror("spin_unlock:pthread_mutex_unlock");
		exit(-1);
	}
}
#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
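/*
 * Illustrative sketch (not part of the original header): the pthreads
 * mapping lets Linux-kernel-style locking code run unchanged in user
 * space.  The counter and its lock below are hypothetical.
 */
#if 0	/* example only */
DEFINE_SPINLOCK(count_lock);
long count;

static void inc_count(void)
{
	spin_lock(&count_lock);
	count++;
	spin_unlock(&count_lock);
}
#endif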
/*
 * Thread creation/destruction primitives.
 */

typedef pthread_t thread_id_t;

#define NR_THREADS 128

#define __THREAD_ID_MAP_EMPTY ((thread_id_t) 0)
#define __THREAD_ID_MAP_WAITING ((thread_id_t) 1)
thread_id_t __thread_id_map[NR_THREADS];
spinlock_t __thread_id_map_mutex;
#define for_each_thread(t) \
	for (t = 0; t < NR_THREADS; t++)

#define for_each_running_thread(t) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
		    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))

#define for_each_tid(t, tid) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
		    ((tid) != __THREAD_ID_MAP_WAITING))
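/*
 * Illustrative sketch (not part of the original header): each traversal
 * macro expands to a for loop (plus a filtering if for the running-thread
 * variants), so a single statement may follow as the loop body.
 */
#if 0	/* example only */
static int count_running_threads(void)
{
	int t;
	int n = 0;

	for_each_running_thread(t)
		n++;
	return n;
}
#endif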
pthread_key_t thread_id_key;
static int __smp_thread_id(void)
{
	int i;
	thread_id_t tid = pthread_self();

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			long v = i + 1;	/* must be non-NULL. */

			if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
				perror("pthread_setspecific");
				exit(-1);
			}
			return i;
		}
	}
	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			spin_unlock(&__thread_id_map_mutex);
			return i;
		}
	}
	spin_unlock(&__thread_id_map_mutex);
	fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
		(int)tid, (int)tid);
	exit(-1);
}
static int smp_thread_id(void)
{
	void *id;

	id = pthread_getspecific(thread_id_key);
	if (id == NULL)
		return __smp_thread_id();
	return (long)id - 1;	/* avoid arithmetic on void *. */
}
static thread_id_t create_thread(void *(*func)(void *), void *arg)
{
	thread_id_t tid;
	int i;

	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
			break;
	}
	if (i >= NR_THREADS) {
		spin_unlock(&__thread_id_map_mutex);
		fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_WAITING;
	spin_unlock(&__thread_id_map_mutex);
	if (pthread_create(&tid, NULL, func, arg) != 0) {
		perror("create_thread:pthread_create");
		exit(-1);
	}
	__thread_id_map[i] = tid;
	return tid;
}
static void *wait_thread(thread_id_t tid)
{
	int i;
	void *vp;

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid)
			break;
	}
	if (i >= NR_THREADS) {
		fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
			(int)tid, (int)tid);
		exit(-1);
	}
	if (pthread_join(tid, &vp) != 0) {
		perror("wait_thread:pthread_join");
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	return vp;
}
static void wait_all_threads(void)
{
	int i;
	thread_id_t tid;

	for (i = 1; i < NR_THREADS; i++) {
		tid = __thread_id_map[i];
		if (tid != __THREAD_ID_MAP_EMPTY &&
		    tid != __THREAD_ID_MAP_WAITING)
			(void)wait_thread(tid);
	}
}
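/*
 * Illustrative sketch (not part of the original header): a typical
 * caller starts workers with create_thread() and reaps them all with
 * wait_all_threads().  The worker and spawn_workers() names are
 * hypothetical.
 */
#if 0	/* example only */
static void *worker(void *arg)
{
	long me = (long)arg;

	printf("thread %ld running as smp_thread_id() %d\n",
	       me, smp_thread_id());
	return NULL;
}

static void spawn_workers(int n)
{
	long i;

	for (i = 0; i < n; i++)
		(void)create_thread(worker, (void *)i);
	wait_all_threads();
}
#endif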
#ifndef HAVE_CPU_SET_T
typedef unsigned long cpu_set_t;
# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
#endif /* #ifndef HAVE_CPU_SET_T */
static void run_on(int cpu)
{
#if HAVE_SCHED_SETAFFINITY
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	sched_setaffinity(0, &mask);
#else
	sched_setaffinity(0, sizeof(mask), &mask);
#endif /* #if SCHED_SETAFFINITY_ARGS == 2 */
#endif /* HAVE_SCHED_SETAFFINITY */
}
/*
 * timekeeping -- very crude -- should use CLOCK_MONOTONIC...
 */

long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}
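/*
 * Illustrative sketch (not part of the original header): pin the current
 * thread with run_on() and bracket a workload with get_microseconds()
 * for a crude wall-clock measurement.  The do_work() function is
 * hypothetical.
 */
#if 0	/* example only */
static long long time_work_on_cpu0(void)
{
	long long t0;

	run_on(0);			/* bind to CPU 0 (if supported). */
	t0 = get_microseconds();
	do_work();
	return get_microseconds() - t0;
}
#endif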
/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
	struct { \
		__typeof__(type) v \
		__attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
	} __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
	do { \
		int __i_p_t_i; \
		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
			per_thread(name, __i_p_t_i) = v; \
	} while (0)
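/*
 * Illustrative sketch (not part of the original header): per-thread
 * variables give each thread its own cache-line-aligned slot, so each
 * thread can update its own counter without interference while a reader
 * sums across all slots.  The counter name is hypothetical.
 */
#if 0	/* example only */
DEFINE_PER_THREAD(long, counter);

static void count_event(void)
{
	__get_thread_var(counter)++;	/* this thread's slot only. */
}

static long read_counters(void)
{
	int t;
	long sum = 0;

	for_each_thread(t)
		sum += per_thread(counter, t);
	return sum;
}
#endif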
/*
 * CPU traversal primitives.
 */

#ifndef NR_CPUS
#define NR_CPUS 16
#endif /* #ifndef NR_CPUS */

#define for_each_possible_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)
#define for_each_online_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)
/*
 * Per-CPU variables.
 */

#define DEFINE_PER_CPU(type, name) \
	struct { \
		__typeof__(type) v \
		__attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
	} __per_cpu_##name[NR_CPUS]
#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)

DEFINE_PER_THREAD(int, smp_processor_id);

#define per_cpu(name, thread) __per_cpu_##name[thread].v
#define __get_cpu_var(name) per_cpu(name, smp_processor_id())

#define init_per_cpu(name, v) \
	do { \
		int __i_p_c_i; \
		for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
			per_cpu(name, __i_p_c_i) = v; \
	} while (0)
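/*
 * Illustrative sketch (not part of the original header): the per-CPU
 * macros mirror the per-thread ones, but index by CPU number.  The
 * counter name is hypothetical.
 */
#if 0	/* example only */
DEFINE_PER_CPU(long, cpu_counter);

static long read_cpu_counters(void)
{
	int cpu;
	long sum = 0;

	for_each_possible_cpu(cpu)
		sum += per_cpu(cpu_counter, cpu);
	return sum;
}
#endif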
/*
 * CPU state checking (crowbarred).
 */

#define idle_cpu(cpu) 0
#define in_softirq() 1
#define hardirq_count() 0
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
/*
 * CPU hotplug notifiers.
 */

struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	struct notifier_block *next;
	int priority;
};
#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
			  * not handling interrupts, soon dead */
#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
			      * lock is dropped */
/* Used for CPU hotplug events occurring while tasks are frozen due to a
 * suspend operation in progress
 */
#define CPU_TASKS_FROZEN 0x0010
#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND 0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE 0x0006 /* Restore failed */
#define NOTIFY_DONE 0x0000 /* Don't care */
#define NOTIFY_OK 0x0001 /* Suits me */
#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK)
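/*
 * Illustrative sketch (not part of the original header): a notifier
 * callback receives one of the CPU_* actions above and answers with a
 * NOTIFY_* code; returning NOTIFY_BAD vetoes the transition.  The
 * callback and notifier_block names below are hypothetical.
 */
#if 0	/* example only */
static int my_cpu_notify(struct notifier_block *nb,
			 unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		return NOTIFY_OK;	/* allow the CPU to come up. */
	case CPU_DOWN_PREPARE:
		return NOTIFY_BAD;	/* veto taking the CPU down. */
	default:
		return NOTIFY_DONE;	/* not interested. */
	}
}

static struct notifier_block my_nb = {
	.notifier_call = my_cpu_notify,
};
#endif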
/* BUG_ON() fires when the condition holds, matching the kernel's semantics. */
#define BUG_ON(c) do { if (c) abort(); } while (0)
/*
 * Initialization -- Must be called before calling any primitives.
 */

static void smp_init(void)
{
	int i;

	spin_lock_init(&__thread_id_map_mutex);
	__thread_id_map[0] = pthread_self();
	for (i = 1; i < NR_THREADS; i++)
		__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	init_per_thread(smp_processor_id, 0);
	if (pthread_key_create(&thread_id_key, NULL) != 0) {
		perror("pthread_key_create");
		exit(-1);
	}
}
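/*
 * Illustrative sketch (not part of the original header): smp_init()
 * must run in the parent thread before any of the primitives above are
 * used, typically first thing in main().  spawn_workers() is the
 * hypothetical helper from the earlier sketch.
 */
#if 0	/* example only */
int main(int argc, char *argv[])
{
	smp_init();
	spawn_workers(4);
	return 0;
}
#endif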