/*
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <assert.h>
#include <pthread.h>

#include "urcu.h"
pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also carries a low-order count of 1 (RCU_GP_COUNT), to accelerate the
 * reader fast path.
 */
long urcu_gp_ctr = RCU_GP_COUNT;

/* Per-thread read-side nesting/parity counter. */
long __thread urcu_active_readers;
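/*
 * Illustrative sketch (not part of this file): the reader fast path is
 * expected to live in urcu.h.  Because urcu_gp_ctr already carries a count of
 * 1 along with the parity bit, the outermost rcu_read_lock() can mark the
 * thread active with a single copy.  RCU_GP_CTR_NEST_MASK and the helper name
 * are assumptions, inferred from how these counters are used below:
 *
 *        static inline void rcu_read_lock_sketch(void)
 *        {
 *                long tmp = urcu_active_readers;
 *
 *                if (!(tmp & RCU_GP_CTR_NEST_MASK))
 *                        urcu_active_readers = urcu_gp_ctr;        // parity + count 1
 *                else
 *                        urcu_active_readers = tmp + RCU_GP_COUNT; // nested
 *                barrier();        // compiler barrier; see force_mb_all_threads()
 *        }
 */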
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_data {
        pthread_t tid;
        long *urcu_active_readers;
};
unsigned int yield_active;
unsigned int __thread rand_yield;
static struct reader_data *reader_data;
static int num_readers, alloc_readers;
static int sig_done;        /* readers that ran sigurcu_handler() in this round */
void internal_urcu_lock(void)
{
        int ret;

        ret = pthread_mutex_lock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex lock");
                exit(-1);
        }
}
void internal_urcu_unlock(void)
{
        int ret;

        ret = pthread_mutex_unlock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex unlock");
                exit(-1);
        }
}
/*
 * called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
        urcu_gp_ctr ^= RCU_GP_CTR_BIT;
}
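/*
 * Worked example (assuming RCU_GP_CTR_BIT is a single bit above the nesting
 * count bits and RCU_GP_COUNT == 1): starting from urcu_gp_ctr == RCU_GP_COUNT,
 * the first call yields RCU_GP_COUNT | RCU_GP_CTR_BIT, the second yields
 * RCU_GP_COUNT again.  The XOR toggles only the parity bit; the low-order
 * count of 1 is preserved.
 */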
#ifdef DEBUG_FULL_MB
static void force_mb_all_threads(void)
{
        smp_mb();
}
#else
static void force_mb_all_threads(void)
{
        struct reader_data *index;

        /*
         * Ask each thread to execute an smp_mb(), so we can consider the
         * compiler barriers around rcu read lock as real memory barriers.
         */
        if (!reader_data)
                return;
        sig_done = 0;
        smp_mb();        /* write sig_done before sending the signals */
        for (index = reader_data; index < reader_data + num_readers; index++)
                pthread_kill(index->tid, SIGURCU);
        /*
         * Wait for sighandler (and thus mb()) to execute on every thread.
         * BUSY-LOOP.
         */
        while (sig_done < num_readers)
                barrier();
        smp_mb();        /* read sig_done before ending the barrier */
}
#endif
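/*
 * For reference, a minimal sketch of the two barrier flavours this scheme
 * trades between (x86 shown; the real definitions live in urcu.h / arch
 * headers and may differ):
 *
 *        #define barrier()        asm volatile("" ::: "memory")         // compiler-only
 *        #define smp_mb()         asm volatile("mfence" ::: "memory")   // full fence
 *
 * Readers pay only barrier() on their fast path; when the writer needs real
 * ordering, it upgrades those compiler barriers on demand by making every
 * registered thread execute smp_mb() in sigurcu_handler().
 */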
void wait_for_quiescent_state(void)
{
        struct reader_data *index;

        if (!reader_data)
                return;
        /*
         * Wait for each thread's urcu_active_readers count to become 0.
         */
        for (index = reader_data; index < reader_data + num_readers; index++) {
                /* BUSY-LOOP. */
                while (rcu_old_gp_ongoing(index->urcu_active_readers))
                        barrier();
        }
}
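/*
 * rcu_old_gp_ongoing() is expected to come from urcu.h; a plausible sketch,
 * assuming the counter layout described above (nesting count in the low bits,
 * parity in RCU_GP_CTR_BIT) and hypothetical helper/mask names:
 *
 *        static inline int rcu_old_gp_ongoing_sketch(long *value)
 *        {
 *                long v;
 *
 *                if (value == NULL)
 *                        return 0;        // empty registry slot
 *                v = ACCESS_ONCE(*value);
 *                // still ongoing if the reader is in a read-side critical
 *                // section (non-zero nesting) with the previous parity
 *                return (v & RCU_GP_CTR_NEST_MASK) &&
 *                        ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 *        }
 *
 * Only the overall test (non-zero nesting under the old parity) is implied by
 * the caller above; the exact names are assumptions.
 */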
void synchronize_rcu(void)
{
        /* All threads should read qparity before accessing the data structure
         * the new ptr points to. */
        /* Write new ptr before changing the qparity */
        force_mb_all_threads();

        internal_urcu_lock();

        switch_next_urcu_qparity();        /* 0 -> 1 */
        /*
         * Must commit qparity update to memory before waiting for parity
         * 0 quiescent state. Failure to do so could result in the writer
         * waiting forever while new readers are always accessing data (no
         * progress).
         */
        smp_mb();
        /*
         * Wait for previous parity to be empty of readers.
         */
        wait_for_quiescent_state();        /* Wait readers in parity 0 */
        /*
         * Must finish waiting for quiescent state for parity 0 before
         * committing qparity update to memory. Failure to do so could result
         * in the writer waiting forever while new readers are always
         * accessing data (no progress).
         */
        smp_mb();
        switch_next_urcu_qparity();        /* 1 -> 0 */
        /*
         * Must commit qparity update to memory before waiting for parity
         * 1 quiescent state. Failure to do so could result in the writer
         * waiting forever while new readers are always accessing data (no
         * progress).
         */
        smp_mb();
        /*
         * Wait for previous parity to be empty of readers.
         */
        wait_for_quiescent_state();        /* Wait readers in parity 1 */

        internal_urcu_unlock();

        /* All threads should finish using the data referred to by old ptr
         * before decrementing their urcu_active_readers count */
        /* Finish waiting for reader threads before letting the old ptr be
         * freed. */
        force_mb_all_threads();
}
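/*
 * Typical usage, as a hedged sketch (rcu_read_lock(), rcu_read_unlock() and
 * rcu_dereference() are expected to come from urcu.h; "struct foo", "foo_ptr"
 * and "use()" are illustrative only):
 *
 * Writer:
 *        struct foo *new, *old;
 *
 *        new = malloc(sizeof(*new));
 *        new->data = 42;
 *        old = foo_ptr;
 *        foo_ptr = new;            // publish: readers see old or new
 *        synchronize_rcu();        // wait out all pre-existing readers
 *        free(old);                // no reader can still reference old
 *
 * Reader (in a thread that called urcu_register_thread()):
 *        rcu_read_lock();
 *        use(rcu_dereference(foo_ptr));
 *        rcu_read_unlock();
 */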
void urcu_add_reader(pthread_t id)
{
        struct reader_data *oldarray;

        if (!reader_data) {
                alloc_readers = INIT_NUM_THREADS;
                num_readers = 0;
                reader_data =
                        malloc(sizeof(struct reader_data) * alloc_readers);
        }
        if (alloc_readers < num_readers + 1) {
                /* Double the array size; never shrink it. */
                oldarray = reader_data;
                reader_data = malloc(sizeof(struct reader_data)
                                * (alloc_readers << 1));
                memcpy(reader_data, oldarray,
                        sizeof(struct reader_data) * alloc_readers);
                alloc_readers <<= 1;
                free(oldarray);
        }
        reader_data[num_readers].tid = id;
        /* reference to the TLS of _this_ reader thread. */
        reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
        num_readers++;
}
/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
        struct reader_data *index;

        assert(reader_data != NULL);
        for (index = reader_data; index < reader_data + num_readers; index++) {
                if (pthread_equal(index->tid, id)) {
                        /* Replace this slot with the last entry. */
                        memcpy(index, &reader_data[num_readers - 1],
                                sizeof(struct reader_data));
                        reader_data[num_readers - 1].tid = 0;
                        reader_data[num_readers - 1].urcu_active_readers = NULL;
                        num_readers--;
                        return;
                }
        }
        /* Not found: did this thread forget to register? */
        assert(0);
}
void urcu_register_thread(void)
{
        internal_urcu_lock();
        urcu_add_reader(pthread_self());
        internal_urcu_unlock();
}
void urcu_unregister_thread(void)
{
        internal_urcu_lock();
        urcu_remove_reader(pthread_self());
        internal_urcu_unlock();
}
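/*
 * Reader thread lifecycle, as a sketch (the thread function name and the
 * "stop" flag are illustrative; rcu_read_lock()/rcu_read_unlock() come from
 * urcu.h):
 *
 *        void *reader_thread(void *arg)
 *        {
 *                urcu_register_thread();        // before the first rcu_read_lock()
 *                while (!stop) {
 *                        rcu_read_lock();
 *                        // ... read RCU-protected data ...
 *                        rcu_read_unlock();
 *                }
 *                urcu_unregister_thread();      // before returning/pthread_exit()
 *                return NULL;
 *        }
 */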
#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
        /*
         * Execute the memory barrier the writer asked for, then signal
         * completion to force_mb_all_threads().
         */
        smp_mb();
        atomic_inc(&sig_done);
}
void __attribute__((constructor)) urcu_init(void)
{
        struct sigaction act;
        int ret;

        act.sa_sigaction = sigurcu_handler;
        act.sa_flags = SA_SIGINFO;        /* three-argument handler */
        sigemptyset(&act.sa_mask);
        ret = sigaction(SIGURCU, &act, NULL);
        if (ret) {
                perror("Error in sigaction");
                exit(-1);
        }
}
void __attribute__((destructor)) urcu_exit(void)
{
        struct sigaction act;
        int ret;

        ret = sigaction(SIGURCU, NULL, &act);
        if (ret) {
                perror("Error in sigaction");
                exit(-1);
        }
        assert(act.sa_sigaction == sigurcu_handler);
        free(reader_data);
}
#endif