/*
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Assumed to provide mb(), barrier(), atomic_inc() and SIGURCU. */
#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Global quiescent period parity */
int urcu_qparity;

int __thread urcu_active_readers[2];

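/*
 * Read-side note: the rcu_read_lock()/rcu_read_unlock() fast paths live in
 * the library header, not in this file. A minimal sketch of what they are
 * assumed to look like, given the per-thread counters above (hypothetical
 * code, for illustration only):
 *
 *      static inline void rcu_read_lock(int *urcu_parity)
 *      {
 *              *urcu_parity = urcu_qparity;
 *              urcu_active_readers[*urcu_parity]++;
 *              barrier();      <- promoted to a real memory barrier by
 *                                 force_mb_all_threads() below
 *      }
 *
 *      static inline void rcu_read_unlock(int *urcu_parity)
 *      {
 *              barrier();
 *              urcu_active_readers[*urcu_parity]--;
 *      }
 */
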
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_data {
        pthread_t tid;
        int *urcu_active_readers;
};

static struct reader_data *reader_data;
static int num_readers, alloc_readers;
static int sig_done;

void internal_urcu_lock(void)
{
        int ret;

        ret = pthread_mutex_lock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex lock");
                exit(-1);
        }
}

void internal_urcu_unlock(void)
{
        int ret;

        ret = pthread_mutex_unlock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex unlock");
                exit(-1);
        }
}

/*
 * Called with urcu_mutex held.
 */
static int switch_next_urcu_qparity(void)
{
        int old_parity = urcu_qparity;

        urcu_qparity = 1 - old_parity;
        return old_parity;
}

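/*
 * Grace-period scheme (summary): each reader increments the per-thread
 * counter for the parity current at rcu_read_lock() time and decrements
 * that same slot at rcu_read_unlock(). The writer flips urcu_qparity and
 * then waits for the previous parity's counters to drain to zero; any
 * reader still counted there started before the flip, so once those
 * counters reach zero all pre-existing readers have finished.
 */
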
static void force_mb_all_threads(void)
{
        struct reader_data *index;

        /*
         * Ask each thread to execute a mb(), so we can consider the
         * compiler barriers around rcu read lock as real memory barriers.
         */
        if (!reader_data)
                return;
        sig_done = 0;
        mb();   /* write sig_done before sending the signals */
        for (index = reader_data; index < reader_data + num_readers; index++) {
                pthread_kill(index->tid, SIGURCU);
        }
        /*
         * Wait for sighandler (and thus mb()) to execute on every thread.
         * BUSY-LOOP.
         */
        while (sig_done < num_readers)
                barrier();
        mb();   /* read sig_done before ending the barrier */
}

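/*
 * Design note: signalling every reader with SIGURCU instead of putting
 * memory barriers in the read-side fast path keeps rcu_read_lock() down to
 * a counter update plus a compiler barrier; the writer pays the cost of the
 * memory barriers on the readers' behalf, which is the right trade-off when
 * updates are much rarer than reads.
 */
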
void wait_for_quiescent_state(int parity)
{
        struct reader_data *index;

        if (!reader_data)
                return;
        /*
         * Wait for each thread's urcu_active_readers count to become 0.
         * BUSY-LOOP.
         */
        for (index = reader_data; index < reader_data + num_readers; index++) {
                while (index->urcu_active_readers[parity] != 0)
                        barrier();
        }
        /*
         * Locally: read *index->urcu_active_readers before freeing old
         * pointer.
         * Remote (reader threads): order the urcu_qparity update against the
         * other threads' quiescent state counter reads.
         */
        force_mb_all_threads();
}

static void switch_qparity(void)
{
        int prev_parity;

        /* All threads should read qparity before accessing data structure. */
        /* Write ptr before changing the qparity */
        force_mb_all_threads();
        prev_parity = switch_next_urcu_qparity();

        /*
         * Wait for previous parity to be empty of readers.
         */
        wait_for_quiescent_state(prev_parity);
}

void synchronize_rcu(void)
{
        internal_urcu_lock();
        switch_qparity();
        switch_qparity();
        internal_urcu_unlock();
}

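/*
 * Why flip the parity twice? Roughly speaking, a reader can be preempted
 * between reading urcu_qparity and incrementing its urcu_active_readers[]
 * slot, so after a single flip-and-wait a late reader may still show up
 * under the parity that was just drained. Performing the flip-and-wait
 * sequence once per parity closes this window: every reader that could
 * hold a reference to the old data has completed before synchronize_rcu()
 * returns.
 */
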
/*
 * Returns the old pointer; OK to free, no more references exist.
 * Called under rcu_write_lock.
 */
void *urcu_publish_content(void **ptr, void *new)
{
        void *oldptr;

        internal_urcu_lock();
        /*
         * We can publish the new pointer before we change the current qparity.
         * Readers seeing the new pointer while being in the previous qparity
         * window will make us wait until the end of the quiescent state before
         * we release the unrelated memory area. However, given we hold the
         * urcu_mutex, we are making sure that no further garbage collection
         * can occur until we release the mutex, therefore we guarantee that
         * this given reader will have completed its execution using the new
         * pointer when the next quiescent state window will be over.
         */
        oldptr = *ptr;
        *ptr = new;

        switch_qparity();
        switch_qparity();
        internal_urcu_unlock();

        return oldptr;
}

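/*
 * Example usage (a sketch: `struct mystruct` and the global `rcu_data`
 * pointer are hypothetical, not part of this library):
 *
 *      struct mystruct *new = malloc(sizeof(*new));
 *      struct mystruct *old;
 *
 *      new->value = 42;
 *      old = urcu_publish_content((void **)&rcu_data, new);
 *      free(old);      <- safe: no reader can still hold a reference
 */
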
void urcu_add_reader(pthread_t id)
{
        struct reader_data *oldarray;

        if (!reader_data) {
                alloc_readers = INIT_NUM_THREADS;
                num_readers = 0;
                reader_data =
                        malloc(sizeof(struct reader_data) * alloc_readers);
        }
        if (alloc_readers < num_readers + 1) {
                oldarray = reader_data;
                reader_data = malloc(sizeof(struct reader_data)
                                * (alloc_readers << 1));
                memcpy(reader_data, oldarray,
                        sizeof(struct reader_data) * alloc_readers);
                alloc_readers <<= 1;
                free(oldarray);
        }
        reader_data[num_readers].tid = id;
        /* reference to the TLS of _this_ reader thread. */
        reader_data[num_readers].urcu_active_readers = urcu_active_readers;
        num_readers++;
}

/*
 * Never shrinks (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
        struct reader_data *index;

        assert(reader_data != NULL);
        for (index = reader_data; index < reader_data + num_readers; index++) {
                if (pthread_equal(index->tid, id)) {
                        memcpy(index, &reader_data[num_readers - 1],
                                sizeof(struct reader_data));
                        reader_data[num_readers - 1].tid = 0;
                        reader_data[num_readers - 1].urcu_active_readers = NULL;
                        num_readers--;
                        return;
                }
        }
        /* Hrm, not found. Did the thread forget to register? */
        assert(0);
}

void urcu_register_thread(void)
{
        internal_urcu_lock();
        urcu_add_reader(pthread_self());
        internal_urcu_unlock();
}

void urcu_unregister_thread(void)
{
        internal_urcu_lock();
        urcu_remove_reader(pthread_self());
        internal_urcu_unlock();
}

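/*
 * Every reader thread must register before its first read-side critical
 * section and unregister before exiting. Sketch of a reader thread
 * (rcu_read_lock()/rcu_read_unlock() and `done` are assumptions, not part
 * of this file):
 *
 *      void *reader_thread(void *arg)
 *      {
 *              int parity;
 *
 *              urcu_register_thread();
 *              while (!done) {
 *                      rcu_read_lock(&parity);
 *                       ... read the RCU-protected data ...
 *                      rcu_read_unlock(&parity);
 *              }
 *              urcu_unregister_thread();
 *              return NULL;
 *      }
 */
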
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
        mb();   /* the barrier the writer asked every reader to execute */
        atomic_inc(&sig_done);
}

void __attribute__((constructor)) urcu_init(void)
{
        struct sigaction act;
        int ret;

        act.sa_sigaction = sigurcu_handler;
        act.sa_flags = SA_SIGINFO;      /* required for the sa_sigaction form */
        sigemptyset(&act.sa_mask);
        ret = sigaction(SIGURCU, &act, NULL);
        if (ret) {
                perror("Error in sigaction");
                exit(-1);
        }
}

void __attribute__((destructor)) urcu_exit(void)
{
        struct sigaction act;
        int ret;

        ret = sigaction(SIGURCU, NULL, &act);
        if (ret) {
                perror("Error in sigaction");
                exit(-1);
        }
        assert(act.sa_sigaction == sigurcu_handler);
}
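
/*
 * SIGURCU itself is assumed to be defined by the library header (for
 * instance as an alias for SIGUSR1); the destructor's assert checks that
 * the application has not replaced the handler behind the library's back.
 */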