/* Data used by both reader and synchronize_rcu() */
long ctr;
/* Data used for registry */
- struct list_head head __attribute__((aligned(CACHE_LINE_SIZE)));
+ struct list_head node __attribute__((aligned(CACHE_LINE_SIZE)));
pthread_t tid;
int alloc; /* registry entry allocated */
};
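
The rename clarifies the intrusive-list idiom: the global registry list is the *head*, while each struct rcu_reader embeds a list *node* that threads it onto that list. A minimal standalone sketch of the pattern (simplified list_head, list_add and container_of stand-ins, not the liburcu definitions):

#include <stddef.h>
#include <stdio.h>

/* Simplified kernel-style intrusive list; not the liburcu definitions. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *elem, struct list_head *head)
{
        elem->next = head->next;
        elem->prev = head;
        head->next->prev = elem;
        head->next = elem;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct rcu_reader: the list *node* is embedded. */
struct reader {
        long ctr;
        struct list_head node;   /* "head" before the rename */
};

static struct list_head registry = LIST_HEAD_INIT(registry);

int main(void)
{
        struct reader r = { .ctr = 42 };

        list_add(&r.node, &registry);

        /* Walk the registry; container_of recovers each reader from
         * its embedded node, which is what list_for_each_entry() does. */
        for (struct list_head *p = registry.next; p != &registry; p = p->next) {
                struct reader *rd = container_of(p, struct reader, node);
                printf("ctr=%ld\n", rd->ctr);
        }
        return 0;
}
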
*/
for (;;) {
wait_loops++;
- list_for_each_entry_safe(index, tmp, &registry, head) {
+ list_for_each_entry_safe(index, tmp, &registry, node) {
if (!rcu_old_gp_ongoing(&index->ctr))
- list_move(&index->head, &qsreaders);
+ list_move(&index->node, &qsreaders);
}
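
Each flavor's grace-period wait has this same shape: rescan the registry, retire readers observed quiescent by moving their node onto qsreaders, and loop until the registry drains. A hedged sketch of that control flow with arrays in place of the intrusive lists (reader_quiescent() is a hypothetical stand-in for !rcu_old_gp_ongoing()):

#include <stdbool.h>
#include <sched.h>
#include <string.h>

#define NREADERS 4

/* Hypothetical per-reader counters; 0 means the reader has no
 * read-side critical section spanning the current grace period. */
static volatile long reader_ctr[NREADERS];

/* Stand-in for !rcu_old_gp_ongoing(&index->ctr). */
static bool reader_quiescent(int i)
{
        return reader_ctr[i] == 0;
}

/* Shape of the wait loop above: keep rescanning the pending set
 * ("registry"), retiring each reader observed quiescent (the
 * list_move() to qsreaders), until none remain. */
static void wait_for_readers(void)
{
        bool pending[NREADERS];
        int remaining = NREADERS;

        memset(pending, true, sizeof(pending));
        while (remaining > 0) {
                for (int i = 0; i < NREADERS; i++) {
                        if (pending[i] && reader_quiescent(i)) {
                                pending[i] = false;
                                remaining--;
                        }
                }
                sched_yield();  /* real code uses adaptive spin/futex waits */
        }
}

int main(void)
{
        wait_for_readers();     /* all ctrs start at 0: returns at once */
        return 0;
}
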
if (list_empty(&registry)) {
/* Add to registry */
rcu_reader_reg->tid = pthread_self();
assert(rcu_reader_reg->ctr == 0);
- list_add(&rcu_reader_reg->head, &registry);
+ list_add(&rcu_reader_reg->node, &registry);
rcu_reader = rcu_reader_reg;
}
ret = pthread_kill(tid, 0);
assert(ret != EINVAL);
if (ret == ESRCH) {
- list_del(&rcu_reader_reg->head);
+ list_del(&rcu_reader_reg->node);
rcu_reader_reg->ctr = 0;
rcu_reader_reg->alloc = 0;
registry_arena.used -= sizeof(struct rcu_reader);
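
urcu-bp ("bulletproof") garbage-collects registry slots left behind by threads that exited without unregistering: pthread_kill() with signal 0 delivers nothing and merely validates the target, returning ESRCH once the thread is gone. A standalone probe of a live thread (note that behaviour for already-terminated thread IDs has varied across libc versions, which is part of what makes this cleanup path delicate):

#include <pthread.h>
#include <signal.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
        /* Signal 0 performs validation only: nothing is delivered.
         * 0 means the thread is alive; ESRCH means it is gone, which
         * is what the cleanup path above keys on. */
        int ret = pthread_kill(pthread_self(), 0);

        printf("probe returned %d (%s)\n", ret,
               ret == 0 ? "alive" : ret == ESRCH ? "gone" : "error");
        return 0;
}
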
/* Data used by both reader and synchronize_rcu() */
unsigned long ctr;
/* Data used for registry */
- struct list_head head __attribute__((aligned(CACHE_LINE_SIZE)));
+ struct list_head node __attribute__((aligned(CACHE_LINE_SIZE)));
pthread_t tid;
};
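
In every flavor the list node carries __attribute__((aligned(CACHE_LINE_SIZE))), presumably so the updater walking the registry links does not false-share a cache line with the reader-hot ctr field. A small layout check (the 64-byte CACHE_LINE_SIZE is an assumption; liburcu configures the real value per architecture):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define CACHE_LINE_SIZE 64   /* assumed; liburcu configures this per arch */

struct list_head { struct list_head *next, *prev; };

struct rcu_reader_like {
        unsigned long ctr;   /* written by the reader on every lock/unlock */
        struct list_head node __attribute__((aligned(CACHE_LINE_SIZE)));
        pthread_t tid;
};

int main(void)
{
        /* The aligned attribute pushes node to the next cache line,
         * so updater traversals of the links stay off ctr's line. */
        printf("offsetof(ctr)  = %zu\n", offsetof(struct rcu_reader_like, ctr));
        printf("offsetof(node) = %zu\n", offsetof(struct rcu_reader_like, node));
        printf("sizeof(struct) = %zu\n", sizeof(struct rcu_reader_like));
        return 0;
}
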
smp_mb();
}
- list_for_each_entry_safe(index, tmp, &registry, head) {
+ list_for_each_entry_safe(index, tmp, &registry, node) {
if (!rcu_gp_ongoing(&index->ctr))
- list_move(&index->head, &qsreaders);
+ list_move(&index->node, &qsreaders);
}
if (list_empty(&registry)) {
assert(rcu_reader.ctr == 0);
mutex_lock(&rcu_gp_lock);
- list_add(&rcu_reader.head, &registry);
+ list_add(&rcu_reader.node, &registry);
mutex_unlock(&rcu_gp_lock);
_rcu_thread_online();
}
*/
_rcu_thread_offline();
mutex_lock(&rcu_gp_lock);
- list_del(&rcu_reader.head);
+ list_del(&rcu_reader.node);
mutex_unlock(&rcu_gp_lock);
}
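
In the QSBR flavor, registration alone is not enough: a thread only blocks grace periods while online, and must either pass through rcu_quiescent_state() regularly or mark itself offline around blocking work, which is why unregistration above goes offline before taking rcu_gp_lock. A hedged sketch of the reader lifecycle against the classic liburcu QSBR API (header name and link flag follow the classic API; newer releases use <urcu/urcu-qsbr.h> and prefixed names):

/* cc demo.c -pthread -lurcu-qsbr */
#include <urcu-qsbr.h>
#include <pthread.h>

static void *reader(void *arg)
{
        (void)arg;
        rcu_register_thread();          /* join the registry */

        for (int i = 0; i < 1000; i++) {
                rcu_read_lock();        /* compiles to nothing in QSBR */
                /* ... read RCU-protected data ... */
                rcu_read_unlock();

                rcu_quiescent_state();  /* announce a quiescent state */
        }

        rcu_thread_offline();           /* stop blocking grace periods */
        /* ... long blocking work would be safe here ... */
        rcu_thread_online();

        rcu_unregister_thread();        /* the list_del() under rcu_gp_lock */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, reader, NULL);
        pthread_join(t, NULL);
        return 0;
}
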
unsigned long ctr;
char need_mb;
/* Data used for registry */
- struct list_head head __attribute__((aligned(CACHE_LINE_SIZE)));
+ struct list_head node __attribute__((aligned(CACHE_LINE_SIZE)));
pthread_t tid;
};
* safe and don't assume anything: we use smp_mc() to make sure the
* cache flush is enforced.
*/
- list_for_each_entry(index, &registry, head) {
+ list_for_each_entry(index, &registry, node) {
STORE_SHARED(index->need_mb, 1);
pthread_kill(index->tid, SIGRCU);
}
* relevant bug report. For Linux kernels, we recommend getting
* the Linux Test Project (LTP).
*/
- list_for_each_entry(index, &registry, head) {
+ list_for_each_entry(index, &registry, node) {
while (LOAD_SHARED(index->need_mb)) {
pthread_kill(index->tid, SIGRCU);
poll(NULL, 0, 1);
smp_mb_master(RCU_MB_GROUP);
}
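
The need_mb handshake above turns reader-side compiler barriers into real memory barriers on demand: the updater sets each reader's flag, signals it, and retries (re-signalling, with poll() as a ~1 ms back-off) until the reader's handler has executed smp_mb() and cleared the flag. A standalone sketch of the handshake, with SIGUSR1 standing in for SIGRCU and C11 atomics for STORE_SHARED/LOAD_SHARED and smp_mb(); this is illustrative, not the liburcu implementation:

/* cc demo.c -pthread */
#include <pthread.h>
#include <signal.h>
#include <stdatomic.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>

static atomic_int need_mb;   /* stand-in for the per-reader need_mb flag */
static atomic_int done;

static void sigrcu_handler(int sig)
{
        (void)sig;
        atomic_thread_fence(memory_order_seq_cst);  /* the forced smp_mb() */
        atomic_store(&need_mb, 0);                  /* acknowledge */
}

static void *reader(void *arg)
{
        (void)arg;
        while (!atomic_load(&done))
                poll(NULL, 0, 1);       /* idle; the signal interrupts this */
        return NULL;
}

int main(void)
{
        pthread_t t;
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = sigrcu_handler;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        pthread_create(&t, NULL, reader, NULL);

        atomic_store(&need_mb, 1);      /* STORE_SHARED(index->need_mb, 1) */
        pthread_kill(t, SIGUSR1);       /* pthread_kill(index->tid, SIGRCU) */
        while (atomic_load(&need_mb))   /* LOAD_SHARED(index->need_mb) */
                poll(NULL, 0, 1);       /* ~1 ms back-off between retries */

        atomic_store(&done, 1);
        pthread_join(t, NULL);
        puts("barrier acknowledged by reader");
        return 0;
}
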
- list_for_each_entry_safe(index, tmp, &registry, head) {
+ list_for_each_entry_safe(index, tmp, &registry, node) {
if (!rcu_gp_ongoing(&index->ctr))
- list_move(&index->head, &qsreaders);
+ list_move(&index->node, &qsreaders);
}
#ifndef HAS_INCOHERENT_CACHES
mutex_lock(&rcu_gp_lock);
rcu_init(); /* In case gcc does not support constructor attribute */
- list_add(&rcu_reader.head, &registry);
+ list_add(&rcu_reader.node, &registry);
mutex_unlock(&rcu_gp_lock);
}
void rcu_unregister_thread(void)
{
mutex_lock(&rcu_gp_lock);
- list_del(&rcu_reader.head);
+ list_del(&rcu_reader.node);
mutex_unlock(&rcu_gp_lock);
}
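
Putting the registry pieces together, the reader-side contract for this flavor is simply rcu_register_thread() before the first read-side critical section and rcu_unregister_thread() before exit, with synchronize_rcu() waiting out every registered reader. A hedged usage sketch against the classic liburcu API (header and -lurcu link flag are the classic spellings; flavor selection differs in newer releases):

/* cc demo.c -pthread -lurcu */
#include <urcu.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static int *shared;

static void *reader(void *arg)
{
        (void)arg;
        rcu_register_thread();            /* the list_add() under rcu_gp_lock */

        rcu_read_lock();
        int *p = rcu_dereference(shared);
        if (p)
                printf("reader saw %d\n", *p);
        rcu_read_unlock();

        rcu_unregister_thread();          /* the list_del() under rcu_gp_lock */
        return NULL;
}

int main(void)
{
        pthread_t t;
        int *p = malloc(sizeof(*p));

        *p = 42;
        rcu_assign_pointer(shared, p);

        rcu_register_thread();
        pthread_create(&t, NULL, reader, NULL);
        pthread_join(t, NULL);

        rcu_assign_pointer(shared, NULL);
        synchronize_rcu();                /* waits for all registered readers */
        free(p);

        rcu_unregister_thread();
        return 0;
}
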