#ifndef _URCU_RCULFQUEUE_STATIC_H
#define _URCU_RCULFQUEUE_STATIC_H
/*
 * Userspace RCU library - Lock-Free RCU Queue
 *
 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
 * dynamically with the userspace rcu library.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <urcu-call-rcu.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif
struct cds_lfq_node_rcu_dummy {
	struct cds_lfq_node_rcu parent;
	struct rcu_head head;
	struct cds_lfq_queue_rcu *q;
};
/*
 * Lock-free RCU queue. Enqueue and dequeue operations hold an RCU read
 * lock to deal with the cmpxchg ABA problem. This queue is *not* circular:
 * head points to the oldest node, tail points to the newest node.
 * A dummy node is kept to ensure enqueue and dequeue can always proceed
 * concurrently. Keeping a separate head and tail helps with large
 * queues: enqueue and dequeue can proceed concurrently without
 * wrestling for exclusive access to the same variables.
 *
 * Dequeue retries if it detects that it would be dequeueing the last node
 * (it means a dummy node dequeue-requeue is in progress). This ensures
 * that there is always at least one node in the queue.
 *
 * In the dequeue operation, we internally reallocate the dummy node
 * upon dequeue/requeue and use call_rcu to free the old one after a
 * grace period.
 */
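
/*
 * Illustrative sketch (not part of this file): user elements typically
 * embed a struct cds_lfq_node_rcu and are recovered from a dequeued
 * node with caa_container_of(). The "struct myelem" type below is a
 * hypothetical example used by the usage sketches further down,
 * assuming a queue of integers freed through call_rcu:
 *
 *	struct myelem {
 *		int value;
 *		struct cds_lfq_node_rcu node;
 *		struct rcu_head rcu_head;	(for deferred free)
 *	};
 */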
static inline
int is_dummy(struct cds_lfq_queue_rcu *q, struct cds_lfq_node_rcu *node)
{
	return node == CMM_LOAD_SHARED(q->dummy);
}
static inline
struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
				    struct cds_lfq_node_rcu *next)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	dummy = malloc(sizeof(struct cds_lfq_node_rcu_dummy));
	assert(dummy);
	dummy->parent.next = next;
	dummy->q = q;
	return &dummy->parent;
}
static inline
void free_dummy(struct rcu_head *head)
{
	struct cds_lfq_node_rcu_dummy *dummy =
		caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);
	free(dummy);
}
static inline
void rcu_free_dummy(struct cds_lfq_node_rcu *node)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
	dummy->q->queue_call_rcu(&dummy->head, free_dummy);
}
static inline
void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
{
	node->next = NULL;
}
static inline
void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
		       void queue_call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	q->tail = make_dummy(q, NULL);
	q->dummy = q->tail;
	q->head = q->tail;
	q->queue_call_rcu = queue_call_rcu;
}
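
/*
 * Illustrative sketch (not part of this file): a queue is typically
 * initialized with the call_rcu() implementation of the chosen urcu
 * flavor, so that old dummy nodes are reclaimed after a grace period.
 * The queue variable "q" below is hypothetical and reused by the
 * sketches further down:
 *
 *	struct cds_lfq_queue_rcu q;
 *
 *	_cds_lfq_init_rcu(&q, call_rcu);
 */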
/*
 * The queue should be emptied before calling destroy.
 *
 * Return 0 on success, -EPERM if queue is not empty.
 */
static inline
int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
{
	struct cds_lfq_node_rcu *head;

	head = rcu_dereference(q->head);
	if (!(is_dummy(q, head) && head->next == NULL))
		return -EPERM;	/* not empty */
	rcu_free_dummy(head);
	return 0;
}
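
/*
 * Illustrative sketch (not part of this file): the queue must be
 * drained before destroy. A hypothetical teardown, assuming the
 * "struct myelem" and queue "q" sketches above, and that the caller
 * may block in synchronize_rcu():
 *
 *	struct cds_lfq_node_rcu *node;
 *	int ret;
 *
 *	for (;;) {
 *		rcu_read_lock();
 *		node = _cds_lfq_dequeue_rcu(&q);
 *		rcu_read_unlock();
 *		if (!node)
 *			break;
 *		synchronize_rcu();	(grace period before free)
 *		free(caa_container_of(node, struct myelem, node));
 *	}
 *	ret = _cds_lfq_destroy_rcu(&q);
 *	assert(!ret);
 */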
/*
 * Should be called under rcu read lock critical section.
 */
static inline
void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
			  struct cds_lfq_node_rcu *node)
{
	/*
	 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
	 * node before publication.
	 */
	for (;;) {
		struct cds_lfq_node_rcu *tail, *next;

		tail = rcu_dereference(q->tail);
		next = uatomic_cmpxchg(&tail->next, NULL, node);
		if (next == NULL) {
			/*
			 * Tail was at the end of queue, we successfully
			 * appended to it. Now move tail (another
			 * enqueue might beat us to it, that's fine).
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, node);
			return;
		} else {
			/*
			 * Failure to append to current tail.
			 * Help moving tail further and retry.
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, next);
			continue;
		}
	}
}
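
/*
 * Illustrative sketch (not part of this file): enqueue must run inside
 * an RCU read-side critical section. Assuming the hypothetical
 * "struct myelem" and queue "q" above:
 *
 *	struct myelem *elem = malloc(sizeof(*elem));
 *
 *	assert(elem);
 *	elem->value = 42;
 *	_cds_lfq_node_init_rcu(&elem->node);
 *	rcu_read_lock();
 *	_cds_lfq_enqueue_rcu(&q, &elem->node);
 *	rcu_read_unlock();
 */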
/*
 * Should be called under rcu read lock critical section.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfq_node_rcu structure.
 * Returns NULL if queue is empty.
 */
static inline
struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
	for (;;) {
		struct cds_lfq_node_rcu *head, *next;

		head = rcu_dereference(q->head);
		next = rcu_dereference(head->next);
		if (is_dummy(q, head) && next == NULL)
			return NULL;	/* empty */
		/*
		 * We never, ever allow dequeue to get to a state where
		 * the queue is empty (we need at least one node in the
		 * queue). This is ensured by checking if the head next
		 * is NULL and retry in that case (this means a
		 * concurrent dummy node re-enqueue is in progress).
		 */
		if (next) {
			if (uatomic_cmpxchg(&q->head, head, next) == head) {
				if (is_dummy(q, head)) {
					struct cds_lfq_node_rcu *node;
					/*
					 * Requeue dummy. We need to
					 * reallocate to protect from
					 * ABA.
					 */
					rcu_free_dummy(head);
					node = make_dummy(q, NULL);
					/*
					 * We are the only thread
					 * allowed to update dummy (we
					 * own the old dummy). Other
					 * dequeue threads read it
					 * concurrently with RCU
					 * read-lock held, which
					 * protects from ABA.
					 */
					CMM_STORE_SHARED(q->dummy, node);
					_cds_lfq_enqueue_rcu(q, node);
					continue;	/* try again */
				}
				return head;
			} else {
				/* Concurrently pushed, retry */
				continue;
			}
		} else {
			/* Dummy node re-enqueue is in progress, retry. */
			continue;
		}
	}
}
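
/*
 * Illustrative sketch (not part of this file): a dequeued node may only
 * be freed after a grace period has elapsed. One way is to defer the
 * free with call_rcu(), assuming the hypothetical "struct myelem" and
 * queue "q" above and a caller-supplied free_elem() callback:
 *
 *	static void free_elem(struct rcu_head *head)
 *	{
 *		free(caa_container_of(head, struct myelem, rcu_head));
 *	}
 *
 * Then, at the dequeue site:
 *
 *	struct cds_lfq_node_rcu *node;
 *
 *	rcu_read_lock();
 *	node = _cds_lfq_dequeue_rcu(&q);
 *	rcu_read_unlock();
 *	if (node) {
 *		struct myelem *elem =
 *			caa_container_of(node, struct myelem, node);
 *
 *		call_rcu(&elem->rcu_head, free_elem);
 *	}
 */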
#ifdef __cplusplus
}
#endif

#endif /* _URCU_RCULFQUEUE_STATIC_H */