#ifndef _URCU_RCULFQUEUE_STATIC_H
#define _URCU_RCULFQUEUE_STATIC_H

/*
 * rculfqueue-static.h
 *
 * Userspace RCU library - Lock-Free RCU Queue
 *
 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
 * dynamically with the userspace rcu library.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu-call-rcu.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>	/* malloc()/free() for the dummy node */

#ifdef __cplusplus
extern "C" {
#endif

struct cds_lfq_node_rcu_dummy {
	struct cds_lfq_node_rcu parent;
	struct rcu_head head;
	struct cds_lfq_queue_rcu *q;
};

/*
 * Lock-free RCU queue. Enqueue and dequeue operations hold an RCU read
 * lock to deal with the cmpxchg ABA problem. This queue is *not*
 * circular: head points to the oldest node, tail points to the newest
 * node. A dummy node is kept to ensure enqueue and dequeue can always
 * proceed concurrently. Keeping a separate head and tail helps with
 * large queues: enqueue and dequeue can proceed concurrently without
 * wrestling for exclusive access to the same variables.
 *
 * Dequeue retries if it detects that it would be dequeuing the last
 * node (which means a dummy node dequeue-requeue is in progress). This
 * ensures that there is always at least one node in the queue.
 *
 * In the dequeue operation, we internally reallocate the dummy node
 * upon dequeue/requeue and use call_rcu to free the old one after a
 * grace period.
 */

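/*
 * Usage sketch (illustrative only, not part of this header): an element
 * type embeds a cds_lfq_node_rcu for queue linkage, and usually an
 * rcu_head so the element can be reclaimed through call_rcu after it is
 * dequeued. The "struct mynode" type, its "value" field and the
 * "myqueue" variable below are hypothetical names:
 *
 *	struct mynode {
 *		int value;
 *		struct cds_lfq_node_rcu node;
 *		struct rcu_head rcu_head;
 *	};
 *
 *	struct cds_lfq_queue_rcu myqueue;
 *
 *	_cds_lfq_init_rcu(&myqueue, call_rcu);
 *
 * call_rcu here is the queue_call_rcu callback the queue uses to reclaim
 * its internal dummy nodes; caa_container_of(ptr, struct mynode, node)
 * recovers the enclosing element from a dequeued node pointer.
 */
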
static inline
struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
				    struct cds_lfq_node_rcu *next)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	dummy = malloc(sizeof(struct cds_lfq_node_rcu_dummy));
	assert(dummy);
	dummy->parent.next = next;
	dummy->parent.dummy = 1;
	dummy->q = q;
	return &dummy->parent;
}

static inline
void free_dummy_cb(struct rcu_head *head)
{
	struct cds_lfq_node_rcu_dummy *dummy =
		caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);
	free(dummy);
}

static inline
void rcu_free_dummy(struct cds_lfq_node_rcu *node)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	assert(node->dummy);
	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
	dummy->q->queue_call_rcu(&dummy->head, free_dummy_cb);
}

static inline
void free_dummy(struct cds_lfq_node_rcu *node)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	assert(node->dummy);
	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
	free(dummy);
}

static inline
void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
{
	node->next = NULL;
	node->dummy = 0;
}

static inline
void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
		       void queue_call_rcu(struct rcu_head *head,
					   void (*func)(struct rcu_head *head)))
{
	q->tail = make_dummy(q, NULL);
	q->head = q->tail;
	q->queue_call_rcu = queue_call_rcu;
}

/*
 * The queue should be emptied before calling destroy.
 *
 * Return 0 on success, -EPERM if the queue is not empty.
 */
static inline
int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
{
	struct cds_lfq_node_rcu *head;

	head = rcu_dereference(q->head);
	if (!(head->dummy && head->next == NULL))
		return -EPERM;	/* not empty */
	free_dummy(head);
	return 0;
}

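/*
 * Shutdown sketch (same hypothetical "myqueue" as above; free_mynode_cb
 * is defined in the dequeue sketch further down): drain the queue
 * element by element, deferring reclaim past a grace period, then
 * destroy the now-empty queue.
 *
 *	struct cds_lfq_node_rcu *snode;
 *	int ret;
 *
 *	for (;;) {
 *		struct mynode *e;
 *
 *		rcu_read_lock();
 *		snode = _cds_lfq_dequeue_rcu(&myqueue);
 *		rcu_read_unlock();
 *		if (!snode)
 *			break;
 *		e = caa_container_of(snode, struct mynode, node);
 *		call_rcu(&e->rcu_head, free_mynode_cb);
 *	}
 *	ret = _cds_lfq_destroy_rcu(&myqueue);
 */
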
/*
 * Should be called within an RCU read-side critical section.
 */
static inline
void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
			  struct cds_lfq_node_rcu *node)
{
	/*
	 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
	 * node before publication.
	 */
	for (;;) {
		struct cds_lfq_node_rcu *tail, *next;

		tail = rcu_dereference(q->tail);
		next = uatomic_cmpxchg(&tail->next, NULL, node);
		if (next == NULL) {
			/*
			 * Tail was at the end of the queue, we successfully
			 * appended to it. Now move tail (another
			 * enqueue might beat us to it, that's fine).
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, node);
			return;
		} else {
			/*
			 * Failure to append to the current tail.
			 * Help moving tail further and retry.
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, next);
			continue;
		}
	}
}

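/*
 * Enqueue sketch (hypothetical "struct mynode" and "myqueue" from the
 * sketch near the top of this file; assumes the calling thread is
 * registered with RCU): initialize the node, then enqueue within a
 * read-side critical section, as required above.
 *
 *	struct mynode *e = malloc(sizeof(*e));
 *
 *	assert(e);
 *	e->value = 42;
 *	_cds_lfq_node_init_rcu(&e->node);
 *	rcu_read_lock();
 *	_cds_lfq_enqueue_rcu(&myqueue, &e->node);
 *	rcu_read_unlock();
 */
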
static inline
void enqueue_dummy(struct cds_lfq_queue_rcu *q)
{
	struct cds_lfq_node_rcu *node;

	/* We need to reallocate to protect from ABA. */
	node = make_dummy(q, NULL);
	_cds_lfq_enqueue_rcu(q, node);
}

/*
 * Should be called within an RCU read-side critical section.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfq_node_rcu structure.
 * Returns NULL if the queue is empty.
 */
static inline
struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
	for (;;) {
		struct cds_lfq_node_rcu *head, *next;

		head = rcu_dereference(q->head);
		next = rcu_dereference(head->next);
		if (head->dummy && next == NULL)
			return NULL;	/* empty */
		/*
		 * We never, ever allow dequeue to get to a state where
		 * the queue is empty (we need at least one node in the
		 * queue). This is ensured by checking if the head next
		 * is NULL, which means we need to enqueue a dummy node
		 * before we can hope to dequeue anything.
		 */
		if (!next) {
			enqueue_dummy(q);
			next = rcu_dereference(head->next);
		}
		if (uatomic_cmpxchg(&q->head, head, next) != head)
			continue;	/* Concurrently pushed. */
		if (head->dummy) {
			/* Free dummy after grace period. */
			rcu_free_dummy(head);
			continue;	/* try again */
		}
		return head;
	}
}

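/*
 * Dequeue sketch (same hypothetical "struct mynode"/"myqueue" as above):
 * dequeue within a read-side critical section, then hand the element to
 * call_rcu instead of freeing it immediately, honoring the grace-period
 * requirement stated above.
 *
 *	static void free_mynode_cb(struct rcu_head *head)
 *	{
 *		struct mynode *e =
 *			caa_container_of(head, struct mynode, rcu_head);
 *		free(e);
 *	}
 *
 *	struct cds_lfq_node_rcu *snode;
 *
 *	rcu_read_lock();
 *	snode = _cds_lfq_dequeue_rcu(&myqueue);
 *	rcu_read_unlock();
 *	if (snode) {
 *		struct mynode *e =
 *			caa_container_of(snode, struct mynode, node);
 *
 *		call_rcu(&e->rcu_head, free_mynode_cb);
 *	}
 */
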
#ifdef __cplusplus
}
#endif

#endif /* _URCU_RCULFQUEUE_STATIC_H */