Commit | Line | Data |
---|---|---|
3d02c34d MD |
1 | #ifndef _URCU_RCULFQUEUE_STATIC_H |
2 | #define _URCU_RCULFQUEUE_STATIC_H | |
3 | ||
4 | /* | |
5 | * rculfqueue-static.h | |
6 | * | |
7 | * Userspace RCU library - Lock-Free RCU Queue | |
8 | * | |
9 | * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
10 | * | |
11 | * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking | |
12 | * dynamically with the userspace rcu library. | |
13 | * | |
14 | * This library is free software; you can redistribute it and/or | |
15 | * modify it under the terms of the GNU Lesser General Public | |
16 | * License as published by the Free Software Foundation; either | |
17 | * version 2.1 of the License, or (at your option) any later version. | |
18 | * | |
19 | * This library is distributed in the hope that it will be useful, | |
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
22 | * Lesser General Public License for more details. | |
23 | * | |
24 | * You should have received a copy of the GNU Lesser General Public | |
25 | * License along with this library; if not, write to the Free Software | |
26 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
27 | */ | |
28 | ||
e17d9985 | 29 | #include <urcu-call-rcu.h> |
01477510 | 30 | #include <urcu/assert.h> |
a2e7bf9c | 31 | #include <urcu/uatomic.h> |
4157e1ac | 32 | #include <urcu-pointer.h> |
e17d9985 | 33 | #include <errno.h> |
3d02c34d MD |
34 | |
35 | #ifdef __cplusplus | |
36 | extern "C" { | |
37 | #endif | |
38 | ||
e17d9985 MD |
/*
 * Internal dummy node. Embeds the public queue node (with dummy == 1 set
 * by make_dummy) plus the bookkeeping needed to reclaim it: an rcu_head
 * for deferred free via call_rcu, and a back-pointer to the owning queue
 * so rcu_free_dummy can reach the queue's queue_call_rcu callback.
 */
struct cds_lfq_node_rcu_dummy {
	struct cds_lfq_node_rcu parent;	/* Embedded queue node; parent.dummy == 1. */
	struct rcu_head head;		/* For deferred reclaim through call_rcu. */
	struct cds_lfq_queue_rcu *q;	/* Owning queue (provides queue_call_rcu). */
};
44 | ||
/*
 * Lock-free RCU queue. Enqueue and dequeue operations hold a RCU read
 * lock to deal with the cmpxchg ABA problem. This queue is *not* circular:
 * head points to the oldest node, tail points to the newest node.
 * A dummy node is kept to ensure enqueue and dequeue can always proceed
 * concurrently. Keeping a separate head and tail helps with large
 * queues: enqueue and dequeue can proceed concurrently without
 * wrestling for exclusive access to the same variables.
 *
 * Dequeue retries if it detects that it would be dequeueing the last node
 * (it means a dummy node dequeue-requeue is in progress). This ensures
 * that there is always at least one node in the queue.
 *
 * In the dequeue operation, we internally reallocate the dummy node
 * upon dequeue/requeue and use call_rcu to free the old one after a
 * grace period.
 */
62 | ||
e17d9985 MD |
63 | static inline |
64 | struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q, | |
65 | struct cds_lfq_node_rcu *next) | |
66 | { | |
67 | struct cds_lfq_node_rcu_dummy *dummy; | |
68 | ||
28757437 SM |
69 | dummy = (struct cds_lfq_node_rcu_dummy *) |
70 | malloc(sizeof(struct cds_lfq_node_rcu_dummy)); | |
01477510 | 71 | urcu_posix_assert(dummy); |
e17d9985 | 72 | dummy->parent.next = next; |
fbdb32f6 | 73 | dummy->parent.dummy = 1; |
e17d9985 | 74 | dummy->q = q; |
909292c2 | 75 | return &dummy->parent; |
e17d9985 MD |
76 | } |
77 | ||
78 | static inline | |
f6719811 | 79 | void free_dummy_cb(struct rcu_head *head) |
e17d9985 MD |
80 | { |
81 | struct cds_lfq_node_rcu_dummy *dummy = | |
82 | caa_container_of(head, struct cds_lfq_node_rcu_dummy, head); | |
83 | free(dummy); | |
84 | } | |
85 | ||
86 | static inline | |
87 | void rcu_free_dummy(struct cds_lfq_node_rcu *node) | |
88 | { | |
89 | struct cds_lfq_node_rcu_dummy *dummy; | |
3d02c34d | 90 | |
01477510 | 91 | urcu_posix_assert(node->dummy); |
909292c2 | 92 | dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent); |
6e5f88cf | 93 | dummy->q->queue_call_rcu(&dummy->head, free_dummy_cb); |
f6719811 MD |
94 | } |
95 | ||
96 | static inline | |
97 | void free_dummy(struct cds_lfq_node_rcu *node) | |
98 | { | |
99 | struct cds_lfq_node_rcu_dummy *dummy; | |
100 | ||
01477510 | 101 | urcu_posix_assert(node->dummy); |
f6719811 MD |
102 | dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent); |
103 | free(dummy); | |
e17d9985 MD |
104 | } |
105 | ||
106 | static inline | |
16aa9ee8 | 107 | void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node) |
3d02c34d MD |
108 | { |
109 | node->next = NULL; | |
fbdb32f6 | 110 | node->dummy = 0; |
3d02c34d MD |
111 | } |
112 | ||
e17d9985 | 113 | static inline |
6e5f88cf MD |
114 | void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q, |
115 | void queue_call_rcu(struct rcu_head *head, | |
116 | void (*func)(struct rcu_head *head))) | |
e17d9985 MD |
117 | { |
118 | q->tail = make_dummy(q, NULL); | |
0cca1a2d | 119 | q->head = q->tail; |
6e5f88cf | 120 | q->queue_call_rcu = queue_call_rcu; |
e17d9985 MD |
121 | } |
122 | ||
123 | /* | |
124 | * The queue should be emptied before calling destroy. | |
125 | * | |
126 | * Return 0 on success, -EPERM if queue is not empty. | |
127 | */ | |
128 | static inline | |
129 | int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q) | |
3d02c34d | 130 | { |
0cca1a2d | 131 | struct cds_lfq_node_rcu *head; |
e17d9985 MD |
132 | |
133 | head = rcu_dereference(q->head); | |
fbdb32f6 | 134 | if (!(head->dummy && head->next == NULL)) |
e17d9985 | 135 | return -EPERM; /* not empty */ |
f6719811 | 136 | free_dummy(head); |
e17d9985 | 137 | return 0; |
3d02c34d MD |
138 | } |
139 | ||
/*
 * Append @node at the tail of @q.
 *
 * Should be called under rcu read lock critical section.
 *
 * @node must have been initialized with _cds_lfq_node_init_rcu and must
 * not be reachable from any other queue. Never fails: loops until the
 * node is linked.
 */
static inline
void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
		struct cds_lfq_node_rcu *node)
{
	/*
	 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
	 * node before publication.
	 */

	for (;;) {
		struct cds_lfq_node_rcu *tail, *next;

		tail = rcu_dereference(q->tail);
		/* Try to link node after the observed tail (only if tail is last). */
		next = uatomic_cmpxchg(&tail->next, NULL, node);
		if (next == NULL) {
			/*
			 * Tail was at the end of queue, we successfully
			 * appended to it. Now move tail (another
			 * enqueue might beat us to it, that's fine).
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, node);
			return;
		} else {
			/*
			 * Failure to append to current tail.
			 * Help moving tail further and retry.
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, next);
			continue;
		}
	}
}
175 | ||
f6719811 | 176 | static inline |
fbdb32f6 | 177 | void enqueue_dummy(struct cds_lfq_queue_rcu *q) |
f6719811 MD |
178 | { |
179 | struct cds_lfq_node_rcu *node; | |
180 | ||
181 | /* We need to reallocate to protect from ABA. */ | |
182 | node = make_dummy(q, NULL); | |
f6719811 MD |
183 | _cds_lfq_enqueue_rcu(q, node); |
184 | } | |
185 | ||
/*
 * Remove and return the oldest node of @q.
 *
 * Should be called under rcu read lock critical section.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfq_node_rcu structure.
 * Returns NULL if queue is empty.
 */
static inline
struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
	for (;;) {
		struct cds_lfq_node_rcu *head, *next;

		head = rcu_dereference(q->head);
		next = rcu_dereference(head->next);
		/* A lone dummy with no successor means the queue is empty. */
		if (head->dummy && next == NULL)
			return NULL;	/* empty */
		/*
		 * We never, ever allow dequeue to get to a state where
		 * the queue is empty (we need at least one node in the
		 * queue). This is ensured by checking if the head next
		 * is NULL, which means we need to enqueue a dummy node
		 * before we can hope dequeuing anything.
		 */
		if (!next) {
			/* Re-read next: the dummy we enqueued (or a racing node). */
			enqueue_dummy(q);
			next = rcu_dereference(head->next);
		}
		/* Claim head; on failure another dequeuer won, start over. */
		if (uatomic_cmpxchg(&q->head, head, next) != head)
			continue;	/* Concurrently pushed. */
		if (head->dummy) {
			/* Free dummy after grace period. */
			rcu_free_dummy(head);
			continue;	/* try again */
		}
		return head;
	}
}
224 | ||
225 | #ifdef __cplusplus | |
226 | } | |
227 | #endif | |
228 | ||
229 | #endif /* _URCU_RCULFQUEUE_STATIC_H */ |