// SPDX-FileCopyrightText: 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_RCULFQUEUE_STATIC_H
#define _URCU_RCULFQUEUE_STATIC_H

/*
 * Userspace RCU library - Lock-Free RCU Queue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
 * dynamically with the userspace rcu library.
 */

#include <urcu-call-rcu.h>
#include <urcu/assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>
#include <errno.h>
#include <stdlib.h>
struct cds_lfq_node_rcu_dummy {
	struct cds_lfq_node_rcu parent;
	struct rcu_head head;
	struct cds_lfq_queue_rcu *q;
};
/*
 * Lock-free RCU queue. Enqueue and dequeue operations hold a RCU read
 * lock to deal with cmpxchg ABA problem. This queue is *not* circular:
 * head points to the oldest node, tail points to the newest node.
 * A dummy node is kept to ensure enqueue and dequeue can always proceed
 * concurrently. Keeping a separate head and tail helps with large
 * queues: enqueue and dequeue can proceed concurrently without
 * wrestling for exclusive access to the same variables.
 *
 * Dequeue retries if it detects that it would be dequeueing the last node
 * (it means a dummy node dequeue-requeue is in progress). This ensures
 * that there is always at least one node in the queue.
 *
 * In the dequeue operation, we internally reallocate the dummy node
 * upon dequeue/requeue and use call_rcu to free the old one after a
 * grace period.
 */
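
/*
 * Illustrative usage sketch (not part of this header): the embedding
 * "struct my_item" type and the variable names below are hypothetical,
 * and rcu_read_lock()/rcu_read_unlock() and call_rcu come from whichever
 * urcu flavor the caller uses. Enqueue must run inside a read-side
 * critical section; the rcu_head member is used for deferred reclaim
 * after dequeue (see the sketch following the dequeue function below).
 *
 *	struct my_item {
 *		int value;
 *		struct cds_lfq_node_rcu node;
 *		struct rcu_head rcu_head;
 *	};
 *
 *	struct cds_lfq_queue_rcu q;
 *
 *	_cds_lfq_init_rcu(&q, call_rcu);
 *
 *	_cds_lfq_node_init_rcu(&item->node);
 *	rcu_read_lock();
 *	_cds_lfq_enqueue_rcu(&q, &item->node);
 *	rcu_read_unlock();
 */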
static inline
struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
				    struct cds_lfq_node_rcu *next)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	dummy = (struct cds_lfq_node_rcu_dummy *)
			malloc(sizeof(struct cds_lfq_node_rcu_dummy));
	urcu_posix_assert(dummy);
	dummy->parent.next = next;
	dummy->parent.dummy = 1;
	dummy->q = q;
	return &dummy->parent;
}
static inline
void free_dummy_cb(struct rcu_head *head)
{
	struct cds_lfq_node_rcu_dummy *dummy =
		caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);

	free(dummy);
}
static inline
void rcu_free_dummy(struct cds_lfq_node_rcu *node)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	urcu_posix_assert(node->dummy);
	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
	dummy->q->queue_call_rcu(&dummy->head, free_dummy_cb);
}
static inline
void free_dummy(struct cds_lfq_node_rcu *node)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	urcu_posix_assert(node->dummy);
	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
	free(dummy);
}
static inline
void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
{
	node->next = NULL;
	node->dummy = 0;
}
static inline
void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
		       void queue_call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	q->tail = make_dummy(q, NULL);
	q->head = q->tail;
	q->queue_call_rcu = queue_call_rcu;
}
/*
 * The queue should be emptied before calling destroy.
 *
 * Return 0 on success, -EPERM if queue is not empty.
 */
static inline
int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
{
	struct cds_lfq_node_rcu *head;

	head = rcu_dereference(q->head);
	if (!(head->dummy && head->next == NULL))
		return -EPERM;	/* not empty */
	free_dummy(head);
	return 0;
}
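
/*
 * Illustrative teardown sketch (hypothetical caller code, not part of this
 * header): dequeue every remaining node before destroying the queue, since
 * destroy returns -EPERM on a non-empty queue.
 *
 *	struct cds_lfq_node_rcu *qnode;
 *	int ret;
 *
 *	for (;;) {
 *		rcu_read_lock();
 *		qnode = _cds_lfq_dequeue_rcu(&q);
 *		rcu_read_unlock();
 *		if (!qnode)
 *			break;
 *		... dispose of qnode only after a grace period ...
 *	}
 *	ret = _cds_lfq_destroy_rcu(&q);
 */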
/*
 * Should be called under rcu read lock critical section.
 */
static inline
void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
			  struct cds_lfq_node_rcu *node)
{
	/*
	 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
	 * node before publication.
	 */
	for (;;) {
		struct cds_lfq_node_rcu *tail, *next;

		tail = rcu_dereference(q->tail);
		cmm_emit_legacy_smp_mb();
		next = uatomic_cmpxchg_mo(&tail->next, NULL, node,
					CMM_SEQ_CST, CMM_SEQ_CST);
		if (next == NULL) {
			/*
			 * Tail was at the end of queue, we successfully
			 * appended to it. Now move tail (another
			 * enqueue might beat us to it, that's fine).
			 */
			(void) uatomic_cmpxchg_mo(&q->tail, tail, node,
						CMM_SEQ_CST, CMM_SEQ_CST);
			return;
		} else {
			/*
			 * Failure to append to current tail.
			 * Help moving tail further and retry.
			 */
			(void) uatomic_cmpxchg_mo(&q->tail, tail, next,
						CMM_SEQ_CST, CMM_SEQ_CST);
			continue;
		}
	}
}
static inline
void enqueue_dummy(struct cds_lfq_queue_rcu *q)
{
	struct cds_lfq_node_rcu *node;

	/* We need to reallocate to protect from ABA. */
	node = make_dummy(q, NULL);
	_cds_lfq_enqueue_rcu(q, node);
}
/*
 * Should be called under rcu read lock critical section.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfq_node_rcu structure.
 * Returns NULL if queue is empty.
 */
static inline
struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
	for (;;) {
		struct cds_lfq_node_rcu *head, *next;

		head = rcu_dereference(q->head);
		next = rcu_dereference(head->next);
		if (head->dummy && next == NULL)
			return NULL;	/* empty */
		/*
		 * We never, ever allow dequeue to get to a state where
		 * the queue is empty (we need at least one node in the
		 * queue). This is ensured by checking if the head next
		 * is NULL, which means we need to enqueue a dummy node
		 * before we can hope to dequeue anything.
		 */
		if (!next) {
			enqueue_dummy(q);
			next = rcu_dereference(head->next);
		}
		if (uatomic_cmpxchg_mo(&q->head, head, next,
					CMM_SEQ_CST, CMM_SEQ_CST) != head)
			continue;	/* Concurrently pushed. */
		if (head->dummy) {
			/* Free dummy after grace period. */
			rcu_free_dummy(head);
			continue;	/* try again */
		}
		return head;
	}
}
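
/*
 * Illustrative dequeue sketch (hypothetical caller code reusing the
 * "struct my_item" embedding shown near the top of this file, not part of
 * this header): the returned node must only be reclaimed after a grace
 * period, for example through the flavor's call_rcu().
 *
 *	static void free_my_item(struct rcu_head *head)
 *	{
 *		free(caa_container_of(head, struct my_item, rcu_head));
 *	}
 *
 *	rcu_read_lock();
 *	qnode = _cds_lfq_dequeue_rcu(&q);
 *	rcu_read_unlock();
 *	if (qnode) {
 *		struct my_item *item =
 *			caa_container_of(qnode, struct my_item, node);
 *		call_rcu(&item->rcu_head, free_my_item);
 *	}
 */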
#endif	/* _URCU_RCULFQUEUE_STATIC_H */