Commit | Line | Data |
---|---|---|
4afee0a7 MD |
1 | #ifndef _URCU_WFQUEUE_H |
2 | #define _URCU_WFQUEUE_H | |
3 | ||
4 | /* | |
5 | * wfqueue.h | |
6 | * | |
7 | * Userspace RCU library - Queue with Wait-Free Enqueue/Blocking Dequeue | |
8 | * | |
9 | * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
10 | * | |
11 | * This library is free software; you can redistribute it and/or | |
12 | * modify it under the terms of the GNU Lesser General Public | |
13 | * License as published by the Free Software Foundation; either | |
14 | * version 2.1 of the License, or (at your option) any later version. | |
15 | * | |
16 | * This library is distributed in the hope that it will be useful, | |
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
19 | * Lesser General Public License for more details. | |
20 | * | |
21 | * You should have received a copy of the GNU Lesser General Public | |
22 | * License along with this library; if not, write to the Free Software | |
23 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
24 | */ | |
25 | ||
#include <pthread.h>
#include <assert.h>
#include <poll.h>
#include <urcu/compiler.h>
29 | ||
30 | #ifdef __cplusplus | |
31 | extern "C" { | |
32 | #endif | |
33 | ||
34 | #if (!defined(_GNU_SOURCE) && !defined(_LGPL_SOURCE)) | |
35 | #error "Dynamic loader LGPL wrappers not implemented yet" | |
36 | #endif | |
37 | ||
38 | /* | |
39 | * Queue with wait-free enqueue/blocking dequeue. | |
40 | * This implementation adds a dummy head node when the queue is empty to ensure | |
41 | * we can always update the queue locklessly. | |
42 | * | |
43 | * Inspired from half-wait-free/half-blocking queue implementation done by | |
44 | * Paul E. McKenney. | |
45 | */ | |
46 | ||
/*
 * Queue link. Embed this in the structure to be queued; a dequeued node
 * may be reused or freed immediately.
 */
struct wfq_node {
	struct wfq_node *next;	/* NULL marks the queue end (or an in-progress enqueue) */
};
50 | ||
/*
 * Queue head/tail state. Invariant: "tail" always points to the "next"
 * field of the last node; an empty queue contains only the dummy node
 * (head == &dummy, tail == &dummy.next).
 */
struct wfq_queue {
	struct wfq_node *head, **tail;
	struct wfq_node dummy;	/* Dummy node */
	pthread_mutex_t lock;	/* serializes dequeuers (enqueue is lockless) */
};
56 | ||
57 | void wfq_node_init(struct wfq_node *node) | |
58 | { | |
59 | node->next = NULL; | |
60 | } | |
61 | ||
62 | void wfq_init(struct wfq_queue *q) | |
63 | { | |
64 | int ret; | |
65 | ||
66 | wfq_node_init(&q->dummy); | |
67 | /* Set queue head and tail */ | |
68 | q->head = &q->dummy; | |
69 | q->tail = &q->dummy.next; | |
70 | ret = pthread_mutex_init(&q->lock, NULL); | |
71 | assert(!ret); | |
72 | } | |
73 | ||
74 | void wfq_enqueue(struct wfq_queue *q, struct wfq_node *node) | |
75 | { | |
76 | struct wfq_node **old_tail; | |
77 | ||
78 | /* | |
79 | * uatomic_xchg() implicit memory barrier orders earlier stores to data | |
80 | * structure containing node and setting node->next to NULL before | |
81 | * publication. | |
82 | */ | |
83 | old_tail = uatomic_xchg(&q->tail, node); | |
84 | /* | |
85 | * At this point, dequeuers see a NULL old_tail->next, which indicates | |
86 | * that the queue is being appended to. The following store will append | |
87 | * "node" to the queue from a dequeuer perspective. | |
88 | */ | |
89 | STORE_SHARED(*old_tail, node); | |
90 | } | |
91 | ||
92 | /* | |
93 | * It is valid to reuse and free a dequeued node immediately. | |
94 | * | |
95 | * No need to go on a waitqueue here, as there is no possible state in which the | |
96 | * list could cause dequeue to busy-loop needlessly while waiting for another | |
97 | * thread to be scheduled. The queue appears empty until tail->next is set by | |
98 | * enqueue. | |
99 | */ | |
100 | struct wfq_node * | |
101 | __wfq_dequeue_blocking(struct wfq_queue *q) | |
102 | { | |
103 | struct wfq_node *node, *next; | |
104 | int busy_wait = 16; | |
105 | ||
106 | /* | |
107 | * Queue is empty if it only contains the dummy node. | |
108 | */ | |
109 | if (q->head == &q->dummy && LOAD_SHARED(q->tail) == &q->dummy.next) | |
110 | return NULL; | |
111 | node = q->head; | |
112 | ||
113 | /* | |
114 | * Adaptative busy-looping waiting for enqueuer to complete enqueue. | |
115 | */ | |
116 | while ((next = LOAD_SHARED(node->next)) == NULL) { | |
117 | if (busy_wait > 0) { | |
118 | cpu_relax(); | |
119 | busy_wait--; | |
120 | } else | |
121 | poll(NULL, 0, 1); /* Wait for 1ms */ | |
122 | } | |
123 | /* | |
124 | * Move queue head forward. | |
125 | */ | |
126 | q->head = next; | |
127 | /* | |
128 | * Requeue dummy node if we just dequeued it. | |
129 | */ | |
130 | if (node == &q->dummy) { | |
131 | wfq_node_init(node); | |
132 | wfq_enqueue(q, node); | |
133 | return __wfq_dequeue_blocking(q); | |
134 | } | |
135 | return node; | |
136 | } | |
137 | ||
138 | struct wfq_node * | |
139 | wfq_dequeue_blocking(struct wfq_queue *q) | |
140 | { | |
141 | struct wfq_node *retnode; | |
142 | int ret; | |
143 | ||
144 | ret = pthread_mutex_lock(&q->lock); | |
145 | assert(!ret); | |
146 | retnode = __wfq_dequeue_blocking(q); | |
147 | ret = pthread_mutex_unlock(&q->lock); | |
148 | assert(!ret); | |
149 | return retnode; | |
150 | } | |
151 | ||
152 | #ifdef __cplusplus | |
153 | } | |
154 | #endif | |
155 | ||
156 | #endif /* _URCU_WFQUEUE_H */ |