/*
 * rcuhash: fixed-size, chained hash table protected by userspace RCU.
 * (Header residue from a blame/table export removed.)
 */
1 | |
2 | #define _LGPL_SOURCE | |
3 | #include <stdlib.h> | |
4 | #include <urcu.h> | |
5 | #include <arch.h> | |
6 | #include <arch_atomic.h> | |
7 | #include <assert.h> | |
8 | #include <compiler.h> | |
9 | #include <urcu-defer.h> | |
10 | #include <errno.h> | |
11 | ||
12 | #define HASH_SIZE 4096 | |
13 | ||
/* Hash function: maps an opaque key to a bucket index in [0, HASH_SIZE). */
typedef unsigned long (*ht_hash_fct)(void *key);

struct rcu_ht_node;

/* One entry in a bucket's singly-linked chain. */
struct rcu_ht_node {
	struct rcu_ht_node *next;	/* next node in chain; traversed under rcu_dereference() */
	void *key;			/* opaque key; matched by pointer equality */
	void *data;			/* opaque payload; handed to free_fct on reclaim */
};
23 | ||
/* Fixed-size hash table: HASH_SIZE bucket heads, each an RCU-protected chain. */
struct rcu_ht {
	struct rcu_ht_node *tbl[HASH_SIZE];	/* bucket heads; updated with rcu_cmpxchg_pointer() */
	ht_hash_fct hash_fct;			/* key -> bucket index */
	void (*free_fct)(void *data); /* fct to free data */
};
29 | ||
/* Allocate a new table; free_fct is invoked via call_rcu() to reclaim node data. */
struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data));

/* Empty the table, deferring reclaim of nodes/data through RCU. */
void ht_delete_all(struct rcu_ht *ht);

/* Tear down the table; callers must guarantee no concurrent access. */
int ht_destroy(struct rcu_ht *ht);

/* Return the data associated with key, or NULL if absent. */
void *ht_lookup(struct rcu_ht *ht, void *key);

/* Insert key->data; 0 on success, -EEXIST if key already present. */
int ht_add(struct rcu_ht *ht, void *key, void *data);

/* Remove key and defer-free its node/data; 0 or -ENOENT. */
int ht_delete(struct rcu_ht *ht, void *key);

/* Unlink and return the node for key without freeing it;
 * returns (void *)(unsigned long)-ENOENT if the key is absent. */
void *ht_steal(struct rcu_ht *ht, void *key);
43 | ||
44 | ||
45 | /* Implementation */ | |
46 | ||
47 | static unsigned long stupid_hash(void *key) | |
48 | { | |
49 | return (unsigned long)key % HASH_SIZE; | |
50 | } | |
51 | ||
52 | struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data)) | |
53 | { | |
54 | struct rcu_ht *ht; | |
55 | ||
56 | ht = calloc(1, sizeof(struct rcu_ht)); | |
57 | ht->hash_fct = hash_fct; | |
58 | ht->free_fct = free_fct; | |
59 | } | |
60 | ||
61 | /* delete all elements */ | |
62 | void ht_delete_all(struct rcu_ht *ht) | |
63 | { | |
64 | unsigned long i; | |
65 | struct rcu_ht_node **prev, *node; | |
66 | ||
67 | for (i = 0; i < HASH_SIZE; i++) { | |
68 | rcu_read_lock(); | |
69 | ||
70 | prev = &ht->tbl[i]; | |
71 | node = rcu_dereference(*prev); | |
72 | /* | |
73 | * Cut the head, therefore whole bucket will be unused | |
74 | * after GP. (except for concurrent additions) | |
75 | */ | |
76 | rcu_assign_pointer(prev, NULL); | |
77 | for (;;) { | |
78 | if (likely(!node)) { | |
79 | break; | |
80 | } | |
81 | prev = &node->next; | |
82 | if (node->data) | |
83 | call_rcu(ht->free_fct, node->data); | |
84 | call_rcu(free, node); | |
85 | node = rcu_dereference(*prev); | |
86 | } | |
87 | rcu_read_unlock(); | |
88 | } | |
89 | } | |
90 | ||
/*
 * Free all elements and the table itself.
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 * Returns 0.
 */
int ht_destroy(struct rcu_ht *ht)
{
	ht_delete_all(ht);
	free(ht);
	return 0;	/* was missing: int function fell off the end (UB if used) */
}
100 | ||
101 | void *ht_lookup(struct rcu_ht *ht, void *key) | |
102 | { | |
103 | unsigned long hash; | |
104 | struct rcu_ht_node *node; | |
105 | void *ret; | |
106 | ||
107 | hash = ht->hash_fct(key); | |
108 | ||
109 | rcu_read_lock(); | |
110 | node = rcu_dereference(ht->tbl[hash]); | |
111 | for (;;) { | |
112 | if (likely(!node)) { | |
113 | ret = NULL; | |
114 | break; | |
115 | } | |
116 | if (node->key == key) { | |
117 | ret = node->data; | |
118 | break; | |
119 | } | |
120 | node = rcu_dereference(node->next); | |
121 | } | |
122 | rcu_read_unlock(); | |
123 | } | |
124 | ||
125 | /* | |
126 | * Will re-try until either: | |
127 | * - The key is already there (-EEXIST) | |
128 | * - We successfully add the key at the head of a table bucket. | |
129 | */ | |
130 | int ht_add(struct rcu_ht *ht, void *key, void *data) | |
131 | { | |
132 | struct rcu_ht_node *node, *old_head, *new_head; | |
133 | unsigned long hash; | |
134 | int ret = 0; | |
135 | ||
136 | new_head = calloc(1, sizeof(struct rcu_ht_node)); | |
137 | new_head->key = key; | |
138 | new_head->data = data; | |
139 | hash = ht->hash_fct(key); | |
140 | ||
141 | /* here comes the fun and tricky part. | |
142 | * Add at the beginning with a cmpxchg. | |
143 | * Hold a read lock between the moment the first element is read | |
144 | * and the nodes traversal (to find duplicates). This ensures | |
145 | * the head pointer has not been reclaimed when cmpxchg is done. | |
146 | * Always adding at the head ensures that we would have to | |
147 | * re-try if a new item has been added concurrently. So we ensure that | |
148 | * we never add duplicates. */ | |
149 | retry: | |
150 | rcu_read_lock(); | |
151 | ||
152 | old_head = node = rcu_dereference(ht->tbl[hash]); | |
153 | for (;;) { | |
154 | if (likely(!node)) { | |
155 | break; | |
156 | } | |
157 | if (node->key == key) { | |
158 | ret = -EEXIST; | |
159 | goto end; | |
160 | } | |
161 | node = rcu_dereference(node->next); | |
162 | } | |
163 | if (rcu_cmpxchg_pointer(&ht->tbl[hash], old_head, new_head) != old_head) | |
164 | goto restart; | |
165 | end: | |
166 | rcu_read_unlock(); | |
167 | ||
168 | return ret; | |
169 | ||
170 | /* restart loop, release and re-take the read lock to be kind to GP */ | |
171 | restart: | |
172 | rcu_read_unlock(); | |
173 | goto retry; | |
174 | } | |
175 | ||
176 | /* | |
177 | * Restart until we successfully remove the entry, or no entry is left | |
178 | * ((void *)(unsigned long)-ENOENT). | |
179 | */ | |
180 | struct rcu_ht_node *ht_steal(struct rcu_ht *ht, void *key); | |
181 | { | |
182 | struct rcu_ht_node **prev, *node; | |
183 | unsigned long hash; | |
184 | ||
185 | hash = ht->hash_fct(key); | |
186 | ||
187 | retry: | |
188 | rcu_read_lock(); | |
189 | ||
190 | prev = &ht->tbl[hash]; | |
191 | node = rcu_dereference(*prev); | |
192 | for (;;) { | |
193 | if (likely(!node)) { | |
194 | node = (void *)(unsigned long)-ENOENT; | |
195 | goto end; | |
196 | } | |
197 | if (node->key == key) { | |
198 | break; | |
199 | } | |
200 | prev = &node->next; | |
201 | node = rcu_dereference(*prev); | |
202 | } | |
203 | /* Found it ! pointer to object is in "prev" */ | |
204 | if (rcu_cmpxchg_pointer(prev, node, node->next) != node) | |
205 | goto restart; | |
206 | end: | |
207 | rcu_read_unlock(); | |
208 | ||
209 | return node; | |
210 | ||
211 | /* restart loop, release and re-take the read lock to be kind to GP */ | |
212 | restart: | |
213 | rcu_read_unlock(); | |
214 | goto retry; | |
215 | } | |
216 | ||
217 | int ht_delete(struct rcu_ht *ht, void *key) | |
218 | { | |
219 | struct rcu_ht_node *node; | |
220 | ||
221 | node = ht_steal(ht, key); | |
222 | if (node) { | |
223 | if (free_fct && node->data) | |
224 | call_rcu(ht->free_fct, node->data); | |
225 | call_rcu(free, node); | |
226 | return 0; | |
227 | } else { | |
228 | return -ENOENT; | |
229 | } | |
230 | } |