/*
 * mmap/reservation based memory management for Lock-Free RCU Hash Table
 *
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/mman.h>
#include "rculfhash-internal.h"

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
/*
 * The allocation scheme used by the mmap-based RCU hash table is to make a
 * large inaccessible mapping to reserve memory without allocating it.
 * Smaller chunks are then allocated by overlapping read/write mappings which
 * do allocate memory. Deallocation is done by an overlapping inaccessible
 * mapping.
 *
 * This scheme was tested on Linux, macOS and Solaris. However, on Cygwin the
 * mmap wrapper is based on the Windows NtMapViewOfSection API which doesn't
 * support overlapping mappings.
 *
 * An alternative to the overlapping mappings is to use mprotect to change the
 * protection on chunks of the large mapping: read/write to allocate and none
 * to deallocate. This works perfectly on Cygwin and Solaris, but on Linux a
 * call to madvise is also required to deallocate, and it just doesn't work on
 * macOS.
 *
 * For this reason, we keep the original scheme on all platforms except Cygwin.
 */
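/*
 * A minimal sketch of the scheme, for illustration only (error handling
 * omitted; the 1 GiB reservation size and the 4 KiB chunk size are
 * arbitrary assumptions):
 *
 *	size_t resv_len = 1UL << 30;
 *	// Reserve address space only: PROT_NONE pages consume no memory.
 *	char *base = mmap(NULL, resv_len, PROT_NONE,
 *			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	// Allocate the first 4 KiB chunk with an overlapping rw mapping.
 *	mmap(base, 4096, PROT_READ | PROT_WRITE,
 *			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	base[0] = 1;	// Now backed by real memory.
 *	// Deallocate: overlap again with PROT_NONE; the range stays reserved.
 *	mmap(base, 4096, PROT_NONE,
 *			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	munmap(base, resv_len);	// Release the whole reservation.
 */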
/* Reserve inaccessible memory space without allocating it */
static void *memory_map(size_t length)
{
	void *ret;

	ret = mmap(NULL, length, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ret == MAP_FAILED) {
		perror("mmap");
		abort();
	}
	return ret;
}

static void memory_unmap(void *ptr, size_t length)
{
	if (munmap(ptr, length)) {
		perror("munmap");
		abort();
	}
}
#ifdef __CYGWIN__
/* Set protection to read/write to allocate a memory chunk */
static void memory_populate(void *ptr, size_t length)
{
	if (mprotect(ptr, length, PROT_READ | PROT_WRITE)) {
		perror("mprotect");
		abort();
	}
}

/* Set protection to none to deallocate a memory chunk */
static void memory_discard(void *ptr, size_t length)
{
	if (mprotect(ptr, length, PROT_NONE)) {
		perror("mprotect");
		abort();
	}
}

#else /* __CYGWIN__ */
static void memory_populate(void *ptr, size_t length)
{
	if (mmap(ptr, length, PROT_READ | PROT_WRITE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
			-1, 0) != ptr) {
		perror("mmap");
		abort();
	}
}

/*
 * Discard garbage memory while keeping the range reserved and inaccessible,
 * so the system does not try to save its contents when swapping it out.
 */
static void memory_discard(void *ptr, size_t length)
{
	if (mmap(ptr, length, PROT_NONE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
			-1, 0) != ptr) {
		perror("mmap");
		abort();
	}
}
#endif /* __CYGWIN__ */
static
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	if (order == 0) {
		if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
			/* small table */
			ht->tbl_mmap = calloc(ht->max_nr_buckets,
					sizeof(*ht->tbl_mmap));
			assert(ht->tbl_mmap);
			return;
		}
		/* large table */
		ht->tbl_mmap = memory_map(ht->max_nr_buckets
			* sizeof(*ht->tbl_mmap));
		memory_populate(ht->tbl_mmap,
			ht->min_nr_alloc_buckets * sizeof(*ht->tbl_mmap));
	} else if (order > ht->min_alloc_buckets_order) {
		/* large table */
		unsigned long len = 1UL << (order - 1);

		assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
		memory_populate(ht->tbl_mmap + len,
			len * sizeof(*ht->tbl_mmap));
	}
	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}
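/*
 * Illustration of the order-based growth (assuming a min_alloc_buckets_order
 * of 2, i.e. min_nr_alloc_buckets == 4): order 0 reserves the whole table
 * and populates buckets [0, 4); orders 1 and 2 fall within the initial
 * allocation, so nothing is done; order 3 populates buckets [4, 8), order 4
 * populates [8, 16), and so on, each new order doubling the table by
 * committing the index range [2^(order-1), 2^order).
 */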
/*
 * cds_lfht_free_bucket_table() should be called with decreasing order.
 * When cds_lfht_free_bucket_table(0) is called, it means the whole
 * lfht is destroyed.
 */
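/*
 * For example, a table grown up to order 4 is torn down by calling
 * cds_lfht_free_bucket_table() with orders 4, 3, 2, 1, then 0, so that each
 * memory_discard() releases the most recently committed chunk before the
 * base mapping (or calloc'd small table) is released at order 0.
 */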
static
void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	if (order == 0) {
		if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
			/* small table */
			poison_free(ht->tbl_mmap);
			return;
		}
		/* large table */
		memory_unmap(ht->tbl_mmap,
			ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
	} else if (order > ht->min_alloc_buckets_order) {
		/* large table */
		unsigned long len = 1UL << (order - 1);

		assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
		memory_discard(ht->tbl_mmap + len, len * sizeof(*ht->tbl_mmap));
	}
	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}
static
struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
{
	return &ht->tbl_mmap[index];
}
static
struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
		unsigned long max_nr_buckets)
{
	unsigned long page_bucket_size;

	page_bucket_size = getpagesize() / sizeof(struct cds_lfht_node);
	if (max_nr_buckets <= page_bucket_size) {
		/* small table */
		min_nr_alloc_buckets = max_nr_buckets;
	} else {
		/* large table */
		min_nr_alloc_buckets = max(min_nr_alloc_buckets,
					page_bucket_size);
	}

	return __default_alloc_cds_lfht(
			&cds_lfht_mm_mmap, sizeof(struct cds_lfht),
			min_nr_alloc_buckets, max_nr_buckets);
}
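/*
 * As a worked example: with 4 KiB pages and a 16-byte struct cds_lfht_node
 * (two pointer-sized fields on a typical 64-bit ABI; both sizes are
 * assumptions, not guarantees), page_bucket_size is 256. A table capped at
 * 256 buckets or fewer is then a "small table" served by calloc(), while a
 * larger table goes through the mmap reservation scheme with at least one
 * page committed up front.
 */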
const struct cds_lfht_mm_type cds_lfht_mm_mmap = {
	.alloc_cds_lfht = alloc_cds_lfht,
	.alloc_bucket_table = cds_lfht_alloc_bucket_table,
	.free_bucket_table = cds_lfht_free_bucket_table,
	.bucket_at = bucket_at,
};
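/*
 * Usage sketch, describing the caller side rather than this file: the mmap
 * backend is selected by passing &cds_lfht_mm_mmap as the mm argument of
 * _cds_lfht_new(). The exact argument list below is an assumption based on
 * the internal constructor's signature, and "flavor" stands for a pointer to
 * the RCU flavor the table should use:
 *
 *	struct cds_lfht *ht = _cds_lfht_new(1024, 1024, 1UL << 20,
 *			CDS_LFHT_AUTO_RESIZE, &cds_lfht_mm_mmap,
 *			flavor, NULL);
 */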