2 * SPDX-License-Identifier: LGPL-2.1-or-later
4 * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
6 * mmap/reservation based memory management for Lock-Free RCU Hash Table
14 #include "rculfhash-internal.h"
17 #define MAP_ANONYMOUS MAP_ANON
21 * The allocation scheme used by the mmap based RCU hash table is to make a
22 * large unaccessible mapping to reserve memory without allocating it.
23 * Then smaller chunks are allocated by overlapping read/write mappings which
24 * do allocate memory. Deallocation is done by an overlapping unaccessible
27 * This scheme was tested on Linux, macOS and Solaris. However, on Cygwin the
28 * mmap wrapper is based on the Windows NtMapViewOfSection API which doesn't
29 * support overlapping mappings.
31 * An alternative to the overlapping mappings is to use mprotect to change the
32 * protection on chunks of the large mapping, read/write to allocate and none
33 * to deallocate. This works perfecty on Cygwin and Solaris but on Linux a
34 * call to madvise is also required to deallocate and it just doesn't work on
37 * For this reason, we keep the original scheme on all platforms except Cygwin.
/* Reserve inaccessible memory space without allocating it */
void *memory_map(size_t length)
{
	void *ret;

	/*
	 * PROT_NONE reservation: the address range is claimed but no
	 * physical memory is committed until memory_populate() is called.
	 */
	ret = mmap(NULL, length, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
			-1, 0);
	if (ret == MAP_FAILED) {
		perror("mmap");
		abort();
	}
	return ret;
}
/* Release a memory range previously reserved with memory_map() */
void memory_unmap(void *ptr, size_t length)
{
	if (munmap(ptr, length)) {
		perror("munmap");
		abort();
	}
}
/* Set protection to read/write to allocate a memory chunk */
void memory_populate(void *ptr, size_t length)
{
	/*
	 * Cygwin path: flip protection on a slice of the big PROT_NONE
	 * reservation instead of overlap-mapping (NtMapViewOfSection
	 * cannot overlap mappings).
	 */
	if (mprotect(ptr, length, PROT_READ | PROT_WRITE)) {
		perror("mprotect");
		abort();
	}
}
/* Set protection to none to deallocate a memory chunk */
void memory_discard(void *ptr, size_t length)
{
	/*
	 * Cygwin path: the range stays reserved (part of the big mapping)
	 * but becomes inaccessible again.
	 */
	if (mprotect(ptr, length, PROT_NONE)) {
		perror("mprotect");
		abort();
	}
}
85 #else /* __CYGWIN__ */
/* Allocate a chunk by overlapping a read/write mapping over the reservation */
void memory_populate(void *ptr, size_t length)
{
	/*
	 * MAP_FIXED overlap-map on top of the PROT_NONE reservation; this
	 * commits memory for [ptr, ptr + length).  mmap() with MAP_FIXED
	 * returns ptr on success, so any other value is an error.
	 */
	if (mmap(ptr, length, PROT_READ | PROT_WRITE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
			-1, 0) != ptr) {
		perror("mmap");
		abort();
	}
}
/*
 * Discard garbage memory so the system does not try to save it when
 * swapping it out.  The range stays reserved, but inaccessible.
 */
void memory_discard(void *ptr, size_t length)
{
	/*
	 * Overlap-map a fresh PROT_NONE anonymous mapping: this drops the
	 * backing pages while keeping the address range reserved.  mmap()
	 * with MAP_FIXED returns ptr on success.
	 */
	if (mmap(ptr, length, PROT_NONE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
			-1, 0) != ptr) {
		perror("mmap");
		abort();
	}
}
112 #endif /* __CYGWIN__ */
115 void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht
*ht
, unsigned long order
)
118 if (ht
->min_nr_alloc_buckets
== ht
->max_nr_buckets
) {
120 ht
->tbl_mmap
= calloc(ht
->max_nr_buckets
,
121 sizeof(*ht
->tbl_mmap
));
122 assert(ht
->tbl_mmap
);
126 ht
->tbl_mmap
= memory_map(ht
->max_nr_buckets
127 * sizeof(*ht
->tbl_mmap
));
128 memory_populate(ht
->tbl_mmap
,
129 ht
->min_nr_alloc_buckets
* sizeof(*ht
->tbl_mmap
));
130 } else if (order
> ht
->min_alloc_buckets_order
) {
132 unsigned long len
= 1UL << (order
- 1);
134 assert(ht
->min_nr_alloc_buckets
< ht
->max_nr_buckets
);
135 memory_populate(ht
->tbl_mmap
+ len
,
136 len
* sizeof(*ht
->tbl_mmap
));
138 /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
142 * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
143 * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
147 void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht
*ht
, unsigned long order
)
150 if (ht
->min_nr_alloc_buckets
== ht
->max_nr_buckets
) {
152 poison_free(ht
->tbl_mmap
);
156 memory_unmap(ht
->tbl_mmap
,
157 ht
->max_nr_buckets
* sizeof(*ht
->tbl_mmap
));
158 } else if (order
> ht
->min_alloc_buckets_order
) {
160 unsigned long len
= 1UL << (order
- 1);
162 assert(ht
->min_nr_alloc_buckets
< ht
->max_nr_buckets
);
163 memory_discard(ht
->tbl_mmap
+ len
, len
* sizeof(*ht
->tbl_mmap
));
165 /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
169 struct lttng_ust_lfht_node
*bucket_at(struct lttng_ust_lfht
*ht
, unsigned long index
)
171 return &ht
->tbl_mmap
[index
];
175 struct lttng_ust_lfht
*alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets
,
176 unsigned long max_nr_buckets
)
178 unsigned long page_bucket_size
;
180 page_bucket_size
= getpagesize() / sizeof(struct lttng_ust_lfht_node
);
181 if (max_nr_buckets
<= page_bucket_size
) {
183 min_nr_alloc_buckets
= max_nr_buckets
;
186 min_nr_alloc_buckets
= max(min_nr_alloc_buckets
,
190 return __default_alloc_lttng_ust_lfht(
191 <tng_ust_lfht_mm_mmap
, sizeof(struct lttng_ust_lfht
),
192 min_nr_alloc_buckets
, max_nr_buckets
);
195 const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_mmap
= {
196 .alloc_lttng_ust_lfht
= alloc_lttng_ust_lfht
,
197 .alloc_bucket_table
= lttng_ust_lfht_alloc_bucket_table
,
198 .free_bucket_table
= lttng_ust_lfht_free_bucket_table
,
199 .bucket_at
= bucket_at
,