lttng-tools.git: src/common/hashtable/rculfhash-mm-mmap.c
/*
 * rculfhash-mm-mmap.c
 *
 * mmap/reservation based memory management for Lock-Free RCU Hash Table
 *
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <unistd.h>
#include <sys/mman.h>
#include <assert.h>
#include "rculfhash-internal.h"

/* Reserve inaccessible memory space without allocating any memory. */
static void *memory_map(size_t length)
{
	void *ret = mmap(NULL, length, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	assert(ret != MAP_FAILED);
	return ret;
}

static void memory_unmap(void *ptr, size_t length)
{
	int ret __attribute__((unused));

	ret = munmap(ptr, length);

	assert(ret == 0);
}

static void memory_populate(void *ptr, size_t length)
{
	void *ret __attribute__((unused));

	ret = mmap(ptr, length, PROT_READ | PROT_WRITE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	assert(ret == ptr);
}

/*
 * Discard garbage memory so the system does not save it when trying to
 * swap it out. The range remains reserved, but inaccessible.
 */
static void memory_discard(void *ptr, size_t length)
{
	void *ret __attribute__((unused));

	ret = mmap(ptr, length, PROT_NONE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	assert(ret == ptr);
}

static
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	if (order == 0) {
		if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
			/* small table */
			ht->tbl_mmap = calloc(ht->max_nr_buckets,
				sizeof(*ht->tbl_mmap));
			assert(ht->tbl_mmap);
			return;
		}
		/* large table */
		ht->tbl_mmap = memory_map(ht->max_nr_buckets
			* sizeof(*ht->tbl_mmap));
		memory_populate(ht->tbl_mmap,
			ht->min_nr_alloc_buckets * sizeof(*ht->tbl_mmap));
	} else if (order > ht->min_alloc_buckets_order) {
		/* large table */
		unsigned long len = 1UL << (order - 1);

		assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
		memory_populate(ht->tbl_mmap + len,
			len * sizeof(*ht->tbl_mmap));
	}
	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}
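
/*
 * Illustrative note (not part of the upstream file): in the large-table
 * case, tbl_mmap is one contiguous reservation of max_nr_buckets entries.
 * Order 0 commits the first min_nr_alloc_buckets entries; every order
 * above min_alloc_buckets_order commits the upper half of its doubled
 * range, i.e. entries [1UL << (order - 1), 1UL << order). For example,
 * assuming min_nr_alloc_buckets == 64 (min_alloc_buckets_order == 6),
 * order 7 commits entries [64, 128) and order 8 commits [128, 256).
 */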

/*
 * cds_lfht_free_bucket_table() should be called with decreasing order.
 * When cds_lfht_free_bucket_table(0) is called, it means the whole
 * lfht is destroyed.
 */
static
void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	if (order == 0) {
		if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
			/* small table */
			poison_free(ht->tbl_mmap);
			return;
		}
		/* large table */
		memory_unmap(ht->tbl_mmap,
			ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
	} else if (order > ht->min_alloc_buckets_order) {
		/* large table */
		unsigned long len = 1UL << (order - 1);

		assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
		memory_discard(ht->tbl_mmap + len, len * sizeof(*ht->tbl_mmap));
	}
	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}

static
struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
{
	return &ht->tbl_mmap[index];
}

static
struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
		unsigned long max_nr_buckets)
{
	unsigned long page_bucket_size;

	page_bucket_size = getpagesize() / sizeof(struct cds_lfht_node);
	if (max_nr_buckets <= page_bucket_size) {
		/* small table */
		min_nr_alloc_buckets = max_nr_buckets;
	} else {
		/* large table */
		min_nr_alloc_buckets = max(min_nr_alloc_buckets,
				page_bucket_size);
	}

	return __default_alloc_cds_lfht(
			&cds_lfht_mm_mmap, sizeof(struct cds_lfht),
			min_nr_alloc_buckets, max_nr_buckets);
}
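
/*
 * Illustrative note (not part of the upstream file): assuming a 4 KiB
 * page and 16-byte struct cds_lfht_node entries (typical on 64-bit),
 * page_bucket_size is 256. Tables of at most 256 buckets are plain
 * calloc()'d arrays; larger tables use the mmap reservation scheme
 * above, committing at least one page worth of buckets up front.
 */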

const struct cds_lfht_mm_type cds_lfht_mm_mmap = {
	.alloc_cds_lfht = alloc_cds_lfht,
	.alloc_bucket_table = cds_lfht_alloc_bucket_table,
	.free_bucket_table = cds_lfht_free_bucket_table,
	.bucket_at = bucket_at,
};