Fix type range comparison always false for 64-bit arch
[lttng-ust.git] / libringbuffer / shm.c
1 /*
2 * libringbuffer/shm.c
3 *
4 * Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Dual LGPL v2.1/GPL v2 license.
7 */
8
#include "shm.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>	/* For mode constants */
#include <fcntl.h>	/* For O_* constants */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>	/* For SIZE_MAX */
#include <signal.h>
#include <dirent.h>
#include <lttng/align.h>
#include <helper.h>
#include <limits.h>
22
23 struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
24 {
25 struct shm_object_table *table;
26
27 table = zmalloc(sizeof(struct shm_object_table) +
28 max_nb_obj * sizeof(table->objects[0]));
29 table->size = max_nb_obj;
30 return table;
31 }
32
/*
 * shm_object_table_append - create a new shm object and append it to
 * the table.
 *
 * Acquires, in order: a wakeup pipe (both ends CLOEXEC, write end
 * non-blocking), a freshly created POSIX shm segment of
 * memory_map_size bytes which is unlinked immediately after creation
 * (only the fd keeps it alive), and a shared read/write mapping of
 * that segment.
 *
 * Returns the initialized object slot, or NULL on error after
 * releasing everything acquired so far via the goto-cleanup ladder.
 * Not thread-safe with respect to the table (caller must serialize).
 */
struct shm_object *shm_object_table_append(struct shm_object_table *table,
		size_t memory_map_size)
{
	int shmfd, waitfd[2], ret, i, sigblocked = 0;
	struct shm_object *obj;
	char *memory_map;
	char tmp_name[NAME_MAX] = "ust-shm-tmp-XXXXXX";
	sigset_t all_sigs, orig_sigs;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* wait_fd: create pipe */
	ret = pipe(waitfd);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	for (i = 0; i < 2; i++) {
		ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error_fcntl;
		}
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/* shm_fd: create shm */

	/*
	 * Theoretically, we could leak a shm if the application crashes
	 * between open and unlink. Disable signals on this thread for
	 * increased safety against this scenario.
	 */
	sigfillset(&all_sigs);
	ret = pthread_sigmask(SIG_BLOCK, &all_sigs, &orig_sigs);
	if (ret == -1) {
		PERROR("pthread_sigmask");
		goto error_pthread_sigmask;
	}
	/* sigblocked tracks whether the cleanup path must restore the mask. */
	sigblocked = 1;

	/*
	 * Allocate shm, and immediately unlink its shm oject, keeping
	 * only the file descriptor as a reference to the object. If it
	 * already exists (caused by short race window during which the
	 * global object exists in a concurrent shm_open), simply retry.
	 * We specifically do _not_ use the / at the beginning of the
	 * pathname so that some OS implementations can keep it local to
	 * the process (POSIX leaves this implementation-defined).
	 */
	do {
		/*
		 * Using mktemp filename with O_CREAT | O_EXCL open
		 * flags.
		 */
		mktemp(tmp_name);
		/* mktemp signals failure by emptying the template. */
		if (tmp_name[0] == '\0') {
			PERROR("mktemp");
			goto error_shm_open;
		}
		shmfd = shm_open(tmp_name,
				 O_CREAT | O_EXCL | O_RDWR, 0700);
	} while (shmfd < 0 && (errno == EEXIST || errno == EACCES));
	if (shmfd < 0) {
		PERROR("shm_open");
		goto error_shm_open;
	}
	/* Unlink right away: the fd is now the only reference. */
	ret = shm_unlink(tmp_name);
	if (ret < 0 && errno != ENOENT) {
		PERROR("shm_unlink");
		goto error_shm_release;
	}
	sigblocked = 0;
	ret = pthread_sigmask(SIG_SETMASK, &orig_sigs, NULL);
	if (ret == -1) {
		PERROR("pthread_sigmask");
		goto error_sigmask_release;
	}
	/* Size the (initially empty) shm segment. */
	ret = ftruncate(shmfd, memory_map_size);
	if (ret) {
		PERROR("ftruncate");
		goto error_ftruncate;
	}
	obj->shm_fd = shmfd;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

	/* Cleanup ladder: each label releases one acquisition stage. */
error_mmap:
error_ftruncate:
error_shm_release:
error_sigmask_release:
	ret = close(shmfd);
	if (ret) {
		PERROR("close");
		assert(0);
	}
error_shm_open:
	/* Restore the signal mask if it was not already restored above. */
	if (sigblocked) {
		ret = pthread_sigmask(SIG_SETMASK, &orig_sigs, NULL);
		if (ret == -1) {
			PERROR("pthread_sigmask");
		}
	}
error_pthread_sigmask:
error_fcntl:
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	return NULL;

}
169
/*
 * shm_object_table_append_shadow - append a "shadow" object built from
 * pre-existing file descriptors (e.g. received from another process)
 * rather than creating a new shm segment.
 *
 * Takes ownership of shm_fd and wait_fd on success (they are closed by
 * shmp_object_destroy); only the read end of the wait pipe is tracked,
 * the write end is marked unset (-1). The mapping covers
 * memory_map_size bytes and allocated_len is set to the full size,
 * since the layout was already carved out by the object's creator.
 *
 * Returns the object slot, or NULL if the table is full or mmap fails.
 * On mmap failure the fds remain open (caller still owns them).
 *
 * NOTE(review): obj->is_shadow is not set here although
 * shmp_object_destroy() branches on it — presumably set by the caller
 * or zero-init elsewhere; confirm against shm.h users.
 */
struct shm_object *shm_object_table_append_shadow(struct shm_object_table *table,
		int shm_fd, int wait_fd, size_t memory_map_size)
{
	struct shm_object *obj;
	char *memory_map;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* wait_fd: set read end of the pipe. */
	obj->wait_fd[0] = wait_fd;
	obj->wait_fd[1] = -1;	/* write end is unset. */
	obj->shm_fd = shm_fd;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
	return NULL;
}
202
203 static
204 void shmp_object_destroy(struct shm_object *obj)
205 {
206 int ret, i;
207
208 if (!obj->is_shadow) {
209 ret = munmap(obj->memory_map, obj->memory_map_size);
210 if (ret) {
211 PERROR("umnmap");
212 assert(0);
213 }
214 }
215 if (obj->shm_fd >= 0) {
216 ret = close(obj->shm_fd);
217 if (ret) {
218 PERROR("close");
219 assert(0);
220 }
221 }
222 for (i = 0; i < 2; i++) {
223 if (obj->wait_fd[i] < 0)
224 continue;
225 ret = close(obj->wait_fd[i]);
226 if (ret) {
227 PERROR("close");
228 assert(0);
229 }
230 }
231 }
232
233 void shm_object_table_destroy(struct shm_object_table *table)
234 {
235 int i;
236
237 for (i = 0; i < table->allocated_len; i++)
238 shmp_object_destroy(&table->objects[i]);
239 free(table);
240 }
241
242 /*
243 * zalloc_shm - allocate memory within a shm object.
244 *
245 * Shared memory is already zeroed by shmget.
246 * *NOT* multithread-safe (should be protected by mutex).
247 * Returns a -1, -1 tuple on error.
248 */
249 struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
250 {
251 struct shm_ref ref;
252 struct shm_ref shm_ref_error = { -1, -1 };
253
254 if (obj->memory_map_size - obj->allocated_len < len)
255 return shm_ref_error;
256 ref.index = obj->index;
257 ref.offset = obj->allocated_len;
258 obj->allocated_len += len;
259 return ref;
260 }
261
262 void align_shm(struct shm_object *obj, size_t align)
263 {
264 size_t offset_len = offset_align(obj->allocated_len, align);
265 obj->allocated_len += offset_len;
266 }
This page took 0.037631 seconds and 4 git commands to generate.