Update hardcoded loglevel
lttng-ust.git: libringbuffer/shm.c
/*
 * libringbuffer/shm.c
 *
 * Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include "shm.h"
#include <unistd.h>
#include <fcntl.h>	/* For O_* constants */
#include <sys/mman.h>
#include <sys/stat.h>	/* For mode constants */
#include <assert.h>
#include <stdio.h>
#include <signal.h>
#include <dirent.h>
#include <errno.h>	/* For errno */
#include <limits.h>	/* For NAME_MAX */
#include <stdlib.h>	/* For mktemp() */
#include <string.h>	/* For memcpy() and strcpy() */
#include <lttng/align.h>
#include <helper.h>

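/*
 * Allocate a table with room for max_nb_obj shm objects. Returns NULL
 * on allocation failure.
 */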
struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
{
	struct shm_object_table *table;

	table = zmalloc(sizeof(struct shm_object_table) +
			max_nb_obj * sizeof(table->objects[0]));
	if (!table)
		return NULL;
	table->size = max_nb_obj;
	return table;
}

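/*
 * Append a new shm object to the table: create its wakeup pipe, create
 * and immediately unlink a POSIX shm object of memory_map_size bytes,
 * and map it into the current process. Returns NULL if the table is
 * full or on error.
 */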
struct shm_object *shm_object_table_append(struct shm_object_table *table,
			size_t memory_map_size)
{
	int shmfd, waitfd[2], ret, i, sigblocked = 0;
	struct shm_object *obj;
	char *memory_map;
	char tmp_name[NAME_MAX] = "ust-shm-tmp-XXXXXX";
	sigset_t all_sigs, orig_sigs;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* wait_fd: create pipe */
	ret = pipe(waitfd);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	for (i = 0; i < 2; i++) {
		ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error_fcntl;
		}
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/* shm_fd: create shm */

	/*
	 * Theoretically, we could leak a shm if the application crashes
	 * between open and unlink. Disable signals on this thread for
	 * increased safety against this scenario.
	 */
	sigfillset(&all_sigs);
	ret = pthread_sigmask(SIG_BLOCK, &all_sigs, &orig_sigs);
	if (ret == -1) {
		PERROR("pthread_sigmask");
		goto error_pthread_sigmask;
	}
	sigblocked = 1;

	/*
	 * Allocate shm, and immediately unlink its shm object, keeping
	 * only the file descriptor as a reference to the object. If it
	 * already exists (caused by short race window during which the
	 * global object exists in a concurrent shm_open), simply retry.
	 * We specifically do _not_ use the / at the beginning of the
	 * pathname so that some OS implementations can keep it local to
	 * the process (POSIX leaves this implementation-defined).
	 */
	do {
		/*
		 * Using mktemp filename with O_CREAT | O_EXCL open
		 * flags. mktemp() expands the XXXXXX suffix in place,
		 * so reset the template before each attempt: a retry
		 * would otherwise call mktemp() on an already-expanded
		 * name and fail.
		 */
		strcpy(tmp_name, "ust-shm-tmp-XXXXXX");
		mktemp(tmp_name);
		if (tmp_name[0] == '\0') {
			PERROR("mktemp");
			goto error_shm_open;
		}
		shmfd = shm_open(tmp_name,
				O_CREAT | O_EXCL | O_RDWR, 0700);
	} while (shmfd < 0 && (errno == EEXIST || errno == EACCES));
	if (shmfd < 0) {
		PERROR("shm_open");
		goto error_shm_open;
	}
	ret = shm_unlink(tmp_name);
	if (ret < 0 && errno != ENOENT) {
		PERROR("shm_unlink");
		goto error_shm_release;
	}
	sigblocked = 0;
	ret = pthread_sigmask(SIG_SETMASK, &orig_sigs, NULL);
	if (ret == -1) {
		PERROR("pthread_sigmask");
		goto error_sigmask_release;
	}
	ret = ftruncate(shmfd, memory_map_size);
	if (ret) {
		PERROR("ftruncate");
		goto error_ftruncate;
	}
	obj->shm_fd = shmfd;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
error_ftruncate:
error_shm_release:
error_sigmask_release:
	ret = close(shmfd);
	if (ret) {
		PERROR("close");
		assert(0);
	}
error_shm_open:
	if (sigblocked) {
		ret = pthread_sigmask(SIG_SETMASK, &orig_sigs, NULL);
		if (ret == -1) {
			PERROR("pthread_sigmask");
		}
	}
error_pthread_sigmask:
error_fcntl:
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	return NULL;
}

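/*
 * Append a "shadow" object wrapping an shm_fd and the read end of a
 * wait pipe received from another process. The memory is mapped, but
 * nothing can be allocated from it: allocated_len is set to the full
 * mapping size.
 */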
struct shm_object *shm_object_table_append_shadow(struct shm_object_table *table,
			int shm_fd, int wait_fd, size_t memory_map_size)
{
	struct shm_object *obj;
	char *memory_map;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* wait_fd: set read end of the pipe. */
	obj->wait_fd[0] = wait_fd;
	obj->wait_fd[1] = -1;	/* write end is unset. */
	obj->shm_fd = shm_fd;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
	return NULL;
}

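/*
 * Tear down one shm object: unmap its memory (skipped for shadow
 * objects), then close the shm fd and any pipe fds that are set.
 */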
static
void shmp_object_destroy(struct shm_object *obj)
{
	int ret, i;

	if (!obj->is_shadow) {
		ret = munmap(obj->memory_map, obj->memory_map_size);
		if (ret) {
			PERROR("munmap");
			assert(0);
		}
	}
	if (obj->shm_fd >= 0) {
		ret = close(obj->shm_fd);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
	for (i = 0; i < 2; i++) {
		if (obj->wait_fd[i] < 0)
			continue;
		ret = close(obj->wait_fd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
}

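/*
 * Destroy every object in the table, then free the table itself.
 */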
void shm_object_table_destroy(struct shm_object_table *table)
{
	int i;

	for (i = 0; i < table->allocated_len; i++)
		shmp_object_destroy(&table->objects[i]);
	free(table);
}

/*
 * zalloc_shm - allocate memory within a shm object.
 *
 * Shared memory is already zeroed by ftruncate.
 * *NOT* multithread-safe (should be protected by mutex).
 * Returns a -1, -1 tuple on error.
 */
struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
{
	struct shm_ref ref;
	struct shm_ref shm_ref_error = { -1, -1 };

	if (obj->memory_map_size - obj->allocated_len < len)
		return shm_ref_error;
	ref.index = obj->index;
	ref.offset = obj->allocated_len;
	obj->allocated_len += len;
	return ref;
}

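/*
 * Pad the current allocation offset so the next zalloc_shm() returns
 * memory aligned on "align" bytes (align must be a power of two).
 */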
void align_shm(struct shm_object *obj, size_t align)
{
	size_t offset_len = offset_align(obj->allocated_len, align);
	obj->allocated_len += offset_len;
}
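
For reference, here is a minimal usage sketch of this allocator API. It is illustrative only: the example() wrapper, the 4 kB object size, and the check of shm_ref.index against -1 are assumptions of this sketch, not part of the file above.

#include "shm.h"
#include <stdio.h>

/* Hypothetical caller: carve one aligned, zero-filled chunk out of a
 * single 4 kB shm object, then tear everything down. */
int example(void)
{
	struct shm_object_table *table;
	struct shm_object *obj;
	struct shm_ref ref;

	table = shm_object_table_create(1);	/* room for one object */
	if (!table)
		return -1;
	obj = shm_object_table_append(table, 4096);
	if (!obj)
		goto error;

	align_shm(obj, 8);		/* 8-byte align the next chunk */
	ref = zalloc_shm(obj, 128);	/* reserve 128 bytes in the shm */
	if (ref.index == -1)		/* -1, -1 tuple signals failure */
		goto error;
	printf("allocated chunk at object %d, offset %zu\n",
		(int) ref.index, (size_t) ref.offset);

	shm_object_table_destroy(table);	/* munmap, close, free */
	return 0;

error:
	shm_object_table_destroy(table);
	return -1;
}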