/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>		/* For O_* constants */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>		/* For mode constants */
#include <sys/types.h>

#ifdef HAVE_LIBNUMA
#include <numa.h>
#include <numaif.h>
#endif

#include <lttng/ust-utils.h>

#include "common/macros.h"
#include "common/ust-fd.h"
#include "common/compat/mmap.h"

#include "shm.h"
35 * Ensure we have the required amount of space available by writing 0
36 * into the entire buffer. Not doing so can trigger SIGBUS when going
37 * beyond the available shm space.
40 int zero_file(int fd
, size_t len
)
48 pagelen
= sysconf(_SC_PAGESIZE
);
51 zeropage
= calloc(pagelen
, 1);
55 while (len
> written
) {
57 retlen
= write(fd
, zeropage
,
58 min_t(size_t, pagelen
, len
- written
));
59 } while (retlen
== -1UL && errno
== EINTR
);
72 struct shm_object_table
*shm_object_table_create(size_t max_nb_obj
, bool populate
)
74 struct shm_object_table
*table
;
76 table
= zmalloc_populate(sizeof(struct shm_object_table
) +
77 max_nb_obj
* sizeof(table
->objects
[0]), populate
);
80 table
->size
= max_nb_obj
;
85 struct shm_object
*_shm_object_table_alloc_shm(struct shm_object_table
*table
,
86 size_t memory_map_size
,
90 int shmfd
, waitfd
[2], ret
, i
;
91 int flags
= MAP_SHARED
;
92 struct shm_object
*obj
;
97 if (table
->allocated_len
>= table
->size
)
99 obj
= &table
->objects
[table
->allocated_len
];
101 /* wait_fd: create pipe */
102 ret
= pipe2(waitfd
, O_CLOEXEC
);
107 /* The write end of the pipe needs to be non-blocking */
108 ret
= fcntl(waitfd
[1], F_SETFL
, O_NONBLOCK
);
113 memcpy(obj
->wait_fd
, waitfd
, sizeof(waitfd
));
116 * Set POSIX shared memory object size
118 * First, use ftruncate() to set its size, some implementations won't
119 * allow writes past the size set by ftruncate.
120 * Then, use write() to fill it with zeros, this allows us to fully
121 * allocate it and detect a shortage of shm space without dealing with
126 ret
= ftruncate(shmfd
, memory_map_size
);
129 goto error_ftruncate
;
131 ret
= zero_file(shmfd
, memory_map_size
);
134 goto error_zero_file
;
138 * Also ensure the file metadata is synced with the storage by using
139 * fsync(2). Some platforms don't allow fsync on POSIX shm fds, ignore
140 * EINVAL accordingly.
143 if (ret
&& errno
!= EINVAL
) {
147 obj
->shm_fd_ownership
= 0;
151 flags
|= LTTNG_MAP_POPULATE
;
152 /* memory_map: mmap */
153 memory_map
= mmap(NULL
, memory_map_size
, PROT_READ
| PROT_WRITE
,
155 if (memory_map
== MAP_FAILED
) {
159 obj
->type
= SHM_OBJECT_SHM
;
160 obj
->memory_map
= memory_map
;
161 obj
->memory_map_size
= memory_map_size
;
162 obj
->allocated_len
= 0;
163 obj
->index
= table
->allocated_len
++;
172 for (i
= 0; i
< 2; i
++) {
173 ret
= close(waitfd
[i
]);
184 struct shm_object
*_shm_object_table_alloc_mem(struct shm_object_table
*table
,
185 size_t memory_map_size
, bool populate
)
187 struct shm_object
*obj
;
189 int waitfd
[2], i
, ret
;
191 if (table
->allocated_len
>= table
->size
)
193 obj
= &table
->objects
[table
->allocated_len
];
195 memory_map
= zmalloc_populate(memory_map_size
, populate
);
199 /* wait_fd: create pipe */
200 ret
= pipe2(waitfd
, O_CLOEXEC
);
205 /* The write end of the pipe needs to be non-blocking */
206 ret
= fcntl(waitfd
[1], F_SETFL
, O_NONBLOCK
);
211 memcpy(obj
->wait_fd
, waitfd
, sizeof(waitfd
));
215 obj
->shm_fd_ownership
= 0;
217 obj
->type
= SHM_OBJECT_MEM
;
218 obj
->memory_map
= memory_map
;
219 obj
->memory_map_size
= memory_map_size
;
220 obj
->allocated_len
= 0;
221 obj
->index
= table
->allocated_len
++;
226 for (i
= 0; i
< 2; i
++) {
227 ret
= close(waitfd
[i
]);
240 * libnuma prints errors on the console even for numa_available().
241 * Work-around this limitation by using get_mempolicy() directly to
242 * check whether the kernel supports mempolicy.
245 static bool lttng_is_numa_available(void)
249 ret
= get_mempolicy(NULL
, NULL
, 0, NULL
, 0);
250 if (ret
&& errno
== ENOSYS
) {
253 return numa_available() > 0;
258 struct shm_object
*shm_object_table_alloc(struct shm_object_table
*table
,
259 size_t memory_map_size
,
260 enum shm_object_type type
,
265 struct shm_object
*shm_object_table_alloc(struct shm_object_table
*table
,
266 size_t memory_map_size
,
267 enum shm_object_type type
,
269 int cpu
__attribute__((unused
)),
273 struct shm_object
*shm_object
;
275 int oldnode
= 0, node
;
278 numa_avail
= lttng_is_numa_available();
280 oldnode
= numa_preferred();
282 node
= numa_node_of_cpu(cpu
);
284 numa_set_preferred(node
);
286 if (cpu
< 0 || node
< 0)
287 numa_set_localalloc();
289 #endif /* HAVE_LIBNUMA */
292 shm_object
= _shm_object_table_alloc_shm(table
, memory_map_size
,
293 stream_fd
, populate
);
296 shm_object
= _shm_object_table_alloc_mem(table
, memory_map_size
,
304 numa_set_preferred(oldnode
);
305 #endif /* HAVE_LIBNUMA */
309 struct shm_object
*shm_object_table_append_shm(struct shm_object_table
*table
,
310 int shm_fd
, int wakeup_fd
, uint32_t stream_nr
,
311 size_t memory_map_size
, bool populate
)
313 int flags
= MAP_SHARED
;
314 struct shm_object
*obj
;
318 if (table
->allocated_len
>= table
->size
)
320 /* streams _must_ be received in sequential order, else fail. */
321 if (stream_nr
+ 1 != table
->allocated_len
)
324 obj
= &table
->objects
[table
->allocated_len
];
326 /* wait_fd: set write end of the pipe. */
327 obj
->wait_fd
[0] = -1; /* read end is unset */
328 obj
->wait_fd
[1] = wakeup_fd
;
329 obj
->shm_fd
= shm_fd
;
330 obj
->shm_fd_ownership
= 1;
332 /* The write end of the pipe needs to be non-blocking */
333 ret
= fcntl(obj
->wait_fd
[1], F_SETFL
, O_NONBLOCK
);
340 flags
|= LTTNG_MAP_POPULATE
;
341 /* memory_map: mmap */
342 memory_map
= mmap(NULL
, memory_map_size
, PROT_READ
| PROT_WRITE
,
344 if (memory_map
== MAP_FAILED
) {
348 obj
->type
= SHM_OBJECT_SHM
;
349 obj
->memory_map
= memory_map
;
350 obj
->memory_map_size
= memory_map_size
;
351 obj
->allocated_len
= memory_map_size
;
352 obj
->index
= table
->allocated_len
++;
362 * Passing ownership of mem to object.
364 struct shm_object
*shm_object_table_append_mem(struct shm_object_table
*table
,
365 void *mem
, size_t memory_map_size
, int wakeup_fd
)
367 struct shm_object
*obj
;
370 if (table
->allocated_len
>= table
->size
)
372 obj
= &table
->objects
[table
->allocated_len
];
374 obj
->wait_fd
[0] = -1; /* read end is unset */
375 obj
->wait_fd
[1] = wakeup_fd
;
377 obj
->shm_fd_ownership
= 0;
379 /* The write end of the pipe needs to be non-blocking */
380 ret
= fcntl(obj
->wait_fd
[1], F_SETFL
, O_NONBLOCK
);
386 obj
->type
= SHM_OBJECT_MEM
;
387 obj
->memory_map
= mem
;
388 obj
->memory_map_size
= memory_map_size
;
389 obj
->allocated_len
= memory_map_size
;
390 obj
->index
= table
->allocated_len
++;
399 void shmp_object_destroy(struct shm_object
*obj
, int consumer
)
406 ret
= munmap(obj
->memory_map
, obj
->memory_map_size
);
412 if (obj
->shm_fd_ownership
) {
413 /* Delete FDs only if called from app (not consumer). */
415 lttng_ust_lock_fd_tracker();
416 ret
= close(obj
->shm_fd
);
418 lttng_ust_delete_fd_from_tracker(obj
->shm_fd
);
423 lttng_ust_unlock_fd_tracker();
425 ret
= close(obj
->shm_fd
);
432 for (i
= 0; i
< 2; i
++) {
433 if (obj
->wait_fd
[i
] < 0)
436 lttng_ust_lock_fd_tracker();
437 ret
= close(obj
->wait_fd
[i
]);
439 lttng_ust_delete_fd_from_tracker(obj
->wait_fd
[i
]);
444 lttng_ust_unlock_fd_tracker();
446 ret
= close(obj
->wait_fd
[i
]);
459 for (i
= 0; i
< 2; i
++) {
460 if (obj
->wait_fd
[i
] < 0)
463 lttng_ust_lock_fd_tracker();
464 ret
= close(obj
->wait_fd
[i
]);
466 lttng_ust_delete_fd_from_tracker(obj
->wait_fd
[i
]);
471 lttng_ust_unlock_fd_tracker();
473 ret
= close(obj
->wait_fd
[i
]);
480 free(obj
->memory_map
);
488 void shm_object_table_destroy(struct shm_object_table
*table
, int consumer
)
492 for (i
= 0; i
< table
->allocated_len
; i
++)
493 shmp_object_destroy(&table
->objects
[i
], consumer
);
498 * zalloc_shm - allocate memory within a shm object.
500 * Shared memory is already zeroed by shmget.
501 * *NOT* multithread-safe (should be protected by mutex).
502 * Returns a -1, -1 tuple on error.
504 struct shm_ref
zalloc_shm(struct shm_object
*obj
, size_t len
)
507 struct shm_ref shm_ref_error
= { -1, -1 };
509 if (obj
->memory_map_size
- obj
->allocated_len
< len
)
510 return shm_ref_error
;
511 ref
.index
= obj
->index
;
512 ref
.offset
= obj
->allocated_len
;
513 obj
->allocated_len
+= len
;
517 void align_shm(struct shm_object
*obj
, size_t align
)
519 size_t offset_len
= lttng_ust_offset_align(obj
->allocated_len
, align
);
520 obj
->allocated_len
+= offset_len
;