/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
7 #ifndef _LIBRINGBUFFER_SHM_H
8 #define _LIBRINGBUFFER_SHM_H
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

#include <usterr-signal-safe.h>
#include <urcu/compiler.h>
#include "shm_types.h"
/*
 * channel_handle_create - for UST.
 *
 * Build a shm handle from an existing memory mapping (data, memory_map_size)
 * and a wakeup file descriptor.
 * NOTE(review): ownership of `data` and `wakeup_fd` presumably transfers to
 * the returned handle on success — confirm against shm.c.
 */
__attribute__((visibility("hidden")))
struct lttng_ust_shm_handle *channel_handle_create(void *data,
		uint64_t memory_map_size, int wakeup_fd);
/*
 * channel_handle_add_stream - for UST.
 *
 * Attach one stream's shm mapping (shm_fd, memory_map_size) and its wakeup
 * fd to the handle as stream number stream_nr.
 * NOTE(review): fd ownership presumably transfers on success — confirm
 * against shm.c; returns 0 or a negative error per file convention.
 */
__attribute__((visibility("hidden")))
int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		uint64_t memory_map_size);
/* Number of streams currently attached to the handle. */
__attribute__((visibility("hidden")))
unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle);
/*
 * Tear down a channel and its shm handle.
 * The declaration was truncated in this copy; trailing `int consumer` flag
 * restored from the upstream lttng-ust header — TODO confirm against shm.c.
 */
__attribute__((visibility("hidden")))
void channel_destroy(struct lttng_ust_lib_ring_buffer_channel *chan,
		struct lttng_ust_shm_handle *handle,
		int consumer);
40 * Pointer dereferencing. We don't trust the shm_ref, so we validate
41 * both the index and offset with known boundaries.
43 * "shmp" and "shmp_index" guarantee that it's safe to use the pointer
44 * target type, even in the occurrence of shm_ref modification by an
45 * untrusted process having write access to the shm_ref. We return a
46 * NULL pointer if the ranges are invalid.
49 char *_shmp_offset(struct shm_object_table
*table
, struct shm_ref
*ref
,
50 size_t idx
, size_t elem_size
)
52 struct shm_object
*obj
;
53 size_t objindex
, ref_offset
;
55 objindex
= (size_t) ref
->index
;
56 if (caa_unlikely(objindex
>= table
->allocated_len
))
58 obj
= &table
->objects
[objindex
];
59 ref_offset
= (size_t) ref
->offset
;
60 ref_offset
+= idx
* elem_size
;
61 /* Check if part of the element returned would exceed the limits. */
62 if (caa_unlikely(ref_offset
+ elem_size
> obj
->memory_map_size
))
64 return &obj
->memory_map
[ref_offset
];
/*
 * shmp_index - translate (handle, ref, index) into a bounds-validated
 * pointer of the ref's target type, or NULL when out of range.
 * GNU statement expression: evaluates to ____ptr_ret.
 */
#define shmp_index(handle, ref, index)					\
	({								\
		__typeof__((ref)._type) ____ptr_ret;			\
		____ptr_ret = (__typeof__(____ptr_ret)) _shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \
		____ptr_ret;						\
	})
74 #define shmp(handle, ref) shmp_index(handle, ref, 0)
77 void _set_shmp(struct shm_ref
*ref
, struct shm_ref src
)
82 #define set_shmp(ref, src) _set_shmp(&(ref)._ref, src)
/* Allocate a table able to hold up to max_nb_obj shm objects. */
__attribute__((visibility("hidden")))
struct shm_object_table *shm_object_table_create(size_t max_nb_obj);
/*
 * Allocate a new shm object of `type` within the table.
 * NOTE(review): the tail of this declaration was truncated in the extracted
 * source; trailing parameters restored from the upstream lttng-ust header —
 * TODO confirm against shm.c before relying on them.
 */
__attribute__((visibility("hidden")))
struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
		size_t memory_map_size,
		enum shm_object_type type,
		const int stream_fds[], int nr_stream_fds,
		int owner_uid, int owner_gid);
/*
 * Append a pre-existing shm mapping (shm_fd/memory_map_size) plus its
 * wakeup fd to the table as stream stream_nr.
 * NOTE(review): fd ownership presumably transfers on success — confirm.
 */
__attribute__((visibility("hidden")))
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		size_t memory_map_size);
/* mem ownership is passed to shm_object_table_append_mem(). */
__attribute__((visibility("hidden")))
struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
		void *mem, size_t memory_map_size, int wakeup_fd);
/*
 * Destroy the table and its objects.
 * NOTE(review): `consumer` presumably selects consumer-side vs UST-side
 * cleanup behavior — confirm against shm.c.
 */
__attribute__((visibility("hidden")))
void shm_object_table_destroy(struct shm_object_table *table, int consumer);
/*
 * zalloc_shm - allocate memory within a shm object.
 *
 * Shared memory is already zeroed by shmget.
 * *NOT* multithread-safe (should be protected by mutex).
 * Returns a -1, -1 tuple on error.
 */
__attribute__((visibility("hidden")))
struct shm_ref zalloc_shm(struct shm_object *obj, size_t len);
/* Advance the object's allocation cursor to an `align`-byte boundary. */
__attribute__((visibility("hidden")))
void align_shm(struct shm_object *obj, size_t align);
121 int shm_get_wait_fd(struct lttng_ust_shm_handle
*handle
, struct shm_ref
*ref
)
123 struct shm_object_table
*table
= handle
->table
;
124 struct shm_object
*obj
;
127 index
= (size_t) ref
->index
;
128 if (caa_unlikely(index
>= table
->allocated_len
))
130 obj
= &table
->objects
[index
];
131 return obj
->wait_fd
[0];
135 int shm_get_wakeup_fd(struct lttng_ust_shm_handle
*handle
, struct shm_ref
*ref
)
137 struct shm_object_table
*table
= handle
->table
;
138 struct shm_object
*obj
;
141 index
= (size_t) ref
->index
;
142 if (caa_unlikely(index
>= table
->allocated_len
))
144 obj
= &table
->objects
[index
];
145 return obj
->wait_fd
[1];
149 int shm_close_wait_fd(struct lttng_ust_shm_handle
*handle
,
152 struct shm_object_table
*table
= handle
->table
;
153 struct shm_object
*obj
;
158 index
= (size_t) ref
->index
;
159 if (caa_unlikely(index
>= table
->allocated_len
))
161 obj
= &table
->objects
[index
];
162 wait_fd
= obj
->wait_fd
[0];
165 obj
->wait_fd
[0] = -1;
166 ret
= close(wait_fd
);
175 int shm_close_wakeup_fd(struct lttng_ust_shm_handle
*handle
,
178 struct shm_object_table
*table
= handle
->table
;
179 struct shm_object
*obj
;
184 index
= (size_t) ref
->index
;
185 if (caa_unlikely(index
>= table
->allocated_len
))
187 obj
= &table
->objects
[index
];
188 wakeup_fd
= obj
->wait_fd
[1];
191 obj
->wait_fd
[1] = -1;
192 ret
= close(wakeup_fd
);
201 int shm_get_shm_fd(struct lttng_ust_shm_handle
*handle
, struct shm_ref
*ref
)
203 struct shm_object_table
*table
= handle
->table
;
204 struct shm_object
*obj
;
207 index
= (size_t) ref
->index
;
208 if (caa_unlikely(index
>= table
->allocated_len
))
210 obj
= &table
->objects
[index
];
216 int shm_get_shm_size(struct lttng_ust_shm_handle
*handle
, struct shm_ref
*ref
,
219 struct shm_object_table
*table
= handle
->table
;
220 struct shm_object
*obj
;
223 index
= (size_t) ref
->index
;
224 if (caa_unlikely(index
>= table
->allocated_len
))
226 obj
= &table
->objects
[index
];
227 *size
= obj
->memory_map_size
;
231 #endif /* _LIBRINGBUFFER_SHM_H */