| 1 | /* |
| 2 | * SPDX-License-Identifier: LGPL-2.1-only |
| 3 | * |
| 4 | * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
| 5 | */ |
| 6 | |
| 7 | #ifndef _LIBRINGBUFFER_SHM_H |
| 8 | #define _LIBRINGBUFFER_SHM_H |
| 9 | |
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

#include <urcu/compiler.h>

#include "common/logging.h"
#include "shm_types.h"
| 16 | |
/*
 * channel_handle_create - for UST.
 *
 * Wrap an existing shared-memory mapping (`data`, `memory_map_size` bytes)
 * and its wakeup file descriptor into a new shm handle.
 * NOTE(review): presumably returns NULL on error — confirm in shm.c.
 */
extern
struct lttng_ust_shm_handle *channel_handle_create(void *data,
		uint64_t memory_map_size, int wakeup_fd)
	__attribute__((visibility("hidden")));

/*
 * channel_handle_add_stream - for UST.
 *
 * Register stream `stream_nr` (shm fd + wakeup fd + mapping size) with an
 * existing handle. NOTE(review): presumably returns 0 on success, negative
 * on error — confirm in shm.c.
 */
extern
int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		uint64_t memory_map_size)
	__attribute__((visibility("hidden")));

/* Number of streams currently attached to the handle. */
unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle)
	__attribute__((visibility("hidden")));
| 32 | |
| 33 | /* |
| 34 | * Pointer dereferencing. We don't trust the shm_ref, so we validate |
| 35 | * both the index and offset with known boundaries. |
| 36 | * |
| 37 | * "shmp" and "shmp_index" guarantee that it's safe to use the pointer |
| 38 | * target type, even in the occurrence of shm_ref modification by an |
| 39 | * untrusted process having write access to the shm_ref. We return a |
| 40 | * NULL pointer if the ranges are invalid. |
| 41 | */ |
| 42 | static inline |
| 43 | char *_shmp_offset(struct shm_object_table *table, struct shm_ref *ref, |
| 44 | size_t idx, size_t elem_size) |
| 45 | { |
| 46 | struct shm_object *obj; |
| 47 | size_t objindex, ref_offset; |
| 48 | |
| 49 | objindex = (size_t) ref->index; |
| 50 | if (caa_unlikely(objindex >= table->allocated_len)) |
| 51 | return NULL; |
| 52 | obj = &table->objects[objindex]; |
| 53 | ref_offset = (size_t) ref->offset; |
| 54 | ref_offset += idx * elem_size; |
| 55 | /* Check if part of the element returned would exceed the limits. */ |
| 56 | if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size)) |
| 57 | return NULL; |
| 58 | return &obj->memory_map[ref_offset]; |
| 59 | } |
| 60 | |
/*
 * shmp_index - validated dereference of a shm-resident array element.
 * Yields a pointer of the ref's declared `_type`, or NULL if the untrusted
 * ref/index falls outside the object's mapping (see _shmp_offset).
 */
#define shmp_index(handle, ref, index)			\
	((__typeof__((ref)._type)) _shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*((ref)._type))))

/* shmp - validated dereference of a single shm-resident object. */
#define shmp(handle, ref)	shmp_index(handle, ref, 0)
| 65 | |
| 66 | static inline |
| 67 | void _set_shmp(struct shm_ref *ref, struct shm_ref src) |
| 68 | { |
| 69 | *ref = src; |
| 70 | } |
| 71 | |
| 72 | #define set_shmp(ref, src) _set_shmp(&(ref)._ref, src) |
| 73 | |
/* Allocate a table able to track up to max_nb_obj shm objects. */
struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
	__attribute__((visibility("hidden")));

/*
 * Allocate a new shm object of `memory_map_size` bytes in the table.
 * NOTE(review): semantics of `type`, `stream_fd` and `cpu` are defined by
 * shm.c / shm_types.h — confirm there before relying on them.
 */
struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
		size_t memory_map_size,
		enum shm_object_type type,
		const int stream_fd,
		int cpu)
	__attribute__((visibility("hidden")));

/* Append an object backed by an existing shm fd / wakeup fd pair. */
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		size_t memory_map_size)
	__attribute__((visibility("hidden")));

/* mem ownership is passed to shm_object_table_append_mem(). */
struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
		void *mem, size_t memory_map_size, int wakeup_fd)
	__attribute__((visibility("hidden")));

/* Tear down the table and its objects; `consumer` selects the caller role. */
void shm_object_table_destroy(struct shm_object_table *table, int consumer)
	__attribute__((visibility("hidden")));
| 96 | |
/*
 * zalloc_shm - allocate memory within a shm object.
 *
 * Shared memory is already zeroed by shmget.
 * *NOT* multithread-safe (should be protected by mutex).
 * Returns a -1, -1 tuple on error.
 */
struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
	__attribute__((visibility("hidden")));

/*
 * Align the object's allocation position to an `align`-byte boundary.
 * NOTE(review): presumably pads the internal allocation cursor — confirm
 * against the definition in shm.c.
 */
void align_shm(struct shm_object *obj, size_t align)
	__attribute__((visibility("hidden")));
| 109 | |
| 110 | static inline |
| 111 | int shm_get_wait_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref) |
| 112 | { |
| 113 | struct shm_object_table *table = handle->table; |
| 114 | struct shm_object *obj; |
| 115 | size_t index; |
| 116 | |
| 117 | index = (size_t) ref->index; |
| 118 | if (caa_unlikely(index >= table->allocated_len)) |
| 119 | return -EPERM; |
| 120 | obj = &table->objects[index]; |
| 121 | return obj->wait_fd[0]; |
| 122 | } |
| 123 | |
| 124 | static inline |
| 125 | int shm_get_wakeup_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref) |
| 126 | { |
| 127 | struct shm_object_table *table = handle->table; |
| 128 | struct shm_object *obj; |
| 129 | size_t index; |
| 130 | |
| 131 | index = (size_t) ref->index; |
| 132 | if (caa_unlikely(index >= table->allocated_len)) |
| 133 | return -EPERM; |
| 134 | obj = &table->objects[index]; |
| 135 | return obj->wait_fd[1]; |
| 136 | } |
| 137 | |
| 138 | static inline |
| 139 | int shm_close_wait_fd(struct lttng_ust_shm_handle *handle, |
| 140 | struct shm_ref *ref) |
| 141 | { |
| 142 | struct shm_object_table *table = handle->table; |
| 143 | struct shm_object *obj; |
| 144 | int wait_fd; |
| 145 | size_t index; |
| 146 | int ret; |
| 147 | |
| 148 | index = (size_t) ref->index; |
| 149 | if (caa_unlikely(index >= table->allocated_len)) |
| 150 | return -EPERM; |
| 151 | obj = &table->objects[index]; |
| 152 | wait_fd = obj->wait_fd[0]; |
| 153 | if (wait_fd < 0) |
| 154 | return -ENOENT; |
| 155 | obj->wait_fd[0] = -1; |
| 156 | ret = close(wait_fd); |
| 157 | if (ret) { |
| 158 | ret = -errno; |
| 159 | return ret; |
| 160 | } |
| 161 | return 0; |
| 162 | } |
| 163 | |
| 164 | static inline |
| 165 | int shm_close_wakeup_fd(struct lttng_ust_shm_handle *handle, |
| 166 | struct shm_ref *ref) |
| 167 | { |
| 168 | struct shm_object_table *table = handle->table; |
| 169 | struct shm_object *obj; |
| 170 | int wakeup_fd; |
| 171 | size_t index; |
| 172 | int ret; |
| 173 | |
| 174 | index = (size_t) ref->index; |
| 175 | if (caa_unlikely(index >= table->allocated_len)) |
| 176 | return -EPERM; |
| 177 | obj = &table->objects[index]; |
| 178 | wakeup_fd = obj->wait_fd[1]; |
| 179 | if (wakeup_fd < 0) |
| 180 | return -ENOENT; |
| 181 | obj->wait_fd[1] = -1; |
| 182 | ret = close(wakeup_fd); |
| 183 | if (ret) { |
| 184 | ret = -errno; |
| 185 | return ret; |
| 186 | } |
| 187 | return 0; |
| 188 | } |
| 189 | |
| 190 | static inline |
| 191 | int shm_get_shm_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref) |
| 192 | { |
| 193 | struct shm_object_table *table = handle->table; |
| 194 | struct shm_object *obj; |
| 195 | size_t index; |
| 196 | |
| 197 | index = (size_t) ref->index; |
| 198 | if (caa_unlikely(index >= table->allocated_len)) |
| 199 | return -EPERM; |
| 200 | obj = &table->objects[index]; |
| 201 | return obj->shm_fd; |
| 202 | } |
| 203 | |
| 204 | |
| 205 | static inline |
| 206 | int shm_get_shm_size(struct lttng_ust_shm_handle *handle, struct shm_ref *ref, |
| 207 | uint64_t *size) |
| 208 | { |
| 209 | struct shm_object_table *table = handle->table; |
| 210 | struct shm_object *obj; |
| 211 | size_t index; |
| 212 | |
| 213 | index = (size_t) ref->index; |
| 214 | if (caa_unlikely(index >= table->allocated_len)) |
| 215 | return -EPERM; |
| 216 | obj = &table->objects[index]; |
| 217 | *size = obj->memory_map_size; |
| 218 | return 0; |
| 219 | } |
| 220 | |
| 221 | #endif /* _LIBRINGBUFFER_SHM_H */ |