#ifndef _LIBRINGBUFFER_SHM_H
#define _LIBRINGBUFFER_SHM_H

/*
 * libringbuffer/shm.h
 *
 * Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <stdint.h>
#include <errno.h>	/* EPERM, returned by the fd accessors below */
#include <ust/usterr-signal-safe.h>
#include "ust/core.h"
#include "shm_types.h"

/*
 * Pointer dereferencing. We don't trust the shm_ref, so we validate
 * both the index and the offset against known boundaries.
 *
 * "shmp" and "shmp_index" guarantee that it is safe to use the pointer
 * target type, even if the shm_ref is modified by an untrusted process
 * having write access to it. We return a NULL pointer if the ranges
 * are invalid.
 */
static inline
char *_shmp_offset(struct shm_object_table *table, struct shm_ref *ref,
		   size_t idx, size_t elem_size)
{
	struct shm_object *obj;
	size_t objindex, ref_offset;

	objindex = (size_t) ref->index;
	if (caa_unlikely(objindex >= table->allocated_len))
		return NULL;
	obj = &table->objects[objindex];
	ref_offset = (size_t) ref->offset;
	ref_offset += idx * elem_size;
	/* Check if any part of the returned element would exceed the limits. */
	if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
		return NULL;
	return &obj->memory_map[ref_offset];
}

#define shmp_index(handle, ref, index)					\
	({								\
		__typeof__((ref)._type) ____ptr_ret;			\
		____ptr_ret = (__typeof__(____ptr_ret)) _shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \
		____ptr_ret;						\
	})

#define shmp(handle, ref)	shmp_index(handle, ref, 0)

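/*
 * Usage sketch (illustrative, not part of the original header): shmp()
 * and shmp_index() turn an untrusted shm_ref embedded in shared memory
 * into a pointer of the declared target type, or NULL when the index or
 * offset fall outside the object boundaries. The structure and field
 * names below are hypothetical:
 *
 *	struct counters *cnt;
 *
 *	cnt = shmp_index(handle, buf->per_subbuf_counters, subbuf_idx);
 *	if (!cnt)
 *		return;		(ref corrupted or out of range)
 *	do_something(&cnt->value);
 */
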
static inline
void _set_shmp(struct shm_ref *ref, struct shm_ref src)
{
	*ref = src;
}

#define set_shmp(ref, src)	_set_shmp(&(ref)._ref, src)

struct shm_object_table *shm_object_table_create(size_t max_nb_obj);
struct shm_object *shm_object_table_append_shadow(struct shm_object_table *table,
			int shm_fd, int wait_fd, size_t memory_map_size);
void shm_object_table_destroy(struct shm_object_table *table);
struct shm_object *shm_object_table_append(struct shm_object_table *table,
			size_t memory_map_size);

/*
 * zalloc_shm - allocate memory within a shm object.
 *
 * Shared memory is already zeroed by shmget.
 * *NOT* multithread-safe (should be protected by a mutex).
 * Returns a (-1, -1) tuple on error.
 */
struct shm_ref zalloc_shm(struct shm_object *obj, size_t len);
void align_shm(struct shm_object *obj, size_t align);

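/*
 * Allocation sketch (illustrative, with hypothetical names): a typical
 * caller aligns the object's allocation offset, allocates a chunk,
 * stores the returned reference with set_shmp(), then detects failure
 * through the NULL return of shmp(), since an invalid (-1, -1)
 * reference never passes the boundary checks of _shmp_offset():
 *
 *	align_shm(shmobj, __alignof__(struct my_shm_data));
 *	set_shmp(chan->my_data, zalloc_shm(shmobj, sizeof(struct my_shm_data)));
 *	if (!shmp(handle, chan->my_data))
 *		goto error;
 */
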
static inline
int shm_get_wakeup_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
{
	struct shm_object_table *table = handle->table;
	struct shm_object *obj;
	size_t index;

	index = (size_t) ref->index;
	if (caa_unlikely(index >= table->allocated_len))
		return -EPERM;
	obj = &table->objects[index];
	return obj->wait_fd[1];
}

static inline
int shm_get_wait_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
{
	struct shm_object_table *table = handle->table;
	struct shm_object *obj;
	size_t index;

	index = (size_t) ref->index;
	if (caa_unlikely(index >= table->allocated_len))
		return -EPERM;
	obj = &table->objects[index];
	return obj->wait_fd[0];
}

static inline
int shm_get_object_data(struct lttng_ust_shm_handle *handle, struct shm_ref *ref,
		int *shm_fd, int *wait_fd, uint64_t *memory_map_size)
{
	struct shm_object_table *table = handle->table;
	struct shm_object *obj;
	size_t index;

	index = (size_t) ref->index;
	if (caa_unlikely(index >= table->allocated_len))
		return -EPERM;
	obj = &table->objects[index];
	*shm_fd = obj->shm_fd;
	*wait_fd = obj->wait_fd[0];
	*memory_map_size = obj->allocated_len;
	return 0;
}

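/*
 * Usage sketch (illustrative, with hypothetical variables): the
 * accessors above expose per-object resources to the holder of the
 * handle. shm_get_wait_fd() and shm_get_wakeup_fd() return the two ends
 * of the object's wakeup pipe (wait_fd[0] to wait on, wait_fd[1] to
 * wake up), and shm_get_object_data() hands out the shm file descriptor
 * and map size, e.g. so they can be passed to a process that maps the
 * same buffers with shm_object_table_append_shadow():
 *
 *	int shm_fd, wait_fd;
 *	uint64_t memory_map_size;
 *
 *	if (shm_get_object_data(handle, ref, &shm_fd, &wait_fd,
 *			&memory_map_size) < 0)
 *		handle_error();
 */
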
#endif /* _LIBRINGBUFFER_SHM_H */