consumerd: refactor: combine duplicated check_* functions
lttng-tools.git: src/common/consumer/consumer-metadata-cache.c
/*
 * Copyright (C) 2013 Julien Desfossez <jdesfossez@efficios.com>
 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <inttypes.h>

#include <common/common.h>
#include <common/utils.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer/consumer.h>

#include "consumer-metadata-cache.h"

extern struct lttng_consumer_global_data consumer_data;

/*
 * Extend the allocated size of the metadata cache. Called only from
 * lttng_ustconsumer_write_metadata_cache.
 *
 * Return 0 on success, a negative value on error.
 */
static int extend_metadata_cache(struct lttng_consumer_channel *channel,
		unsigned int size)
{
	int ret = 0;
	char *tmp_data_ptr;
	unsigned int new_size, old_size;

	assert(channel);
	assert(channel->metadata_cache);

	old_size = channel->metadata_cache->cache_alloc_size;
	new_size = max_t(unsigned int, old_size + size, old_size << 1);
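	/*
	 * For example, with old_size = 4096 and size = 512, this yields
	 * new_size = max(4096 + 512, 4096 << 1) = 8192: growing by at
	 * least a factor of two keeps the amortized cost of repeated
	 * small metadata appends low.
	 */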
	DBG("Extending metadata cache to %u", new_size);
	tmp_data_ptr = realloc(channel->metadata_cache->data, new_size);
	if (!tmp_data_ptr) {
		ERR("Reallocating metadata cache");
		free(channel->metadata_cache->data);
		/*
		 * Clear the stale pointer and size so that a later
		 * consumer_metadata_cache_destroy() cannot free the same
		 * buffer twice.
		 */
		channel->metadata_cache->data = NULL;
		channel->metadata_cache->cache_alloc_size = 0;
		ret = -1;
		goto end;
	}
	/* Zero newly allocated memory */
	memset(tmp_data_ptr + old_size, 0, new_size - old_size);
	channel->metadata_cache->data = tmp_data_ptr;
	channel->metadata_cache->cache_alloc_size = new_size;

end:
	return ret;
}

/*
 * Reset the metadata cache.
 */
static
void metadata_cache_reset(struct consumer_metadata_cache *cache)
{
	memset(cache->data, 0, cache->cache_alloc_size);
	cache->max_offset = 0;
}

/*
 * Check if the metadata cache version changed.
 * If it did, reset the metadata cache.
 * The metadata cache lock MUST be held.
 *
 * Returns 0 on success, a negative value on error.
 */
static
int metadata_cache_check_version(struct consumer_metadata_cache *cache,
		uint64_t version)
{
	int ret = 0;

	if (cache->version == version) {
		goto end;
	}

	DBG("Metadata cache version update to %" PRIu64, version);
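	/*
	 * A version bump typically means the session's metadata was
	 * regenerated; drop the cached contents so the new metadata can
	 * be written again from offset 0.
	 */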
	metadata_cache_reset(cache);
	cache->version = version;

end:
	return ret;
}

/*
 * Write a character on the metadata poll pipe to wake the metadata thread.
 * Returns 0 on success, -1 on error.
 */
int consumer_metadata_wakeup_pipe(const struct lttng_consumer_channel *channel)
{
	int ret = 0;
	const char dummy = 'c';

	if (channel->monitor && channel->metadata_stream) {
		ssize_t write_ret;

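		/*
		 * The write end of the poll pipe is expected to be
		 * non-blocking: a full pipe shows up as EWOULDBLOCK below
		 * instead of blocking the caller.
		 */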
		write_ret = lttng_write(channel->metadata_stream->ust_metadata_poll_pipe[1],
				&dummy, 1);
		if (write_ret < 1) {
			if (errno == EWOULDBLOCK) {
				/*
				 * This is fine, the metadata poll thread
				 * is having a hard time keeping up, but
				 * it will eventually wake up and consume
				 * the available data.
				 */
				ret = 0;
			} else {
				PERROR("Wake-up UST metadata pipe");
				ret = -1;
				goto end;
			}
		}
	}

end:
	return ret;
}

/*
 * Write metadata to the cache, extending the cache if necessary. We support
 * overlapping updates, but they need to be contiguous. Send the
 * contiguous metadata in cache to the ring buffer. The metadata cache
 * lock MUST be acquired to write in the cache.
 *
 * Return 0 on success, a negative value on error.
 */
int consumer_metadata_cache_write(struct lttng_consumer_channel *channel,
		unsigned int offset, unsigned int len, uint64_t version,
		char *data)
{
	int ret = 0;
	struct consumer_metadata_cache *cache;

	assert(channel);
	assert(channel->metadata_cache);

	cache = channel->metadata_cache;

	ret = metadata_cache_check_version(cache, version);
	if (ret < 0) {
		goto end;
	}

	DBG("Writing %u bytes from offset %u in metadata cache", len, offset);

	if (offset + len > cache->cache_alloc_size) {
		ret = extend_metadata_cache(channel,
				offset + len - cache->cache_alloc_size);
		if (ret < 0) {
			ERR("Extending metadata cache");
			goto end;
		}
	}

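	/*
	 * For example, caching [0, 64) and then [32, 128) is a valid
	 * overlapping update, while [0, 64) followed by [128, 192) would
	 * leave an uninitialized hole; this function does not detect such
	 * gaps, hence the contiguity requirement above.
	 */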
	memcpy(cache->data + offset, data, len);
	if (offset + len > cache->max_offset) {
		cache->max_offset = offset + len;
		ret = consumer_metadata_wakeup_pipe(channel);
	}

end:
	return ret;
}

/*
 * Create the metadata cache. Its initial allocated size is
 * DEFAULT_METADATA_CACHE_SIZE.
 *
 * Return 0 on success, a negative value on error.
 */
int consumer_metadata_cache_allocate(struct lttng_consumer_channel *channel)
{
	int ret;

	assert(channel);

	channel->metadata_cache = zmalloc(
			sizeof(struct consumer_metadata_cache));
	if (!channel->metadata_cache) {
		PERROR("zmalloc metadata cache struct");
		ret = -1;
		goto end;
	}
	ret = pthread_mutex_init(&channel->metadata_cache->lock, NULL);
	if (ret != 0) {
		/* pthread_mutex_init() returns an error number, not errno. */
		ERR("Failed to initialize metadata cache mutex: %s",
				strerror(ret));
		ret = -1;
		goto end_free_cache;
	}

	channel->metadata_cache->cache_alloc_size = DEFAULT_METADATA_CACHE_SIZE;
	channel->metadata_cache->data = zmalloc(
			channel->metadata_cache->cache_alloc_size * sizeof(char));
	if (!channel->metadata_cache->data) {
		PERROR("zmalloc metadata cache data");
		ret = -1;
		goto end_free_mutex;
	}
	DBG("Allocated metadata cache of %" PRIu64 " bytes",
			channel->metadata_cache->cache_alloc_size);

	ret = 0;
	goto end;

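/*
 * Error unwind: each label below releases what the steps above it
 * acquired, in reverse order of acquisition.
 */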
end_free_mutex:
	pthread_mutex_destroy(&channel->metadata_cache->lock);
end_free_cache:
	free(channel->metadata_cache);
	/* Don't leave a dangling pointer for consumer_metadata_cache_destroy(). */
	channel->metadata_cache = NULL;
end:
	return ret;
}

/*
 * Destroy and free the metadata cache.
 */
void consumer_metadata_cache_destroy(struct lttng_consumer_channel *channel)
{
	if (!channel || !channel->metadata_cache) {
		return;
	}

	DBG("Destroying metadata cache");

	pthread_mutex_destroy(&channel->metadata_cache->lock);
	free(channel->metadata_cache->data);
	free(channel->metadata_cache);
}

/*
 * Check whether the cache has been flushed up to the offset passed as a
 * parameter.
 *
 * Return 0 if everything has been flushed, 1 if data remains unflushed.
 */
int consumer_metadata_cache_flushed(struct lttng_consumer_channel *channel,
		uint64_t offset, int timer)
{
	int ret = 0;
	struct lttng_consumer_stream *metadata_stream;

	assert(channel);
	assert(channel->metadata_cache);

	/*
	 * If not called from a timer handler, we have to take the
	 * channel lock to be mutually exclusive with channel teardown.
	 * A timer handler does not need to take this lock because it is
	 * already synchronized by timer stop (and, more importantly,
	 * taking this lock in a timer handler would cause a deadlock).
	 */
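	/*
	 * Lock order here: channel lock (when taken), timer lock, stream
	 * lock, then the metadata cache lock.
	 */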
	if (!timer) {
		pthread_mutex_lock(&channel->lock);
	}
	pthread_mutex_lock(&channel->timer_lock);
	metadata_stream = channel->metadata_stream;
	if (!metadata_stream) {
		/*
		 * Having no metadata stream means the channel is being
		 * destroyed, so there is no cache to flush anymore.
		 */
		ret = 0;
		goto end_unlock_channel;
	}

	pthread_mutex_lock(&metadata_stream->lock);
	pthread_mutex_lock(&channel->metadata_cache->lock);

	if (metadata_stream->ust_metadata_pushed >= offset) {
		ret = 0;
	} else if (metadata_stream->endpoint_status !=
			CONSUMER_ENDPOINT_ACTIVE) {
		/* An inactive endpoint means we don't have to flush anymore. */
		ret = 0;
	} else {
		/* Still not completely flushed. */
		ret = 1;
	}

	pthread_mutex_unlock(&channel->metadata_cache->lock);
	pthread_mutex_unlock(&metadata_stream->lock);
end_unlock_channel:
	pthread_mutex_unlock(&channel->timer_lock);
	if (!timer) {
		pthread_mutex_unlock(&channel->lock);
	}

	return ret;
}
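
/*
 * Callers typically poll this function, sleeping between attempts, until it
 * returns 0: either everything up to the requested offset was pushed to the
 * ring buffer, or the stream's endpoint went inactive.
 */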