/*
 * Fix: lock nesting order reversed
 * lttng-tools.git: src/common/consumer/consumer-metadata-cache.c
 */
1 /*
2 * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _LGPL_SOURCE
20 #include <assert.h>
21 #include <pthread.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/types.h>
25 #include <unistd.h>
26 #include <inttypes.h>
27
28 #include <common/common.h>
29 #include <common/utils.h>
30 #include <common/sessiond-comm/sessiond-comm.h>
31 #include <common/ust-consumer/ust-consumer.h>
32 #include <common/consumer/consumer.h>
33
34 #include "consumer-metadata-cache.h"
35
36 extern struct lttng_consumer_global_data consumer_data;
37
38 /*
39 * Extend the allocated size of the metadata cache. Called only from
40 * lttng_ustconsumer_write_metadata_cache.
41 *
42 * Return 0 on success, a negative value on error.
43 */
44 static int extend_metadata_cache(struct lttng_consumer_channel *channel,
45 unsigned int size)
46 {
47 int ret = 0;
48 char *tmp_data_ptr;
49 unsigned int new_size, old_size;
50
51 assert(channel);
52 assert(channel->metadata_cache);
53
54 old_size = channel->metadata_cache->cache_alloc_size;
55 new_size = max_t(unsigned int, old_size + size, old_size << 1);
56 DBG("Extending metadata cache to %u", new_size);
57 tmp_data_ptr = realloc(channel->metadata_cache->data, new_size);
58 if (!tmp_data_ptr) {
59 ERR("Reallocating metadata cache");
60 free(channel->metadata_cache->data);
61 ret = -1;
62 goto end;
63 }
64 /* Zero newly allocated memory */
65 memset(tmp_data_ptr + old_size, 0, new_size - old_size);
66 channel->metadata_cache->data = tmp_data_ptr;
67 channel->metadata_cache->cache_alloc_size = new_size;
68
69 end:
70 return ret;
71 }
72
73 /*
74 * Reset the metadata cache.
75 */
76 static
77 void metadata_cache_reset(struct consumer_metadata_cache *cache)
78 {
79 memset(cache->data, 0, cache->cache_alloc_size);
80 cache->max_offset = 0;
81 }
82
83 /*
84 * Check if the metadata cache version changed.
85 * If it did, reset the metadata cache.
86 * The metadata cache lock MUST be held.
87 *
88 * Returns 0 on success, a negative value on error.
89 */
90 static
91 int metadata_cache_check_version(struct consumer_metadata_cache *cache,
92 struct lttng_consumer_channel *channel, uint64_t version)
93 {
94 int ret = 0;
95
96 if (cache->version == version) {
97 goto end;
98 }
99
100 DBG("Metadata cache version update to %" PRIu64, version);
101 metadata_cache_reset(cache);
102 cache->version = version;
103
104 end:
105 return ret;
106 }
107
108 /*
109 * Write metadata to the cache, extend the cache if necessary. We support
110 * overlapping updates, but they need to be contiguous. Send the
111 * contiguous metadata in cache to the ring buffer. The metadata cache
112 * lock MUST be acquired to write in the cache.
113 *
114 * Return 0 on success, a negative value on error.
115 */
116 int consumer_metadata_cache_write(struct lttng_consumer_channel *channel,
117 unsigned int offset, unsigned int len, uint64_t version,
118 char *data)
119 {
120 int ret = 0;
121 int size_ret;
122 struct consumer_metadata_cache *cache;
123
124 assert(channel);
125 assert(channel->metadata_cache);
126
127 cache = channel->metadata_cache;
128
129 ret = metadata_cache_check_version(cache, channel, version);
130 if (ret < 0) {
131 goto end;
132 }
133
134 DBG("Writing %u bytes from offset %u in metadata cache", len, offset);
135
136 if (offset + len > cache->cache_alloc_size) {
137 ret = extend_metadata_cache(channel,
138 len - cache->cache_alloc_size + offset);
139 if (ret < 0) {
140 ERR("Extending metadata cache");
141 goto end;
142 }
143 }
144
145 memcpy(cache->data + offset, data, len);
146 if (offset + len > cache->max_offset) {
147 char dummy = 'c';
148
149 cache->max_offset = offset + len;
150 if (channel->monitor && channel->metadata_stream) {
151 size_ret = lttng_write(channel->metadata_stream->ust_metadata_poll_pipe[1],
152 &dummy, 1);
153 if (size_ret < 1) {
154 ERR("Wakeup UST metadata pipe");
155 ret = -1;
156 goto end;
157 }
158 }
159 }
160
161 end:
162 return ret;
163 }
164
165 /*
166 * Create the metadata cache, original allocated size: max_sb_size
167 *
168 * Return 0 on success, a negative value on error.
169 */
170 int consumer_metadata_cache_allocate(struct lttng_consumer_channel *channel)
171 {
172 int ret;
173
174 assert(channel);
175
176 channel->metadata_cache = zmalloc(
177 sizeof(struct consumer_metadata_cache));
178 if (!channel->metadata_cache) {
179 PERROR("zmalloc metadata cache struct");
180 ret = -1;
181 goto end;
182 }
183 ret = pthread_mutex_init(&channel->metadata_cache->lock, NULL);
184 if (ret != 0) {
185 PERROR("mutex init");
186 goto end_free_cache;
187 }
188
189 channel->metadata_cache->cache_alloc_size = DEFAULT_METADATA_CACHE_SIZE;
190 channel->metadata_cache->data = zmalloc(
191 channel->metadata_cache->cache_alloc_size * sizeof(char));
192 if (!channel->metadata_cache->data) {
193 PERROR("zmalloc metadata cache data");
194 ret = -1;
195 goto end_free_mutex;
196 }
197 DBG("Allocated metadata cache of %" PRIu64 " bytes",
198 channel->metadata_cache->cache_alloc_size);
199
200 ret = 0;
201 goto end;
202
203 end_free_mutex:
204 pthread_mutex_destroy(&channel->metadata_cache->lock);
205 end_free_cache:
206 free(channel->metadata_cache);
207 end:
208 return ret;
209 }
210
211 /*
212 * Destroy and free the metadata cache
213 */
214 void consumer_metadata_cache_destroy(struct lttng_consumer_channel *channel)
215 {
216 if (!channel || !channel->metadata_cache) {
217 return;
218 }
219
220 DBG("Destroying metadata cache");
221
222 pthread_mutex_destroy(&channel->metadata_cache->lock);
223 free(channel->metadata_cache->data);
224 free(channel->metadata_cache);
225 }
226
227 /*
228 * Check if the cache is flushed up to the offset passed in parameter.
229 *
230 * Return 0 if everything has been flushed, 1 if there is data not flushed.
231 */
int consumer_metadata_cache_flushed(struct lttng_consumer_channel *channel,
		uint64_t offset, int timer)
{
	int ret = 0;
	struct lttng_consumer_stream *metadata_stream;

	assert(channel);
	assert(channel->metadata_cache);

	/*
	 * If not called from a timer handler, we have to take the
	 * channel lock to be mutually exclusive with channel teardown.
	 * Timer handler does not need to take this lock because it is
	 * already synchronized by timer stop (and, more importantly,
	 * taking this lock in a timer handler would cause a deadlock).
	 */
	if (!timer) {
		pthread_mutex_lock(&channel->lock);
	}
	/*
	 * Lock nesting order is significant and must match teardown:
	 * channel->lock, then channel->timer_lock, then the metadata
	 * cache lock.
	 */
	pthread_mutex_lock(&channel->timer_lock);
	pthread_mutex_lock(&channel->metadata_cache->lock);

	/* Snapshot the stream pointer while holding the locks. */
	metadata_stream = channel->metadata_stream;

	if (!metadata_stream) {
		/*
		 * Having no metadata stream means the channel is being destroyed so there
		 * is no cache to flush anymore.
		 */
		ret = 0;
	} else if (metadata_stream->ust_metadata_pushed >= offset) {
		/* Everything up to the requested offset was pushed. */
		ret = 0;
	} else if (channel->metadata_stream->endpoint_status !=
			CONSUMER_ENDPOINT_ACTIVE) {
		/* An inactive endpoint means we don't have to flush anymore. */
		ret = 0;
	} else {
		/* Still not completely flushed. */
		ret = 1;
	}

	/* Release in the reverse order of acquisition. */
	pthread_mutex_unlock(&channel->metadata_cache->lock);
	pthread_mutex_unlock(&channel->timer_lock);
	if (!timer) {
		pthread_mutex_unlock(&channel->lock);
	}

	return ret;
}