src/bin/lttng-relayd/stream.c
/*
 * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
 *                      David Goulet <dgoulet@efficios.com>
 *               2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <common/common.h>
#include <common/utils.h>
#include <common/defaults.h>
#include <urcu/rculist.h>
#include <sys/stat.h>

#include "lttng-relayd.h"
#include "index.h"
#include "stream.h"
#include "viewer-stream.h"

/* Should be called with RCU read-side lock held. */
bool stream_get(struct relay_stream *stream)
{
	bool has_ref = false;

	pthread_mutex_lock(&stream->reflock);
	if (stream->ref.refcount != 0) {
		has_ref = true;
		urcu_ref_get(&stream->ref);
	}
	pthread_mutex_unlock(&stream->reflock);

	return has_ref;
}

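/*
 * Note on the pattern above: a refcount that has already dropped to
 * zero means a release is in progress, so the get is refused rather
 * than resurrecting a dying object. The RCU read-side lock only
 * guarantees that the object's memory remains valid while the count
 * is tested.
 */
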
/*
 * Look up a stream by id in the streams hash table. Return the stream
 * if found, else NULL. A reference is taken on the returned stream;
 * stream_put() must be called on it.
 */
struct relay_stream *stream_get_by_id(uint64_t stream_id)
{
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;
	struct relay_stream *stream = NULL;

	rcu_read_lock();
	lttng_ht_lookup(relay_streams_ht, &stream_id, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (!node) {
		DBG("Relay stream %" PRIu64 " not found", stream_id);
		goto end;
	}
	stream = caa_container_of(node, struct relay_stream, node);
	if (!stream_get(stream)) {
		stream = NULL;
	}
end:
	rcu_read_unlock();
	return stream;
}

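/*
 * Illustrative usage of the lookup above (a sketch, not part of the
 * original file): every successful stream_get_by_id() must be paired
 * with stream_put().
 *
 *	struct relay_stream *stream = stream_get_by_id(stream_id);
 *
 *	if (stream) {
 *		pthread_mutex_lock(&stream->lock);
 *		... operate on the stream ...
 *		pthread_mutex_unlock(&stream->lock);
 *		stream_put(stream);
 *	}
 */
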
/*
 * We keep ownership of path_name and channel_name.
 */
struct relay_stream *stream_create(struct ctf_trace *trace,
	uint64_t stream_handle, char *path_name,
	char *channel_name, uint64_t tracefile_size,
	uint64_t tracefile_count)
{
	int ret;
	struct relay_stream *stream = NULL;
	struct relay_session *session = trace->session;

	stream = zmalloc(sizeof(struct relay_stream));
	if (stream == NULL) {
		PERROR("relay stream zmalloc");
		ret = -1;
		goto error_no_alloc;
	}

	stream->stream_handle = stream_handle;
	stream->prev_seq = -1ULL;
	stream->last_net_seq_num = -1ULL;
	stream->ctf_stream_id = -1ULL;
	stream->tracefile_size = tracefile_size;
	stream->tracefile_count = tracefile_count;
	stream->path_name = path_name;
	stream->channel_name = channel_name;
	lttng_ht_node_init_u64(&stream->node, stream->stream_handle);
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->reflock, NULL);
	urcu_ref_init(&stream->ref);
	ctf_trace_get(trace);
	stream->trace = trace;

	stream->indexes_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!stream->indexes_ht) {
		ERR("Cannot create indexes_ht");
		ret = -1;
		goto end;
	}

	ret = utils_mkdir_recursive(stream->path_name, S_IRWXU | S_IRWXG,
			-1, -1);
	if (ret < 0) {
		ERR("Failed to create relay output directory");
		goto end;
	}

	/*
	 * No need to use the run_as API here because whatever we receive,
	 * the relayd uses its own credentials for the stream files.
	 */
	ret = utils_create_stream_file(stream->path_name, stream->channel_name,
			stream->tracefile_size, 0, -1, -1, NULL);
	if (ret < 0) {
		ERR("Failed to create output file");
		goto end;
	}
	stream->stream_fd = stream_fd_create(ret);
	if (!stream->stream_fd) {
		if (close(ret)) {
			PERROR("Error closing file %d", ret);
		}
		ret = -1;
		goto end;
	}
	stream->tfa = tracefile_array_create(stream->tracefile_count);
	if (!stream->tfa) {
		ret = -1;
		goto end;
	}
	if (stream->tracefile_size) {
		DBG("Tracefile %s/%s_0 created", stream->path_name, stream->channel_name);
	} else {
		DBG("Tracefile %s/%s created", stream->path_name, stream->channel_name);
	}

	if (!strncmp(stream->channel_name, DEFAULT_METADATA_NAME, NAME_MAX)) {
		stream->is_metadata = 1;
	}

	stream->in_recv_list = true;

	/*
	 * Add the stream to the session's recv list. Once the end-stream
	 * message is received, all session streams are published.
	 */
	pthread_mutex_lock(&session->recv_list_lock);
	cds_list_add_rcu(&stream->recv_node, &session->recv_list);
	session->stream_count++;
	pthread_mutex_unlock(&session->recv_list_lock);

	/*
	 * The stream is known both to the ctf_trace object and to the
	 * global stream ht, since the data side of the relayd does not
	 * have the concept of a session.
	 */
	lttng_ht_add_unique_u64(relay_streams_ht, &stream->node);
	stream->in_stream_ht = true;

	DBG("Relay new stream added %s with ID %" PRIu64, stream->channel_name,
			stream->stream_handle);
	ret = 0;

end:
	if (ret) {
		if (stream->stream_fd) {
			stream_fd_put(stream->stream_fd);
			stream->stream_fd = NULL;
		}
		stream_put(stream);
		stream = NULL;
	}
	return stream;

error_no_alloc:
	/*
	 * path_name and channel_name need to be freed explicitly here
	 * because we cannot rely on stream_put().
	 */
	free(path_name);
	free(channel_name);
	return NULL;
}

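/*
 * Lifecycle summary, as implemented in this file: a freshly created
 * stream sits on its session's recv list until stream_publish() moves
 * it onto the ctf_trace stream list; try_stream_close() later
 * unpublishes it, and the final stream_put() frees it through an RCU
 * callback.
 */
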
/*
 * Called with the session lock held.
 */
void stream_publish(struct relay_stream *stream)
{
	struct relay_session *session;

	pthread_mutex_lock(&stream->lock);
	if (stream->published) {
		goto unlock;
	}

	session = stream->trace->session;

	pthread_mutex_lock(&session->recv_list_lock);
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	pthread_mutex_lock(&stream->trace->stream_list_lock);
	cds_list_add_rcu(&stream->stream_node, &stream->trace->stream_list);
	pthread_mutex_unlock(&stream->trace->stream_list_lock);

	stream->published = true;
unlock:
	pthread_mutex_unlock(&stream->lock);
}

/*
 * The stream must be protected either by holding the stream lock, or
 * by virtue of being called from stream_release(), in which case the
 * reflock guarantees that it is accessed from a single thread.
 */
static void stream_unpublish(struct relay_stream *stream)
{
	if (stream->in_stream_ht) {
		struct lttng_ht_iter iter;
		int ret;

		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(relay_streams_ht, &iter);
		assert(!ret);
		stream->in_stream_ht = false;
	}
	if (stream->published) {
		pthread_mutex_lock(&stream->trace->stream_list_lock);
		cds_list_del_rcu(&stream->stream_node);
		pthread_mutex_unlock(&stream->trace->stream_list_lock);
		stream->published = false;
	}
}

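/*
 * Final teardown: stream_destroy() is only reached through
 * stream_destroy_rcu(), i.e. after an RCU grace period has elapsed
 * following the last stream_put().
 */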
static void stream_destroy(struct relay_stream *stream)
{
	if (stream->indexes_ht) {
		lttng_ht_destroy(stream->indexes_ht);
	}
	if (stream->tfa) {
		tracefile_array_destroy(stream->tfa);
	}
	free(stream->path_name);
	free(stream->channel_name);
	free(stream);
}

static void stream_destroy_rcu(struct rcu_head *rcu_head)
{
	struct relay_stream *stream =
		caa_container_of(rcu_head, struct relay_stream, rcu_node);

	stream_destroy(stream);
}

/*
 * No need to take stream->lock since this is only called on the final
 * stream_put(), which ensures that a single thread may act on the
 * stream.
 *
 * At that point, the object is also protected by the reflock, which
 * guarantees that no other thread may share ownership of this stream.
 */
static void stream_release(struct urcu_ref *ref)
{
	struct relay_stream *stream =
		caa_container_of(ref, struct relay_stream, ref);
	struct relay_session *session;

	session = stream->trace->session;

	DBG("Releasing stream id %" PRIu64, stream->stream_handle);

	pthread_mutex_lock(&session->recv_list_lock);
	session->stream_count--;
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	stream_unpublish(stream);

	if (stream->stream_fd) {
		stream_fd_put(stream->stream_fd);
		stream->stream_fd = NULL;
	}
	if (stream->index_fd) {
		stream_fd_put(stream->index_fd);
		stream->index_fd = NULL;
	}
	if (stream->trace) {
		ctf_trace_put(stream->trace);
		stream->trace = NULL;
	}

	call_rcu(&stream->rcu_node, stream_destroy_rcu);
}

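/*
 * Drop a reference obtained through stream_get() or stream_get_by_id().
 * The last reference triggers stream_release() above.
 */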
void stream_put(struct relay_stream *stream)
{
	DBG("stream put for stream id %" PRIu64, stream->stream_handle);
	/*
	 * Ensure the existence of stream->reflock across the unlock
	 * below: the RCU read-side lock delays reclamation of the
	 * stream until we are done with its mutex.
	 */
	rcu_read_lock();
	/*
	 * The stream reflock ensures that concurrent test and update of
	 * the stream refcount are atomic.
	 */
	pthread_mutex_lock(&stream->reflock);
	assert(stream->ref.refcount != 0);
	/*
	 * Wait until we have processed all the stream packets before
	 * actually putting our last stream reference.
	 */
	DBG("stream put stream id %" PRIu64 " refcount %d",
			stream->stream_handle,
			(int) stream->ref.refcount);
	urcu_ref_put(&stream->ref, stream_release);
	pthread_mutex_unlock(&stream->reflock);
	rcu_read_unlock();
}

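/*
 * Attempt to close the stream. The close is deferred (and retried on a
 * later call) while data packets announced on the control socket are
 * still pending on the data socket.
 */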
void try_stream_close(struct relay_stream *stream)
{
	DBG("Trying to close stream %" PRIu64, stream->stream_handle);
	pthread_mutex_lock(&stream->lock);
	/*
	 * Can be called concurrently by connection close and by reception
	 * of the last pending data.
	 */
	if (stream->closed) {
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it is already marked as closed", stream->stream_handle);
		return;
	}

	stream->close_requested = true;

	if (stream->last_net_seq_num == -1ULL) {
		/*
		 * Handle connection close without an explicit stream close
		 * command.
		 *
		 * We can be clever about indexes partially received in
		 * cases where we received the data socket part, but not
		 * the control socket part: since we're currently closing
		 * the stream on behalf of the control socket, we *know*
		 * there won't be any more control information for this
		 * socket. Therefore, we can destroy all indexes for which
		 * we have received only the file descriptor (from the
		 * data socket). This takes care of consumerd crashes
		 * between sending the data and the control information
		 * for a packet, since those are sent in that order.
		 */
		relay_index_close_partial_fd(stream);
		/*
		 * Use the highest net_seq_num we currently have pending
		 * as the end-of-stream indicator. Leave last_net_seq_num
		 * at -1ULL if we cannot find any index.
		 */
		stream->last_net_seq_num = relay_index_find_last(stream);
		/* Fall through to the next check. */
	}

	if (stream->last_net_seq_num != -1ULL &&
			((int64_t) (stream->prev_seq - stream->last_net_seq_num)) < 0) {
		/*
		 * Don't close since we still have data pending. The signed
		 * subtraction keeps this comparison correct even if the
		 * sequence numbers wrap around. This handles cases where
		 * an explicit close command has been received for this
		 * stream, and cases where the connection has been closed
		 * and we are awaiting index information from the data
		 * socket. It is therefore expected that all the index fd
		 * information we need has already been received on the
		 * control socket. Matching index information from the data
		 * socket should be Expected Soon(TM).
		 *
		 * TODO: We should implement a timer to garbage collect
		 * streams after a timeout, to be resilient against a
		 * consumerd implementation that would not match this
		 * expected behavior.
		 */
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it still has data pending", stream->stream_handle);
		return;
	}
	/*
	 * We received all the indexes we can expect.
	 */
	stream_unpublish(stream);
	stream->closed = true;
	/* Relay indexes are only used by the "consumer/sessiond" end. */
	relay_index_close_all(stream);
	pthread_mutex_unlock(&stream->lock);
	DBG("Succeeded in closing stream %" PRIu64, stream->stream_handle);
	stream_put(stream);
}

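/*
 * Debugging helper: log every live stream in the global hash table,
 * taking and dropping a reference around each access.
 */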
void print_relay_streams(void)
{
	struct lttng_ht_iter iter;
	struct relay_stream *stream;

	rcu_read_lock();
	cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
			node.node) {
		if (!stream_get(stream)) {
			continue;
		}
		DBG("stream %p refcount %ld stream %" PRIu64 " trace %" PRIu64
				" session %" PRIu64,
				stream,
				stream->ref.refcount,
				stream->stream_handle,
				stream->trace->id,
				stream->trace->session->id);
		stream_put(stream);
	}
	rcu_read_unlock();
}