/*
 * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
 *                      David Goulet <dgoulet@efficios.com>
 *               2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define _LGPL_SOURCE
#include <common/common.h>
#include <common/utils.h>
#include <common/defaults.h>
#include <urcu/rculist.h>

#include "lttng-relayd.h"
#include "index.h"
#include "stream.h"
#include "viewer-stream.h"
/* Should be called with RCU read-side lock held. */
bool stream_get(struct relay_stream *stream)
{
	bool has_ref = false;

	pthread_mutex_lock(&stream->reflock);
	if (stream->ref.refcount != 0) {
		has_ref = true;
		urcu_ref_get(&stream->ref);
	}
	pthread_mutex_unlock(&stream->reflock);

	return has_ref;
}
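/*
 * Illustrative sketch, not part of the original file: stream_get()
 * implements a "get unless refcount is zero" pattern. A typical lookup
 * under the RCU read-side lock pairs it with stream_put():
 *
 *	rcu_read_lock();
 *	if (stream_get(stream)) {
 *		// ... safe to use stream, even beyond the RCU read-side
 *		// critical section ...
 *		stream_put(stream);
 *	}
 *	rcu_read_unlock();
 *
 * A false return means the stream is being torn down and must not be
 * used.
 */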
/*
 * Get stream from stream id from the streams hash table. Return stream
 * if found else NULL. A stream reference is taken when a stream is
 * returned. stream_put() must be called on that stream.
 */
struct relay_stream *stream_get_by_id(uint64_t stream_id)
{
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;
	struct relay_stream *stream = NULL;

	rcu_read_lock();
	lttng_ht_lookup(relay_streams_ht, &stream_id, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (!node) {
		DBG("Relay stream %" PRIu64 " not found", stream_id);
		goto end;
	}
	stream = caa_container_of(node, struct relay_stream, node);
	if (!stream_get(stream)) {
		stream = NULL;
	}
end:
	rcu_read_unlock();
	return stream;
}
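/*
 * Illustrative sketch, not part of the original file: every successful
 * stream_get_by_id() must be balanced by a stream_put() once the caller
 * is done with the stream:
 *
 *	struct relay_stream *stream = stream_get_by_id(stream_id);
 *
 *	if (stream) {
 *		// ... use stream ...
 *		stream_put(stream);
 *	}
 */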
/*
 * We keep ownership of path_name and channel_name.
 */
struct relay_stream *stream_create(struct ctf_trace *trace,
	uint64_t stream_handle, char *path_name,
	char *channel_name, uint64_t tracefile_size,
	uint64_t tracefile_count)
{
	int ret;
	struct relay_stream *stream = NULL;
	struct relay_session *session = trace->session;

	stream = zmalloc(sizeof(struct relay_stream));
	if (stream == NULL) {
		PERROR("relay stream zmalloc");
		ret = -1;
		goto error_no_alloc;
	}

	stream->stream_handle = stream_handle;
	stream->prev_seq = -1ULL;
	stream->last_net_seq_num = -1ULL;
	stream->ctf_stream_id = -1ULL;
	stream->tracefile_size = tracefile_size;
	stream->tracefile_count = tracefile_count;
	stream->path_name = path_name;
	stream->channel_name = channel_name;
	lttng_ht_node_init_u64(&stream->node, stream->stream_handle);
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->reflock, NULL);
	urcu_ref_init(&stream->ref);
	ctf_trace_get(trace);
	stream->trace = trace;

	stream->indexes_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!stream->indexes_ht) {
		ERR("Cannot create indexes_ht");
		ret = -1;
		goto end;
	}

	ret = utils_mkdir_recursive(stream->path_name, S_IRWXU | S_IRWXG,
			-1, -1);
	if (ret < 0) {
		ERR("relay creating output directory");
		goto end;
	}

	/*
	 * No need to use run_as API here because whatever we receive,
	 * the relayd uses its own credentials for the stream files.
	 */
	ret = utils_create_stream_file(stream->path_name, stream->channel_name,
			stream->tracefile_size, 0, -1, -1, NULL);
	if (ret < 0) {
		ERR("Create output file");
		goto end;
	}
	stream->stream_fd = stream_fd_create(ret);
	if (!stream->stream_fd) {
		if (close(ret)) {
			PERROR("Error closing file %d", ret);
		}
		ret = -1;
		goto end;
	}
	stream->tfa = tracefile_array_create(stream->tracefile_count);
	if (!stream->tfa) {
		ret = -1;
		goto end;
	}
	if (stream->tracefile_size) {
		DBG("Tracefile %s/%s_0 created", stream->path_name, stream->channel_name);
	} else {
		DBG("Tracefile %s/%s created", stream->path_name, stream->channel_name);
	}

	if (!strncmp(stream->channel_name, DEFAULT_METADATA_NAME, NAME_MAX)) {
		stream->is_metadata = 1;
	}

	stream->in_recv_list = true;

	/*
	 * Add the stream in the recv list of the session. Once the end stream
	 * message is received, all session streams are published.
	 */
	pthread_mutex_lock(&session->recv_list_lock);
	cds_list_add_rcu(&stream->recv_node, &session->recv_list);
	session->stream_count++;
	pthread_mutex_unlock(&session->recv_list_lock);

	/*
	 * Both in the ctf_trace object and the global stream ht since the data
	 * side of the relayd does not have the concept of session.
	 */
	lttng_ht_add_unique_u64(relay_streams_ht, &stream->node);
	stream->in_stream_ht = true;

	DBG("Relay new stream added %s with ID %" PRIu64, stream->channel_name,
			stream->stream_handle);
	ret = 0;

end:
	if (ret) {
		if (stream->stream_fd) {
			stream_fd_put(stream->stream_fd);
			stream->stream_fd = NULL;
		}
		stream_put(stream);
		stream = NULL;
	}
	return stream;

error_no_alloc:
	/*
	 * path_name and channel_name need to be freed explicitly here
	 * because we cannot rely on stream_put().
	 */
	free(path_name);
	free(channel_name);
	return NULL;
}
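/*
 * Illustrative sketch, not part of the original file: since
 * stream_create() takes ownership of path_name and channel_name on both
 * the success and error paths, a caller passes heap-allocated copies and
 * never frees them afterwards. The variable names below are hypothetical:
 *
 *	char *path = strdup(base_path);
 *	char *name = strdup(channel_name);
 *
 *	stream = stream_create(trace, handle, path, name,
 *			tracefile_size, tracefile_count);
 *	// path and name now belong to the stream (or were freed on
 *	// error); do not free them here, even if stream is NULL.
 */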
/*
 * Called with the session lock held.
 */
void stream_publish(struct relay_stream *stream)
{
	struct relay_session *session;

	pthread_mutex_lock(&stream->lock);
	if (stream->published) {
		goto unlock;
	}

	session = stream->trace->session;

	pthread_mutex_lock(&session->recv_list_lock);
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	pthread_mutex_lock(&stream->trace->stream_list_lock);
	cds_list_add_rcu(&stream->stream_node, &stream->trace->stream_list);
	pthread_mutex_unlock(&stream->trace->stream_list_lock);

	stream->published = true;
unlock:
	pthread_mutex_unlock(&stream->lock);
}
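/*
 * Illustrative note, not part of the original file: a stream therefore
 * lives on two lists over its lifetime. stream_create() places it on the
 * session's recv_list; stream_publish() moves it to the owning
 * ctf_trace's stream_list, where viewers can find it. The "published"
 * flag checked above makes repeated publication a no-op.
 */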
/*
 * Stream must be protected by holding the stream lock or by virtue of being
 * called from stream_destroy, in which case it is guaranteed to be accessed
 * from a single thread by the reflock.
 */
static void stream_unpublish(struct relay_stream *stream)
{
	if (stream->in_stream_ht) {
		struct lttng_ht_iter iter;
		int ret;

		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(relay_streams_ht, &iter);
		assert(!ret);
		stream->in_stream_ht = false;
	}
	if (stream->published) {
		pthread_mutex_lock(&stream->trace->stream_list_lock);
		cds_list_del_rcu(&stream->stream_node);
		pthread_mutex_unlock(&stream->trace->stream_list_lock);
		stream->published = false;
	}
}
static void stream_destroy(struct relay_stream *stream)
{
	if (stream->indexes_ht) {
		lttng_ht_destroy(stream->indexes_ht);
	}
	if (stream->tfa) {
		tracefile_array_destroy(stream->tfa);
	}
	free(stream->path_name);
	free(stream->channel_name);
	free(stream);
}

static void stream_destroy_rcu(struct rcu_head *rcu_head)
{
	struct relay_stream *stream =
		caa_container_of(rcu_head, struct relay_stream, rcu_node);

	stream_destroy(stream);
}
/*
 * No need to take stream->lock since this is only called on the final
 * stream_put which ensures that a single thread may act on the stream.
 *
 * At that point, the object is also protected by the reflock which
 * guarantees that no other thread may share ownership of this stream.
 */
static void stream_release(struct urcu_ref *ref)
{
	struct relay_stream *stream =
		caa_container_of(ref, struct relay_stream, ref);
	struct relay_session *session;

	session = stream->trace->session;

	DBG("Releasing stream id %" PRIu64, stream->stream_handle);

	pthread_mutex_lock(&session->recv_list_lock);
	session->stream_count--;
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	stream_unpublish(stream);

	if (stream->stream_fd) {
		stream_fd_put(stream->stream_fd);
		stream->stream_fd = NULL;
	}
	if (stream->index_fd) {
		stream_fd_put(stream->index_fd);
		stream->index_fd = NULL;
	}
	ctf_trace_put(stream->trace);
	stream->trace = NULL;

	call_rcu(&stream->rcu_node, stream_destroy_rcu);
}
void stream_put(struct relay_stream *stream)
{
	DBG("stream put for stream id %" PRIu64, stream->stream_handle);
	/*
	 * Ensure existence of stream->reflock for stream unlock.
	 */
	rcu_read_lock();
	/*
	 * Stream reflock ensures that concurrent test and update of
	 * stream ref is atomic.
	 */
	pthread_mutex_lock(&stream->reflock);
	assert(stream->ref.refcount != 0);
	/*
	 * Wait until we have processed all the stream packets before
	 * actually putting our last stream reference.
	 */
	DBG("stream put stream id %" PRIu64 " refcount %d",
			stream->stream_handle,
			(int) stream->ref.refcount);
	urcu_ref_put(&stream->ref, stream_release);
	pthread_mutex_unlock(&stream->reflock);
	rcu_read_unlock();
}
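/*
 * Illustrative note, not part of the original file: because both
 * stream_get() and stream_put() take stream->reflock, the refcount test
 * and its update are atomic with respect to each other. A concurrent
 * stream_get() racing with the final stream_put() either runs first and
 * keeps the stream alive, or observes refcount == 0 and returns false;
 * it can never resurrect a stream whose release has started.
 */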
void try_stream_close(struct relay_stream *stream)
{
	DBG("Trying to close stream %" PRIu64, stream->stream_handle);
	pthread_mutex_lock(&stream->lock);
	/*
	 * Can be called concurrently by connection close and reception of last
	 * pending data.
	 */
	if (stream->closed) {
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it is already marked as closed", stream->stream_handle);
		return;
	}

	stream->close_requested = true;

	if (stream->last_net_seq_num == -1ULL) {
		/*
		 * Handle connection close without explicit stream close
		 * command.
		 *
		 * We can be clever about indexes partially received in
		 * cases where we received the data socket part, but not
		 * the control socket part: since we're currently closing
		 * the stream on behalf of the control socket, we *know*
		 * there won't be any more control information for this
		 * socket. Therefore, we can destroy all indexes for
		 * which we have received only the file descriptor (from
		 * data socket). This takes care of consumerd crashes
		 * between sending the data and control information for
		 * a packet. Since those are sent in that order, we take
		 * care of consumerd crashes.
		 */
		relay_index_close_partial_fd(stream);
		/*
		 * Use the highest net_seq_num we currently have pending
		 * as an end-of-stream indicator. Leave last_net_seq_num
		 * at -1ULL if we cannot find any index.
		 */
		stream->last_net_seq_num = relay_index_find_last(stream);
		/* Fall-through into the next check. */
	}

	if (stream->last_net_seq_num != -1ULL &&
			((int64_t) (stream->prev_seq - stream->last_net_seq_num)) < 0) {
		/*
		 * Don't close since we still have data pending. This
		 * handles cases where an explicit close command has
		 * been received for this stream, and cases where the
		 * connection has been closed, and we are waiting for
		 * index information from the data socket. It is
		 * therefore expected that all the index fd information
		 * we need has already been received on the control
		 * socket. Matching index information from data socket
		 * should be Expected Soon(TM).
		 *
		 * TODO: We should implement a timer to garbage collect
		 * streams after a timeout to be resilient against a
		 * consumerd implementation that would not match this
		 * expectation.
		 */
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it still has data pending", stream->stream_handle);
		return;
	}
	/*
	 * We received all the indexes we can expect.
	 */
	stream_unpublish(stream);
	stream->closed = true;
	/* Relay indexes are only used by the "consumer/sessiond" end. */
	relay_index_close_all(stream);
	pthread_mutex_unlock(&stream->lock);
	DBG("Succeeded in closing stream %" PRIu64, stream->stream_handle);
	stream_put(stream);
}
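/*
 * Illustrative note, not part of the original file: the pending-data
 * check in try_stream_close() uses a signed difference so it remains
 * correct across u64 sequence number wraparound. For example, with
 * prev_seq = 5 and last_net_seq_num = 8, (int64_t) (5 - 8) == -3 < 0,
 * so data is still pending; once prev_seq reaches 8 the difference is
 * no longer negative and the stream may be closed.
 */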
void print_relay_streams(void)
{
	struct lttng_ht_iter iter;
	struct relay_stream *stream;

	rcu_read_lock();
	cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
			node.node) {
		if (!stream_get(stream)) {
			continue;
		}
		DBG("stream %p refcount %ld stream %" PRIu64 " trace %" PRIu64
			" session %" PRIu64,
			stream,
			stream->ref.refcount,
			stream->stream_handle,
			stream->trace->id,
			stream->trace->session->id);
		stream_put(stream);
	}
	rcu_read_unlock();
}