+
+ io_ret = lttng_write(new_fd, buf, count);
+ if (io_ret < (ssize_t) count) {
+ char error_string[256];
+
+ snprintf(error_string, sizeof(error_string),
+ "Failed to write %" PRIu64 " bytes from fd %i in rotate_truncate_stream(), returned %zi",
+ count, new_fd, io_ret);
+ if (io_ret == -1) {
+ PERROR("%s", error_string);
+ } else {
+ ERR("%s", error_string);
+ }
+ ret = -1;
+ goto end;
+ }
+
+ pos += count;
+ }
+
+ /* Truncate the file to get rid of the excess data. */
+ ret = ftruncate(stream->stream_fd->fd,
+ stream->pos_after_last_complete_data_index);
+ if (ret) {
+ PERROR("ftruncate");
+ goto end;
+ }
+
+ ret = close(stream->stream_fd->fd);
+ if (ret < 0) {
+ PERROR("Closing tracefile");
+ goto end;
+ }
+
+ /*
+ * Update the offset and FD of all the eventual indexes created by the
+ * data connection before the rotation command arrived.
+ */
+ ret = relay_index_switch_all_files(stream);
+ if (ret < 0) {
+ ERR("Failed to rotate index file");
+ goto end;
+ }
+
+ stream->stream_fd->fd = new_fd;
+ stream->tracefile_size_current = diff;
+ stream->pos_after_last_complete_data_index = 0;
+ stream->rotate_at_seq_num = -1ULL;
+
+ ret = 0;
+
+end:
+ return ret;
+}
+
+/*
+ * Check if a stream's index file should be rotated (for session rotation).
+ * Must be called with the stream lock held.
+ *
+ * Return 0 on success, a negative value on error.
+ */
+static
+int try_rotate_stream_index(struct relay_stream *stream)
+{
+ int ret = 0;
+
+ if (stream->rotate_at_seq_num == -1ULL) {
+ /* No rotation expected. */
+ goto end;
+ }
+
+ if (stream->index_rotated) {
+ /* Rotation of the index has already occurred. */
+ goto end;
+ }
+
+ if (stream->prev_index_seq == -1ULL ||
+ stream->prev_index_seq < stream->rotate_at_seq_num) {
+ DBG("Stream %" PRIu64 " index not yet ready for rotation (rotate_at_seq_num = %" PRIu64 ", prev_index_seq = %" PRIu64 ")",
+ stream->stream_handle,
+ stream->rotate_at_seq_num,
+ stream->prev_index_seq);
+ goto end;
+ } else if (stream->prev_index_seq != stream->rotate_at_seq_num) {
+ /*
+ * Unexpected, protocol error/bug.
+ * It could mean that we received a rotation position
+ * that is in the past.
+ */
+ ERR("Stream %" PRIu64 " index is in an inconsistent state (rotate_at_seq_num = %" PRIu64 ", prev_data_seq = %" PRIu64 ", prev_index_seq = %" PRIu64 ")",
+ stream->stream_handle,
+ stream->rotate_at_seq_num,
+ stream->prev_data_seq,
+ stream->prev_index_seq);
+ ret = -1;
+ goto end;
+ } else {
+ DBG("Rotating stream %" PRIu64 " index file",
+ stream->stream_handle);
+ ret = create_rotate_index_file(stream, stream->path_name);
+ stream->index_rotated = true;
+
+ if (stream->data_rotated && stream->index_rotated) {
+ /* Rotation completed; reset its state. */
+ DBG("Rotation completed for stream %" PRIu64,
+ stream->stream_handle);
+ stream->rotate_at_seq_num = -1ULL;
+ stream->data_rotated = false;
+ stream->index_rotated = false;
+ }
+ }
+
+end:
+ return ret;
+}
+
+/*
+ * Check if a stream's data file (as opposed to index) should be rotated
+ * (for session rotation).
+ * Must be called with the stream lock held.
+ *
+ * Return 0 on success, a negative value on error.
+ */
+static
+int try_rotate_stream_data(struct relay_stream *stream)
+{
+ int ret = 0;
+
+ if (stream->rotate_at_seq_num == -1ULL) {
+ /* No rotation expected. */
+ goto end;
+ }
+
+ if (stream->data_rotated) {
+ /* Rotation of the data file has already occurred. */
+ goto end;
+ }
+
+ if (stream->prev_data_seq == -1ULL ||
+ stream->prev_data_seq < stream->rotate_at_seq_num) {
+ DBG("Stream %" PRIu64 " not yet ready for rotation (rotate_at_seq_num = %" PRIu64 ", prev_data_seq = %" PRIu64 ")",
+ stream->stream_handle,
+ stream->rotate_at_seq_num,
+ stream->prev_data_seq);
+ goto end;
+ } else if (stream->prev_data_seq > stream->rotate_at_seq_num) {
+ /*
+ * prev_data_seq is checked here since indexes and rotation
+ * commands are serialized with respect to each other.
+ */
+ DBG("Rotation after too much data has been written in tracefile "
+ "for stream %" PRIu64 ", need to truncate before "
+ "rotating", stream->stream_handle);
+ ret = rotate_truncate_stream(stream);
+ if (ret) {
+ ERR("Failed to truncate stream");
+ goto end;
+ }
+ } else if (stream->prev_data_seq != stream->rotate_at_seq_num) {
+ /*
+ * Unexpected, protocol error/bug.
+ * It could mean that we received a rotation position
+ * that is in the past.
+ */
+ ERR("Stream %" PRIu64 " data is in an inconsistent state (rotate_at_seq_num = %" PRIu64 ", prev_data_seq = %" PRIu64 ")",
+ stream->stream_handle,
+ stream->rotate_at_seq_num,
+ stream->prev_data_seq);
+ ret = -1;
+ goto end;
+ } else {
+ ret = do_rotate_stream_data(stream);
+ }
+
+end:
+ return ret;
+}
+
+/*
+ * relay_recv_metadata: receive the metadata for the session.
+ */
+static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
+ struct relay_connection *conn,
+ const struct lttng_buffer_view *payload)
+{
+ int ret = 0;
+ ssize_t size_ret;
+ struct relay_session *session = conn->session;
+ struct lttcomm_relayd_metadata_payload metadata_payload_header;
+ struct relay_stream *metadata_stream;
+ uint64_t metadata_payload_size;
+
+ if (!session) {
+ ERR("Metadata sent before version check");
+ ret = -1;
+ goto end;
+ }
+
+ if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) {
+ ERR("Incorrect data size");
+ ret = -1;
+ goto end;
+ }
+ metadata_payload_size = recv_hdr->data_size -
+ sizeof(struct lttcomm_relayd_metadata_payload);
+
+ memcpy(&metadata_payload_header, payload->data,
+ sizeof(metadata_payload_header));
+ metadata_payload_header.stream_id = be64toh(
+ metadata_payload_header.stream_id);
+ metadata_payload_header.padding_size = be32toh(
+ metadata_payload_header.padding_size);
+
+ metadata_stream = stream_get_by_id(metadata_payload_header.stream_id);
+ if (!metadata_stream) {
+ ret = -1;
+ goto end;
+ }
+
+ pthread_mutex_lock(&metadata_stream->lock);
+
+ size_ret = lttng_write(metadata_stream->stream_fd->fd,
+ payload->data + sizeof(metadata_payload_header),
+ metadata_payload_size);
+ if (size_ret < metadata_payload_size) {
+ ERR("Relay error writing metadata on file");
+ ret = -1;
+ goto end_put;
+ }
+
+ size_ret = write_padding_to_file(metadata_stream->stream_fd->fd,
+ metadata_payload_header.padding_size);
+ if (size_ret < (int64_t) metadata_payload_header.padding_size) {
+ ret = -1;
+ goto end_put;
+ }
+
+ metadata_stream->metadata_received +=
+ metadata_payload_size + metadata_payload_header.padding_size;
+ DBG2("Relay metadata written. Updated metadata_received %" PRIu64,
+ metadata_stream->metadata_received);
+
+ ret = try_rotate_stream_data(metadata_stream);
+ if (ret < 0) {
+ goto end_put;
+ }
+
+end_put:
+ pthread_mutex_unlock(&metadata_stream->lock);
+ stream_put(metadata_stream);
+end:
+ return ret;
+}
+
+/*
+ * relay_send_version: send relayd version number
+ */
+static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr,
+ struct relay_connection *conn,
+ const struct lttng_buffer_view *payload)
+{
+ int ret;
+ ssize_t send_ret;
+ struct lttcomm_relayd_version reply, msg;
+ bool compatible = true;
+
+ conn->version_check_done = true;
+
+ /* Get version from the other side. */
+ if (payload->size < sizeof(msg)) {
+ ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes",
+ sizeof(msg), payload->size);
+ ret = -1;
+ goto end;
+ }
+
+ memcpy(&msg, payload->data, sizeof(msg));
+ msg.major = be32toh(msg.major);
+ msg.minor = be32toh(msg.minor);
+
+ memset(&reply, 0, sizeof(reply));
+ reply.major = RELAYD_VERSION_COMM_MAJOR;
+ reply.minor = RELAYD_VERSION_COMM_MINOR;
+
+ /* Major versions must be the same */
+ if (reply.major != msg.major) {
+ DBG("Incompatible major versions (%u vs %u), deleting session",
+ reply.major, msg.major);
+ compatible = false;
+ }
+
+ conn->major = reply.major;
+ /* We adapt to the lowest compatible version */
+ if (reply.minor <= msg.minor) {
+ conn->minor = reply.minor;
+ } else {
+ conn->minor = msg.minor;
+ }
+
+ reply.major = htobe32(reply.major);
+ reply.minor = htobe32(reply.minor);
+ send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
+ sizeof(reply), 0);
+ if (send_ret < (ssize_t) sizeof(reply)) {
+ ERR("Failed to send \"send version\" command reply (ret = %zd)",
+ send_ret);
+ ret = -1;
+ goto end;
+ } else {
+ ret = 0;
+ }
+
+ if (!compatible) {
+ ret = -1;
+ goto end;
+ }
+
+ DBG("Version check done using protocol %u.%u", conn->major,
+ conn->minor);
+
+end:
+ return ret;
+}
+
+/*
+ * Check for data pending for a given stream id from the session daemon.
+ */
+static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
+ struct relay_connection *conn,
+ const struct lttng_buffer_view *payload)
+{
+ struct relay_session *session = conn->session;
+ struct lttcomm_relayd_data_pending msg;
+ struct lttcomm_relayd_generic_reply reply;
+ struct relay_stream *stream;
+ ssize_t send_ret;
+ int ret;
+ uint64_t stream_seq;
+
+ DBG("Data pending command received");
+
+ if (!session || !conn->version_check_done) {
+ ERR("Trying to check for data before version check");
+ ret = -1;
+ goto end_no_session;
+ }
+
+ if (payload->size < sizeof(msg)) {
+ ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes",
+ sizeof(msg), payload->size);
+ ret = -1;
+ goto end_no_session;
+ }
+ memcpy(&msg, payload->data, sizeof(msg));
+ msg.stream_id = be64toh(msg.stream_id);
+ msg.last_net_seq_num = be64toh(msg.last_net_seq_num);
+
+ stream = stream_get_by_id(msg.stream_id);
+ if (stream == NULL) {
+ ret = -1;
+ goto end;
+ }
+
+ pthread_mutex_lock(&stream->lock);
+
+ if (session_streams_have_index(session)) {
+ /*
+ * Ensure that both the index and stream data have been
+ * flushed up to the requested point.
+ */
+ stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
+ } else {
+ stream_seq = stream->prev_data_seq;
+ }
+ DBG("Data pending for stream id %" PRIu64 ": prev_data_seq %" PRIu64
+ ", prev_index_seq %" PRIu64
+ ", and last_seq %" PRIu64, msg.stream_id,
+ stream->prev_data_seq, stream->prev_index_seq,
+ msg.last_net_seq_num);
+
+ /* Avoid wrapping issue */
+ if (((int64_t) (stream_seq - msg.last_net_seq_num)) >= 0) {
+ /* Data has in fact been written and is NOT pending */
+ ret = 0;
+ } else {
+ /* Data still being streamed thus pending */
+ ret = 1;
+ }
+
+ stream->data_pending_check_done = true;
+ pthread_mutex_unlock(&stream->lock);
+
+ stream_put(stream);
+end:
+
+ memset(&reply, 0, sizeof(reply));
+ reply.ret_code = htobe32(ret);
+ send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
+ if (send_ret < (ssize_t) sizeof(reply)) {
+ ERR("Failed to send \"data pending\" command reply (ret = %zd)",
+ send_ret);
+ ret = -1;
+ }
+
+end_no_session:
+ return ret;
+}
+
+/*
+ * Wait for the control socket to reach a quiescent state.
+ *
+ * Note that for now, when receiving this command from the session
+ * daemon, this means that every subsequent commands or data received on
+ * the control socket has been handled. So, this is why we simply return
+ * OK here.
+ */
+static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr,
+ struct relay_connection *conn,
+ const struct lttng_buffer_view *payload)
+{
+ int ret;
+ ssize_t send_ret;
+ struct relay_stream *stream;
+ struct lttcomm_relayd_quiescent_control msg;
+ struct lttcomm_relayd_generic_reply reply;
+
+ DBG("Checking quiescent state on control socket");
+
+ if (!conn->session || !conn->version_check_done) {
+ ERR("Trying to check for data before version check");
+ ret = -1;
+ goto end_no_session;
+ }
+
+ if (payload->size < sizeof(msg)) {
+ ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes",
+ sizeof(msg), payload->size);
+ ret = -1;
+ goto end_no_session;
+ }
+ memcpy(&msg, payload->data, sizeof(msg));
+ msg.stream_id = be64toh(msg.stream_id);
+
+ stream = stream_get_by_id(msg.stream_id);
+ if (!stream) {
+ goto reply;
+ }
+ pthread_mutex_lock(&stream->lock);
+ stream->data_pending_check_done = true;
+ pthread_mutex_unlock(&stream->lock);
+
+ DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id);
+ stream_put(stream);
+reply:
+ memset(&reply, 0, sizeof(reply));
+ reply.ret_code = htobe32(LTTNG_OK);
+ send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
+ if (send_ret < (ssize_t) sizeof(reply)) {
+ ERR("Failed to send \"quiescent control\" command reply (ret = %zd)",
+ send_ret);
+ ret = -1;
+ } else {
+ ret = 0;
+ }
+
+end_no_session:
+ return ret;
+}
+
+/*
+ * Initialize a data pending command. This means that a consumer is about
+ * to ask for data pending for each stream it holds. Simply iterate over
+ * all streams of a session and set the data_pending_check_done flag.
+ *
+ * This command returns to the client a LTTNG_OK code.
+ */
+static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
+ struct relay_connection *conn,
+ const struct lttng_buffer_view *payload)
+{
+ int ret;
+ ssize_t send_ret;
+ struct lttng_ht_iter iter;
+ struct lttcomm_relayd_begin_data_pending msg;
+ struct lttcomm_relayd_generic_reply reply;
+ struct relay_stream *stream;
+
+ assert(recv_hdr);
+ assert(conn);
+
+ DBG("Init streams for data pending");
+
+ if (!conn->session || !conn->version_check_done) {
+ ERR("Trying to check for data before version check");
+ ret = -1;
+ goto end_no_session;
+ }
+
+ if (payload->size < sizeof(msg)) {
+ ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes",
+ sizeof(msg), payload->size);
+ ret = -1;
+ goto end_no_session;
+ }
+ memcpy(&msg, payload->data, sizeof(msg));
+ msg.session_id = be64toh(msg.session_id);
+
+ /*
+ * Iterate over all streams to set the begin data pending flag.
+ * For now, the streams are indexed by stream handle so we have
+ * to iterate over all streams to find the one associated with
+ * the right session_id.
+ */
+ rcu_read_lock();
+ cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
+ node.node) {
+ if (!stream_get(stream)) {
+ continue;
+ }
+ if (stream->trace->session->id == msg.session_id) {
+ pthread_mutex_lock(&stream->lock);
+ stream->data_pending_check_done = false;
+ pthread_mutex_unlock(&stream->lock);
+ DBG("Set begin data pending flag to stream %" PRIu64,
+ stream->stream_handle);
+ }
+ stream_put(stream);
+ }
+ rcu_read_unlock();
+
+ memset(&reply, 0, sizeof(reply));
+ /* All good, send back reply. */
+ reply.ret_code = htobe32(LTTNG_OK);
+
+ send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
+ if (send_ret < (ssize_t) sizeof(reply)) {
+ ERR("Failed to send \"begin data pending\" command reply (ret = %zd)",
+ send_ret);
+ ret = -1;
+ } else {
+ ret = 0;
+ }
+
+end_no_session:
+ return ret;
+}
+
+/*
+ * End data pending command. This will check, for a given session id, if
+ * each stream associated with it has its data_pending_check_done flag
+ * set. If not, this means that the client lost track of the stream but
+ * the data is still being streamed on our side. In this case, we inform
+ * the client that data is in flight.
+ *
+ * Return to the client if there is data in flight or not with a ret_code.
+ */
+static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
+ struct relay_connection *conn,
+ const struct lttng_buffer_view *payload)
+{
+ int ret;
+ ssize_t send_ret;
+ struct lttng_ht_iter iter;
+ struct lttcomm_relayd_end_data_pending msg;
+ struct lttcomm_relayd_generic_reply reply;
+ struct relay_stream *stream;
+ uint32_t is_data_inflight = 0;
+
+ DBG("End data pending command");
+
+ if (!conn->session || !conn->version_check_done) {
+ ERR("Trying to check for data before version check");
+ ret = -1;
+ goto end_no_session;
+ }
+
+ if (payload->size < sizeof(msg)) {
+ ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes",
+ sizeof(msg), payload->size);
+ ret = -1;
+ goto end_no_session;
+ }
+ memcpy(&msg, payload->data, sizeof(msg));
+ msg.session_id = be64toh(msg.session_id);
+
+ /*
+ * Iterate over all streams to see if the begin data pending
+ * flag is set.
+ */
+ rcu_read_lock();
+ cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
+ node.node) {
+ if (!stream_get(stream)) {
+ continue;
+ }
+ if (stream->trace->session->id != msg.session_id) {
+ stream_put(stream);
+ continue;
+ }
+ pthread_mutex_lock(&stream->lock);
+ if (!stream->data_pending_check_done) {
+ uint64_t stream_seq;
+
+ if (session_streams_have_index(conn->session)) {
+ /*
+ * Ensure that both the index and stream data have been
+ * flushed up to the requested point.
+ */
+ stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
+ } else {
+ stream_seq = stream->prev_data_seq;
+ }
+ if (!stream->closed || !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) {
+ is_data_inflight = 1;
+ DBG("Data is still in flight for stream %" PRIu64,
+ stream->stream_handle);
+ pthread_mutex_unlock(&stream->lock);
+ stream_put(stream);
+ break;
+ }
+ }
+ pthread_mutex_unlock(&stream->lock);
+ stream_put(stream);
+ }
+ rcu_read_unlock();
+
+ memset(&reply, 0, sizeof(reply));
+ /* All good, send back reply. */
+ reply.ret_code = htobe32(is_data_inflight);
+
+ send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
+ if (send_ret < (ssize_t) sizeof(reply)) {
+ ERR("Failed to send \"end data pending\" command reply (ret = %zd)",
+ send_ret);
+ ret = -1;
+ } else {
+ ret = 0;
+ }
+
+end_no_session:
+ return ret;
+}
+
+/*
+ * Receive an index for a specific stream.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr,
+ struct relay_connection *conn,
+ const struct lttng_buffer_view *payload)
+{
+ int ret;
+ ssize_t send_ret;
+ struct relay_session *session = conn->session;
+ struct lttcomm_relayd_index index_info;
+ struct relay_index *index;
+ struct lttcomm_relayd_generic_reply reply;
+ struct relay_stream *stream;
+ size_t msg_len;
+
+ assert(conn);
+
+ DBG("Relay receiving index");
+
+ if (!session || !conn->version_check_done) {
+ ERR("Trying to close a stream before version check");
+ ret = -1;
+ goto end_no_session;
+ }
+
+ msg_len = lttcomm_relayd_index_len(
+ lttng_to_index_major(conn->major, conn->minor),
+ lttng_to_index_minor(conn->major, conn->minor));
+ if (payload->size < msg_len) {
+ ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes",
+ msg_len, payload->size);
+ ret = -1;
+ goto end_no_session;
+ }
+ memcpy(&index_info, payload->data, msg_len);
+ index_info.relay_stream_id = be64toh(index_info.relay_stream_id);
+ index_info.net_seq_num = be64toh(index_info.net_seq_num);
+ index_info.packet_size = be64toh(index_info.packet_size);
+ index_info.content_size = be64toh(index_info.content_size);
+ index_info.timestamp_begin = be64toh(index_info.timestamp_begin);
+ index_info.timestamp_end = be64toh(index_info.timestamp_end);
+ index_info.events_discarded = be64toh(index_info.events_discarded);
+ index_info.stream_id = be64toh(index_info.stream_id);
+
+ if (conn->minor >= 8) {
+ index_info.stream_instance_id =
+ be64toh(index_info.stream_instance_id);
+ index_info.packet_seq_num = be64toh(index_info.packet_seq_num);
+ }
+
+ stream = stream_get_by_id(index_info.relay_stream_id);
+ if (!stream) {
+ ERR("stream_get_by_id not found");
+ ret = -1;
+ goto end;
+ }
+ pthread_mutex_lock(&stream->lock);
+
+ /* Live beacon handling */
+ if (index_info.packet_size == 0) {
+ DBG("Received live beacon for stream %" PRIu64,
+ stream->stream_handle);
+
+ /*
+ * Only flag a stream inactive when it has already
+ * received data and no indexes are in flight.
+ */
+ if (stream->index_received_seqcount > 0
+ && stream->indexes_in_flight == 0) {
+ stream->beacon_ts_end = index_info.timestamp_end;
+ }
+ ret = 0;
+ goto end_stream_put;
+ } else {
+ stream->beacon_ts_end = -1ULL;
+ }
+
+ if (stream->ctf_stream_id == -1ULL) {
+ stream->ctf_stream_id = index_info.stream_id;
+ }
+ index = relay_index_get_by_id_or_create(stream, index_info.net_seq_num);
+ if (!index) {
+ ret = -1;
+ ERR("relay_index_get_by_id_or_create index NULL");
+ goto end_stream_put;
+ }
+ if (set_index_control_data(index, &index_info, conn)) {
+ ERR("set_index_control_data error");
+ relay_index_put(index);
+ ret = -1;
+ goto end_stream_put;
+ }
+ ret = relay_index_try_flush(index);
+ if (ret == 0) {
+ tracefile_array_commit_seq(stream->tfa);
+ stream->index_received_seqcount++;
+ stream->pos_after_last_complete_data_index += index->total_size;
+ stream->prev_index_seq = index_info.net_seq_num;
+
+ ret = try_rotate_stream_index(stream);
+ if (ret < 0) {
+ goto end_stream_put;
+ }
+ } else if (ret > 0) {
+ /* no flush. */
+ ret = 0;
+ } else {
+ /*
+ * ret < 0
+ *
+ * relay_index_try_flush is responsible for the self-reference
+ * put of the index object on error.
+ */
+ ERR("relay_index_try_flush error %d", ret);
+ ret = -1;
+ }
+
+end_stream_put:
+ pthread_mutex_unlock(&stream->lock);
+ stream_put(stream);
+
+end:
+
+ memset(&reply, 0, sizeof(reply));
+ if (ret < 0) {
+ reply.ret_code = htobe32(LTTNG_ERR_UNK);
+ } else {
+ reply.ret_code = htobe32(LTTNG_OK);
+ }
+ send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
+ if (send_ret < (ssize_t) sizeof(reply)) {
+ ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret);
+ ret = -1;
+ }
+
+end_no_session:
+ return ret;
+}
+
+/*
+ * Receive the streams_sent message.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr,
+ struct relay_connection *conn,
+ const struct lttng_buffer_view *payload)
+{
+ int ret;
+ ssize_t send_ret;
+ struct lttcomm_relayd_generic_reply reply;
+
+ assert(conn);
+
+ DBG("Relay receiving streams_sent");
+
+ if (!conn->session || !conn->version_check_done) {
+ ERR("Trying to close a stream before version check");
+ ret = -1;
+ goto end_no_session;
+ }
+
+ /*
+ * Publish every pending stream in the connection recv list which are
+ * now ready to be used by the viewer.
+ */
+ publish_connection_local_streams(conn);
+
+ memset(&reply, 0, sizeof(reply));
+ reply.ret_code = htobe32(LTTNG_OK);
+ send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
+ if (send_ret < (ssize_t) sizeof(reply)) {
+ ERR("Failed to send \"streams sent\" command reply (ret = %zd)",
+ send_ret);
+ ret = -1;
+ } else {
+ /* Success. */
+ ret = 0;
+ }
+
+end_no_session:
+ return ret;
+}
+
+/*
+ * relay_rotate_session_stream: rotate a stream to a new tracefile for the session
+ * rotation feature (not the tracefile rotation feature).
+ */
+static int relay_rotate_session_stream(const struct lttcomm_relayd_hdr *recv_hdr,
+ struct relay_connection *conn,
+ const struct lttng_buffer_view *payload)
+{
+ int ret;
+ ssize_t send_ret;
+ struct relay_session *session = conn->session;
+ struct lttcomm_relayd_rotate_stream stream_info;
+ struct lttcomm_relayd_generic_reply reply;
+ struct relay_stream *stream;
+ size_t header_len;
+ size_t path_len;
+ struct lttng_buffer_view new_path_view;
+
+ if (!session || !conn->version_check_done) {
+ ERR("Trying to rotate a stream before version check");
+ ret = -1;
+ goto end_no_reply;
+ }
+
+ if (session->major == 2 && session->minor < 11) {
+ ERR("Unsupported feature before 2.11");
+ ret = -1;
+ goto end_no_reply;
+ }
+
+ header_len = sizeof(struct lttcomm_relayd_rotate_stream);
+
+ if (payload->size < header_len) {
+ ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected >= %zu bytes, got %zu bytes",
+ header_len, payload->size);
+ ret = -1;
+ goto end_no_reply;
+ }
+
+ memcpy(&stream_info, payload->data, header_len);
+
+ /* Convert to host */
+ stream_info.pathname_length = be32toh(stream_info.pathname_length);
+ stream_info.stream_id = be64toh(stream_info.stream_id);
+ stream_info.new_chunk_id = be64toh(stream_info.new_chunk_id);
+ stream_info.rotate_at_seq_num = be64toh(stream_info.rotate_at_seq_num);
+
+ path_len = stream_info.pathname_length;
+ if (payload->size < header_len + path_len) {
+ ERR("Unexpected payload size in \"relay_rotate_session_stream\" including path: expected >= %zu bytes, got %zu bytes",
+ header_len + path_len, payload->size);
+ ret = -1;
+ goto end_no_reply;
+ }
+
+ /* Ensure it fits in local filename length. */
+ if (path_len >= LTTNG_PATH_MAX) {
+ ret = -ENAMETOOLONG;
+ ERR("Length of relay_rotate_session_stream command's path name (%zu bytes) exceeds the maximal allowed length of %i bytes",
+ path_len, LTTNG_PATH_MAX);
+ goto end;
+ }
+
+ new_path_view = lttng_buffer_view_from_view(payload, header_len,
+ stream_info.pathname_length);
+
+ stream = stream_get_by_id(stream_info.stream_id);
+ if (!stream) {
+ ret = -1;
+ goto end;
+ }
+
+ pthread_mutex_lock(&stream->lock);
+
+ /*
+ * Update the trace path (just the folder, the stream name does not
+ * change).
+ */
+ free(stream->prev_path_name);
+ stream->prev_path_name = stream->path_name;
+ stream->path_name = create_output_path(new_path_view.data);
+ if (!stream->path_name) {
+ ERR("Failed to create a new output path");
+ ret = -1;
+ goto end_stream_unlock;
+ }
+ ret = utils_mkdir_recursive(stream->path_name, S_IRWXU | S_IRWXG,
+ -1, -1);
+ if (ret < 0) {
+ ERR("relay creating output directory");
+ ret = -1;
+ goto end_stream_unlock;
+ }
+
+ assert(stream->current_chunk_id.is_set);
+ stream->current_chunk_id.value = stream_info.new_chunk_id;
+
+ if (stream->is_metadata) {
+ /*
+ * Metadata streams have no index; consider its rotation
+ * complete.
+ */
+ stream->index_rotated = true;
+ /*
+ * The metadata stream is sent only over the control connection
+ * so we know we have all the data to perform the stream
+ * rotation.
+ */
+ ret = do_rotate_stream_data(stream);
+ } else {
+ stream->rotate_at_seq_num = stream_info.rotate_at_seq_num;
+ ret = try_rotate_stream_data(stream);
+ if (ret < 0) {
+ goto end_stream_unlock;
+ }
+
+ ret = try_rotate_stream_index(stream);
+ if (ret < 0) {
+ goto end_stream_unlock;
+ }
+ }
+
+end_stream_unlock:
+ pthread_mutex_unlock(&stream->lock);
+ stream_put(stream);
+end:
+ memset(&reply, 0, sizeof(reply));
+ if (ret < 0) {
+ reply.ret_code = htobe32(LTTNG_ERR_UNK);
+ } else {
+ reply.ret_code = htobe32(LTTNG_OK);
+ }
+ send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
+ sizeof(struct lttcomm_relayd_generic_reply), 0);
+ if (send_ret < (ssize_t) sizeof(reply)) {
+ ERR("Failed to send \"rotate session stream\" command reply (ret = %zd)",
+ send_ret);
+ ret = -1;
+ }
+
+end_no_reply:
+ return ret;
+}
+
+static int init_session_output_directory_handle(struct relay_session *session,
+ struct lttng_directory_handle *handle)
+{
+ int ret;
+ /* hostname/session_name */
+ char *session_directory = NULL;
+ /*
+ * base path + session_directory
+ * e.g. /home/user/lttng-traces/hostname/session_name
+ */
+ char *full_session_path = NULL;
+
+ pthread_mutex_lock(&session->lock);
+ ret = asprintf(&session_directory, "%s/%s", session->hostname,
+ session->session_name);
+ pthread_mutex_unlock(&session->lock);
+ if (ret < 0) {
+ PERROR("Failed to format session directory name");
+ goto end;
+ }
+
+ full_session_path = create_output_path(session_directory);
+ if (!full_session_path) {
+ ret = -1;
+ goto end;
+ }
+
+ ret = utils_mkdir_recursive(
+ full_session_path, S_IRWXU | S_IRWXG, -1, -1);
+ if (ret) {
+ ERR("Failed to create session output path \"%s\"",
+ full_session_path);
+ goto end;
+ }
+
+ ret = lttng_directory_handle_init(handle, full_session_path);
+ if (ret) {
+ goto end;
+ }
+end:
+ free(session_directory);
+ free(full_session_path);
+ return ret;
+}
+
+/*
+ * relay_create_trace_chunk: create a new trace chunk
+ */
+static int relay_create_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
+ struct relay_connection *conn,
+ const struct lttng_buffer_view *payload)
+{
+ int ret = 0;
+ ssize_t send_ret;
+ struct relay_session *session = conn->session;
+ struct lttcomm_relayd_create_trace_chunk *msg;
+ struct lttcomm_relayd_generic_reply reply = {};
+ struct lttng_buffer_view header_view;
+ struct lttng_buffer_view chunk_name_view;
+ struct lttng_trace_chunk *chunk = NULL, *published_chunk = NULL;
+ enum lttng_error_code reply_code = LTTNG_OK;
+ enum lttng_trace_chunk_status chunk_status;
+ struct lttng_directory_handle session_output;
+
+ if (!session || !conn->version_check_done) {
+ ERR("Trying to create a trace chunk before version check");
+ ret = -1;
+ goto end_no_reply;
+ }
+
+ if (session->major == 2 && session->minor < 11) {
+ ERR("Chunk creation command is unsupported before 2.11");
+ ret = -1;
+ goto end_no_reply;
+ }
+
+ header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
+ if (!header_view.data) {
+ ERR("Failed to receive payload of chunk creation command");
+ ret = -1;
+ goto end_no_reply;
+ }
+
+ /* Convert to host endianness. */
+ msg = (typeof(msg)) header_view.data;
+ msg->chunk_id = be64toh(msg->chunk_id);
+ msg->creation_timestamp = be64toh(msg->creation_timestamp);
+ msg->override_name_length = be32toh(msg->override_name_length);
+
+ chunk = lttng_trace_chunk_create(
+ msg->chunk_id, msg->creation_timestamp);
+ if (!chunk) {
+ ERR("Failed to create trace chunk in trace chunk creation command");
+ ret = -1;
+ reply_code = LTTNG_ERR_NOMEM;
+ goto end;
+ }
+
+ if (msg->override_name_length) {
+ const char *name;
+
+ chunk_name_view = lttng_buffer_view_from_view(payload,
+ sizeof(*msg),
+ msg->override_name_length);
+ name = chunk_name_view.data;
+ if (!name || name[msg->override_name_length - 1]) {
+ ERR("Failed to receive payload of chunk creation command");
+ ret = -1;
+ reply_code = LTTNG_ERR_INVALID;
+ goto end;
+ }
+
+ chunk_status = lttng_trace_chunk_override_name(
+ chunk, chunk_name_view.data);
+ switch (chunk_status) {
+ case LTTNG_TRACE_CHUNK_STATUS_OK:
+ break;
+ case LTTNG_TRACE_CHUNK_STATUS_INVALID_ARGUMENT:
+ ERR("Failed to set the name of new trace chunk in trace chunk creation command (invalid name)");
+ reply_code = LTTNG_ERR_INVALID;
+ ret = -1;
+ goto end;
+ default:
+ ERR("Failed to set the name of new trace chunk in trace chunk creation command (unknown error)");
+ reply_code = LTTNG_ERR_UNK;
+ ret = -1;
+ goto end;
+ }
+ }
+
+ ret = init_session_output_directory_handle(
+ conn->session, &session_output);
+ if (ret) {
+ reply_code = LTTNG_ERR_CREATE_DIR_FAIL;
+ goto end;
+ }
+
+ chunk_status = lttng_trace_chunk_set_credentials_current_user(chunk);
+ if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+ reply_code = LTTNG_ERR_UNK;
+ ret = -1;
+ goto end;
+ }
+
+ chunk_status = lttng_trace_chunk_set_as_owner(chunk, &session_output);
+ if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+ reply_code = LTTNG_ERR_UNK;
+ ret = -1;
+ goto end;
+ }
+
+ published_chunk = sessiond_trace_chunk_registry_publish_chunk(
+ sessiond_trace_chunk_registry,
+ conn->session->sessiond_uuid,
+ conn->session->id,
+ chunk);
+ if (!published_chunk) {
+ char uuid_str[UUID_STR_LEN];
+
+ lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
+ ERR("Failed to publish chunk: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
+ uuid_str,
+ conn->session->id,
+ msg->chunk_id);
+ ret = -1;
+ reply_code = LTTNG_ERR_NOMEM;
+ goto end;
+ }
+
+ pthread_mutex_lock(&conn->session->lock);
+ lttng_trace_chunk_put(conn->session->current_trace_chunk);
+ conn->session->current_trace_chunk = published_chunk;
+ pthread_mutex_unlock(&conn->session->lock);
+ published_chunk = NULL;
+
+end:
+ reply.ret_code = htobe32((uint32_t) reply_code);
+ send_ret = conn->sock->ops->sendmsg(conn->sock,
+ &reply,
+ sizeof(struct lttcomm_relayd_generic_reply),
+ 0);
+ if (send_ret < (ssize_t) sizeof(reply)) {
+ ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
+ send_ret);
+ ret = -1;
+ }
+end_no_reply:
+ lttng_trace_chunk_put(chunk);
+ lttng_trace_chunk_put(published_chunk);
+ lttng_directory_handle_fini(&session_output);
+ return ret;
+}
+
+/*
+ * relay_close_trace_chunk: close a trace chunk
+ */
+static int relay_close_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
+ struct relay_connection *conn,
+ const struct lttng_buffer_view *payload)
+{
+ int ret = 0;
+ ssize_t send_ret;
+ struct relay_session *session = conn->session;
+ struct lttcomm_relayd_close_trace_chunk *msg;
+ struct lttcomm_relayd_generic_reply reply = {};
+ struct lttng_buffer_view header_view;
+ struct lttng_trace_chunk *chunk = NULL;
+ enum lttng_error_code reply_code = LTTNG_OK;
+ enum lttng_trace_chunk_status chunk_status;
+ uint64_t chunk_id;
+ LTTNG_OPTIONAL(enum lttng_trace_chunk_command_type) close_command;
+ time_t close_timestamp;
+
+ if (!session || !conn->version_check_done) {
+ ERR("Trying to close a trace chunk before version check");
+ ret = -1;
+ goto end_no_reply;
+ }
+
+ if (session->major == 2 && session->minor < 11) {
+ ERR("Chunk close command is unsupported before 2.11");
+ ret = -1;
+ goto end_no_reply;
+ }
+
+ header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
+ if (!header_view.data) {
+ ERR("Failed to receive payload of chunk close command");
+ ret = -1;
+ goto end_no_reply;
+ }
+
+ /* Convert to host endianness. */
+ msg = (typeof(msg)) header_view.data;
+ chunk_id = be64toh(msg->chunk_id);
+ close_timestamp = (time_t) be64toh(msg->close_timestamp);
+ close_command = (typeof(close_command)){
+ .value = be32toh(msg->close_command.value),
+ .is_set = msg->close_command.is_set,
+ };
+
+ chunk = sessiond_trace_chunk_registry_get_chunk(
+ sessiond_trace_chunk_registry,
+ conn->session->sessiond_uuid,
+ conn->session->id,
+ chunk_id);
+ if (!chunk) {
+ char uuid_str[UUID_STR_LEN];
+
+ lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
+ ERR("Failed to find chunk to close: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
+ uuid_str,
+ conn->session->id,
+ msg->chunk_id);
+ ret = -1;
+ reply_code = LTTNG_ERR_NOMEM;
+ goto end;
+ }
+
+ chunk_status = lttng_trace_chunk_set_close_timestamp(
+ chunk, close_timestamp);
+ if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+ ERR("Failed to set trace chunk close timestamp");
+ ret = -1;
+ reply_code = LTTNG_ERR_UNK;
+ goto end;
+ }
+
+ if (close_command.is_set) {
+ chunk_status = lttng_trace_chunk_set_close_command(
+ chunk, close_command.value);
+ if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+ ret = -1;
+ reply_code = LTTNG_ERR_INVALID;
+ goto end;
+ }
+ }
+
+end:
+ reply.ret_code = htobe32((uint32_t) reply_code);
+ send_ret = conn->sock->ops->sendmsg(conn->sock,
+ &reply,
+ sizeof(struct lttcomm_relayd_generic_reply),
+ 0);
+ if (send_ret < (ssize_t) sizeof(reply)) {
+ ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
+ send_ret);
+ ret = -1;
+ }
+end_no_reply:
+ lttng_trace_chunk_put(chunk);
+ return ret;
+}
+
+ /*
+  * Log the control command being processed on a connection's socket.
+  *
+  * The trailing semicolon is intentionally omitted so call sites supply
+  * it; the previous definition expanded to a statement PLUS an empty
+  * statement, which would break inside an unbraced if/else.
+  */
+ #define DBG_CMD(cmd_name, conn) \
+ 	DBG3("Processing \"%s\" command for socket %i", (cmd_name), (conn)->sock->fd)
+
+ /*
+  * Dispatch a fully-received control command to its handler.
+  *
+  * Return the handler's return value, or -1 for an unknown command
+  * (after notifying the peer via relay_unknown_command()).
+  */
+ static int relay_process_control_command(struct relay_connection *conn,
+ 		const struct lttcomm_relayd_hdr *header,
+ 		const struct lttng_buffer_view *payload)
+ {
+ 	switch (header->cmd) {
+ 	case RELAYD_CREATE_SESSION:
+ 		DBG_CMD("RELAYD_CREATE_SESSION", conn);
+ 		return relay_create_session(header, conn, payload);
+ 	case RELAYD_ADD_STREAM:
+ 		DBG_CMD("RELAYD_ADD_STREAM", conn);
+ 		return relay_add_stream(header, conn, payload);
+ 	case RELAYD_START_DATA:
+ 		DBG_CMD("RELAYD_START_DATA", conn);
+ 		return relay_start(header, conn, payload);
+ 	case RELAYD_SEND_METADATA:
+ 		DBG_CMD("RELAYD_SEND_METADATA", conn);
+ 		return relay_recv_metadata(header, conn, payload);
+ 	case RELAYD_VERSION:
+ 		DBG_CMD("RELAYD_VERSION", conn);
+ 		return relay_send_version(header, conn, payload);
+ 	case RELAYD_CLOSE_STREAM:
+ 		DBG_CMD("RELAYD_CLOSE_STREAM", conn);
+ 		return relay_close_stream(header, conn, payload);
+ 	case RELAYD_DATA_PENDING:
+ 		DBG_CMD("RELAYD_DATA_PENDING", conn);
+ 		return relay_data_pending(header, conn, payload);
+ 	case RELAYD_QUIESCENT_CONTROL:
+ 		DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn);
+ 		return relay_quiescent_control(header, conn, payload);
+ 	case RELAYD_BEGIN_DATA_PENDING:
+ 		DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn);
+ 		return relay_begin_data_pending(header, conn, payload);
+ 	case RELAYD_END_DATA_PENDING:
+ 		DBG_CMD("RELAYD_END_DATA_PENDING", conn);
+ 		return relay_end_data_pending(header, conn, payload);
+ 	case RELAYD_SEND_INDEX:
+ 		DBG_CMD("RELAYD_SEND_INDEX", conn);
+ 		return relay_recv_index(header, conn, payload);
+ 	case RELAYD_STREAMS_SENT:
+ 		DBG_CMD("RELAYD_STREAMS_SENT", conn);
+ 		return relay_streams_sent(header, conn, payload);
+ 	case RELAYD_RESET_METADATA:
+ 		DBG_CMD("RELAYD_RESET_METADATA", conn);
+ 		return relay_reset_metadata(header, conn, payload);
+ 	case RELAYD_ROTATE_STREAM:
+ 		DBG_CMD("RELAYD_ROTATE_STREAM", conn);
+ 		return relay_rotate_session_stream(header, conn, payload);
+ 	case RELAYD_CREATE_TRACE_CHUNK:
+ 		DBG_CMD("RELAYD_CREATE_TRACE_CHUNK", conn);
+ 		return relay_create_trace_chunk(header, conn, payload);
+ 	case RELAYD_CLOSE_TRACE_CHUNK:
+ 		DBG_CMD("RELAYD_CLOSE_TRACE_CHUNK", conn);
+ 		return relay_close_trace_chunk(header, conn, payload);
+ 	case RELAYD_UPDATE_SYNC_INFO:
+ 		/* Deliberately unsupported; fall through. */
+ 	default:
+ 		ERR("Received unknown command (%u)", header->cmd);
+ 		relay_unknown_command(conn);
+ 		return -1;
+ 	}
+ }
+
+ /*
+  * Receive (possibly partially) the payload of a control command, and
+  * once it is complete, process the command and reset the connection's
+  * protocol state for the next command.
+  *
+  * Returns RELAY_CONNECTION_STATUS_OK when more data is expected or the
+  * command was processed, _CLOSED on peer EOF, _ERROR otherwise.
+  */
+ static enum relay_connection_status relay_process_control_receive_payload(
+ struct relay_connection *conn)
+ {
+ int ret = 0;
+ enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
+ struct lttng_dynamic_buffer *reception_buffer =
+ &conn->protocol.ctrl.reception_buffer;
+ struct ctrl_connection_state_receive_payload *state =
+ &conn->protocol.ctrl.state.receive_payload;
+ struct lttng_buffer_view payload_view;
+
+ if (state->left_to_receive == 0) {
+ /* Short-circuit for payload-less commands. */
+ goto reception_complete;
+ }
+
+ /* Non-blocking read; the poll loop re-invokes us when data arrives. */
+ ret = conn->sock->ops->recvmsg(conn->sock,
+ reception_buffer->data + state->received,
+ state->left_to_receive, MSG_DONTWAIT);
+ if (ret < 0) {
+ /* EAGAIN/EWOULDBLOCK are expected with MSG_DONTWAIT: not an error. */
+ if (errno != EAGAIN && errno != EWOULDBLOCK) {
+ PERROR("Unable to receive command payload on sock %d",
+ conn->sock->fd);
+ status = RELAY_CONNECTION_STATUS_ERROR;
+ }
+ goto end;
+ } else if (ret == 0) {
+ DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
+ status = RELAY_CONNECTION_STATUS_CLOSED;
+ goto end;
+ }
+
+ assert(ret > 0);
+ assert(ret <= state->left_to_receive);
+
+ state->left_to_receive -= ret;
+ state->received += ret;
+
+ if (state->left_to_receive > 0) {
+ /*
+ * Can't transition to the protocol's next state, wait to
+ * receive the rest of the payload.
+ */
+ DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
+ state->received, state->left_to_receive,
+ conn->sock->fd);
+ goto end;
+ }
+
+ reception_complete:
+ DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes",
+ conn->sock->fd, state->received);
+ /*
+ * The payload required to process the command has been received.
+ * A view to the reception buffer is forwarded to the various
+ * commands and the state of the control is reset on success.
+ *
+ * Commands are responsible for sending their reply to the peer.
+ */
+ payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer,
+ 0, -1);
+ ret = relay_process_control_command(conn,
+ &state->header, &payload_view);
+ if (ret < 0) {
+ status = RELAY_CONNECTION_STATUS_ERROR;
+ goto end;
+ }
+
+ /* Re-arm the state machine to expect the next command's header. */
+ ret = connection_reset_protocol_state(conn);
+ if (ret) {
+ status = RELAY_CONNECTION_STATUS_ERROR;
+ }
+ end:
+ return status;
+ }
+
+ /*
+  * Receive (possibly partially) a control command header, and once it is
+  * complete, validate it and transition the connection's protocol state
+  * machine to payload reception.
+  *
+  * Returns RELAY_CONNECTION_STATUS_OK when more data is expected or the
+  * transition succeeded, _CLOSED on peer EOF, _ERROR otherwise.
+  */
+ static enum relay_connection_status relay_process_control_receive_header(
+ struct relay_connection *conn)
+ {
+ int ret = 0;
+ enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
+ struct lttcomm_relayd_hdr header;
+ struct lttng_dynamic_buffer *reception_buffer =
+ &conn->protocol.ctrl.reception_buffer;
+ struct ctrl_connection_state_receive_header *state =
+ &conn->protocol.ctrl.state.receive_header;
+
+ assert(state->left_to_receive != 0);
+
+ /* Non-blocking read; the poll loop re-invokes us when data arrives. */
+ ret = conn->sock->ops->recvmsg(conn->sock,
+ reception_buffer->data + state->received,
+ state->left_to_receive, MSG_DONTWAIT);
+ if (ret < 0) {
+ /* EAGAIN/EWOULDBLOCK are expected with MSG_DONTWAIT: not an error. */
+ if (errno != EAGAIN && errno != EWOULDBLOCK) {
+ PERROR("Unable to receive control command header on sock %d",
+ conn->sock->fd);
+ status = RELAY_CONNECTION_STATUS_ERROR;
+ }
+ goto end;
+ } else if (ret == 0) {
+ DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
+ status = RELAY_CONNECTION_STATUS_CLOSED;
+ goto end;
+ }
+
+ assert(ret > 0);
+ assert(ret <= state->left_to_receive);
+
+ state->left_to_receive -= ret;
+ state->received += ret;
+
+ if (state->left_to_receive > 0) {
+ /*
+ * Can't transition to the protocol's next state, wait to
+ * receive the rest of the header.
+ */
+ DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
+ state->received, state->left_to_receive,
+ conn->sock->fd);
+ goto end;
+ }
+
+ /* Transition to next state: receiving the command's payload. */
+ conn->protocol.ctrl.state_id =
+ CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD;
+ /* Decode the wire header (all multi-byte fields are big-endian). */
+ memcpy(&header, reception_buffer->data, sizeof(header));
+ header.circuit_id = be64toh(header.circuit_id);
+ header.data_size = be64toh(header.data_size);
+ header.cmd = be32toh(header.cmd);
+ header.cmd_version = be32toh(header.cmd_version);
+ /* Stash the decoded header for use when the payload completes. */
+ memcpy(&conn->protocol.ctrl.state.receive_payload.header,
+ &header, sizeof(header));
+
+ DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes",
+ conn->sock->fd, header.cmd, header.cmd_version,
+ header.data_size);
+
+ /* Reject oversized payloads before allocating a reception buffer. */
+ if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) {
+ ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.",
+ header.data_size);
+ status = RELAY_CONNECTION_STATUS_ERROR;
+ goto end;
+ }
+
+ conn->protocol.ctrl.state.receive_payload.left_to_receive =
+ header.data_size;
+ conn->protocol.ctrl.state.receive_payload.received = 0;
+ /* Size the reception buffer to hold the whole payload. */
+ ret = lttng_dynamic_buffer_set_size(reception_buffer,
+ header.data_size);
+ if (ret) {
+ status = RELAY_CONNECTION_STATUS_ERROR;
+ goto end;
+ }
+
+ if (header.data_size == 0) {
+ /*
+ * Manually invoke the next state as the poll loop
+ * will not wake-up to allow us to proceed further.
+ */
+ status = relay_process_control_receive_payload(conn);
+ }
+ end:
+ return status;
+ }
+
+/*
+ * Process the commands received on the control socket
+ */
+ static enum relay_connection_status relay_process_control(
+ 		struct relay_connection *conn)
+ {
+ 	/* Delegate to the handler matching the connection's current state. */
+ 	if (conn->protocol.ctrl.state_id ==
+ 			CTRL_CONNECTION_STATE_RECEIVE_HEADER) {
+ 		return relay_process_control_receive_header(conn);
+ 	}
+
+ 	if (conn->protocol.ctrl.state_id ==
+ 			CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD) {
+ 		return relay_process_control_receive_payload(conn);
+ 	}
+
+ 	/* A corrupted state id indicates an internal logic error. */
+ 	ERR("Unknown control connection protocol state encountered.");
+ 	abort();
+ }
+
+/*
+ * Handle index for a data stream.
+ *
+ * Called with the stream lock held.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int handle_index_data(struct relay_stream *stream, uint64_t net_seq_num,
+ bool rotate_index, bool *flushed, uint64_t total_size)
+{
+ int ret = 0;
+ uint64_t data_offset;
+ struct relay_index *index;
+
+ /* Get data offset because we are about to update the index. */
+ data_offset = htobe64(stream->tracefile_size_current);
+
+ DBG("handle_index_data: stream %" PRIu64 " net_seq_num %" PRIu64 " data offset %" PRIu64,
+ stream->stream_handle, net_seq_num, stream->tracefile_size_current);
+
+ /*
+ * Lookup for an existing index for that stream id/sequence
+ * number. If it exists, the control thread has already received the
+ * data for it, thus we need to write it to disk.
+ */
+ index = relay_index_get_by_id_or_create(stream, net_seq_num);
+ if (!index) {
+ ret = -1;
+ goto end;
+ }
+
+ if (rotate_index || !stream->index_file) {
+ const char *stream_path;
+
+ /*
+ * The data connection creates the stream's first index file.
+ *
+ * This can happen _after_ a ROTATE_STREAM command. In
+ * other words, the data of the first packet of this stream
+ * can be received after a ROTATE_STREAM command.
+ *
+ * The ROTATE_STREAM command changes the stream's path_name
+ * to point to the "next" chunk. If a rotation is pending for
+ * this stream, as indicated by "rotate_at_seq_num != -1ULL",
+ * it means that we are still receiving data that belongs in the
+ * stream's former path.
+ *
+ * In this very specific case, we must ensure that the index
+ * file is created in the streams's former path,
+ * "prev_path_name".
+ *
+ * All other rotations beyond the first one are not affected
+ * by this problem since the actual rotation operation creates
+ * the new chunk's index file.
+ */
+ stream_path = stream->rotate_at_seq_num == -1ULL ?
+ stream->path_name:
+ stream->prev_path_name;
+
+ ret = create_rotate_index_file(stream, stream_path);
+ if (ret < 0) {
+ ERR("Failed to rotate index");
+ /* Put self-ref for this index due to error. */
+ relay_index_put(index);
+ index = NULL;
+ goto end;
+ }
+ }
+
+ if (relay_index_set_file(index, stream->index_file, data_offset)) {
+ ret = -1;
+ /* Put self-ref for this index due to error. */
+ relay_index_put(index);
+ index = NULL;
+ goto end;
+ }
+
+ ret = relay_index_try_flush(index);
+ if (ret == 0) {
+ tracefile_array_commit_seq(stream->tfa);
+ stream->index_received_seqcount++;
+ *flushed = true;
+ } else if (ret > 0) {
+ index->total_size = total_size;
+ /* No flush. */
+ ret = 0;
+ } else {
+ /*
+ * ret < 0
+ *
+ * relay_index_try_flush is responsible for the self-reference
+ * put of the index object on error.
+ */
+ ERR("relay_index_try_flush error %d", ret);
+ ret = -1;
+ }
+end:
+ return ret;
+}
+
+static enum relay_connection_status relay_process_data_receive_header(
+ struct relay_connection *conn)
+{
+ int ret;
+ enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
+ struct data_connection_state_receive_header *state =
+ &conn->protocol.data.state.receive_header;
+ struct lttcomm_relayd_data_hdr header;
+ struct relay_stream *stream;
+
+ assert(state->left_to_receive != 0);
+
+ ret = conn->sock->ops->recvmsg(conn->sock,
+ state->header_reception_buffer + state->received,
+ state->left_to_receive, MSG_DONTWAIT);
+ if (ret < 0) {
+ if (errno != EAGAIN && errno != EWOULDBLOCK) {
+ PERROR("Unable to receive data header on sock %d", conn->sock->fd);
+ status = RELAY_CONNECTION_STATUS_ERROR;
+ }
+ goto end;
+ } else if (ret == 0) {
+ /* Orderly shutdown. Not necessary to print an error. */
+ DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
+ status = RELAY_CONNECTION_STATUS_CLOSED;
+ goto end;