#include "viewer-session.hpp"
#include <common/common.hpp>
+#include <common/urcu.hpp>
#include <urcu/rculist.h>
LTTNG_ASSERT(sock >= 0);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_lookup(relay_connections_ht, (void *) ((unsigned long) sock), &iter);
node = lttng_ht_iter_get_node_ulong(&iter);
if (!node) {
conn = nullptr;
}
end:
- rcu_read_unlock();
return conn;
}
void connection_put(struct relay_connection *conn)
{
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
urcu_ref_put(&conn->ref, connection_release);
- rcu_read_unlock();
}
void connection_ht_add(struct lttng_ht *relay_connections_ht, struct relay_connection *conn)
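For reference, the conversions throughout this patch assume that <common/urcu.hpp> provides lttng::urcu::read_lock_guard as a scope-bound RAII wrapper over the urcu read-side lock. A minimal sketch of the assumed semantics follows; the real class may differ in details:

#include <urcu.h> /* or the urcu flavour header used by the tree */

namespace lttng {
namespace urcu {
/*
 * Sketch only: enters the RCU read-side critical section on
 * construction and leaves it on destruction. Non-copyable, so a
 * critical section cannot be accidentally duplicated or leaked.
 */
class read_lock_guard {
public:
	read_lock_guard() noexcept
	{
		rcu_read_lock();
	}

	~read_lock_guard()
	{
		rcu_read_unlock();
	}

	read_lock_guard(const read_lock_guard&) = delete;
	read_lock_guard& operator=(const read_lock_guard&) = delete;
};
} /* namespace urcu */
} /* namespace lttng */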
#include "stream.hpp"
#include <common/common.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <urcu/rculist.h>
struct lttng_ht_iter iter;
struct ctf_trace *trace = nullptr;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_lookup(session->ctf_traces_ht, subpath, &iter);
node = lttng_ht_iter_get_node_str(&iter);
if (!node) {
trace = nullptr;
}
end:
- rcu_read_unlock();
if (!trace) {
/* Try to create */
trace = ctf_trace_create(session, subpath);
void ctf_trace_put(struct ctf_trace *trace)
{
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
urcu_ref_put(&trace->ref, ctf_trace_release);
- rcu_read_unlock();
}
int ctf_trace_close(struct ctf_trace *trace)
{
struct relay_stream *stream;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_list_for_each_entry_rcu(stream, &trace->stream_list, stream_node)
{
/*
*/
try_stream_close(stream);
}
- rcu_read_unlock();
/*
* Since all references to the trace are held by its streams, we
* don't need to do any self-ref put.
{
struct relay_viewer_stream *vstream;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
vstream = rcu_dereference(trace->viewer_metadata_stream);
if (!vstream) {
goto end;
vstream = nullptr;
}
end:
- rcu_read_unlock();
return vstream;
}
#include <common/common.hpp>
#include <common/compat/endian.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
/*
stream->stream_handle,
net_seq_num);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_lookup(stream->indexes_ht, &net_seq_num, &iter);
node = lttng_ht_iter_get_node_u64(&iter);
if (node) {
}
}
end:
- rcu_read_unlock();
DBG2("Index %sfound or created in HT for stream ID %" PRIu64 " and seqnum %" PRIu64,
(index == NULL) ? "NOT " : "",
stream->stream_handle,
/*
* Ensure existence of index->lock for index unlock.
*/
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/*
* Index lock ensures that concurrent test and update of stream
* ref is atomic.
*/
LTTNG_ASSERT(index->ref.refcount != 0);
urcu_ref_put(&index->ref, index_release);
- rcu_read_unlock();
}
/*
struct lttng_ht_iter iter;
struct relay_index *index;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
/* Put self-ref from index. */
relay_index_put(index);
}
- rcu_read_unlock();
}
void relay_index_close_partial_fd(struct relay_stream *stream)
struct lttng_ht_iter iter;
struct relay_index *index;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
if (!index->index_file) {
continue;
*/
relay_index_put(index);
}
- rcu_read_unlock();
}
uint64_t relay_index_find_last(struct relay_stream *stream)
struct relay_index *index;
uint64_t net_seq_num = -1ULL;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
if (net_seq_num == -1ULL || index->index_n.key > net_seq_num) {
net_seq_num = index->index_n.key;
}
}
- rcu_read_unlock();
+
return net_seq_num;
}
struct relay_index *index;
int ret = 0;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
ret = relay_index_switch_file(
index, stream->index_file, stream->pos_after_last_complete_data_index);
if (ret) {
- goto end;
+ return ret;
}
}
-end:
- rcu_read_unlock();
+
return ret;
}
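Note the shape of the conversion above: since the guard releases the read lock from its destructor on every exit path, the error path can return directly, and the end: label that existed only to reach the unlock can be dropped. A minimal illustration, using a hypothetical lookup function (not part of the patch):

#include <common/common.hpp>
#include <common/urcu.hpp>

/* Hypothetical example: early returns are safe under the guard. */
static int example_lookup(struct lttng_ht *ht, uint64_t key)
{
	struct lttng_ht_iter iter;
	lttng::urcu::read_lock_guard read_lock;

	lttng_ht_lookup(ht, &key, &iter);
	if (!lttng_ht_iter_get_node_u64(&iter)) {
		return -1; /* read lock released here... */
	}

	return 0; /* ...and here, by the guard's destructor */
}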
#include <common/sessiond-comm/inet.hpp>
#include <common/sessiond-comm/relayd.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
#include <common/uri.hpp>
#include <common/utils.hpp>
if (!conn->viewer_session) {
goto end;
}
- rcu_read_lock();
- cds_list_for_each_entry_rcu(
- session, &conn->viewer_session->session_list, viewer_session_node)
+
{
- if (!session_get(session)) {
- continue;
- }
- current_val = uatomic_cmpxchg(&session->new_streams, 1, 0);
- ret = current_val;
- session_put(session);
- if (ret == 1) {
- goto end;
+ lttng::urcu::read_lock_guard read_lock;
+ cds_list_for_each_entry_rcu(
+ session, &conn->viewer_session->session_list, viewer_session_node)
+ {
+ if (!session_get(session)) {
+ continue;
+ }
+ current_val = uatomic_cmpxchg(&session->new_streams, 1, 0);
+ ret = current_val;
+ session_put(session);
+ if (ret == 1) {
+ goto end;
+ }
}
}
end:
- rcu_read_unlock();
return ret;
}
struct lttng_ht_iter iter;
struct relay_viewer_stream *vstream;
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
- struct ctf_trace *ctf_trace;
- struct lttng_viewer_stream send_stream = {};
+ cds_lfht_for_each_entry (
+ viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
+ struct ctf_trace *ctf_trace;
+ struct lttng_viewer_stream send_stream = {};
- health_code_update();
+ health_code_update();
- if (!viewer_stream_get(vstream)) {
- continue;
- }
+ if (!viewer_stream_get(vstream)) {
+ continue;
+ }
- pthread_mutex_lock(&vstream->stream->lock);
- /* Ignore if not the same session. */
- if (vstream->stream->trace->session->id != session_id ||
- (!ignore_sent_flag && vstream->sent_flag)) {
- pthread_mutex_unlock(&vstream->stream->lock);
- viewer_stream_put(vstream);
- continue;
- }
+ pthread_mutex_lock(&vstream->stream->lock);
+ /* Ignore if not the same session. */
+ if (vstream->stream->trace->session->id != session_id ||
+ (!ignore_sent_flag && vstream->sent_flag)) {
+ pthread_mutex_unlock(&vstream->stream->lock);
+ viewer_stream_put(vstream);
+ continue;
+ }
- ctf_trace = vstream->stream->trace;
- send_stream.id = htobe64(vstream->stream->stream_handle);
- send_stream.ctf_trace_id = htobe64(ctf_trace->id);
- send_stream.metadata_flag = htobe32(vstream->stream->is_metadata);
- if (lttng_strncpy(send_stream.path_name,
- vstream->path_name,
- sizeof(send_stream.path_name))) {
- pthread_mutex_unlock(&vstream->stream->lock);
- viewer_stream_put(vstream);
- ret = -1; /* Error. */
- goto end_unlock;
- }
- if (lttng_strncpy(send_stream.channel_name,
- vstream->channel_name,
- sizeof(send_stream.channel_name))) {
- pthread_mutex_unlock(&vstream->stream->lock);
- viewer_stream_put(vstream);
- ret = -1; /* Error. */
- goto end_unlock;
- }
+ ctf_trace = vstream->stream->trace;
+ send_stream.id = htobe64(vstream->stream->stream_handle);
+ send_stream.ctf_trace_id = htobe64(ctf_trace->id);
+ send_stream.metadata_flag = htobe32(vstream->stream->is_metadata);
+ if (lttng_strncpy(send_stream.path_name,
+ vstream->path_name,
+ sizeof(send_stream.path_name))) {
+ pthread_mutex_unlock(&vstream->stream->lock);
+ viewer_stream_put(vstream);
+ ret = -1; /* Error. */
+ goto end;
+ }
+ if (lttng_strncpy(send_stream.channel_name,
+ vstream->channel_name,
+ sizeof(send_stream.channel_name))) {
+ pthread_mutex_unlock(&vstream->stream->lock);
+ viewer_stream_put(vstream);
+ ret = -1; /* Error. */
+ goto end;
+ }
- DBG("Sending stream %" PRIu64 " to viewer", vstream->stream->stream_handle);
- vstream->sent_flag = true;
- pthread_mutex_unlock(&vstream->stream->lock);
+ DBG("Sending stream %" PRIu64 " to viewer", vstream->stream->stream_handle);
+ vstream->sent_flag = true;
+ pthread_mutex_unlock(&vstream->stream->lock);
- ret = send_response(sock, &send_stream, sizeof(send_stream));
- viewer_stream_put(vstream);
- if (ret < 0) {
- goto end_unlock;
+ ret = send_response(sock, &send_stream, sizeof(send_stream));
+ viewer_stream_put(vstream);
+ if (ret < 0) {
+ goto end;
+ }
}
}
ret = 0;
-end_unlock:
- rcu_read_unlock();
+end:
return ret;
}
* Create viewer streams for relay streams that are ready to be
* used for the given session id only.
*/
- rcu_read_lock();
- cds_lfht_for_each_entry (
- relay_session->ctf_traces_ht->ht, &iter.iter, ctf_trace, node.node) {
- bool trace_has_metadata_stream = false;
-
- health_code_update();
-
- if (!ctf_trace_get(ctf_trace)) {
- continue;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /*
- * Iterate over all the streams of the trace to see if we have a
- * metadata stream.
- */
- cds_list_for_each_entry_rcu(relay_stream, &ctf_trace->stream_list, stream_node)
- {
- bool is_metadata_stream;
+ cds_lfht_for_each_entry (
+ relay_session->ctf_traces_ht->ht, &iter.iter, ctf_trace, node.node) {
+ bool trace_has_metadata_stream = false;
- pthread_mutex_lock(&relay_stream->lock);
- is_metadata_stream = relay_stream->is_metadata;
- pthread_mutex_unlock(&relay_stream->lock);
+ health_code_update();
- if (is_metadata_stream) {
- trace_has_metadata_stream = true;
- break;
+ if (!ctf_trace_get(ctf_trace)) {
+ continue;
}
- }
- relay_stream = nullptr;
-
- /*
- * If there is no metadata stream in this trace at the moment
- * and we never sent one to the viewer, skip the trace. We
- * accept that the viewer will not see this trace at all.
- */
- if (!trace_has_metadata_stream && !ctf_trace->metadata_stream_sent_to_viewer) {
- ctf_trace_put(ctf_trace);
- continue;
- }
-
- cds_list_for_each_entry_rcu(relay_stream, &ctf_trace->stream_list, stream_node)
- {
- struct relay_viewer_stream *viewer_stream;
-
- if (!stream_get(relay_stream)) {
- continue;
+ /*
+ * Iterate over all the streams of the trace to see if we have a
+ * metadata stream.
+ */
+ cds_list_for_each_entry_rcu(
+ relay_stream, &ctf_trace->stream_list, stream_node)
+ {
+ bool is_metadata_stream;
+
+ pthread_mutex_lock(&relay_stream->lock);
+ is_metadata_stream = relay_stream->is_metadata;
+ pthread_mutex_unlock(&relay_stream->lock);
+
+ if (is_metadata_stream) {
+ trace_has_metadata_stream = true;
+ break;
+ }
}
- pthread_mutex_lock(&relay_stream->lock);
+ relay_stream = nullptr;
+
/*
- * stream published is protected by the session lock.
+ * If there is no metadata stream in this trace at the moment
+ * and we never sent one to the viewer, skip the trace. We
+ * accept that the viewer will not see this trace at all.
*/
- if (!relay_stream->published) {
- goto next;
+ if (!trace_has_metadata_stream &&
+ !ctf_trace->metadata_stream_sent_to_viewer) {
+ ctf_trace_put(ctf_trace);
+ continue;
}
- viewer_stream = viewer_stream_get_by_id(relay_stream->stream_handle);
- if (!viewer_stream) {
- struct lttng_trace_chunk *viewer_stream_trace_chunk = nullptr;
- /*
- * Save that we sent the metadata stream to the
- * viewer. So that we know what trace the viewer
- * is aware of.
- */
- if (relay_stream->is_metadata) {
- ctf_trace->metadata_stream_sent_to_viewer = true;
+ cds_list_for_each_entry_rcu(
+ relay_stream, &ctf_trace->stream_list, stream_node)
+ {
+ struct relay_viewer_stream *viewer_stream;
+
+ if (!stream_get(relay_stream)) {
+ continue;
}
+ pthread_mutex_lock(&relay_stream->lock);
/*
- * If a rotation is ongoing, use a copy of the
- * relay stream's chunk to ensure the stream
- * files exist.
- *
- * Otherwise, the viewer session's current trace
- * chunk can be used safely.
+ * A stream's 'published' flag is protected by the session lock.
*/
- if ((relay_stream->ongoing_rotation.is_set ||
- session_has_ongoing_rotation(relay_session)) &&
- relay_stream->trace_chunk) {
- viewer_stream_trace_chunk =
- lttng_trace_chunk_copy(relay_stream->trace_chunk);
- if (!viewer_stream_trace_chunk) {
- ret = -1;
- ctf_trace_put(ctf_trace);
- goto error_unlock;
+ if (!relay_stream->published) {
+ goto next;
+ }
+ viewer_stream =
+ viewer_stream_get_by_id(relay_stream->stream_handle);
+ if (!viewer_stream) {
+ struct lttng_trace_chunk *viewer_stream_trace_chunk =
+ nullptr;
+
+ /*
+ * Save that we sent the metadata stream to the
+ * viewer. So that we know what trace the viewer
+ * is aware of.
+ */
+ if (relay_stream->is_metadata) {
+ ctf_trace->metadata_stream_sent_to_viewer = true;
}
- } else {
+
/*
- * Transition the viewer session into the newest trace chunk
- * available.
+ * If a rotation is ongoing, use a copy of the
+ * relay stream's chunk to ensure the stream
+ * files exist.
+ *
+ * Otherwise, the viewer session's current trace
+ * chunk can be used safely.
*/
- if (!lttng_trace_chunk_ids_equal(
- viewer_session->current_trace_chunk,
- relay_stream->trace_chunk)) {
- ret = viewer_session_set_trace_chunk_copy(
- viewer_session, relay_stream->trace_chunk);
- if (ret) {
+ if ((relay_stream->ongoing_rotation.is_set ||
+ session_has_ongoing_rotation(relay_session)) &&
+ relay_stream->trace_chunk) {
+ viewer_stream_trace_chunk = lttng_trace_chunk_copy(
+ relay_stream->trace_chunk);
+ if (!viewer_stream_trace_chunk) {
ret = -1;
ctf_trace_put(ctf_trace);
goto error_unlock;
}
- }
-
- if (relay_stream->trace_chunk) {
+ } else {
/*
- * If the corresponding relay
- * stream's trace chunk is set,
- * the viewer stream will be
- * created under it.
- *
- * Note that a relay stream can
- * have a NULL output trace
- * chunk (for instance, after a
- * clear against a stopped
- * session).
+ * Transition the viewer session into the newest
+ * trace chunk available.
*/
- const bool reference_acquired =
- lttng_trace_chunk_get(
- viewer_session->current_trace_chunk);
+ if (!lttng_trace_chunk_ids_equal(
+ viewer_session->current_trace_chunk,
+ relay_stream->trace_chunk)) {
+ ret = viewer_session_set_trace_chunk_copy(
+ viewer_session,
+ relay_stream->trace_chunk);
+ if (ret) {
+ ret = -1;
+ ctf_trace_put(ctf_trace);
+ goto error_unlock;
+ }
+ }
- LTTNG_ASSERT(reference_acquired);
- viewer_stream_trace_chunk =
- viewer_session->current_trace_chunk;
+ if (relay_stream->trace_chunk) {
+ /*
+ * If the corresponding relay
+ * stream's trace chunk is set,
+ * the viewer stream will be
+ * created under it.
+ *
+ * Note that a relay stream can
+ * have a NULL output trace
+ * chunk (for instance, after a
+ * clear against a stopped
+ * session).
+ */
+ const bool reference_acquired =
+ lttng_trace_chunk_get(
+ viewer_session
+ ->current_trace_chunk);
+
+ LTTNG_ASSERT(reference_acquired);
+ viewer_stream_trace_chunk =
+ viewer_session->current_trace_chunk;
+ }
}
- }
- viewer_stream = viewer_stream_create(
- relay_stream, viewer_stream_trace_chunk, seek_t);
- lttng_trace_chunk_put(viewer_stream_trace_chunk);
- viewer_stream_trace_chunk = nullptr;
- if (!viewer_stream) {
- ret = -1;
- ctf_trace_put(ctf_trace);
- goto error_unlock;
- }
+ viewer_stream = viewer_stream_create(
+ relay_stream, viewer_stream_trace_chunk, seek_t);
+ lttng_trace_chunk_put(viewer_stream_trace_chunk);
+ viewer_stream_trace_chunk = nullptr;
+ if (!viewer_stream) {
+ ret = -1;
+ ctf_trace_put(ctf_trace);
+ goto error_unlock;
+ }
- if (nb_created) {
- /* Update number of created stream counter. */
- (*nb_created)++;
- }
- /*
- * Ensure a self-reference is preserved even
- * after we have put our local reference.
- */
- if (!viewer_stream_get(viewer_stream)) {
- ERR("Unable to get self-reference on viewer stream, logic error.");
- abort();
- }
- } else {
- if (!viewer_stream->sent_flag && nb_unsent) {
- /* Update number of unsent stream counter. */
- (*nb_unsent)++;
- }
- }
- /* Update number of total stream counter. */
- if (nb_total) {
- if (relay_stream->is_metadata) {
- if (!relay_stream->closed ||
- relay_stream->metadata_received >
- viewer_stream->metadata_sent) {
- (*nb_total)++;
+ if (nb_created) {
+ /* Update number of created stream counter. */
+ (*nb_created)++;
+ }
+ /*
+ * Ensure a self-reference is preserved even
+ * after we have put our local reference.
+ */
+ if (!viewer_stream_get(viewer_stream)) {
+ ERR("Unable to get self-reference on viewer stream, logic error.");
+ abort();
}
} else {
- if (!relay_stream->closed ||
- !(((int64_t) (relay_stream->prev_data_seq -
- relay_stream->last_net_seq_num)) >= 0)) {
- (*nb_total)++;
+ if (!viewer_stream->sent_flag && nb_unsent) {
+ /* Update number of unsent stream counter. */
+ (*nb_unsent)++;
}
}
+ /* Update number of total stream counter. */
+ if (nb_total) {
+ if (relay_stream->is_metadata) {
+ if (!relay_stream->closed ||
+ relay_stream->metadata_received >
+ viewer_stream->metadata_sent) {
+ (*nb_total)++;
+ }
+ } else {
+ if (!relay_stream->closed ||
+ !(((int64_t) (relay_stream->prev_data_seq -
+ relay_stream->last_net_seq_num)) >=
+ 0)) {
+ (*nb_total)++;
+ }
+ }
+ }
+ /* Put local reference. */
+ viewer_stream_put(viewer_stream);
+ next:
+ pthread_mutex_unlock(&relay_stream->lock);
+ stream_put(relay_stream);
}
- /* Put local reference. */
- viewer_stream_put(viewer_stream);
- next:
- pthread_mutex_unlock(&relay_stream->lock);
- stream_put(relay_stream);
+ relay_stream = nullptr;
+ ctf_trace_put(ctf_trace);
}
- relay_stream = nullptr;
- ctf_trace_put(ctf_trace);
}
ret = 0;
error_unlock:
- rcu_read_unlock();
if (relay_stream) {
pthread_mutex_unlock(&relay_stream->lock);
return -1;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (sessions_ht->ht, &iter.iter, session, session_n.node) {
- struct lttng_viewer_session *send_session;
+ {
+ lttng::urcu::read_lock_guard read_lock;
- health_code_update();
+ cds_lfht_for_each_entry (sessions_ht->ht, &iter.iter, session, session_n.node) {
+ struct lttng_viewer_session *send_session;
- pthread_mutex_lock(&session->lock);
- if (session->connection_closed) {
- /* Skip closed session */
- goto next_session;
- }
+ health_code_update();
+
+ pthread_mutex_lock(&session->lock);
+ if (session->connection_closed) {
+ /* Skip closed session */
+ goto next_session;
+ }
- if (count >= buf_count) {
- struct lttng_viewer_session *newbuf;
- uint32_t new_buf_count = buf_count << 1;
+ if (count >= buf_count) {
+ struct lttng_viewer_session *newbuf;
+ uint32_t new_buf_count = buf_count << 1;
- newbuf = (lttng_viewer_session *) realloc(
- send_session_buf, new_buf_count * sizeof(*send_session_buf));
- if (!newbuf) {
+ newbuf = (lttng_viewer_session *) realloc(
+ send_session_buf,
+ new_buf_count * sizeof(*send_session_buf));
+ if (!newbuf) {
+ ret = -1;
+ goto break_loop;
+ }
+ send_session_buf = newbuf;
+ buf_count = new_buf_count;
+ }
+ send_session = &send_session_buf[count];
+ if (lttng_strncpy(send_session->session_name,
+ session->session_name,
+ sizeof(send_session->session_name))) {
ret = -1;
goto break_loop;
}
- send_session_buf = newbuf;
- buf_count = new_buf_count;
- }
- send_session = &send_session_buf[count];
- if (lttng_strncpy(send_session->session_name,
- session->session_name,
- sizeof(send_session->session_name))) {
- ret = -1;
- goto break_loop;
- }
- if (lttng_strncpy(send_session->hostname,
- session->hostname,
- sizeof(send_session->hostname))) {
- ret = -1;
- goto break_loop;
- }
- send_session->id = htobe64(session->id);
- send_session->live_timer = htobe32(session->live_timer);
- if (session->viewer_attached) {
- send_session->clients = htobe32(1);
- } else {
- send_session->clients = htobe32(0);
+ if (lttng_strncpy(send_session->hostname,
+ session->hostname,
+ sizeof(send_session->hostname))) {
+ ret = -1;
+ goto break_loop;
+ }
+ send_session->id = htobe64(session->id);
+ send_session->live_timer = htobe32(session->live_timer);
+ if (session->viewer_attached) {
+ send_session->clients = htobe32(1);
+ } else {
+ send_session->clients = htobe32(0);
+ }
+ send_session->streams = htobe32(session->stream_count);
+ count++;
+ next_session:
+ pthread_mutex_unlock(&session->lock);
+ continue;
+ break_loop:
+ pthread_mutex_unlock(&session->lock);
+ break;
}
- send_session->streams = htobe32(session->stream_count);
- count++;
- next_session:
- pthread_mutex_unlock(&session->lock);
- continue;
- break_loop:
- pthread_mutex_unlock(&session->lock);
- break;
}
- rcu_read_unlock();
+
if (ret < 0) {
goto end_free;
}
(void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
/* Cleanup remaining connection object. */
- rcu_read_lock();
- cds_lfht_for_each_entry (viewer_connections_ht->ht, &iter.iter, destroy_conn, sock_n.node) {
- health_code_update();
- connection_put(destroy_conn);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ viewer_connections_ht->ht, &iter.iter, destroy_conn, sock_n.node) {
+ health_code_update();
+ connection_put(destroy_conn);
+ }
}
- rcu_read_unlock();
error_poll_create:
lttng_ht_destroy(viewer_connections_ht);
viewer_connections_ht_error:
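The explicit braces introduced in the cleanup hunk above are deliberate: the guard's destructor runs at the closing brace, so the RCU read-side critical section ends before lttng_ht_destroy() is reached. This matters because cds_lfht_destroy(), which the lttng_ht wrapper ultimately calls, must not be invoked from within a read-side critical section. Schematically (hypothetical helper, not part of the patch):

#include <common/common.hpp>
#include <common/urcu.hpp>

/* Hypothetical illustration of the scoping pattern. */
static void example_teardown(struct lttng_ht *connections_ht)
{
	{
		lttng::urcu::read_lock_guard read_lock;

		/* ... iterate connections_ht and put each connection ... */
	} /* read lock released at the closing brace */

	/* Safe: no longer inside a read-side critical section. */
	lttng_ht_destroy(connections_ht);
}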
#include <common/sessiond-comm/relayd.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/string-utils/format.hpp>
+#include <common/urcu.hpp>
#include <common/uri.hpp>
#include <common/utils.hpp>
* session lock.
*/
pthread_mutex_lock(&session->lock);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_list_for_each_entry_rcu(stream, &session->recv_list, recv_node)
{
stream_publish(stream);
}
- rcu_read_unlock();
/*
* Inform the viewer that there are new streams in the session.
* to iterate over all streams to find the one associated with
* the right session_id.
*/
- rcu_read_lock();
- cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
- if (!stream_get(stream)) {
- continue;
- }
- if (stream->trace->session->id == msg.session_id) {
- pthread_mutex_lock(&stream->lock);
- stream->data_pending_check_done = false;
- pthread_mutex_unlock(&stream->lock);
- DBG("Set begin data pending flag to stream %" PRIu64,
- stream->stream_handle);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
+ if (!stream_get(stream)) {
+ continue;
+ }
+
+ if (stream->trace->session->id == msg.session_id) {
+ pthread_mutex_lock(&stream->lock);
+ stream->data_pending_check_done = false;
+ pthread_mutex_unlock(&stream->lock);
+ DBG("Set begin data pending flag to stream %" PRIu64,
+ stream->stream_handle);
+ }
+
+ stream_put(stream);
}
- stream_put(stream);
}
- rcu_read_unlock();
memset(&reply, 0, sizeof(reply));
/* All good, send back reply. */
* Iterate over all streams to see if the begin data pending
* flag is set.
*/
- rcu_read_lock();
- cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
- if (!stream_get(stream)) {
- continue;
- }
- if (stream->trace->session->id != msg.session_id) {
- stream_put(stream);
- continue;
- }
- pthread_mutex_lock(&stream->lock);
- if (!stream->data_pending_check_done) {
- uint64_t stream_seq;
+ {
+ lttng::urcu::read_lock_guard read_lock;
- if (session_streams_have_index(conn->session)) {
- /*
- * Ensure that both the index and stream data have been
- * flushed up to the requested point.
- */
- stream_seq =
- std::min(stream->prev_data_seq, stream->prev_index_seq);
- } else {
- stream_seq = stream->prev_data_seq;
+ cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
+ if (!stream_get(stream)) {
+ continue;
}
- if (!stream->closed ||
- !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) {
- is_data_inflight = 1;
- DBG("Data is still in flight for stream %" PRIu64,
- stream->stream_handle);
- pthread_mutex_unlock(&stream->lock);
+
+ if (stream->trace->session->id != msg.session_id) {
stream_put(stream);
- break;
+ continue;
+ }
+
+ pthread_mutex_lock(&stream->lock);
+ if (!stream->data_pending_check_done) {
+ uint64_t stream_seq;
+
+ if (session_streams_have_index(conn->session)) {
+ /*
+ * Ensure that both the index and stream data have been
+ * flushed up to the requested point.
+ */
+ stream_seq = std::min(stream->prev_data_seq,
+ stream->prev_index_seq);
+ } else {
+ stream_seq = stream->prev_data_seq;
+ }
+
+ if (!stream->closed ||
+ !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) {
+ is_data_inflight = 1;
+ DBG("Data is still in flight for stream %" PRIu64,
+ stream->stream_handle);
+ pthread_mutex_unlock(&stream->lock);
+ stream_put(stream);
+ break;
+ }
}
+
+ pthread_mutex_unlock(&stream->lock);
+ stream_put(stream);
}
- pthread_mutex_unlock(&stream->lock);
- stream_put(stream);
}
- rcu_read_unlock();
memset(&reply, 0, sizeof(reply));
/* All good, send back reply. */
exit:
error:
/* Cleanup remaining connection object. */
- rcu_read_lock();
- cds_lfht_for_each_entry (relay_connections_ht->ht, &iter.iter, destroy_conn, sock_n.node) {
- health_code_update();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- session_abort(destroy_conn->session);
+ cds_lfht_for_each_entry (
+ relay_connections_ht->ht, &iter.iter, destroy_conn, sock_n.node) {
+ health_code_update();
- /*
- * No need to grab another ref, because we own
- * destroy_conn.
- */
- relay_thread_close_connection(&events, destroy_conn->sock->fd, destroy_conn);
+ session_abort(destroy_conn->session);
+
+ /*
+ * No need to grab another ref, because we own
+ * destroy_conn.
+ */
+ relay_thread_close_connection(
+ &events, destroy_conn->sock->fd, destroy_conn);
+ }
}
- rcu_read_unlock();
(void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
error_poll_create:
#include <common/defaults.hpp>
#include <common/fd-tracker/utils.hpp>
#include <common/time.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <common/uuid.hpp>
struct lttng_ht_node_u64 *node;
struct lttng_ht_iter iter;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_lookup(sessions_ht, &id, &iter);
node = lttng_ht_iter_get_node_u64(&iter);
if (!node) {
session = nullptr;
}
end:
- rcu_read_unlock();
return session;
}
goto end;
}
- rcu_read_lock();
/*
* Sample the 'ongoing_rotation' status of all relay sessions that
* originate from the same session daemon session.
*/
- cds_lfht_for_each_entry (sessions_ht->ht, &iter.iter, iterated_session, session_n.node) {
- if (!session_get(iterated_session)) {
- continue;
- }
-
- if (session == iterated_session) {
- /* Skip this session. */
- goto next_session_no_unlock;
- }
-
- pthread_mutex_lock(&iterated_session->lock);
-
- if (!iterated_session->id_sessiond.is_set) {
- /*
- * Session belongs to a peer that doesn't support
- * rotations.
- */
- goto next_session;
- }
-
- if (session->sessiond_uuid != iterated_session->sessiond_uuid) {
- /* Sessions do not originate from the same sessiond. */
- goto next_session;
- }
-
- if (LTTNG_OPTIONAL_GET(session->id_sessiond) !=
- LTTNG_OPTIONAL_GET(iterated_session->id_sessiond)) {
- /*
- * Sessions do not originate from the same sessiond
- * session.
- */
- goto next_session;
- }
-
- ongoing_rotation = iterated_session->ongoing_rotation;
-
- next_session:
- pthread_mutex_unlock(&iterated_session->lock);
- next_session_no_unlock:
- session_put(iterated_session);
-
- if (ongoing_rotation) {
- break;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ sessions_ht->ht, &iter.iter, iterated_session, session_n.node) {
+ if (!session_get(iterated_session)) {
+ continue;
+ }
+
+ if (session == iterated_session) {
+ /* Skip this session. */
+ goto next_session_no_unlock;
+ }
+
+ pthread_mutex_lock(&iterated_session->lock);
+
+ if (!iterated_session->id_sessiond.is_set) {
+ /*
+ * Session belongs to a peer that doesn't support
+ * rotations.
+ */
+ goto next_session;
+ }
+
+ if (session->sessiond_uuid != iterated_session->sessiond_uuid) {
+ /* Sessions do not originate from the same sessiond. */
+ goto next_session;
+ }
+
+ if (LTTNG_OPTIONAL_GET(session->id_sessiond) !=
+ LTTNG_OPTIONAL_GET(iterated_session->id_sessiond)) {
+ /*
+ * Sessions do not originate from the same sessiond
+ * session.
+ */
+ goto next_session;
+ }
+
+ ongoing_rotation = iterated_session->ongoing_rotation;
+
+ next_session:
+ pthread_mutex_unlock(&iterated_session->lock);
+ next_session_no_unlock:
+ session_put(iterated_session);
+
+ if (ongoing_rotation) {
+ break;
+ }
}
}
- rcu_read_unlock();
end:
return ongoing_rotation;
if (!session) {
return;
}
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
urcu_ref_put(&session->ref, session_release);
- rcu_read_unlock();
}
int session_close(struct relay_session *session)
session->connection_closed = true;
pthread_mutex_unlock(&session->lock);
- rcu_read_lock();
- cds_lfht_for_each_entry (session->ctf_traces_ht->ht, &iter.iter, trace, node.node) {
- ret = ctf_trace_close(trace);
- if (ret) {
- goto rcu_unlock;
- }
- }
- cds_list_for_each_entry_rcu(stream, &session->recv_list, recv_node)
{
- /* Close streams which have not been published yet. */
- try_stream_close(stream);
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (session->ctf_traces_ht->ht, &iter.iter, trace, node.node) {
+ ret = ctf_trace_close(trace);
+ if (ret) {
+ goto end;
+ }
+ }
+
+ cds_list_for_each_entry_rcu(stream, &session->recv_list, recv_node)
+ {
+ /* Close streams which have not been published yet. */
+ try_stream_close(stream);
+ }
}
-rcu_unlock:
- rcu_read_unlock();
+
+end:
if (ret) {
return ret;
}
+
/* Put self-reference from create. */
session_put(session);
return ret;
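One C++ subtlety in the session_close hunk above: the goto end inside the braced scope jumps out of the block, and the language guarantees that destructors of locals in the exited scope run as part of the jump, so the read lock is still released before end: is reached. A self-contained illustration (hypothetical condition, not part of the patch):

#include <common/urcu.hpp>

/* Hypothetical example: leaving a scope via goto still runs the
 * destructors of the locals declared in that scope. */
static int example_goto_out_of_scope(bool fail)
{
	int ret = 0;

	{
		lttng::urcu::read_lock_guard read_lock;

		if (fail) {
			ret = -1;
			goto end; /* rcu_read_unlock() runs during the jump */
		}
	}
end:
	return ret;
}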
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (sessions_ht->ht, &iter.iter, session, session_n.node) {
- if (!session_get(session)) {
- continue;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (sessions_ht->ht, &iter.iter, session, session_n.node) {
+ if (!session_get(session)) {
+ continue;
+ }
+ DBG("session %p refcount %ld session %" PRIu64,
+ session,
+ session->ref.refcount,
+ session->id);
+ session_put(session);
}
- DBG("session %p refcount %ld session %" PRIu64,
- session,
- session->ref.refcount,
- session->id);
- session_put(session);
}
- rcu_read_unlock();
}
#include <common/macros.hpp>
#include <common/string-utils/format.hpp>
#include <common/trace-chunk-registry.hpp>
+#include <common/urcu.hpp>
#include <inttypes.h>
#include <stdio.h>
DBG("Destroying trace chunk registry associated to sessiond {%s}", uuid_str);
if (element->sessiond_trace_chunk_registry) {
/* Unpublish. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_del(element->sessiond_trace_chunk_registry->ht, &element->ht_node);
- rcu_read_unlock();
element->sessiond_trace_chunk_registry = nullptr;
}
struct cds_lfht_node *node;
struct cds_lfht_iter iter;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_lookup(sessiond_registry->ht,
trace_chunk_registry_ht_key_hash(key),
trace_chunk_registry_ht_key_match,
element = nullptr;
}
}
- rcu_read_unlock();
return element;
}
trace_chunk_registry = nullptr;
/* Attempt to publish the new element. */
- rcu_read_lock();
- while (true) {
- struct cds_lfht_node *published_node;
- struct trace_chunk_registry_ht_element *published_element;
-
- published_node =
- cds_lfht_add_unique(sessiond_registry->ht,
- trace_chunk_registry_ht_key_hash(&new_element->key),
- trace_chunk_registry_ht_key_match,
- &new_element->key,
- &new_element->ht_node);
- if (published_node == &new_element->ht_node) {
- /* New element published successfully. */
- DBG("Created trace chunk registry for sessiond {%s}", uuid_str);
- new_element->sessiond_trace_chunk_registry = sessiond_registry;
- break;
- }
-
+ {
/*
- * An equivalent element was published during the creation of
- * this element. Attempt to acquire a reference to the one that
- * was already published and release the reference to the copy
- * we created if successful.
+ * Keep the RCU read lock held across all attempts, purely
+ * for efficiency reasons.
*/
- published_element = lttng::utils::container_of(
- published_node, &trace_chunk_registry_ht_element::ht_node);
- if (trace_chunk_registry_ht_element_get(published_element)) {
- DBG("Acquired reference to trace chunk registry of sessiond {%s}",
- uuid_str);
- trace_chunk_registry_ht_element_put(new_element);
- new_element = nullptr;
- break;
+ lttng::urcu::read_lock_guard read_lock;
+ while (true) {
+ struct cds_lfht_node *published_node;
+ struct trace_chunk_registry_ht_element *published_element;
+
+ published_node = cds_lfht_add_unique(
+ sessiond_registry->ht,
+ trace_chunk_registry_ht_key_hash(&new_element->key),
+ trace_chunk_registry_ht_key_match,
+ &new_element->key,
+ &new_element->ht_node);
+ if (published_node == &new_element->ht_node) {
+ /* New element published successfully. */
+ DBG("Created trace chunk registry for sessiond {%s}", uuid_str);
+ new_element->sessiond_trace_chunk_registry = sessiond_registry;
+ break;
+ }
+
+ /*
+ * An equivalent element was published during the creation of
+ * this element. Attempt to acquire a reference to the one that
+ * was already published and release the reference to the copy
+ * we created if successful.
+ */
+ published_element = lttng::utils::container_of(
+ published_node, &trace_chunk_registry_ht_element::ht_node);
+ if (trace_chunk_registry_ht_element_get(published_element)) {
+ DBG("Acquired reference to trace chunk registry of sessiond {%s}",
+ uuid_str);
+ trace_chunk_registry_ht_element_put(new_element);
+ new_element = nullptr;
+ break;
+ }
+ /*
+ * A reference to the previously published element could not
+ * be acquired. Hence, retry to publish our copy of the
+ * element.
+ */
}
- /*
- * A reference to the previously published element could not
- * be acquired. Hence, retry to publish our copy of the
- * element.
- */
}
- rcu_read_unlock();
end:
if (ret < 0) {
ERR("Failed to create trace chunk registry for session daemon {%s}", uuid_str);
#include <common/defaults.hpp>
#include <common/fs-handle.hpp>
#include <common/sessiond-comm/relayd.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <algorithm>
struct lttng_ht_iter iter;
struct relay_stream *stream = nullptr;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_lookup(relay_streams_ht, &stream_id, &iter);
node = lttng_ht_iter_get_node_u64(&iter);
if (!node) {
stream = nullptr;
}
end:
- rcu_read_unlock();
return stream;
}
void stream_put(struct relay_stream *stream)
{
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
LTTNG_ASSERT(stream->ref.refcount != 0);
/*
* Wait until we have processed all the stream packets before
* actually putting our last stream reference.
*/
urcu_ref_put(&stream->ref, stream_release);
- rcu_read_unlock();
}
int stream_set_pending_rotation(struct relay_stream *stream,
struct lttng_ht_iter iter;
struct relay_index *index;
- rcu_read_lock();
- cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
- DBG("index %p net_seq_num %" PRIu64 " refcount %ld"
- " stream %" PRIu64 " trace %" PRIu64 " session %" PRIu64,
- index,
- index->index_n.key,
- stream->ref.refcount,
- index->stream->stream_handle,
- index->stream->trace->id,
- index->stream->trace->session->id);
- }
- rcu_read_unlock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
+ DBG("index %p net_seq_num %" PRIu64 " refcount %ld"
+ " stream %" PRIu64 " trace %" PRIu64 " session %" PRIu64,
+ index,
+ index->index_n.key,
+ stream->ref.refcount,
+ index->stream->stream_handle,
+ index->stream->trace->id,
+ index->stream->trace->session->id);
+ }
+ }
}
int stream_reset_file(struct relay_stream *stream)
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
- if (!stream_get(stream)) {
- continue;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
+ if (!stream_get(stream)) {
+ continue;
+ }
+
+ DBG("stream %p refcount %ld stream %" PRIu64 " trace %" PRIu64
+ " session %" PRIu64,
+ stream,
+ stream->ref.refcount,
+ stream->stream_handle,
+ stream->trace->id,
+ stream->trace->session->id);
+ print_stream_indexes(stream);
+ stream_put(stream);
}
- DBG("stream %p refcount %ld stream %" PRIu64 " trace %" PRIu64 " session %" PRIu64,
- stream,
- stream->ref.refcount,
- stream->stream_handle,
- stream->trace->id,
- stream->trace->session->id);
- print_stream_indexes(stream);
- stream_put(stream);
}
- rcu_read_unlock();
}
#include "viewer-stream.hpp"
#include <common/common.hpp>
+#include <common/urcu.hpp>
#include <urcu/rculist.h>
* TODO: improvement: create more efficient list of
* vstream per session.
*/
- cds_lfht_for_each_entry (viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
- if (!viewer_stream_get(vstream)) {
- continue;
- }
- if (vstream->stream->trace->session != session) {
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
+ if (!viewer_stream_get(vstream)) {
+ continue;
+ }
+ if (vstream->stream->trace->session != session) {
+ viewer_stream_put(vstream);
+ continue;
+ }
+ /* Put local reference. */
+ viewer_stream_put(vstream);
+ /*
+ * We have reached one of the viewer stream's lifetime
+ * end conditions. This "put" will cause the proper
+ * teardown of the viewer stream.
+ */
viewer_stream_put(vstream);
- continue;
}
- /* Put local reference. */
- viewer_stream_put(vstream);
- /*
- * We have reached one of the viewer stream's lifetime
- * end condition. This "put" will cause the proper
- * teardown of the viewer stream.
- */
- viewer_stream_put(vstream);
}
+
lttng_trace_chunk_put(vsession->current_trace_chunk);
vsession->current_trace_chunk = nullptr;
viewer_session_detach(vsession, session);
{
struct relay_session *session;
- rcu_read_lock();
- cds_list_for_each_entry_rcu(session, &vsession->session_list, viewer_session_node)
{
- viewer_session_close_one_session(vsession, session);
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_list_for_each_entry_rcu(session, &vsession->session_list, viewer_session_node)
+ {
+ viewer_session_close_one_session(vsession, session);
+ }
}
- rcu_read_unlock();
}
/*
if (!session->viewer_attached) {
goto end;
}
- rcu_read_lock();
- cds_list_for_each_entry_rcu(iter, &vsession->session_list, viewer_session_node)
+
{
- if (session == iter) {
- found = 1;
- goto end_rcu_unlock;
+ lttng::urcu::read_lock_guard read_lock;
+ cds_list_for_each_entry_rcu(iter, &vsession->session_list, viewer_session_node)
+ {
+ if (session == iter) {
+ found = 1;
+ break;
+ }
}
}
-end_rcu_unlock:
- rcu_read_unlock();
+
end:
pthread_mutex_unlock(&session->lock);
return found;
#include <common/common.hpp>
#include <common/compat/string.hpp>
#include <common/index/index.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <algorithm>
struct lttng_ht_iter iter;
struct relay_viewer_stream *vstream = nullptr;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_lookup(viewer_streams_ht, &id, &iter);
node = lttng_ht_iter_get_node_u64(&iter);
if (!node) {
vstream = nullptr;
}
end:
- rcu_read_unlock();
return vstream;
}
void viewer_stream_put(struct relay_viewer_stream *vstream)
{
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
urcu_ref_put(&vstream->ref, viewer_stream_release);
- rcu_read_unlock();
}
void viewer_stream_close_files(struct relay_viewer_stream *vstream)
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
- if (!viewer_stream_get(vstream)) {
- continue;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
+ if (!viewer_stream_get(vstream)) {
+ continue;
+ }
+ DBG("vstream %p refcount %ld stream %" PRIu64 " trace %" PRIu64
+ " session %" PRIu64,
+ vstream,
+ vstream->ref.refcount,
+ vstream->stream->stream_handle,
+ vstream->stream->trace->id,
+ vstream->stream->trace->session->id);
+ viewer_stream_put(vstream);
}
- DBG("vstream %p refcount %ld stream %" PRIu64 " trace %" PRIu64 " session %" PRIu64,
- vstream,
- vstream->ref.refcount,
- vstream->stream->stream_handle,
- vstream->stream->trace->id,
- vstream->stream->trace->session->id);
- viewer_stream_put(vstream);
}
- rcu_read_unlock();
}
#include <common/dynamic-array.hpp>
#include <common/macros.hpp>
#include <common/optional.hpp>
+#include <common/urcu.hpp>
#include <lttng/action/action-internal.hpp>
#include <lttng/action/list-internal.hpp>
enum lttng_error_code cmd_ret;
struct lttng_action *action = item->action;
+ lttng::urcu::read_lock_guard read_lock;
+
action_status = lttng_action_start_session_get_session_name(action, &session_name);
if (action_status != LTTNG_ACTION_STATUS_OK) {
ERR("Failed to get session name from `%s` action", get_action_name(action));
}
session_lock_list();
- rcu_read_lock();
session = session_find_by_id(LTTNG_OPTIONAL_GET(item->context.session_id));
if (!session) {
DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
session_unlock(session);
session_put(session);
error_unlock_list:
- rcu_read_unlock();
session_unlock_list();
end:
return ret;
enum lttng_error_code cmd_ret;
struct lttng_action *action = item->action;
+ lttng::urcu::read_lock_guard read_lock;
+
action_status = lttng_action_stop_session_get_session_name(action, &session_name);
if (action_status != LTTNG_ACTION_STATUS_OK) {
ERR("Failed to get session name from `%s` action", get_action_name(action));
}
session_lock_list();
- rcu_read_lock();
session = session_find_by_id(LTTNG_OPTIONAL_GET(item->context.session_id));
if (!session) {
DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
session_unlock(session);
session_put(session);
error_unlock_list:
- rcu_read_unlock();
session_unlock_list();
end:
return ret;
enum lttng_error_code cmd_ret;
struct lttng_action *action = item->action;
+ lttng::urcu::read_lock_guard read_lock;
+
action_status = lttng_action_rotate_session_get_session_name(action, &session_name);
if (action_status != LTTNG_ACTION_STATUS_OK) {
ERR("Failed to get session name from `%s` action", get_action_name(action));
}
session_lock_list();
- rcu_read_lock();
session = session_find_by_id(LTTNG_OPTIONAL_GET(item->context.session_id));
if (!session) {
DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
session_unlock(session);
session_put(session);
error_unlock_list:
- rcu_read_unlock();
session_unlock_list();
end:
return ret;
default_snapshot_output.max_size = UINT64_MAX;
+ lttng::urcu::read_lock_guard read_lock;
+
/*
* Validate if, at the moment the action was queued, the target session
* existed. If not, skip the action altogether.
}
session_lock_list();
- rcu_read_lock();
session = session_find_by_id(LTTNG_OPTIONAL_GET(item->context.session_id));
if (!session) {
DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
session_unlock(session);
session_put(session);
error_unlock_list:
- rcu_read_unlock();
session_unlock_list();
end:
return ret;
#include <common/common.hpp>
#include <common/compat/endian.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
#include <common/uri.hpp>
#include <common/utils.hpp>
if (session->ust_session) {
const struct agent *agt;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
agt = trace_ust_find_agent(session->ust_session, app->domain);
if (agt) {
agent_update(agt, app);
}
- rcu_read_unlock();
}
session_unlock(session);
session_put(session);
}
- rcu_read_lock();
- /*
- * We are protected against the addition of new events by the session
- * list lock being held.
- */
- cds_lfht_for_each_entry (
- the_trigger_agents_ht_by_domain->ht, &iter.iter, trigger_agent, node.node) {
- agent_update(trigger_agent, app);
+ {
+ /*
+ * We are protected against the addition of new events by the session
+ * list lock being held.
+ */
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ the_trigger_agents_ht_by_domain->ht, &iter.iter, trigger_agent, node.node) {
+ agent_update(trigger_agent, app);
+ }
}
- rcu_read_unlock();
}
/*
#include <common/common.hpp>
#include <common/compat/endian.hpp>
#include <common/sessiond-comm/agent.hpp>
+#include <common/urcu.hpp>
#include <lttng/condition/condition.h>
#include <lttng/condition/event-rule-matches.h>
LTTNG_ASSERT(event);
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
- if (app->domain != domain) {
- continue;
- }
+ cds_lfht_for_each_entry (
+ the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
+ if (app->domain != domain) {
+ continue;
+ }
- /* Enable event on agent application through TCP socket. */
- ret = enable_event(app, event);
- if (ret != LTTNG_OK) {
- goto error;
+ /* Enable event on agent application through TCP socket. */
+ ret = enable_event(app, event);
+ if (ret != LTTNG_OK) {
+ goto error;
+ }
}
}
ret = LTTNG_OK;
error:
- rcu_read_unlock();
return ret;
}
goto error;
}
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
- struct agent_app_ctx *agent_ctx;
+ cds_lfht_for_each_entry (
+ the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
+ struct agent_app_ctx *agent_ctx;
- if (app->domain != domain) {
- continue;
- }
+ if (app->domain != domain) {
+ continue;
+ }
- agent_ctx = create_app_ctx(ctx);
- if (!agent_ctx) {
- ret = LTTNG_ERR_NOMEM;
- goto error_unlock;
- }
+ agent_ctx = create_app_ctx(ctx);
+ if (!agent_ctx) {
+ ret = LTTNG_ERR_NOMEM;
+ goto error_unlock;
+ }
- /* Enable event on agent application through TCP socket. */
- ret = app_context_op(app, agent_ctx, AGENT_CMD_APP_CTX_ENABLE);
- destroy_app_ctx(agent_ctx);
- if (ret != LTTNG_OK) {
- goto error_unlock;
+ /* Enable event on agent application through TCP socket. */
+ ret = app_context_op(app, agent_ctx, AGENT_CMD_APP_CTX_ENABLE);
+ destroy_app_ctx(agent_ctx);
+ if (ret != LTTNG_OK) {
+ goto error_unlock;
+ }
}
}
ret = LTTNG_OK;
error_unlock:
- rcu_read_unlock();
error:
return ret;
}
goto end;
}
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
- if (app->domain != domain) {
- continue;
- }
+ cds_lfht_for_each_entry (
+ the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
+ if (app->domain != domain) {
+ continue;
+ }
- /* Enable event on agent application through TCP socket. */
- ret = disable_event(app, event);
- if (ret != LTTNG_OK) {
- goto error;
+ /* Enable event on agent application through TCP socket. */
+ ret = disable_event(app, event);
+ if (ret != LTTNG_OK) {
+ goto error;
+ }
}
}
LTTNG_ASSERT(!AGENT_EVENT_IS_ENABLED(event));
error:
- rcu_read_unlock();
end:
return ret;
}
struct lttng_ht_iter iter;
LTTNG_ASSERT(ctx);
-
- rcu_read_lock();
DBG2("Disabling agent application context %s:%s", ctx->provider_name, ctx->ctx_name);
- cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
- if (app->domain != domain) {
- continue;
- }
- ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_DISABLE);
- if (ret != LTTNG_OK) {
- goto end;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
+ if (app->domain != domain) {
+ continue;
+ }
+
+ ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_DISABLE);
+ if (ret != LTTNG_OK) {
+ goto end;
+ }
}
}
end:
- rcu_read_unlock();
return ret;
}
goto error;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
- ssize_t nb_ev;
- struct lttng_event *agent_events;
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* Skip domain not asked by the list. */
- if (app->domain != domain) {
- continue;
- }
+ cds_lfht_for_each_entry (
+ the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
+ ssize_t nb_ev;
+ struct lttng_event *agent_events;
- nb_ev = list_events(app, &agent_events);
- if (nb_ev < 0) {
- ret = nb_ev;
- goto error_unlock;
- }
+ /* Skip domain not asked by the list. */
+ if (app->domain != domain) {
+ continue;
+ }
- if (count + nb_ev > nbmem) {
- /* In case the realloc fails, we free the memory */
- struct lttng_event *new_tmp_events;
- size_t new_nbmem;
-
- new_nbmem = std::max(count + nb_ev, nbmem << 1);
- DBG2("Reallocating agent event list from %zu to %zu entries",
- nbmem,
- new_nbmem);
- new_tmp_events = (lttng_event *) realloc(
- tmp_events, new_nbmem * sizeof(*new_tmp_events));
- if (!new_tmp_events) {
- PERROR("realloc agent events");
- ret = -ENOMEM;
- free(agent_events);
- goto error_unlock;
+ nb_ev = list_events(app, &agent_events);
+ if (nb_ev < 0) {
+ ret = nb_ev;
+ goto error;
+ }
+
+ if (count + nb_ev > nbmem) {
+ /* In case the realloc fails, we free the memory */
+ struct lttng_event *new_tmp_events;
+ size_t new_nbmem;
+
+ new_nbmem = std::max(count + nb_ev, nbmem << 1);
+ DBG2("Reallocating agent event list from %zu to %zu entries",
+ nbmem,
+ new_nbmem);
+ new_tmp_events = (lttng_event *) realloc(
+ tmp_events, new_nbmem * sizeof(*new_tmp_events));
+ if (!new_tmp_events) {
+ PERROR("realloc agent events");
+ ret = -ENOMEM;
+ free(agent_events);
+ goto error;
+ }
+
+ /* Zero the new memory */
+ memset(new_tmp_events + nbmem,
+ 0,
+ (new_nbmem - nbmem) * sizeof(*new_tmp_events));
+ nbmem = new_nbmem;
+ tmp_events = new_tmp_events;
}
- /* Zero the new memory */
- memset(new_tmp_events + nbmem,
- 0,
- (new_nbmem - nbmem) * sizeof(*new_tmp_events));
- nbmem = new_nbmem;
- tmp_events = new_tmp_events;
+ memcpy(tmp_events + count, agent_events, nb_ev * sizeof(*tmp_events));
+ free(agent_events);
+ count += nb_ev;
}
- memcpy(tmp_events + count, agent_events, nb_ev * sizeof(*tmp_events));
- free(agent_events);
- count += nb_ev;
}
- rcu_read_unlock();
ret = count;
*events = tmp_events;
return ret;
-error_unlock:
- rcu_read_unlock();
error:
free(tmp_events);
return ret;
DBG3("Agent destroy");
- rcu_read_lock();
- cds_lfht_for_each_entry (agt->events->ht, &iter.iter, node, node) {
- int ret;
- struct agent_event *event;
-
- /*
- * When destroying an event, we have to try to disable it on the
- * agent side so the event stops generating data. The return
- * value is not important since we have to continue anyway
- * destroying the object.
- */
- event = lttng::utils::container_of(node, &agent_event::node);
- (void) agent_disable_event(event, agt->domain);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (agt->events->ht, &iter.iter, node, node) {
+ int ret;
+ struct agent_event *event;
+
+ /*
+ * When destroying an event, we have to try to disable it on the
+ * agent side so the event stops generating data. The return
+ * value is not important since we have to continue anyway
+ * destroying the object.
+ */
+ event = lttng::utils::container_of(node, &agent_event::node);
+ (void) agent_disable_event(event, agt->domain);
+
+ ret = lttng_ht_del(agt->events, &iter);
+ LTTNG_ASSERT(!ret);
+ call_rcu(&node->head, destroy_event_agent_rcu);
+ }
- ret = lttng_ht_del(agt->events, &iter);
- LTTNG_ASSERT(!ret);
- call_rcu(&node->head, destroy_event_agent_rcu);
+ cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node)
+ {
+ (void) disable_context(ctx, agt->domain);
+ cds_list_del(&ctx->list_node);
+ call_rcu(&ctx->rcu_node, destroy_app_ctx_rcu);
+ }
}
- cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node)
- {
- (void) disable_context(ctx, agt->domain);
- cds_list_del(&ctx->list_node);
- call_rcu(&ctx->rcu_node, destroy_app_ctx_rcu);
- }
- rcu_read_unlock();
lttng_ht_destroy(agt->events);
free(agt);
}
* happen. The hash table deletion is ONLY done through this call when the
* main sessiond thread is torn down.
*/
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
app = agent_find_app_by_sock(sock);
LTTNG_ASSERT(app);
/* The application is freed in a RCU call but the socket is closed here. */
agent_destroy_app(app);
- rcu_read_unlock();
}
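Worth noting in the hunk above: the guard is declared at function scope because the pointer returned by agent_find_app_by_sock() is only guaranteed to stay valid while the read-side critical section is open, so the guard must outlive every use of app, including the agent_destroy_app() call. A reduced illustration (hypothetical wrapper, assuming the declarations already used in this file):

#include <common/urcu.hpp>

/* Hypothetical illustration: an RCU-protected lookup result must not
 * be used after the guard covering the lookup is destroyed. */
static void example_destroy_by_sock(int sock)
{
	lttng::urcu::read_lock_guard read_lock;
	struct agent_app *app = agent_find_app_by_sock(sock);

	if (app) {
		/* Still inside the read-side critical section. */
		agent_destroy_app(app);
	}
} /* only after this point may a grace period reclaim 'app' */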
/*
if (!the_agent_apps_ht_by_sock) {
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, node, node) {
- struct agent_app *app;
- app = lttng::utils::container_of(node, &agent_app::node);
- agent_destroy_app_by_sock(app->sock->fd);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, node, node) {
+ struct agent_app *app;
+
+ app = lttng::utils::container_of(node, &agent_app::node);
+ agent_destroy_app_by_sock(app->sock->fd);
+ }
}
- rcu_read_unlock();
lttng_ht_destroy(the_agent_apps_ht_by_sock);
}
DBG("Agent updating app: pid = %ld", (long) app->pid);
- rcu_read_lock();
/*
* We are in the registration path thus if the application is gone,
* there is a serious code flow error.
*/
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (agt->events->ht, &iter.iter, event, node.node) {
- /* Skip event if disabled. */
- if (!AGENT_EVENT_IS_ENABLED(event)) {
- continue;
- }
+ cds_lfht_for_each_entry (agt->events->ht, &iter.iter, event, node.node) {
+ /* Skip event if disabled. */
+ if (!AGENT_EVENT_IS_ENABLED(event)) {
+ continue;
+ }
- ret = enable_event(app, event);
- if (ret != LTTNG_OK) {
- DBG2("Agent update unable to enable event %s on app pid: %d sock %d",
- event->name,
- app->pid,
- app->sock->fd);
- /* Let's try the others here and don't assume the app is dead. */
- continue;
+ ret = enable_event(app, event);
+ if (ret != LTTNG_OK) {
+ DBG2("Agent update unable to enable event %s on app pid: %d sock %d",
+ event->name,
+ app->pid,
+ app->sock->fd);
+ /* Let's try the others here and don't assume the app is dead. */
+ continue;
+ }
}
- }
- cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node)
- {
- ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_ENABLE);
- if (ret != LTTNG_OK) {
- DBG2("Agent update unable to add application context %s:%s on app pid: %d sock %d",
- ctx->provider_name,
- ctx->ctx_name,
- app->pid,
- app->sock->fd);
- continue;
+ cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node)
+ {
+ ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_ENABLE);
+ if (ret != LTTNG_OK) {
+ DBG2("Agent update unable to add application context %s:%s on app pid: %d sock %d",
+ ctx->provider_name,
+ ctx->ctx_name,
+ app->pid,
+ app->sock->fd);
+ continue;
+ }
}
}
-
- rcu_read_unlock();
}
/*
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (the_trigger_agents_ht_by_domain->ht, &iter.iter, node, node) {
- struct agent *agent = lttng::utils::container_of(node, &agent::node);
- const int ret = lttng_ht_del(the_trigger_agents_ht_by_domain, &iter);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ the_trigger_agents_ht_by_domain->ht, &iter.iter, node, node) {
+ struct agent *agent = lttng::utils::container_of(node, &agent::node);
+ const int ret = lttng_ht_del(the_trigger_agents_ht_by_domain, &iter);
- LTTNG_ASSERT(ret == 0);
- agent_destroy(agent);
+ LTTNG_ASSERT(ret == 0);
+ agent_destroy(agent);
+ }
}
- rcu_read_unlock();
lttng_ht_destroy(the_trigger_agents_ht_by_domain);
}
#include <common/common.hpp>
#include <common/hashtable/utils.hpp>
+#include <common/urcu.hpp>
#include <inttypes.h>
DBG3("Buffer registry per UID adding to global registry with id: %" PRIu64,
reg->session_id);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
nodep = cds_lfht_add_unique(
ht->ht, ht->hash_fct(reg, lttng_ht_seed), ht->match_fct, reg, ®->node.node);
LTTNG_ASSERT(nodep == ®->node.node);
- rcu_read_unlock();
}
/*
DBG3("Buffer registry per PID adding to global registry with id: %" PRIu64,
reg->session_id);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_add_unique_u64(buffer_registry_pid, ®->node);
- rcu_read_unlock();
}
/*
struct buffer_reg_channel *reg_chan;
int ret = -1;
- rcu_read_lock();
- /*
- * For the per-uid registry, we have to iterate since we don't have the
- * uid and bitness key.
- */
- cds_list_for_each_entry (uid_reg, buffer_reg_uid_list, lnode) {
- session_reg = uid_reg->registry;
- cds_lfht_for_each_entry (
- session_reg->channels->ht, &iter.iter, reg_chan, node.node) {
- if (reg_chan->key == chan_key) {
- *consumer_chan_key = reg_chan->consumer_key;
- ret = 0;
- goto end;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ /*
+ * For the per-uid registry, we have to iterate since we don't have the
+ * uid and bitness key.
+ */
+ cds_list_for_each_entry (uid_reg, buffer_reg_uid_list, lnode) {
+ session_reg = uid_reg->registry;
+ cds_lfht_for_each_entry (
+ session_reg->channels->ht, &iter.iter, reg_chan, node.node) {
+ if (reg_chan->key == chan_key) {
+ *consumer_chan_key = reg_chan->consumer_key;
+ ret = 0;
+ goto end;
+ }
}
}
}
-
end:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(session);
LTTNG_ASSERT(channel);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_add_unique_u64(session->channels, &channel->node);
- rcu_read_unlock();
}
/*
DBG3("Buffer registry session destroy");
/* Destroy all channels. */
- rcu_read_lock();
- cds_lfht_for_each_entry (regp->channels->ht, &iter.iter, reg_chan, node.node) {
- ret = lttng_ht_del(regp->channels, &iter);
- LTTNG_ASSERT(!ret);
- buffer_reg_channel_destroy(reg_chan, domain);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (regp->channels->ht, &iter.iter, reg_chan, node.node) {
+ ret = lttng_ht_del(regp->channels, &iter);
+ LTTNG_ASSERT(!ret);
+ buffer_reg_channel_destroy(reg_chan, domain);
+ }
}
- rcu_read_unlock();
lttng_ht_destroy(regp->channels);
LTTNG_ASSERT(regp);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
iter.iter.node = &regp->node.node;
ret = lttng_ht_del(buffer_registry_uid, &iter);
LTTNG_ASSERT(!ret);
- rcu_read_unlock();
}
static void rcu_free_buffer_reg_uid(struct rcu_head *head)
goto destroy;
}
- rcu_read_lock();
- /* Get the right socket from the consumer object. */
- socket = consumer_find_socket_by_bitness(regp->bits_per_long, consumer);
- if (!socket) {
- goto unlock;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ /* Get the right socket from the consumer object. */
+ socket = consumer_find_socket_by_bitness(regp->bits_per_long, consumer);
+ if (!socket) {
+ goto destroy;
+ }
- switch (regp->domain) {
- case LTTNG_DOMAIN_UST:
- if (regp->registry->reg.ust->_metadata_key) {
- /* Return value does not matter. This call will print errors. */
- (void) consumer_close_metadata(socket,
- regp->registry->reg.ust->_metadata_key);
+ switch (regp->domain) {
+ case LTTNG_DOMAIN_UST:
+ if (regp->registry->reg.ust->_metadata_key) {
+ /* Return value does not matter. This call will print errors. */
+ (void) consumer_close_metadata(
+ socket, regp->registry->reg.ust->_metadata_key);
+ }
+ break;
+ default:
+ abort();
+ return;
}
- break;
- default:
- abort();
- rcu_read_unlock();
- return;
}
-unlock:
- rcu_read_unlock();
destroy:
call_rcu(&regp->node.head, rcu_free_buffer_reg_uid);
}
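
Dropping the unlock: label above is safe because of a C++ guarantee: a goto that transfers control out of a block destroys the block's automatic objects, so goto destroy; runs the guard's destructor (releasing the read lock) before the label is reached. A sketch, with hypothetical lookup and reclaim helpers:

static void reg_uid_release(struct reg_elem *regp) /* hypothetical type */
{
	{
		lttng::urcu::read_lock_guard read_lock;

		if (!find_socket_for(regp)) { /* hypothetical lookup */
			/* Leaving the scope destroys read_lock first,
			 * releasing the RCU read-side lock. */
			goto destroy;
		}

		/* ... teardown work under the read lock ... */
	}
destroy:
	call_rcu(&regp->head, free_reg_elem); /* hypothetical reclaim */
}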
struct lttng_channel *defattr = nullptr;
enum lttng_domain_type domain = LTTNG_DOMAIN_UST;
bool chan_published = false;
+ lttng::urcu::read_lock_guard read_lock;
LTTNG_ASSERT(usess);
}
/* Adding the channel to the channel hash table. */
- rcu_read_lock();
if (strncmp(uchan->name, DEFAULT_METADATA_NAME, sizeof(uchan->name)) != 0) {
lttng_ht_add_unique_str(usess->domain_global.channels, &uchan->node);
chan_published = true;
*/
memcpy(&usess->metadata_attr, &uchan->attr, sizeof(usess->metadata_attr));
}
- rcu_read_unlock();
DBG2("Channel %s created successfully", uchan->name);
if (domain != LTTNG_DOMAIN_UST) {
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/string-utils/string-utils.hpp>
#include <common/trace-chunk.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <lttng/action/action-internal.hpp>
DBG3("Listing agent events");
- rcu_read_lock();
agent_event_count = lttng_ht_get_count(agt->events);
if (agent_event_count == 0) {
/* Early exit. */
local_nb_events = (unsigned int) agent_event_count;
- cds_lfht_for_each_entry (agt->events->ht, &iter.iter, event, node.node) {
- struct lttng_event *tmp_event = lttng_event_create();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- if (!tmp_event) {
- ret_code = LTTNG_ERR_NOMEM;
- goto error;
- }
+ cds_lfht_for_each_entry (agt->events->ht, &iter.iter, event, node.node) {
+ struct lttng_event *tmp_event = lttng_event_create();
- if (lttng_strncpy(tmp_event->name, event->name, sizeof(tmp_event->name))) {
- lttng_event_destroy(tmp_event);
- ret_code = LTTNG_ERR_FATAL;
- goto error;
- }
+ if (!tmp_event) {
+ ret_code = LTTNG_ERR_NOMEM;
+ goto error;
+ }
- tmp_event->name[sizeof(tmp_event->name) - 1] = '\0';
- tmp_event->enabled = !!event->enabled_count;
- tmp_event->loglevel = event->loglevel_value;
- tmp_event->loglevel_type = event->loglevel_type;
+ if (lttng_strncpy(tmp_event->name, event->name, sizeof(tmp_event->name))) {
+ lttng_event_destroy(tmp_event);
+ ret_code = LTTNG_ERR_FATAL;
+ goto error;
+ }
- ret = lttng_event_serialize(
- tmp_event, 0, nullptr, event->filter_expression, 0, nullptr, reply_payload);
- lttng_event_destroy(tmp_event);
- if (ret) {
- ret_code = LTTNG_ERR_FATAL;
- goto error;
+ tmp_event->name[sizeof(tmp_event->name) - 1] = '\0';
+ tmp_event->enabled = !!event->enabled_count;
+ tmp_event->loglevel = event->loglevel_value;
+ tmp_event->loglevel_type = event->loglevel_type;
+
+ ret = lttng_event_serialize(tmp_event,
+ 0,
+ nullptr,
+ event->filter_expression,
+ 0,
+ nullptr,
+ reply_payload);
+ lttng_event_destroy(tmp_event);
+ if (ret) {
+ ret_code = LTTNG_ERR_FATAL;
+ goto error;
+ }
}
}
-
end:
ret_code = LTTNG_OK;
*nb_events = local_nb_events;
error:
- rcu_read_unlock();
return ret_code;
}
DBG("Listing UST global events for channel %s", channel_name);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_lookup(ust_global->channels, (void *) channel_name, &iter);
node = lttng_ht_iter_get_node_str(&iter);
ret_code = LTTNG_OK;
*nb_events = local_nb_events;
error:
- rcu_read_unlock();
return ret_code;
}
LTTNG_ASSERT(session);
- rcu_read_lock();
-
if (session->consumer_fds_sent == 0 && session->consumer != nullptr) {
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (
session->consumer->socks->ht, &iter.iter, socket, node.node) {
pthread_mutex_lock(socket->lock);
}
error:
- rcu_read_unlock();
return ret;
}
DBG("Setting relayd for session %s", session->name);
- rcu_read_lock();
if (session->current_trace_chunk) {
enum lttng_trace_chunk_status status = lttng_trace_chunk_get_id(
session->current_trace_chunk, &current_chunk_id.value);
if (usess && usess->consumer && usess->consumer->type == CONSUMER_DST_NET &&
usess->consumer->enabled) {
/* For each consumer socket, send relayd sockets */
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (
usess->consumer->socks->ht, &iter.iter, socket, node.node) {
pthread_mutex_lock(socket->lock);
/* Session is now ready for network streaming. */
session->net_handle = 1;
}
+
session->consumer->relay_major_version = usess->consumer->relay_major_version;
session->consumer->relay_minor_version = usess->consumer->relay_minor_version;
session->consumer->relay_allows_clear = usess->consumer->relay_allows_clear;
if (ksess && ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET &&
ksess->consumer->enabled) {
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (
ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
pthread_mutex_lock(socket->lock);
/* Session is now ready for network streaming. */
session->net_handle = 1;
}
+
session->consumer->relay_major_version = ksess->consumer->relay_major_version;
session->consumer->relay_minor_version = ksess->consumer->relay_minor_version;
session->consumer->relay_allows_clear = ksess->consumer->relay_allows_clear;
}
error:
- rcu_read_unlock();
return ret;
}
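
Note the shape this function now has: two independent, narrowly scoped critical sections (one per consumer type) instead of one lock held across both loops and the error path. Roughly, as a sketch with hypothetical parameters:

static int send_relayd_sockets_to_all(struct consumer_output *ust,
				      struct consumer_output *kernel)
{
	int ret = 0;

	if (ust != nullptr) {
		lttng::urcu::read_lock_guard read_lock;

		/* Iterate ust->socks->ht and send sockets under the lock. */
	}

	if (kernel != nullptr) {
		lttng::urcu::read_lock_guard read_lock;

		/* Second, independent read-side critical section. */
	}

	return ret; /* the error path needs no explicit unlock */
}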
usess = session->ust_session;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
switch (domain) {
case LTTNG_DOMAIN_KERNEL:
ret = LTTNG_OK;
error:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(_attr);
LTTNG_ASSERT(domain);
+ lttng::urcu::read_lock_guard read_lock;
+
attr = lttng_channel_copy(_attr);
if (!attr) {
ret_code = LTTNG_ERR_NOMEM;
DBG("Enabling channel %s for session %s", attr->name, session->name);
- rcu_read_lock();
-
/*
* If the session is a live session, remove the switch timer, the
* live timer does the same thing but sends also synchronisation
session->has_non_mmap_channel = true;
}
error:
- rcu_read_unlock();
end:
lttng_channel_destroy(attr);
return ret_code;
event_name = event->name;
+ lttng::urcu::read_lock_guard read_lock;
+
/* Error out on unhandled search criteria */
if (event->loglevel_type || event->loglevel != -1 || event->enabled || event->pid ||
event->filter || event->exclusion) {
goto error;
}
- rcu_read_lock();
-
switch (domain) {
case LTTNG_DOMAIN_KERNEL:
{
ret = LTTNG_OK;
error_unlock:
- rcu_read_unlock();
error:
free(exclusion);
free(bytecode);
DBG("Enable event command for event \'%s\'", event->name);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
switch (domain->type) {
case LTTNG_DOMAIN_KERNEL:
free(filter);
free(exclusion);
channel_attr_destroy(attr);
- rcu_read_unlock();
return ret;
}
pthread_mutex_init(socket->lock, nullptr);
socket->registered = 1;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
consumer_add_socket(socket, ksess->consumer);
- rcu_read_unlock();
pthread_mutex_lock(&cdata->pid_mutex);
cdata->pid = -1;
DBG3("Listing domains found UST global domain");
nb_dom++;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (
session->ust_session->agents->ht, &iter.iter, agt, node.node) {
if (agt->being_used) {
nb_dom++;
}
}
- rcu_read_unlock();
}
if (!nb_dom) {
(*domains)[index].buf_type = session->ust_session->buffer_type;
index++;
- rcu_read_lock();
- cds_lfht_for_each_entry (
- session->ust_session->agents->ht, &iter.iter, agt, node.node) {
- if (agt->being_used) {
- (*domains)[index].type = agt->domain;
- (*domains)[index].buf_type = session->ust_session->buffer_type;
- index++;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ session->ust_session->agents->ht, &iter.iter, agt, node.node) {
+ if (agt->being_used) {
+ (*domains)[index].type = agt->domain;
+ (*domains)[index].buf_type =
+ session->ust_session->buffer_type;
+ index++;
+ }
}
}
- rcu_read_unlock();
}
end:
return nb_dom;
struct lttng_ht_iter iter;
struct ltt_ust_channel *uchan;
- rcu_read_lock();
- cds_lfht_for_each_entry (session->ust_session->domain_global.channels->ht,
- &iter.iter,
- uchan,
- node.node) {
- uint64_t discarded_events = 0, lost_packets = 0;
- struct lttng_channel *channel = nullptr;
- struct lttng_channel_extended *extended;
-
- channel = trace_ust_channel_to_lttng_channel(uchan);
- if (!channel) {
- ret_code = LTTNG_ERR_NOMEM;
- goto end;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (session->ust_session->domain_global.channels->ht,
+ &iter.iter,
+ uchan,
+ node.node) {
+ uint64_t discarded_events = 0, lost_packets = 0;
+ struct lttng_channel *channel = nullptr;
+ struct lttng_channel_extended *extended;
- extended = (struct lttng_channel_extended *) channel->attr.extended.ptr;
+ channel = trace_ust_channel_to_lttng_channel(uchan);
+ if (!channel) {
+ ret_code = LTTNG_ERR_NOMEM;
+ goto end;
+ }
- ret = get_ust_runtime_stats(
- session, uchan, &discarded_events, &lost_packets);
- if (ret < 0) {
- lttng_channel_destroy(channel);
- ret_code = LTTNG_ERR_UNK;
- goto end;
- }
+ extended = (struct lttng_channel_extended *)
+ channel->attr.extended.ptr;
- extended->discarded_events = discarded_events;
- extended->lost_packets = lost_packets;
+ ret = get_ust_runtime_stats(
+ session, uchan, &discarded_events, &lost_packets);
+ if (ret < 0) {
+ lttng_channel_destroy(channel);
+ ret_code = LTTNG_ERR_UNK;
+ goto end;
+ }
+
+ extended->discarded_events = discarded_events;
+ extended->lost_packets = lost_packets;
+
+ ret = lttng_channel_serialize(channel, &payload->buffer);
+ if (ret) {
+ ERR("Failed to serialize lttng_channel: channel name = '%s'",
+ channel->name);
+ lttng_channel_destroy(channel);
+ ret_code = LTTNG_ERR_UNK;
+ goto end;
+ }
- ret = lttng_channel_serialize(channel, &payload->buffer);
- if (ret) {
- ERR("Failed to serialize lttng_channel: channel name = '%s'",
- channel->name);
lttng_channel_destroy(channel);
- ret_code = LTTNG_ERR_UNK;
- goto end;
+ i++;
}
-
- lttng_channel_destroy(channel);
- i++;
}
- rcu_read_unlock();
+
break;
}
default:
struct lttng_ht_iter iter;
struct agent *agt;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (
session->ust_session->agents->ht, &iter.iter, agt, node.node) {
if (agt->domain == domain) {
break;
}
}
-
- rcu_read_unlock();
}
break;
default:
goto free_error;
}
- rcu_read_lock();
snapshot_add_output(&session->snapshot, new_output);
if (id) {
*id = new_output->id;
}
- rcu_read_unlock();
return LTTNG_OK;
LTTNG_ASSERT(session);
LTTNG_ASSERT(output);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/*
* Permission denied to create an output if the session is not
ret = LTTNG_OK;
error:
- rcu_read_unlock();
return ret;
}
}
/* Copy list from session to the new list object. */
- rcu_read_lock();
- cds_lfht_for_each_entry (session->snapshot.output_ht->ht, &iter.iter, output, node.node) {
- LTTNG_ASSERT(output->consumer);
- list[idx].id = output->id;
- list[idx].max_size = output->max_size;
- if (lttng_strncpy(list[idx].name, output->name, sizeof(list[idx].name))) {
- ret = -LTTNG_ERR_INVALID;
- goto error;
- }
- if (output->consumer->type == CONSUMER_DST_LOCAL) {
- if (lttng_strncpy(list[idx].ctrl_url,
- output->consumer->dst.session_root_path,
- sizeof(list[idx].ctrl_url))) {
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ session->snapshot.output_ht->ht, &iter.iter, output, node.node) {
+ LTTNG_ASSERT(output->consumer);
+ list[idx].id = output->id;
+ list[idx].max_size = output->max_size;
+ if (lttng_strncpy(list[idx].name, output->name, sizeof(list[idx].name))) {
ret = -LTTNG_ERR_INVALID;
goto error;
}
- } else {
- /* Control URI. */
- ret = uri_to_str_url(&output->consumer->dst.net.control,
- list[idx].ctrl_url,
- sizeof(list[idx].ctrl_url));
- if (ret < 0) {
- ret = -LTTNG_ERR_NOMEM;
- goto error;
- }
- /* Data URI. */
- ret = uri_to_str_url(&output->consumer->dst.net.data,
- list[idx].data_url,
- sizeof(list[idx].data_url));
- if (ret < 0) {
- ret = -LTTNG_ERR_NOMEM;
- goto error;
+ if (output->consumer->type == CONSUMER_DST_LOCAL) {
+ if (lttng_strncpy(list[idx].ctrl_url,
+ output->consumer->dst.session_root_path,
+ sizeof(list[idx].ctrl_url))) {
+ ret = -LTTNG_ERR_INVALID;
+ goto error;
+ }
+ } else {
+ /* Control URI. */
+ ret = uri_to_str_url(&output->consumer->dst.net.control,
+ list[idx].ctrl_url,
+ sizeof(list[idx].ctrl_url));
+ if (ret < 0) {
+ ret = -LTTNG_ERR_NOMEM;
+ goto error;
+ }
+
+ /* Data URI. */
+ ret = uri_to_str_url(&output->consumer->dst.net.data,
+ list[idx].data_url,
+ sizeof(list[idx].data_url));
+ if (ret < 0) {
+ ret = -LTTNG_ERR_NOMEM;
+ goto error;
+ }
}
+
+ idx++;
}
- idx++;
}
*outputs = list;
list = nullptr;
ret = session->snapshot.nb_output;
error:
- rcu_read_unlock();
free(list);
end:
return ret;
* For each consumer socket, create and send the relayd object of the
* snapshot output.
*/
- rcu_read_lock();
- cds_lfht_for_each_entry (output->socks->ht, &iter.iter, socket, node.node) {
- pthread_mutex_lock(socket->lock);
- status = send_consumer_relayd_sockets(
- session->id,
- output,
- socket,
- session->name,
- session->hostname,
- base_path,
- session->live_timer,
- current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
- session->creation_time,
- session->name_contains_creation_time);
- pthread_mutex_unlock(socket->lock);
- if (status != LTTNG_OK) {
- rcu_read_unlock();
- goto error;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (output->socks->ht, &iter.iter, socket, node.node) {
+ pthread_mutex_lock(socket->lock);
+ status = send_consumer_relayd_sockets(
+ session->id,
+ output,
+ socket,
+ session->name,
+ session->hostname,
+ base_path,
+ session->live_timer,
+ current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
+ session->creation_time,
+ session->name_contains_creation_time);
+ pthread_mutex_unlock(socket->lock);
+ if (status != LTTNG_OK) {
+ goto error;
+ }
}
}
- rcu_read_unlock();
error:
return status;
struct snapshot_output *sout;
struct lttng_ht_iter iter;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (
session->snapshot.output_ht->ht, &iter.iter, sout, node.node) {
struct snapshot_output output_copy;
output->name,
sizeof(output_copy.name))) {
cmd_ret = LTTNG_ERR_INVALID;
- rcu_read_unlock();
goto error;
}
}
cmd_ret = snapshot_record(session, &output_copy);
if (cmd_ret != LTTNG_OK) {
- rcu_read_unlock();
goto error;
}
+
snapshot_success = 1;
}
- rcu_read_unlock();
}
if (snapshot_success) {
if (tmp_output) {
snapshot_output_destroy(tmp_output);
}
+
return cmd_ret;
}
#include <common/defaults.hpp>
#include <common/relayd/relayd.hpp>
#include <common/string-utils/format.hpp>
+#include <common/urcu.hpp>
#include <common/uri.hpp>
#include <inttypes.h>
/* Destroy any relayd connection */
if (consumer->type == CONSUMER_DST_NET) {
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
- int ret;
+ /* Send destroy relayd command. */
+ const int ret = consumer_send_destroy_relayd(socket, consumer);
- /* Send destroy relayd command */
- ret = consumer_send_destroy_relayd(socket, consumer);
if (ret < 0) {
DBG("Unable to send destroy relayd command to consumer");
/* Continue since we MUST delete everything at this point. */
}
}
- rcu_read_unlock();
}
}
LTTNG_ASSERT(data);
+ lttng::urcu::read_lock_guard read_lock;
+
if (output == nullptr || data->cmd_sock < 0) {
/*
* Not an error. Possible there is simply not spawned consumer or it's
goto error;
}
- rcu_read_lock();
socket = consumer_find_socket(data->cmd_sock, output);
- rcu_read_unlock();
if (socket == nullptr) {
socket = consumer_allocate_socket(&data->cmd_sock);
if (socket == nullptr) {
socket->registered = 0;
socket->lock = &data->lock;
- rcu_read_lock();
consumer_add_socket(socket, output);
- rcu_read_unlock();
}
socket->type = data->type;
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (obj->socks->ht, &iter.iter, socket, node.node) {
- consumer_del_socket(socket, obj);
- consumer_destroy_socket(socket);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (obj->socks->ht, &iter.iter, socket, node.node) {
+ consumer_del_socket(socket, obj);
+ consumer_destroy_socket(socket);
+ }
}
- rcu_read_unlock();
}
/*
LTTNG_ASSERT(dst);
LTTNG_ASSERT(src);
- rcu_read_lock();
- cds_lfht_for_each_entry (src->socks->ht, &iter.iter, socket, node.node) {
- /* Ignore socket that are already there. */
- copy_sock = consumer_find_socket(*socket->fd_ptr, dst);
- if (copy_sock) {
- continue;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* Create new socket object. */
- copy_sock = consumer_allocate_socket(socket->fd_ptr);
- if (copy_sock == nullptr) {
- rcu_read_unlock();
- ret = -ENOMEM;
- goto error;
- }
+ cds_lfht_for_each_entry (src->socks->ht, &iter.iter, socket, node.node) {
+ /* Ignore sockets that are already there. */
+ copy_sock = consumer_find_socket(*socket->fd_ptr, dst);
+ if (copy_sock) {
+ continue;
+ }
- copy_sock->registered = socket->registered;
- /*
- * This is valid because this lock is shared accross all consumer
- * object being the global lock of the consumer data structure of the
- * session daemon.
- */
- copy_sock->lock = socket->lock;
- consumer_add_socket(copy_sock, dst);
+ /* Create new socket object. */
+ copy_sock = consumer_allocate_socket(socket->fd_ptr);
+ if (copy_sock == nullptr) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ copy_sock->registered = socket->registered;
+ /*
+ * This is valid because this lock is shared across all consumer
+ * object being the global lock of the consumer data structure of the
+ * session daemon.
+ */
+ copy_sock->lock = socket->lock;
+ consumer_add_socket(copy_sock, dst);
+ }
}
- rcu_read_unlock();
error:
return ret;
msg.cmd_type = LTTNG_CONSUMER_DATA_PENDING;
msg.u.data_pending.session_id = session_id;
- /* Send command for each consumer */
- rcu_read_lock();
- cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
- pthread_mutex_lock(socket->lock);
- ret = consumer_socket_send(socket, &msg, sizeof(msg));
- if (ret < 0) {
- pthread_mutex_unlock(socket->lock);
- goto error_unlock;
- }
+ {
+ /* Send command for each consumer. */
+ lttng::urcu::read_lock_guard read_lock;
- /*
- * No need for a recv reply status because the answer to the command is
- * the reply status message.
- */
+ cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
+ pthread_mutex_lock(socket->lock);
+ ret = consumer_socket_send(socket, &msg, sizeof(msg));
+ if (ret < 0) {
+ pthread_mutex_unlock(socket->lock);
+ goto error_unlock;
+ }
+
+ /*
+ * No need for a recv reply status because the answer to the command is
+ * the reply status message.
+ */
+ ret = consumer_socket_recv(socket, &ret_code, sizeof(ret_code));
+ if (ret < 0) {
+ pthread_mutex_unlock(socket->lock);
+ goto error_unlock;
+ }
- ret = consumer_socket_recv(socket, &ret_code, sizeof(ret_code));
- if (ret < 0) {
pthread_mutex_unlock(socket->lock);
- goto error_unlock;
- }
- pthread_mutex_unlock(socket->lock);
- if (ret_code == 1) {
- break;
+ if (ret_code == 1) {
+ break;
+ }
}
}
- rcu_read_unlock();
DBG("Consumer data is %s pending for session id %" PRIu64,
ret_code == 1 ? "" : "NOT",
return ret_code;
error_unlock:
- rcu_read_unlock();
return -1;
}
*discarded = 0;
- /* Send command for each consumer */
- rcu_read_lock();
- cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
- uint64_t consumer_discarded = 0;
- pthread_mutex_lock(socket->lock);
- ret = consumer_socket_send(socket, &msg, sizeof(msg));
- if (ret < 0) {
- pthread_mutex_unlock(socket->lock);
- goto end;
- }
+ /* Send command for each consumer. */
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
+ uint64_t consumer_discarded = 0;
+
+ pthread_mutex_lock(socket->lock);
+ ret = consumer_socket_send(socket, &msg, sizeof(msg));
+ if (ret < 0) {
+ pthread_mutex_unlock(socket->lock);
+ goto end;
+ }
+
+ /*
+ * No need for a recv reply status because the answer to the
+ * command is the reply status message.
+ */
+ ret = consumer_socket_recv(
+ socket, &consumer_discarded, sizeof(consumer_discarded));
+ if (ret < 0) {
+ ERR("get discarded events");
+ pthread_mutex_unlock(socket->lock);
+ goto end;
+ }
- /*
- * No need for a recv reply status because the answer to the
- * command is the reply status message.
- */
- ret = consumer_socket_recv(socket, &consumer_discarded, sizeof(consumer_discarded));
- if (ret < 0) {
- ERR("get discarded events");
pthread_mutex_unlock(socket->lock);
- goto end;
+ *discarded += consumer_discarded;
}
- pthread_mutex_unlock(socket->lock);
- *discarded += consumer_discarded;
}
+
ret = 0;
DBG("Consumer discarded %" PRIu64 " events in session id %" PRIu64, *discarded, session_id);
end:
- rcu_read_unlock();
return ret;
}
*lost = 0;
- /* Send command for each consumer */
- rcu_read_lock();
- cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
- uint64_t consumer_lost = 0;
- pthread_mutex_lock(socket->lock);
- ret = consumer_socket_send(socket, &msg, sizeof(msg));
- if (ret < 0) {
- pthread_mutex_unlock(socket->lock);
- goto end;
- }
+ /* Send command for each consumer. */
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /*
- * No need for a recv reply status because the answer to the
- * command is the reply status message.
- */
- ret = consumer_socket_recv(socket, &consumer_lost, sizeof(consumer_lost));
- if (ret < 0) {
- ERR("get lost packets");
+ cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
+ uint64_t consumer_lost = 0;
+ pthread_mutex_lock(socket->lock);
+ ret = consumer_socket_send(socket, &msg, sizeof(msg));
+ if (ret < 0) {
+ pthread_mutex_unlock(socket->lock);
+ goto end;
+ }
+
+ /*
+ * No need for a recv reply status because the answer to the
+ * command is the reply status message.
+ */
+ ret = consumer_socket_recv(socket, &consumer_lost, sizeof(consumer_lost));
+ if (ret < 0) {
+ ERR("get lost packets");
+ pthread_mutex_unlock(socket->lock);
+ goto end;
+ }
pthread_mutex_unlock(socket->lock);
- goto end;
+ *lost += consumer_lost;
}
- pthread_mutex_unlock(socket->lock);
- *lost += consumer_lost;
}
+
ret = 0;
DBG("Consumer lost %" PRIu64 " packets in session id %" PRIu64, *lost, session_id);
end:
- rcu_read_unlock();
return ret;
}
#include <common/error.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
#include <stdio.h>
#include <unistd.h>
LTTNG_ASSERT(ctx);
LTTNG_ASSERT(channel_name);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
chan_ht = usess->domain_global.channels;
/* Add ctx to channel */
ret = add_uctx_to_channel(usess, domain, uchan, ctx);
} else {
- rcu_read_lock();
/* Add ctx all events, all channels */
cds_lfht_for_each_entry (chan_ht->ht, &iter.iter, uchan, node.node) {
ret = add_uctx_to_channel(usess, domain, uchan, ctx);
continue;
}
}
- rcu_read_unlock();
}
switch (ret) {
}
error:
- rcu_read_unlock();
return ret;
}
#include <common/futex.hpp>
#include <common/macros.hpp>
+#include <common/urcu.hpp>
#include <stddef.h>
#include <stdlib.h>
return;
}
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
LTTNG_ASSERT(app_sock >= 0);
app = ust_app_find_by_sock(app_sock);
if (app == nullptr) {
* update.
*/
DBG3("UST app update failed to find app sock %d", app_sock);
- goto unlock_rcu;
+ return;
}
/* Update all event notifiers for the app. */
session_unlock(sess);
session_put(sess);
}
-
-unlock_rcu:
- rcu_read_unlock();
}
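
With the guard declared at function scope, the former goto unlock_rcu; exit becomes a plain return: the destructor releases the lock. Sketched against the same ust_app_find_by_sock() lookup used above:

static void update_app_by_sock(int app_sock)
{
	lttng::urcu::read_lock_guard read_lock;

	struct ust_app *app = ust_app_find_by_sock(app_sock);
	if (app == nullptr) {
		/* Guard destructor runs here; no unlock label needed. */
		return;
	}

	/* ... update the application's event notifiers ... */
}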
/*
* and change its state.
*/
session_lock_list();
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/*
* Add application to the global hash table. This needs to be
ret = send_socket_to_thread(
notifiers->apps_cmd_notify_pipe_write_fd, app->notify_sock);
if (ret < 0) {
- rcu_read_unlock();
session_unlock_list();
/*
* No notify thread, stop the UST tracing. However, this is
ret = send_socket_to_thread(notifiers->apps_cmd_pipe_write_fd,
app->sock);
if (ret < 0) {
- rcu_read_unlock();
session_unlock_list();
/*
* No apps. thread, stop the UST tracing. However, this is
goto error;
}
- rcu_read_unlock();
session_unlock_list();
}
} while (node != nullptr);
#include <common/index-allocator.hpp>
#include <common/kernel-ctl/kernel-ctl.hpp>
#include <common/shm.hpp>
+#include <common/urcu.hpp>
#include <lttng/trigger/trigger-internal.hpp>
struct ust_error_accounting_entry *entry =
lttng::utils::container_of(entry_ref, &ust_error_accounting_entry::ref);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_del(error_counter_uid_ht->ht, &entry->node.node);
call_rcu(&entry->rcu_head, free_ust_error_accounting_entry);
- rcu_read_unlock();
}
static void ust_error_accounting_entry_put(struct ust_error_accounting_entry *entry)
ASSERT_LOCKED(the_event_notifier_counter.lock);
- rcu_read_lock();
- cds_lfht_for_each_entry (error_counter_uid_ht->ht, &iter.iter, uid_entry, node.node) {
- ust_error_accounting_entry_put(uid_entry);
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- rcu_read_unlock();
+ cds_lfht_for_each_entry (
+ error_counter_uid_ht->ht, &iter.iter, uid_entry, node.node) {
+ ust_error_accounting_entry_put(uid_entry);
+ }
+ }
}
/*
ASSERT_LOCKED(the_event_notifier_counter.lock);
- rcu_read_lock();
- cds_lfht_for_each_entry (error_counter_uid_ht->ht, &iter.iter, uid_entry, node.node) {
- ust_error_accounting_entry_get(uid_entry);
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- rcu_read_unlock();
+ cds_lfht_for_each_entry (
+ error_counter_uid_ht->ht, &iter.iter, uid_entry, node.node) {
+ ust_error_accounting_entry_get(uid_entry);
+ }
+ }
}
#endif /* HAVE_LIBLTTNG_UST_CTL */
struct lttng_ht_iter iter;
const struct index_ht_entry *index_entry;
enum event_notifier_error_accounting_status status;
+ lttng::urcu::read_lock_guard read_lock;
- rcu_read_lock();
lttng_ht_lookup(state->indices_ht, &tracer_token, &iter);
node = lttng_ht_iter_get_node_u64(&iter);
if (node) {
status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOT_FOUND;
}
- rcu_read_unlock();
return status;
}
struct ust_error_accounting_entry *entry;
enum event_notifier_error_accounting_status status;
struct lttng_ust_abi_object_data **cpu_counters;
+ lttng::urcu::read_lock_guard read_lock;
if (!ust_app_supports_counters(app)) {
status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED;
* Check if we already have a error counter for the user id of this
* app. If not, create one.
*/
- rcu_read_lock();
entry = ust_error_accounting_entry_find(error_counter_uid_ht, app);
if (entry == nullptr) {
/*
app->event_notifier_group.nr_counter_cpu = entry->nr_counter_cpu_fds;
app->event_notifier_group.counter_cpu = cpu_counters;
cpu_counters = nullptr;
- goto end_unlock;
+ goto end;
error_send_cpu_counter_data:
error_duplicate_cpu_counter:
ust_error_accounting_entry_put(entry);
error_creating_entry:
app->event_notifier_group.counter = nullptr;
-end_unlock:
- rcu_read_unlock();
end:
return status;
}
struct ust_error_accounting_entry *entry;
int i;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* If an error occurred during app registration no entry was created. */
if (!app->event_notifier_group.counter) {
status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
end:
- rcu_read_unlock();
return status;
}
uid_t trigger_owner_uid;
const char *trigger_name;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
get_trigger_info_for_log(trigger, &trigger_name, &trigger_owner_uid);
status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
end:
- rcu_read_unlock();
return status;
}
size_t dimension_index;
const uint64_t tracer_token = lttng_trigger_get_tracer_token(trigger);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
status = get_error_counter_index_for_token(&ust_state, tracer_token, &error_counter_index);
if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
uid_t trigger_owner_uid;
status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
end:
- rcu_read_unlock();
return status;
}
#endif /* HAVE_LIBLTTNG_UST_CTL */
enum event_notifier_error_accounting_status status;
struct error_accounting_state *state;
+ lttng::urcu::read_lock_guard read_lock;
+
status = event_notifier_error_accounting_clear(trigger);
if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
/* Trigger details already logged by callee on error. */
goto end;
}
- rcu_read_lock();
-
switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
case LTTNG_DOMAIN_KERNEL:
state = &kernel_state;
LTTNG_ASSERT(!del_ret);
call_rcu(&index_entry->rcu_head, free_index_ht_entry);
}
-
end:
- rcu_read_unlock();
+ return;
}
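
The lone return; left before the closing brace is not cosmetic: a label must be followed by a statement (a rule only relaxed in C++23), and with rcu_read_unlock() removed the end: label would otherwise sit directly against the closing brace. A sketch of the resulting shape, with a hypothetical lookup:

static void unregister_token(uint64_t token)
{
	lttng::urcu::read_lock_guard read_lock;

	if (!token_is_registered(token)) { /* hypothetical check */
		goto end;
	}

	/* ... delete the index entry and call_rcu() the node ... */
end:
	return; /* a statement is required after the label */
}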
void event_notifier_error_accounting_fini()
#include <common/error.hpp>
#include <common/filter.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
#include <lttng/condition/condition.h>
#include <lttng/condition/event-rule-matches.h>
LTTNG_ASSERT(uchan);
LTTNG_ASSERT(event);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
uevent = trace_ust_find_event(uchan->events,
event->name,
ret = LTTNG_OK;
end:
- rcu_read_unlock();
free(filter_expression);
free(filter);
free(exclusion);
ht = uchan->events;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/*
* We use a custom lookup since we need the iterator for the next_duplicate
ret = LTTNG_OK;
error:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(usess);
LTTNG_ASSERT(uchan);
- rcu_read_lock();
/* Disabling existing events */
- cds_lfht_for_each_entry (uchan->events->ht, &iter.iter, uevent, node.node) {
- if (uevent->enabled) {
- ret = event_ust_disable_tracepoint(usess, uchan, uevent->attr.name);
- if (ret < 0) {
- error = LTTNG_ERR_UST_DISABLE_FAIL;
- continue;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (uchan->events->ht, &iter.iter, uevent, node.node) {
+ if (uevent->enabled) {
+ ret = event_ust_disable_tracepoint(usess, uchan, uevent->attr.name);
+ if (ret < 0) {
+ error = LTTNG_ERR_UST_DISABLE_FAIL;
+ continue;
+ }
}
}
}
ret = error ? error : LTTNG_OK;
error:
- rcu_read_unlock();
free(events);
return ret;
}
struct agent_event *aevent;
struct lttng_ht_iter iter;
- /* Flag every event as enabled. */
- rcu_read_lock();
- cds_lfht_for_each_entry (agt->events->ht, &iter.iter, aevent, node.node) {
- aevent->enabled_count++;
+ {
+ /* Flag every event as enabled. */
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (agt->events->ht, &iter.iter, aevent, node.node) {
+ aevent->enabled_count++;
+ }
}
- rcu_read_unlock();
}
/*
LTTNG_ASSERT(event);
LTTNG_ASSERT(agt);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
aevent = agent_find_event(
event->name, event->loglevel_type, event->loglevel, filter_expression, agt);
if (!aevent) {
agent_destroy_event(aevent);
}
end:
- rcu_read_unlock();
free(filter);
free(filter_expression);
return ret;
DBG("Event agent disabling for trigger %" PRIu64, lttng_trigger_get_tracer_token(trigger));
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
aevent = agent_find_event_by_trigger(trigger, agt);
if (aevent == nullptr) {
}
end:
- rcu_read_unlock();
return ret;
}
DBG("Event agent disabling %s (all loglevels) for session %" PRIu64, event_name, usess->id);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
agent_find_events_by_name(event_name, agt, &iter);
node = lttng_ht_iter_get_node_str(&iter);
node = lttng_ht_iter_get_node_str(&iter);
} while (node);
end:
- rcu_read_unlock();
return ret;
}
/*
}
/* Disable every event. */
- rcu_read_lock();
- cds_lfht_for_each_entry (agt->events->ht, &iter.iter, aevent, node.node) {
- if (!AGENT_EVENT_IS_ENABLED(aevent)) {
- continue;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- ret = event_agent_disable(usess, agt, aevent->name);
- if (ret != LTTNG_OK) {
- goto error_unlock;
+ cds_lfht_for_each_entry (agt->events->ht, &iter.iter, aevent, node.node) {
+ if (!AGENT_EVENT_IS_ENABLED(aevent)) {
+ continue;
+ }
+
+ ret = event_agent_disable(usess, agt, aevent->name);
+ if (ret != LTTNG_OK) {
+ goto error_unlock;
+ }
}
}
+
ret = LTTNG_OK;
error_unlock:
- rcu_read_unlock();
error:
return ret;
}
#include <common/common.hpp>
#include <common/compat/string.hpp>
#include <common/defaults.hpp>
+#include <common/urcu.hpp>
#include <inttypes.h>
#include <stdio.h>
struct lttng_channel_extended *channel_attr_extended;
bool is_local_trace;
size_t consumer_path_offset = 0;
+ lttng::urcu::read_lock_guard read_lock;
/* Safety net */
LTTNG_ASSERT(channel);
}
health_code_update();
- rcu_read_lock();
session = session_find_by_id(ksession->id);
LTTNG_ASSERT(session);
ASSERT_LOCKED(session->lock);
LTTNG_DOMAIN_KERNEL,
channel->channel->attr.subbuf_size *
channel->channel->attr.num_subbuf);
- rcu_read_unlock();
if (status != LTTNG_OK) {
ret = -1;
goto error;
struct lttcomm_consumer_msg lkm;
struct consumer_output *consumer;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Safety net */
LTTNG_ASSERT(ksession);
health_code_update();
error:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(ksession->consumer);
LTTNG_ASSERT(sock);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Bail out if consumer is disabled */
if (!ksession->consumer->enabled) {
}
error:
- rcu_read_unlock();
return ret;
}
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/trace-chunk.hpp>
#include <common/tracker.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <lttng/condition/event-rule-matches-internal.hpp>
LTTNG_ASSERT(event);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_del(kernel_token_to_event_notifier_rule_ht, &event->ht_node);
- rcu_read_unlock();
ret = kernctl_disable(event->fd);
if (ret < 0) {
struct lttng_ht_iter iter;
/* For each consumer socket. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (
ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
struct ltt_kernel_channel *chan;
}
}
}
- rcu_read_unlock();
}
/* Close any relayd session */
saved_metadata = ksess->metadata;
saved_metadata_fd = ksess->metadata_stream_fd;
- rcu_read_lock();
-
ret = kernel_open_metadata(ksess);
if (ret < 0) {
status = LTTNG_ERR_KERN_META_FAIL;
status = LTTNG_ERR_INVALID;
goto error;
}
- /* Send metadata to consumer and snapshot everything. */
- cds_lfht_for_each_entry (output->socks->ht, &iter.iter, socket, node.node) {
- struct ltt_kernel_channel *chan;
- pthread_mutex_lock(socket->lock);
- /* This stream must not be monitored by the consumer. */
- ret = kernel_consumer_add_metadata(socket, ksess, 0);
- pthread_mutex_unlock(socket->lock);
- if (ret < 0) {
- status = LTTNG_ERR_KERN_META_FAIL;
- goto error_consumer;
- }
+ {
+ /* Send metadata to consumer and snapshot everything. */
+ lttng::urcu::read_lock_guard read_lock;
- /* For each channel, ask the consumer to snapshot it. */
- cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
+ cds_lfht_for_each_entry (output->socks->ht, &iter.iter, socket, node.node) {
+ struct ltt_kernel_channel *chan;
+
+ pthread_mutex_lock(socket->lock);
+ /* This stream must not be monitored by the consumer. */
+ ret = kernel_consumer_add_metadata(socket, ksess, 0);
+ pthread_mutex_unlock(socket->lock);
+ if (ret < 0) {
+ status = LTTNG_ERR_KERN_META_FAIL;
+ goto error_consumer;
+ }
+
+ /* For each channel, ask the consumer to snapshot it. */
+ cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
+ status =
+ consumer_snapshot_channel(socket,
+ chan->key,
+ output,
+ 0,
+ &trace_path[consumer_path_offset],
+ nb_packets_per_stream);
+ if (status != LTTNG_OK) {
+ (void) kernel_consumer_destroy_metadata(socket,
+ ksess->metadata);
+ goto error_consumer;
+ }
+ }
+
+ /* Snapshot metadata. */
status = consumer_snapshot_channel(socket,
- chan->key,
+ ksess->metadata->key,
output,
- 0,
+ 1,
&trace_path[consumer_path_offset],
- nb_packets_per_stream);
+ 0);
if (status != LTTNG_OK) {
- (void) kernel_consumer_destroy_metadata(socket, ksess->metadata);
goto error_consumer;
}
- }
- /* Snapshot metadata, */
- status = consumer_snapshot_channel(socket,
- ksess->metadata->key,
- output,
- 1,
- &trace_path[consumer_path_offset],
- 0);
- if (status != LTTNG_OK) {
- goto error_consumer;
+ /*
+ * The metadata snapshot is done, ask the consumer to destroy it since
+ * it's not monitored on the consumer side.
+ */
+ (void) kernel_consumer_destroy_metadata(socket, ksess->metadata);
}
-
- /*
- * The metadata snapshot is done, ask the consumer to destroy it since
- * it's not monitored on the consumer side.
- */
- (void) kernel_consumer_destroy_metadata(socket, ksess->metadata);
}
error_consumer:
/* Restore metadata state.*/
ksess->metadata = saved_metadata;
ksess->metadata_stream_fd = saved_metadata_fd;
- rcu_read_unlock();
free(trace_path);
return status;
}
DBG("Rotate kernel session %s started (session %" PRIu64 ")", session->name, session->id);
- rcu_read_lock();
+ {
+ /*
+ * Note that this loop will end after one iteration given that there is
+ * only one kernel consumer.
+ */
+ lttng::urcu::read_lock_guard read_lock;
- /*
- * Note that this loop will end after one iteration given that there is
- * only one kernel consumer.
- */
- cds_lfht_for_each_entry (ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
- struct ltt_kernel_channel *chan;
-
- /* For each channel, ask the consumer to rotate it. */
- cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
- DBG("Rotate kernel channel %" PRIu64 ", session %s",
- chan->key,
- session->name);
+ cds_lfht_for_each_entry (
+ ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
+ struct ltt_kernel_channel *chan;
+
+ /* For each channel, ask the consumer to rotate it. */
+ cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
+ DBG("Rotate kernel channel %" PRIu64 ", session %s",
+ chan->key,
+ session->name);
+ ret = consumer_rotate_channel(socket,
+ chan->key,
+ ksess->consumer,
+ /* is_metadata_channel */ false);
+ if (ret < 0) {
+ status = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+ goto error;
+ }
+ }
+
+ /*
+ * Rotate the metadata channel.
+ */
ret = consumer_rotate_channel(socket,
- chan->key,
+ ksess->metadata->key,
ksess->consumer,
- /* is_metadata_channel */ false);
+ /* is_metadata_channel */ true);
if (ret < 0) {
status = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
goto error;
}
}
-
- /*
- * Rotate the metadata channel.
- */
- ret = consumer_rotate_channel(socket,
- ksess->metadata->key,
- ksess->consumer,
- /* is_metadata_channel */ true);
- if (ret < 0) {
- status = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
- goto error;
- }
}
error:
- rcu_read_unlock();
return status;
}
enum lttng_error_code ret = LTTNG_OK;
enum lttng_trace_chunk_status chunk_status;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
LTTNG_ASSERT(ksess->current_trace_chunk);
/*
goto error;
}
error:
- rcu_read_unlock();
return ret;
}
DBG("Clear kernel session %s (session %" PRIu64 ")", session->name, session->id);
- rcu_read_lock();
-
if (ksess->active) {
ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
status = LTTNG_ERR_FATAL;
goto end;
}
- /*
- * Note that this loop will end after one iteration given that there is
- * only one kernel consumer.
- */
- cds_lfht_for_each_entry (ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
- struct ltt_kernel_channel *chan;
-
- /* For each channel, ask the consumer to clear it. */
- cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
- DBG("Clear kernel channel %" PRIu64 ", session %s",
- chan->key,
- session->name);
- ret = consumer_clear_channel(socket, chan->key);
- if (ret < 0) {
- goto error;
+ {
+ /*
+ * Note that this loop will end after one iteration given that there is
+ * only one kernel consumer.
+ */
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
+ struct ltt_kernel_channel *chan;
+
+ /* For each channel, ask the consumer to clear it. */
+ cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
+ DBG("Clear kernel channel %" PRIu64 ", session %s",
+ chan->key,
+ session->name);
+ ret = consumer_clear_channel(socket, chan->key);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+
+ if (!ksess->metadata) {
+ /*
+ * Nothing to do for the metadata.
+ * This is a snapshot session.
+ * The metadata is generated on the fly.
+ */
+ continue;
}
- }
- if (!ksess->metadata) {
/*
- * Nothing to do for the metadata.
- * This is a snapshot session.
- * The metadata is genererated on the fly.
+ * Clear the metadata channel.
+ * Metadata channel is not cleared per se but we still need to
+ * perform a rotation operation on it behind the scene.
*/
- continue;
- }
-
- /*
- * Clear the metadata channel.
- * Metadata channel is not cleared per se but we still need to
- * perform a rotation operation on it behind the scene.
- */
- ret = consumer_clear_channel(socket, ksess->metadata->key);
- if (ret < 0) {
- goto error;
+ ret = consumer_clear_channel(socket, ksess->metadata->key);
+ if (ret < 0) {
+ goto error;
+ }
}
}
break;
}
end:
- rcu_read_unlock();
return status;
}
}
/* Add trigger to kernel token mapping in the hash table. */
- rcu_read_lock();
- cds_lfht_add(kernel_token_to_event_notifier_rule_ht,
- hash_trigger(trigger),
- &event_notifier_rule->ht_node);
- rcu_read_unlock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_add(kernel_token_to_event_notifier_rule_ht,
+ hash_trigger(trigger),
+ &event_notifier_rule->ht_node);
+ }
DBG("Created kernel event notifier: name = '%s', fd = %d",
kernel_event_notifier.event.name,
enum lttng_error_code error_code_ret;
int ret;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_lookup(kernel_token_to_event_notifier_rule_ht,
hash_trigger(trigger),
error_code_ret = LTTNG_OK;
error:
- rcu_read_unlock();
return error_code_ret;
}
#include <common/common.hpp>
#include <common/kernel-ctl/kernel-ctl.hpp>
+#include <common/urcu.hpp>
#include <stdbool.h>
* syscall hashtable used to track duplicate between 32 and 64 bit arch.
*
* This empty the hash table and destroys it after. After this, the pointer is
- * unsuable. RCU read side lock MUST be acquired before calling this.
+ * unusable. RCU read side lock MUST NOT be acquired before calling this.
*/
static void destroy_syscall_ht(struct lttng_ht *ht)
{
struct lttng_ht_iter iter;
struct syscall *ksyscall;
- ASSERT_RCU_READ_LOCKED();
-
DBG3("Destroying syscall hash table.");
if (!ht) {
return;
}
- cds_lfht_for_each_entry (ht->ht, &iter.iter, ksyscall, node.node) {
- int ret;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ht->ht, &iter.iter, ksyscall, node.node) {
+ int ret;
- ret = lttng_ht_del(ht, &iter);
- LTTNG_ASSERT(!ret);
- free(ksyscall);
+ ret = lttng_ht_del(ht, &iter);
+ LTTNG_ASSERT(!ret);
+ free(ksyscall);
+ }
}
+
lttng_ht_destroy(ht);
}
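
The doc-comment flip from "MUST" to "MUST NOT" matches the new body: the function takes the read lock itself, only around the removal loop, and lttng_ht_destroy() must run outside any read-side critical section. For callers, as a sketch:

/* Correct under the new contract: no RCU read lock held. */
destroy_syscall_ht(syscalls_ht);

/* Incorrect: would run lttng_ht_destroy() inside a read-side
 * critical section. */
{
	lttng::urcu::read_lock_guard read_lock;
	destroy_syscall_ht(syscalls_ht); /* do not do this */
}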
/*
* Lookup a syscall in the given hash table by name.
*
+ * RCU read lock MUST be acquired by the callers of this function.
+ *
* Return syscall object if found or else NULL.
*/
static struct syscall *lookup_syscall(struct lttng_ht *ht, const char *name)
DBG("Syscall table listing.");
- rcu_read_lock();
-
/*
* Allocate at least the number of total syscall we have even if some of
* them might not be valid. The count below will make sure to return the
}
for (i = 0; i < syscall_table_nb_entry; i++) {
- struct syscall *ksyscall;
-
/* Skip empty syscalls. */
if (*syscall_table[i].name == '\0') {
continue;
}
- ksyscall = lookup_syscall(syscalls_ht, syscall_table[i].name);
- if (ksyscall) {
- update_event_syscall_bitness(events, i, ksyscall->index);
- continue;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ struct syscall *ksyscall;
+
+ ksyscall = lookup_syscall(syscalls_ht, syscall_table[i].name);
+ if (ksyscall) {
+ update_event_syscall_bitness(events, i, ksyscall->index);
+ continue;
+ }
}
ret = add_syscall_to_ht(syscalls_ht, i, index);
destroy_syscall_ht(syscalls_ht);
*_events = events;
- rcu_read_unlock();
return index;
error:
destroy_syscall_ht(syscalls_ht);
free(events);
- rcu_read_unlock();
return ret;
}
#include "utils.hpp"
#include <common/pipe.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
namespace {
if (!session_get(session)) {
continue;
}
+
session_lock(session);
if (session->kernel_session == nullptr) {
session_unlock(session);
session_put(session);
continue;
}
+
ksess = session->kernel_session;
cds_list_for_each_entry (channel, &ksess->channel_list.head, list) {
goto error;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (
- ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
- pthread_mutex_lock(socket->lock);
- ret = kernel_consumer_send_channel_streams(
- socket, channel, ksess, session->output_traces ? 1 : 0);
- pthread_mutex_unlock(socket->lock);
- if (ret < 0) {
- rcu_read_unlock();
- goto error;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
+ pthread_mutex_lock(socket->lock);
+ ret = kernel_consumer_send_channel_streams(
+ socket,
+ channel,
+ ksess,
+ session->output_traces ? 1 : 0);
+ pthread_mutex_unlock(socket->lock);
+ if (ret < 0) {
+ goto error;
+ }
}
}
- rcu_read_unlock();
}
+
session_unlock(session);
session_put(session);
}
#include <common/macros.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/unix.hpp>
+#include <common/urcu.hpp>
#include <lttng/action/action-internal.hpp>
#include <lttng/action/list-internal.hpp>
}
lttng_session_trigger_list_destroy(session_info->trigger_list);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_del(session_info->sessions_ht, &session_info->sessions_ht_node);
- rcu_read_unlock();
free(session_info->name);
lttng_trace_archive_location_put(session_info->last_state_sample.rotation.location);
call_rcu(&session_info->rcu_node, free_session_info_rcu);
static void session_info_add_channel(struct session_info *session_info,
struct channel_info *channel_info)
{
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_add(session_info->channel_infos_ht,
hash_channel_key(&channel_info->key),
&channel_info->session_info_channels_ht_node);
- rcu_read_unlock();
}
static void session_info_remove_channel(struct session_info *session_info,
struct channel_info *channel_info)
{
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_del(session_info->channel_infos_ht, &channel_info->session_info_channels_ht_node);
- rcu_read_unlock();
}
static struct channel_info *channel_info_create(const char *channel_name,
lttng_condition_put(list->condition);
if (list->notification_trigger_clients_ht) {
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_del(list->notification_trigger_clients_ht,
&list->notification_trigger_clients_ht_node);
- rcu_read_unlock();
list->notification_trigger_clients_ht = nullptr;
}
cds_list_for_each_entry_safe (client_list_element, tmp, &list->clients_list, node) {
*/
client_list->condition = lttng_condition_copy(condition);
- /* Build a list of clients to which this new condition applies. */
- cds_lfht_for_each_entry (state->client_socket_ht, &iter, client, client_socket_ht_node) {
- struct notification_client_list_element *client_list_element;
+ {
+ /* Build a list of clients to which this new condition applies. */
+ lttng::urcu::read_lock_guard read_lock;
- if (!condition_applies_to_client(condition, client)) {
- continue;
- }
+ cds_lfht_for_each_entry (
+ state->client_socket_ht, &iter, client, client_socket_ht_node) {
+ struct notification_client_list_element *client_list_element;
- client_list_element = zmalloc<notification_client_list_element>();
- if (!client_list_element) {
- goto error_put_client_list;
- }
+ if (!condition_applies_to_client(condition, client)) {
+ continue;
+ }
+
+ client_list_element = zmalloc<notification_client_list_element>();
+ if (!client_list_element) {
+ goto error_put_client_list;
+ }
- CDS_INIT_LIST_HEAD(&client_list_element->node);
- client_list_element->client = client;
- cds_list_add(&client_list_element->node, &client_list->clients_list);
+ CDS_INIT_LIST_HEAD(&client_list_element->node);
+ client_list_element->client = client;
+ cds_list_add(&client_list_element->node, &client_list->clients_list);
+ }
}
client_list->notification_trigger_clients_ht = state->notification_trigger_clients_ht;
- rcu_read_lock();
/*
* Add the client list to the global list of client list.
*/
- cds_lfht_add_unique(state->notification_trigger_clients_ht,
- lttng_condition_hash(client_list->condition),
- match_client_list_condition,
- client_list->condition,
- &client_list->notification_trigger_clients_ht_node);
- rcu_read_unlock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_add_unique(state->notification_trigger_clients_ht,
+ lttng_condition_hash(client_list->condition),
+ match_client_list_condition,
+ client_list->condition,
+ &client_list->notification_trigger_clients_ht_node);
+ }
goto end;
error_put_client_list:
struct cds_lfht_iter iter;
struct notification_client_list *list = nullptr;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_lookup(state->notification_trigger_clients_ht,
lttng_condition_hash(condition),
match_client_list_condition,
list = notification_client_list_get(list) ? list : nullptr;
}
- rcu_read_unlock();
return list;
}
struct channel_state_sample *last_sample = nullptr;
struct lttng_channel_trigger_list *channel_trigger_list = nullptr;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Find the channel associated with the condition. */
cds_lfht_for_each_entry (
*session_uid = channel_info->session_info->uid;
*session_gid = channel_info->session_info->gid;
end:
- rcu_read_unlock();
return ret;
}
cds_lfht_node_init(&list->session_triggers_ht_node);
list->session_triggers_ht = session_triggers_ht;
- rcu_read_lock();
/* Publish the list through the session_triggers_ht. */
- cds_lfht_add(session_triggers_ht,
- hash_key_str(session_name, lttng_ht_seed),
- &list->session_triggers_ht_node);
- rcu_read_unlock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_add(session_triggers_ht,
+ hash_key_str(session_name, lttng_ht_seed),
+ &list->session_triggers_ht_node);
+ }
end:
return list;
}
cds_list_del(&trigger_list_element->node);
free(trigger_list_element);
}
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Unpublish the list from the session_triggers_ht. */
cds_lfht_del(list->session_triggers_ht, &list->session_triggers_ht_node);
- rcu_read_unlock();
call_rcu(&list->rcu_node, free_session_trigger_list_rcu);
}
session_trigger_list =
lttng_session_trigger_list_create(session_name, state->session_triggers_ht);
- /* Add all triggers applying to the session named 'session_name'. */
- cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
- int ret;
+ {
+ /* Add all triggers applying to the session named 'session_name'. */
+ lttng::urcu::read_lock_guard read_lock;
- if (!trigger_applies_to_session(trigger_ht_element->trigger, session_name)) {
- continue;
- }
+ cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
+ int ret;
- ret = lttng_session_trigger_list_add(session_trigger_list,
- trigger_ht_element->trigger);
- if (ret) {
- goto error;
- }
+ if (!trigger_applies_to_session(trigger_ht_element->trigger,
+ session_name)) {
+ continue;
+ }
- trigger_count++;
+ ret = lttng_session_trigger_list_add(session_trigger_list,
+ trigger_ht_element->trigger);
+ if (ret) {
+ goto error;
+ }
+
+ trigger_count++;
+ }
}
DBG("Found %i triggers that apply to newly created session", trigger_count);
struct session_info *session = nullptr;
struct lttng_session_trigger_list *trigger_list;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
trigger_list = lttng_session_trigger_list_build(state, name);
if (!trigger_list) {
goto error;
goto error;
}
- rcu_read_unlock();
return session;
error:
- rcu_read_unlock();
session_info_put(session);
return nullptr;
}
int trigger_count = 0;
struct cds_lfht_iter iter;
struct session_info *session_info = nullptr;
+ lttng::urcu::read_lock_guard read_lock;
DBG("Adding channel: channel name = `%s`, session id = %" PRIu64 ", channel key = %" PRIu64
", domain = %s",
goto error;
}
- rcu_read_lock();
/* Build a list of all triggers applying to the new channel. */
cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
struct lttng_trigger_list_element *new_element;
new_element = zmalloc<lttng_trigger_list_element>();
if (!new_element) {
- rcu_read_unlock();
goto error;
}
CDS_INIT_LIST_HEAD(&new_element->node);
cds_list_add(&new_element->node, &trigger_list);
trigger_count++;
}
- rcu_read_unlock();
DBG("Found %i triggers that apply to newly added channel", trigger_count);
channel_trigger_list = zmalloc<lttng_channel_trigger_list>();
cds_lfht_node_init(&channel_trigger_list->channel_triggers_ht_node);
cds_list_splice(&trigger_list, &channel_trigger_list->list);
- rcu_read_lock();
/* Add channel to the channel_ht which owns the channel_infos. */
cds_lfht_add(state->channels_ht,
hash_channel_key(&new_channel_info->key),
cds_lfht_add(state->channel_triggers_ht,
hash_channel_key(&new_channel_info->key),
&channel_trigger_list->channel_triggers_ht_node);
- rcu_read_unlock();
session_info_put(session_info);
*cmd_result = LTTNG_OK;
return 0;
channel_key,
lttng_domain_type_str(domain));
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_lookup(state->channel_triggers_ht,
hash_channel_key(&key),
cds_lfht_del(state->channels_ht, node);
channel_info_destroy(channel_info);
end:
- rcu_read_unlock();
*cmd_result = LTTNG_OK;
return 0;
}
struct lttng_credentials session_creds;
struct session_state_sample new_session_state;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
session_info = get_session_info_by_id(state, session_id);
if (!session_info) {
session_info_put(session_info);
*_cmd_result = cmd_result;
- rcu_read_unlock();
return ret;
}
struct lttng_triggers *local_triggers = nullptr;
const struct lttng_credentials *creds;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
local_triggers = lttng_triggers_create();
if (!local_triggers) {
local_triggers = nullptr;
end:
- rcu_read_unlock();
lttng_triggers_destroy(local_triggers);
*_cmd_result = cmd_result;
return ret;
const char *trigger_name;
uid_t trigger_owner_uid;
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
- if (lttng_trigger_is_equal(trigger, trigger_ht_element->trigger)) {
- /* Take one reference on the return trigger. */
- *registered_trigger = trigger_ht_element->trigger;
- lttng_trigger_get(*registered_trigger);
- ret = 0;
- cmd_result = LTTNG_OK;
- goto end;
+ cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
+ if (lttng_trigger_is_equal(trigger, trigger_ht_element->trigger)) {
+ /* Take one reference on the return trigger. */
+ *registered_trigger = trigger_ht_element->trigger;
+ lttng_trigger_get(*registered_trigger);
+ ret = 0;
+ cmd_result = LTTNG_OK;
+ goto end;
+ }
}
}
ret = 0;
end:
- rcu_read_unlock();
*_cmd_result = cmd_result;
return ret;
}
enum action_executor_status executor_status;
const uint64_t trigger_tracer_token = state->trigger_id.next_tracer_token++;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Set the trigger's tracer token. */
lttng_trigger_set_tracer_token(trigger, trigger_tracer_token);
lttng_trigger_destroy(trigger);
}
end:
- rcu_read_unlock();
return ret;
}
struct cds_lfht_iter iter;
struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element;
- cds_lfht_for_each_entry (state->trigger_tokens_ht, &iter, trigger_tokens_ht_element, node) {
- if (!lttng_trigger_is_equal(trigger, trigger_tokens_ht_element->trigger)) {
- continue;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ state->trigger_tokens_ht, &iter, trigger_tokens_ht_element, node) {
+ if (!lttng_trigger_is_equal(trigger, trigger_tokens_ht_element->trigger)) {
+ continue;
+ }
- event_notifier_error_accounting_unregister_event_notifier(
- trigger_tokens_ht_element->trigger);
+ event_notifier_error_accounting_unregister_event_notifier(
+ trigger_tokens_ht_element->trigger);
- /* TODO talk to all app and remove it */
- DBG("Removed trigger from tokens_ht");
- cds_lfht_del(state->trigger_tokens_ht, &trigger_tokens_ht_element->node);
+ /* TODO: talk to all apps and remove it */
+ DBG("Removed trigger from tokens_ht");
+ cds_lfht_del(state->trigger_tokens_ht, &trigger_tokens_ht_element->node);
- call_rcu(&trigger_tokens_ht_element->rcu_node,
- free_notification_trigger_tokens_ht_element_rcu);
+ call_rcu(&trigger_tokens_ht_element->rcu_node,
+ free_notification_trigger_tokens_ht_element_rcu);
- break;
+ break;
+ }
}
}
const struct lttng_condition *condition = lttng_trigger_get_const_condition(trigger);
enum lttng_error_code cmd_reply;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_lookup(
state->triggers_ht, lttng_condition_hash(condition), match_trigger, trigger, &iter);
lttng_trigger_destroy(trigger_ht_element->trigger);
call_rcu(&trigger_ht_element->rcu_node, free_lttng_trigger_ht_element_rcu);
end:
- rcu_read_unlock();
if (_cmd_reply) {
*_cmd_reply = cmd_reply;
}
cmd->parameters.client_communication_update.id;
struct notification_client *client;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
client = get_client_from_id(client_id, state);
if (!client) {
} else {
ret = client_handle_transmission_status(client, client_status, state);
}
- rcu_read_unlock();
break;
}
default:
}
DBG("Added new notification channel client socket (%i) to poll set", client->socket);
- rcu_read_lock();
- cds_lfht_add(state->client_socket_ht,
- hash_client_socket(client->socket),
- &client->client_socket_ht_node);
- cds_lfht_add(state->client_id_ht, hash_client_id(client->id), &client->client_id_ht_node);
- rcu_read_unlock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_add(state->client_socket_ht,
+ hash_client_socket(client->socket),
+ &client->client_socket_ht_node);
+ cds_lfht_add(state->client_id_ht,
+ hash_client_id(client->id),
+ &client->client_id_ht_node);
+ }
return ret;
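When work must continue after the critical section, the patch bounds the guard with an explicit { } block, as above, so the lock is released at the closing brace instead of at function exit. Several later hunks rely on this before lttng_ht_destroy(): liburcu's cds_lfht_destroy() must not be called from within a read-side critical section. A sketch of that pattern, assuming the hashtable wrapper used throughout this patch (the function name is illustrative):

#include <common/hashtable/hashtable.hpp>
#include <common/urcu.hpp>

static void drain_and_destroy(struct lttng_ht *ht)
{
	{
		lttng::urcu::read_lock_guard read_lock;

		/* Iterate and unlink every node under the read-side lock. */
	} /* Read lock released here, at the closing brace. */

	/*
	 * Safe only now: destroying the table may block on a grace
	 * period and must not run inside a read-side critical section.
	 */
	lttng_ht_destroy(ht);
}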
int ret = 0;
struct notification_client *client;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
DBG("Closing client connection (socket fd = %i)", client_socket);
client = get_client_from_socket(client_socket, state);
if (!client) {
ret = notification_thread_client_disconnect(client, state);
end:
- rcu_read_unlock();
return ret;
}
struct notification_client *client;
bool error_encoutered = false;
- rcu_read_lock();
DBG("Closing all client connections");
- cds_lfht_for_each_entry (state->client_socket_ht, &iter, client, client_socket_ht_node) {
- int ret;
- ret = notification_thread_client_disconnect(client, state);
- if (ret) {
- error_encoutered = true;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ state->client_socket_ht, &iter, client, client_socket_ht_node) {
+ int ret;
+
+ ret = notification_thread_client_disconnect(client, state);
+ if (ret) {
+ error_encoutered = true;
+ }
}
}
- rcu_read_unlock();
+
return error_encoutered ? 1 : 0;
}
struct cds_lfht_iter iter;
struct lttng_trigger_ht_element *trigger_ht_element;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
int ret = handle_notification_thread_command_unregister_trigger(
state, trigger_ht_element->trigger, nullptr);
error_occurred = true;
}
}
- rcu_read_unlock();
return error_occurred ? -1 : 0;
}
ssize_t recv_ret;
size_t offset;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
client = get_client_from_socket(socket, state);
if (!client) {
/* Internal error, abort. */
}
end:
- rcu_read_unlock();
return ret;
error_disconnect_client:
struct notification_client *client;
enum client_transmission_status transmission_status;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
client = get_client_from_socket(socket, state);
if (!client) {
/* Internal error, abort. */
goto end;
}
end:
- rcu_read_unlock();
return ret;
}
unsigned int capture_count = 0;
/* Find triggers associated with this token. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_lookup(state->trigger_tokens_ht,
hash_key_u64(&notification->tracer_token, lttng_ht_seed),
match_trigger_token,
end_unlock:
notification_client_list_put(client_list);
- rcu_read_unlock();
end:
return ret;
}
struct lttng_credentials channel_creds = {};
struct lttng_credentials session_creds = {};
struct session_info *session;
+ lttng::urcu::read_lock_guard read_lock;
/*
* The monitoring pipe only holds messages smaller than PIPE_BUF,
channel_new_sample.highest_usage = sample_msg.highest;
channel_new_sample.lowest_usage = sample_msg.lowest;
- rcu_read_lock();
-
session = get_session_info_by_id(state, sample_msg.session_id);
if (!session) {
DBG("Received a sample for an unknown session from consumerd: session id = %" PRIu64,
session->last_state_sample = session_new_sample;
}
session_info_put(session);
- rcu_read_unlock();
end:
return ret;
}
#include <common/hashtable/utils.hpp>
#include <common/kernel-ctl/kernel-ctl.hpp>
#include <common/time.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <lttng/condition/condition-internal.hpp>
uint64_t relayd_id;
bool chunk_exists_on_peer = false;
enum lttng_trace_chunk_status chunk_status;
+ lttng::urcu::read_lock_guard read_lock;
LTTNG_ASSERT(session->chunk_being_archived);
* Check for a local pending rotation on all consumers (32-bit
* user space, 64-bit user space, and kernel).
*/
- rcu_read_lock();
if (!session->ust_session) {
goto skip_ust;
}
+
cds_lfht_for_each_entry (
session->ust_session->consumer->socks->ht, &iter, socket, node.node) {
relayd_id = session->ust_session->consumer->type == CONSUMER_DST_LOCAL ?
}
skip_kernel:
end:
- rcu_read_unlock();
if (!chunk_exists_on_peer) {
uint64_t chunk_being_archived_id;
#include <common/defaults.hpp>
#include <common/error.hpp>
#include <common/runas.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <lttng/save-internal.hpp>
goto end;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (events->ht, &iter.iter, node, node) {
- event = lttng::utils::container_of(node, &ltt_ust_event::node);
+ {
+ lttng::urcu::read_lock_guard read_lock;
- if (event->internal) {
- /* Internal events must not be exposed to clients */
- continue;
- }
- ret = save_ust_event(writer, event);
- if (ret != LTTNG_OK) {
- rcu_read_unlock();
- goto end;
+ cds_lfht_for_each_entry (events->ht, &iter.iter, node, node) {
+ event = lttng::utils::container_of(node, &ltt_ust_event::node);
+
+ if (event->internal) {
+ /* Internal events must not be exposed to clients */
+ continue;
+ }
+ ret = save_ust_event(writer, event);
+ if (ret != LTTNG_OK) {
+ goto end;
+ }
}
}
- rcu_read_unlock();
/* /events */
ret = config_writer_close_element(writer);
goto end;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (agent->events->ht, &iter.iter, node, node) {
- struct agent_event *agent_event;
- struct ltt_ust_event fake_event;
-
- memset(&fake_event, 0, sizeof(fake_event));
- agent_event = lttng::utils::container_of(node, &agent_event::node);
-
- /*
- * Initialize a fake ust event to reuse the same serialization
- * function since UST and agent events contain the same info
- * (and one could wonder why they don't reuse the same
- * structures...).
- */
- ret = init_ust_event_from_agent_event(&fake_event, agent_event);
- if (ret != LTTNG_OK) {
- rcu_read_unlock();
- goto end;
- }
- ret = save_ust_event(writer, &fake_event);
- if (ret != LTTNG_OK) {
- rcu_read_unlock();
- goto end;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (agent->events->ht, &iter.iter, node, node) {
+ struct agent_event *agent_event;
+ struct ltt_ust_event fake_event;
+
+ memset(&fake_event, 0, sizeof(fake_event));
+ agent_event = lttng::utils::container_of(node, &agent_event::node);
+
+ /*
+ * Initialize a fake ust event to reuse the same serialization
+ * function since UST and agent events contain the same info
+ * (and one could wonder why they don't reuse the same
+ * structures...).
+ */
+ ret = init_ust_event_from_agent_event(&fake_event, agent_event);
+ if (ret != LTTNG_OK) {
+ goto end;
+ }
+ ret = save_ust_event(writer, &fake_event);
+ if (ret != LTTNG_OK) {
+ goto end;
+ }
}
}
- rcu_read_unlock();
/* /events */
ret = config_writer_close_element(writer);
goto end;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (
- session->ust_session->domain_global.channels->ht, &iter.iter, node, node) {
- ust_chan = lttng::utils::container_of(node, &ltt_ust_channel::node);
- if (domain == ust_chan->domain) {
- ret = save_ust_channel(writer, ust_chan, session->ust_session);
- if (ret != LTTNG_OK) {
- rcu_read_unlock();
- goto end;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ session->ust_session->domain_global.channels->ht, &iter.iter, node, node) {
+ ust_chan = lttng::utils::container_of(node, &ltt_ust_channel::node);
+ if (domain == ust_chan->domain) {
+ ret = save_ust_channel(writer, ust_chan, session->ust_session);
+ if (ret != LTTNG_OK) {
+ goto end;
+ }
}
}
}
- rcu_read_unlock();
/* /channels */
ret = config_writer_close_element(writer);
goto end;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (snapshot->output_ht->ht, &iter.iter, output, node.node) {
- ret = config_writer_open_element(writer, config_element_output);
- if (ret) {
- ret = LTTNG_ERR_SAVE_IO_FAIL;
- goto end_unlock;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- ret = config_writer_write_element_string(writer, config_element_name, output->name);
- if (ret) {
- ret = LTTNG_ERR_SAVE_IO_FAIL;
- goto end_unlock;
- }
+ cds_lfht_for_each_entry (snapshot->output_ht->ht, &iter.iter, output, node.node) {
+ ret = config_writer_open_element(writer, config_element_output);
+ if (ret) {
+ ret = LTTNG_ERR_SAVE_IO_FAIL;
+ goto end_unlock;
+ }
- ret = config_writer_write_element_unsigned_int(
- writer, config_element_max_size, output->max_size);
- if (ret) {
- ret = LTTNG_ERR_SAVE_IO_FAIL;
- goto end_unlock;
- }
+ ret = config_writer_write_element_string(
+ writer, config_element_name, output->name);
+ if (ret) {
+ ret = LTTNG_ERR_SAVE_IO_FAIL;
+ goto end_unlock;
+ }
- ret = save_consumer_output(writer, output->consumer);
- if (ret != LTTNG_OK) {
- goto end_unlock;
- }
+ ret = config_writer_write_element_unsigned_int(
+ writer, config_element_max_size, output->max_size);
+ if (ret) {
+ ret = LTTNG_ERR_SAVE_IO_FAIL;
+ goto end_unlock;
+ }
- /* /output */
- ret = config_writer_close_element(writer);
- if (ret) {
- ret = LTTNG_ERR_SAVE_IO_FAIL;
- goto end_unlock;
+ ret = save_consumer_output(writer, output->consumer);
+ if (ret != LTTNG_OK) {
+ goto end_unlock;
+ }
+
+ /* /output */
+ ret = config_writer_close_element(writer);
+ if (ret) {
+ ret = LTTNG_ERR_SAVE_IO_FAIL;
+ goto end_unlock;
+ }
}
}
- rcu_read_unlock();
/* /snapshot_outputs */
ret = config_writer_close_element(writer);
end:
return ret;
end_unlock:
- rcu_read_unlock();
return ret;
}
uint64_t chunk_id;
enum lttng_trace_chunk_status chunk_status;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/*
* Ownership of current trace chunk is transferred to
* `current_trace_chunk`.
current_trace_chunk = nullptr;
}
end_no_move:
- rcu_read_unlock();
lttng_trace_chunk_put(current_trace_chunk);
return ret;
error:
struct cds_lfht_node *node;
struct ltt_kernel_channel *chan;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_first(session->kernel_session->consumer->socks->ht, &iter.iter);
node = cds_lfht_iter_get_node(&iter.iter);
}
end:
- rcu_read_unlock();
return ret;
}
struct lttng_ht_iter iter;
struct ltt_session *ls;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!ltt_sessions_ht_by_name) {
found = false;
DBG3("Session id `%" PRIu64 "` sampled for session `%s", *id, name);
end:
- rcu_read_unlock();
return found;
}
#include "utils.hpp"
#include <common/defaults.hpp>
+#include <common/urcu.hpp>
#include <inttypes.h>
#include <string.h>
LTTNG_ASSERT(output);
iter.iter.node = &output->node.node;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
ret = lttng_ht_del(snapshot->output_ht, &iter);
- rcu_read_unlock();
LTTNG_ASSERT(!ret);
/*
* This is safe because the ownership of a snapshot object is in a session
LTTNG_ASSERT(snapshot->output_ht);
LTTNG_ASSERT(output);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_add_unique_ulong(snapshot->output_ht, &output->node);
- rcu_read_unlock();
/*
* This is safe because the ownership of a snapshot object is in a session
* for which the session lock needs to be acquired to read and modify it.
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (obj->output_ht->ht, &iter.iter, output, node.node) {
- snapshot_delete_output(obj, output);
- snapshot_output_destroy(output);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (obj->output_ht->ht, &iter.iter, output, node.node) {
+ snapshot_delete_output(obj, output);
+ snapshot_output_destroy(output);
+ }
}
- rcu_read_unlock();
+
lttng_ht_destroy(obj->output_ht);
}
#include <common/common.hpp>
#include <common/defaults.hpp>
#include <common/trace-chunk.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <inttypes.h>
if (!id_tracker->ht) {
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (id_tracker->ht->ht, &iter.iter, tracker_node, node.node) {
- int ret = lttng_ht_del(id_tracker->ht, &iter);
- LTTNG_ASSERT(!ret);
- destroy_id_tracker_node(tracker_node);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (id_tracker->ht->ht, &iter.iter, tracker_node, node.node) {
+ int ret = lttng_ht_del(id_tracker->ht, &iter);
+
+ LTTNG_ASSERT(!ret);
+ destroy_id_tracker_node(tracker_node);
+ }
}
- rcu_read_unlock();
+
lttng_ht_destroy(id_tracker->ht);
id_tracker->ht = nullptr;
}
LTTNG_ASSERT(ht);
- rcu_read_lock();
- cds_lfht_for_each_entry (ht->ht, &iter.iter, node, node) {
- /* Remove from ordered list. */
- ctx = lttng::utils::container_of(node, &ltt_ust_context::node);
- cds_list_del(&ctx->list);
- /* Remove from channel's hash table. */
- ret = lttng_ht_del(ht, &iter);
- if (!ret) {
- call_rcu(&node->head, destroy_context_rcu);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ht->ht, &iter.iter, node, node) {
+ /* Remove from ordered list. */
+ ctx = lttng::utils::container_of(node, &ltt_ust_context::node);
+ cds_list_del(&ctx->list);
+ /* Remove from channel's hash table. */
+ ret = lttng_ht_del(ht, &iter);
+ if (!ret) {
+ call_rcu(&node->head, destroy_context_rcu);
+ }
}
}
- rcu_read_unlock();
lttng_ht_destroy(ht);
}
LTTNG_ASSERT(events);
- rcu_read_lock();
- cds_lfht_for_each_entry (events->ht, &iter.iter, node, node) {
- ret = lttng_ht_del(events, &iter);
- LTTNG_ASSERT(!ret);
- call_rcu(&node->head, destroy_event_rcu);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (events->ht, &iter.iter, node, node) {
+ ret = lttng_ht_del(events, &iter);
+ LTTNG_ASSERT(!ret);
+ call_rcu(&node->head, destroy_event_rcu);
+ }
}
- rcu_read_unlock();
lttng_ht_destroy(events);
}
struct buffer_reg_uid *uid_reg = nullptr;
struct buffer_reg_session *session_reg = nullptr;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_list_for_each_entry (uid_reg, &usess->buffer_reg_uid_list, lnode) {
lsu::registry_session *registry;
}
end:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(channels);
- rcu_read_lock();
- cds_lfht_for_each_entry (channels->ht, &iter.iter, node, node) {
- struct ltt_ust_channel *chan =
- lttng::utils::container_of(node, &ltt_ust_channel::node);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (channels->ht, &iter.iter, node, node) {
+ struct ltt_ust_channel *chan =
+ lttng::utils::container_of(node, &ltt_ust_channel::node);
- trace_ust_delete_channel(channels, chan);
- trace_ust_destroy_channel(chan);
+ trace_ust_delete_channel(channels, chan);
+ trace_ust_destroy_channel(chan);
+ }
}
- rcu_read_unlock();
lttng_ht_destroy(channels);
}
/* Cleaning up UST domain */
destroy_domain_global(&session->domain_global);
- rcu_read_lock();
- cds_lfht_for_each_entry (session->agents->ht, &iter.iter, agt, node.node) {
- int ret = lttng_ht_del(session->agents, &iter);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (session->agents->ht, &iter.iter, agt, node.node) {
+ int ret = lttng_ht_del(session->agents, &iter);
- LTTNG_ASSERT(!ret);
- agent_destroy(agt);
+ LTTNG_ASSERT(!ret);
+ agent_destroy(agt);
+ }
}
- rcu_read_unlock();
lttng_ht_destroy(session->agents);
#include <common/hashtable/hashtable.hpp>
#include <common/hashtable/utils.hpp>
#include <common/tracker.hpp>
+#include <common/urcu.hpp>
#include <lttng/lttng-error.h>
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (
- tracker->inclusion_set_ht, &iter.iter, value_node, inclusion_set_ht_node) {
- process_attr_tracker_remove_value_node(tracker, value_node);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ tracker->inclusion_set_ht, &iter.iter, value_node, inclusion_set_ht_node) {
+ process_attr_tracker_remove_value_node(tracker, value_node);
+ }
}
- rcu_read_unlock();
+
ret = cds_lfht_destroy(tracker->inclusion_set_ht, nullptr);
LTTNG_ASSERT(ret == 0);
tracker->inclusion_set_ht = nullptr;
LTTNG_ASSERT(tracker->policy == LTTNG_TRACKING_POLICY_INCLUDE_SET);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_lookup(tracker->inclusion_set_ht,
process_attr_value_hash(value),
match_inclusion_set_value,
value,
&iter);
node = cds_lfht_iter_get_node(&iter);
- rcu_read_unlock();
return node ? lttng::utils::container_of(
node, &process_attr_tracker_value_node::inclusion_set_ht_node) :
struct process_attr_value *value_copy = nullptr;
struct process_attr_tracker_value_node *value_node = nullptr;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (tracker->policy != LTTNG_TRACKING_POLICY_INCLUDE_SET) {
status = PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY;
goto end;
if (value_node) {
free(value_node);
}
- rcu_read_unlock();
return status;
}
struct process_attr_tracker_value_node *value_node;
enum process_attr_tracker_status status = PROCESS_ATTR_TRACKER_STATUS_OK;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (tracker->policy != LTTNG_TRACKING_POLICY_INCLUDE_SET) {
status = PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY;
goto end;
process_attr_tracker_remove_value_node(tracker, value_node);
end:
- rcu_read_unlock();
return status;
}
goto error;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (
- tracker->inclusion_set_ht, &iter.iter, value_node, inclusion_set_ht_node) {
- int ret;
+ {
+ lttng::urcu::read_lock_guard read_lock;
- new_value = process_attr_value_copy(value_node->value);
- if (!new_value) {
- status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
- goto error_unlock;
- }
+ cds_lfht_for_each_entry (
+ tracker->inclusion_set_ht, &iter.iter, value_node, inclusion_set_ht_node) {
+ int ret;
- ret = lttng_dynamic_pointer_array_add_pointer(&values->array, new_value);
- if (ret) {
- status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
- goto error_unlock;
- }
+ new_value = process_attr_value_copy(value_node->value);
+ if (!new_value) {
+ status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
+ goto error_unlock;
+ }
+
+ ret = lttng_dynamic_pointer_array_add_pointer(&values->array, new_value);
+ if (ret) {
+ status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
+ goto error_unlock;
+ }
- new_value = nullptr;
+ new_value = nullptr;
+ }
}
- rcu_read_unlock();
+
*_values = values;
return status;
error_unlock:
- rcu_read_unlock();
error:
lttng_process_attr_values_destroy(values);
process_attr_value_destroy(new_value);
return;
}
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
session = session_find_by_id(ua_chan->session->tracing_id);
if (!session || !session->ust_session) {
/*
uchan->per_pid_closed_app_lost += lost;
end:
- rcu_read_unlock();
if (session) {
session_put(session);
}
/* Wipe sessions */
cds_list_for_each_entry_safe (ua_sess, tmp_ua_sess, &app->teardown_head, teardown_node) {
/* Free every object in the session and the session. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
delete_ust_app_session(sock, ua_sess, app);
- rcu_read_unlock();
}
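Note that the guard above is declared inside the loop body: the read-side lock is therefore taken and released once per iteration, exactly like the rcu_read_lock()/rcu_read_unlock() pair it replaces. In sketch form, using the identifiers from the hunk itself:

cds_list_for_each_entry_safe (ua_sess, tmp_ua_sess, &app->teardown_head, teardown_node) {
	/* Acquired at the top of every iteration... */
	lttng::urcu::read_lock_guard read_lock;

	delete_ust_app_session(sock, ua_sess, app);
	/* ...and released at the bottom, before the next element. */
}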
/* Remove the event notifier rules associated with this app. */
- rcu_read_lock();
- cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
- &iter.iter,
- event_notifier_rule,
- node.node) {
- ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
- LTTNG_ASSERT(!ret);
+ {
+ lttng::urcu::read_lock_guard read_lock;
- delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
- }
+ cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+ &iter.iter,
+ event_notifier_rule,
+ node.node) {
+ ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
+ LTTNG_ASSERT(!ret);
- rcu_read_unlock();
+ delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
+ }
+ }
lttng_ht_destroy(app->sessions);
lttng_ht_destroy(app->ust_sessions_objd);
LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(app);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
reg_pid = buffer_reg_pid_find(ua_sess->id);
if (!reg_pid) {
*regp = reg_pid;
}
error:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(usess);
LTTNG_ASSERT(app);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
if (!reg_uid) {
*regp = reg_uid;
}
error:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(ua_chan);
LTTNG_ASSERT(registry);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
health_code_update();
/* Get the right consumer socket for the application. */
}
}
- rcu_read_unlock();
return 0;
error_destroy:
lttng_fd_put(LTTNG_FD_APPS, 1);
error:
health_code_update();
- rcu_read_unlock();
return ret;
}
DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
registry = get_session_registry(ua_sess);
/* The UST app session lock is held, registry shall not be null. */
}
}
error:
- rcu_read_unlock();
if (session) {
session_put(session);
}
app->registration_time = time(nullptr);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/*
* On a re-registration, we want to kick out the previous registration of
app->notify_sock,
app->v_major,
app->v_minor);
-
- rcu_read_unlock();
}
/*
struct ust_app_session *ua_sess;
int ret;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Get the node reference for a call_rcu */
lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock), &ust_app_sock_iter);
/* Free memory */
call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
- rcu_read_unlock();
return;
}
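The lookup above runs under the guard, but reclamation is deferred: call_rcu() invokes delete_ust_app_rcu only after a grace period, once every pre-existing reader has left its critical section. A self-contained sketch of that reclamation pattern (the tracked_app type and both function names are invented for illustration):

#include <stdlib.h>

#include <urcu.h>
#include <urcu/compiler.h>

struct tracked_app {
	struct rcu_head rcu_node;
	int pid;
};

static void free_tracked_app(struct rcu_head *head)
{
	/* Runs after a grace period: no reader can still see the node. */
	free(caa_container_of(head, struct tracked_app, rcu_node));
}

static void retire_app(struct tracked_app *app)
{
	/* The caller has already unlinked 'app' from all RCU structures. */
	call_rcu(&app->rcu_node, free_tracked_app);
}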
goto error;
}
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ust_abi_tracepoint_iter uiter;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ust_abi_tracepoint_iter uiter;
- health_code_update();
+ health_code_update();
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- pthread_mutex_lock(&app->sock_lock);
- handle = lttng_ust_ctl_tracepoint_list(app->sock);
- if (handle < 0) {
- if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app list events getting handle failed for app pid %d",
- app->pid);
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notify the caller of this error by
+ * telling them that this is a version error.
+ */
+ continue;
}
- pthread_mutex_unlock(&app->sock_lock);
- continue;
- }
- while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle, &uiter)) !=
- -LTTNG_UST_ERR_NOENT) {
- /* Handle ustctl error. */
- if (ret < 0) {
- int release_ret;
-
- if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
- ERR("UST app tp list get failed for app %d with ret %d",
- app->sock,
- ret);
- } else {
- DBG3("UST app tp list get failed. Application is dead");
- break;
- }
- free(tmp_event);
- release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
- if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING &&
- release_ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d",
- app->sock,
- release_ret);
+ pthread_mutex_lock(&app->sock_lock);
+ handle = lttng_ust_ctl_tracepoint_list(app->sock);
+ if (handle < 0) {
+ if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app list events getting handle failed for app pid %d",
+ app->pid);
}
pthread_mutex_unlock(&app->sock_lock);
- goto rcu_error;
+ continue;
}
- health_code_update();
- if (count >= nbmem) {
- /* In case the realloc fails, we free the memory */
- struct lttng_event *new_tmp_event;
- size_t new_nbmem;
-
- new_nbmem = nbmem << 1;
- DBG2("Reallocating event list from %zu to %zu entries",
- nbmem,
- new_nbmem);
- new_tmp_event = (lttng_event *) realloc(
- tmp_event, new_nbmem * sizeof(struct lttng_event));
- if (new_tmp_event == nullptr) {
+ while ((ret = lttng_ust_ctl_tracepoint_list_get(
+ app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
+ /* Handle ustctl error. */
+ if (ret < 0) {
int release_ret;
- PERROR("realloc ust app events");
+ if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("UST app tp list get failed for app %d with ret %d",
+ app->sock,
+ ret);
+ } else {
+ DBG3("UST app tp list get failed. Application is dead");
+ break;
+ }
+
free(tmp_event);
- ret = -ENOMEM;
release_ret =
lttng_ust_ctl_release_handle(app->sock, handle);
if (release_ret < 0 &&
app->sock,
release_ret);
}
+
pthread_mutex_unlock(&app->sock_lock);
goto rcu_error;
}
- /* Zero the new memory */
- memset(new_tmp_event + nbmem,
- 0,
- (new_nbmem - nbmem) * sizeof(struct lttng_event));
- nbmem = new_nbmem;
- tmp_event = new_tmp_event;
+
+ health_code_update();
+ if (count >= nbmem) {
+ /* In case the realloc fails, we free the memory */
+ struct lttng_event *new_tmp_event;
+ size_t new_nbmem;
+
+ new_nbmem = nbmem << 1;
+ DBG2("Reallocating event list from %zu to %zu entries",
+ nbmem,
+ new_nbmem);
+ new_tmp_event = (lttng_event *) realloc(
+ tmp_event, new_nbmem * sizeof(struct lttng_event));
+ if (new_tmp_event == nullptr) {
+ int release_ret;
+
+ PERROR("realloc ust app events");
+ free(tmp_event);
+ ret = -ENOMEM;
+ release_ret = lttng_ust_ctl_release_handle(
+ app->sock, handle);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ release_ret);
+ }
+
+ pthread_mutex_unlock(&app->sock_lock);
+ goto rcu_error;
+ }
+ /* Zero the new memory */
+ memset(new_tmp_event + nbmem,
+ 0,
+ (new_nbmem - nbmem) * sizeof(struct lttng_event));
+ nbmem = new_nbmem;
+ tmp_event = new_tmp_event;
+ }
+
+ memcpy(tmp_event[count].name,
+ uiter.name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ tmp_event[count].loglevel = uiter.loglevel;
+ tmp_event[count].type =
+ (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
+ tmp_event[count].pid = app->pid;
+ tmp_event[count].enabled = -1;
+ count++;
}
- memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
- tmp_event[count].loglevel = uiter.loglevel;
- tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
- tmp_event[count].pid = app->pid;
- tmp_event[count].enabled = -1;
- count++;
- }
- ret = lttng_ust_ctl_release_handle(app->sock, handle);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0) {
- if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
- DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
- app->pid,
- app->sock);
- } else if (ret == -EAGAIN) {
- WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
- app->pid,
- app->sock);
- } else {
- ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
- ret,
- app->pid,
- app->sock);
+
+ ret = lttng_ust_ctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
+ app->pid,
+ app->sock);
+ } else if (ret == -EAGAIN) {
+ WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
+ app->pid,
+ app->sock);
+ } else {
+ ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
+ ret,
+ app->pid,
+ app->sock);
+ }
}
}
}
DBG2("UST app list events done (%zu events)", count);
rcu_error:
- rcu_read_unlock();
error:
health_code_update();
return ret;
goto error;
}
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ust_abi_field_iter uiter;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ust_abi_field_iter uiter;
- health_code_update();
+ health_code_update();
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- pthread_mutex_lock(&app->sock_lock);
- handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
- if (handle < 0) {
- if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app list field getting handle failed for app pid %d",
- app->pid);
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notify the caller of this error by
+ * telling them that this is a version error.
+ */
+ continue;
}
- pthread_mutex_unlock(&app->sock_lock);
- continue;
- }
-
- while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle, &uiter)) !=
- -LTTNG_UST_ERR_NOENT) {
- /* Handle ustctl error. */
- if (ret < 0) {
- int release_ret;
- if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
- ERR("UST app tp list field failed for app %d with ret %d",
- app->sock,
- ret);
- } else {
- DBG3("UST app tp list field failed. Application is dead");
- break;
+ pthread_mutex_lock(&app->sock_lock);
+ handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
+ if (handle < 0) {
+ if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app list field getting handle failed for app pid %d",
+ app->pid);
}
- free(tmp_event);
- release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
pthread_mutex_unlock(&app->sock_lock);
- if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING &&
- release_ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d",
- app->sock,
- release_ret);
- }
- goto rcu_error;
+ continue;
}
- health_code_update();
- if (count >= nbmem) {
- /* In case the realloc fails, we free the memory */
- struct lttng_event_field *new_tmp_event;
- size_t new_nbmem;
-
- new_nbmem = nbmem << 1;
- DBG2("Reallocating event field list from %zu to %zu entries",
- nbmem,
- new_nbmem);
- new_tmp_event = (lttng_event_field *) realloc(
- tmp_event, new_nbmem * sizeof(struct lttng_event_field));
- if (new_tmp_event == nullptr) {
+ while ((ret = lttng_ust_ctl_tracepoint_field_list_get(
+ app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
+ /* Handle ustctl error. */
+ if (ret < 0) {
int release_ret;
- PERROR("realloc ust app event fields");
+ if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("UST app tp list field failed for app %d with ret %d",
+ app->sock,
+ ret);
+ } else {
+ DBG3("UST app tp list field failed. Application is dead");
+ break;
+ }
+
free(tmp_event);
- ret = -ENOMEM;
release_ret =
lttng_ust_ctl_release_handle(app->sock, handle);
pthread_mutex_unlock(&app->sock_lock);
- if (release_ret && release_ret != -LTTNG_UST_ERR_EXITING &&
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
release_ret != -EPIPE) {
ERR("Error releasing app handle for app %d with ret %d",
app->sock,
release_ret);
}
+
goto rcu_error;
}
- /* Zero the new memory */
- memset(new_tmp_event + nbmem,
- 0,
- (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
- nbmem = new_nbmem;
- tmp_event = new_tmp_event;
+
+ health_code_update();
+ if (count >= nbmem) {
+ /* In case the realloc fails, we free the memory */
+ struct lttng_event_field *new_tmp_event;
+ size_t new_nbmem;
+
+ new_nbmem = nbmem << 1;
+ DBG2("Reallocating event field list from %zu to %zu entries",
+ nbmem,
+ new_nbmem);
+ new_tmp_event = (lttng_event_field *) realloc(
+ tmp_event,
+ new_nbmem * sizeof(struct lttng_event_field));
+ if (new_tmp_event == nullptr) {
+ int release_ret;
+
+ PERROR("realloc ust app event fields");
+ free(tmp_event);
+ ret = -ENOMEM;
+ release_ret = lttng_ust_ctl_release_handle(
+ app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (release_ret &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ release_ret);
+ }
+
+ goto rcu_error;
+ }
+
+ /* Zero the new memory */
+ memset(new_tmp_event + nbmem,
+ 0,
+ (new_nbmem - nbmem) *
+ sizeof(struct lttng_event_field));
+ nbmem = new_nbmem;
+ tmp_event = new_tmp_event;
+ }
+
+ memcpy(tmp_event[count].field_name,
+ uiter.field_name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ /* Mapping between these enums matches 1 to 1. */
+ tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
+ tmp_event[count].nowrite = uiter.nowrite;
+
+ memcpy(tmp_event[count].event.name,
+ uiter.event_name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ tmp_event[count].event.loglevel = uiter.loglevel;
+ tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
+ tmp_event[count].event.pid = app->pid;
+ tmp_event[count].event.enabled = -1;
+ count++;
}
- memcpy(tmp_event[count].field_name,
- uiter.field_name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- /* Mapping between these enums matches 1 to 1. */
- tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
- tmp_event[count].nowrite = uiter.nowrite;
-
- memcpy(tmp_event[count].event.name,
- uiter.event_name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- tmp_event[count].event.loglevel = uiter.loglevel;
- tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
- tmp_event[count].event.pid = app->pid;
- tmp_event[count].event.enabled = -1;
- count++;
- }
- ret = lttng_ust_ctl_release_handle(app->sock, handle);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
+ ret = lttng_ust_ctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ ret);
+ }
}
}
DBG2("UST app list event fields done (%zu events)", count);
rcu_error:
- rcu_read_unlock();
error:
health_code_update();
return ret;
DBG2("UST app cleaning registered apps hash table");
- rcu_read_lock();
-
/* Cleanup notify socket hash table */
if (ust_app_ht_by_notify_sock) {
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (
ust_app_ht_by_notify_sock->ht, &iter.iter, app, notify_sock_n.node) {
/*
}
if (ust_app_ht) {
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ret = lttng_ht_del(ust_app_ht, &iter);
LTTNG_ASSERT(!ret);
/* Cleanup socket hash table */
if (ust_app_ht_by_sock) {
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht_by_sock->ht, &iter.iter, app, sock_n.node) {
ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
LTTNG_ASSERT(!ret);
}
}
- rcu_read_unlock();
-
/* Destroy is done only when the ht is empty */
if (ust_app_ht) {
lttng_ht_destroy(ust_app_ht);
uchan->name,
usess->id);
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* For every registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ht_iter uiter;
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- continue;
- }
+ /* For every registered application */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ht_iter uiter;
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notify the caller of this error by
+ * telling them that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- /* Get channel */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /* If the session if found for the app, the channel must be there */
- LTTNG_ASSERT(ua_chan_node);
+ /* Get channel */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ /* If the session is found for the app, the channel must be there */
+ LTTNG_ASSERT(ua_chan_node);
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
- /* The channel must not be already disabled */
- LTTNG_ASSERT(ua_chan->enabled);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* The channel must not be already disabled */
+ LTTNG_ASSERT(ua_chan->enabled);
- /* Disable channel onto application */
- ret = disable_ust_app_channel(ua_sess, ua_chan, app);
- if (ret < 0) {
- /* XXX: We might want to report this error at some point... */
- continue;
+ /* Disable channel onto application */
+ ret = disable_ust_app_channel(ua_sess, ua_chan, app);
+ if (ret < 0) {
+ /* XXX: We might want to report this error at some point... */
+ continue;
+ }
}
}
- rcu_read_unlock();
return ret;
}
uchan->name,
usess->id);
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* For every registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- continue;
- }
+ /* For every registered application */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notify the caller of this error by
+ * telling them that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- /* Enable channel onto application */
- ret = enable_ust_app_channel(ua_sess, uchan, app);
- if (ret < 0) {
- /* XXX: We might want to report this error at some point... */
- continue;
+ /* Enable channel onto application */
+ ret = enable_ust_app_channel(ua_sess, uchan, app);
+ if (ret < 0) {
+ /* XXX: We might want to report this error at some point... */
+ continue;
+ }
}
}
- rcu_read_unlock();
return ret;
}
uchan->name,
usess->id);
- rcu_read_lock();
-
- /* For all registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- /* Next app */
- continue;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_chan_node == nullptr) {
- DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
- "Skipping",
- uchan->name,
- usess->id,
- app->pid);
- continue;
- }
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* For all registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ /* Next app */
+ continue;
+ }
- ua_event = find_ust_app_event(ua_chan->events,
- uevent->attr.name,
- uevent->filter,
- uevent->attr.loglevel,
- uevent->exclusion);
- if (ua_event == nullptr) {
- DBG2("Event %s not found in channel %s for app pid %d."
- "Skipping",
- uevent->attr.name,
- uchan->name,
- app->pid);
- continue;
- }
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ if (ua_chan_node == nullptr) {
+ DBG2("Channel %s not found in session id %" PRIu64
+ " for app pid %d."
+ "Skipping",
+ uchan->name,
+ usess->id,
+ app->pid);
+ continue;
+ }
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+ ua_event = find_ust_app_event(ua_chan->events,
+ uevent->attr.name,
+ uevent->filter,
+ uevent->attr.loglevel,
+ uevent->exclusion);
+ if (ua_event == nullptr) {
+ DBG2("Event %s not found in channel %s for app pid %d."
+ "Skipping",
+ uevent->attr.name,
+ uchan->name,
+ app->pid);
+ continue;
+ }
- ret = disable_ust_app_event(ua_event, app);
- if (ret < 0) {
- /* XXX: Report error someday... */
- continue;
+ ret = disable_ust_app_event(ua_event, app);
+ if (ret < 0) {
+ /* XXX: Report error someday... */
+ continue;
+ }
}
}
- rcu_read_unlock();
return ret;
}
* tracer also.
*/
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* For all registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (!ua_sess) {
- /* The application has problem or is probably dead. */
- continue;
- }
+ /* For all registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notify the caller of this error by
+ * telling them that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* The application has a problem or is probably dead. */
+ continue;
+ }
- pthread_mutex_lock(&ua_sess->lock);
+ pthread_mutex_lock(&ua_sess->lock);
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /*
- * It is possible that the channel cannot be found is
- * the channel/event creation occurs concurrently with
- * an application exit.
- */
- if (!ua_chan_node) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ /*
+ * It is possible that the channel cannot be found if
+ * the channel/event creation occurs concurrently with
+ * an application exit.
+ */
+ if (!ua_chan_node) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+ /* Get event node */
+ ua_event = find_ust_app_event(ua_chan->events,
+ uevent->attr.name,
+ uevent->filter,
+ uevent->attr.loglevel,
+ uevent->exclusion);
+ if (ua_event == nullptr) {
+ DBG3("UST app enable event %s not found for app PID %d."
+ "Skipping app",
+ uevent->attr.name,
+ app->pid);
+ goto next_app;
+ }
- /* Get event node */
- ua_event = find_ust_app_event(ua_chan->events,
- uevent->attr.name,
- uevent->filter,
- uevent->attr.loglevel,
- uevent->exclusion);
- if (ua_event == nullptr) {
- DBG3("UST app enable event %s not found for app PID %d."
- "Skipping app",
- uevent->attr.name,
- app->pid);
- goto next_app;
- }
-
- ret = enable_ust_app_event(ua_event, app);
- if (ret < 0) {
+ ret = enable_ust_app_event(ua_event, app);
+ if (ret < 0) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto error;
+ }
+ next_app:
pthread_mutex_unlock(&ua_sess->lock);
- goto error;
}
- next_app:
- pthread_mutex_unlock(&ua_sess->lock);
}
-
error:
- rcu_read_unlock();
return ret;
}
uevent->attr.name,
usess->id);
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* For all registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (!ua_sess) {
- /* The application has problem or is probably dead. */
- continue;
- }
+ /* For all registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notify the caller of this error by
+ * telling them that this is a version error.
+ */
+ continue;
+ }
- pthread_mutex_lock(&ua_sess->lock);
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* The application has a problem or is probably dead. */
+ continue;
+ }
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ pthread_mutex_lock(&ua_sess->lock);
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /* If the channel is not found, there is a code flow error */
- LTTNG_ASSERT(ua_chan_node);
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ /* If the channel is not found, there is a code flow error */
+ LTTNG_ASSERT(ua_chan_node);
- ret = create_ust_app_event(ua_chan, uevent, app);
- pthread_mutex_unlock(&ua_sess->lock);
- if (ret < 0) {
- if (ret != -LTTNG_UST_ERR_EXIST) {
- /* Possible value at this point: -ENOMEM. If so, we stop! */
- break;
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+ ret = create_ust_app_event(ua_chan, uevent, app);
+ pthread_mutex_unlock(&ua_sess->lock);
+ if (ret < 0) {
+ if (ret != -LTTNG_UST_ERR_EXIST) {
+ /* Possible value at this point: -ENOMEM. If so, we stop! */
+ break;
+ }
+
+ DBG2("UST app event %s already exist on app PID %d",
+ uevent->attr.name,
+ app->pid);
+ continue;
}
- DBG2("UST app event %s already exist on app PID %d",
- uevent->attr.name,
- app->pid);
- continue;
}
}
- rcu_read_unlock();
return ret;
}
DBG("Starting tracing for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end;
}
end:
- rcu_read_unlock();
health_code_update();
return 0;
error_unlock:
pthread_mutex_unlock(&ua_sess->lock);
- rcu_read_unlock();
health_code_update();
return -1;
}
DBG("Stopping tracing for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end_no_session;
end_unlock:
pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
- rcu_read_unlock();
health_code_update();
return 0;
error_rcu_unlock:
pthread_mutex_unlock(&ua_sess->lock);
- rcu_read_unlock();
health_code_update();
return -1;
}
DBG("Flushing app session buffers for ust app pid %d", app->pid);
- rcu_read_lock();
-
if (!app->compatible) {
goto end_not_compatible;
}
/* Flush buffers and push metadata. */
switch (ua_sess->buffer_type) {
case LTTNG_BUFFER_PER_PID:
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
health_code_update();
ret = consumer_flush_channel(socket, ua_chan->key);
continue;
}
}
+
break;
+ }
case LTTNG_BUFFER_PER_UID:
default:
abort();
pthread_mutex_unlock(&ua_sess->lock);
end_not_compatible:
- rcu_read_unlock();
health_code_update();
return retval;
}
DBG("Flushing session buffers for all ust apps");
- rcu_read_lock();
-
/* Flush buffers and push metadata. */
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
/* Flush all per UID buffers associated to that session. */
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
+ lttng::urcu::read_lock_guard read_lock;
lsu::registry_session *ust_session_reg;
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
auto locked_registry = ust_session_reg->lock();
(void) push_metadata(locked_registry, usess->consumer);
}
+
break;
}
case LTTNG_BUFFER_PER_PID:
struct ust_app_session *ua_sess;
struct lttng_ht_iter iter;
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ua_sess = lookup_session_by_app(usess, app);
if (ua_sess == nullptr) {
continue;
}
+
(void) ust_app_flush_app_session(app, ua_sess);
}
+
break;
}
default:
break;
}
- rcu_read_unlock();
health_code_update();
return ret;
}
DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end_not_compatible;
pthread_mutex_unlock(&ua_sess->lock);
end_not_compatible:
- rcu_read_unlock();
health_code_update();
return ret;
}
DBG("Clearing stream quiescent state for all ust apps");
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct consumer_socket *socket;
struct buffer_reg_channel *buf_reg_chan;
+ lttng::urcu::read_lock_guard read_lock;
/* Get associated consumer socket.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
buf_reg_chan->consumer_key);
}
}
+
break;
}
case LTTNG_BUFFER_PER_PID:
struct ust_app_session *ua_sess;
struct lttng_ht_iter iter;
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ua_sess = lookup_session_by_app(usess, app);
}
(void) ust_app_clear_quiescent_app_session(app, ua_sess);
}
+
break;
}
default:
break;
}
- rcu_read_unlock();
health_code_update();
return ret;
}
DBG("Destroy tracing for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end;
}
}
end:
- rcu_read_unlock();
health_code_update();
return 0;
}
*/
usess->active = true;
- rcu_read_lock();
-
/*
* In a start-stop-start use-case, we need to clear the quiescent state
* of each channel set by the prior stop command, thus ensuring that a
*/
(void) ust_app_clear_quiescent_session(usess);
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ust_app_global_update(usess, app);
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- rcu_read_unlock();
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update(usess, app);
+ }
+ }
return 0;
}
*/
usess->active = false;
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = ust_app_stop_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
- continue;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ret = ust_app_stop_trace(usess, app);
+ if (ret < 0) {
+ /* Continue to the next app even on error */
+ continue;
+ }
}
}
(void) ust_app_flush_session(usess);
- rcu_read_unlock();
-
return 0;
}
DBG("Destroy all UST traces");
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = destroy_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
- continue;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ret = destroy_trace(usess, app);
+ if (ret < 0) {
+ /* Continue to the next app even on error */
+ continue;
+ }
}
}
- rcu_read_unlock();
-
return 0;
}
}
}
- rcu_read_lock();
- /* Remove all unknown event sources from the app. */
- cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
- &app_trigger_iter.iter,
- event_notifier_rule,
- node.node) {
- const uint64_t app_token = event_notifier_rule->token;
- bool found = false;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ /* Remove all unknown event sources from the app. */
+ cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+ &app_trigger_iter.iter,
+ event_notifier_rule,
+ node.node) {
+ const uint64_t app_token = event_notifier_rule->token;
+ bool found = false;
- /*
- * Check if the app event trigger still exists on the
- * notification side.
- */
- for (i = 0; i < count; i++) {
- uint64_t notification_thread_token;
- const struct lttng_trigger *trigger =
- lttng_triggers_get_at_index(triggers, i);
+ /*
+ * Check if the app event trigger still exists on the
+ * notification side.
+ */
+ for (i = 0; i < count; i++) {
+ uint64_t notification_thread_token;
+ const struct lttng_trigger *trigger =
+ lttng_triggers_get_at_index(triggers, i);
- LTTNG_ASSERT(trigger);
+ LTTNG_ASSERT(trigger);
- notification_thread_token = lttng_trigger_get_tracer_token(trigger);
+ notification_thread_token = lttng_trigger_get_tracer_token(trigger);
- if (notification_thread_token == app_token) {
- found = true;
- break;
+ if (notification_thread_token == app_token) {
+ found = true;
+ break;
+ }
}
- }
- if (found) {
- /* Still valid. */
- continue;
- }
+ if (found) {
+ /* Still valid. */
+ continue;
+ }
- /*
- * This trigger was unregistered, disable it on the tracer's
- * side.
- */
- ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &app_trigger_iter);
- LTTNG_ASSERT(ret == 0);
+ /*
+ * This trigger was unregistered, disable it on the tracer's
+ * side.
+ */
+ ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &app_trigger_iter);
+ LTTNG_ASSERT(ret == 0);
- /* Callee logs errors. */
- (void) disable_ust_object(app, event_notifier_rule->obj);
+ /* Callee logs errors. */
+ (void) disable_ust_object(app, event_notifier_rule->obj);
- delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
+ delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
+ }
}
- rcu_read_unlock();
-
end:
lttng_triggers_destroy(triggers);
return;
goto deleted_session;
}
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- ust_app_synchronize_all_channels(usess, ua_sess, app);
+ ust_app_synchronize_all_channels(usess, ua_sess, app);
- /*
- * Create the metadata for the application. This returns gracefully if a
- * metadata was already set for the session.
- *
- * The metadata channel must be created after the data channels as the
- * consumer daemon assumes this ordering. When interacting with a relay
- * daemon, the consumer will use this assumption to send the
- * "STREAMS_SENT" message to the relay daemon.
- */
- ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
- if (ret < 0) {
- ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
- app->sock,
- usess->id);
+ /*
+ * Create the metadata for the application. This returns gracefully if a
+ * metadata was already set for the session.
+ *
+ * The metadata channel must be created after the data channels as the
+ * consumer daemon assumes this ordering. When interacting with a relay
+ * daemon, the consumer will use this assumption to send the
+ * "STREAMS_SENT" message to the relay daemon.
+ */
+ ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
+ if (ret < 0) {
+ ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
+ app->sock,
+ usess->id);
+ }
}
- rcu_read_unlock();
-
deleted_session:
pthread_mutex_unlock(&ua_sess->lock);
end:
struct lttng_ht_iter iter;
struct ust_app *app;
- rcu_read_lock();
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ust_app_global_update(usess, app);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update(usess, app);
+ }
}
- rcu_read_unlock();
}
void ust_app_global_update_all_event_notifier_rules()
struct lttng_ht_iter iter;
struct ust_app *app;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ust_app_global_update_event_notifier_rules(app);
}
-
- rcu_read_unlock();
}
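
The two hunks above show the patch's two recurring shapes. When the entire function body is the critical section, the guard is declared at function scope and lives until the function returns; when only a traversal needs protection, the loop is wrapped in an explicit block so the guard is destroyed at the closing brace. A sketch of the scoped form, where app_ht and update_app() are hypothetical stand-ins for the hash tables and callbacks touched here:

static void update_all_apps(struct lttng_ht *app_ht)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	{
		/* Read-side critical section limited to the traversal. */
		lttng::urcu::read_lock_guard read_lock;

		cds_lfht_for_each_entry (app_ht->ht, &iter.iter, app, pid_n.node) {
			update_app(app); /* hypothetical per-app callback */
		}
	} /* The guard's destructor calls rcu_read_unlock() here. */

	/* Work that must not run under the RCU read-side lock goes here. */
}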
/*
LTTNG_ASSERT(usess->active);
- rcu_read_lock();
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- continue;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+			 * TODO: In time, we should notify the caller of this error,
+			 * indicating that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- pthread_mutex_lock(&ua_sess->lock);
+ pthread_mutex_lock(&ua_sess->lock);
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_chan_node == nullptr) {
- goto next_app;
- }
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
- ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
- if (ret < 0) {
- goto next_app;
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ if (ua_chan_node == nullptr) {
+ goto next_app;
+ }
+ ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
+ if (ret < 0) {
+ goto next_app;
+ }
+ next_app:
+ pthread_mutex_unlock(&ua_sess->lock);
}
- next_app:
- pthread_mutex_unlock(&ua_sess->lock);
}
- rcu_read_unlock();
return ret;
}
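
Note that the next_app label moved inside the new scope. Jumping to a label within the same block is safe: the guard's destructor still runs when control leaves the block. The manual style this replaces had to funnel every exit path past an explicit rcu_read_unlock(), as the following sketch illustrates (find_by_key() is a hypothetical helper):

/* Before: each early exit must remember to route through the unlock. */
static int lookup_old(struct lttng_ht *ht, uint64_t key)
{
	int ret = 0;

	rcu_read_lock();
	if (!find_by_key(ht, key)) {
		ret = -1;
		goto end; /* Skipping this goto would leak the read lock. */
	}
end:
	rcu_read_unlock();
	return ret;
}

/* After: any return path unlocks through the guard's destructor. */
static int lookup_new(struct lttng_ht *ht, uint64_t key)
{
	lttng::urcu::read_lock_guard read_lock;

	return find_by_key(ht, key) ? 0 : -1;
}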
LTTNG_ASSERT(sock >= 0);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
obj = zmalloc<ust_app_notify_sock_obj>();
if (!obj) {
(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
close_socket:
- rcu_read_unlock();
/*
* Close socket after a grace period to avoid for the socket to be reused
LTTNG_ASSERT(usess);
LTTNG_ASSERT(output);
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg;
+ lttng::urcu::read_lock_guard read_lock;
+
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
goto error;
}
}
+
break;
}
case LTTNG_BUFFER_PER_PID:
{
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct consumer_socket *socket;
struct lttng_ht_iter chan_iter;
error:
free(trace_path);
- rcu_read_unlock();
return status;
}
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (
reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
}
tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
}
- rcu_read_unlock();
}
break;
}
case LTTNG_BUFFER_PER_PID:
{
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
}
}
- rcu_read_unlock();
break;
}
default:
*discarded = 0;
*lost = 0;
- rcu_read_lock();
/*
* Iterate over every registered applications. Sum counters for
* all applications containing requested session and channel.
*/
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct lttng_ht_iter uiter;
}
}
- rcu_read_unlock();
return ret;
}
DBG("Regenerating the metadata for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
ua_sess = lookup_session_by_app(usess, app);
if (ua_sess == nullptr) {
pthread_mutex_unlock(&ua_sess->lock);
end:
- rcu_read_unlock();
health_code_update();
return ret;
}
DBG("Regenerating the metadata for all UST apps");
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
if (!app->compatible) {
}
}
- rcu_read_unlock();
-
return 0;
}
LTTNG_ASSERT(usess);
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
+ lttng::urcu::read_lock_guard read_lock;
/* Get consumer socket to use to push the metadata.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
}
case LTTNG_BUFFER_PER_PID:
{
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct consumer_socket *socket;
struct lttng_ht_iter chan_iter;
cmd_ret = LTTNG_OK;
error:
- rcu_read_unlock();
return cmd_ret;
}
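
In switch statements such as the one above, the guard is declared inside the braces of each case that needs it rather than in front of the switch. That keeps the read-side critical section scoped to one buffering scheme at a time and avoids the "jump to case label crosses initialization" error that declaring a non-trivial object directly under a case label would provoke. Roughly (helpers hypothetical):

switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
	/* Guard scoped to this case's braces only. */
	lttng::urcu::read_lock_guard read_lock;

	walk_per_uid_registries(usess); /* hypothetical helper */
	break;
}
case LTTNG_BUFFER_PER_PID:
{
	lttng::urcu::read_lock_guard read_lock;

	walk_per_pid_apps(usess); /* hypothetical helper */
	break;
}
default:
	abort();
}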
int fmt_ret;
LTTNG_ASSERT(usess->current_trace_chunk);
- rcu_read_lock();
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg;
+ lttng::urcu::read_lock_guard read_lock;
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
fmt_ret = asprintf(&pathname_index,
case LTTNG_BUFFER_PER_PID:
{
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
/*
* Create the toplevel ust/ directory in case no apps are running.
ret = LTTNG_OK;
error:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(usess);
- rcu_read_lock();
-
if (usess->active) {
ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
cmd_ret = LTTNG_ERR_FATAL;
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg;
+ lttng::urcu::read_lock_guard read_lock;
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
}
case LTTNG_BUFFER_PER_PID:
{
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct consumer_socket *socket;
struct lttng_ht_iter chan_iter;
error_socket:
end:
- rcu_read_unlock();
return cmd_ret;
}
LTTNG_ASSERT(usess);
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
+ lttng::urcu::read_lock_guard read_lock;
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
usess->consumer);
case LTTNG_BUFFER_PER_PID:
{
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct consumer_socket *socket;
}
error:
- rcu_read_unlock();
return ret;
}
*/
return v_major <= 9 ? lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS :
lsu::ctl_field_quirks::NONE;
-}
\ No newline at end of file
+}
LTTNG_ASSERT(socket);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
health_code_update();
/* Wait for a metadata request */
ret = 0;
end:
- rcu_read_unlock();
return ret;
}
#include <common/error.hpp>
#include <common/macros.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
#include <lttng/channel-internal.hpp>
#include <lttng/channel.h>
#include <common/kernel-ctl/kernel-ctl.hpp>
#include <common/macros.hpp>
#include <common/relayd/relayd.hpp>
+#include <common/urcu.hpp>
#include <common/ust-consumer/ust-consumer.hpp>
#include <common/utils.hpp>
/* Ease our life a bit. */
ht = the_consumer_data.stream_list_ht;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Search the metadata associated with the session id of the given stream. */
ret = 0;
end:
- rcu_read_unlock();
return ret;
}
{
int ret;
struct lttng_consumer_stream *stream;
+ lttng::urcu::read_lock_guard read_lock;
stream = zmalloc<lttng_consumer_stream>();
if (stream == nullptr) {
goto end;
}
- rcu_read_lock();
-
if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) {
ERR("Failed to acquire trace chunk reference during the creation of a stream");
ret = -1;
stream->net_seq_idx,
stream->session_id);
- rcu_read_unlock();
-
lttng_dynamic_array_init(
&stream->read_subbuffer_ops.post_consume_cbs, sizeof(post_consume_cb), nullptr);
return stream;
error:
- rcu_read_unlock();
lttng_trace_chunk_put(stream->trace_chunk);
lttng_dynamic_array_reset(&stream->read_subbuffer_ops.post_consume_cbs);
free(stream);
stream->trace_chunk = nullptr;
/* Check and cleanup relayd if needed. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
relayd = consumer_find_relayd(stream->net_seq_idx);
if (relayd != nullptr) {
consumer_stream_relayd_close(stream, relayd);
stream->net_seq_idx = -1ULL;
}
-
- rcu_read_unlock();
}
/*
/* Should NEVER be called not in monitor mode. */
LTTNG_ASSERT(stream->chan->monitor);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (ht) {
iter.iter.node = &stream->node.node;
/* See the previous ht del on why we ignore the returned value. */
(void) lttng_ht_del(the_consumer_data.stream_list_ht, &iter);
- rcu_read_unlock();
-
if (!stream->metadata_flag) {
/* Decrement the stream count of the global consumer data. */
LTTNG_ASSERT(the_consumer_data.stream_count > 0);
LTTNG_ASSERT(stream);
LTTNG_ASSERT(element);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (stream->net_seq_idx != (uint64_t) -1ULL) {
struct consumer_relayd_sock_pair *relayd;
relayd = consumer_find_relayd(stream->net_seq_idx);
}
error:
- rcu_read_unlock();
return ret;
}
#include <common/consumer/consumer-timer.hpp>
#include <common/kernel-consumer/kernel-consumer.hpp>
#include <common/kernel-ctl/kernel-ctl.hpp>
+#include <common/urcu.hpp>
#include <common/ust-consumer/ust-consumer.hpp>
#include <bin/lttng-consumerd/health-consumerd.hpp>
DBG("Live timer for channel %" PRIu64, channel->key);
- rcu_read_lock();
- cds_lfht_for_each_entry_duplicate(ht->ht,
- ht->hash_fct(&channel->key, lttng_ht_seed),
- ht->match_fct,
- &channel->key,
- &iter.iter,
- stream,
- node_channel_id.node)
{
- ret = check_stream(stream, flush_index);
- if (ret < 0) {
- goto error_unlock;
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_for_each_entry_duplicate(ht->ht,
+ ht->hash_fct(&channel->key, lttng_ht_seed),
+ ht->match_fct,
+ &channel->key,
+ &iter.iter,
+ stream,
+ node_channel_id.node)
+ {
+ ret = check_stream(stream, flush_index);
+ if (ret < 0) {
+ goto error_unlock;
+ }
}
}
-
error_unlock:
- rcu_read_unlock();
error:
return;
*_total_consumed = 0;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry_duplicate(ht->ht,
ht->hash_fct(&channel->key, lttng_ht_seed),
*_highest_use = high;
*_lowest_use = low;
end:
- rcu_read_unlock();
if (empty_channel) {
ret = -1;
}
#include <common/time.hpp>
#include <common/trace-chunk-registry.hpp>
#include <common/trace-chunk.hpp>
+#include <common/urcu.hpp>
#include <common/ust-consumer/ust-consumer.hpp>
#include <common/utils.hpp>
return nullptr;
}
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_lookup(ht, &key, &iter);
node = lttng_ht_iter_get_node_u64(&iter);
	stream = lttng::utils::container_of(node, &lttng_consumer_stream::node);
}
- rcu_read_unlock();
-
return stream;
}
{
struct lttng_consumer_stream *stream;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
stream = find_stream(key, ht);
if (stream) {
stream->key = (uint64_t) -1ULL;
*/
stream->node.key = (uint64_t) -1ULL;
}
- rcu_read_unlock();
}
/*
{
struct lttng_consumer_channel *channel;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
channel = consumer_find_channel(key);
if (channel) {
channel->key = (uint64_t) -1ULL;
*/
channel->node.key = (uint64_t) -1ULL;
}
- rcu_read_unlock();
}
static void free_channel_rcu(struct rcu_head *head)
if (channel->is_published) {
int ret;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
iter.iter.node = &channel->node.node;
ret = lttng_ht_del(the_consumer_data.channel_ht, &iter);
LTTNG_ASSERT(!ret);
iter.iter.node = &channel->channels_by_session_id_ht_node.node;
ret = lttng_ht_del(the_consumer_data.channels_by_session_id_ht, &iter);
LTTNG_ASSERT(!ret);
- rcu_read_unlock();
}
channel->is_deleted = true;
struct lttng_ht_iter iter;
struct consumer_relayd_sock_pair *relayd;
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (the_consumer_data.relayd_ht->ht, &iter.iter, relayd, node.node) {
- consumer_destroy_relayd(relayd);
+ cds_lfht_for_each_entry (
+ the_consumer_data.relayd_ht->ht, &iter.iter, relayd, node.node) {
+ consumer_destroy_relayd(relayd);
+ }
}
- rcu_read_unlock();
-
lttng_ht_destroy(the_consumer_data.relayd_ht);
}
DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Let's begin with metadata */
cds_lfht_for_each_entry (metadata_ht->ht, &iter.iter, stream, node.node) {
DBG("Delete flag set to data stream %d", stream->wait_fd);
}
}
- rcu_read_unlock();
}
/*
pthread_mutex_lock(&stream->chan->lock);
pthread_mutex_lock(&stream->chan->timer_lock);
pthread_mutex_lock(&stream->lock);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Steal stream identifier to avoid having streams with the same key */
steal_stream_key(stream->key, ht);
the_consumer_data.stream_count++;
the_consumer_data.need_update = 1;
- rcu_read_unlock();
pthread_mutex_unlock(&stream->lock);
pthread_mutex_unlock(&stream->chan->timer_lock);
pthread_mutex_unlock(&stream->chan->lock);
LTTNG_ASSERT(path);
/* The stream is not metadata. Get relayd reference if exists. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
relayd = consumer_find_relayd(stream->net_seq_idx);
if (relayd != nullptr) {
/* Add stream on the relayd */
stream->net_seq_idx);
end:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(net_seq_idx != -1ULL);
/* The stream is not metadata. Get relayd reference if exists. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
relayd = consumer_find_relayd(net_seq_idx);
if (relayd != nullptr) {
/* Add stream on the relayd */
DBG("All streams sent relayd id %" PRIu64, net_seq_idx);
end:
- rcu_read_unlock();
return ret;
}
struct consumer_relayd_sock_pair *relayd;
/* The stream is not metadata. Get relayd reference if exists. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
relayd = consumer_find_relayd(stream->net_seq_idx);
if (relayd) {
consumer_stream_relayd_close(stream, relayd);
}
- rcu_read_unlock();
}
/*
*/
steal_channel_key(channel->key);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_add_unique_u64(the_consumer_data.channel_ht, &channel->node);
lttng_ht_add_u64(the_consumer_data.channels_by_session_id_ht,
&channel->channels_by_session_id_ht_node);
- rcu_read_unlock();
channel->is_published = true;
pthread_mutex_unlock(&channel->timer_lock);
DBG("Updating poll fd array");
*nb_inactive_fd = 0;
- rcu_read_lock();
- cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
- /*
- * Only active streams with an active end point can be added to the
- * poll set and local stream storage of the thread.
- *
- * There is a potential race here for endpoint_status to be updated
- * just after the check. However, this is OK since the stream(s) will
- * be deleted once the thread is notified that the end point state has
- * changed where this function will be called back again.
- *
- * We track the number of inactive FDs because they still need to be
- * closed by the polling thread after a wakeup on the data_pipe or
- * metadata_pipe.
- */
- if (stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
- (*nb_inactive_fd)++;
- continue;
+
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
+ /*
+ * Only active streams with an active end point can be added to the
+ * poll set and local stream storage of the thread.
+ *
+ * There is a potential race here for endpoint_status to be updated
+ * just after the check. However, this is OK since the stream(s) will
+ * be deleted once the thread is notified that the end point state has
+ * changed where this function will be called back again.
+ *
+ * We track the number of inactive FDs because they still need to be
+ * closed by the polling thread after a wakeup on the data_pipe or
+ * metadata_pipe.
+ */
+ if (stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
+ (*nb_inactive_fd)++;
+ continue;
+ }
+
+ (*pollfd)[i].fd = stream->wait_fd;
+ (*pollfd)[i].events = POLLIN | POLLPRI;
+ local_stream[i] = stream;
+ i++;
}
- /*
- * This clobbers way too much the debug output. Uncomment that if you
- * need it for debugging purposes.
- */
- (*pollfd)[i].fd = stream->wait_fd;
- (*pollfd)[i].events = POLLIN | POLLPRI;
- local_stream[i] = stream;
- i++;
}
- rcu_read_unlock();
/*
* Insert the consumer_data_pipe at the end of the array and don't
struct lttng_consumer_channel *channel;
unsigned int trace_chunks_left;
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (the_consumer_data.channel_ht->ht, &iter.iter, channel, node.node) {
- consumer_del_channel(channel);
+ cds_lfht_for_each_entry (
+ the_consumer_data.channel_ht->ht, &iter.iter, channel, node.node) {
+ consumer_del_channel(channel);
+ }
}
- rcu_read_unlock();
-
lttng_ht_destroy(the_consumer_data.channel_ht);
lttng_ht_destroy(the_consumer_data.channels_by_session_id_ht);
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
- /*
- * Ignore return value since we are currently cleaning up so any error
- * can't be handled.
- */
- (void) consumer_del_stream(stream, ht);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
+ /*
+ * Ignore return value since we are currently cleaning up so any error
+ * can't be handled.
+ */
+ (void) consumer_del_stream(stream, ht);
+ }
}
- rcu_read_unlock();
lttng_ht_destroy(ht);
}
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
- /*
- * Ignore return value since we are currently cleaning up so any error
- * can't be handled.
- */
- (void) consumer_del_metadata_stream(stream, ht);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
+ /*
+ * Ignore return value since we are currently cleaning up so any error
+ * can't be handled.
+ */
+ (void) consumer_del_metadata_stream(stream, ht);
+ }
}
- rcu_read_unlock();
lttng_ht_destroy(ht);
}
size_t write_len;
/* RCU lock for the relayd pointer */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
LTTNG_ASSERT(stream->net_seq_idx != (uint64_t) -1ULL || stream->trace_chunk);
/* Flag that the current stream if set for network streaming. */
pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
}
- rcu_read_unlock();
return ret;
}
}
/* RCU lock for the relayd pointer */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Flag that the current stream if set for network streaming. */
if (stream->net_seq_idx != (uint64_t) -1ULL) {
pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
}
- rcu_read_unlock();
return written;
}
* after this point.
*/
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/*
* Lookup the stream just to make sure it does not exist in our internal
*/
lttng_ht_add_u64(the_consumer_data.stream_list_ht, &stream->node_session_id);
- rcu_read_unlock();
-
pthread_mutex_unlock(&stream->lock);
pthread_mutex_unlock(&stream->chan->lock);
pthread_mutex_unlock(&stream->chan->timer_lock);
DBG("Consumer delete flagged data stream");
- rcu_read_lock();
- cds_lfht_for_each_entry (data_ht->ht, &iter.iter, stream, node.node) {
- /* Validate delete flag of the stream */
- if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
- continue;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (data_ht->ht, &iter.iter, stream, node.node) {
+ /* Validate delete flag of the stream */
+ if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
+ continue;
+ }
+ /* Delete it right now */
+ consumer_del_stream(stream, data_ht);
}
- /* Delete it right now */
- consumer_del_stream(stream, data_ht);
}
- rcu_read_unlock();
}
/*
LTTNG_ASSERT(pollset);
- rcu_read_lock();
- cds_lfht_for_each_entry (metadata_ht->ht, &iter.iter, stream, node.node) {
- /* Validate delete flag of the stream */
- if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
- continue;
- }
- /*
- * Remove from pollset so the metadata thread can continue without
- * blocking on a deleted stream.
- */
- lttng_poll_del(pollset, stream->wait_fd);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_for_each_entry (metadata_ht->ht, &iter.iter, stream, node.node) {
+ /* Validate delete flag of the stream */
+ if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
+ continue;
+ }
+ /*
+ * Remove from pollset so the metadata thread can continue without
+ * blocking on a deleted stream.
+ */
+ lttng_poll_del(pollset, stream->wait_fd);
- /* Delete it right now */
- consumer_del_metadata_stream(stream, metadata_ht);
+ /* Delete it right now */
+ consumer_del_metadata_stream(stream, metadata_ht);
+ }
}
- rcu_read_unlock();
}
/*
continue;
}
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
{
uint64_t tmp_id = (uint64_t) pollfd;
consumer_del_metadata_stream(stream, metadata_ht);
} else {
ERR("Unexpected poll events %u for sock %d", revents, pollfd);
- rcu_read_unlock();
goto end;
}
/* Release RCU lock for the stream looked up */
- rcu_read_unlock();
}
}
ht = the_consumer_data.stream_per_chan_id_ht;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry_duplicate(ht->ht,
ht->hash_fct(&channel->key, lttng_ht_seed),
ht->match_fct,
next:
pthread_mutex_unlock(&stream->lock);
}
- rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
return;
}
- rcu_read_lock();
- cds_lfht_for_each_entry (ht->ht, &iter.iter, channel, wait_fd_node.node) {
- ret = lttng_ht_del(ht, &iter);
- LTTNG_ASSERT(ret != 0);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ht->ht, &iter.iter, channel, wait_fd_node.node) {
+ ret = lttng_ht_del(ht, &iter);
+ LTTNG_ASSERT(ret != 0);
+ }
}
- rcu_read_unlock();
lttng_ht_destroy(ht);
}
switch (action) {
case CONSUMER_CHANNEL_ADD:
+ {
DBG("Adding channel %d to poll set", chan->wait_fd);
lttng_ht_node_init_u64(&chan->wait_fd_node,
chan->wait_fd);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
lttng_ht_add_unique_u64(channel_ht,
&chan->wait_fd_node);
- rcu_read_unlock();
/* Add channel to the global poll events list */
// FIXME: Empty flag on a pipe pollset, this might
// hang on FreeBSD.
lttng_poll_add(&events, chan->wait_fd, 0);
break;
+ }
case CONSUMER_CHANNEL_DEL:
{
/*
* GET_CHANNEL failed.
*/
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
chan = consumer_find_channel(key);
if (!chan) {
- rcu_read_unlock();
ERR("UST consumer get channel key %" PRIu64
" not found for del channel",
key);
if (!uatomic_sub_return(&chan->refcount, 1)) {
consumer_del_channel(chan);
}
- rcu_read_unlock();
goto restart;
}
case CONSUMER_CHANNEL_QUIT:
continue;
}
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
{
uint64_t tmp_id = (uint64_t) pollfd;
}
} else {
ERR("Unexpected poll events %u for sock %d", revents, pollfd);
- rcu_read_unlock();
goto end;
}
/* Release RCU lock for the channel looked up */
- rcu_read_unlock();
}
}
DBG("Consumer data pending command on session id %" PRIu64, id);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
pthread_mutex_lock(&the_consumer_data.lock);
switch (the_consumer_data.type) {
data_not_pending:
/* Data is available to be read by a viewer. */
pthread_mutex_unlock(&the_consumer_data.lock);
- rcu_read_unlock();
return 0;
data_pending:
/* Data is still being extracted from buffers. */
pthread_mutex_unlock(&the_consumer_data.lock);
- rcu_read_unlock();
return 1;
}
nullptr);
lttng_dynamic_pointer_array_init(&streams_packet_to_open, nullptr);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
pthread_mutex_lock(&channel->lock);
LTTNG_ASSERT(channel->trace_chunk);
end_unlock_channel:
pthread_mutex_unlock(&channel->lock);
end:
- rcu_read_unlock();
lttng_dynamic_array_reset(&stream_rotation_positions);
lttng_dynamic_pointer_array_reset(&streams_packet_to_open);
return ret;
int ret;
struct lttng_consumer_stream *stream;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
pthread_mutex_lock(&channel->lock);
cds_list_for_each_entry (stream, &channel->streams.head, send_node) {
health_code_update();
pthread_mutex_unlock(&stream->lock);
}
pthread_mutex_unlock(&channel->lock);
- rcu_read_unlock();
return 0;
error_unlock:
pthread_mutex_unlock(&stream->lock);
pthread_mutex_unlock(&channel->lock);
- rcu_read_unlock();
return ret;
}
ASSERT_RCU_READ_LOCKED();
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
DBG("Consumer rotate ready streams in channel %" PRIu64, key);
ret = 0;
end:
- rcu_read_unlock();
return ret;
}
goto error;
}
- rcu_read_lock();
- cds_lfht_for_each_entry_duplicate(
- the_consumer_data.channels_by_session_id_ht->ht,
- the_consumer_data.channels_by_session_id_ht->hash_fct(&session_id, lttng_ht_seed),
- the_consumer_data.channels_by_session_id_ht->match_fct,
- &session_id,
- &iter.iter,
- channel,
- channels_by_session_id_ht_node.node)
{
- ret = lttng_consumer_channel_set_trace_chunk(channel, published_chunk);
- if (ret) {
- /*
- * Roll-back the creation of this chunk.
- *
- * This is important since the session daemon will
- * assume that the creation of this chunk failed and
- * will never ask for it to be closed, resulting
- * in a leak and an inconsistent state for some
- * channels.
- */
- enum lttcomm_return_code close_ret;
- char path[LTTNG_PATH_MAX];
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_for_each_entry_duplicate(
+ the_consumer_data.channels_by_session_id_ht->ht,
+ the_consumer_data.channels_by_session_id_ht->hash_fct(&session_id,
+ lttng_ht_seed),
+ the_consumer_data.channels_by_session_id_ht->match_fct,
+ &session_id,
+ &iter.iter,
+ channel,
+ channels_by_session_id_ht_node.node)
+ {
+ ret = lttng_consumer_channel_set_trace_chunk(channel, published_chunk);
+ if (ret) {
+ /*
+ * Roll-back the creation of this chunk.
+ *
+ * This is important since the session daemon will
+ * assume that the creation of this chunk failed and
+ * will never ask for it to be closed, resulting
+ * in a leak and an inconsistent state for some
+ * channels.
+ */
+ enum lttcomm_return_code close_ret;
+ char path[LTTNG_PATH_MAX];
+
+ DBG("Failed to set new trace chunk on existing channels, rolling back");
+ close_ret =
+ lttng_consumer_close_trace_chunk(relayd_id,
+ session_id,
+ chunk_id,
+ chunk_creation_timestamp,
+ nullptr,
+ path);
+ if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) {
+ ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64
+ ", chunk_id = %" PRIu64,
+ session_id,
+ chunk_id);
+ }
- DBG("Failed to set new trace chunk on existing channels, rolling back");
- close_ret = lttng_consumer_close_trace_chunk(relayd_id,
- session_id,
- chunk_id,
- chunk_creation_timestamp,
- nullptr,
- path);
- if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) {
- ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64
- ", chunk_id = %" PRIu64,
- session_id,
- chunk_id);
+ ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
+ break;
}
-
- ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
- break;
}
}
}
}
error_unlock:
- rcu_read_unlock();
error:
/* Release the reference returned by the "publish" operation. */
lttng_trace_chunk_put(published_chunk);
* it; it is only kept around to compare it (by address) to the
* current chunk found in the session's channels.
*/
- rcu_read_lock();
- cds_lfht_for_each_entry (the_consumer_data.channel_ht->ht, &iter.iter, channel, node.node) {
- int ret;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_for_each_entry (
+ the_consumer_data.channel_ht->ht, &iter.iter, channel, node.node) {
+ int ret;
- /*
- * Only change the channel's chunk to NULL if it still
- * references the chunk being closed. The channel may
- * reference a newer channel in the case of a session
- * rotation. When a session rotation occurs, the "next"
- * chunk is created before the "current" chunk is closed.
- */
- if (channel->trace_chunk != chunk) {
- continue;
- }
- ret = lttng_consumer_channel_set_trace_chunk(channel, nullptr);
- if (ret) {
/*
- * Attempt to close the chunk on as many channels as
- * possible.
+ * Only change the channel's chunk to NULL if it still
+ * references the chunk being closed. The channel may
+			 * reference a newer chunk in the case of a session
+ * rotation. When a session rotation occurs, the "next"
+ * chunk is created before the "current" chunk is closed.
*/
- ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
+ if (channel->trace_chunk != chunk) {
+ continue;
+ }
+ ret = lttng_consumer_channel_set_trace_chunk(channel, nullptr);
+ if (ret) {
+ /*
+ * Attempt to close the chunk on as many channels as
+ * possible.
+ */
+ ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
+ }
}
}
-
if (relayd_id) {
int ret;
struct consumer_relayd_sock_pair *relayd;
}
}
error_unlock:
- rcu_read_unlock();
end:
/*
* Release the reference returned by the "find" operation and
const bool is_local_trace = !relayd_id;
struct consumer_relayd_sock_pair *relayd = nullptr;
bool chunk_exists_local, chunk_exists_remote;
+ lttng::urcu::read_lock_guard read_lock;
if (relayd_id) {
/* Only used for logging purposes. */
goto end;
}
- rcu_read_lock();
relayd = consumer_find_relayd(*relayd_id);
if (!relayd) {
ERR("Failed to find relayd %" PRIu64, *relayd_id);
DBG("Trace chunk %s on relay daemon", chunk_exists_remote ? "exists" : "does not exist");
end_rcu_unlock:
- rcu_read_unlock();
end:
return ret_code;
}
ht = the_consumer_data.stream_per_chan_id_ht;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry_duplicate(ht->ht,
ht->hash_fct(&channel->key, lttng_ht_seed),
ht->match_fct,
next:
pthread_mutex_unlock(&stream->lock);
}
- rcu_read_unlock();
return LTTCOMM_CONSUMERD_SUCCESS;
error_unlock:
pthread_mutex_unlock(&stream->lock);
- rcu_read_unlock();
return ret;
}
goto end;
}
- rcu_read_lock();
- cds_list_for_each_entry (stream, &channel->streams.head, send_node) {
- enum consumer_stream_open_packet_status status;
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_list_for_each_entry (stream, &channel->streams.head, send_node) {
+ enum consumer_stream_open_packet_status status;
- pthread_mutex_lock(&stream->lock);
- if (cds_lfht_is_node_deleted(&stream->node.node)) {
- goto next;
- }
+ pthread_mutex_lock(&stream->lock);
+ if (cds_lfht_is_node_deleted(&stream->node.node)) {
+ goto next;
+ }
- status = consumer_stream_open_packet(stream);
- switch (status) {
- case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED:
- DBG("Opened a packet in \"open channel packets\" command: stream id = %" PRIu64
- ", channel name = %s, session id = %" PRIu64,
- stream->key,
- stream->chan->name,
- stream->chan->session_id);
- stream->opened_packet_in_current_trace_chunk = true;
- break;
- case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE:
- DBG("No space left to open a packet in \"open channel packets\" command: stream id = %" PRIu64
- ", channel name = %s, session id = %" PRIu64,
- stream->key,
- stream->chan->name,
- stream->chan->session_id);
- break;
- case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR:
- /*
- * Only unexpected internal errors can lead to this
- * failing. Report an unknown error.
- */
- ERR("Failed to flush empty buffer in \"open channel packets\" command: stream id = %" PRIu64
- ", channel id = %" PRIu64 ", channel name = %s"
- ", session id = %" PRIu64,
- stream->key,
- channel->key,
- channel->name,
- channel->session_id);
- ret = LTTCOMM_CONSUMERD_UNKNOWN_ERROR;
- goto error_unlock;
- default:
- abort();
- }
+ status = consumer_stream_open_packet(stream);
+ switch (status) {
+ case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED:
+ DBG("Opened a packet in \"open channel packets\" command: stream id = %" PRIu64
+ ", channel name = %s, session id = %" PRIu64,
+ stream->key,
+ stream->chan->name,
+ stream->chan->session_id);
+ stream->opened_packet_in_current_trace_chunk = true;
+ break;
+ case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE:
+ DBG("No space left to open a packet in \"open channel packets\" command: stream id = %" PRIu64
+ ", channel name = %s, session id = %" PRIu64,
+ stream->key,
+ stream->chan->name,
+ stream->chan->session_id);
+ break;
+ case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR:
+ /*
+ * Only unexpected internal errors can lead to this
+ * failing. Report an unknown error.
+ */
+ ERR("Failed to flush empty buffer in \"open channel packets\" command: stream id = %" PRIu64
+ ", channel id = %" PRIu64 ", channel name = %s"
+ ", session id = %" PRIu64,
+ stream->key,
+ channel->key,
+ channel->name,
+ channel->session_id);
+ ret = LTTCOMM_CONSUMERD_UNKNOWN_ERROR;
+ goto error_unlock;
+ default:
+ abort();
+ }
- next:
- pthread_mutex_unlock(&stream->lock);
+ next:
+ pthread_mutex_unlock(&stream->lock);
+ }
}
-
end_rcu_unlock:
- rcu_read_unlock();
end:
return ret;
#include <common/hashtable/utils.hpp>
#include <common/macros.hpp>
#include <common/optional.hpp>
+#include <common/urcu.hpp>
#include <fcntl.h>
#include <inttypes.h>
}
DBG_NO_LOC(" Tracked unsuspendable file descriptors");
- rcu_read_lock();
- cds_lfht_for_each_entry (
- tracker->unsuspendable_fds, &iter, unsuspendable_fd, tracker_node) {
- DBG_NO_LOC(" %s [active, fd %d]",
- unsuspendable_fd->name ?: "Unnamed",
- unsuspendable_fd->fd);
- }
- rcu_read_unlock();
+
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ tracker->unsuspendable_fds, &iter, unsuspendable_fd, tracker_node) {
+ DBG_NO_LOC(" %s [active, fd %d]",
+ unsuspendable_fd->name ?: "Unnamed",
+ unsuspendable_fd->fd);
+ }
+ }
+
if (!UNSUSPENDABLE_COUNT(tracker)) {
DBG_NO_LOC(" None");
}
int ret, user_ret, i, fds_to_suspend;
unsigned int active_fds;
struct unsuspendable_fd **entries;
+ lttng::urcu::read_lock_guard read_lock;
entries = calloc<unsuspendable_fd *>(fd_count);
if (!entries) {
entries[i] = entry;
}
- rcu_read_lock();
for (i = 0; i < fd_count; i++) {
struct cds_lfht_node *node;
struct unsuspendable_fd *entry = entries[i];
if (node != &entry->tracker_node) {
ret = -EEXIST;
- rcu_read_unlock();
goto end_free_entries;
}
entries[i] = nullptr;
}
tracker->count.unsuspendable += fd_count;
- rcu_read_unlock();
ret = user_ret;
end_unlock:
pthread_mutex_unlock(&tracker->lock);
{
int i, ret, user_ret;
int *fds = nullptr;
+ lttng::urcu::read_lock_guard read_lock;
/*
* Maintain a local copy of fds_in as the user's callback may modify its
memcpy(fds, fds_in, sizeof(*fds) * fd_count);
pthread_mutex_lock(&tracker->lock);
- rcu_read_lock();
/* Let the user close the file descriptors. */
user_ret = close(user_data, fds_in);
tracker->count.unsuspendable -= fd_count;
ret = 0;
end_unlock:
- rcu_read_unlock();
pthread_mutex_unlock(&tracker->lock);
free(fds);
end:
#include <common/macros.hpp>
#include <common/optional.hpp>
#include <common/string-utils/format.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <lttng/constant.h>
return;
}
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_del(inode->registry_ht, &inode->registry_node);
- rcu_read_unlock();
if (inode->unlink_pending) {
int ret;
struct cds_lfht_iter iter;
struct cds_lfht_node *node;
struct lttng_inode *inode = nullptr;
+ lttng::urcu::read_lock_guard read_lock;
ret = fstat(fd, &statbuf);
if (ret < 0) {
id.device = statbuf.st_dev;
id.inode = statbuf.st_ino;
- rcu_read_lock();
cds_lfht_lookup(registry->inodes, lttng_inode_id_hash(&id), lttng_inode_match, &id, &iter);
node = cds_lfht_iter_get_node(&iter);
if (node) {
		inode = lttng::utils::container_of(node, &lttng_inode::registry_node);
lttng_inode_get(inode);
- goto end_unlock;
+ goto end;
}
inode = lttng_inode_create(&id, registry->inodes, unlinked_file_pool, handle, path);
if (!inode) {
- goto end_unlock;
+ goto end;
}
node = cds_lfht_add_unique(registry->inodes,
&inode->id,
&inode->registry_node);
LTTNG_ASSERT(node == &inode->registry_node);
-end_unlock:
- rcu_read_unlock();
end:
return inode;
}
#include <common/common.hpp>
#include <common/defaults.hpp>
+#include <common/urcu.hpp>
#include <string.h>
#include <urcu.h>
LTTNG_ASSERT(node);
/* RCU read lock protects from ABA. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
node_ptr = cds_lfht_add_unique(ht->ht,
ht->hash_fct(node->key, lttng_ht_seed),
ht->match_fct,
node->key,
&node->node);
- rcu_read_unlock();
LTTNG_ASSERT(node_ptr == &node->node);
}
LTTNG_ASSERT(node);
/* RCU read lock protects from ABA. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_add(ht->ht, ht->hash_fct(node->key, lttng_ht_seed), &node->node);
- rcu_read_unlock();
}
/*
LTTNG_ASSERT(node);
/* RCU read lock protects from ABA. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_add(ht->ht, ht->hash_fct((void *) node->key, lttng_ht_seed), &node->node);
- rcu_read_unlock();
}
/*
LTTNG_ASSERT(node);
/* RCU read lock protects from ABA. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_add(ht->ht, ht->hash_fct(&node->key, lttng_ht_seed), &node->node);
- rcu_read_unlock();
}
/*
LTTNG_ASSERT(node);
/* RCU read lock protects from ABA. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
node_ptr = cds_lfht_add_unique(ht->ht,
ht->hash_fct((void *) node->key, lttng_ht_seed),
ht->match_fct,
(void *) node->key,
&node->node);
- rcu_read_unlock();
LTTNG_ASSERT(node_ptr == &node->node);
}
LTTNG_ASSERT(node);
/* RCU read lock protects from ABA. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
node_ptr = cds_lfht_add_unique(ht->ht,
ht->hash_fct(&node->key, lttng_ht_seed),
ht->match_fct,
&node->key,
&node->node);
- rcu_read_unlock();
LTTNG_ASSERT(node_ptr == &node->node);
}
LTTNG_ASSERT(node);
/* RCU read lock protects from ABA. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
node_ptr = cds_lfht_add_unique(ht->ht,
ht->hash_fct((void *) &node->key, lttng_ht_seed),
ht->match_fct,
(void *) &node->key,
&node->node);
- rcu_read_unlock();
LTTNG_ASSERT(node_ptr == &node->node);
}
LTTNG_ASSERT(node);
/* RCU read lock protects from ABA. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
node_ptr = cds_lfht_add_replace(ht->ht,
ht->hash_fct((void *) node->key, lttng_ht_seed),
ht->match_fct,
(void *) node->key,
&node->node);
- rcu_read_unlock();
if (!node_ptr) {
return nullptr;
} else {
		return lttng::utils::container_of(node_ptr, &lttng_ht_node_ulong::node);
}
- LTTNG_ASSERT(node_ptr == &node->node);
}
/*
LTTNG_ASSERT(node);
/* RCU read lock protects from ABA. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
node_ptr = cds_lfht_add_replace(ht->ht,
ht->hash_fct(&node->key, lttng_ht_seed),
ht->match_fct,
&node->key,
&node->node);
- rcu_read_unlock();
if (!node_ptr) {
return nullptr;
} else {
		return lttng::utils::container_of(node_ptr, &lttng_ht_node_u64::node);
}
- LTTNG_ASSERT(node_ptr == &node->node);
}
/*
LTTNG_ASSERT(iter);
/* RCU read lock protects from ABA. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
ret = cds_lfht_del(ht->ht, iter->iter.node);
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(ht->ht);
/* RCU read lock protects from ABA and allows RCU traversal. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_count_nodes(ht->ht, &scb, &count, &sca);
- rcu_read_unlock();
return count;
}
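
The hashtable.cpp wrappers above all repeat the comment "RCU read lock protects from ABA": liburcu requires the read-side lock to be held around cds_lfht add, delete, lookup, and count operations so that a node's memory cannot be freed and recycled while the operation traverses it. With the guard, each wrapper collapses to the shape below; the _sketch suffix marks this as an illustration modelled on the u64 add wrapper, not the shipped function:

static void lttng_ht_add_u64_sketch(struct lttng_ht *ht, struct lttng_ht_node_u64 *node)
{
	/* RCU read lock protects the hash table traversal from ABA. */
	lttng::urcu::read_lock_guard read_lock;

	cds_lfht_add(ht->ht, ht->hash_fct(&node->key, lttng_ht_seed), &node->node);
} /* Read-side lock released on scope exit. */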
#include <common/relayd/relayd.hpp>
#include <common/sessiond-comm/relayd.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <bin/lttng-consumerd/health-consumerd.hpp>
/* Prevent channel modifications while we perform the snapshot.*/
pthread_mutex_lock(&channel->lock);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Splice is not supported yet for channel snapshot. */
if (channel->output != CONSUMER_CHANNEL_MMAP) {
end_unlock:
pthread_mutex_unlock(&stream->lock);
end:
- rcu_read_unlock();
pthread_mutex_unlock(&channel->lock);
return ret;
}
DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s", key, path);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
metadata_stream = metadata_channel->metadata_stream;
LTTNG_ASSERT(metadata_stream);
metadata_stream->read_subbuffer_ops.unlock(metadata_stream);
consumer_stream_destroy(metadata_stream, nullptr);
metadata_channel->metadata_stream = nullptr;
- rcu_read_unlock();
return ret;
}
health_code_update();
/* relayd needs RCU read-side protection */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
switch (msg.cmd_type) {
case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
}
case LTTNG_CONSUMER_UPDATE_STREAM:
{
- rcu_read_unlock();
return -ENOSYS;
}
case LTTNG_CONSUMER_DESTROY_RELAYD:
end:
health_code_update();
- rcu_read_unlock();
return ret_func;
}
#define member_sizeof(type, field) sizeof(((type *) 0)->field)
#define ASSERT_LOCKED(lock) LTTNG_ASSERT(pthread_mutex_trylock(&(lock)))
-#define ASSERT_RCU_READ_LOCKED(lock) LTTNG_ASSERT(rcu_read_ongoing())
+#define ASSERT_RCU_READ_LOCKED() LTTNG_ASSERT(rcu_read_ongoing())
+#define ASSERT_RCU_READ_UNLOCKED() LTTNG_ASSERT(!rcu_read_ongoing())
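
The old ASSERT_RCU_READ_LOCKED(lock) declared a lock parameter it never used, even though call sites in this patch invoke it with no argument; the corrected definition drops the parameter, and the new ASSERT_RCU_READ_UNLOCKED() lets a function assert that it is not running inside a read-side critical section. Both rely on liburcu's rcu_read_ongoing(). Intended usage, sketched with hypothetical functions:

/* Callee that documents and enforces a caller-holds-the-lock contract. */
static struct ust_app *find_app_by_pid_locked(pid_t pid)
{
	ASSERT_RCU_READ_LOCKED();
	/* ... hash table lookup under the caller's read lock ... */
	return nullptr; /* placeholder body */
}

/* Teardown path that may block and must not be in a read-side section. */
static void wait_for_readers_and_teardown(void)
{
	ASSERT_RCU_READ_UNLOCKED();
	synchronize_rcu(); /* would deadlock if called under rcu_read_lock() */
	/* ... free structures ... */
}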
/* Attribute suitable to tag functions as having printf()-like arguments. */
#define ATTR_FORMAT_PRINTF(_string_index, _first_to_check) \
#include <common/time.hpp>
#include <common/trace-chunk-registry.hpp>
#include <common/trace-chunk.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <lttng/constant.h>
element = lttng::utils::container_of(chunk,
						  &lttng_trace_chunk_registry_element::chunk);
if (element->registry) {
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_del(element->registry->ht, &element->trace_chunk_registry_ht_node);
- rcu_read_unlock();
call_rcu(&element->rcu_node, free_lttng_trace_chunk_registry_element);
} else {
/* Never published, can be free'd immediately. */
pthread_mutex_lock(&chunk->lock);
element = lttng_trace_chunk_registry_element_create_from_chunk(chunk, session_id);
pthread_mutex_unlock(&chunk->lock);
+
+ lttng::urcu::read_lock_guard read_lock;
if (!element) {
goto end;
}
chunk = nullptr;
element_hash = lttng_trace_chunk_registry_element_hash(element);
- rcu_read_lock();
while (true) {
struct cds_lfht_node *published_node;
struct lttng_trace_chunk *published_chunk;
* chunk.
*/
}
- rcu_read_unlock();
end:
return element ? &element->chunk : nullptr;
}
struct lttng_trace_chunk *published_chunk = nullptr;
struct cds_lfht_iter iter;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_lookup(registry->ht,
element_hash,
lttng_trace_chunk_registry_element_match,
published_chunk = &published_element->chunk;
}
end:
- rcu_read_unlock();
return published_chunk;
}
struct cds_lfht_node *published_node;
struct cds_lfht_iter iter;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_lookup(registry->ht,
element_hash,
lttng_trace_chunk_registry_element_match,
*chunk_exists = !cds_lfht_is_node_deleted(published_node);
end:
- rcu_read_unlock();
return ret;
}
unsigned int trace_chunks_left = 0;
DBG("Releasing trace chunk registry to all trace chunks");
- rcu_read_lock();
- cds_lfht_for_each_entry (registry->ht, &iter, chunk_element, trace_chunk_registry_ht_node) {
- const char *chunk_id_str = "none";
- char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)];
-
- pthread_mutex_lock(&chunk_element->chunk.lock);
- if (chunk_element->chunk.id.is_set) {
- int fmt_ret;
-
- fmt_ret = snprintf(chunk_id_buf,
- sizeof(chunk_id_buf),
- "%" PRIu64,
- chunk_element->chunk.id.value);
- if (fmt_ret < 0 || fmt_ret >= sizeof(chunk_id_buf)) {
- chunk_id_str = "formatting error";
- } else {
- chunk_id_str = chunk_id_buf;
+
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ registry->ht, &iter, chunk_element, trace_chunk_registry_ht_node) {
+ const char *chunk_id_str = "none";
+ char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)];
+
+ pthread_mutex_lock(&chunk_element->chunk.lock);
+ if (chunk_element->chunk.id.is_set) {
+ int fmt_ret;
+
+ fmt_ret = snprintf(chunk_id_buf,
+ sizeof(chunk_id_buf),
+ "%" PRIu64,
+ chunk_element->chunk.id.value);
+ if (fmt_ret < 0 || fmt_ret >= sizeof(chunk_id_buf)) {
+ chunk_id_str = "formatting error";
+ } else {
+ chunk_id_str = chunk_id_buf;
+ }
}
+
+ DBG("Releasing reference to trace chunk: session_id = %" PRIu64
+			    ", chunk_id = %s, name = \"%s\", status = %s",
+ chunk_element->session_id,
+ chunk_id_str,
+ chunk_element->chunk.name ?: "none",
+			    chunk_element->chunk.close_command.is_set ? "closed" : "open");
+ pthread_mutex_unlock(&chunk_element->chunk.lock);
+ lttng_trace_chunk_put(&chunk_element->chunk);
+ trace_chunks_left++;
}
+ }
- DBG("Releasing reference to trace chunk: session_id = %" PRIu64
- "chunk_id = %s, name = \"%s\", status = %s",
- chunk_element->session_id,
- chunk_id_str,
- chunk_element->chunk.name ?: "none",
- chunk_element->chunk.close_command.is_set ? "open" : "closed");
- pthread_mutex_unlock(&chunk_element->chunk.lock);
- lttng_trace_chunk_put(&chunk_element->chunk);
- trace_chunks_left++;
- }
- rcu_read_unlock();
DBG("Released reference to %u trace chunks in %s()", trace_chunks_left, __FUNCTION__);
return trace_chunks_left;
#include <common/relayd/relayd.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/shm.hpp>
+#include <common/urcu.hpp>
#include <common/utils.hpp>
#include <lttng/ust-ctl.h>
DBG("UST consumer flush channel key %" PRIu64, chan_key);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
channel = consumer_find_channel(chan_key);
if (!channel) {
ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
*/
sample_and_send_channel_buffer_stats(channel);
error:
- rcu_read_unlock();
return ret;
}
DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
channel = consumer_find_channel(chan_key);
if (!channel) {
ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
pthread_mutex_unlock(&stream->lock);
}
error:
- rcu_read_unlock();
return ret;
}
DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s", key, path);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
LTTNG_ASSERT(!metadata_channel->monitor);
metadata_channel->metadata_stream = nullptr;
error:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(ctx);
ASSERT_RCU_READ_LOCKED();
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (relayd_id != (uint64_t) -1ULL) {
use_relayd = 1;
pthread_mutex_unlock(&stream->lock);
}
- rcu_read_unlock();
return 0;
error_put_subbuf:
consumer_stream_close_output(stream);
error_unlock:
pthread_mutex_unlock(&stream->lock);
- rcu_read_unlock();
return ret;
}
health_code_update();
/* relayd needs RCU read-side lock */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
switch (msg.cmd_type) {
case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
}
case LTTNG_CONSUMER_UPDATE_STREAM:
{
- rcu_read_unlock();
return -ENOSYS;
}
case LTTNG_CONSUMER_DATA_PENDING:
uint64_t key = msg.u.discarded_events.channel_key;
DBG("UST consumer discarded events command for session id %" PRIu64, id);
- rcu_read_lock();
pthread_mutex_lock(&the_consumer_data.lock);
ht = the_consumer_data.stream_list_ht;
}
}
pthread_mutex_unlock(&the_consumer_data.lock);
- rcu_read_unlock();
DBG("UST consumer discarded events command for session id %" PRIu64
", channel key %" PRIu64,
uint64_t key = msg.u.lost_packets.channel_key;
DBG("UST consumer lost packets command for session id %" PRIu64, id);
- rcu_read_lock();
pthread_mutex_lock(&the_consumer_data.lock);
ht = the_consumer_data.stream_list_ht;
}
}
pthread_mutex_unlock(&the_consumer_data.lock);
- rcu_read_unlock();
DBG("UST consumer lost packets command for session id %" PRIu64
", channel key %" PRIu64,
goto end;
end:
- rcu_read_unlock();
health_code_update();
return ret_func;
}
DBG("UST consumer closing all metadata streams");
- rcu_read_lock();
- cds_lfht_for_each_entry (metadata_ht->ht, &iter.iter, stream, node.node) {
- health_code_update();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- pthread_mutex_lock(&stream->chan->lock);
- lttng_ustconsumer_close_metadata(stream->chan);
- pthread_mutex_unlock(&stream->chan->lock);
+ cds_lfht_for_each_entry (metadata_ht->ht, &iter.iter, stream, node.node) {
+ health_code_update();
+
+ pthread_mutex_lock(&stream->chan->lock);
+ lttng_ustconsumer_close_metadata(stream->chan);
+ pthread_mutex_unlock(&stream->chan->lock);
+ }
}
- rcu_read_unlock();
}
void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)