X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=src%2Fbin%2Flttng-sessiond%2Fust-app.c;h=22f25da35a1c0d5d2bcf2d1c4df18430d124b69d;hb=d4769e1418514c6320ff16b2585562308f108c90;hp=1309d9d1dd76e586471c1ca7c958870aa3fd8106;hpb=e3ae3c71a6f1cab083f6c3b79116f108d6e989ba;p=lttng-tools.git

diff --git a/src/bin/lttng-sessiond/ust-app.c b/src/bin/lttng-sessiond/ust-app.c
index 1309d9d1d..22f25da35 100644
--- a/src/bin/lttng-sessiond/ust-app.c
+++ b/src/bin/lttng-sessiond/ust-app.c
@@ -425,10 +425,14 @@ void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
 /*
  * Push metadata to consumer socket.
  *
- * The socket lock MUST be acquired.
- * The ust app session lock MUST be acquired.
+ * RCU read-side lock must be held to guarantee existence of socket.
+ * Must be called with the ust app session lock held.
+ * Must be called with the registry lock held.
  *
  * On success, return the len of metadata pushed or else a negative value.
+ * Returning -EPIPE means we could not send the metadata, but this may
+ * be caused by a recoverable error (e.g. the application has terminated
+ * concurrently).
  */
 ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
 		struct consumer_socket *socket, int send_zero_data)
@@ -441,25 +445,23 @@ ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
 	assert(registry);
 	assert(socket);
 
-	pthread_mutex_lock(&registry->lock);
-
 	/*
-	 * Means that no metadata was assigned to the session. This can happens if
-	 * no start has been done previously.
+	 * Means that no metadata was assigned to the session. This can
+	 * happen if no start has been done previously.
 	 */
 	if (!registry->metadata_key) {
-		pthread_mutex_unlock(&registry->lock);
 		return 0;
 	}
 
 	/*
-	 * On a push metadata error either the consumer is dead or the metadata
-	 * channel has been destroyed because its endpoint might have died (e.g:
-	 * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
-	 * metadata again which is not valid anymore on the consumer side.
+	 * On a push metadata error either the consumer is dead or the
+	 * metadata channel has been destroyed because its endpoint
+	 * might have died (e.g. relayd), or because the application has
+	 * exited. If so, the metadata closed flag is set to 1 so we
+	 * deny pushing the metadata again, as it is no longer valid on
+	 * the consumer side.
 	 */
 	if (registry->metadata_closed) {
-		pthread_mutex_unlock(&registry->lock);
 		return -EPIPE;
 	}
 
@@ -488,29 +490,32 @@ ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
 	registry->metadata_len_sent += len;
 
 push_data:
-	pthread_mutex_unlock(&registry->lock);
 	ret = consumer_push_metadata(socket, registry->metadata_key,
 			metadata_str, len, offset);
 	if (ret < 0) {
 		/*
-		 * There is an acceptable race here between the registry metadata key
-		 * assignment and the creation on the consumer. The session daemon can
-		 * concurrently push metadata for this registry while being created on
-		 * the consumer since the metadata key of the registry is assigned
-		 * *before* it is setup to avoid the consumer to ask for metadata that
-		 * could possibly be not found in the session daemon.
+		 * There is an acceptable race here between the registry
+		 * metadata key assignment and the creation on the
+		 * consumer. The session daemon can concurrently push
+		 * metadata for this registry while being created on the
+		 * consumer since the metadata key of the registry is
+		 * assigned *before* it is set up, to prevent the
+		 * consumer from asking for metadata that might not be
+		 * found in the session daemon.
 		 *
-		 * The metadata will get pushed either by the session being stopped or
-		 * the consumer requesting metadata if that race is triggered.
+		 * The metadata will get pushed either by the session
+		 * being stopped or the consumer requesting metadata if
+		 * that race is triggered.
 		 */
 		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
 			ret = 0;
 		}
 
-		/* Update back the actual metadata len sent since it failed here. */
-		pthread_mutex_lock(&registry->lock);
+		/*
+		 * Roll back the actual metadata len sent, since the
+		 * push failed here.
+		 */
 		registry->metadata_len_sent -= len;
-		pthread_mutex_unlock(&registry->lock);
 		ret_val = ret;
 		goto error_push;
 	}
@@ -522,13 +527,14 @@ end:
 error:
 	if (ret_val) {
 		/*
-		 * On error, flag the registry that the metadata is closed. We were unable
-		 * to push anything and this means that either the consumer is not
-		 * responding or the metadata cache has been destroyed on the consumer.
+		 * On error, flag the registry's metadata as closed. We
+		 * were unable to push anything and this means that
+		 * either the consumer is not responding or the
+		 * metadata cache has been destroyed on the
+		 * consumer.
 		 */
 		registry->metadata_closed = 1;
 	}
-	pthread_mutex_unlock(&registry->lock);
 error_push:
 	free(metadata_str);
 	return ret_val;
@@ -540,9 +546,13 @@ error_push:
  * socket to send the metadata is retrieved from consumer, if sock
  * is not NULL we use it to send the metadata.
  * RCU read-side lock must be held while calling this function,
- * therefore ensuring existance of registry.
+ * therefore ensuring existence of registry. It also ensures existence
+ * of socket throughout this function.
  *
  * Return 0 on success else a negative error.
+ * Returning -EPIPE means we could not send the metadata, but this may
+ * be caused by a recoverable error (e.g. the application has terminated
+ * concurrently).
  */
 static int push_metadata(struct ust_registry_session *registry,
 		struct consumer_output *consumer)
@@ -555,50 +565,36 @@ static int push_metadata(struct ust_registry_session *registry,
 	assert(consumer);
 
 	pthread_mutex_lock(&registry->lock);
-
 	if (registry->metadata_closed) {
-		pthread_mutex_unlock(&registry->lock);
-		return -EPIPE;
+		ret_val = -EPIPE;
+		goto error;
 	}
 
 	/* Get consumer socket to use to push the metadata.*/
 	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
 			consumer);
-	pthread_mutex_unlock(&registry->lock);
 	if (!socket) {
 		ret_val = -1;
 		goto error;
 	}
 
-	/*
-	 * TODO: Currently, we hold the socket lock around sampling of the next
-	 * metadata segment to ensure we send metadata over the consumer socket in
-	 * the correct order. This makes the registry lock nest inside the socket
-	 * lock.
-	 *
-	 * Please note that this is a temporary measure: we should move this lock
-	 * back into ust_consumer_push_metadata() when the consumer gets the
-	 * ability to reorder the metadata it receives.
-	 */
-	pthread_mutex_lock(socket->lock);
 	ret = ust_app_push_metadata(registry, socket, 0);
-	pthread_mutex_unlock(socket->lock);
 	if (ret < 0) {
 		ret_val = ret;
 		goto error;
 	}
-
+	pthread_mutex_unlock(&registry->lock);
 	return 0;
 
 error:
-end:
+	pthread_mutex_unlock(&registry->lock);
 	return ret_val;
 }
 
 /*
  * Send to the consumer a close metadata command for the given session. Once
  * done, the metadata channel is deleted and the session metadata pointer is
- * nullified. The session lock MUST be acquired here unless the application is
+ * nullified. The session lock MUST be held unless the application is
  * in the destroy path.
  *
  * Return 0 on success else a negative value.
@@ -680,6 +676,9 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
 
 	pthread_mutex_lock(&ua_sess->lock);
 
+	assert(!ua_sess->deleted);
+	ua_sess->deleted = true;
+
 	registry = get_session_registry(ua_sess);
 	if (registry) {
 		/* Push metadata for application before freeing the application. */
@@ -2398,6 +2397,7 @@ static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
 	assert(reg_chan);
 	reg_chan->consumer_key = ua_chan->key;
 	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
+	reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
 
 	/* Create and add a channel registry to session. */
 	ret = ust_registry_channel_add(reg_sess->reg.ust,
@@ -3136,6 +3136,11 @@ void ust_app_unregister(int sock)
 		 */
 		pthread_mutex_lock(&ua_sess->lock);
 
+		if (ua_sess->deleted) {
+			pthread_mutex_unlock(&ua_sess->lock);
+			continue;
+		}
+
 		/*
 		 * Normally, this is done in the delete session process which is
 		 * executed in the call rcu below. However, upon registration we can't
@@ -3701,6 +3706,12 @@ int ust_app_create_channel_glb(struct ltt_ust_session *usess,
 		assert(ua_sess);
 
 		pthread_mutex_lock(&ua_sess->lock);
+
+		if (ua_sess->deleted) {
+			pthread_mutex_unlock(&ua_sess->lock);
+			continue;
+		}
+
 		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
 					sizeof(uchan->name))) {
 			copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
@@ -3770,6 +3781,11 @@ int ust_app_enable_event_glb(struct ltt_ust_session *usess,
 
 		pthread_mutex_lock(&ua_sess->lock);
 
+		if (ua_sess->deleted) {
+			pthread_mutex_unlock(&ua_sess->lock);
+			continue;
+		}
+
 		/* Lookup channel in the ust app session */
 		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
 		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
@@ -3836,6 +3852,12 @@ int ust_app_create_event_glb(struct ltt_ust_session *usess,
 		}
 
 		pthread_mutex_lock(&ua_sess->lock);
+
+		if (ua_sess->deleted) {
+			pthread_mutex_unlock(&ua_sess->lock);
+			continue;
+		}
+
 		/* Lookup channel in the ust app session */
 		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
 		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
@@ -3887,6 +3909,11 @@ int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
 
 	pthread_mutex_lock(&ua_sess->lock);
 
+	if (ua_sess->deleted) {
+		pthread_mutex_unlock(&ua_sess->lock);
+		goto end;
+	}
+
 	/* Upon restart, we skip the setup, already done */
 	if (ua_sess->started) {
 		goto skip_setup;
@@ -3987,6 +4014,11 @@ int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
 
 	pthread_mutex_lock(&ua_sess->lock);
 
+	if (ua_sess->deleted) {
+		pthread_mutex_unlock(&ua_sess->lock);
+		goto end_no_session;
+	}
+
 	/*
 	 * If started = 0, it means that stop trace has been called for a session
 	 * that was never started. It's possible since we can have a fail start
@@ -4067,6 +4099,10 @@ int ust_app_flush_app_session(struct ust_app *app,
 
 	pthread_mutex_lock(&ua_sess->lock);
 
+	if (ua_sess->deleted) {
+		goto end_deleted;
+	}
+
 	health_code_update();
 
 	/* Flushing buffers */
@@ -4096,6 +4132,7 @@ int ust_app_flush_app_session(struct ust_app *app,
 
 	health_code_update();
 
+end_deleted:
 	pthread_mutex_unlock(&ua_sess->lock);
 
 end_not_compatible:
@@ -4112,7 +4149,7 @@ static
 int ust_app_flush_session(struct ltt_ust_session *usess)
 
 {
-	int ret;
+	int ret = 0;
 
 	DBG("Flushing session buffers for all ust apps");
 
@@ -4153,7 +4190,6 @@ int ust_app_flush_session(struct ltt_ust_session *usess)
 			/* Push metadata. */
 			(void) push_metadata(ust_session_reg, usess->consumer);
 		}
-		ret = 0;
 		break;
 	}
 	case LTTNG_BUFFER_PER_PID:
@@ -4172,11 +4208,11 @@ int ust_app_flush_session(struct ltt_ust_session *usess)
 		break;
 	}
 	default:
+		ret = -1;
 		assert(0);
 		break;
 	}
 
-end_no_session:
 	rcu_read_unlock();
 	health_code_update();
 	return ret;
@@ -4350,6 +4386,11 @@ void ust_app_global_update(struct ltt_ust_session *usess, int sock)
 
 	pthread_mutex_lock(&ua_sess->lock);
 
+	if (ua_sess->deleted) {
+		pthread_mutex_unlock(&ua_sess->lock);
+		goto error;
+	}
+
 	/*
 	 * We can iterate safely here over all UST app session since the create ust
 	 * app session above made a shadow copy of the UST global domain from the
@@ -4443,6 +4484,12 @@ int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
 		}
 
 		pthread_mutex_lock(&ua_sess->lock);
+
+		if (ua_sess->deleted) {
+			pthread_mutex_unlock(&ua_sess->lock);
+			continue;
+		}
+
 		/* Lookup channel in the ust app session */
 		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
 		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
@@ -4501,6 +4548,12 @@ int ust_app_enable_event_pid(struct ltt_ust_session *usess,
 	}
 
 	pthread_mutex_lock(&ua_sess->lock);
+
+	if (ua_sess->deleted) {
+		ret = 0;
+		goto end_unlock;
+	}
+
 	/* Lookup channel in the ust app session */
 	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
 	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
@@ -5064,7 +5117,8 @@ void ust_app_destroy(struct ust_app *app)
  * Return 0 on success or else a negative value.
  */
 int ust_app_snapshot_record(struct ltt_ust_session *usess,
-		struct snapshot_output *output, int wait, uint64_t max_stream_size)
+		struct snapshot_output *output, int wait,
+		uint64_t nb_packets_per_stream)
 {
 	int ret = 0;
 	unsigned int snapshot_done = 0;
@@ -5108,14 +5162,14 @@ int ust_app_snapshot_record(struct ltt_ust_session *usess,
 					reg_chan, node.node) {
 				ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
 						output, 0, usess->uid, usess->gid, pathname, wait,
-						max_stream_size);
+						nb_packets_per_stream);
 				if (ret < 0) {
 					goto error;
 				}
 			}
 			ret = consumer_snapshot_channel(socket,
 					reg->registry->reg.ust->metadata_key, output, 1,
-					usess->uid, usess->gid, pathname, wait, max_stream_size);
+					usess->uid, usess->gid, pathname, wait, 0);
 			if (ret < 0) {
 				goto error;
 			}
@@ -5159,7 +5213,7 @@ int ust_app_snapshot_record(struct ltt_ust_session *usess,
 					ua_chan, node.node) {
 				ret = consumer_snapshot_channel(socket, ua_chan->key, output,
 						0, ua_sess->euid, ua_sess->egid, pathname, wait,
-						max_stream_size);
+						nb_packets_per_stream);
 				if (ret < 0) {
 					goto error;
 				}
@@ -5168,8 +5222,7 @@ int ust_app_snapshot_record(struct ltt_ust_session *usess,
 			registry = get_session_registry(ua_sess);
 			assert(registry);
 			ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
-					1, ua_sess->euid, ua_sess->egid, pathname, wait,
-					max_stream_size);
+					1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
 			if (ret < 0) {
 				goto error;
 			}
@@ -5197,11 +5250,12 @@ error:
 }
 
 /*
- * Return the number of streams for a UST session.
+ * Return the total size taken by one more packet per stream, session-wide.
  */
-unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
+uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
+		uint64_t cur_nr_packets)
 {
-	unsigned int ret = 0;
+	uint64_t tot_size = 0;
 	struct ust_app *app;
 	struct lttng_ht_iter iter;
@@ -5218,7 +5272,14 @@ unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
 		rcu_read_lock();
 		cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
 				reg_chan, node.node) {
-			ret += reg_chan->stream_count;
+			if (cur_nr_packets >= reg_chan->num_subbuf) {
+				/*
+				 * Don't take this channel into account if
+				 * we have already grabbed all its packets.
+				 */
+				continue;
+			}
+			tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
 		}
 		rcu_read_unlock();
 	}
@@ -5240,7 +5301,14 @@ unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
 
 			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
 					ua_chan, node.node) {
-				ret += ua_chan->streams.count;
+				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
+					/*
+					 * Don't take this channel into account if
+					 * we have already grabbed all its packets.
+					 */
+					continue;
+				}
+				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
 			}
 		}
 		rcu_read_unlock();
@@ -5251,5 +5319,5 @@ unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
 		break;
 	}
 
-	return ret;
+	return tot_size;
 }
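The metadata hunks move the registry lock out of ust_app_push_metadata() and into its callers, so the whole sample-and-send sequence runs under the registry lock (the temporary socket-lock nesting is dropped), and they document -EPIPE as a recoverable condition. Below is a simplified model of the resulting flow; the struct and function names are stand-ins for this sketch, not the real ust_registry_session API.

#include <errno.h>
#include <pthread.h>
#include <sys/types.h>

/* Stand-in for ust_registry_session; only the fields the sketch needs. */
struct registry {
	pthread_mutex_t lock;
	int metadata_closed;
};

/* Stand-in for ust_app_push_metadata(): registry lock must be held. */
static ssize_t push_locked(struct registry *reg)
{
	if (reg->metadata_closed) {
		return -EPIPE;	/* Recoverable: e.g. the app exited. */
	}
	/* ... sample the metadata cache and send it to the consumer ... */
	return 0;
}

/* Stand-in for push_metadata(): owns the lock for the whole sequence. */
static int push_metadata_model(struct registry *reg)
{
	ssize_t ret;

	pthread_mutex_lock(&reg->lock);
	ret = push_locked(reg);
	pthread_mutex_unlock(&reg->lock);
	return ret < 0 ? (int) ret : 0;
}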
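The bulk of the patch adds the ua_sess->deleted flag and guards every user of a ust_app_session with it: the flag is set in delete_ust_app_session() and tested everywhere else under the same ua_sess->lock, so a thread either observes a live session or backs out before touching state that is being torn down. The following self-contained sketch models the idiom; the struct and function names are illustrative, not the real sessiond API.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Minimal model of a ust_app_session for the purpose of this sketch. */
struct app_session {
	pthread_mutex_t lock;
	bool deleted;
};

/* Teardown path: mark the session deleted before dismantling it. */
static void session_delete(struct app_session *s)
{
	pthread_mutex_lock(&s->lock);
	assert(!s->deleted);	/* A session is deleted exactly once. */
	s->deleted = true;
	/* ... push metadata, release channels, etc. ... */
	pthread_mutex_unlock(&s->lock);
}

/* Every other path: test the flag first, under the same lock. */
static int session_use(struct app_session *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->deleted) {
		/* Application exited concurrently; not an error. */
		pthread_mutex_unlock(&s->lock);
		return 0;
	}
	/* ... the session is guaranteed alive until we unlock ... */
	pthread_mutex_unlock(&s->lock);
	return 0;
}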
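The snapshot hunks replace the max_stream_size byte cap with an explicit nb_packets_per_stream count, and ust_app_get_size_one_more_packet_per_stream() reports how many extra bytes granting one more packet to every stream would cost (channels whose num_subbuf is already covered contribute nothing, and the metadata channel is passed 0, i.e. no limit). A caller could translate a user-supplied maximum snapshot size into a packet count along these lines; get_nb_packets_per_stream() below is a hypothetical helper, not part of this patch.

#include <stdint.h>

struct ltt_ust_session;

/* Declared in ust-app.h by this patch. */
uint64_t ust_app_get_size_one_more_packet_per_stream(
		struct ltt_ust_session *usess, uint64_t cur_nr_packets);

/*
 * Hypothetical helper: grow the per-stream packet count until granting
 * one more packet to every stream would exceed max_size.
 */
static int64_t get_nb_packets_per_stream(struct ltt_ust_session *usess,
		uint64_t max_size)
{
	uint64_t cur_nr_packets = 0;
	int64_t size_left;

	if (max_size == 0) {
		return 0;	/* No cap: grab all available packets. */
	}
	size_left = (int64_t) max_size;
	for (;;) {
		uint64_t one_more = ust_app_get_size_one_more_packet_per_stream(
				usess, cur_nr_packets);

		if (one_more == 0) {
			/* Every channel already contributes all its packets. */
			break;
		}
		size_left -= (int64_t) one_more;
		if (size_left < 0) {
			break;
		}
		cur_nr_packets++;
	}
	if (cur_nr_packets == 0) {
		return -1;	/* Cannot fit even one packet per stream. */
	}
	return (int64_t) cur_nr_packets;
}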