int lttng_session_active(void);
typedef int (*t_statedump_func_ptr)(struct lttng_session *session);
-int lttng_handle_pending_statedumps(t_statedump_func_ptr statedump_func_ptr);
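+/*
+ * Handle any pending statedump for the sessions owned by "owner".
+ * Must be called without ust_lock held; ust_lock is taken internally.
+ */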
+int lttng_handle_pending_statedump(void *owner);
+struct cds_list_head *_lttng_get_sessions(void);
#endif /* _LTTNG_UST_EVENTS_H */
#include "tracepoint-internal.h"
#include "lttng-tracer.h"
#include "lttng-tracer-core.h"
+#include "lttng-ust-baddr.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
#include "jhash.h"
static CDS_LIST_HEAD(sessions);
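+
+/*
+ * Accessor for the static session list, used by the base-address
+ * statedump code to iterate over sessions.
+ */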
+struct cds_list_head *_lttng_get_sessions(void)
+{
+ return &sessions;
+}
+
static void _lttng_event_destroy(struct lttng_event *event);
static
}
/*
- * Called after session enable: For each session, execute pending statedumps.
+ * For each session owned by the caller thread, execute any pending
+ * statedump. Only the caller's sessions are handled, because ust_lock
+ * is not held across the entire iteration.
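+ *
+ * Call chain and lock nesting (see the hunks below):
+ *
+ *   handle_pending_statedump()          [called without ust_lock held]
+ *     lttng_handle_pending_statedump()
+ *       lttng_ust_baddr_statedump()
+ *         dl_iterate_phdr()             [dynamic loader lock held]
+ *           extract_soinfo_events()
+ *             ust_lock()/ust_unlock()   [UST lock nested inside]
+ *       ust_lock(): clear the statedump_pending flags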
*/
-int lttng_handle_pending_statedumps(t_statedump_func_ptr statedump_func_ptr)
+int lttng_handle_pending_statedump(void *owner)
{
struct lttng_session *session;
+ /* Execute state dump */
+ lttng_ust_baddr_statedump(owner);
+
+ /* Clear pending state dump */
+ ust_lock();
cds_list_for_each_entry(session, &sessions, node) {
- if (session->statedump_pending) {
- session->statedump_pending = 0;
- statedump_func_ptr(session);
- }
+ if (session->owner != owner)
+ continue;
+ if (!session->statedump_pending)
+ continue;
+ session->statedump_pending = 0;
}
+ ust_unlock();
return 0;
}
#define TRACEPOINT_DEFINE
#include "ust_baddr_statedump.h"
-static int
-extract_soinfo_events(struct dl_phdr_info *info, size_t size, void *data)
+static
+int extract_soinfo_events(struct dl_phdr_info *info, size_t size, void *data)
{
int j;
int num_loadable_segment = 0;
+ void *owner = data;
+ struct cds_list_head *sessionsp;
+
+ sessionsp = _lttng_get_sessions();
for (j = 0; j < info->dlpi_phnum; j++) {
char resolved_path[PATH_MAX];
struct stat sostat;
void *base_addr_ptr;
+ struct lttng_session *session;
if (info->dlpi_phdr[j].p_type != PT_LOAD)
continue;
sostat.st_mtime = -1;
}
- tracepoint(ust_baddr_statedump, soinfo,
- (struct lttng_session *) data, base_addr_ptr,
- resolved_path, sostat.st_size, sostat.st_mtime);
+ /*
+ * UST lock needs to be nested within dynamic loader
+ * lock.
+ */
+ ust_lock();
+ cds_list_for_each_entry(session, sessionsp, node) {
+ if (session->owner != owner)
+ continue;
+ if (!session->statedump_pending)
+ continue;
+ tracepoint(ust_baddr_statedump, soinfo,
+ session, base_addr_ptr,
+ resolved_path, sostat.st_size,
+ sostat.st_mtime);
+ }
+ ust_unlock();
/*
* We are only interested in the base address (lowest virtual
return 0;
}
-int
-lttng_ust_baddr_statedump(struct lttng_session *session)
+int lttng_ust_baddr_statedump(void *owner)
{
if (getenv("LTTNG_UST_WITHOUT_BADDR_STATEDUMP"))
return 0;
/*
* Iterate through the list of currently loaded shared objects and
- * generate events for loadable segments using extract_soinfo_events
+ * generate events for loadable segments using
+ * extract_soinfo_events.
*/
- dl_iterate_phdr(extract_soinfo_events, session);
+ dl_iterate_phdr(extract_soinfo_events, owner);
return 0;
}
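For reference, here is a minimal standalone sketch (not part of the patch, illustrative only) of how dl_iterate_phdr() walks the currently loaded objects and how a base address can be derived from the first PT_LOAD program header, similar to what extract_soinfo_events() does above:

#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

static
int print_base_addr(struct dl_phdr_info *info, size_t size, void *data)
{
	int j;

	(void) size;
	(void) data;
	for (j = 0; j < info->dlpi_phnum; j++) {
		if (info->dlpi_phdr[j].p_type != PT_LOAD)
			continue;
		/*
		 * Relocation offset (dlpi_addr) plus the virtual address of
		 * the first loadable segment gives the object's base address.
		 */
		printf("%s loaded at %p\n", info->dlpi_name,
			(void *) (info->dlpi_addr + info->dlpi_phdr[j].p_vaddr));
		break;
	}
	return 0;	/* Returning 0 continues the iteration. */
}

int main(void)
{
	dl_iterate_phdr(print_base_addr, NULL);
	return 0;
}

dl_iterate_phdr() invokes its callback while the dynamic loader lock is held, which is why the ust_lock() taken inside extract_soinfo_events() ends up nested within that lock, as noted in the comment above.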
#include <lttng/ust-events.h>
-int lttng_ust_baddr_statedump(struct lttng_session *session);
+int lttng_ust_baddr_statedump(void *owner);
#endif /* LTTNG_UST_BADDR_H */
char wait_shm_path[PATH_MAX];
char *wait_shm_mmap;
- int session_enabled;
+ /* Keep track of lazy state dump not performed yet. */
+ int statedump_pending;
};
/* Socket from app (connect) to session daemon (listen) for communication */
.wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
- .session_enabled = 0,
+ .statedump_pending = 0,
};
/* TODO: allow global_apps_sock_path override */
.socket = -1,
.notify_socket = -1,
- .session_enabled = 0,
+ .statedump_pending = 0,
};
static int wait_poll_fallback;
return 0;
}
+/*
+ * Only execute pending statedump after the constructor semaphore has
+ * been posted by each listener thread. This means statedump will only
+ * be performed after the "registration done" command is received from
+ * each session daemon the application is connected to.
+ *
+ * This ensures we don't run into deadlock issues with the dynamic
+ * loader mutex, which is held while the constructor is running and
+ * waiting on the constructor semaphore. All operations requiring this
+ * dynamic loader lock need to be postponed using this mechanism.
+ */
+static
+void handle_pending_statedump(struct sock_info *sock_info)
+{
+ int ctor_passed = sock_info->constructor_sem_posted;
+
+ if (ctor_passed && sock_info->statedump_pending) {
+ sock_info->statedump_pending = 0;
+ lttng_handle_pending_statedump(sock_info);
+ }
+}
+
static
int handle_message(struct sock_info *sock_info,
int sock, struct ustcomm_ust_msg *lum)
error:
ust_unlock();
- return ret;
-}
-static
-void handle_pending_statedumps(struct sock_info *sock_info)
-{
- int ctor_passed = sock_info->constructor_sem_posted;
+ /*
+ * Perform delayed statedump operations outside of the UST
+ * lock. We need to take the dynamic loader lock before we take
+ * the UST lock internally within handle_pending_statedump().
+ */
+ handle_pending_statedump(sock_info);
- if (ctor_passed && sock_info->session_enabled) {
- sock_info->session_enabled = 0;
- lttng_handle_pending_statedumps(&lttng_ust_baddr_statedump);
- }
+ return ret;
}
static
ret = handle_message(sock_info, sock, &lum);
if (ret) {
ERR("Error handling message for %s socket", sock_info->name);
- } else {
- handle_pending_statedumps(sock_info);
}
continue;
default:
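+/*
+ * Flag a lazy statedump as pending for this socket's owner. The actual
+ * statedump is performed later, outside of ust_lock, via
+ * handle_pending_statedump().
+ */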
void lttng_ust_sockinfo_session_enabled(void *owner)
{
struct sock_info *sock_info = owner;
- sock_info->session_enabled = 1;
+ sock_info->statedump_pending = 1;
}