# libust and '.' (that contains the linker script). However, '.'
# must be installed after libust so it can overwrite libust.so with
# the linker script.
-SUBDIRS = snprintf libust . tests libustinstr-malloc libustfork include doc
+SUBDIRS = snprintf libust include doc
+
+# temporarily disabled
+# . tests libustinstr-malloc libustfork
EXTRA_DIST = libust.ldscript.in libust-initializer.c libust-initializer.h
AC_CONFIG_MACRO_DIR([config])
AM_INIT_AUTOMAKE([foreign])
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
-AC_CONFIG_SRCDIR([ustctl/ustctl.c])
+AC_CONFIG_SRCDIR([include/ust/tracepoint.h])
AC_CONFIG_HEADERS([config.h include/ust/config.h])
AH_TEMPLATE([HAVE_EFFICIENT_UNALIGNED_ACCESS], [Use efficient unaligned access.])
# Compute minor/major version numbers
--- /dev/null
+/*
+ * ltt/ltt-channels.c
+ *
+ * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * LTTng channel management.
+ *
+ * Author:
+ * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#include <ust/marker.h>
+#include <ust/marker-internal.h>
+#include "channels.h"
+#include "usterr_signal_safe.h"
+
+/*
+ * ltt_channel_mutex may be nested inside the LTT trace mutex.
+ * ltt_channel_mutex may be nested inside the markers mutex.
+ */
+static DEFINE_MUTEX(ltt_channel_mutex);
+static CDS_LIST_HEAD(ltt_channels);
+/*
+ * Index of next channel in array. Makes sure that as long as a trace channel is
+ * allocated, no array index will be re-used when a channel is freed and then
+ * another channel is allocated. This index is cleared and the array indexes
+ * get reassigned when the index_urcu_ref goes back to 0, which indicates that no
+ * more trace channels are allocated.
+ */
+static unsigned int free_index;
+static struct urcu_ref index_urcu_ref; /* Keeps track of allocated trace channels */
+
+int ust_channels_overwrite_by_default = 0;
+int ust_channels_request_collection_by_default = 1;
+
+static struct ltt_channel_setting *lookup_channel(const char *name)
+{
+ struct ltt_channel_setting *iter;
+
+ cds_list_for_each_entry(iter, &ltt_channels, list)
+ if (strcmp(name, iter->name) == 0)
+ return iter;
+ return NULL;
+}
+
+/*
+ * Must be called when channel refcount falls to 0 _and_ also when the last
+ * trace is freed. This function is responsible for compacting the channel and
+ * event IDs when no users are active.
+ *
+ * Called with lock_markers() and channels mutex held.
+ */
+static void release_channel_setting(struct urcu_ref *urcu_ref)
+{
+ struct ltt_channel_setting *setting = _ust_container_of(urcu_ref,
+ struct ltt_channel_setting, urcu_ref);
+ struct ltt_channel_setting *iter;
+
+ if (uatomic_read(&index_urcu_ref.refcount) == 0
+ && uatomic_read(&setting->urcu_ref.refcount) == 0) {
+ cds_list_del(&setting->list);
+ free(setting);
+
+ free_index = 0;
+ cds_list_for_each_entry(iter, &ltt_channels, list) {
+ iter->index = free_index++;
+ iter->free_event_id = 0;
+ }
+ /* FIXME: why not run this? */
+//ust// markers_compact_event_ids();
+ }
+}
+
+/*
+ * Perform channel index compaction when the last trace channel is freed.
+ *
+ * Called with lock_markers() and channels mutex held.
+ */
+static void release_trace_channel(struct urcu_ref *urcu_ref)
+{
+ struct ltt_channel_setting *iter, *n;
+
+ cds_list_for_each_entry_safe(iter, n, &ltt_channels, list)
+ release_channel_setting(&iter->urcu_ref);
+}
+
+/**
+ * ltt_channels_register - Register a trace channel.
+ * @name: channel name
+ *
+ * Uses refcounting.
+ */
+int ltt_channels_register(const char *name)
+{
+ struct ltt_channel_setting *setting;
+ int ret = 0;
+
+ pthread_mutex_lock(&ltt_channel_mutex);
+ setting = lookup_channel(name);
+ if (setting) {
+ if (uatomic_read(&setting->urcu_ref.refcount) == 0)
+ goto init_urcu_ref;
+ else {
+ urcu_ref_get(&setting->urcu_ref);
+ goto end;
+ }
+ }
+ setting = zmalloc(sizeof(*setting));
+ if (!setting) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ cds_list_add(&setting->list, &ltt_channels);
+ strncpy(setting->name, name, PATH_MAX-1);
+ setting->index = free_index++;
+init_urcu_ref:
+ urcu_ref_init(&setting->urcu_ref);
+end:
+ pthread_mutex_unlock(&ltt_channel_mutex);
+ return ret;
+}
+
+/**
+ * ltt_channels_unregister - Unregister a trace channel.
+ * @name: channel name
+ *
+ * Must be called with markers mutex held.
+ */
+int ltt_channels_unregister(const char *name)
+{
+ struct ltt_channel_setting *setting;
+ int ret = 0;
+
+ pthread_mutex_lock(&ltt_channel_mutex);
+ setting = lookup_channel(name);
+ if (!setting || uatomic_read(&setting->urcu_ref.refcount) == 0) {
+ ret = -ENOENT;
+ goto end;
+ }
+ urcu_ref_put(&setting->urcu_ref, release_channel_setting);
+end:
+ pthread_mutex_unlock(&ltt_channel_mutex);
+ return ret;
+}
+
+/**
+ * ltt_channels_set_default - Set channel default behavior.
+ * @name: default channel name
+ * @subbuf_size: size of the subbuffers
+ * @subbuf_cnt: number of subbuffers
+ */
+int ltt_channels_set_default(const char *name,
+ unsigned int subbuf_size,
+ unsigned int subbuf_cnt)
+{
+ struct ltt_channel_setting *setting;
+ int ret = 0;
+
+ pthread_mutex_lock(&ltt_channel_mutex);
+ setting = lookup_channel(name);
+ if (!setting || uatomic_read(&setting->urcu_ref.refcount) == 0) {
+ ret = -ENOENT;
+ goto end;
+ }
+ setting->subbuf_size = subbuf_size;
+ setting->subbuf_cnt = subbuf_cnt;
+end:
+ pthread_mutex_unlock(&ltt_channel_mutex);
+ return ret;
+}
+
+/**
+ * ltt_channels_get_name_from_index - get channel name from channel index
+ * @index: channel index
+ *
+ * Allows looking up the channel name given its index. Done to keep the name
+ * information outside of each trace channel instance.
+ */
+const char *ltt_channels_get_name_from_index(unsigned int index)
+{
+ struct ltt_channel_setting *iter;
+
+ cds_list_for_each_entry(iter, &ltt_channels, list)
+ if (iter->index == index && uatomic_read(&iter->urcu_ref.refcount))
+ return iter->name;
+ return NULL;
+}
+
+static struct ltt_channel_setting *
+ltt_channels_get_setting_from_name(const char *name)
+{
+ struct ltt_channel_setting *iter;
+
+ cds_list_for_each_entry(iter, &ltt_channels, list)
+ if (!strcmp(iter->name, name)
+ && uatomic_read(&iter->urcu_ref.refcount))
+ return iter;
+ return NULL;
+}
+
+/**
+ * ltt_channels_get_index_from_name - get channel index from channel name
+ * @name: channel name
+ *
+ * Allows looking up the channel index given its name. Done to keep the name
+ * information outside of each trace channel instance.
+ * Returns -1 if not found.
+ */
+int ltt_channels_get_index_from_name(const char *name)
+{
+ struct ltt_channel_setting *setting;
+
+ setting = ltt_channels_get_setting_from_name(name);
+ if (setting)
+ return setting->index;
+ else
+ return -1;
+}
+
+/**
+ * ltt_channels_trace_alloc - Allocate channel structures for a trace
+ * @subbuf_size: subbuffer size. 0 uses default.
+ * @subbuf_cnt: number of subbuffers per per-cpu buffer. 0 uses default.
+ * @flags: Default channel flags
+ *
+ * Use the current channel list to allocate the channels for a trace.
+ * Called with trace lock held. Does not perform the trace buffer allocation,
+ * because we must let the user overwrite specific channel sizes.
+ */
+struct ust_channel *ltt_channels_trace_alloc(unsigned int *nr_channels,
+ int overwrite,
+ int request_collection,
+ int active)
+{
+ struct ust_channel *channel = NULL;
+ struct ltt_channel_setting *iter;
+
+ pthread_mutex_lock(&ltt_channel_mutex);
+ if (!free_index) {
+ WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
+ goto end;
+ }
+ if (!uatomic_read(&index_urcu_ref.refcount))
+ urcu_ref_init(&index_urcu_ref);
+ else
+ urcu_ref_get(&index_urcu_ref);
+ *nr_channels = free_index;
+ channel = zmalloc(sizeof(struct ust_channel) * free_index);
+ if (!channel) {
+ WARN("ltt_channel_struct: channel null after alloc");
+ goto end;
+ }
+ cds_list_for_each_entry(iter, &ltt_channels, list) {
+ if (!uatomic_read(&iter->urcu_ref.refcount))
+ continue;
+ channel[iter->index].subbuf_size = iter->subbuf_size;
+ channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
+ channel[iter->index].overwrite = overwrite;
+ channel[iter->index].request_collection = request_collection;
+ channel[iter->index].active = active;
+ channel[iter->index].channel_name = iter->name;
+ }
+end:
+ pthread_mutex_unlock(&ltt_channel_mutex);
+ return channel;
+}
+
+/**
+ * ltt_channels_trace_free - Free one trace's channels
+ * @channels: channels to free
+ *
+ * Called with trace lock held. The actual channel buffers must be freed before
+ * this function is called.
+ */
+void ltt_channels_trace_free(struct ust_channel *channels)
+{
+ lock_ust_marker();
+ pthread_mutex_lock(&ltt_channel_mutex);
+ free(channels);
+ urcu_ref_put(&index_urcu_ref, release_trace_channel);
+ pthread_mutex_unlock(&ltt_channel_mutex);
+ unlock_ust_marker();
+}
+
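+/*
+ * A rough usage sketch for the two functions above (caller-side variable
+ * names are hypothetical; the trace lock must be held as documented):
+ *
+ *	unsigned int nr_chan;
+ *	struct ust_channel *chan;
+ *
+ *	chan = ltt_channels_trace_alloc(&nr_chan, 0, 1, 1);
+ *	if (chan) {
+ *		... optionally override chan[i].subbuf_size / subbuf_cnt,
+ *		    then allocate the actual trace buffers ...
+ *		ltt_channels_trace_free(chan);
+ *	}
+ */
+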
+/**
+ * _ltt_channels_get_event_id - get next event ID for a marker
+ * @channel: channel name
+ * @name: event name
+ *
+ * Returns a unique event ID (for this channel) or < 0 on error.
+ * Must be called with channels mutex held.
+ */
+int _ltt_channels_get_event_id(const char *channel, const char *name)
+{
+ struct ltt_channel_setting *setting;
+ int ret;
+
+ setting = ltt_channels_get_setting_from_name(channel);
+ if (!setting) {
+ ret = -ENOENT;
+ goto end;
+ }
+ if (strcmp(channel, "metadata") == 0) {
+ if (strcmp(name, "core_marker_id") == 0)
+ ret = 0;
+ else if (strcmp(name, "core_marker_format") == 0)
+ ret = 1;
+ else if (strcmp(name, "testev") == 0)
+ ret = 2;
+ else
+ ret = -ENOENT;
+ goto end;
+ }
+ if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
+ ret = -ENOSPC;
+ goto end;
+ }
+ ret = setting->free_event_id++;
+end:
+ return ret;
+}
+
+/**
+ * ltt_channels_get_event_id - get next event ID for a marker
+ * @channel: channel name
+ * @name: event name
+ *
+ * Returns a unique event ID (for this channel) or < 0 on error.
+ */
+int ltt_channels_get_event_id(const char *channel, const char *name)
+{
+ int ret;
+
+ pthread_mutex_lock(&ltt_channel_mutex);
+ ret = _ltt_channels_get_event_id(channel, name);
+ pthread_mutex_unlock(&ltt_channel_mutex);
+ return ret;
+}
--- /dev/null
+#ifndef UST_CHANNELS_H
+#define UST_CHANNELS_H
+
+/*
+ * Copyright (C) 2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * Dynamic tracer channel allocation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define _LGPL_SOURCE
+#include <linux/limits.h>
+#include <errno.h>
+#include <ust/kcompat/kcompat.h>
+#include <ust/core.h>
+#include <urcu/list.h>
+#include <urcu/ref.h>
+
+#define EVENTS_PER_CHANNEL 65536
+#define MAX_CPUS 32
+
+struct ust_trace;
+
+struct ust_buffer;
+
+struct ust_channel {
+ /* First 32 bytes cache-hot cacheline */
+ struct ust_trace *trace;
+ int *buf_struct_shmids;
+ struct ust_buffer **buf;
+ int overwrite:1;
+ /* whether collection is requested upon trace start */
+ int request_collection:1;
+ int active:1;
+ unsigned int n_subbufs_order;
+ unsigned long commit_count_mask; /*
+ * Commit count mask, removing
+ * the MSBs corresponding to
+ * bits used to represent the
+ * subbuffer index.
+ */
+ /* End of first 32 bytes cacheline */
+
+ struct urcu_ref urcu_ref; /* Channel transport reference count */
+ size_t subbuf_size;
+ int subbuf_size_order;
+ unsigned int subbuf_cnt;
+ const char *channel_name;
+ int n_cpus;
+
+ u32 version;
+ size_t alloc_size;
+ struct cds_list_head list;
+} ____cacheline_aligned;
+
+struct ltt_channel_setting {
+ unsigned int subbuf_size;
+ unsigned int subbuf_cnt;
+ struct urcu_ref urcu_ref; /* Number of references to structure content */
+ struct cds_list_head list;
+ unsigned int index; /* index of channel in trace channel array */
+ u16 free_event_id; /* Next event ID to allocate */
+ char name[PATH_MAX];
+};
+
+extern int ltt_channels_register(const char *name);
+extern int ltt_channels_unregister(const char *name);
+extern int ltt_channels_set_default(const char *name,
+ unsigned int subbuf_size,
+ unsigned int subbuf_cnt);
+extern const char *ltt_channels_get_name_from_index(unsigned int index);
+extern int ltt_channels_get_index_from_name(const char *name);
+extern struct ust_channel *ltt_channels_trace_alloc(unsigned int *nr_channels,
+ int overwrite,
+ int request_collection,
+ int active);
+extern void ltt_channels_trace_free(struct ust_channel *channels);
+extern int _ltt_channels_get_event_id(const char *channel, const char *name);
+extern int ltt_channels_get_event_id(const char *channel, const char *name);
+
+extern int ust_channels_overwrite_by_default;
+extern int ust_channels_request_collection_by_default;
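+
+/*
+ * Typical registration sequence (sketch only; the channel name and sizes
+ * below are illustrative, not defaults defined by this header):
+ *
+ *	ltt_channels_register("cpu");
+ *	ltt_channels_set_default("cpu", 4096, 8);
+ *	...
+ *	ltt_channels_unregister("cpu");
+ */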
+
+#endif /* UST_CHANNELS_H */
--- /dev/null
+/*
+ * Copyright (C) 2007 Mathieu Desnoyers
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+/* This file contains a high-level API for activating and deactivating ust_markers,
+ * and making sure ust_markers in a given library can be released when the library
+ * is unloaded.
+ */
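+
+/*
+ * Rough usage sketch (the channel and ust_marker names are only examples):
+ * a probe is first made available with ltt_probe_register(), then attached
+ * to individual ust_markers by name, as the constructor below does for the
+ * metadata channel:
+ *
+ *	ltt_probe_register(&default_probe);
+ *	ltt_ust_marker_connect("cpu", "my_event", DEFAULT_PROBE);
+ *	...
+ *	ltt_ust_marker_disconnect("cpu", "my_event", DEFAULT_PROBE);
+ */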
+
+#include <ctype.h>
+#include <stdlib.h>
+
+#include "tracer.h"
+#include "usterr_signal_safe.h"
+
+#define DEFAULT_CHANNEL "cpu"
+#define DEFAULT_PROBE "default"
+
+static int initialized;
+
+CDS_LIST_HEAD(probes_list);
+
+/*
+ * Mutex protecting the probe slab cache.
+ * Nests inside the traces mutex.
+ */
+DEFINE_MUTEX(probes_mutex);
+
+struct ltt_available_probe default_probe = {
+ .name = "default",
+ .format = NULL,
+ .probe_func = ltt_vtrace,
+ .callbacks[0] = ltt_serialize_data,
+};
+
+//ust//static struct kmem_cache *ust_markers_loaded_cachep;
+static CDS_LIST_HEAD(ust_markers_loaded_list);
+/*
+ * List sorted by name strcmp order.
+ */
+static CDS_LIST_HEAD(probes_registered_list);
+
+static struct ltt_available_probe *get_probe_from_name(const char *pname)
+{
+ struct ltt_available_probe *iter;
+ int comparison, found = 0;
+
+ if (!pname)
+ pname = DEFAULT_PROBE;
+ cds_list_for_each_entry(iter, &probes_registered_list, node) {
+ comparison = strcmp(pname, iter->name);
+ if (!comparison)
+ found = 1;
+ if (comparison <= 0)
+ break;
+ }
+ if (found)
+ return iter;
+ else
+ return NULL;
+}
+
+/* (unused)
+static char *skip_spaces(char *buf)
+{
+ while (*buf != '\0' && isspace(*buf))
+ buf++;
+ return buf;
+}
+
+static char *skip_nonspaces(char *buf)
+{
+ while (*buf != '\0' && !isspace(*buf))
+ buf++;
+ return buf;
+}
+
+static void get_ust_marker_string(char *buf, char **start,
+ char **end)
+{
+ *start = skip_spaces(buf);
+ *end = skip_nonspaces(*start);
+ **end = '\0';
+}
+*/
+
+int ltt_probe_register(struct ltt_available_probe *pdata)
+{
+ int ret = 0;
+ int comparison;
+ struct ltt_available_probe *iter;
+
+ pthread_mutex_lock(&probes_mutex);
+ cds_list_for_each_entry_reverse(iter, &probes_registered_list, node) {
+ comparison = strcmp(pdata->name, iter->name);
+ if (!comparison) {
+ ret = -EBUSY;
+ goto end;
+ } else if (comparison > 0) {
+ /* We belong to the location right after iter. */
+ cds_list_add(&pdata->node, &iter->node);
+ goto end;
+ }
+ }
+ /* Should be added at the head of the list */
+ cds_list_add(&pdata->node, &probes_registered_list);
+end:
+ pthread_mutex_unlock(&probes_mutex);
+ return ret;
+}
+
+/*
+ * Called when a probe does not want to be called anymore.
+ */
+int ltt_probe_unregister(struct ltt_available_probe *pdata)
+{
+ int ret = 0;
+ struct ltt_active_ust_marker *amark, *tmp;
+
+ pthread_mutex_lock(&probes_mutex);
+ cds_list_for_each_entry_safe(amark, tmp, &ust_markers_loaded_list, node) {
+ if (amark->probe == pdata) {
+ ret = ust_marker_probe_unregister_private_data(
+ pdata->probe_func, amark);
+ if (ret)
+ goto end;
+ cds_list_del(&amark->node);
+ free(amark);
+ }
+ }
+ cds_list_del(&pdata->node);
+end:
+ pthread_mutex_unlock(&probes_mutex);
+ return ret;
+}
+
+/*
+ * Connect ust_marker "mname" to probe "pname".
+ * Only allow _one_ probe instance to be connected to a ust_marker.
+ */
+int ltt_ust_marker_connect(const char *channel, const char *mname,
+ const char *pname)
+
+{
+ int ret;
+ struct ltt_active_ust_marker *pdata;
+ struct ltt_available_probe *probe;
+
+ ltt_lock_traces();
+ pthread_mutex_lock(&probes_mutex);
+ probe = get_probe_from_name(pname);
+ if (!probe) {
+ ret = -ENOENT;
+ goto end;
+ }
+ pdata = ust_marker_get_private_data(channel, mname, probe->probe_func, 0);
+ if (pdata && !IS_ERR(pdata)) {
+ ret = -EEXIST;
+ goto end;
+ }
+ pdata = zmalloc(sizeof(struct ltt_active_ust_marker));
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ pdata->probe = probe;
+ /*
+ * ID has priority over channel in case of conflict.
+ */
+ ret = ust_marker_probe_register(channel, mname, NULL,
+ probe->probe_func, pdata);
+ if (ret)
+ free(pdata);
+ else
+ cds_list_add(&pdata->node, &ust_markers_loaded_list);
+end:
+ pthread_mutex_unlock(&probes_mutex);
+ ltt_unlock_traces();
+ return ret;
+}
+
+/*
+ * Disconnect ust_marker "mname", probe "pname".
+ */
+int ltt_ust_marker_disconnect(const char *channel, const char *mname,
+ const char *pname)
+{
+ struct ltt_active_ust_marker *pdata;
+ struct ltt_available_probe *probe;
+ int ret = 0;
+
+ pthread_mutex_lock(&probes_mutex);
+ probe = get_probe_from_name(pname);
+ if (!probe) {
+ ret = -ENOENT;
+ goto end;
+ }
+ pdata = ust_marker_get_private_data(channel, mname, probe->probe_func, 0);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto end;
+ } else if (!pdata) {
+ /*
+ * Not registered by us.
+ */
+ ret = -EPERM;
+ goto end;
+ }
+ ret = ust_marker_probe_unregister(channel, mname, probe->probe_func, pdata);
+ if (ret)
+ goto end;
+ else {
+ cds_list_del(&pdata->node);
+ free(pdata);
+ }
+end:
+ pthread_mutex_unlock(&probes_mutex);
+ return ret;
+}
+
+static void disconnect_all_ust_markers(void)
+{
+ struct ltt_active_ust_marker *pdata, *tmp;
+
+ cds_list_for_each_entry_safe(pdata, tmp, &ust_markers_loaded_list, node) {
+ ust_marker_probe_unregister_private_data(pdata->probe->probe_func,
+ pdata);
+ cds_list_del(&pdata->node);
+ free(pdata);
+ }
+}
+
+void __attribute__((constructor)) init_ust_marker_control(void)
+{
+ if (!initialized) {
+ int ret;
+
+ init_ust_marker();
+ ret = ltt_probe_register(&default_probe);
+ BUG_ON(ret);
+ ret = ltt_ust_marker_connect("metadata", "core_marker_format",
+ DEFAULT_PROBE);
+ BUG_ON(ret);
+ ret = ltt_ust_marker_connect("metadata", "core_marker_id", DEFAULT_PROBE);
+ BUG_ON(ret);
+ initialized = 1;
+ }
+}
+
+static void __attribute__((destructor)) ust_marker_control_exit(void)
+{
+ int ret;
+
+ ret = ltt_ust_marker_disconnect("metadata", "core_marker_format",
+ DEFAULT_PROBE);
+ BUG_ON(ret);
+ ret = ltt_ust_marker_disconnect("metadata", "core_marker_id",
+ DEFAULT_PROBE);
+ BUG_ON(ret);
+ ret = ltt_probe_unregister(&default_probe);
+ BUG_ON(ret);
+ disconnect_all_ust_markers();
+ ust_marker_synchronize_unregister();
+}
--- /dev/null
+/*
+ * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef MARKER_CONTROL_H
+#define MARKER_CONTROL_H
+
+void init_ust_marker_control(void);
+int ltt_probe_register(struct ltt_available_probe *pdata);
+
+#endif /* MARKER_CONTROL_H */
--- /dev/null
+/*
+ * Copyright (C) 2007-2011 Mathieu Desnoyers
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define _LGPL_SOURCE
+#include <stdlib.h>
+#include <errno.h>
+#include <pthread.h>
+#include <urcu-call-rcu.h>
+#include <urcu-bp.h>
+#include <urcu/rculist.h>
+#include <urcu/hlist.h>
+
+#include <ust/core.h>
+#include <ust/marker.h>
+#include <ust/marker-internal.h>
+#include <ust/tracepoint.h>
+#include <ust/tracepoint-internal.h>
+
+#include "usterr_signal_safe.h"
+#include "channels.h"
+#include "tracercore.h"
+#include "tracer.h"
+
+extern struct ust_marker * const __start___ust_marker_ptrs[] __attribute__((visibility("hidden")));
+extern struct ust_marker * const __stop___ust_marker_ptrs[] __attribute__((visibility("hidden")));
+
+/* Set to 1 to enable ust_marker debug output */
+static const int ust_marker_debug;
+static int initialized;
+static void (*new_ust_marker_cb)(struct ust_marker *);
+
+/*
+ * ust_marker mutex protects the builtin and module ust_marker and the
+ * hash table, as well as the ust_marker_libs list.
+ */
+static DEFINE_MUTEX(ust_marker_mutex);
+static CDS_LIST_HEAD(ust_marker_libs);
+
+/*
+ * Allow nested mutex for mutex listing and nested enable.
+ */
+static __thread int nested_mutex;
+
+void lock_ust_marker(void)
+{
+ if (!(nested_mutex++))
+ pthread_mutex_lock(&ust_marker_mutex);
+}
+
+void unlock_ust_marker(void)
+{
+ if (!(--nested_mutex))
+ pthread_mutex_unlock(&ust_marker_mutex);
+}
+
+/*
+ * ust_marker hash table, containing the active ust_marker.
+ * Protected by ust_marker mutex.
+ */
+#define UST_MARKER_HASH_BITS 6
+#define UST_MARKER_TABLE_SIZE (1 << UST_MARKER_HASH_BITS)
+static struct cds_hlist_head ust_marker_table[UST_MARKER_TABLE_SIZE];
+
+struct ust_marker_probe_array {
+ struct rcu_head rcu;
+ struct ust_marker_probe_closure c[0];
+};
+
+/*
+ * Note about RCU :
+ * It is used to make sure every handler has finished using its private
+ * data between two consecutive operation (add or remove) on a given
+ * ust_marker. It is also used to delay the free of multiple probes
+ * array until a quiescent state is reached. ust_marker entries
+ * modifications are protected by the ust_marker_mutex.
+ */
+struct ust_marker_entry {
+ struct cds_hlist_node hlist;
+ char *format;
+ char *name;
+ /* Probe wrapper */
+ void (*call)(const struct ust_marker *mdata, void *call_private, ...);
+ struct ust_marker_probe_closure single;
+ struct ust_marker_probe_array *multi;
+ int refcount; /* Number of times armed. 0 if disarmed. */
+ u16 channel_id;
+ u16 event_id;
+ unsigned char ptype:1;
+ unsigned char format_allocated:1;
+ char channel[0]; /* Contains channel'\0'name'\0'format'\0' */
+};
+
+/**
+ * __ust_marker_empty_function - Empty probe callback
+ * @mdata: ust_marker data
+ * @probe_private: probe private data
+ * @call_private: call site private data
+ * @fmt: format string
+ * @...: variable argument list
+ *
+ * Empty callback provided as a probe to the ust_marker. By providing
+ * this to a disabled ust_marker, we make sure the execution flow is
+ * always valid even though the function pointer change and the
+ * ust_marker enabling are two distinct operations that modify the
+ * execution flow of preemptible code.
+ */
+notrace void __ust_marker_empty_function(const struct ust_marker *mdata,
+ void *probe_private, void *call_private, const char *fmt, va_list *args)
+{
+}
+
+/*
+ * ust_marker_probe_cb - Callback that prepares the variable argument list for probes.
+ * @mdata: pointer of type struct ust_marker
+ * @call_private: caller site private data
+ * @...: Variable argument list.
+ *
+ * Since we do not use "typical" pointer based RCU in the 1 argument case, we
+ * need to put a full cmm_smp_rmb() in this branch. This is why we do not use
+ * rcu_dereference() for the pointer read.
+ */
+notrace void ust_marker_probe_cb(const struct ust_marker *mdata,
+ void *call_private, ...)
+{
+ va_list args;
+ char ptype;
+
+ /*
+ * rcu_read_lock_sched does two things: it disables preemption to make
+ * sure the teardown of the callbacks can be done correctly when they
+ * are in modules, and it ensures RCU read coherency.
+ */
+ rcu_read_lock();
+ ptype = mdata->ptype;
+ if (likely(!ptype)) {
+ ust_marker_probe_func *func;
+ /* Must read the ptype before ptr. They are not data dependent,
+ * so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
+ func = mdata->single.func;
+ /* Must read the ptr before private data. They are not data
+ * dependent, so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
+ va_start(args, call_private);
+ func(mdata, mdata->single.probe_private, call_private,
+ mdata->format, &args);
+ va_end(args);
+ } else {
+ struct ust_marker_probe_array *multi;
+ int i;
+ /*
+ * Read mdata->ptype before mdata->multi.
+ */
+ cmm_smp_rmb();
+ multi = mdata->multi;
+ /*
+ * multi points to an array, therefore accessing the array
+ * depends on reading multi. However, even in this case,
+ * we must ensure that the pointer is read _before_ the array
+ * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
+ * in the fast path, so put the explicit cmm_barrier here.
+ */
+ cmm_smp_read_barrier_depends();
+ for (i = 0; multi->c[i].func; i++) {
+ va_start(args, call_private);
+ multi->c[i].func(mdata, multi->c[i].probe_private,
+ call_private, mdata->format, &args);
+ va_end(args);
+ }
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * ust_marker_probe_cb_noarg - Callback that does not prepare the variable argument list.
+ * @mdata: pointer of type struct ust_marker
+ * @call_private: caller site private data
+ * @...: Variable argument list.
+ *
+ * Should be connected to ust_marker "UST_MARKER_NOARGS".
+ */
+static notrace void ust_marker_probe_cb_noarg(const struct ust_marker *mdata,
+ void *call_private, ...)
+{
+ va_list args; /* not initialized */
+ char ptype;
+
+ rcu_read_lock();
+ ptype = mdata->ptype;
+ if (likely(!ptype)) {
+ ust_marker_probe_func *func;
+ /* Must read the ptype before ptr. They are not data dependent,
+ * so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
+ func = mdata->single.func;
+ /* Must read the ptr before private data. They are not data
+ * dependent, so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
+ func(mdata, mdata->single.probe_private, call_private,
+ mdata->format, &args);
+ } else {
+ struct ust_marker_probe_array *multi;
+ int i;
+ /*
+ * Read mdata->ptype before mdata->multi.
+ */
+ cmm_smp_rmb();
+ multi = mdata->multi;
+ /*
+ * multi points to an array, therefore accessing the array
+ * depends on reading multi. However, even in this case,
+ * we must ensure that the pointer is read _before_ the array
+ * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
+ * in the fast path, so put the explicit cmm_barrier here.
+ */
+ cmm_smp_read_barrier_depends();
+ for (i = 0; multi->c[i].func; i++)
+ multi->c[i].func(mdata, multi->c[i].probe_private,
+ call_private, mdata->format, &args);
+ }
+ rcu_read_unlock();
+}
+
+static void free_old_closure(struct rcu_head *head)
+{
+ struct ust_marker_probe_array *multi =
+ _ust_container_of(head, struct ust_marker_probe_array, rcu);
+ free(multi);
+}
+
+static void debug_print_probes(struct ust_marker_entry *entry)
+{
+ int i;
+
+ if (!ust_marker_debug)
+ return;
+
+ if (!entry->ptype) {
+ DBG("Single probe : %p %p",
+ entry->single.func,
+ entry->single.probe_private);
+ } else {
+ for (i = 0; entry->multi->c[i].func; i++)
+ DBG("Multi probe %d : %p %p", i,
+ entry->multi->c[i].func,
+ entry->multi->c[i].probe_private);
+ }
+}
+
+static struct ust_marker_probe_array *
+ust_marker_entry_add_probe(struct ust_marker_entry *entry,
+ ust_marker_probe_func *probe, void *probe_private)
+{
+ int nr_probes = 0;
+ struct ust_marker_probe_array *old, *new;
+
+ WARN_ON(!probe);
+
+ debug_print_probes(entry);
+ old = entry->multi;
+ if (!entry->ptype) {
+ if (entry->single.func == probe &&
+ entry->single.probe_private == probe_private)
+ return ERR_PTR(-EBUSY);
+ if (entry->single.func == __ust_marker_empty_function) {
+ /* 0 -> 1 probes */
+ entry->single.func = probe;
+ entry->single.probe_private = probe_private;
+ entry->refcount = 1;
+ entry->ptype = 0;
+ debug_print_probes(entry);
+ return NULL;
+ } else {
+ /* 1 -> 2 probes */
+ nr_probes = 1;
+ old = NULL;
+ }
+ } else {
+ /* (N -> N+1), (N != 0, 1) probes */
+ for (nr_probes = 0; old->c[nr_probes].func; nr_probes++)
+ if (old->c[nr_probes].func == probe
+ && old->c[nr_probes].probe_private
+ == probe_private)
+ return ERR_PTR(-EBUSY);
+ }
+ /* + 2 : one for new probe, one for NULL func */
+ new = zmalloc(sizeof(struct ust_marker_probe_array)
+ + ((nr_probes + 2) * sizeof(struct ust_marker_probe_closure)));
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (!old)
+ new->c[0] = entry->single;
+ else
+ memcpy(&new->c[0], &old->c[0],
+ nr_probes * sizeof(struct ust_marker_probe_closure));
+ new->c[nr_probes].func = probe;
+ new->c[nr_probes].probe_private = probe_private;
+ entry->refcount = nr_probes + 1;
+ entry->multi = new;
+ entry->ptype = 1;
+ debug_print_probes(entry);
+ return old;
+}
+
+static struct ust_marker_probe_array *
+ust_marker_entry_remove_probe(struct ust_marker_entry *entry,
+ ust_marker_probe_func *probe, void *probe_private)
+{
+ int nr_probes = 0, nr_del = 0, i;
+ struct ust_marker_probe_array *old, *new;
+
+ old = entry->multi;
+
+ debug_print_probes(entry);
+ if (!entry->ptype) {
+ /* 0 -> N is an error */
+ WARN_ON(entry->single.func == __ust_marker_empty_function);
+ /* 1 -> 0 probes */
+ WARN_ON(probe && entry->single.func != probe);
+ WARN_ON(entry->single.probe_private != probe_private);
+ entry->single.func = __ust_marker_empty_function;
+ entry->refcount = 0;
+ entry->ptype = 0;
+ debug_print_probes(entry);
+ return NULL;
+ } else {
+ /* (N -> M), (N > 1, M >= 0) probes */
+ for (nr_probes = 0; old->c[nr_probes].func; nr_probes++) {
+ if ((!probe || old->c[nr_probes].func == probe)
+ && old->c[nr_probes].probe_private
+ == probe_private)
+ nr_del++;
+ }
+ }
+
+ if (nr_probes - nr_del == 0) {
+ /* N -> 0, (N > 1) */
+ entry->single.func = __ust_marker_empty_function;
+ entry->refcount = 0;
+ entry->ptype = 0;
+ } else if (nr_probes - nr_del == 1) {
+ /* N -> 1, (N > 1) */
+ for (i = 0; old->c[i].func; i++)
+ if ((probe && old->c[i].func != probe) ||
+ old->c[i].probe_private != probe_private)
+ entry->single = old->c[i];
+ entry->refcount = 1;
+ entry->ptype = 0;
+ } else {
+ int j = 0;
+ /* N -> M, (N > 1, M > 1) */
+ /* + 1 for NULL */
+ new = zmalloc(sizeof(struct ust_marker_probe_array)
+ + ((nr_probes - nr_del + 1) * sizeof(struct ust_marker_probe_closure)));
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ for (i = 0; old->c[i].func; i++)
+ if ((probe && old->c[i].func != probe) ||
+ old->c[i].probe_private != probe_private)
+ new->c[j++] = old->c[i];
+ entry->refcount = nr_probes - nr_del;
+ entry->ptype = 1;
+ entry->multi = new;
+ }
+ debug_print_probes(entry);
+ return old;
+}
+
+/*
+ * Get ust_marker if the ust_marker is present in the ust_marker hash table.
+ * Must be called with ust_marker_mutex held.
+ * Returns NULL if not present.
+ */
+static struct ust_marker_entry *get_ust_marker(const char *channel, const char *name)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct ust_marker_entry *e;
+ size_t channel_len = strlen(channel) + 1;
+ size_t name_len = strlen(name) + 1;
+ u32 hash;
+
+ hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
+ head = &ust_marker_table[hash & ((1 << UST_MARKER_HASH_BITS)-1)];
+ cds_hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(channel, e->channel) && !strcmp(name, e->name))
+ return e;
+ }
+ return NULL;
+}
+
+/*
+ * Add the ust_marker to the ust_marker hash table. Must be called with
+ * ust_marker_mutex held.
+ */
+static struct ust_marker_entry *add_ust_marker(const char *channel, const char *name,
+ const char *format)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct ust_marker_entry *e;
+ size_t channel_len = strlen(channel) + 1;
+ size_t name_len = strlen(name) + 1;
+ size_t format_len = 0;
+ u32 hash;
+
+ hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
+ if (format)
+ format_len = strlen(format) + 1;
+ head = &ust_marker_table[hash & ((1 << UST_MARKER_HASH_BITS)-1)];
+ cds_hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
+ DBG("ust_marker %s.%s busy", channel, name);
+ return ERR_PTR(-EBUSY); /* Already there */
+ }
+ }
+ /*
+ * Using zmalloc here to allocate a variable length element. Could
+ * cause some memory fragmentation if overused.
+ */
+ e = zmalloc(sizeof(struct ust_marker_entry)
+ + channel_len + name_len + format_len);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+ memcpy(e->channel, channel, channel_len);
+ e->name = &e->channel[channel_len];
+ memcpy(e->name, name, name_len);
+ if (format) {
+ e->format = &e->name[name_len];
+ memcpy(e->format, format, format_len);
+ if (strcmp(e->format, UST_MARKER_NOARGS) == 0)
+ e->call = ust_marker_probe_cb_noarg;
+ else
+ e->call = ust_marker_probe_cb;
+ __ust_marker(metadata, core_marker_format, NULL,
+ "channel %s name %s format %s",
+ e->channel, e->name, e->format);
+ } else {
+ e->format = NULL;
+ e->call = ust_marker_probe_cb;
+ }
+ e->single.func = __ust_marker_empty_function;
+ e->single.probe_private = NULL;
+ e->multi = NULL;
+ e->ptype = 0;
+ e->format_allocated = 0;
+ e->refcount = 0;
+ cds_hlist_add_head(&e->hlist, head);
+ return e;
+}
+
+/*
+ * Remove the ust_marker from the ust_marker hash table. Must be called with mutex_lock
+ * held.
+ */
+static int remove_ust_marker(const char *channel, const char *name)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct ust_marker_entry *e;
+ int found = 0;
+ size_t channel_len = strlen(channel) + 1;
+ size_t name_len = strlen(name) + 1;
+ u32 hash;
+ int ret;
+
+ hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
+ head = &ust_marker_table[hash & ((1 << UST_MARKER_HASH_BITS)-1)];
+ cds_hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ return -ENOENT;
+ if (e->single.func != __ust_marker_empty_function)
+ return -EBUSY;
+ cds_hlist_del(&e->hlist);
+ if (e->format_allocated)
+ free(e->format);
+ ret = ltt_channels_unregister(e->channel);
+ WARN_ON(ret);
+ free(e);
+ return 0;
+}
+
+/*
+ * Set the mark_entry format to the format found in the element.
+ */
+static int ust_marker_set_format(struct ust_marker_entry *entry, const char *format)
+{
+ entry->format = strdup(format);
+ if (!entry->format)
+ return -ENOMEM;
+ entry->format_allocated = 1;
+
+ __ust_marker(metadata, core_marker_format, NULL,
+ "channel %s name %s format %s",
+ entry->channel, entry->name, entry->format);
+ return 0;
+}
+
+/*
+ * Sets the probe callback corresponding to one ust_marker.
+ */
+static int set_ust_marker(struct ust_marker_entry *entry, struct ust_marker *elem,
+ int active)
+{
+ int ret = 0;
+ WARN_ON(strcmp(entry->name, elem->name) != 0);
+
+ if (entry->format) {
+ if (strcmp(entry->format, elem->format) != 0) {
+ ERR("Format mismatch for probe %s (%s), ust_marker (%s)",
+ entry->name,
+ entry->format,
+ elem->format);
+ return -EPERM;
+ }
+ } else {
+ ret = ust_marker_set_format(entry, elem->format);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * probe_cb setup (statically known) is done here. It is
+ * asynchronous with the rest of execution, therefore we only
+ * pass from a "safe" callback (with argument) to an "unsafe"
+ * callback (does not set arguments).
+ */
+ elem->call = entry->call;
+ elem->channel_id = entry->channel_id;
+ elem->event_id = entry->event_id;
+ /*
+ * Sanity check :
+ * We only update the single probe private data when the ptr is
+ * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
+ */
+ WARN_ON(elem->single.func != __ust_marker_empty_function
+ && elem->single.probe_private != entry->single.probe_private
+ && !elem->ptype);
+ elem->single.probe_private = entry->single.probe_private;
+ /*
+ * Make sure the private data is valid when we update the
+ * single probe ptr.
+ */
+ cmm_smp_wmb();
+ elem->single.func = entry->single.func;
+ /*
+ * We also make sure that the new probe callbacks array is consistent
+ * before setting a pointer to it.
+ */
+ rcu_assign_pointer(elem->multi, entry->multi);
+ /*
+ * Update the function or multi probe array pointer before setting the
+ * ptype.
+ */
+ cmm_smp_wmb();
+ elem->ptype = entry->ptype;
+
+ if (elem->tp_name && (active ^ elem->state)) {
+ WARN_ON(!elem->tp_cb);
+ /*
+ * It is ok to directly call the probe registration because type
+ * checking has been done in the __ust_marker_tp() macro.
+ */
+
+ if (active) {
+ ret = tracepoint_probe_register_noupdate(
+ elem->tp_name,
+ elem->tp_cb, NULL);
+ } else {
+ /*
+ * tracepoint_probe_update_all() must be called
+ * before the library containing tp_cb is unloaded.
+ */
+ ret = tracepoint_probe_unregister_noupdate(
+ elem->tp_name,
+ elem->tp_cb, NULL);
+ }
+ }
+ elem->state = active;
+
+ return ret;
+}
+
+/*
+ * Disable a ust_marker and its probe callback.
+ * Note: only waiting an RCU period after setting elem->call to the empty
+ * function ensures that the original callback is not used anymore. This is
+ * ensured by rcu_read_lock around the call site.
+ */
+static void disable_ust_marker(struct ust_marker *elem)
+{
+ int ret;
+
+ /* leave "call" as is. It is known statically. */
+ if (elem->tp_name && elem->state) {
+ WARN_ON(!elem->tp_cb);
+ /*
+ * It is ok to directly call the probe registration because type
+ * checking has been done in the __ust_marker_tp() macro.
+ */
+ /*
+ * tracepoint_probe_update_all() must be called
+ * before the module containing tp_cb is unloaded.
+ */
+ ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
+ elem->tp_cb, NULL);
+ WARN_ON(ret);
+ }
+ elem->state = 0;
+ elem->single.func = __ust_marker_empty_function;
+ /* Update the function before setting the ptype */
+ cmm_smp_wmb();
+ elem->ptype = 0; /* single probe */
+ /*
+ * Leave the private data and channel_id/event_id there, because removal
+ * is racy and should be done only after an RCU period. These are never
+ * used until the next initialization anyway.
+ */
+}
+
+/*
+ * is_ust_marker_enabled - Check if a ust_marker is enabled
+ * @channel: channel name
+ * @name: ust_marker name
+ *
+ * Returns 1 if the ust_marker is enabled, 0 if disabled.
+ */
+int is_ust_marker_enabled(const char *channel, const char *name)
+{
+ struct ust_marker_entry *entry;
+
+ lock_ust_marker();
+ entry = get_ust_marker(channel, name);
+ unlock_ust_marker();
+
+ return entry && !!entry->refcount;
+}
+
+/**
+ * ust_marker_update_probe_range - Update a probe range
+ * @begin: beginning of the range
+ * @end: end of the range
+ *
+ * Updates the probe callback corresponding to a range of ust_marker.
+ */
+static
+void ust_marker_update_probe_range(struct ust_marker * const *begin,
+ struct ust_marker * const *end)
+{
+ struct ust_marker * const *iter;
+ struct ust_marker_entry *mark_entry;
+
+ for (iter = begin; iter < end; iter++) {
+ if (!*iter)
+ continue; /* skip dummy */
+ mark_entry = get_ust_marker((*iter)->channel, (*iter)->name);
+ if (mark_entry) {
+ set_ust_marker(mark_entry, *iter, !!mark_entry->refcount);
+ /*
+ * ignore error, continue
+ */
+ } else {
+ disable_ust_marker(*iter);
+ }
+ }
+}
+
+static void lib_update_ust_marker(void)
+{
+ struct ust_marker_lib *lib;
+
+ lock_ust_marker();
+ cds_list_for_each_entry(lib, &ust_marker_libs, list)
+ ust_marker_update_probe_range(lib->ust_marker_start,
+ lib->ust_marker_start + lib->ust_marker_count);
+ unlock_ust_marker();
+}
+
+/*
+ * Update probes, removing the faulty probes.
+ *
+ * The internal callback is only changed before the first probe is connected to it.
+ * Single probe private data can only be changed on 0 -> 1 and 2 -> 1
+ * transitions. All other transitions will leave the old private data valid.
+ * This makes the non-atomicity of the callback/private data updates valid.
+ *
+ * "special case" updates :
+ * 0 -> 1 callback
+ * 1 -> 0 callback
+ * 1 -> 2 callbacks
+ * 2 -> 1 callbacks
+ * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates.
+ * Side effect: ust_marker_set_format may delete the ust_marker entry (creating a
+ * replacement).
+ */
+static void ust_marker_update_probes(void)
+{
+ lib_update_ust_marker();
+ tracepoint_probe_update_all();
+}
+
+/**
+ * ust_marker_probe_register - Connect a probe to a ust_marker
+ * @channel: ust_marker channel
+ * @name: ust_marker name
+ * @format: format string
+ * @probe: probe handler
+ * @probe_private: probe private data
+ *
+ * private data must be a valid allocated memory address, or NULL.
+ * Returns 0 if ok, error value on error.
+ * The probe address must at least be aligned on the architecture pointer size.
+ */
+int ust_marker_probe_register(const char *channel, const char *name,
+ const char *format, ust_marker_probe_func *probe,
+ void *probe_private)
+{
+ struct ust_marker_entry *entry;
+ int ret = 0, ret_err;
+ struct ust_marker_probe_array *old;
+ int first_probe = 0;
+
+ lock_ust_marker();
+ entry = get_ust_marker(channel, name);
+ if (!entry) {
+ first_probe = 1;
+ entry = add_ust_marker(channel, name, format);
+ if (IS_ERR(entry))
+ ret = PTR_ERR(entry);
+ if (ret)
+ goto end;
+ ret = ltt_channels_register(channel);
+ if (ret)
+ goto error_remove_ust_marker;
+ ret = ltt_channels_get_index_from_name(channel);
+ if (ret < 0)
+ goto error_unregister_channel;
+ entry->channel_id = ret;
+ ret = ltt_channels_get_event_id(channel, name);
+ if (ret < 0)
+ goto error_unregister_channel;
+ entry->event_id = ret;
+ ret = 0;
+ __ust_marker(metadata, core_marker_id, NULL,
+ "channel %s name %s event_id %hu "
+ "int #1u%zu long #1u%zu pointer #1u%zu "
+ "size_t #1u%zu alignment #1u%u",
+ channel, name, entry->event_id,
+ sizeof(int), sizeof(long), sizeof(void *),
+ sizeof(size_t), ltt_get_alignment());
+ } else if (format) {
+ if (!entry->format)
+ ret = ust_marker_set_format(entry, format);
+ else if (strcmp(entry->format, format))
+ ret = -EPERM;
+ if (ret)
+ goto end;
+ }
+
+ old = ust_marker_entry_add_probe(entry, probe, probe_private);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ if (first_probe)
+ goto error_unregister_channel;
+ else
+ goto end;
+ }
+ unlock_ust_marker();
+
+ /* Activate ust_marker if necessary */
+ ust_marker_update_probes();
+
+ if (old) {
+ synchronize_rcu();
+ free_old_closure(&old->rcu);
+ }
+ return ret;
+
+error_unregister_channel:
+ ret_err = ltt_channels_unregister(channel);
+ WARN_ON(ret_err);
+error_remove_ust_marker:
+ ret_err = remove_ust_marker(channel, name);
+ WARN_ON(ret_err);
+end:
+ unlock_ust_marker();
+ return ret;
+}
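+
+/*
+ * Example (sketch; the names are hypothetical): connecting a probe to a
+ * ust_marker with a one-field format string. The same probe/probe_private
+ * pair must later be passed to ust_marker_probe_unregister():
+ *
+ *	ret = ust_marker_probe_register("cpu", "my_event", "value %d",
+ *					my_probe_cb, my_private);
+ */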
+
+/**
+ * ust_marker_probe_unregister - Disconnect a probe from a ust_marker
+ * @channel: ust_marker channel
+ * @name: ust_marker name
+ * @probe: probe function pointer
+ * @probe_private: probe private data
+ *
+ * Returns the private data given to ust_marker_probe_register, or an ERR_PTR().
+ * We do not need to call a synchronize_sched to make sure the probes have
+ * finished running before doing a module unload, because the module unload
+ * itself uses stop_machine(), which ensures that every preempt-disabled section
+ * has finished.
+ */
+int ust_marker_probe_unregister(const char *channel, const char *name,
+ ust_marker_probe_func *probe, void *probe_private)
+{
+ struct ust_marker_entry *entry;
+ struct ust_marker_probe_array *old;
+ int ret = 0;
+
+ lock_ust_marker();
+ entry = get_ust_marker(channel, name);
+ if (!entry) {
+ ret = -ENOENT;
+ goto end;
+ }
+ old = ust_marker_entry_remove_probe(entry, probe, probe_private);
+ unlock_ust_marker();
+
+ ust_marker_update_probes();
+
+ if (old) {
+ synchronize_rcu();
+ free_old_closure(&old->rcu);
+ }
+ return ret;
+
+end:
+ unlock_ust_marker();
+ return ret;
+}
+
+static struct ust_marker_entry *
+get_ust_marker_from_private_data(ust_marker_probe_func *probe,
+ void *probe_private)
+{
+ struct ust_marker_entry *entry;
+ unsigned int i;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+
+ for (i = 0; i < UST_MARKER_TABLE_SIZE; i++) {
+ head = &ust_marker_table[i];
+ cds_hlist_for_each_entry(entry, node, head, hlist) {
+ if (!entry->ptype) {
+ if (entry->single.func == probe
+ && entry->single.probe_private
+ == probe_private)
+ return entry;
+ } else {
+ struct ust_marker_probe_array *closure;
+ unsigned int j;
+
+ /* Use a separate index so the outer hash-table counter i is not clobbered. */
+ closure = entry->multi;
+ for (j = 0; closure->c[j].func; j++) {
+ if (closure->c[j].func == probe &&
+ closure->c[j].probe_private
+ == probe_private)
+ return entry;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * ust_marker_probe_unregister_private_data - Disconnect a probe from a ust_marker
+ * @probe: probe function
+ * @probe_private: probe private data
+ *
+ * Unregister a probe by providing the registered private data.
+ * Only removes the first ust_marker found in hash table.
+ * Return 0 on success or error value.
+ * We do not need to call a synchronize_sched to make sure the probes have
+ * finished running before doing a module unload, because the module unload
+ * itself uses stop_machine(), which ensures that every preempt-disabled section
+ * has finished.
+ */
+int ust_marker_probe_unregister_private_data(ust_marker_probe_func *probe,
+ void *probe_private)
+{
+ struct ust_marker_entry *entry;
+ int ret = 0;
+ struct ust_marker_probe_array *old;
+ char *channel = NULL, *name = NULL;
+
+ lock_ust_marker();
+ entry = get_ust_marker_from_private_data(probe, probe_private);
+ if (!entry) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ old = ust_marker_entry_remove_probe(entry, NULL, probe_private);
+ channel = strdup(entry->channel);
+ name = strdup(entry->name);
+ /* Ignore busy error message */
+ remove_ust_marker(channel, name);
+ unlock_ust_marker();
+
+ ust_marker_update_probes();
+
+ if (old) {
+ synchronize_rcu();
+ free_old_closure(&old->rcu);
+ }
+ goto end;
+
+unlock:
+ unlock_ust_marker();
+end:
+ free(channel);
+ free(name);
+ return ret;
+}
+
+/**
+ * ust_marker_get_private_data - Get a ust_marker's probe private data
+ * @channel: ust_marker channel
+ * @name: ust_marker name
+ * @probe: probe to match
+ * @num: get the nth matching probe's private data
+ *
+ * Returns the nth matching private data pointer (starting from 0), or an
+ * ERR_PTR if none is found.
+ * The private data pointer should _only_ be dereferenced if the caller is the
+ * owner of the data, or its content could vanish. This is mostly used to
+ * confirm that a caller is the owner of a registered probe.
+ */
+void *ust_marker_get_private_data(const char *channel, const char *name,
+ ust_marker_probe_func *probe, int num)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct ust_marker_entry *e;
+ size_t channel_len = strlen(channel) + 1;
+ size_t name_len = strlen(name) + 1;
+ int i;
+ u32 hash;
+
+ hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
+ head = &ust_marker_table[hash & ((1 << UST_MARKER_HASH_BITS)-1)];
+ cds_hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
+ if (!e->ptype) {
+ if (num == 0 && e->single.func == probe)
+ return e->single.probe_private;
+ } else {
+ struct ust_marker_probe_array *closure;
+ int match = 0;
+ closure = e->multi;
+ for (i = 0; closure->c[i].func; i++) {
+ if (closure->c[i].func != probe)
+ continue;
+ if (match++ == num)
+ return closure->c[i].probe_private;
+ }
+ }
+ break;
+ }
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+/**
+ * ust_marker_get_iter_range - Get a next ust_marker iterator given a range.
+ * @ust_marker: current ust_marker (in), next ust_marker (out)
+ * @begin: beginning of the range
+ * @end: end of the range
+ *
+ * Returns whether a next ust_marker has been found (1) or not (0).
+ * Will return the first ust_marker in the range if the input ust_marker is NULL.
+ * Called with markers mutex held.
+ */
+static
+int ust_marker_get_iter_range(struct ust_marker * const **ust_marker,
+ struct ust_marker * const *begin,
+ struct ust_marker * const *end)
+{
+ if (!*ust_marker && begin != end)
+ *ust_marker = begin;
+ while (*ust_marker >= begin && *ust_marker < end) {
+ if (!**ust_marker)
+ (*ust_marker)++; /* skip dummy */
+ else
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Returns 0 if current not found.
+ * Returns 1 if current found.
+ * Called with markers mutex held.
+ */
+static
+int lib_get_iter_ust_marker(struct ust_marker_iter *iter)
+{
+ struct ust_marker_lib *iter_lib;
+ int found = 0;
+
+ cds_list_for_each_entry(iter_lib, &ust_marker_libs, list) {
+ if (iter_lib < iter->lib)
+ continue;
+ else if (iter_lib > iter->lib)
+ iter->ust_marker = NULL;
+ found = ust_marker_get_iter_range(&iter->ust_marker,
+ iter_lib->ust_marker_start,
+ iter_lib->ust_marker_start + iter_lib->ust_marker_count);
+ if (found) {
+ iter->lib = iter_lib;
+ break;
+ }
+ }
+ return found;
+}
+
+/* Called with markers mutex held. */
+static void ust_marker_get_iter(struct ust_marker_iter *iter)
+{
+ int found = 0;
+
+ found = lib_get_iter_ust_marker(iter);
+ if (!found)
+ ust_marker_iter_reset(iter);
+}
+
+void ust_marker_iter_start(struct ust_marker_iter *iter)
+{
+ lock_ust_marker();
+ ust_marker_get_iter(iter);
+}
+
+/* Called with markers mutex held. */
+void ust_marker_iter_next(struct ust_marker_iter *iter)
+{
+ iter->ust_marker++;
+ /*
+ * iter->ust_marker may be invalid because we blindly incremented it.
+ * Make sure it is valid by marshalling on the ust_marker, getting the
+ * ust_marker from following modules if necessary.
+ */
+ ust_marker_get_iter(iter);
+}
+
+void ust_marker_iter_stop(struct ust_marker_iter *iter)
+{
+ unlock_ust_marker();
+}
+
+void ust_marker_iter_reset(struct ust_marker_iter *iter)
+{
+ iter->lib = NULL;
+ iter->ust_marker = NULL;
+}
+
+void ltt_dump_ust_marker_state(struct ust_trace *trace)
+{
+ struct ust_marker_entry *entry;
+ struct ltt_probe_private_data call_data;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ unsigned int i;
+
+ lock_ust_marker();
+ call_data.trace = trace;
+ call_data.serializer = NULL;
+
+ for (i = 0; i < UST_MARKER_TABLE_SIZE; i++) {
+ head = &ust_marker_table[i];
+ cds_hlist_for_each_entry(entry, node, head, hlist) {
+ __ust_marker(metadata, core_marker_id,
+ &call_data,
+ "channel %s name %s event_id %hu "
+ "int #1u%zu long #1u%zu pointer #1u%zu "
+ "size_t #1u%zu alignment #1u%u",
+ entry->channel,
+ entry->name,
+ entry->event_id,
+ sizeof(int), sizeof(long),
+ sizeof(void *), sizeof(size_t),
+ ltt_get_alignment());
+ if (entry->format)
+ __ust_marker(metadata,
+ core_marker_format,
+ &call_data,
+ "channel %s name %s format %s",
+ entry->channel,
+ entry->name,
+ entry->format);
+ }
+ }
+ unlock_ust_marker();
+}
+
+void ust_marker_set_new_ust_marker_cb(void (*cb)(struct ust_marker *))
+{
+ new_ust_marker_cb = cb;
+}
+
+static void new_ust_marker(struct ust_marker * const *start,
+ struct ust_marker * const *end)
+{
+ if (new_ust_marker_cb) {
+ struct ust_marker * const *m;
+
+ for (m = start; m < end; m++) {
+ if (*m)
+ new_ust_marker_cb(*m);
+ }
+ }
+}
+
+int ust_marker_register_lib(struct ust_marker * const *ust_marker_start,
+ int ust_marker_count)
+{
+ struct ust_marker_lib *pl, *iter;
+
+ pl = (struct ust_marker_lib *) zmalloc(sizeof(struct ust_marker_lib));
+
+ pl->ust_marker_start = ust_marker_start;
+ pl->ust_marker_count = ust_marker_count;
+
+ lock_ust_marker();
+
+ /*
+ * We sort the libs by struct lib pointer address.
+ */
+ cds_list_for_each_entry_reverse(iter, &ust_marker_libs, list) {
+ BUG_ON(iter == pl); /* Should never be in the list twice */
+ if (iter < pl) {
+ /* We belong to the location right after iter. */
+ cds_list_add(&pl->list, &iter->list);
+ goto lib_added;
+ }
+ }
+ /* We should be added at the head of the list */
+ cds_list_add(&pl->list, &ust_marker_libs);
+lib_added:
+ unlock_ust_marker();
+
+ new_ust_marker(ust_marker_start, ust_marker_start + ust_marker_count);
+
+ /* TODO: update just the loaded lib */
+ lib_update_ust_marker();
+
+ DBG("just registered a ust_marker section from %p and having %d ust_marker (minus dummy ust_marker)", ust_marker_start, ust_marker_count);
+
+ return 0;
+}
+
+int ust_marker_unregister_lib(struct ust_marker * const *ust_marker_start)
+{
+ struct ust_marker_lib *lib;
+
+ lock_ust_marker();
+ cds_list_for_each_entry(lib, &ust_marker_libs, list) {
+ if (lib->ust_marker_start == ust_marker_start) {
+ struct ust_marker_lib *lib2free = lib;
+ cds_list_del(&lib->list);
+ free(lib2free);
+ break;
+ }
+ }
+ unlock_ust_marker();
+
+ return 0;
+}
+
+void __attribute__((constructor)) init_ust_marker(void)
+{
+ if (!initialized) {
+ init_tracepoint();
+ ust_marker_register_lib(__start___ust_marker_ptrs,
+ __stop___ust_marker_ptrs
+ - __start___ust_marker_ptrs);
+ initialized = 1;
+ }
+}
+
+void __attribute__((destructor)) destroy_ust_marker(void)
+{
+ ust_marker_unregister_lib(__start___ust_marker_ptrs);
+}
--- /dev/null
+/*
+ * LTTng serializing code.
+ *
+ * Copyright Mathieu Desnoyers, March 2007.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *
+ * Note the weirdness of passing a va_list down through several functions
+ * (related to array argument passing): va_list seems to be implemented as an
+ * array on x86_64, but not on i386... This is why we pass a va_list * to
+ * ltt_vtrace.
+ */
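+
+/*
+ * Concretely (a minimal sketch of the convention, not code from this file):
+ * the outermost varargs function owns the va_list and everything below it
+ * receives a pointer to it, so the behaviour does not depend on whether
+ * va_list is an array type:
+ *
+ *	va_list args;
+ *
+ *	va_start(args, fmt);
+ *	ltt_vtrace(mdata, probe_private, call_private, fmt, &args);
+ *	va_end(args);
+ */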
+
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <urcu-bp.h>
+#include <urcu/rculist.h>
+
+#include <ust/core.h>
+#include <ust/clock.h>
+#include "buffers.h"
+#include "tracer.h"
+#include "usterr_signal_safe.h"
+#include "ust_snprintf.h"
+
+/*
+ * Because UST core defines a non-const PAGE_SIZE, define PAGE_SIZE_STATIC here.
+ * It is just an approximation for the tracer stack.
+ */
+#define PAGE_SIZE_STATIC 4096
+
+enum ltt_type {
+ LTT_TYPE_SIGNED_INT,
+ LTT_TYPE_UNSIGNED_INT,
+ LTT_TYPE_STRING,
+ LTT_TYPE_NONE,
+};
+
+/*
+ * Special stack for the tracer. Keeps serialization offsets for each field.
+ * Per-thread. Deals with reentrancy from signals by simply ensuring that
+ * interrupting signals put the stack back to its original position.
+ */
+#define TRACER_STACK_LEN (PAGE_SIZE_STATIC / sizeof(unsigned long))
+static unsigned long __thread tracer_stack[TRACER_STACK_LEN];
+
+static unsigned int __thread tracer_stack_pos;
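+
+/*
+ * ltt_vtrace() snapshots tracer_stack_pos on entry and restores it before
+ * returning, so an event traced from a signal handler that interrupts an
+ * ongoing serialization leaves the interrupted event's offsets intact.
+ */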
+
+#define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
+
+/*
+ * Inspired from vsnprintf
+ *
+ * The serialization format string supports the basic printf format strings.
+ * In addition, it defines new formats that can be used to serialize more
+ * complex/non portable data structures.
+ *
+ * Typical use:
+ *
+ * field_name %ctype
+ * field_name #tracetype %ctype
+ * field_name #tracetype %ctype1 %ctype2 ...
+ *
+ * A conversion is performed between format string types supported by GCC and
+ * the trace type requested. GCC type is used to perform type checking on format
+ * strings. Trace type is used to specify the exact binary representation
+ * in the trace. A mapping is done between one or more GCC types to one trace
+ * type. Sign extension, if required by the conversion, is performed following
+ * the trace type.
+ *
+ * If a gcc format is not declared with a trace format, the gcc format is
+ * also used as binary representation in the trace.
+ *
+ * Strings are supported with %s.
+ * A single tracetype (sequence) can take multiple c types as parameter.
+ *
+ * c types:
+ *
+ * see printf(3).
+ *
+ * Note: to write a uint32_t in a trace, the following expression is
+ * recommended so it is portable:
+ *
+ * ("#4u%lu", (unsigned long)var)
+ *
+ * trace types:
+ *
+ * Serialization specific formats:
+ *
+ * Fixed size integers
+ * #1u writes uint8_t
+ * #2u writes uint16_t
+ * #4u writes uint32_t
+ * #8u writes uint64_t
+ * #1d writes int8_t
+ * #2d writes int16_t
+ * #4d writes int32_t
+ * #8d writes int64_t
+ * e.g.:
+ * #1u%lu #2u%lu #4d%lu #8d%lu #llu%hu #d%lu
+ *
+ * Attributes:
+ *
+ * n: (for network byte order)
+ * #ntracetype%ctype
+ * is written in the trace in network byte order.
+ *
+ * e.g.: #bn4u%lu, #n%lu, #b%u
+ *
+ * TODO (eventually)
+ * Variable length sequence
+ * #a #tracetype1 #tracetype2 %array_ptr %elem_size %num_elems
+ * In the trace:
+ * #a specifies that this is a sequence
+ * #tracetype1 is the type of elements in the sequence
+ * #tracetype2 is the type of the element count
+ * GCC input:
+ * array_ptr is a pointer to an array that contains members of size
+ * elem_size.
+ * num_elems is the number of elements in the array.
+ * e.g.: #a #lu #lu %p %lu %u
+ *
+ * Callback
+ * #k callback (taken from the probe data)
+ * The following % arguments are expected by the callback
+ *
+ * e.g.: #a #lu #lu #k %p
+ *
+ * Note: No conversion is done from floats to integers, nor from integers
+ * to floats, between c types and trace types. Conversion from double to
+ * float or from float to double is not supported either.
+ *
+ * REMOVE
+ * %*b expects sizeof(data), data
+ * where sizeof(data) is 1, 2, 4 or 8
+ *
+ * Fixed length struct, union or array.
+ * FIXME: unable to extract those sizes statically.
+ * %*r expects sizeof(*ptr), ptr
+ * %*.*r expects sizeof(*ptr), __alignof__(*ptr), ptr
+ * struct and unions removed.
+ * Fixed length array:
+ * [%p]#a[len #tracetype]
+ * e.g.: [%p]#a[12 #lu]
+ *
+ * Variable length sequence
+ * %*.*:*v expects sizeof(*ptr), __alignof__(*ptr), elem_num, ptr
+ * where elem_num is the number of elements in the sequence
+ */
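+
+/*
+ * Worked example (the field names below are made up for illustration): the
+ * format string
+ *
+ *	"fd %d count #8u%zu name %s"
+ *
+ * describes three fields: "fd" recorded with the gcc int representation,
+ * "count" promoted to a fixed-size uint64_t in the trace, and "name"
+ * recorded as a nul-terminated string.
+ */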
+static inline const char *parse_trace_type(const char *fmt,
+ char *trace_size, enum ltt_type *trace_type,
+ unsigned long *attributes)
+{
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+ /* 'z' support added 23/7/1999 S.H. */
+ /* 'z' changed to 'Z' --davidm 1/25/99 */
+ /* 't' added for ptrdiff_t */
+
+ /* parse attributes. */
+repeat:
+ switch (*fmt) {
+ case 'n':
+ *attributes |= LTT_ATTRIBUTE_NETWORK_BYTE_ORDER;
+ ++fmt;
+ goto repeat;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
+ *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
+ *fmt == 'S' || *fmt == '1' || *fmt == '2' ||
+	    *fmt == '4' || *fmt == '8') {
+ qualifier = *fmt;
+ ++fmt;
+ if (qualifier == 'l' && *fmt == 'l') {
+ qualifier = 'L';
+ ++fmt;
+ }
+ }
+
+ switch (*fmt) {
+ case 'c':
+ *trace_type = LTT_TYPE_UNSIGNED_INT;
+ *trace_size = sizeof(unsigned char);
+ goto parse_end;
+ case 's':
+ *trace_type = LTT_TYPE_STRING;
+ goto parse_end;
+ case 'p':
+ *trace_type = LTT_TYPE_UNSIGNED_INT;
+ *trace_size = sizeof(void *);
+ goto parse_end;
+ case 'd':
+ case 'i':
+ *trace_type = LTT_TYPE_SIGNED_INT;
+ break;
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ *trace_type = LTT_TYPE_UNSIGNED_INT;
+ break;
+ default:
+ if (!*fmt)
+ --fmt;
+ goto parse_end;
+ }
+ switch (qualifier) {
+ case 'L':
+ *trace_size = sizeof(long long);
+ break;
+ case 'l':
+ *trace_size = sizeof(long);
+ break;
+ case 'Z':
+ case 'z':
+ *trace_size = sizeof(size_t);
+ break;
+//ust// case 't':
+//ust// *trace_size = sizeof(ptrdiff_t);
+//ust// break;
+ case 'h':
+ *trace_size = sizeof(short);
+ break;
+ case '1':
+ *trace_size = sizeof(uint8_t);
+ break;
+ case '2':
+ *trace_size = sizeof(uint16_t);
+ break;
+ case '4':
+ *trace_size = sizeof(uint32_t);
+ break;
+ case '8':
+ *trace_size = sizeof(uint64_t);
+ break;
+ default:
+ *trace_size = sizeof(int);
+ }
+
+parse_end:
+ return fmt;
+}
+
+/*
+ * Restrictions:
+ * Field width and precision are *not* supported.
+ * %n not supported.
+ */
+static inline
+const char *parse_c_type(const char *fmt, char *c_size, enum ltt_type *c_type,
+ char *outfmt)
+{
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+ /* 'z' support added 23/7/1999 S.H. */
+ /* 'z' changed to 'Z' --davidm 1/25/99 */
+ /* 't' added for ptrdiff_t */
+
+	/* process flags: ignore standard print formats for now. */
+repeat:
+ switch (*fmt) {
+ case '-':
+ case '+':
+ case ' ':
+ case '#':
+ case '0':
+ ++fmt;
+ goto repeat;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
+ *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
+ *fmt == 'S') {
+ qualifier = *fmt;
+ ++fmt;
+ if (qualifier == 'l' && *fmt == 'l') {
+ qualifier = 'L';
+ ++fmt;
+ }
+ }
+
+ if (outfmt) {
+ if (qualifier != -1)
+ *outfmt++ = (char)qualifier;
+ *outfmt++ = *fmt;
+ *outfmt = 0;
+ }
+
+ switch (*fmt) {
+ case 'c':
+ *c_type = LTT_TYPE_UNSIGNED_INT;
+ *c_size = sizeof(unsigned char);
+ goto parse_end;
+ case 's':
+ *c_type = LTT_TYPE_STRING;
+ goto parse_end;
+ case 'p':
+ *c_type = LTT_TYPE_UNSIGNED_INT;
+ *c_size = sizeof(void *);
+ goto parse_end;
+ case 'd':
+ case 'i':
+ *c_type = LTT_TYPE_SIGNED_INT;
+ break;
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ *c_type = LTT_TYPE_UNSIGNED_INT;
+ break;
+ default:
+ if (!*fmt)
+ --fmt;
+ goto parse_end;
+ }
+ switch (qualifier) {
+ case 'L':
+ *c_size = sizeof(long long);
+ break;
+ case 'l':
+ *c_size = sizeof(long);
+ break;
+ case 'Z':
+ case 'z':
+ *c_size = sizeof(size_t);
+ break;
+//ust// case 't':
+//ust// *c_size = sizeof(ptrdiff_t);
+//ust// break;
+ case 'h':
+ *c_size = sizeof(short);
+ break;
+ default:
+ *c_size = sizeof(int);
+ }
+
+parse_end:
+ return fmt;
+}
+
+static inline size_t serialize_trace_data(struct ust_buffer *buf,
+ size_t buf_offset,
+ char trace_size, enum ltt_type trace_type,
+ char c_size, enum ltt_type c_type,
+ unsigned int *stack_pos_ctx,
+ int *largest_align,
+ va_list *args)
+{
+ union {
+ unsigned long v_ulong;
+ uint64_t v_uint64;
+ struct {
+ const char *s;
+ size_t len;
+ } v_string;
+ } tmp;
+
+ /*
+ * Be careful about sign extension here.
+ * Sign extension is done with the destination (trace) type.
+ */
+ switch (trace_type) {
+ case LTT_TYPE_SIGNED_INT:
+ switch (c_size) {
+ case 1:
+ tmp.v_ulong = (long)(int8_t)va_arg(*args, int);
+ break;
+ case 2:
+ tmp.v_ulong = (long)(int16_t)va_arg(*args, int);
+ break;
+ case 4:
+ tmp.v_ulong = (long)(int32_t)va_arg(*args, int);
+ break;
+ case 8:
+ tmp.v_uint64 = va_arg(*args, int64_t);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case LTT_TYPE_UNSIGNED_INT:
+ switch (c_size) {
+ case 1:
+ tmp.v_ulong = (unsigned long)(uint8_t)va_arg(*args, unsigned int);
+ break;
+ case 2:
+ tmp.v_ulong = (unsigned long)(uint16_t)va_arg(*args, unsigned int);
+ break;
+ case 4:
+ tmp.v_ulong = (unsigned long)(uint32_t)va_arg(*args, unsigned int);
+ break;
+ case 8:
+ tmp.v_uint64 = va_arg(*args, uint64_t);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case LTT_TYPE_STRING:
+ tmp.v_string.s = va_arg(*args, const char *);
+ if ((unsigned long)tmp.v_string.s < PAGE_SIZE)
+ tmp.v_string.s = "<NULL>";
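+		/*
+		 * Two-pass scheme: the sizing pass (buf == NULL) measures the
+		 * string and records its length on the per-thread tracer
+		 * stack; the write pass (buf != NULL) re-reads that slot so
+		 * both passes use the same length.
+		 */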
+ if (!buf) {
+ /*
+ * Reserve tracer stack entry.
+ */
+ tracer_stack_pos++;
+ assert(tracer_stack_pos <= TRACER_STACK_LEN);
+ cmm_barrier();
+ tracer_stack[*stack_pos_ctx] =
+ strlen(tmp.v_string.s) + 1;
+ }
+ tmp.v_string.len = tracer_stack[(*stack_pos_ctx)++];
+ if (buf)
+ ust_buffers_strncpy(buf, buf_offset, tmp.v_string.s,
+ tmp.v_string.len);
+ buf_offset += tmp.v_string.len;
+ goto copydone;
+ default:
+ BUG();
+ }
+
+ /*
+	 * If trace_size is less than or equal to 4 bytes, there is no sign
+	 * extension to do because the value is already encoded in a long.
+	 * Therefore, we can combine signed and unsigned ops. 4-byte floats also work
+ * with this, because we do a simple copy of 4 bytes into 4 bytes
+ * without manipulation (and we do not support conversion from integers
+ * to floats).
+ * It is also the case if c_size is 8 bytes, which is the largest
+ * possible integer.
+ */
+ if (ltt_get_alignment()) {
+ buf_offset += ltt_align(buf_offset, trace_size);
+ if (largest_align)
+ *largest_align = max_t(int, *largest_align, trace_size);
+ }
+ if (trace_size <= 4 || c_size == 8) {
+ if (buf) {
+ switch (trace_size) {
+ case 1:
+ if (c_size == 8)
+ ust_buffers_write(buf, buf_offset,
+ (uint8_t[]){ (uint8_t)tmp.v_uint64 },
+ sizeof(uint8_t));
+ else
+ ust_buffers_write(buf, buf_offset,
+ (uint8_t[]){ (uint8_t)tmp.v_ulong },
+ sizeof(uint8_t));
+ break;
+ case 2:
+ if (c_size == 8)
+ ust_buffers_write(buf, buf_offset,
+ (uint16_t[]){ (uint16_t)tmp.v_uint64 },
+ sizeof(uint16_t));
+ else
+ ust_buffers_write(buf, buf_offset,
+ (uint16_t[]){ (uint16_t)tmp.v_ulong },
+ sizeof(uint16_t));
+ break;
+ case 4:
+ if (c_size == 8)
+ ust_buffers_write(buf, buf_offset,
+ (uint32_t[]){ (uint32_t)tmp.v_uint64 },
+ sizeof(uint32_t));
+ else
+ ust_buffers_write(buf, buf_offset,
+ (uint32_t[]){ (uint32_t)tmp.v_ulong },
+ sizeof(uint32_t));
+ break;
+ case 8:
+ /*
+ * c_size cannot be other than 8 here because
+ * trace_size > 4.
+ */
+ ust_buffers_write(buf, buf_offset,
+ (uint64_t[]){ (uint64_t)tmp.v_uint64 },
+ sizeof(uint64_t));
+ break;
+ default:
+ BUG();
+ }
+ }
+ buf_offset += trace_size;
+ goto copydone;
+ } else {
+ /*
+ * Perform sign extension.
+ */
+ if (buf) {
+ switch (trace_type) {
+ case LTT_TYPE_SIGNED_INT:
+ ust_buffers_write(buf, buf_offset,
+ (int64_t[]){ (int64_t)tmp.v_ulong },
+ sizeof(int64_t));
+ break;
+ case LTT_TYPE_UNSIGNED_INT:
+ ust_buffers_write(buf, buf_offset,
+ (uint64_t[]){ (uint64_t)tmp.v_ulong },
+ sizeof(uint64_t));
+ break;
+ default:
+ BUG();
+ }
+ }
+ buf_offset += trace_size;
+ goto copydone;
+ }
+
+copydone:
+ return buf_offset;
+}
+
+notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
+ struct ltt_serialize_closure *closure,
+ void *serialize_private,
+ unsigned int stack_pos_ctx,
+ int *largest_align,
+ const char *fmt, va_list *args)
+{
+ char trace_size = 0, c_size = 0; /*
+ * 0 (unset), 1, 2, 4, 8 bytes.
+ */
+ enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE;
+ unsigned long attributes = 0;
+
+ for (; *fmt ; ++fmt) {
+ switch (*fmt) {
+ case '#':
+ /* tracetypes (#) */
+ ++fmt; /* skip first '#' */
+ if (*fmt == '#') /* Escaped ## */
+ break;
+ attributes = 0;
+ fmt = parse_trace_type(fmt, &trace_size, &trace_type,
+ &attributes);
+ break;
+ case '%':
+ /* c types (%) */
+ ++fmt; /* skip first '%' */
+ if (*fmt == '%') /* Escaped %% */
+ break;
+ fmt = parse_c_type(fmt, &c_size, &c_type, NULL);
+ /*
+			 * Output c types if no trace type has been
+			 * specified.
+ */
+ if (!trace_size)
+ trace_size = c_size;
+ if (trace_type == LTT_TYPE_NONE)
+ trace_type = c_type;
+ if (c_type == LTT_TYPE_STRING)
+ trace_type = LTT_TYPE_STRING;
+ /* perform trace write */
+ buf_offset = serialize_trace_data(buf,
+ buf_offset, trace_size,
+ trace_type, c_size, c_type,
+ &stack_pos_ctx,
+ largest_align,
+ args);
+ trace_size = 0;
+ c_size = 0;
+ trace_type = LTT_TYPE_NONE;
+			c_type = LTT_TYPE_NONE;
+ attributes = 0;
+ break;
+ /* default is to skip the text, doing nothing */
+ }
+ }
+ return buf_offset;
+}
+
+/*
+ * Calculate data size
+ * Assume that the padding for alignment starts at a sizeof(void *)-aligned address.
+ */
+static notrace size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
+ void *serialize_private,
+ unsigned int stack_pos_ctx, int *largest_align,
+ const char *fmt, va_list *args)
+{
+ ltt_serialize_cb cb = closure->callbacks[0];
+ closure->cb_idx = 0;
+ return (size_t)cb(NULL, 0, closure, serialize_private,
+ stack_pos_ctx, largest_align, fmt, args);
+}
+
+static notrace
+void ltt_write_event_data(struct ust_buffer *buf, size_t buf_offset,
+ struct ltt_serialize_closure *closure,
+ void *serialize_private,
+ unsigned int stack_pos_ctx,
+ int largest_align,
+ const char *fmt, va_list *args)
+{
+ ltt_serialize_cb cb = closure->callbacks[0];
+ closure->cb_idx = 0;
+ buf_offset += ltt_align(buf_offset, largest_align);
+ cb(buf, buf_offset, closure, serialize_private, stack_pos_ctx, NULL,
+ fmt, args);
+}
+
+
+notrace void ltt_vtrace(const struct ust_marker *mdata, void *probe_data,
+ void *call_data,
+ const char *fmt, va_list *args)
+{
+ int largest_align, ret;
+ struct ltt_active_ust_marker *pdata;
+ uint16_t eID;
+ size_t data_size, slot_size;
+ unsigned int chan_index;
+ struct ust_channel *channel;
+ struct ust_trace *trace, *dest_trace = NULL;
+ struct ust_buffer *buf;
+ u64 tsc;
+ long buf_offset;
+ va_list args_copy;
+ struct ltt_serialize_closure closure;
+ struct ltt_probe_private_data *private_data = call_data;
+ void *serialize_private = NULL;
+ int cpu;
+ unsigned int rflags;
+ unsigned int stack_pos_ctx;
+
+ /*
+ * This test is useful for quickly exiting static tracing when no trace
+ * is active. We expect to have an active trace when we get here.
+ */
+ if (unlikely(ltt_traces.num_active_traces == 0))
+ return;
+
+ rcu_read_lock();
+ cpu = ust_get_cpu();
+
+ /* Force volatile access. */
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
+ stack_pos_ctx = tracer_stack_pos;
+ cmm_barrier();
+
+ pdata = (struct ltt_active_ust_marker *)probe_data;
+ eID = mdata->event_id;
+ chan_index = mdata->channel_id;
+ closure.callbacks = pdata->probe->callbacks;
+
+ if (unlikely(private_data)) {
+ dest_trace = private_data->trace;
+ if (private_data->serializer)
+ closure.callbacks = &private_data->serializer;
+ serialize_private = private_data->serialize_private;
+ }
+
+ va_copy(args_copy, *args);
+ /*
+ * Assumes event payload to start on largest_align alignment.
+ */
+ largest_align = 1; /* must be non-zero for ltt_align */
+ data_size = ltt_get_data_size(&closure, serialize_private,
+ stack_pos_ctx, &largest_align,
+ fmt, &args_copy);
+ largest_align = min_t(int, largest_align, sizeof(void *));
+ va_end(args_copy);
+
+ /* Iterate on each trace */
+	cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+ /*
+ * Expect the filter to filter out events. If we get here,
+ * we went through tracepoint activation as a first step.
+ */
+ if (unlikely(dest_trace && trace != dest_trace))
+ continue;
+ if (unlikely(!trace->active))
+ continue;
+ if (unlikely(!ltt_run_filter(trace, eID)))
+ continue;
+#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
+ rflags = LTT_RFLAG_ID_SIZE;
+#else
+ if (unlikely(eID >= LTT_FREE_EVENTS))
+ rflags = LTT_RFLAG_ID;
+ else
+ rflags = 0;
+#endif
+ /*
+ * Skip channels added after trace creation.
+ */
+ if (unlikely(chan_index >= trace->nr_channels))
+ continue;
+ channel = &trace->channels[chan_index];
+ if (!channel->active)
+ continue;
+
+ /*
+ * If a new cpu was plugged since the trace was started, we did
+ * not add it to the trace, and therefore we write the event to
+ * cpu 0.
+ */
+ if (cpu >= channel->n_cpus) {
+ cpu = 0;
+ }
+
+ /* reserve space : header and data */
+ ret = ltt_reserve_slot(channel, trace, data_size, largest_align,
+ cpu, &buf, &slot_size, &buf_offset,
+ &tsc, &rflags);
+ if (unlikely(ret < 0))
+ continue; /* buffer full */
+
+ va_copy(args_copy, *args);
+ /* FIXME : could probably encapsulate transport better. */
+ buf = channel->buf[cpu];
+ /* Out-of-order write : header and data */
+ buf_offset = ltt_write_event_header(channel, buf, buf_offset,
+ eID, data_size, tsc, rflags);
+ ltt_write_event_data(buf, buf_offset, &closure,
+ serialize_private,
+ stack_pos_ctx, largest_align,
+ fmt, &args_copy);
+ va_end(args_copy);
+ /* Out-of-order commit */
+ ltt_commit_slot(channel, buf, buf_offset, data_size, slot_size);
+		DBG("just committed event (%s/%s) at offset %ld and size %zd", mdata->channel, mdata->name, buf_offset, slot_size);
+ }
+
+ cmm_barrier();
+ tracer_stack_pos = stack_pos_ctx;
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
+
+ rcu_read_unlock();
+}
+
+notrace void ltt_trace(const struct ust_marker *mdata, void *probe_data,
+ void *call_data,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ ltt_vtrace(mdata, probe_data, call_data, fmt, &args);
+ va_end(args);
+}
+
+static notrace void skip_space(const char **ps)
+{
+	while (**ps == ' ')
+ (*ps)++;
+}
+
+static notrace void copy_token(char **out, const char **in)
+{
+ while (**in != ' ' && **in != '\0') {
+ **out = **in;
+ (*out)++;
+ (*in)++;
+ }
+}
+
+/* serialize_to_text
+ *
+ * Given a format string and a va_list of arguments, convert them to a
+ * human-readable string.
+ *
+ * @outbuf: the buffer to output the string to
+ * @bufsize: the max size that can be used in outbuf
+ * @fmt: the marker format string
+ * @ap: a va_list that contains the arguments corresponding to fmt
+ *
+ * Return value: the number of chars that have been put in outbuf, excluding
+ * the final \0, or, if the buffer was too small, the number of chars that
+ * would have been written in outbuf if it had been large enough.
+ *
+ * outbuf may be NULL. The return value may then be used to allocate an
+ * appropriately sized outbuf.
+ *
+ */
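+
+/*
+ * For instance (made-up marker fields), the format string
+ *
+ *	"name %s count #2u %u"
+ *
+ * is rewritten by the loop below into the printf format
+ *
+ *	"name=%s count=%u"
+ *
+ * before being handed to ust_safe_vsnprintf(): argument names get an '='
+ * appended, c formats are kept, and trace formats (#...) are dropped.
+ */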
+
+notrace
+int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap)
+{
+ int fmt_len = strlen(fmt);
+ char *new_fmt = alloca(fmt_len + 1);
+ const char *orig_fmt_p = fmt;
+ char *new_fmt_p = new_fmt;
+ char false_buf;
+ int result;
+ enum { none, cfmt, tracefmt, argname } prev_token = none;
+
+ while (*orig_fmt_p != '\0') {
+ if (*orig_fmt_p == '%') {
+ prev_token = cfmt;
+ copy_token(&new_fmt_p, &orig_fmt_p);
+ }
+ else if (*orig_fmt_p == '#') {
+ prev_token = tracefmt;
+ do {
+ orig_fmt_p++;
+ } while (*orig_fmt_p != ' ' && *orig_fmt_p != '\0');
+ }
+ else if (*orig_fmt_p == ' ') {
+ if (prev_token == argname) {
+ *new_fmt_p = '=';
+ new_fmt_p++;
+ }
+ else if (prev_token == cfmt) {
+ *new_fmt_p = ' ';
+ new_fmt_p++;
+ }
+
+ skip_space(&orig_fmt_p);
+ }
+ else {
+ prev_token = argname;
+ copy_token(&new_fmt_p, &orig_fmt_p);
+ }
+ }
+
+ *new_fmt_p = '\0';
+
+ if (outbuf == NULL) {
+		/* point outbuf at a 1-byte dummy buffer: pre-C99 vsnprintf may not accept NULL */
+ outbuf = &false_buf;
+ bufsize = 1;
+ }
+ result = ust_safe_vsnprintf(outbuf, bufsize, new_fmt, ap);
+
+ return result;
+}
--- /dev/null
+/* Copyright (C) 2009 Pierre-Marc Fournier
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* This file contains the implementation of the UST listener thread, which
+ * receives trace control commands. It also coordinates the initialization of
+ * libust.
+ */
+
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/epoll.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <regex.h>
+#include <urcu/uatomic.h>
+#include <urcu/list.h>
+
+#include <ust/marker.h>
+#include <ust/tracepoint.h>
+#include <ust/tracepoint-internal.h>
+#include <ust/tracectl.h>
+#include <ust/clock.h>
+#include "tracer.h"
+#include "usterr_signal_safe.h"
+#include "ustcomm.h"
+#include "buffers.h"
+#include "marker-control.h"
+
+/* This should only be accessed by the constructor, before the creation
+ * of the listener, and then only by the listener.
+ */
+s64 pidunique = -1LL;
+
+/* The process pid is used to detect a fork that creates a non-traceable
+ * child, so that the destructor sequences in libust can ignore that
+ * child.
+ */
+static pid_t processpid = 0;
+
+static struct ustcomm_header _receive_header;
+static struct ustcomm_header *receive_header = &_receive_header;
+static char receive_buffer[USTCOMM_BUFFER_SIZE];
+static char send_buffer[USTCOMM_BUFFER_SIZE];
+
+static int epoll_fd;
+
+/*
+ * Listener thread data vs fork() protection mechanism. Ensures that no
+ * listener-thread mutex or data structure is concurrently held or modified
+ * by another thread when fork() is executed.
+ */
+static pthread_mutex_t listener_thread_data_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* Mutex protecting listen_sock. Nests inside listener_thread_data_mutex. */
+static pthread_mutex_t listen_sock_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct ustcomm_sock *listen_sock;
+
+extern struct chan_info_struct chan_infos[];
+
+static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);
+
+/* shared between the listener and the main thread; accessed with CMM_LOAD_SHARED/CMM_STORE_SHARED */
+int buffers_to_export = 0;
+
+int ust_clock_source;
+
+static long long make_pidunique(void)
+{
+ s64 retval;
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+
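+	/*
+	 * Pack seconds in the high 32 bits and microseconds in the low 32
+	 * bits; this differentiates processes with the same pid, e.g. before
+	 * and after an exec.
+	 */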
+ retval = tv.tv_sec;
+ retval <<= 32;
+ retval |= tv.tv_usec;
+
+ return retval;
+}
+
+static void print_ust_marker(FILE *fp)
+{
+ struct ust_marker_iter iter;
+
+ ust_marker_iter_reset(&iter);
+ ust_marker_iter_start(&iter);
+
+ while (iter.ust_marker) {
+ fprintf(fp, "ust_marker: %s/%s %d \"%s\" %p\n",
+ (*iter.ust_marker)->channel,
+ (*iter.ust_marker)->name,
+ (int)(*iter.ust_marker)->state,
+ (*iter.ust_marker)->format,
+ NULL); /*
+ * location is null for now, will be added
+ * to a different table.
+ */
+ ust_marker_iter_next(&iter);
+ }
+ ust_marker_iter_stop(&iter);
+}
+
+static void print_trace_events(FILE *fp)
+{
+ struct trace_event_iter iter;
+
+ trace_event_iter_reset(&iter);
+ trace_event_iter_start(&iter);
+
+ while (iter.trace_event) {
+ fprintf(fp, "trace_event: %s\n", (*iter.trace_event)->name);
+ trace_event_iter_next(&iter);
+ }
+ trace_event_iter_stop(&iter);
+}
+
+static int connect_ustconsumer(void)
+{
+ int result, fd;
+ char default_daemon_path[] = SOCK_DIR "/ustconsumer";
+ char *explicit_daemon_path, *daemon_path;
+
+ explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
+ if (explicit_daemon_path) {
+ daemon_path = explicit_daemon_path;
+ } else {
+ daemon_path = default_daemon_path;
+ }
+
+ DBG("Connecting to daemon_path %s", daemon_path);
+
+ result = ustcomm_connect_path(daemon_path, &fd);
+ if (result < 0) {
+ WARN("connect_ustconsumer failed, daemon_path: %s",
+ daemon_path);
+ return result;
+ }
+
+ return fd;
+}
+
+
+static void request_buffer_consumer(int sock,
+ const char *trace,
+ const char *channel,
+ int cpu)
+{
+ struct ustcomm_header send_header, recv_header;
+ struct ustcomm_buffer_info buf_inf;
+ int result = 0;
+
+ result = ustcomm_pack_buffer_info(&send_header,
+ &buf_inf,
+ trace,
+ channel,
+ cpu);
+
+ if (result < 0) {
+ ERR("failed to pack buffer info message %s_%d",
+ channel, cpu);
+ return;
+ }
+
+ buf_inf.pid = getpid();
+ send_header.command = CONSUME_BUFFER;
+
+ result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
+ &recv_header, NULL);
+ if (result <= 0) {
+ PERROR("request for buffer consumer failed, is the daemon online?");
+ }
+
+ return;
+}
+
+/* Ask the daemon to collect a trace called trace_name that is being
+ * produced by this pid.
+ *
+ * The trace must be at least allocated. (It can also be started.)
+ * This is because _ltt_trace_find is used.
+ */
+
+static void inform_consumer_daemon(const char *trace_name)
+{
+	int sock, i, j;
+ struct ust_trace *trace;
+ const char *ch_name;
+
+ sock = connect_ustconsumer();
+ if (sock < 0) {
+ return;
+ }
+
+ DBG("Connected to ustconsumer");
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find(trace_name);
+ if (trace == NULL) {
+ WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
+ goto unlock_traces;
+ }
+
+ for (i=0; i < trace->nr_channels; i++) {
+ if (trace->channels[i].request_collection) {
+ /* iterate on all cpus */
+ for (j=0; j<trace->channels[i].n_cpus; j++) {
+ ch_name = trace->channels[i].channel_name;
+ request_buffer_consumer(sock, trace_name,
+ ch_name, j);
+ CMM_STORE_SHARED(buffers_to_export,
+ CMM_LOAD_SHARED(buffers_to_export)+1);
+ }
+ }
+ }
+
+unlock_traces:
+ ltt_unlock_traces();
+
+ close(sock);
+}
+
+static struct ust_channel *find_channel(const char *ch_name,
+ struct ust_trace *trace)
+{
+ int i;
+
+ for (i=0; i<trace->nr_channels; i++) {
+ if (!strcmp(trace->channels[i].channel_name, ch_name)) {
+ return &trace->channels[i];
+ }
+ }
+
+ return NULL;
+}
+
+static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
+ int ch_cpu,
+ int *buf_shmid,
+ int *buf_struct_shmid,
+ int *buf_pipe_fd)
+{
+ struct ust_trace *trace;
+ struct ust_channel *channel;
+ struct ust_buffer *buf;
+
+ DBG("get_buffer_shmid_pipe_fd");
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+ ltt_unlock_traces();
+
+ if (trace == NULL) {
+ ERR("cannot find trace!");
+ return -ENODATA;
+ }
+
+ channel = find_channel(ch_name, trace);
+ if (!channel) {
+ ERR("cannot find channel %s!", ch_name);
+ return -ENODATA;
+ }
+
+ buf = channel->buf[ch_cpu];
+
+ *buf_shmid = buf->shmid;
+ *buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
+ *buf_pipe_fd = buf->data_ready_fd_read;
+
+ return 0;
+}
+
+static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
+ int *num, int *size)
+{
+ struct ust_trace *trace;
+ struct ust_channel *channel;
+
+ DBG("get_subbuf_size");
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+ ltt_unlock_traces();
+
+ if (!trace) {
+ ERR("cannot find trace!");
+ return -ENODATA;
+ }
+
+ channel = find_channel(ch_name, trace);
+ if (!channel) {
+ ERR("unable to find channel");
+ return -ENODATA;
+ }
+
+ *num = channel->subbuf_cnt;
+ *size = channel->subbuf_size;
+
+ return 0;
+}
+
+/* Return the smallest power of two that is greater than or equal to v */
+
+static unsigned int pow2_higher_or_eq(unsigned int v)
+{
+ int hb = fls(v);
+ int retval = 1<<(hb-1);
+
+ if (v-retval == 0)
+ return retval;
+ else
+ return retval<<1;
+}
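+
+/*
+ * For example, pow2_higher_or_eq(4096) returns 4096 while
+ * pow2_higher_or_eq(4097) returns 8192: fls() gives the position of the
+ * highest set bit, and the result is rounded up unless v (expected to be
+ * non-zero) is already a power of two.
+ */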
+
+static int set_subbuf_size(const char *trace_name, const char *ch_name,
+ unsigned int size)
+{
+ unsigned int power;
+ int retval = 0;
+ struct ust_trace *trace;
+ struct ust_channel *channel;
+
+ DBG("set_subbuf_size");
+
+ power = pow2_higher_or_eq(size);
+ power = max_t(unsigned int, 2u, power);
+ if (power != size) {
+ WARN("using the next power of two for buffer size = %u\n", power);
+ }
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find_setup(trace_name);
+ if (trace == NULL) {
+ ERR("cannot find trace!");
+ retval = -ENODATA;
+ goto unlock_traces;
+ }
+
+ channel = find_channel(ch_name, trace);
+ if (!channel) {
+ ERR("unable to find channel");
+ retval = -ENODATA;
+ goto unlock_traces;
+ }
+
+ channel->subbuf_size = power;
+	DBG("subbuf size for the requested channel set to %zu", channel->subbuf_size);
+
+unlock_traces:
+ ltt_unlock_traces();
+
+ return retval;
+}
+
+static int set_subbuf_num(const char *trace_name, const char *ch_name,
+ unsigned int num)
+{
+ struct ust_trace *trace;
+ struct ust_channel *channel;
+ int retval = 0;
+
+ DBG("set_subbuf_num");
+
+ if (num < 2) {
+		ERR("subbuffer count must be at least 2");
+ return -EINVAL;
+ }
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find_setup(trace_name);
+ if (trace == NULL) {
+ ERR("cannot find trace!");
+ retval = -ENODATA;
+ goto unlock_traces;
+ }
+
+ channel = find_channel(ch_name, trace);
+ if (!channel) {
+ ERR("unable to find channel");
+ retval = -ENODATA;
+ goto unlock_traces;
+ }
+
+ channel->subbuf_cnt = num;
+	DBG("subbuf count for the requested channel set to %u", channel->subbuf_cnt);
+
+unlock_traces:
+ ltt_unlock_traces();
+ return retval;
+}
+
+static int get_subbuffer(const char *trace_name, const char *ch_name,
+ int ch_cpu, long *consumed_old)
+{
+ int retval = 0;
+ struct ust_trace *trace;
+ struct ust_channel *channel;
+ struct ust_buffer *buf;
+
+ DBG("get_subbuf");
+
+ *consumed_old = 0;
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+
+ if (!trace) {
+ DBG("Cannot find trace. It was likely destroyed by the user.");
+ retval = -ENODATA;
+ goto unlock_traces;
+ }
+
+ channel = find_channel(ch_name, trace);
+ if (!channel) {
+ ERR("unable to find channel");
+ retval = -ENODATA;
+ goto unlock_traces;
+ }
+
+ buf = channel->buf[ch_cpu];
+
+ retval = ust_buffers_get_subbuf(buf, consumed_old);
+ if (retval < 0) {
+ WARN("missed buffer?");
+ }
+
+unlock_traces:
+ ltt_unlock_traces();
+
+ return retval;
+}
+
+
+static int notify_buffer_mapped(const char *trace_name,
+ const char *ch_name,
+ int ch_cpu)
+{
+ int retval = 0;
+ struct ust_trace *trace;
+ struct ust_channel *channel;
+ struct ust_buffer *buf;
+
+ DBG("get_buffer_fd");
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+
+ if (!trace) {
+ retval = -ENODATA;
+ DBG("Cannot find trace. It was likely destroyed by the user.");
+ goto unlock_traces;
+ }
+
+ channel = find_channel(ch_name, trace);
+ if (!channel) {
+ retval = -ENODATA;
+ ERR("unable to find channel");
+ goto unlock_traces;
+ }
+
+ buf = channel->buf[ch_cpu];
+
+	/* Being here is proof that the daemon has mapped the buffer in its
+	 * memory. We may now decrement buffers_to_export.
+	 */
+ if (uatomic_read(&buf->consumed) == 0) {
+ DBG("decrementing buffers_to_export");
+ CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
+ }
+
+unlock_traces:
+ ltt_unlock_traces();
+
+ return retval;
+}
+
+	cds_list_for_each_entry(trace, &ltt_traces.head, list) {
+ int ch_cpu, long consumed_old)
+{
+ int retval = 0;
+ struct ust_trace *trace;
+ struct ust_channel *channel;
+ struct ust_buffer *buf;
+
+ DBG("put_subbuf");
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+
+ if (!trace) {
+ retval = -ENODATA;
+ DBG("Cannot find trace. It was likely destroyed by the user.");
+ goto unlock_traces;
+ }
+
+ channel = find_channel(ch_name, trace);
+ if (!channel) {
+ retval = -ENODATA;
+ ERR("unable to find channel");
+ goto unlock_traces;
+ }
+
+ buf = channel->buf[ch_cpu];
+
+ retval = ust_buffers_put_subbuf(buf, consumed_old);
+ if (retval < 0) {
+ WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
+ ch_name, ch_cpu);
+ } else {
+ DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
+ ch_name, ch_cpu);
+ }
+
+unlock_traces:
+ ltt_unlock_traces();
+
+ return retval;
+}
+
+static void release_listener_mutex(void *ptr)
+{
+ pthread_mutex_unlock(&listener_thread_data_mutex);
+}
+
+static void listener_cleanup(void *ptr)
+{
+ pthread_mutex_lock(&listen_sock_mutex);
+ if (listen_sock) {
+ ustcomm_del_named_sock(listen_sock, 0);
+ listen_sock = NULL;
+ }
+ pthread_mutex_unlock(&listen_sock_mutex);
+}
+
+static int force_subbuf_switch(const char *trace_name)
+{
+ struct ust_trace *trace;
+ int i, j, retval = 0;
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+ if (!trace) {
+ retval = -ENODATA;
+ DBG("Cannot find trace. It was likely destroyed by the user.");
+ goto unlock_traces;
+ }
+
+ for (i = 0; i < trace->nr_channels; i++) {
+ for (j = 0; j < trace->channels[i].n_cpus; j++) {
+ ltt_force_switch(trace->channels[i].buf[j],
+ FORCE_FLUSH);
+ }
+ }
+
+unlock_traces:
+ ltt_unlock_traces();
+
+ return retval;
+}
+
+static int process_trace_cmd(int command, char *trace_name)
+{
+ int result;
+ char trace_type[] = "ustrelay";
+
+ switch(command) {
+ case START:
+		/* start is an operation that sets up the trace, allocates it and starts it */
+ result = ltt_trace_setup(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_setup failed");
+ return result;
+ }
+
+ result = ltt_trace_set_type(trace_name, trace_type);
+ if (result < 0) {
+ ERR("ltt_trace_set_type failed");
+ return result;
+ }
+
+ result = ltt_trace_alloc(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_alloc failed");
+ return result;
+ }
+
+ inform_consumer_daemon(trace_name);
+
+ result = ltt_trace_start(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_start failed");
+ return result;
+ }
+
+ return 0;
+ case SETUP_TRACE:
+ DBG("trace setup");
+
+ result = ltt_trace_setup(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_setup failed");
+ return result;
+ }
+
+ result = ltt_trace_set_type(trace_name, trace_type);
+ if (result < 0) {
+ ERR("ltt_trace_set_type failed");
+ return result;
+ }
+
+ return 0;
+ case ALLOC_TRACE:
+ DBG("trace alloc");
+
+ result = ltt_trace_alloc(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_alloc failed");
+ return result;
+ }
+ inform_consumer_daemon(trace_name);
+
+ return 0;
+
+ case CREATE_TRACE:
+ DBG("trace create");
+
+ result = ltt_trace_setup(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_setup failed");
+ return result;
+ }
+
+ result = ltt_trace_set_type(trace_name, trace_type);
+ if (result < 0) {
+ ERR("ltt_trace_set_type failed");
+ return result;
+ }
+
+ return 0;
+ case START_TRACE:
+ DBG("trace start");
+
+ result = ltt_trace_alloc(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_alloc failed");
+ return result;
+ }
+ if (!result) {
+ inform_consumer_daemon(trace_name);
+ }
+
+ result = ltt_trace_start(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_start failed");
+ return result;
+ }
+
+ return 0;
+ case STOP_TRACE:
+ DBG("trace stop");
+
+ result = ltt_trace_stop(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_stop failed");
+ return result;
+ }
+
+ return 0;
+ case DESTROY_TRACE:
+ DBG("trace destroy");
+
+ result = ltt_trace_destroy(trace_name, 0);
+ if (result < 0) {
+ ERR("ltt_trace_destroy failed");
+ return result;
+ }
+ return 0;
+ case FORCE_SUBBUF_SWITCH:
+ DBG("force switch");
+
+ result = force_subbuf_switch(trace_name);
+ if (result < 0) {
+ ERR("force_subbuf_switch failed");
+ return result;
+ }
+ return 0;
+ }
+
+ return 0;
+}
+
+
+static void process_channel_cmd(int sock, int command,
+ struct ustcomm_channel_info *ch_inf)
+{
+ struct ustcomm_header _reply_header;
+ struct ustcomm_header *reply_header = &_reply_header;
+ struct ustcomm_channel_info *reply_msg =
+ (struct ustcomm_channel_info *)send_buffer;
+ int result, offset = 0, num, size;
+
+ memset(reply_header, 0, sizeof(*reply_header));
+
+ switch (command) {
+ case GET_SUBBUF_NUM_SIZE:
+ result = get_subbuf_num_size(ch_inf->trace,
+ ch_inf->channel,
+ &num, &size);
+ if (result < 0) {
+ reply_header->result = result;
+ break;
+ }
+
+ reply_msg->channel = USTCOMM_POISON_PTR;
+ reply_msg->subbuf_num = num;
+ reply_msg->subbuf_size = size;
+
+
+ reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
+
+ break;
+ case SET_SUBBUF_NUM:
+ reply_header->result = set_subbuf_num(ch_inf->trace,
+ ch_inf->channel,
+ ch_inf->subbuf_num);
+
+ break;
+ case SET_SUBBUF_SIZE:
+ reply_header->result = set_subbuf_size(ch_inf->trace,
+ ch_inf->channel,
+ ch_inf->subbuf_size);
+
+
+ break;
+ }
+ if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
+ ERR("ustcomm_send failed");
+ }
+}
+
+static void process_buffer_cmd(int sock, int command,
+ struct ustcomm_buffer_info *buf_inf)
+{
+ struct ustcomm_header _reply_header;
+ struct ustcomm_header *reply_header = &_reply_header;
+ struct ustcomm_buffer_info *reply_msg =
+ (struct ustcomm_buffer_info *)send_buffer;
+ int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
+ long consumed_old;
+
+ memset(reply_header, 0, sizeof(*reply_header));
+
+ switch (command) {
+ case GET_BUF_SHMID_PIPE_FD:
+ result = get_buffer_shmid_pipe_fd(buf_inf->trace,
+ buf_inf->channel,
+ buf_inf->ch_cpu,
+ &buf_shmid,
+ &buf_struct_shmid,
+ &buf_pipe_fd);
+ if (result < 0) {
+ reply_header->result = result;
+ break;
+ }
+
+ reply_msg->channel = USTCOMM_POISON_PTR;
+ reply_msg->buf_shmid = buf_shmid;
+ reply_msg->buf_struct_shmid = buf_struct_shmid;
+
+ reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
+ reply_header->fd_included = 1;
+
+ if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
+ &buf_pipe_fd) < 0) {
+ ERR("ustcomm_send failed");
+ }
+ return;
+
+ case NOTIFY_BUF_MAPPED:
+ reply_header->result =
+ notify_buffer_mapped(buf_inf->trace,
+ buf_inf->channel,
+ buf_inf->ch_cpu);
+ break;
+ case GET_SUBBUFFER:
+ result = get_subbuffer(buf_inf->trace, buf_inf->channel,
+ buf_inf->ch_cpu, &consumed_old);
+ if (result < 0) {
+ reply_header->result = result;
+ break;
+ }
+
+ reply_msg->channel = USTCOMM_POISON_PTR;
+ reply_msg->consumed_old = consumed_old;
+
+ reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
+
+ break;
+ case PUT_SUBBUFFER:
+ result = put_subbuffer(buf_inf->trace, buf_inf->channel,
+ buf_inf->ch_cpu,
+ buf_inf->consumed_old);
+ reply_header->result = result;
+
+ break;
+ }
+
+ if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
+ ERR("ustcomm_send failed");
+ }
+
+}
+
+static void process_ust_marker_cmd(int sock, int command,
+ struct ustcomm_ust_marker_info *ust_marker_inf)
+{
+ struct ustcomm_header _reply_header;
+ struct ustcomm_header *reply_header = &_reply_header;
+ int result = 0;
+
+ memset(reply_header, 0, sizeof(*reply_header));
+
+ switch(command) {
+ case ENABLE_MARKER:
+
+ result = ltt_ust_marker_connect(ust_marker_inf->channel,
+ ust_marker_inf->ust_marker,
+ "default");
+ if (result < 0) {
+ WARN("could not enable ust_marker; channel=%s,"
+ " name=%s",
+ ust_marker_inf->channel,
+ ust_marker_inf->ust_marker);
+
+ }
+ break;
+ case DISABLE_MARKER:
+ result = ltt_ust_marker_disconnect(ust_marker_inf->channel,
+ ust_marker_inf->ust_marker,
+ "default");
+ if (result < 0) {
+ WARN("could not disable ust_marker; channel=%s,"
+ " name=%s",
+ ust_marker_inf->channel,
+ ust_marker_inf->ust_marker);
+ }
+ break;
+ }
+
+ reply_header->result = result;
+
+ if (ustcomm_send(sock, reply_header, NULL) < 0) {
+ ERR("ustcomm_send failed");
+ }
+
+}
+static void process_client_cmd(struct ustcomm_header *recv_header,
+ char *recv_buf, int sock)
+{
+ int result;
+ struct ustcomm_header _reply_header;
+ struct ustcomm_header *reply_header = &_reply_header;
+ char *send_buf = send_buffer;
+
+ memset(reply_header, 0, sizeof(*reply_header));
+ memset(send_buf, 0, sizeof(send_buffer));
+
+ switch(recv_header->command) {
+ case GET_SUBBUF_NUM_SIZE:
+ case SET_SUBBUF_NUM:
+ case SET_SUBBUF_SIZE:
+ {
+ struct ustcomm_channel_info *ch_inf;
+ ch_inf = (struct ustcomm_channel_info *)recv_buf;
+ result = ustcomm_unpack_channel_info(ch_inf);
+ if (result < 0) {
+ ERR("couldn't unpack channel info");
+ reply_header->result = -EINVAL;
+ goto send_response;
+ }
+ process_channel_cmd(sock, recv_header->command, ch_inf);
+ return;
+ }
+ case GET_BUF_SHMID_PIPE_FD:
+ case NOTIFY_BUF_MAPPED:
+ case GET_SUBBUFFER:
+ case PUT_SUBBUFFER:
+ {
+ struct ustcomm_buffer_info *buf_inf;
+ buf_inf = (struct ustcomm_buffer_info *)recv_buf;
+ result = ustcomm_unpack_buffer_info(buf_inf);
+ if (result < 0) {
+ ERR("couldn't unpack buffer info");
+ reply_header->result = -EINVAL;
+ goto send_response;
+ }
+ process_buffer_cmd(sock, recv_header->command, buf_inf);
+ return;
+ }
+ case ENABLE_MARKER:
+ case DISABLE_MARKER:
+ {
+ struct ustcomm_ust_marker_info *ust_marker_inf;
+ ust_marker_inf = (struct ustcomm_ust_marker_info *)recv_buf;
+ result = ustcomm_unpack_ust_marker_info(ust_marker_inf);
+ if (result < 0) {
+ ERR("couldn't unpack ust_marker info");
+ reply_header->result = -EINVAL;
+ goto send_response;
+ }
+ process_ust_marker_cmd(sock, recv_header->command, ust_marker_inf);
+ return;
+ }
+ case LIST_MARKERS:
+ {
+ char *ptr;
+ size_t size;
+ FILE *fp;
+
+ fp = open_memstream(&ptr, &size);
+ if (fp == NULL) {
+ ERR("opening memstream failed");
+ return;
+ }
+ print_ust_marker(fp);
+ fclose(fp);
+
+ reply_header->size = size + 1; /* Include final \0 */
+
+ result = ustcomm_send(sock, reply_header, ptr);
+
+ free(ptr);
+
+ if (result < 0) {
+ PERROR("failed to send ust_marker list");
+ }
+
+ break;
+ }
+ case LIST_TRACE_EVENTS:
+ {
+ char *ptr;
+ size_t size;
+ FILE *fp;
+
+ fp = open_memstream(&ptr, &size);
+ if (fp == NULL) {
+ ERR("opening memstream failed");
+ return;
+ }
+ print_trace_events(fp);
+ fclose(fp);
+
+ reply_header->size = size + 1; /* Include final \0 */
+
+ result = ustcomm_send(sock, reply_header, ptr);
+
+ free(ptr);
+
+ if (result < 0) {
+ ERR("list_trace_events failed");
+ return;
+ }
+
+ break;
+ }
+ case LOAD_PROBE_LIB:
+ {
+ char *libfile;
+
+ /* FIXME: No functionality at all... */
+ libfile = recv_buf;
+
+ DBG("load_probe_lib loading %s", libfile);
+
+ break;
+ }
+ case GET_PIDUNIQUE:
+ {
+ struct ustcomm_pidunique *pid_msg;
+ pid_msg = (struct ustcomm_pidunique *)send_buf;
+
+ pid_msg->pidunique = pidunique;
+		reply_header->size = sizeof(*pid_msg);
+
+ goto send_response;
+
+ }
+ case GET_SOCK_PATH:
+ {
+ struct ustcomm_single_field *sock_msg;
+ char *sock_path_env;
+
+ sock_msg = (struct ustcomm_single_field *)send_buf;
+
+ sock_path_env = getenv("UST_DAEMON_SOCKET");
+
+ if (!sock_path_env) {
+ result = ustcomm_pack_single_field(reply_header,
+ sock_msg,
+ SOCK_DIR "/ustconsumer");
+
+ } else {
+ result = ustcomm_pack_single_field(reply_header,
+ sock_msg,
+ sock_path_env);
+ }
+ reply_header->result = result;
+
+ goto send_response;
+ }
+ case SET_SOCK_PATH:
+ {
+ struct ustcomm_single_field *sock_msg;
+ sock_msg = (struct ustcomm_single_field *)recv_buf;
+ result = ustcomm_unpack_single_field(sock_msg);
+ if (result < 0) {
+ reply_header->result = -EINVAL;
+ goto send_response;
+ }
+
+ reply_header->result = setenv("UST_DAEMON_SOCKET",
+ sock_msg->field, 1);
+
+ goto send_response;
+ }
+ case START:
+ case SETUP_TRACE:
+ case ALLOC_TRACE:
+ case CREATE_TRACE:
+ case START_TRACE:
+ case STOP_TRACE:
+ case DESTROY_TRACE:
+ case FORCE_SUBBUF_SWITCH:
+ {
+ struct ustcomm_single_field *trace_inf =
+ (struct ustcomm_single_field *)recv_buf;
+
+ result = ustcomm_unpack_single_field(trace_inf);
+ if (result < 0) {
+ ERR("couldn't unpack trace info");
+ reply_header->result = -EINVAL;
+ goto send_response;
+ }
+
+ reply_header->result =
+ process_trace_cmd(recv_header->command,
+ trace_inf->field);
+ goto send_response;
+
+ }
+ default:
+ reply_header->result = -EINVAL;
+
+ goto send_response;
+ }
+
+ return;
+
+send_response:
+ ustcomm_send(sock, reply_header, send_buf);
+}
+
+#define MAX_EVENTS 10
+
+void *listener_main(void *p)
+{
+ struct ustcomm_sock *epoll_sock;
+ struct epoll_event events[MAX_EVENTS];
+ struct sockaddr addr;
+ int accept_fd, nfds, result, i, addr_size;
+
+ DBG("LISTENER");
+
+ pthread_cleanup_push(listener_cleanup, NULL);
+
+	for (;;) {
+ nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
+ if (nfds == -1) {
+ PERROR("listener_main: epoll_wait failed");
+ continue;
+ }
+
+ for (i = 0; i < nfds; i++) {
+ pthread_mutex_lock(&listener_thread_data_mutex);
+ pthread_cleanup_push(release_listener_mutex, NULL);
+ epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
+ if (epoll_sock == listen_sock) {
+ addr_size = sizeof(struct sockaddr);
+ accept_fd = accept(epoll_sock->fd,
+ &addr,
+ (socklen_t *)&addr_size);
+ if (accept_fd == -1) {
+ PERROR("listener_main: accept failed");
+ continue;
+ }
+ ustcomm_init_sock(accept_fd, epoll_fd,
+ &ust_socks);
+ } else {
+ memset(receive_header, 0,
+ sizeof(*receive_header));
+ memset(receive_buffer, 0,
+ sizeof(receive_buffer));
+ result = ustcomm_recv(epoll_sock->fd,
+ receive_header,
+ receive_buffer);
+ if (result == 0) {
+ ustcomm_del_sock(epoll_sock, 0);
+ } else {
+ process_client_cmd(receive_header,
+ receive_buffer,
+ epoll_sock->fd);
+ }
+ }
+ pthread_cleanup_pop(1); /* release listener mutex */
+ }
+ }
+
+ pthread_cleanup_pop(1);
+}
+
+/* These should only be accessed in the parent thread,
+ * not the listener.
+ */
+static volatile sig_atomic_t have_listener = 0;
+static pthread_t listener_thread;
+
+void create_listener(void)
+{
+ int result;
+ sigset_t sig_all_blocked;
+ sigset_t orig_parent_mask;
+
+ if (have_listener) {
+ WARN("not creating listener because we already had one");
+ return;
+ }
+
+ /* A new thread created by pthread_create inherits the signal mask
+ * from the parent. To avoid any signal being received by the
+ * listener thread, we block all signals temporarily in the parent,
+ * while we create the listener thread.
+ */
+
+ sigfillset(&sig_all_blocked);
+
+ result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
+ if (result) {
+ PERROR("pthread_sigmask: %s", strerror(result));
+ }
+
+ result = pthread_create(&listener_thread, NULL, listener_main, NULL);
+	if (result) {
+		ERR("pthread_create: %s", strerror(result));
+		pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
+		return;
+	}
+
+ /* Restore original signal mask in parent */
+ result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
+ if (result) {
+ PERROR("pthread_sigmask: %s", strerror(result));
+ } else {
+ have_listener = 1;
+ }
+}
+
+#define AUTOPROBE_DISABLED 0
+#define AUTOPROBE_ENABLE_ALL 1
+#define AUTOPROBE_ENABLE_REGEX 2
+static int autoprobe_method = AUTOPROBE_DISABLED;
+static regex_t autoprobe_regex;
+
+static void auto_probe_connect(struct ust_marker *m)
+{
+ int result;
+
+ char* concat_name = NULL;
+ const char *probe_name = "default";
+
+ if (autoprobe_method == AUTOPROBE_DISABLED) {
+ return;
+ } else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
+ result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
+ if (result == -1) {
+ ERR("auto_probe_connect: asprintf failed (ust_marker %s/%s)",
+ m->channel, m->name);
+ return;
+ }
+ if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
+ free(concat_name);
+ return;
+ }
+ free(concat_name);
+ }
+
+ result = ltt_ust_marker_connect(m->channel, m->name, probe_name);
+ if (result && result != -EEXIST)
+ ERR("ltt_ust_marker_connect (ust_marker = %s/%s, errno = %d)", m->channel, m->name, -result);
+
+	DBG("auto connected ust_marker %s/%s (addr: %p) to probe default", m->channel, m->name, m);
+
+}
+
+static struct ustcomm_sock * init_app_socket(int epoll_fd)
+{
+ char *dir_name, *sock_name;
+ int result;
+ struct ustcomm_sock *sock = NULL;
+ time_t mtime;
+
+ dir_name = ustcomm_user_sock_dir();
+ if (!dir_name)
+ return NULL;
+
+ mtime = ustcomm_pid_st_mtime(getpid());
+ if (!mtime) {
+ goto free_dir_name;
+ }
+
+ result = asprintf(&sock_name, "%s/%d.%ld", dir_name,
+ (int) getpid(), (long) mtime);
+ if (result < 0) {
+ ERR("string overflow allocating socket name, "
+ "UST thread bailing");
+ goto free_dir_name;
+ }
+
+ result = ensure_dir_exists(dir_name, S_IRWXU);
+ if (result == -1) {
+ ERR("Unable to create socket directory %s, UST thread bailing",
+ dir_name);
+ goto free_sock_name;
+ }
+
+ sock = ustcomm_init_named_socket(sock_name, epoll_fd);
+ if (!sock) {
+		ERR("Error initializing named socket (%s). Check that directory "
+		    "exists and that it is writable. UST thread bailing", sock_name);
+ goto free_sock_name;
+ }
+
+free_sock_name:
+ free(sock_name);
+free_dir_name:
+ free(dir_name);
+
+ return sock;
+}
+
+static void __attribute__((constructor)) init()
+{
+ struct timespec ts;
+ int result;
+ char* autoprobe_val = NULL;
+ char* subbuffer_size_val = NULL;
+ char* subbuffer_count_val = NULL;
+ unsigned int subbuffer_size;
+ unsigned int subbuffer_count;
+ unsigned int power;
+
+	/* Assign the pidunique, to be able to differentiate processes that
+	 * share the same pid (e.g. before and after an exec).
+	 */
+ pidunique = make_pidunique();
+ processpid = getpid();
+
+ DBG("Tracectl constructor");
+
+ /* Set up epoll */
+ epoll_fd = epoll_create(MAX_EVENTS);
+ if (epoll_fd == -1) {
+ ERR("epoll_create failed, tracing shutting down");
+ return;
+ }
+
+ /* Create the socket */
+ listen_sock = init_app_socket(epoll_fd);
+ if (!listen_sock) {
+ ERR("failed to create application socket,"
+ " tracing shutting down");
+ return;
+ }
+
+ create_listener();
+
+	/* Get the clock source type */
+
+ /* Default clock source */
+ ust_clock_source = CLOCK_TRACE;
+ if (clock_gettime(ust_clock_source, &ts) != 0) {
+ ust_clock_source = CLOCK_MONOTONIC;
+ DBG("UST traces will not be synchronized with LTTng traces");
+ }
+
+ if (getenv("UST_TRACE") || getenv("UST_AUTOPROBE")) {
+ /* Ensure ust_marker control is initialized */
+ init_ust_marker_control();
+ }
+
+ autoprobe_val = getenv("UST_AUTOPROBE");
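+	/*
+	 * UST_AUTOPROBE connects probes automatically: a value starting with
+	 * '/' is treated as a regex matched against "channel/name" (for
+	 * example, UST_AUTOPROBE=/^ust/ would select markers in the "ust"
+	 * channel); any other value enables all markers.
+	 */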
+ if (autoprobe_val) {
+ struct ust_marker_iter iter;
+
+ DBG("Autoprobe enabled.");
+
+ /* first, set the callback that will connect the
+ * probe on new ust_marker
+ */
+ if (autoprobe_val[0] == '/') {
+ result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
+ if (result) {
+ char regexerr[150];
+
+ regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
+ ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
+ /* don't crash the application just for this */
+ } else {
+ autoprobe_method = AUTOPROBE_ENABLE_REGEX;
+ }
+ } else {
+ /* just enable all instrumentation */
+ autoprobe_method = AUTOPROBE_ENABLE_ALL;
+ }
+
+ ust_marker_set_new_ust_marker_cb(auto_probe_connect);
+
+ /* Now, connect the probes that were already registered. */
+ ust_marker_iter_reset(&iter);
+ ust_marker_iter_start(&iter);
+
+ DBG("now iterating on ust_marker already registered");
+ while (iter.ust_marker) {
+ DBG("now iterating on ust_marker %s", (*iter.ust_marker)->name);
+ auto_probe_connect(*iter.ust_marker);
+ ust_marker_iter_next(&iter);
+ }
+ ust_marker_iter_stop(&iter);
+ }
+
+ if (getenv("UST_OVERWRITE")) {
+ int val = atoi(getenv("UST_OVERWRITE"));
+ if (val == 0 || val == 1) {
+ CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
+ } else {
+ WARN("invalid value for UST_OVERWRITE");
+ }
+ }
+
+ if (getenv("UST_AUTOCOLLECT")) {
+ int val = atoi(getenv("UST_AUTOCOLLECT"));
+ if (val == 0 || val == 1) {
+ CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
+ } else {
+ WARN("invalid value for UST_AUTOCOLLECT");
+ }
+ }
+
+ subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
+ if (subbuffer_size_val) {
+ sscanf(subbuffer_size_val, "%u", &subbuffer_size);
+ power = pow2_higher_or_eq(subbuffer_size);
+ if (power != subbuffer_size)
+ WARN("using the next power of two for buffer size = %u\n", power);
+ chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
+ }
+
+ subbuffer_count_val = getenv("UST_SUBBUF_NUM");
+ if (subbuffer_count_val) {
+ sscanf(subbuffer_count_val, "%u", &subbuffer_count);
+ if (subbuffer_count < 2)
+ subbuffer_count = 2;
+ chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
+ }
+
+ if (getenv("UST_TRACE")) {
+ char trace_name[] = "auto";
+ char trace_type[] = "ustrelay";
+
+ DBG("starting early tracing");
+
+ /* Ensure buffers are initialized, for the transport to be available.
+ * We are about to set a trace type and it will fail without this.
+ */
+ init_ustrelay_transport();
+
+ /* FIXME: When starting early tracing (here), depending on the
+ * order of constructors, it is very well possible some ust_marker
+ * sections are not yet registered. Because of this, some
+ * channels may not be registered. Yet, we are about to ask the
+ * daemon to collect the channels. Channels which are not yet
+ * registered will not be collected.
+ *
+ * Currently, in LTTng, there is no way to add a channel after
+ * trace start. The reason for this is that it induces complex
+ * concurrency issues on the trace structures, which can only
+ * be resolved using RCU. This has not been done yet. As a
+ * workaround, we are forcing the registration of the "ust"
+ * channel here. This is the only channel (apart from metadata)
+ * that can be reliably used in early tracing.
+ *
+ * Non-early tracing does not have this problem and can use
+ * arbitrary channel names.
+ */
+ ltt_channels_register("ust");
+
+ result = ltt_trace_setup(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_setup failed");
+ return;
+ }
+
+ result = ltt_trace_set_type(trace_name, trace_type);
+ if (result < 0) {
+ ERR("ltt_trace_set_type failed");
+ return;
+ }
+
+ result = ltt_trace_alloc(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_alloc failed");
+ return;
+ }
+
+ result = ltt_trace_start(trace_name);
+ if (result < 0) {
+ ERR("ltt_trace_start failed");
+ return;
+ }
+
+ /* Do this after the trace is started in order to avoid creating confusion
+ * if the trace fails to start. */
+ inform_consumer_daemon(trace_name);
+ }
+
+ return;
+
+	/* TODO: error paths above should tear down already-initialized state */
+
+}
+
+/* This is only called if we terminate normally, not with an unhandled signal,
+ * so we cannot rely on it. However, for now, LTTV requires that the header of
+ * the last sub-buffer contain a valid end time for the trace. This is done
+ * automatically only when the trace is properly stopped.
+ *
+ * If the traced program crashed, it is always possible to manually add the
+ * right value in the header, or to open the trace in text mode.
+ *
+ * FIXME: Fix LTTV so it doesn't need this.
+ */
+
+static void destroy_traces(void)
+{
+ int result;
+
+ /* if trace running, finish it */
+
+ DBG("destructor stopping traces");
+
+ result = ltt_trace_stop("auto");
+ if (result == -1) {
+ ERR("ltt_trace_stop error");
+ }
+
+ result = ltt_trace_destroy("auto", 0);
+ if (result == -1) {
+ ERR("ltt_trace_destroy error");
+ }
+}
+
+static int trace_recording(void)
+{
+ int retval = 0;
+ struct ust_trace *trace;
+
+ ltt_lock_traces();
+
+ cds_list_for_each_entry(trace, <t_traces.head, list) {
+ if (trace->active) {
+ retval = 1;
+ break;
+ }
+ }
+
+ ltt_unlock_traces();
+
+ return retval;
+}
+
+int restarting_usleep(useconds_t usecs)
+{
+ struct timespec tv;
+ int result;
+
+ tv.tv_sec = 0;
+ tv.tv_nsec = usecs * 1000;
+
+ do {
+ result = nanosleep(&tv, &tv);
+ } while (result == -1 && errno == EINTR);
+
+ return result;
+}
+
+static void stop_listener(void)
+{
+ int result;
+
+ if (!have_listener)
+ return;
+
+ result = pthread_cancel(listener_thread);
+ if (result != 0) {
+ ERR("pthread_cancel: %s", strerror(result));
+ }
+ result = pthread_join(listener_thread, NULL);
+ if (result != 0) {
+ ERR("pthread_join: %s", strerror(result));
+ }
+}
+
+/* This destructor keeps the process alive for a few seconds in order
+ * to leave time for ustconsumer to connect to its buffers. This is necessary
+ * for programs whose execution is very short. It is also useful in all
+ * programs when tracing is started close to the end of the program
+ * execution.
+ *
+ * FIXME: For now, this only works for the first trace created in a
+ * process.
+ */
+
+static void __attribute__((destructor)) keepalive()
+{
+ if (processpid != getpid()) {
+ return;
+ }
+
+ if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
+ int total = 0;
+ DBG("Keeping process alive for consumer daemon...");
+ while (CMM_LOAD_SHARED(buffers_to_export)) {
+ const int interv = 200000;
+ restarting_usleep(interv);
+ total += interv;
+
+ if (total >= 3000000) {
+ WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
+ break;
+ }
+ }
+ DBG("Finally dying...");
+ }
+
+ destroy_traces();
+
+ /* Ask the listener to stop and clean up. */
+ stop_listener();
+}
+
+void ust_potential_exec(void)
+{
+ ust_marker(potential_exec, UST_MARKER_NOARGS);
+
+ DBG("test");
+
+ keepalive();
+}
+
+/* Notify ust that there was a fork. This needs to be called inside
+ * the new process, anytime a process whose memory is not shared with
+ * the parent is created. If this function is not called, the events
+ * of the new process will not be collected.
+ *
+ * Signals should be disabled before the fork and reenabled only after
+ * this call in order to guarantee tracing is not started before ust_fork()
+ * sanitizes the new process.
+ */
+
+static void ust_fork(void)
+{
+ struct ustcomm_sock *sock, *sock_tmp;
+ struct ust_trace *trace, *trace_tmp;
+ int result;
+
+ /* FIXME: technically, the locks could have been taken before the fork */
+ DBG("ust: forking");
+
+ /* Get the pid of the new process */
+ processpid = getpid();
+
+ /*
+	 * FIXME: This could be prettier; we loop over the list twice, and
+	 * to follow good locking practice we should hold the lock around
+	 * the loop.
+ */
+	cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
+ ltt_trace_stop(trace->trace_name);
+ }
+
+ /* Delete all active connections, but leave them in the epoll set */
+ cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
+ ustcomm_del_sock(sock, 1);
+ }
+
+ /*
+	 * FIXME: This could be prettier; we loop over the list twice, and
+	 * to follow good locking practice we should hold the lock around
+	 * the loop.
+ */
+	cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
+ ltt_trace_destroy(trace->trace_name, 1);
+ }
+
+ /* Clean up the listener socket and epoll, keeping the socket file */
+ if (listen_sock) {
+ ustcomm_del_named_sock(listen_sock, 1);
+ listen_sock = NULL;
+ }
+ close(epoll_fd);
+
+ /* Re-start the launch sequence */
+ CMM_STORE_SHARED(buffers_to_export, 0);
+ have_listener = 0;
+
+ /* Set up epoll */
+ epoll_fd = epoll_create(MAX_EVENTS);
+ if (epoll_fd == -1) {
+ ERR("epoll_create failed, tracing shutting down");
+ return;
+ }
+
+ /* Create the socket */
+ listen_sock = init_app_socket(epoll_fd);
+ if (!listen_sock) {
+ ERR("failed to create application socket,"
+ " tracing shutting down");
+ return;
+ }
+ create_listener();
+ ltt_trace_setup("auto");
+ result = ltt_trace_set_type("auto", "ustrelay");
+ if (result < 0) {
+ ERR("ltt_trace_set_type failed");
+ return;
+ }
+
+ ltt_trace_alloc("auto");
+ ltt_trace_start("auto");
+ inform_consumer_daemon("auto");
+}
+
+void ust_before_fork(ust_fork_info_t *fork_info)
+{
+	/* Disable signals. This prevents the child from intervening
+	 * before it is properly set up for tracing. It is safer to
+	 * disable all signals, because then we know we are not
+	 * breaking anything when restoring the original mask.
+ */
+ sigset_t all_sigs;
+ int result;
+
+	/* FIXME: only do this if tracing is active */
+
+ /* Disable signals */
+ sigfillset(&all_sigs);
+ result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
+ if (result == -1) {
+ PERROR("sigprocmask");
+ return;
+ }
+
+ /*
+ * Take the fork lock to make sure we are not in the middle of
+ * something in the listener thread.
+ */
+ pthread_mutex_lock(&listener_thread_data_mutex);
+ /*
+ * Hold listen_sock_mutex to protect from listen_sock teardown.
+ */
+ pthread_mutex_lock(&listen_sock_mutex);
+ rcu_bp_before_fork();
+}
+
+/* Don't call this function directly in a traced program */
+static void ust_after_fork_common(ust_fork_info_t *fork_info)
+{
+ int result;
+
+ pthread_mutex_unlock(&listen_sock_mutex);
+ pthread_mutex_unlock(&listener_thread_data_mutex);
+ /* Restore signals */
+ result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
+ if (result == -1) {
+ PERROR("sigprocmask");
+ return;
+ }
+}
+
+void ust_after_fork_parent(ust_fork_info_t *fork_info)
+{
+ rcu_bp_after_fork_parent();
+ /* Release mutexes and reenable signals */
+ ust_after_fork_common(fork_info);
+}
+
+void ust_after_fork_child(ust_fork_info_t *fork_info)
+{
+ /* Release urcu mutexes */
+ rcu_bp_after_fork_child();
+
+ /* Sanitize the child */
+ ust_fork();
+
+ /* Release mutexes and reenable signals */
+ ust_after_fork_common(fork_info);
+}
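+
+/*
+ * Illustrative sketch (not part of this change): a fork() wrapper, such as
+ * the one provided by libustfork, is expected to call these hooks around
+ * the real fork(), along the lines of:
+ *
+ *	ust_fork_info_t fork_info;
+ *	pid_t pid;
+ *
+ *	ust_before_fork(&fork_info);
+ *	pid = fork();
+ *	if (pid == 0)
+ *		ust_after_fork_child(&fork_info);
+ *	else
+ *		ust_after_fork_parent(&fork_info);
+ */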
+
--- /dev/null
+/*
+ * tracer.c
+ *
+ * (C) Copyright 2005-2008 -
+ * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Inspired from LTT :
+ * Karim Yaghmour (karim@opersys.com)
+ * Tom Zanussi (zanussi@us.ibm.com)
+ * Bob Wisniewski (bob@watson.ibm.com)
+ * And from K42 :
+ * Bob Wisniewski (bob@watson.ibm.com)
+ *
+ * Changelog:
+ * 22/09/06, Move to the marker/probes mechanism.
+ * 19/10/05, Complete lockless mechanism.
+ * 27/05/05, Modular redesign and rewrite.
+ */
+
+#include <urcu-bp.h>
+#include <urcu/rculist.h>
+
+#include <ust/clock.h>
+
+#include "tracercore.h"
+#include "tracer.h"
+#include "usterr_signal_safe.h"
+
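+/*
+ * Default sub-buffer size and count for each known channel type. Channel
+ * names that are not listed here fall back to the LTT_CHANNEL_UST defaults
+ * (see get_channel_type_from_name() below).
+ */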
+struct chan_info_struct chan_infos[] = {
+ [LTT_CHANNEL_METADATA] = {
+ LTT_METADATA_CHANNEL,
+ LTT_DEFAULT_SUBBUF_SIZE_LOW,
+ LTT_DEFAULT_N_SUBBUFS_LOW,
+ },
+ [LTT_CHANNEL_UST] = {
+ LTT_UST_CHANNEL,
+ LTT_DEFAULT_SUBBUF_SIZE_HIGH,
+ LTT_DEFAULT_N_SUBBUFS_HIGH,
+ },
+};
+
+static enum ltt_channels get_channel_type_from_name(const char *name)
+{
+ int i;
+
+ if (!name)
+ return LTT_CHANNEL_UST;
+
+ for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
+ if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
+ return (enum ltt_channels)i;
+
+ return LTT_CHANNEL_UST;
+}
+
+static CDS_LIST_HEAD(ltt_transport_list);
+/* transport mutex, nests inside traces mutex (ltt_lock_traces) */
+static DEFINE_MUTEX(ltt_transport_mutex);
+/**
+ * ltt_transport_register - LTT transport registration
+ * @transport: transport structure
+ *
+ * Registers a transport which can be used as output to extract the data out of
+ * LTTng. The module calling this registration function must ensure that no
+ * trap-inducing code will be executed by the transport functions. E.g.
+ * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
+ * is made visible to the transport function. This registration acts as a
+ * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
+ * after its registration must it synchronize the TLBs.
+ */
+void ltt_transport_register(struct ltt_transport *transport)
+{
+	pthread_mutex_lock(&ltt_transport_mutex);
+	cds_list_add_tail(&transport->node, &ltt_transport_list);
+	pthread_mutex_unlock(&ltt_transport_mutex);
+}
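+
+/*
+ * Illustrative sketch (assumed usage, not part of this change): a transport
+ * typically registers itself once at startup, for instance from a
+ * constructor:
+ *
+ *	static struct ltt_transport ust_relay_transport = {
+ *		.name = "ustrelay",
+ *		.ops = { ... },
+ *	};
+ *
+ *	static void __attribute__((constructor)) init_transport(void)
+ *	{
+ *		ltt_transport_register(&ust_relay_transport);
+ *	}
+ */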
+
+/**
+ * ltt_transport_unregister - LTT transport unregistration
+ * @transport: transport structure
+ */
+void ltt_transport_unregister(struct ltt_transport *transport)
+{
+	pthread_mutex_lock(&ltt_transport_mutex);
+	cds_list_del(&transport->node);
+	pthread_mutex_unlock(&ltt_transport_mutex);
+}
+
+static inline int is_channel_overwrite(enum ltt_channels chan,
+ enum trace_mode mode)
+{
+ switch (mode) {
+ case LTT_TRACE_NORMAL:
+ return 0;
+ case LTT_TRACE_FLIGHT:
+ switch (chan) {
+ case LTT_CHANNEL_METADATA:
+ return 0;
+ default:
+ return 1;
+ }
+ case LTT_TRACE_HYBRID:
+ switch (chan) {
+ case LTT_CHANNEL_METADATA:
+ return 0;
+ default:
+ return 1;
+ }
+ default:
+ return 0;
+ }
+}
+
+static void trace_async_wakeup(struct ust_trace *trace)
+{
+ int i;
+ struct ust_channel *chan;
+
+ /* Must check each channel for pending read wakeup */
+ for (i = 0; i < trace->nr_channels; i++) {
+ chan = &trace->channels[i];
+ if (chan->active)
+ trace->ops->wakeup_channel(chan);
+ }
+}
+
+/**
+ * _ltt_trace_find - find a trace by its name.
+ * @trace_name: trace name
+ *
+ * Returns a pointer to the trace structure, NULL if not found.
+ */
+struct ust_trace *_ltt_trace_find(const char *trace_name)
+{
+ struct ust_trace *trace;
+
+	cds_list_for_each_entry(trace, &ltt_traces.head, list)
+ if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
+ return trace;
+
+ return NULL;
+}
+
+/* _ltt_trace_find_setup :
+ * find a trace in the setup list by its name.
+ *
+ * Returns a pointer to the trace structure, NULL if not found.
+ */
+struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
+{
+ struct ust_trace *trace;
+
+	cds_list_for_each_entry(trace, &ltt_traces.setup_head, list)
+ if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
+ return trace;
+
+ return NULL;
+}
+
+/**
+ * ltt_release_transport - Release an LTT transport
+ * @urcu_ref: reference count on the transport
+ */
+void ltt_release_transport(struct urcu_ref *urcu_ref)
+{
+ return;
+}
+
+/**
+ * ltt_release_trace - Release an LTT trace
+ * @urcu_ref: reference count on the trace
+ */
+void ltt_release_trace(struct urcu_ref *urcu_ref)
+{
+ struct ust_trace *trace = _ust_container_of(urcu_ref,
+ struct ust_trace, urcu_ref);
+ ltt_channels_trace_free(trace->channels);
+ free(trace);
+}
+
+static inline void prepare_chan_size_num(unsigned int *subbuf_size,
+ unsigned int *n_subbufs)
+{
+	/* Make sure the subbuffer size is at least a page */
+ *subbuf_size = max_t(unsigned int, *subbuf_size, PAGE_SIZE);
+
+ /* round to next power of 2 */
+ *subbuf_size = 1 << get_count_order(*subbuf_size);
+ *n_subbufs = 1 << get_count_order(*n_subbufs);
+
+ /* Subbuf size and number must both be power of two */
+ WARN_ON(hweight32(*subbuf_size) != 1);
+ WARN_ON(hweight32(*n_subbufs) != 1);
+}
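+
+/*
+ * Example (illustrative, assuming a 4 kB page size): a request for a
+ * 5000-byte sub-buffer and 3 sub-buffers is adjusted by the function above
+ * to an 8192-byte sub-buffer (next power of two, at least PAGE_SIZE) and
+ * 4 sub-buffers.
+ */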
+
+int _ltt_trace_setup(const char *trace_name)
+{
+ int err = 0;
+ struct ust_trace *new_trace = NULL;
+ int metadata_index;
+ unsigned int chan;
+ enum ltt_channels chantype;
+
+ if (_ltt_trace_find_setup(trace_name)) {
+ ERR("Trace name %s already used", trace_name);
+ err = -EEXIST;
+ goto traces_error;
+ }
+
+ if (_ltt_trace_find(trace_name)) {
+ ERR("Trace name %s already used", trace_name);
+ err = -EEXIST;
+ goto traces_error;
+ }
+
+ new_trace = zmalloc(sizeof(struct ust_trace));
+ if (!new_trace) {
+ ERR("Unable to allocate memory for trace %s", trace_name);
+ err = -ENOMEM;
+ goto traces_error;
+ }
+ strncpy(new_trace->trace_name, trace_name, NAME_MAX);
+ new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
+ ust_channels_overwrite_by_default,
+ ust_channels_request_collection_by_default, 1);
+ if (!new_trace->channels) {
+ ERR("Unable to allocate memory for chaninfo %s\n", trace_name);
+ err = -ENOMEM;
+ goto trace_free;
+ }
+
+ /*
+ * Force metadata channel to active, no overwrite.
+ */
+ metadata_index = ltt_channels_get_index_from_name("metadata");
+ WARN_ON(metadata_index < 0);
+ new_trace->channels[metadata_index].overwrite = 0;
+ new_trace->channels[metadata_index].active = 1;
+
+ /*
+ * Set hardcoded tracer defaults for some channels
+ */
+ for (chan = 0; chan < new_trace->nr_channels; chan++) {
+ if (!(new_trace->channels[chan].active))
+ continue;
+
+ chantype = get_channel_type_from_name(
+ ltt_channels_get_name_from_index(chan));
+ new_trace->channels[chan].subbuf_size =
+ chan_infos[chantype].def_subbufsize;
+ new_trace->channels[chan].subbuf_cnt =
+ chan_infos[chantype].def_subbufcount;
+ }
+
+	cds_list_add(&new_trace->list, &ltt_traces.setup_head);
+ return 0;
+
+trace_free:
+ free(new_trace);
+traces_error:
+ return err;
+}
+
+
+int ltt_trace_setup(const char *trace_name)
+{
+ int ret;
+ ltt_lock_traces();
+ ret = _ltt_trace_setup(trace_name);
+ ltt_unlock_traces();
+ return ret;
+}
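+
+/*
+ * Typical trace life cycle (sketch of the intended call order, as used by
+ * the trace control code):
+ *
+ *	ltt_trace_setup(name);
+ *	ltt_trace_set_type(name, "ustrelay");
+ *	ltt_trace_alloc(name);
+ *	ltt_trace_start(name);
+ *	...
+ *	ltt_trace_stop(name);
+ *	ltt_trace_destroy(name, 0);
+ */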
+
+/* must be called from within a traces lock. */
+static void _ltt_trace_free(struct ust_trace *trace)
+{
+ cds_list_del(&trace->list);
+ free(trace);
+}
+
+int ltt_trace_set_type(const char *trace_name, const char *trace_type)
+{
+ int err = 0;
+ struct ust_trace *trace;
+ struct ltt_transport *tran_iter, *transport = NULL;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ ERR("Trace not found %s", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+	pthread_mutex_lock(&ltt_transport_mutex);
+	cds_list_for_each_entry(tran_iter, &ltt_transport_list, node) {
+ if (!strcmp(tran_iter->name, trace_type)) {
+ transport = tran_iter;
+ break;
+ }
+ }
+	pthread_mutex_unlock(&ltt_transport_mutex);
+
+ if (!transport) {
+ ERR("Transport %s is not present", trace_type);
+ err = -EINVAL;
+ goto traces_error;
+ }
+
+ trace->transport = transport;
+
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+
+int ltt_trace_set_channel_subbufsize(const char *trace_name,
+ const char *channel_name, unsigned int size)
+{
+ int err = 0;
+ struct ust_trace *trace;
+ int index;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ ERR("Trace not found %s", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ index = ltt_channels_get_index_from_name(channel_name);
+ if (index < 0) {
+ ERR("Channel %s not found", channel_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+ trace->channels[index].subbuf_size = size;
+
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+
+int ltt_trace_set_channel_subbufcount(const char *trace_name,
+ const char *channel_name, unsigned int cnt)
+{
+ int err = 0;
+ struct ust_trace *trace;
+ int index;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ ERR("Trace not found %s", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ index = ltt_channels_get_index_from_name(channel_name);
+ if (index < 0) {
+ ERR("Channel %s not found", channel_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+ trace->channels[index].subbuf_cnt = cnt;
+
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+
+int ltt_trace_set_channel_enable(const char *trace_name,
+ const char *channel_name, unsigned int enable)
+{
+ int err = 0;
+ struct ust_trace *trace;
+ int index;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ ERR("Trace not found %s", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ /*
+	 * The data in the metadata channel (marker info) is necessary to be
+	 * able to read the trace, so we always keep this channel enabled.
+ */
+ if (!enable && !strcmp(channel_name, "metadata")) {
+ ERR("Trying to disable metadata channel");
+ err = -EINVAL;
+ goto traces_error;
+ }
+
+ index = ltt_channels_get_index_from_name(channel_name);
+ if (index < 0) {
+ ERR("Channel %s not found", channel_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ trace->channels[index].active = enable;
+
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+
+int ltt_trace_set_channel_overwrite(const char *trace_name,
+ const char *channel_name, unsigned int overwrite)
+{
+ int err = 0;
+ struct ust_trace *trace;
+ int index;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ ERR("Trace not found %s", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ /*
+	 * Always put the metadata channel in non-overwrite mode: it is a
+	 * very low-traffic channel and it cannot afford to have its data
+	 * overwritten, since this data (marker info) is necessary to be
+	 * able to read the trace.
+ */
+ if (overwrite && !strcmp(channel_name, "metadata")) {
+ ERR("Trying to set metadata channel to overwrite mode");
+ err = -EINVAL;
+ goto traces_error;
+ }
+
+ index = ltt_channels_get_index_from_name(channel_name);
+ if (index < 0) {
+ ERR("Channel %s not found", channel_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ trace->channels[index].overwrite = overwrite;
+
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+
+int ltt_trace_alloc(const char *trace_name)
+{
+ int err = 0;
+ struct ust_trace *trace;
+ unsigned int subbuf_size, subbuf_cnt;
+ int chan;
+ const char *channel_name;
+
+ ltt_lock_traces();
+
+ if (_ltt_trace_find(trace_name)) { /* Trace already allocated */
+ err = 1;
+ goto traces_error;
+ }
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ ERR("Trace not found %s", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ urcu_ref_init(&trace->urcu_ref);
+ urcu_ref_init(&trace->ltt_transport_urcu_ref);
+ trace->active = 0;
+ trace->freq_scale = trace_clock_freq_scale();
+
+ if (!trace->transport) {
+ ERR("Transport is not set");
+ err = -EINVAL;
+ goto transport_error;
+ }
+ trace->ops = &trace->transport->ops;
+
+ trace->start_freq = trace_clock_frequency();
+ trace->start_tsc = trace_clock_read64();
+ gettimeofday(&trace->start_time, NULL); //ust// changed /* FIXME: is this ok? */
+
+ for (chan = 0; chan < trace->nr_channels; chan++) {
+ if (!(trace->channels[chan].active))
+ continue;
+
+ channel_name = ltt_channels_get_name_from_index(chan);
+ WARN_ON(!channel_name);
+ subbuf_size = trace->channels[chan].subbuf_size;
+ subbuf_cnt = trace->channels[chan].subbuf_cnt;
+ prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
+ err = trace->ops->create_channel(trace_name, trace,
+ channel_name,
+ &trace->channels[chan],
+ subbuf_size,
+ subbuf_cnt,
+ trace->channels[chan].overwrite);
+ if (err != 0) {
+ ERR("Cannot create channel %s", channel_name);
+ goto create_channel_error;
+ }
+ }
+
+ cds_list_del(&trace->list);
+	cds_list_add_rcu(&trace->list, &ltt_traces.head);
+
+ ltt_unlock_traces();
+
+ return 0;
+
+create_channel_error:
+ for (chan--; chan >= 0; chan--)
+ if (trace->channels[chan].active)
+ trace->ops->remove_channel(&trace->channels[chan]);
+
+transport_error:
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+
+/* Must be called while sure that trace is in the list. */
+static int _ltt_trace_destroy(struct ust_trace *trace)
+{
+ int err = -EPERM;
+
+ if (trace == NULL) {
+ err = -ENOENT;
+ goto traces_error;
+ }
+ if (trace->active) {
+ ERR("Can't destroy trace %s : tracer is active", trace->trace_name);
+ err = -EBUSY;
+ goto active_error;
+ }
+
+ cds_list_del_rcu(&trace->list);
+ synchronize_rcu();
+
+ return 0;
+
+active_error:
+traces_error:
+ return err;
+}
+
+/* Sleepable part of the destroy */
+static void __ltt_trace_destroy(struct ust_trace *trace, int drop)
+{
+ int i;
+ struct ust_channel *chan;
+
+	if (!drop) {
+ for (i = 0; i < trace->nr_channels; i++) {
+ chan = &trace->channels[i];
+ if (chan->active)
+ trace->ops->finish_channel(chan);
+ }
+ }
+
+ /*
+ * The currently destroyed trace is not in the trace list anymore,
+ * so it's safe to call the async wakeup ourself. It will deliver
+ * the last subbuffers.
+ */
+ trace_async_wakeup(trace);
+
+ for (i = 0; i < trace->nr_channels; i++) {
+ chan = &trace->channels[i];
+ if (chan->active)
+ trace->ops->remove_channel(chan);
+ }
+
+ urcu_ref_put(&trace->ltt_transport_urcu_ref, ltt_release_transport);
+
+ urcu_ref_put(&trace->urcu_ref, ltt_release_trace);
+}
+
+int ltt_trace_destroy(const char *trace_name, int drop)
+{
+ int err = 0;
+ struct ust_trace *trace;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find(trace_name);
+ if (trace) {
+ err = _ltt_trace_destroy(trace);
+ if (err)
+ goto error;
+
+ ltt_unlock_traces();
+
+ __ltt_trace_destroy(trace, drop);
+
+ return 0;
+ }
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (trace) {
+ _ltt_trace_free(trace);
+ ltt_unlock_traces();
+ return 0;
+ }
+
+ err = -ENOENT;
+
+error:
+ ltt_unlock_traces();
+ return err;
+}
+
+/* must be called from within a traces lock. */
+static int _ltt_trace_start(struct ust_trace *trace)
+{
+ int err = 0;
+
+ if (trace == NULL) {
+ err = -ENOENT;
+ goto traces_error;
+ }
+ if (trace->active)
+ DBG("Tracing already active for trace %s", trace->trace_name);
+ trace->active = 1;
+ /* Read by trace points without protection : be careful */
+ ltt_traces.num_active_traces++;
+ return err;
+
+traces_error:
+ return err;
+}
+
+int ltt_trace_start(const char *trace_name)
+{
+ int err = 0;
+ struct ust_trace *trace;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find(trace_name);
+ err = _ltt_trace_start(trace);
+ if (err)
+ goto no_trace;
+
+ ltt_unlock_traces();
+
+ /*
+ * Call the process-wide state dump.
+	 * Notice that there is no protection on the trace: that is exactly
+	 * why we iterate on the list and check for trace equality instead of
+	 * directly using this trace handle inside the logging function; we
+	 * want to record events in only a single trace of the trace session
+	 * list.
+ */
+
+ ltt_dump_ust_marker_state(trace);
+
+ return err;
+
+ /* Error handling */
+no_trace:
+ ltt_unlock_traces();
+ return err;
+}
+
+/* must be called from within traces lock */
+static int _ltt_trace_stop(struct ust_trace *trace)
+{
+ int err = -EPERM;
+
+ if (trace == NULL) {
+ err = -ENOENT;
+ goto traces_error;
+ }
+ if (!trace->active)
+ DBG("LTT : Tracing not active for trace %s", trace->trace_name);
+ if (trace->active) {
+ trace->active = 0;
+ ltt_traces.num_active_traces--;
+ }
+ return 0;
+
+traces_error:
+ return err;
+}
+
+int ltt_trace_stop(const char *trace_name)
+{
+ int err = 0;
+ struct ust_trace *trace;
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+ err = _ltt_trace_stop(trace);
+ ltt_unlock_traces();
+ return err;
+}
--- /dev/null
+/**
+ * ltt-type-serializer.c
+ *
+ * LTTng specialized type serializer.
+ *
+ * Copyright Mathieu Desnoyers, 2008.
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+
+/* This file contains functions for custom tracepoint probe support. */
+
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
+#include <urcu/rculist.h>
+#include <ust/core.h>
+#include <ust/clock.h>
+#include <urcu-bp.h>
+#include "tracer.h"
+#include "type-serializer.h"
+
+notrace
+void _ltt_specialized_trace(const struct ust_marker *mdata, void *probe_data,
+ void *serialize_private, unsigned int data_size,
+ unsigned int largest_align)
+{
+ int ret;
+ uint16_t eID;
+ size_t slot_size;
+ unsigned int chan_index;
+ struct ust_buffer *buf;
+ struct ust_channel *chan;
+ struct ust_trace *trace;
+ u64 tsc;
+ long buf_offset;
+ int cpu;
+ unsigned int rflags;
+
+ /*
+ * If we get here, it's probably because we have useful work to do.
+ */
+ if (unlikely(ltt_traces.num_active_traces == 0))
+ return;
+
+ rcu_read_lock();
+ cpu = ust_get_cpu();
+
+ /* Force volatile access. */
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
+
+ /*
+ * asm volatile and "memory" clobber prevent the compiler from moving
+ * instructions out of the ltt nesting count. This is required to ensure
+ * that probe side-effects which can cause recursion (e.g. unforeseen
+ * traps, divisions by 0, ...) are triggered within the incremented
+ * nesting count section.
+ */
+ cmm_barrier();
+ eID = mdata->event_id;
+ chan_index = mdata->channel_id;
+
+ /*
+	 * Iterate on each trace. There is typically only a small number of
+	 * active traces, so plain list iteration is used here; iteration
+	 * with prefetch would usually be slower.
+ */
+	cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+ if (unlikely(!trace->active))
+ continue;
+//ust// if (unlikely(!ltt_run_filter(trace, eID)))
+//ust// continue;
+#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
+ rflags = LTT_RFLAG_ID_SIZE;
+#else
+ if (unlikely(eID >= LTT_FREE_EVENTS))
+ rflags = LTT_RFLAG_ID;
+ else
+ rflags = 0;
+#endif
+ /*
+ * Skip channels added after trace creation.
+ */
+ if (unlikely(chan_index >= trace->nr_channels))
+ continue;
+ chan = &trace->channels[chan_index];
+ if (!chan->active)
+ continue;
+
+ /* If a new cpu was plugged since the trace was started, we did
+ * not add it to the trace, and therefore we write the event to
+ * cpu 0.
+ */
+		if (cpu >= chan->n_cpus) {
+ cpu = 0;
+ }
+
+ /* reserve space : header and data */
+ ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
+ cpu, &buf, &slot_size, &buf_offset, &tsc,
+ &rflags);
+ if (unlikely(ret < 0))
+ continue; /* buffer full */
+
+ /* Out-of-order write : header and data */
+ buf_offset = ltt_write_event_header(chan, buf,
+ buf_offset, eID, data_size,
+ tsc, rflags);
+ if (data_size) {
+ buf_offset += ltt_align(buf_offset, largest_align);
+ ust_buffers_write(buf, buf_offset,
+ serialize_private, data_size);
+ buf_offset += data_size;
+ }
+ /* Out-of-order commit */
+ ltt_commit_slot(chan, buf, buf_offset, data_size, slot_size);
+ }
+ /*
+ * asm volatile and "memory" clobber prevent the compiler from moving
+ * instructions out of the ltt nesting count. This is required to ensure
+ * that probe side-effects which can cause recursion (e.g. unforeseen
+ * traps, divisions by 0, ...) are triggered within the incremented
+ * nesting count section.
+ */
+ cmm_barrier();
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
+ rcu_read_unlock();
+}
--- /dev/null
+#ifndef _LTT_TYPE_SERIALIZER_H
+#define _LTT_TYPE_SERIALIZER_H
+
+#include <ust/marker.h>
+#include <ust/marker-internal.h>
+#include <ust/core.h>
+#include "buffers.h"
+
+/*
+ * largest_align must be non-zero; it is the minimum of the size of the
+ * largest type in the payload and sizeof(void *).
+ */
+extern void _ltt_specialized_trace(const struct ust_marker *mdata, void *probe_data,
+ void *serialize_private, unsigned int data_size,
+ unsigned int largest_align);
+
+/*
+ * Statically clamp largest_align to the range [1, sizeof(void *)] to make it
+ * dumb-proof: 0 is changed into 1, and the alignment of unsigned long long is
+ * changed into sizeof(void *) on 32-bit architectures.
+ */
+static inline void ltt_specialized_trace(const struct ust_marker *mdata,
+ void *probe_data,
+ void *serialize_private, unsigned int data_size,
+ unsigned int largest_align)
+{
+ largest_align = min_t(unsigned int, largest_align, sizeof(void *));
+ largest_align = max_t(unsigned int, largest_align, 1);
+ _ltt_specialized_trace(mdata, probe_data, serialize_private, data_size,
+ largest_align);
+}
+
+/*
+ * Type serializer definitions.
+ */
+
+/*
+ * Return size of structure without end-of-structure padding.
+ */
+#define serialize_sizeof(type) offsetof(typeof(type), end_field)
+
+struct serialize_long_int {
+ unsigned long f1;
+ unsigned int f2;
+ unsigned char end_field[0];
+} LTT_ALIGN;
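+
+/*
+ * Illustrative use (sketch, not part of this change): a custom probe fills
+ * one of these fixed-layout structures and hands it to
+ * ltt_specialized_trace() with serialize_sizeof() as the payload size and
+ * the size of its largest field as the alignment:
+ *
+ *	struct serialize_long_int payload;
+ *
+ *	payload.f1 = (unsigned long)some_value;
+ *	payload.f2 = (unsigned int)some_count;
+ *	ltt_specialized_trace(mdata, probe_data, &payload,
+ *			      serialize_sizeof(payload), sizeof(long));
+ */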
+
+struct serialize_int_int_long {
+ unsigned int f1;
+ unsigned int f2;
+ unsigned long f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_int_int_short {
+ unsigned int f1;
+ unsigned int f2;
+ unsigned short f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_long {
+ unsigned long f1;
+ unsigned long f2;
+ unsigned long f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_int {
+ unsigned long f1;
+ unsigned long f2;
+ unsigned int f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_short_char {
+ unsigned long f1;
+ unsigned long f2;
+ unsigned short f3;
+ unsigned char f4;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_short {
+ unsigned long f1;
+ unsigned long f2;
+ unsigned short f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_short_char {
+ unsigned long f1;
+ unsigned short f2;
+ unsigned char f3;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_short {
+ unsigned long f1;
+ unsigned short f2;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_char {
+ unsigned long f1;
+ unsigned char f2;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_sizet_int {
+ size_t f1;
+ unsigned int f2;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_sizet_int {
+ unsigned long f1;
+ unsigned long f2;
+ size_t f3;
+ unsigned int f4;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_sizet_int_int {
+ unsigned long f1;
+ unsigned long f2;
+ size_t f3;
+ unsigned int f4;
+ unsigned int f5;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_l4421224411111 {
+ unsigned long f1;
+ uint32_t f2;
+ uint32_t f3;
+ uint16_t f4;
+ uint8_t f5;
+ uint16_t f6;
+ uint16_t f7;
+ uint32_t f8;
+ uint32_t f9;
+ uint8_t f10;
+ uint8_t f11;
+ uint8_t f12;
+ uint8_t f13;
+ uint8_t f14;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_l214421224411111 {
+ unsigned long f1;
+ uint16_t f2;
+ uint8_t f3;
+ uint32_t f4;
+ uint32_t f5;
+ uint16_t f6;
+ uint8_t f7;
+ uint16_t f8;
+ uint16_t f9;
+ uint32_t f10;
+ uint32_t f11;
+ uint8_t f12;
+ uint8_t f13;
+ uint8_t f14;
+ uint8_t f15;
+ uint8_t f16;
+ uint8_t end_field[0];
+} LTT_ALIGN;
+
+struct serialize_l4412228 {
+ unsigned long f1;
+ uint32_t f2;
+ uint32_t f3;
+ uint8_t f4;
+ uint16_t f5;
+ uint16_t f6;
+ uint16_t f7;
+ uint64_t f8;
+ unsigned char end_field[0];
+} LTT_ALIGN;
+
+#endif /* _LTT_TYPE_SERIALIZER_H */
-AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/libustcomm
+AM_CPPFLAGS = -I$(top_srcdir)/include
AM_CFLAGS = -fno-strict-aliasing
lib_LTLIBRARIES = libust.la
libust_la_SOURCES = \
- marker.c \
tracepoint.c \
trace_event.c \
- channels.c \
- channels.h \
- marker-control.c \
- marker-control.h \
- buffers.c \
- buffers.h \
- tracer.c \
tracer.h \
tracercore.c \
tracercore.h \
- serialize.c \
- tracectl.c \
- tracerconst.h \
- type-serializer.h \
- type-serializer.c
+ tracerconst.h
+
+#removed: marker.c channels.c channels.h marker-control.c marker-control.h
+#removed: buffers.c buffers.h tracer.c serialize.c tracectl.c
+#removed: type-serializer.c type-serializer.h
libust_la_LDFLAGS = -no-undefined -version-info 0:0:0
libust_la_LIBADD = \
-lpthread \
-lrt \
- $(top_builddir)/snprintf/libustsnprintf.la \
- $(top_builddir)/libustcomm/libustcomm.la
+ $(top_builddir)/snprintf/libustsnprintf.la
libust_la_CFLAGS = -DUST_COMPONENT="libust" -fno-strict-aliasing
+++ /dev/null
-/*
- * ltt/ltt-channels.c
- *
- * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * LTTng channel management.
- *
- * Author:
- * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <stdlib.h>
-#include <ust/marker.h>
-#include <ust/marker-internal.h>
-#include "channels.h"
-#include "usterr_signal_safe.h"
-
-/*
- * ltt_channel_mutex may be nested inside the LTT trace mutex.
- * ltt_channel_mutex mutex may be nested inside markers mutex.
- */
-static DEFINE_MUTEX(ltt_channel_mutex);
-static CDS_LIST_HEAD(ltt_channels);
-/*
- * Index of next channel in array. Makes sure that as long as a trace channel is
- * allocated, no array index will be re-used when a channel is freed and then
- * another channel is allocated. This index is cleared and the array indexeds
- * get reassigned when the index_urcu_ref goes back to 0, which indicates that no
- * more trace channels are allocated.
- */
-static unsigned int free_index;
-static struct urcu_ref index_urcu_ref; /* Keeps track of allocated trace channels */
-
-int ust_channels_overwrite_by_default = 0;
-int ust_channels_request_collection_by_default = 1;
-
-static struct ltt_channel_setting *lookup_channel(const char *name)
-{
- struct ltt_channel_setting *iter;
-
-	cds_list_for_each_entry(iter, &ltt_channels, list)
- if (strcmp(name, iter->name) == 0)
- return iter;
- return NULL;
-}
-
-/*
- * Must be called when channel refcount falls to 0 _and_ also when the last
- * trace is freed. This function is responsible for compacting the channel and
- * event IDs when no users are active.
- *
- * Called with lock_markers() and channels mutex held.
- */
-static void release_channel_setting(struct urcu_ref *urcu_ref)
-{
- struct ltt_channel_setting *setting = _ust_container_of(urcu_ref,
- struct ltt_channel_setting, urcu_ref);
- struct ltt_channel_setting *iter;
-
- if (uatomic_read(&index_urcu_ref.refcount) == 0
- && uatomic_read(&setting->urcu_ref.refcount) == 0) {
- cds_list_del(&setting->list);
- free(setting);
-
- free_index = 0;
-		cds_list_for_each_entry(iter, &ltt_channels, list) {
- iter->index = free_index++;
- iter->free_event_id = 0;
- }
- /* FIXME: why not run this? */
-//ust// markers_compact_event_ids();
- }
-}
-
-/*
- * Perform channel index compaction when the last trace channel is freed.
- *
- * Called with lock_markers() and channels mutex held.
- */
-static void release_trace_channel(struct urcu_ref *urcu_ref)
-{
- struct ltt_channel_setting *iter, *n;
-
-	cds_list_for_each_entry_safe(iter, n, &ltt_channels, list)
- release_channel_setting(&iter->urcu_ref);
-}
-
-/**
- * ltt_channels_register - Register a trace channel.
- * @name: channel name
- *
- * Uses refcounting.
- */
-int ltt_channels_register(const char *name)
-{
- struct ltt_channel_setting *setting;
- int ret = 0;
-
-	pthread_mutex_lock(&ltt_channel_mutex);
- setting = lookup_channel(name);
- if (setting) {
- if (uatomic_read(&setting->urcu_ref.refcount) == 0)
- goto init_urcu_ref;
- else {
- urcu_ref_get(&setting->urcu_ref);
- goto end;
- }
- }
- setting = zmalloc(sizeof(*setting));
- if (!setting) {
- ret = -ENOMEM;
- goto end;
- }
-	cds_list_add(&setting->list, &ltt_channels);
- strncpy(setting->name, name, PATH_MAX-1);
- setting->index = free_index++;
-init_urcu_ref:
- urcu_ref_init(&setting->urcu_ref);
-end:
-	pthread_mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
-
-/**
- * ltt_channels_unregister - Unregister a trace channel.
- * @name: channel name
- *
- * Must be called with markers mutex held.
- */
-int ltt_channels_unregister(const char *name)
-{
- struct ltt_channel_setting *setting;
- int ret = 0;
-
-	pthread_mutex_lock(&ltt_channel_mutex);
- setting = lookup_channel(name);
- if (!setting || uatomic_read(&setting->urcu_ref.refcount) == 0) {
- ret = -ENOENT;
- goto end;
- }
- urcu_ref_put(&setting->urcu_ref, release_channel_setting);
-end:
-	pthread_mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
-
-/**
- * ltt_channels_set_default - Set channel default behavior.
- * @name: default channel name
- * @subbuf_size: size of the subbuffers
- * @subbuf_cnt: number of subbuffers
- */
-int ltt_channels_set_default(const char *name,
- unsigned int subbuf_size,
- unsigned int subbuf_cnt)
-{
- struct ltt_channel_setting *setting;
- int ret = 0;
-
-	pthread_mutex_lock(&ltt_channel_mutex);
- setting = lookup_channel(name);
- if (!setting || uatomic_read(&setting->urcu_ref.refcount) == 0) {
- ret = -ENOENT;
- goto end;
- }
- setting->subbuf_size = subbuf_size;
- setting->subbuf_cnt = subbuf_cnt;
-end:
-	pthread_mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
-
-/**
- * ltt_channels_get_name_from_index - get channel name from channel index
- * @index: channel index
- *
- * Allows to lookup the channel name given its index. Done to keep the name
- * information outside of each trace channel instance.
- */
-const char *ltt_channels_get_name_from_index(unsigned int index)
-{
- struct ltt_channel_setting *iter;
-
-	cds_list_for_each_entry(iter, &ltt_channels, list)
- if (iter->index == index && uatomic_read(&iter->urcu_ref.refcount))
- return iter->name;
- return NULL;
-}
-
-static struct ltt_channel_setting *
-ltt_channels_get_setting_from_name(const char *name)
-{
- struct ltt_channel_setting *iter;
-
-	cds_list_for_each_entry(iter, &ltt_channels, list)
- if (!strcmp(iter->name, name)
- && uatomic_read(&iter->urcu_ref.refcount))
- return iter;
- return NULL;
-}
-
-/**
- * ltt_channels_get_index_from_name - get channel index from channel name
- * @name: channel name
- *
- * Allows to lookup the channel index given its name. Done to keep the name
- * information outside of each trace channel instance.
- * Returns -1 if not found.
- */
-int ltt_channels_get_index_from_name(const char *name)
-{
- struct ltt_channel_setting *setting;
-
- setting = ltt_channels_get_setting_from_name(name);
- if (setting)
- return setting->index;
- else
- return -1;
-}
-
-/**
- * ltt_channels_trace_alloc - Allocate channel structures for a trace
- * @subbuf_size: subbuffer size. 0 uses default.
- * @subbuf_cnt: number of subbuffers per per-cpu buffers. 0 uses default.
- * @flags: Default channel flags
- *
- * Use the current channel list to allocate the channels for a trace.
- * Called with trace lock held. Does not perform the trace buffer allocation,
- * because we must let the user overwrite specific channel sizes.
- */
-struct ust_channel *ltt_channels_trace_alloc(unsigned int *nr_channels,
- int overwrite,
- int request_collection,
- int active)
-{
- struct ust_channel *channel = NULL;
- struct ltt_channel_setting *iter;
-
-	pthread_mutex_lock(&ltt_channel_mutex);
- if (!free_index) {
- WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
- goto end;
- }
- if (!uatomic_read(&index_urcu_ref.refcount))
- urcu_ref_init(&index_urcu_ref);
- else
- urcu_ref_get(&index_urcu_ref);
- *nr_channels = free_index;
- channel = zmalloc(sizeof(struct ust_channel) * free_index);
- if (!channel) {
- WARN("ltt_channel_struct: channel null after alloc");
- goto end;
- }
-	cds_list_for_each_entry(iter, &ltt_channels, list) {
- if (!uatomic_read(&iter->urcu_ref.refcount))
- continue;
- channel[iter->index].subbuf_size = iter->subbuf_size;
- channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
- channel[iter->index].overwrite = overwrite;
- channel[iter->index].request_collection = request_collection;
- channel[iter->index].active = active;
- channel[iter->index].channel_name = iter->name;
- }
-end:
-	pthread_mutex_unlock(&ltt_channel_mutex);
- return channel;
-}
-
-/**
- * ltt_channels_trace_free - Free one trace's channels
- * @channels: channels to free
- *
- * Called with trace lock held. The actual channel buffers must be freed before
- * this function is called.
- */
-void ltt_channels_trace_free(struct ust_channel *channels)
-{
- lock_ust_marker();
-	pthread_mutex_lock(&ltt_channel_mutex);
- free(channels);
- urcu_ref_put(&index_urcu_ref, release_trace_channel);
-	pthread_mutex_unlock(&ltt_channel_mutex);
- unlock_ust_marker();
-}
-
-/**
- * _ltt_channels_get_event_id - get next event ID for a marker
- * @channel: channel name
- * @name: event name
- *
- * Returns a unique event ID (for this channel) or < 0 on error.
- * Must be called with channels mutex held.
- */
-int _ltt_channels_get_event_id(const char *channel, const char *name)
-{
- struct ltt_channel_setting *setting;
- int ret;
-
- setting = ltt_channels_get_setting_from_name(channel);
- if (!setting) {
- ret = -ENOENT;
- goto end;
- }
- if (strcmp(channel, "metadata") == 0) {
- if (strcmp(name, "core_marker_id") == 0)
- ret = 0;
- else if (strcmp(name, "core_marker_format") == 0)
- ret = 1;
- else if (strcmp(name, "testev") == 0)
- ret = 2;
- else
- ret = -ENOENT;
- goto end;
- }
- if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
- ret = -ENOSPC;
- goto end;
- }
- ret = setting->free_event_id++;
-end:
- return ret;
-}
-
-/**
- * ltt_channels_get_event_id - get next event ID for a marker
- * @channel: channel name
- * @name: event name
- *
- * Returns a unique event ID (for this channel) or < 0 on error.
- */
-int ltt_channels_get_event_id(const char *channel, const char *name)
-{
- int ret;
-
-	pthread_mutex_lock(&ltt_channel_mutex);
- ret = _ltt_channels_get_event_id(channel, name);
-	pthread_mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
+++ /dev/null
-#ifndef UST_CHANNELS_H
-#define UST_CHANNELS_H
-
-/*
- * Copyright (C) 2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * Dynamic tracer channel allocation.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#define _LGPL_SOURCE
-#include <linux/limits.h>
-#include <errno.h>
-#include <ust/kcompat/kcompat.h>
-#include <ust/core.h>
-#include <urcu/list.h>
-#include <urcu/ref.h>
-
-#define EVENTS_PER_CHANNEL 65536
-#define MAX_CPUS 32
-
-struct ust_trace;
-
-struct ust_buffer;
-
-struct ust_channel {
- /* First 32 bytes cache-hot cacheline */
- struct ust_trace *trace;
- int *buf_struct_shmids;
- struct ust_buffer **buf;
- int overwrite:1;
- /* whether collection is requested upon trace start */
- int request_collection:1;
- int active:1;
- unsigned int n_subbufs_order;
- unsigned long commit_count_mask; /*
- * Commit count mask, removing
- * the MSBs corresponding to
- * bits used to represent the
- * subbuffer index.
- */
- /* End of first 32 bytes cacheline */
-
- struct urcu_ref urcu_ref; /* Channel transport reference count */
- size_t subbuf_size;
- int subbuf_size_order;
- unsigned int subbuf_cnt;
- const char *channel_name;
- int n_cpus;
-
- u32 version;
- size_t alloc_size;
- struct cds_list_head list;
-} ____cacheline_aligned;
-
-struct ltt_channel_setting {
- unsigned int subbuf_size;
- unsigned int subbuf_cnt;
- struct urcu_ref urcu_ref; /* Number of references to structure content */
- struct cds_list_head list;
- unsigned int index; /* index of channel in trace channel array */
- u16 free_event_id; /* Next event ID to allocate */
- char name[PATH_MAX];
-};
-
-extern int ltt_channels_register(const char *name);
-extern int ltt_channels_unregister(const char *name);
-extern int ltt_channels_set_default(const char *name,
- unsigned int subbuf_size,
- unsigned int subbuf_cnt);
-extern const char *ltt_channels_get_name_from_index(unsigned int index);
-extern int ltt_channels_get_index_from_name(const char *name);
-extern struct ust_channel *ltt_channels_trace_alloc(unsigned int *nr_channels,
- int overwrite,
- int request_collection,
- int active);
-extern void ltt_channels_trace_free(struct ust_channel *channels);
-extern int _ltt_channels_get_event_id(const char *channel, const char *name);
-extern int ltt_channels_get_event_id(const char *channel, const char *name);
-
-extern int ust_channels_overwrite_by_default;
-extern int ust_channels_request_collection_by_default;
-
-#endif /* UST_CHANNELS_H */
+++ /dev/null
-/*
- * Copyright (C) 2007 Mathieu Desnoyers
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-
-/* This file contains a high-level API for activating and deactivating ust_markers,
- * and making sure ust_markers in a given library can be released when the library
- * is unloaded.
- */
-
-#include <ctype.h>
-#include <stdlib.h>
-
-#include "tracer.h"
-#include "usterr_signal_safe.h"
-
-#define DEFAULT_CHANNEL "cpu"
-#define DEFAULT_PROBE "default"
-
-static int initialized;
-
-CDS_LIST_HEAD(probes_list);
-
-/*
- * Mutex protecting the probe slab cache.
- * Nests inside the traces mutex.
- */
-DEFINE_MUTEX(probes_mutex);
-
-struct ltt_available_probe default_probe = {
- .name = "default",
- .format = NULL,
- .probe_func = ltt_vtrace,
- .callbacks[0] = ltt_serialize_data,
-};
-
-//ust//static struct kmem_cache *ust_markers_loaded_cachep;
-static CDS_LIST_HEAD(ust_markers_loaded_list);
-/*
- * List sorted by name strcmp order.
- */
-static CDS_LIST_HEAD(probes_registered_list);
-
-static struct ltt_available_probe *get_probe_from_name(const char *pname)
-{
- struct ltt_available_probe *iter;
- int comparison, found = 0;
-
- if (!pname)
- pname = DEFAULT_PROBE;
- cds_list_for_each_entry(iter, &probes_registered_list, node) {
- comparison = strcmp(pname, iter->name);
- if (!comparison)
- found = 1;
- if (comparison <= 0)
- break;
- }
- if (found)
- return iter;
- else
- return NULL;
-}
-
-/* (unused)
-static char *skip_spaces(char *buf)
-{
- while (*buf != '\0' && isspace(*buf))
- buf++;
- return buf;
-}
-
-static char *skip_nonspaces(char *buf)
-{
- while (*buf != '\0' && !isspace(*buf))
- buf++;
- return buf;
-}
-
-static void get_ust_marker_string(char *buf, char **start,
- char **end)
-{
- *start = skip_spaces(buf);
- *end = skip_nonspaces(*start);
- **end = '\0';
-}
-*/
-
-int ltt_probe_register(struct ltt_available_probe *pdata)
-{
- int ret = 0;
- int comparison;
- struct ltt_available_probe *iter;
-
- pthread_mutex_lock(&probes_mutex);
- cds_list_for_each_entry_reverse(iter, &probes_registered_list, node) {
- comparison = strcmp(pdata->name, iter->name);
- if (!comparison) {
- ret = -EBUSY;
- goto end;
- } else if (comparison > 0) {
- /* We belong to the location right after iter. */
- cds_list_add(&pdata->node, &iter->node);
- goto end;
- }
- }
- /* Should be added at the head of the list */
- cds_list_add(&pdata->node, &probes_registered_list);
-end:
- pthread_mutex_unlock(&probes_mutex);
- return ret;
-}
-
-/*
- * Called when a probe does not want to be called anymore.
- */
-int ltt_probe_unregister(struct ltt_available_probe *pdata)
-{
- int ret = 0;
- struct ltt_active_ust_marker *amark, *tmp;
-
- pthread_mutex_lock(&probes_mutex);
- cds_list_for_each_entry_safe(amark, tmp, &ust_markers_loaded_list, node) {
- if (amark->probe == pdata) {
- ret = ust_marker_probe_unregister_private_data(
- pdata->probe_func, amark);
- if (ret)
- goto end;
- cds_list_del(&amark->node);
- free(amark);
- }
- }
- cds_list_del(&pdata->node);
-end:
- pthread_mutex_unlock(&probes_mutex);
- return ret;
-}
-
-/*
- * Connect ust_marker "mname" to probe "pname".
- * Only allow _only_ probe instance to be connected to a ust_marker.
- */
-int ltt_ust_marker_connect(const char *channel, const char *mname,
- const char *pname)
-
-{
- int ret;
- struct ltt_active_ust_marker *pdata;
- struct ltt_available_probe *probe;
-
- ltt_lock_traces();
- pthread_mutex_lock(&probes_mutex);
- probe = get_probe_from_name(pname);
- if (!probe) {
- ret = -ENOENT;
- goto end;
- }
- pdata = ust_marker_get_private_data(channel, mname, probe->probe_func, 0);
- if (pdata && !IS_ERR(pdata)) {
- ret = -EEXIST;
- goto end;
- }
- pdata = zmalloc(sizeof(struct ltt_active_ust_marker));
- if (!pdata) {
- ret = -ENOMEM;
- goto end;
- }
- pdata->probe = probe;
- /*
- * ID has priority over channel in case of conflict.
- */
- ret = ust_marker_probe_register(channel, mname, NULL,
- probe->probe_func, pdata);
- if (ret)
- free(pdata);
- else
- cds_list_add(&pdata->node, &ust_markers_loaded_list);
-end:
- pthread_mutex_unlock(&probes_mutex);
- ltt_unlock_traces();
- return ret;
-}
-
-/*
- * Disconnect ust_marker "mname", probe "pname".
- */
-int ltt_ust_marker_disconnect(const char *channel, const char *mname,
- const char *pname)
-{
- struct ltt_active_ust_marker *pdata;
- struct ltt_available_probe *probe;
- int ret = 0;
-
- pthread_mutex_lock(&probes_mutex);
- probe = get_probe_from_name(pname);
- if (!probe) {
- ret = -ENOENT;
- goto end;
- }
- pdata = ust_marker_get_private_data(channel, mname, probe->probe_func, 0);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto end;
- } else if (!pdata) {
- /*
- * Not registered by us.
- */
- ret = -EPERM;
- goto end;
- }
- ret = ust_marker_probe_unregister(channel, mname, probe->probe_func, pdata);
- if (ret)
- goto end;
- else {
- cds_list_del(&pdata->node);
- free(pdata);
- }
-end:
- pthread_mutex_unlock(&probes_mutex);
- return ret;
-}
-
-static void disconnect_all_ust_markers(void)
-{
- struct ltt_active_ust_marker *pdata, *tmp;
-
- cds_list_for_each_entry_safe(pdata, tmp, &ust_markers_loaded_list, node) {
- ust_marker_probe_unregister_private_data(pdata->probe->probe_func,
- pdata);
- cds_list_del(&pdata->node);
- free(pdata);
- }
-}
-
-void __attribute__((constructor)) init_ust_marker_control(void)
-{
- if (!initialized) {
- int ret;
-
- init_ust_marker();
- ret = ltt_probe_register(&default_probe);
- BUG_ON(ret);
- ret = ltt_ust_marker_connect("metadata", "core_marker_format",
- DEFAULT_PROBE);
- BUG_ON(ret);
- ret = ltt_ust_marker_connect("metadata", "core_marker_id", DEFAULT_PROBE);
- BUG_ON(ret);
- initialized = 1;
- }
-}
-
-static void __attribute__((destructor)) ust_marker_control_exit(void)
-{
- int ret;
-
- ret = ltt_ust_marker_disconnect("metadata", "core_marker_format",
- DEFAULT_PROBE);
- BUG_ON(ret);
- ret = ltt_ust_marker_disconnect("metadata", "core_marker_id",
- DEFAULT_PROBE);
- BUG_ON(ret);
- ret = ltt_probe_unregister(&default_probe);
- BUG_ON(ret);
- disconnect_all_ust_markers();
- ust_marker_synchronize_unregister();
-}
+++ /dev/null
-/*
- * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef MARKER_CONTROL_H
-#define MARKER_CONTROL_H
-
-int init_ust_marker_control(void);
-int ltt_probe_register(struct ltt_available_probe *pdata);
-
-#endif /* MARKER_CONTROL_H */
+++ /dev/null
-/*
- * Copyright (C) 2007-2011 Mathieu Desnoyers
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#define _LGPL_SOURCE
-#include <stdlib.h>
-#include <errno.h>
-#include <pthread.h>
-#include <urcu-call-rcu.h>
-#include <urcu-bp.h>
-#include <urcu/rculist.h>
-#include <urcu/hlist.h>
-
-#include <ust/core.h>
-#include <ust/marker.h>
-#include <ust/marker-internal.h>
-#include <ust/tracepoint.h>
-#include <ust/tracepoint-internal.h>
-
-#include "usterr_signal_safe.h"
-#include "channels.h"
-#include "tracercore.h"
-#include "tracer.h"
-
-extern struct ust_marker * const __start___ust_marker_ptrs[] __attribute__((visibility("hidden")));
-extern struct ust_marker * const __stop___ust_marker_ptrs[] __attribute__((visibility("hidden")));
-
-/* Set to 1 to enable ust_marker debug output */
-static const int ust_marker_debug;
-static int initialized;
-static void (*new_ust_marker_cb)(struct ust_marker *);
-
-/*
- * ust_marker mutex protects the builtin and module ust_marker and the
- * hash table, as well as the ust_marker_libs list.
- */
-static DEFINE_MUTEX(ust_marker_mutex);
-static CDS_LIST_HEAD(ust_marker_libs);
-
-/*
- * Allow nested mutex for mutex listing and nested enable.
- */
-static __thread int nested_mutex;
-
-void lock_ust_marker(void)
-{
- if (!(nested_mutex++))
- pthread_mutex_lock(&ust_marker_mutex);
-}
-
-void unlock_ust_marker(void)
-{
- if (!(--nested_mutex))
- pthread_mutex_unlock(&ust_marker_mutex);
-}
-
-/*
- * ust_marker hash table, containing the active ust_marker.
- * Protected by ust_marker mutex.
- */
-#define UST_MARKER_HASH_BITS 6
-#define UST_MARKER_TABLE_SIZE (1 << UST_MARKER_HASH_BITS)
-static struct cds_hlist_head ust_marker_table[UST_MARKER_TABLE_SIZE];
-
-struct ust_marker_probe_array {
- struct rcu_head rcu;
- struct ust_marker_probe_closure c[0];
-};
-
-/*
- * Note about RCU :
- * It is used to make sure every handler has finished using its private
- * data between two consecutive operation (add or remove) on a given
- * ust_marker. It is also used to delay the free of multiple probes
- * array until a quiescent state is reached. ust_marker entries
- * modifications are protected by the ust_marker_mutex.
- */
-struct ust_marker_entry {
- struct cds_hlist_node hlist;
- char *format;
- char *name;
- /* Probe wrapper */
- void (*call)(const struct ust_marker *mdata, void *call_private, ...);
- struct ust_marker_probe_closure single;
- struct ust_marker_probe_array *multi;
- int refcount; /* Number of times armed. 0 if disarmed. */
- u16 channel_id;
- u16 event_id;
- unsigned char ptype:1;
- unsigned char format_allocated:1;
- char channel[0]; /* Contains channel'\0'name'\0'format'\0' */
-};
-
-/**
- * __ust_marker_empty_function - Empty probe callback
- * @mdata: ust_marker data
- * @probe_private: probe private data
- * @call_private: call site private data
- * @fmt: format string
- * @...: variable argument list
- *
- * Empty callback provided as a probe to the ust_marker. By providing
- * this to a disabled ust_marker, we make sure the execution flow is
- * always valid even though the function pointer change and the
- * ust_marker enabling are two distinct operations that modifies the
- * execution flow of preemptible code.
- */
-notrace void __ust_marker_empty_function(const struct ust_marker *mdata,
- void *probe_private, void *call_private, const char *fmt, va_list *args)
-{
-}
-
-/*
- * ust_marker_probe_cb Callback that prepares the variable argument list for probes.
- * @mdata: pointer of type struct ust_marker
- * @call_private: caller site private data
- * @...: Variable argument list.
- *
- * Since we do not use "typical" pointer based RCU in the 1 argument case, we
- * need to put a full cmm_smp_rmb() in this branch. This is why we do not use
- * rcu_dereference() for the pointer read.
- */
-notrace void ust_marker_probe_cb(const struct ust_marker *mdata,
- void *call_private, ...)
-{
- va_list args;
- char ptype;
-
- /*
- * rcu_read_lock_sched does two things : disabling preemption to make
- * sure the teardown of the callbacks can be done correctly when they
- * are in modules and they insure RCU read coherency.
- */
- rcu_read_lock();
- ptype = mdata->ptype;
- if (likely(!ptype)) {
- ust_marker_probe_func *func;
- /* Must read the ptype before ptr. They are not data dependant,
- * so we put an explicit cmm_smp_rmb() here. */
- cmm_smp_rmb();
- func = mdata->single.func;
- /* Must read the ptr before private data. They are not data
- * dependant, so we put an explicit cmm_smp_rmb() here. */
- cmm_smp_rmb();
- va_start(args, call_private);
- func(mdata, mdata->single.probe_private, call_private,
- mdata->format, &args);
- va_end(args);
- } else {
- struct ust_marker_probe_array *multi;
- int i;
- /*
- * Read mdata->ptype before mdata->multi.
- */
- cmm_smp_rmb();
- multi = mdata->multi;
- /*
- * multi points to an array, therefore accessing the array
- * depends on reading multi. However, even in this case,
- * we must insure that the pointer is read _before_ the array
- * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
- * in the fast path, so put the explicit cmm_barrier here.
- */
- cmm_smp_read_barrier_depends();
- for (i = 0; multi->c[i].func; i++) {
- va_start(args, call_private);
- multi->c[i].func(mdata, multi->c[i].probe_private,
- call_private, mdata->format, &args);
- va_end(args);
- }
- }
- rcu_read_unlock();
-}
-
-/*
- * ust_marker_probe_cb Callback that does not prepare the variable argument list.
- * @mdata: pointer of type struct ust_marker
- * @call_private: caller site private data
- * @...: Variable argument list.
- *
- * Should be connected to ust_marker "UST_MARKER_NOARGS".
- */
-static notrace void ust_marker_probe_cb_noarg(const struct ust_marker *mdata,
- void *call_private, ...)
-{
- va_list args; /* not initialized */
- char ptype;
-
- rcu_read_lock();
- ptype = mdata->ptype;
- if (likely(!ptype)) {
- ust_marker_probe_func *func;
- /* Must read the ptype before ptr. They are not data dependent,
- * so we put an explicit cmm_smp_rmb() here. */
- cmm_smp_rmb();
- func = mdata->single.func;
- /* Must read the ptr before private data. They are not data
- * dependent, so we put an explicit cmm_smp_rmb() here. */
- cmm_smp_rmb();
- func(mdata, mdata->single.probe_private, call_private,
- mdata->format, &args);
- } else {
- struct ust_marker_probe_array *multi;
- int i;
- /*
- * Read mdata->ptype before mdata->multi.
- */
- cmm_smp_rmb();
- multi = mdata->multi;
- /*
- * multi points to an array, therefore accessing the array
- * depends on reading multi. However, even in this case,
- * we must ensure that the pointer is read _before_ the array
- * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
- * in the fast path, so put the explicit cmm_barrier here.
- */
- cmm_smp_read_barrier_depends();
- for (i = 0; multi->c[i].func; i++)
- multi->c[i].func(mdata, multi->c[i].probe_private,
- call_private, mdata->format, &args);
- }
- rcu_read_unlock();
-}
-
-static void free_old_closure(struct rcu_head *head)
-{
- struct ust_marker_probe_array *multi =
- _ust_container_of(head, struct ust_marker_probe_array, rcu);
- free(multi);
-}
-
-static void debug_print_probes(struct ust_marker_entry *entry)
-{
- int i;
-
- if (!ust_marker_debug)
- return;
-
- if (!entry->ptype) {
- DBG("Single probe : %p %p",
- entry->single.func,
- entry->single.probe_private);
- } else {
- for (i = 0; entry->multi->c[i].func; i++)
- DBG("Multi probe %d : %p %p", i,
- entry->multi->c[i].func,
- entry->multi->c[i].probe_private);
- }
-}
-
-static struct ust_marker_probe_array *
-ust_marker_entry_add_probe(struct ust_marker_entry *entry,
- ust_marker_probe_func *probe, void *probe_private)
-{
- int nr_probes = 0;
- struct ust_marker_probe_array *old, *new;
-
- WARN_ON(!probe);
-
- debug_print_probes(entry);
- old = entry->multi;
- if (!entry->ptype) {
- if (entry->single.func == probe &&
- entry->single.probe_private == probe_private)
- return ERR_PTR(-EBUSY);
- if (entry->single.func == __ust_marker_empty_function) {
- /* 0 -> 1 probes */
- entry->single.func = probe;
- entry->single.probe_private = probe_private;
- entry->refcount = 1;
- entry->ptype = 0;
- debug_print_probes(entry);
- return NULL;
- } else {
- /* 1 -> 2 probes */
- nr_probes = 1;
- old = NULL;
- }
- } else {
- /* (N -> N+1), (N != 0, 1) probes */
- for (nr_probes = 0; old->c[nr_probes].func; nr_probes++)
- if (old->c[nr_probes].func == probe
- && old->c[nr_probes].probe_private
- == probe_private)
- return ERR_PTR(-EBUSY);
- }
- /* + 2 : one for new probe, one for NULL func */
- new = zmalloc(sizeof(struct ust_marker_probe_array)
- + ((nr_probes + 2) * sizeof(struct ust_marker_probe_closure)));
- if (new == NULL)
- return ERR_PTR(-ENOMEM);
- if (!old)
- new->c[0] = entry->single;
- else
- memcpy(&new->c[0], &old->c[0],
- nr_probes * sizeof(struct ust_marker_probe_closure));
- new->c[nr_probes].func = probe;
- new->c[nr_probes].probe_private = probe_private;
- entry->refcount = nr_probes + 1;
- entry->multi = new;
- entry->ptype = 1;
- debug_print_probes(entry);
- return old;
-}
-
-static struct ust_marker_probe_array *
-ust_marker_entry_remove_probe(struct ust_marker_entry *entry,
- ust_marker_probe_func *probe, void *probe_private)
-{
- int nr_probes = 0, nr_del = 0, i;
- struct ust_marker_probe_array *old, *new;
-
- old = entry->multi;
-
- debug_print_probes(entry);
- if (!entry->ptype) {
- /* 0 -> N is an error */
- WARN_ON(entry->single.func == __ust_marker_empty_function);
- /* 1 -> 0 probes */
- WARN_ON(probe && entry->single.func != probe);
- WARN_ON(entry->single.probe_private != probe_private);
- entry->single.func = __ust_marker_empty_function;
- entry->refcount = 0;
- entry->ptype = 0;
- debug_print_probes(entry);
- return NULL;
- } else {
- /* (N -> M), (N > 1, M >= 0) probes */
- for (nr_probes = 0; old->c[nr_probes].func; nr_probes++) {
- if ((!probe || old->c[nr_probes].func == probe)
- && old->c[nr_probes].probe_private
- == probe_private)
- nr_del++;
- }
- }
-
- if (nr_probes - nr_del == 0) {
- /* N -> 0, (N > 1) */
- entry->single.func = __ust_marker_empty_function;
- entry->refcount = 0;
- entry->ptype = 0;
- } else if (nr_probes - nr_del == 1) {
- /* N -> 1, (N > 1) */
- for (i = 0; old->c[i].func; i++)
- if ((probe && old->c[i].func != probe) ||
- old->c[i].probe_private != probe_private)
- entry->single = old->c[i];
- entry->refcount = 1;
- entry->ptype = 0;
- } else {
- int j = 0;
- /* N -> M, (N > 1, M > 1) */
- /* + 1 for NULL */
- new = zmalloc(sizeof(struct ust_marker_probe_array)
- + ((nr_probes - nr_del + 1) * sizeof(struct ust_marker_probe_closure)));
- if (new == NULL)
- return ERR_PTR(-ENOMEM);
- for (i = 0; old->c[i].func; i++)
- if ((probe && old->c[i].func != probe) ||
- old->c[i].probe_private != probe_private)
- new->c[j++] = old->c[i];
- entry->refcount = nr_probes - nr_del;
- entry->ptype = 1;
- entry->multi = new;
- }
- debug_print_probes(entry);
- return old;
-}
-
-/*
- * Get ust_marker if the ust_marker is present in the ust_marker hash table.
- * Must be called with ust_marker_mutex held.
- * Returns NULL if not present.
- */
-static struct ust_marker_entry *get_ust_marker(const char *channel, const char *name)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct ust_marker_entry *e;
- size_t channel_len = strlen(channel) + 1;
- size_t name_len = strlen(name) + 1;
- u32 hash;
-
- hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
- head = &ust_marker_table[hash & ((1 << UST_MARKER_HASH_BITS)-1)];
- cds_hlist_for_each_entry(e, node, head, hlist) {
- if (!strcmp(channel, e->channel) && !strcmp(name, e->name))
- return e;
- }
- return NULL;
-}
-
-/*
- * Add the ust_marker to the ust_marker hash table. Must be called with
- * ust_marker_mutex held.
- */
-static struct ust_marker_entry *add_ust_marker(const char *channel, const char *name,
- const char *format)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct ust_marker_entry *e;
- size_t channel_len = strlen(channel) + 1;
- size_t name_len = strlen(name) + 1;
- size_t format_len = 0;
- u32 hash;
-
- hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
- if (format)
- format_len = strlen(format) + 1;
- head = &ust_marker_table[hash & ((1 << UST_MARKER_HASH_BITS)-1)];
- cds_hlist_for_each_entry(e, node, head, hlist) {
- if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
- DBG("ust_marker %s.%s busy", channel, name);
- return ERR_PTR(-EBUSY); /* Already there */
- }
- }
- /*
- * Using zmalloc here to allocate a variable length element. Could
- * cause some memory fragmentation if overused.
- */
- e = zmalloc(sizeof(struct ust_marker_entry)
- + channel_len + name_len + format_len);
- if (!e)
- return ERR_PTR(-ENOMEM);
- memcpy(e->channel, channel, channel_len);
- e->name = &e->channel[channel_len];
- memcpy(e->name, name, name_len);
- if (format) {
- e->format = &e->name[name_len];
- memcpy(e->format, format, format_len);
- if (strcmp(e->format, UST_MARKER_NOARGS) == 0)
- e->call = ust_marker_probe_cb_noarg;
- else
- e->call = ust_marker_probe_cb;
- __ust_marker(metadata, core_marker_format, NULL,
- "channel %s name %s format %s",
- e->channel, e->name, e->format);
- } else {
- e->format = NULL;
- e->call = ust_marker_probe_cb;
- }
- e->single.func = __ust_marker_empty_function;
- e->single.probe_private = NULL;
- e->multi = NULL;
- e->ptype = 0;
- e->format_allocated = 0;
- e->refcount = 0;
- cds_hlist_add_head(&e->hlist, head);
- return e;
-}
-
-/*
- * Remove the ust_marker from the ust_marker hash table. Must be called with mutex_lock
- * held.
- */
-static int remove_ust_marker(const char *channel, const char *name)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct ust_marker_entry *e;
- int found = 0;
- size_t channel_len = strlen(channel) + 1;
- size_t name_len = strlen(name) + 1;
- u32 hash;
- int ret;
-
- hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
- head = &ust_marker_table[hash & ((1 << UST_MARKER_HASH_BITS)-1)];
- cds_hlist_for_each_entry(e, node, head, hlist) {
- if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
- found = 1;
- break;
- }
- }
- if (!found)
- return -ENOENT;
- if (e->single.func != __ust_marker_empty_function)
- return -EBUSY;
- cds_hlist_del(&e->hlist);
- if (e->format_allocated)
- free(e->format);
- ret = ltt_channels_unregister(e->channel);
- WARN_ON(ret);
- free(e);
- return 0;
-}
-
-/*
- * Set the mark_entry format to the format found in the element.
- */
-static int ust_marker_set_format(struct ust_marker_entry *entry, const char *format)
-{
- entry->format = strdup(format);
- if (!entry->format)
- return -ENOMEM;
- entry->format_allocated = 1;
-
- __ust_marker(metadata, core_marker_format, NULL,
- "channel %s name %s format %s",
- entry->channel, entry->name, entry->format);
- return 0;
-}
-
-/*
- * Sets the probe callback corresponding to one ust_marker.
- */
-static int set_ust_marker(struct ust_marker_entry *entry, struct ust_marker *elem,
- int active)
-{
- int ret = 0;
- WARN_ON(strcmp(entry->name, elem->name) != 0);
-
- if (entry->format) {
- if (strcmp(entry->format, elem->format) != 0) {
- ERR("Format mismatch for probe %s (%s), ust_marker (%s)",
- entry->name,
- entry->format,
- elem->format);
- return -EPERM;
- }
- } else {
- ret = ust_marker_set_format(entry, elem->format);
- if (ret)
- return ret;
- }
-
- /*
- * probe_cb setup (statically known) is done here. It is
- * asynchronous with the rest of execution, therefore we only
- * pass from a "safe" callback (with argument) to an "unsafe"
- * callback (does not set arguments).
- */
- elem->call = entry->call;
- elem->channel_id = entry->channel_id;
- elem->event_id = entry->event_id;
- /*
- * Sanity check :
- * We only update the single probe private data when the ptr is
- * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
- */
- WARN_ON(elem->single.func != __ust_marker_empty_function
- && elem->single.probe_private != entry->single.probe_private
- && !elem->ptype);
- elem->single.probe_private = entry->single.probe_private;
- /*
- * Make sure the private data is valid when we update the
- * single probe ptr.
- */
- cmm_smp_wmb();
- elem->single.func = entry->single.func;
- /*
- * We also make sure that the new probe callbacks array is consistent
- * before setting a pointer to it.
- */
- rcu_assign_pointer(elem->multi, entry->multi);
- /*
- * Update the function or multi probe array pointer before setting the
- * ptype.
- */
- cmm_smp_wmb();
- elem->ptype = entry->ptype;
-
- if (elem->tp_name && (active ^ elem->state)) {
- WARN_ON(!elem->tp_cb);
- /*
- * It is ok to directly call the probe registration because type
- * checking has been done in the __ust_marker_tp() macro.
- */
-
- if (active) {
- ret = tracepoint_probe_register_noupdate(
- elem->tp_name,
- elem->tp_cb, NULL);
- } else {
- /*
- * tracepoint_probe_update_all() must be called
- * before the library containing tp_cb is unloaded.
- */
- ret = tracepoint_probe_unregister_noupdate(
- elem->tp_name,
- elem->tp_cb, NULL);
- }
- }
- elem->state = active;
-
- return ret;
-}
-
-/*
- * Disable a ust_marker and its probe callback.
- * Note: only waiting an RCU period after setting elem->call to the empty
- * function ensures that the original callback is not used anymore. This is
- * ensured by the rcu_read_lock around the call site.
- */
-static void disable_ust_marker(struct ust_marker *elem)
-{
- int ret;
-
- /* leave "call" as is. It is known statically. */
- if (elem->tp_name && elem->state) {
- WARN_ON(!elem->tp_cb);
- /*
- * It is ok to directly call the probe registration because type
- * checking has been done in the __ust_marker_tp() macro.
- */
- /*
- * tracepoint_probe_update_all() must be called
- * before the module containing tp_cb is unloaded.
- */
- ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
- elem->tp_cb, NULL);
- WARN_ON(ret);
- }
- elem->state = 0;
- elem->single.func = __ust_marker_empty_function;
- /* Update the function before setting the ptype */
- cmm_smp_wmb();
- elem->ptype = 0; /* single probe */
- /*
- * Leave the private data and channel_id/event_id there, because removal
- * is racy and should be done only after an RCU period. These are never
- * used until the next initialization anyway.
- */
-}
-
-/*
- * is_ust_marker_enabled - Check if a ust_marker is enabled
- * @channel: channel name
- * @name: ust_marker name
- *
- * Returns 1 if the ust_marker is enabled, 0 if disabled.
- */
-int is_ust_marker_enabled(const char *channel, const char *name)
-{
- struct ust_marker_entry *entry;
-
- lock_ust_marker();
- entry = get_ust_marker(channel, name);
- unlock_ust_marker();
-
- return entry && !!entry->refcount;
-}
-
-/**
- * ust_marker_update_probe_range - Update a probe range
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Updates the probe callback corresponding to a range of ust_marker.
- */
-static
-void ust_marker_update_probe_range(struct ust_marker * const *begin,
- struct ust_marker * const *end)
-{
- struct ust_marker * const *iter;
- struct ust_marker_entry *mark_entry;
-
- for (iter = begin; iter < end; iter++) {
- if (!*iter)
- continue; /* skip dummy */
- mark_entry = get_ust_marker((*iter)->channel, (*iter)->name);
- if (mark_entry) {
- set_ust_marker(mark_entry, *iter, !!mark_entry->refcount);
- /*
- * ignore error, continue
- */
- } else {
- disable_ust_marker(*iter);
- }
- }
-}
-
-static void lib_update_ust_marker(void)
-{
- struct ust_marker_lib *lib;
-
- lock_ust_marker();
- cds_list_for_each_entry(lib, &ust_marker_libs, list)
- ust_marker_update_probe_range(lib->ust_marker_start,
- lib->ust_marker_start + lib->ust_marker_count);
- unlock_ust_marker();
-}
-
-/*
- * Update probes, removing the faulty probes.
- *
- * Internal callback only changed before the first probe is connected to it.
- * Single probe private data can only be changed on 0 -> 1 and 2 -> 1
- * transitions. All other transitions will leave the old private data valid.
- * This makes the non-atomicity of the callback/private data updates valid.
- *
- * "special case" updates :
- * 0 -> 1 callback
- * 1 -> 0 callback
- * 1 -> 2 callbacks
- * 2 -> 1 callbacks
- * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates.
- * Side effect: ust_marker_set_format may delete the ust_marker entry (creating a
- * replacement).
- */
-static void ust_marker_update_probes(void)
-{
- lib_update_ust_marker();
- tracepoint_probe_update_all();
-}
-
-/**
- * ust_marker_probe_register - Connect a probe to a ust_marker
- * @channel: ust_marker channel
- * @name: ust_marker name
- * @format: format string
- * @probe: probe handler
- * @probe_private: probe private data
- *
- * private data must be a valid allocated memory address, or NULL.
- * Returns 0 if ok, error value on error.
- * The probe address must at least be aligned on the architecture pointer size.
- */
-int ust_marker_probe_register(const char *channel, const char *name,
- const char *format, ust_marker_probe_func *probe,
- void *probe_private)
-{
- struct ust_marker_entry *entry;
- int ret = 0, ret_err;
- struct ust_marker_probe_array *old;
- int first_probe = 0;
-
- lock_ust_marker();
- entry = get_ust_marker(channel, name);
- if (!entry) {
- first_probe = 1;
- entry = add_ust_marker(channel, name, format);
- if (IS_ERR(entry))
- ret = PTR_ERR(entry);
- if (ret)
- goto end;
- ret = ltt_channels_register(channel);
- if (ret)
- goto error_remove_ust_marker;
- ret = ltt_channels_get_index_from_name(channel);
- if (ret < 0)
- goto error_unregister_channel;
- entry->channel_id = ret;
- ret = ltt_channels_get_event_id(channel, name);
- if (ret < 0)
- goto error_unregister_channel;
- entry->event_id = ret;
- ret = 0;
- __ust_marker(metadata, core_marker_id, NULL,
- "channel %s name %s event_id %hu "
- "int #1u%zu long #1u%zu pointer #1u%zu "
- "size_t #1u%zu alignment #1u%u",
- channel, name, entry->event_id,
- sizeof(int), sizeof(long), sizeof(void *),
- sizeof(size_t), ltt_get_alignment());
- } else if (format) {
- if (!entry->format)
- ret = ust_marker_set_format(entry, format);
- else if (strcmp(entry->format, format))
- ret = -EPERM;
- if (ret)
- goto end;
- }
-
- old = ust_marker_entry_add_probe(entry, probe, probe_private);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- if (first_probe)
- goto error_unregister_channel;
- else
- goto end;
- }
- unlock_ust_marker();
-
- /* Activate ust_marker if necessary */
- ust_marker_update_probes();
-
- if (old) {
- synchronize_rcu();
- free_old_closure(&old->rcu);
- }
- return ret;
-
-error_unregister_channel:
- ret_err = ltt_channels_unregister(channel);
- WARN_ON(ret_err);
-error_remove_ust_marker:
- ret_err = remove_ust_marker(channel, name);
- WARN_ON(ret_err);
-end:
- unlock_ust_marker();
- return ret;
-}
-
-/**
- * ust_marker_probe_unregister - Disconnect a probe from a ust_marker
- * @channel: ust_marker channel
- * @name: ust_marker name
- * @probe: probe function pointer
- * @probe_private: probe private data
- *
- * Returns the private data given to ust_marker_probe_register, or an ERR_PTR().
- * We do not need to call a synchronize_sched to make sure the probes have
- * finished running before doing a module unload, because the module unload
- * itself uses stop_machine(), which ensures that every preempt-disabled section
- * has finished.
- */
-int ust_marker_probe_unregister(const char *channel, const char *name,
- ust_marker_probe_func *probe, void *probe_private)
-{
- struct ust_marker_entry *entry;
- struct ust_marker_probe_array *old;
- int ret = 0;
-
- lock_ust_marker();
- entry = get_ust_marker(channel, name);
- if (!entry) {
- ret = -ENOENT;
- goto end;
- }
- old = ust_marker_entry_remove_probe(entry, probe, probe_private);
- unlock_ust_marker();
-
- ust_marker_update_probes();
-
- if (old) {
- synchronize_rcu();
- free_old_closure(&old->rcu);
- }
- return ret;
-
-end:
- unlock_ust_marker();
- return ret;
-}
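To show how the two registration entry points above fit together, here is a minimal sketch; the probe function, channel name, event name and format string are invented for illustration and are not taken from this file:

static void my_probe(const struct ust_marker *mdata, void *probe_private,
		     void *call_private, const char *fmt, va_list *args)
{
	/* Consume the ust_marker arguments described by fmt here. */
}

static void my_probe_attach_detach(void)
{
	int ret;

	/* Connect my_probe to the hypothetical "my_chan"/"my_event" ust_marker. */
	ret = ust_marker_probe_register("my_chan", "my_event", "value %d",
					my_probe, NULL);
	if (ret)
		return;
	/* ... tracing runs with my_probe attached ... */
	ret = ust_marker_probe_unregister("my_chan", "my_event",
					  my_probe, NULL);
}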
-
-static struct ust_marker_entry *
-get_ust_marker_from_private_data(ust_marker_probe_func *probe,
- void *probe_private)
-{
- struct ust_marker_entry *entry;
- unsigned int i;
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
-
- for (i = 0; i < UST_MARKER_TABLE_SIZE; i++) {
- head = &ust_marker_table[i];
- cds_hlist_for_each_entry(entry, node, head, hlist) {
- if (!entry->ptype) {
- if (entry->single.func == probe
- && entry->single.probe_private
- == probe_private)
- return entry;
- } else {
- struct ust_marker_probe_array *closure;
- int j;
-
- closure = entry->multi;
- /* Use a separate index: i iterates over the hash table. */
- for (j = 0; closure->c[j].func; j++) {
- if (closure->c[j].func == probe &&
- closure->c[j].probe_private
- == probe_private)
- return entry;
- }
- }
- }
- }
- return NULL;
-}
-
-/**
- * ust_marker_probe_unregister_private_data - Disconnect a probe from a ust_marker
- * @probe: probe function
- * @probe_private: probe private data
- *
- * Unregister a probe by providing the registered private data.
- * Only removes the first ust_marker found in hash table.
- * Return 0 on success or error value.
- * We do not need to call a synchronize_sched to make sure the probes have
- * finished running before doing a module unload, because the module unload
- * itself uses stop_machine(), which ensures that every preempt-disabled section
- * has finished.
- */
-int ust_marker_probe_unregister_private_data(ust_marker_probe_func *probe,
- void *probe_private)
-{
- struct ust_marker_entry *entry;
- int ret = 0;
- struct ust_marker_probe_array *old;
- char *channel = NULL, *name = NULL;
-
- lock_ust_marker();
- entry = get_ust_marker_from_private_data(probe, probe_private);
- if (!entry) {
- ret = -ENOENT;
- goto unlock;
- }
- old = ust_marker_entry_remove_probe(entry, NULL, probe_private);
- channel = strdup(entry->channel);
- name = strdup(entry->name);
- /* Ignore busy error message */
- remove_ust_marker(channel, name);
- unlock_ust_marker();
-
- ust_marker_update_probes();
-
- if (old) {
- synchronize_rcu();
- free_old_closure(&old->rcu);
- }
- goto end;
-
-unlock:
- unlock_ust_marker();
-end:
- free(channel);
- free(name);
- return ret;
-}
-
-/**
- * ust_marker_get_private_data - Get a ust_marker's probe private data
- * @channel: ust_marker channel
- * @name: ust_marker name
- * @probe: probe to match
- * @num: get the nth matching probe's private data
- *
- * Returns the nth private data pointer (starting from 0) matching, or an
- * ERR_PTR.
- * The private data pointer should _only_ be dereferenced if the caller is the
- * owner of the data, or its content could vanish. This is mostly used to
- * confirm that a caller is the owner of a registered probe.
- */
-void *ust_marker_get_private_data(const char *channel, const char *name,
- ust_marker_probe_func *probe, int num)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct ust_marker_entry *e;
- size_t channel_len = strlen(channel) + 1;
- size_t name_len = strlen(name) + 1;
- int i;
- u32 hash;
-
- hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
- head = &ust_marker_table[hash & ((1 << UST_MARKER_HASH_BITS)-1)];
- cds_hlist_for_each_entry(e, node, head, hlist) {
- if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
- if (!e->ptype) {
- if (num == 0 && e->single.func == probe)
- return e->single.probe_private;
- } else {
- struct ust_marker_probe_array *closure;
- int match = 0;
- closure = e->multi;
- for (i = 0; closure->c[i].func; i++) {
- if (closure->c[i].func != probe)
- continue;
- if (match++ == num)
- return closure->c[i].probe_private;
- }
- }
- break;
- }
- }
- return ERR_PTR(-ENOENT);
-}
-
-/**
- * ust_marker_get_iter_range - Get a next ust_marker iterator given a range.
- * @ust_marker: current ust_marker (in), next ust_marker (out)
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Returns whether a next ust_marker has been found (1) or not (0).
- * Will return the first ust_marker in the range if the input ust_marker is NULL.
- * Called with markers mutex held.
- */
-static
-int ust_marker_get_iter_range(struct ust_marker * const **ust_marker,
- struct ust_marker * const *begin,
- struct ust_marker * const *end)
-{
- if (!*ust_marker && begin != end)
- *ust_marker = begin;
- while (*ust_marker >= begin && *ust_marker < end) {
- if (!**ust_marker)
- (*ust_marker)++; /* skip dummy */
- else
- return 1;
- }
- return 0;
-}
-
-/*
- * Returns 0 if current not found.
- * Returns 1 if current found.
- * Called with markers mutex held.
- */
-static
-int lib_get_iter_ust_marker(struct ust_marker_iter *iter)
-{
- struct ust_marker_lib *iter_lib;
- int found = 0;
-
- cds_list_for_each_entry(iter_lib, &ust_marker_libs, list) {
- if (iter_lib < iter->lib)
- continue;
- else if (iter_lib > iter->lib)
- iter->ust_marker = NULL;
- found = ust_marker_get_iter_range(&iter->ust_marker,
- iter_lib->ust_marker_start,
- iter_lib->ust_marker_start + iter_lib->ust_marker_count);
- if (found) {
- iter->lib = iter_lib;
- break;
- }
- }
- return found;
-}
-
-/* Called with markers mutex held. */
-static void ust_marker_get_iter(struct ust_marker_iter *iter)
-{
- int found = 0;
-
- found = lib_get_iter_ust_marker(iter);
- if (!found)
- ust_marker_iter_reset(iter);
-}
-
-void ust_marker_iter_start(struct ust_marker_iter *iter)
-{
- lock_ust_marker();
- ust_marker_get_iter(iter);
-}
-
-/* Called with markers mutex held. */
-void ust_marker_iter_next(struct ust_marker_iter *iter)
-{
- iter->ust_marker++;
- /*
- * iter->ust_marker may be invalid because we blindly incremented it.
- * Make sure it is valid by marshalling on the ust_marker, getting the
- * ust_marker from following modules if necessary.
- */
- ust_marker_get_iter(iter);
-}
-
-void ust_marker_iter_stop(struct ust_marker_iter *iter)
-{
- unlock_ust_marker();
-}
-
-void ust_marker_iter_reset(struct ust_marker_iter *iter)
-{
- iter->lib = NULL;
- iter->ust_marker = NULL;
-}
-
-void ltt_dump_ust_marker_state(struct ust_trace *trace)
-{
- struct ust_marker_entry *entry;
- struct ltt_probe_private_data call_data;
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- unsigned int i;
-
- lock_ust_marker();
- call_data.trace = trace;
- call_data.serializer = NULL;
-
- for (i = 0; i < UST_MARKER_TABLE_SIZE; i++) {
- head = &ust_marker_table[i];
- cds_hlist_for_each_entry(entry, node, head, hlist) {
- __ust_marker(metadata, core_marker_id,
- &call_data,
- "channel %s name %s event_id %hu "
- "int #1u%zu long #1u%zu pointer #1u%zu "
- "size_t #1u%zu alignment #1u%u",
- entry->channel,
- entry->name,
- entry->event_id,
- sizeof(int), sizeof(long),
- sizeof(void *), sizeof(size_t),
- ltt_get_alignment());
- if (entry->format)
- __ust_marker(metadata,
- core_marker_format,
- &call_data,
- "channel %s name %s format %s",
- entry->channel,
- entry->name,
- entry->format);
- }
- }
- unlock_ust_marker();
-}
-
-void ust_marker_set_new_ust_marker_cb(void (*cb)(struct ust_marker *))
-{
- new_ust_marker_cb = cb;
-}
-
-static void new_ust_marker(struct ust_marker * const *start,
- struct ust_marker * const *end)
-{
- if (new_ust_marker_cb) {
- struct ust_marker * const *m;
-
- for (m = start; m < end; m++) {
- if (*m)
- new_ust_marker_cb(*m);
- }
- }
-}
-
-int ust_marker_register_lib(struct ust_marker * const *ust_marker_start,
- int ust_marker_count)
-{
- struct ust_marker_lib *pl, *iter;
-
- pl = (struct ust_marker_lib *) zmalloc(sizeof(struct ust_marker_lib));
- if (!pl)
- return -ENOMEM;
-
- pl->ust_marker_start = ust_marker_start;
- pl->ust_marker_count = ust_marker_count;
-
- lock_ust_marker();
-
- /*
- * We sort the libs by struct lib pointer address.
- */
- cds_list_for_each_entry_reverse(iter, &ust_marker_libs, list) {
- BUG_ON(iter == pl); /* Should never be in the list twice */
- if (iter < pl) {
- /* We belong to the location right after iter. */
- cds_list_add(&pl->list, &iter->list);
- goto lib_added;
- }
- }
- /* We should be added at the head of the list */
- cds_list_add(&pl->list, &ust_marker_libs);
-lib_added:
- unlock_ust_marker();
-
- new_ust_marker(ust_marker_start, ust_marker_start + ust_marker_count);
-
- /* TODO: update just the loaded lib */
- lib_update_ust_marker();
-
- DBG("just registered a ust_marker section from %p and having %d ust_marker (minus dummy ust_marker)", ust_marker_start, ust_marker_count);
-
- return 0;
-}
-
-int ust_marker_unregister_lib(struct ust_marker * const *ust_marker_start)
-{
- struct ust_marker_lib *lib;
-
- lock_ust_marker();
- cds_list_for_each_entry(lib, &ust_marker_libs, list) {
- if(lib->ust_marker_start == ust_marker_start) {
- struct ust_marker_lib *lib2free = lib;
- cds_list_del(&lib->list);
- free(lib2free);
- break;
- }
- }
- unlock_ust_marker();
-
- return 0;
-}
-
-void __attribute__((constructor)) init_ust_marker(void)
-{
- if (!initialized) {
- init_tracepoint();
- ust_marker_register_lib(__start___ust_marker_ptrs,
- __stop___ust_marker_ptrs
- - __start___ust_marker_ptrs);
- initialized = 1;
- }
-}
-
-void __attribute__((destructor)) destroy_ust_marker(void)
-{
- ust_marker_unregister_lib(__start___ust_marker_ptrs);
-}
+++ /dev/null
-/*
- * LTTng serializing code.
- *
- * Copyright Mathieu Desnoyers, March 2007.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- *
- * There is some weirdness about passing a va_list, and then a pointer to it,
- * to functions (related to array argument passing): va_list seems to be
- * implemented as an array on x86_64, but not on i386. This is why we pass a
- * va_list * to ltt_vtrace.
- */
-
-#define _GNU_SOURCE
-#define _LGPL_SOURCE
-#include <unistd.h>
-#include <sys/syscall.h>
-#include <stdarg.h>
-#include <string.h>
-#include <stdint.h>
-#include <stdio.h>
-
-#include <urcu-bp.h>
-#include <urcu/rculist.h>
-
-#include <ust/core.h>
-#include <ust/clock.h>
-#include "buffers.h"
-#include "tracer.h"
-#include "usterr_signal_safe.h"
-#include "ust_snprintf.h"
-
-/*
- * Because UST core defines a non-const PAGE_SIZE, define PAGE_SIZE_STATIC here.
- * It is just an approximation for the tracer stack.
- */
-#define PAGE_SIZE_STATIC 4096
-
-enum ltt_type {
- LTT_TYPE_SIGNED_INT,
- LTT_TYPE_UNSIGNED_INT,
- LTT_TYPE_STRING,
- LTT_TYPE_NONE,
-};
-
-/*
- * Special stack for the tracer. Keeps serialization offsets for each field.
- * Per-thread. Deals with reentrancy from signals by simply ensuring that
- * interrupting signals put the stack back to its original position.
- */
-#define TRACER_STACK_LEN (PAGE_SIZE_STATIC / sizeof(unsigned long))
-static unsigned long __thread tracer_stack[TRACER_STACK_LEN];
-
-static unsigned int __thread tracer_stack_pos;
-
-#define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
-
-/*
- * Inspired from vsnprintf
- *
- * The serialization format string supports the basic printf format strings.
- * In addition, it defines new formats that can be used to serialize more
- * complex/non portable data structures.
- *
- * Typical use:
- *
- * field_name %ctype
- * field_name #tracetype %ctype
- * field_name #tracetype %ctype1 %ctype2 ...
- *
- * A conversion is performed between format string types supported by GCC and
- * the trace type requested. GCC type is used to perform type checking on format
- * strings. Trace type is used to specify the exact binary representation
- * in the trace. A mapping is done between one or more GCC types to one trace
- * type. Sign extension, if required by the conversion, is performed following
- * the trace type.
- *
- * If a gcc format is not declared with a trace format, the gcc format is
- * also used as binary representation in the trace.
- *
- * Strings are supported with %s.
- * A single tracetype (sequence) can take multiple c types as parameter.
- *
- * c types:
- *
- * see printf(3).
- *
- * Note: to write a uint32_t in a trace, the following expression is recommended
- * so that it is portable:
- *
- * ("#4u%lu", (unsigned long)var)
- *
- * trace types:
- *
- * Serialization specific formats :
- *
- * Fixed size integers
- * #1u writes uint8_t
- * #2u writes uint16_t
- * #4u writes uint32_t
- * #8u writes uint64_t
- * #1d writes int8_t
- * #2d writes int16_t
- * #4d writes int32_t
- * #8d writes int64_t
- * i.e.:
- * #1u%lu #2u%lu #4d%lu #8d%lu #llu%hu #d%lu
- *
- * Attributes:
- *
- * n: (for network byte order)
- * #ntracetype%ctype
- * is written in the trace in network byte order.
- *
- * i.e.: #bn4u%lu, #n%lu, #b%u
- *
- * TODO (eventually)
- * Variable length sequence
- * #a #tracetype1 #tracetype2 %array_ptr %elem_size %num_elems
- * In the trace:
- * #a specifies that this is a sequence
- * #tracetype1 is the type of elements in the sequence
- * #tracetype2 is the type of the element count
- * GCC input:
- * array_ptr is a pointer to an array that contains members of size
- * elem_size.
- * num_elems is the number of elements in the array.
- * i.e.: #a #lu #lu %p %lu %u
- *
- * Callback
- * #k callback (taken from the probe data)
- * The following % arguments are expected by the callback
- *
- * i.e.: #a #lu #lu #k %p
- *
- * Note: No conversion is done from floats to integers, nor from integers to
- * floats between c types and trace types. float conversion from double to float
- * or from float to double is also not supported.
- *
- * REMOVE
- * %*b expects sizeof(data), data
- * where sizeof(data) is 1, 2, 4 or 8
- *
- * Fixed length struct, union or array.
- * FIXME: unable to extract those sizes statically.
- * %*r expects sizeof(*ptr), ptr
- * %*.*r expects sizeof(*ptr), __alignof__(*ptr), ptr
- * struct and unions removed.
- * Fixed length array:
- * [%p]#a[len #tracetype]
- * i.e.: [%p]#a[12 #lu]
- *
- * Variable length sequence
- * %*.*:*v expects sizeof(*ptr), __alignof__(*ptr), elem_num, ptr
- * where elem_num is the number of elements in the sequence
- */
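To make the format rules above concrete, here are two illustrative serialization format strings; the field names are invented for the example and do not come from this file:

/*
 * A 32-bit counter stored as uint32_t in the trace (passed as unsigned long,
 * as recommended above), plus a 16-bit id written in network byte order:
 *
 *	"count #4u%lu id #n2u%hu"
 *
 * A length and a string, letting the GCC types double as trace types:
 *
 *	"len %zu path %s"
 */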
-static inline const char *parse_trace_type(const char *fmt,
- char *trace_size, enum ltt_type *trace_type,
- unsigned long *attributes)
-{
- int qualifier; /* 'h', 'l', or 'L' for integer fields */
- /* 'z' support added 23/7/1999 S.H. */
- /* 'z' changed to 'Z' --davidm 1/25/99 */
- /* 't' added for ptrdiff_t */
-
- /* parse attributes. */
-repeat:
- switch (*fmt) {
- case 'n':
- *attributes |= LTT_ATTRIBUTE_NETWORK_BYTE_ORDER;
- ++fmt;
- goto repeat;
- }
-
- /* get the conversion qualifier */
- qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
- *fmt == 'S' || *fmt == '1' || *fmt == '2' ||
- *fmt == '4' || *fmt == '8') {
- qualifier = *fmt;
- ++fmt;
- if (qualifier == 'l' && *fmt == 'l') {
- qualifier = 'L';
- ++fmt;
- }
- }
-
- switch (*fmt) {
- case 'c':
- *trace_type = LTT_TYPE_UNSIGNED_INT;
- *trace_size = sizeof(unsigned char);
- goto parse_end;
- case 's':
- *trace_type = LTT_TYPE_STRING;
- goto parse_end;
- case 'p':
- *trace_type = LTT_TYPE_UNSIGNED_INT;
- *trace_size = sizeof(void *);
- goto parse_end;
- case 'd':
- case 'i':
- *trace_type = LTT_TYPE_SIGNED_INT;
- break;
- case 'o':
- case 'u':
- case 'x':
- case 'X':
- *trace_type = LTT_TYPE_UNSIGNED_INT;
- break;
- default:
- if (!*fmt)
- --fmt;
- goto parse_end;
- }
- switch (qualifier) {
- case 'L':
- *trace_size = sizeof(long long);
- break;
- case 'l':
- *trace_size = sizeof(long);
- break;
- case 'Z':
- case 'z':
- *trace_size = sizeof(size_t);
- break;
-//ust// case 't':
-//ust// *trace_size = sizeof(ptrdiff_t);
-//ust// break;
- case 'h':
- *trace_size = sizeof(short);
- break;
- case '1':
- *trace_size = sizeof(uint8_t);
- break;
- case '2':
- *trace_size = sizeof(uint16_t);
- break;
- case '4':
- *trace_size = sizeof(uint32_t);
- break;
- case '8':
- *trace_size = sizeof(uint64_t);
- break;
- default:
- *trace_size = sizeof(int);
- }
-
-parse_end:
- return fmt;
-}
-
-/*
- * Restrictions:
- * Field width and precision are *not* supported.
- * %n not supported.
- */
-static inline
-const char *parse_c_type(const char *fmt, char *c_size, enum ltt_type *c_type,
- char *outfmt)
-{
- int qualifier; /* 'h', 'l', or 'L' for integer fields */
- /* 'z' support added 23/7/1999 S.H. */
- /* 'z' changed to 'Z' --davidm 1/25/99 */
- /* 't' added for ptrdiff_t */
-
- /* process flags : ignore standard print formats for now. */
-repeat:
- switch (*fmt) {
- case '-':
- case '+':
- case ' ':
- case '#':
- case '0':
- ++fmt;
- goto repeat;
- }
-
- /* get the conversion qualifier */
- qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
- *fmt == 'S') {
- qualifier = *fmt;
- ++fmt;
- if (qualifier == 'l' && *fmt == 'l') {
- qualifier = 'L';
- ++fmt;
- }
- }
-
- if (outfmt) {
- if (qualifier != -1)
- *outfmt++ = (char)qualifier;
- *outfmt++ = *fmt;
- *outfmt = 0;
- }
-
- switch (*fmt) {
- case 'c':
- *c_type = LTT_TYPE_UNSIGNED_INT;
- *c_size = sizeof(unsigned char);
- goto parse_end;
- case 's':
- *c_type = LTT_TYPE_STRING;
- goto parse_end;
- case 'p':
- *c_type = LTT_TYPE_UNSIGNED_INT;
- *c_size = sizeof(void *);
- goto parse_end;
- case 'd':
- case 'i':
- *c_type = LTT_TYPE_SIGNED_INT;
- break;
- case 'o':
- case 'u':
- case 'x':
- case 'X':
- *c_type = LTT_TYPE_UNSIGNED_INT;
- break;
- default:
- if (!*fmt)
- --fmt;
- goto parse_end;
- }
- switch (qualifier) {
- case 'L':
- *c_size = sizeof(long long);
- break;
- case 'l':
- *c_size = sizeof(long);
- break;
- case 'Z':
- case 'z':
- *c_size = sizeof(size_t);
- break;
-//ust// case 't':
-//ust// *c_size = sizeof(ptrdiff_t);
-//ust// break;
- case 'h':
- *c_size = sizeof(short);
- break;
- default:
- *c_size = sizeof(int);
- }
-
-parse_end:
- return fmt;
-}
-
-static inline size_t serialize_trace_data(struct ust_buffer *buf,
- size_t buf_offset,
- char trace_size, enum ltt_type trace_type,
- char c_size, enum ltt_type c_type,
- unsigned int *stack_pos_ctx,
- int *largest_align,
- va_list *args)
-{
- union {
- unsigned long v_ulong;
- uint64_t v_uint64;
- struct {
- const char *s;
- size_t len;
- } v_string;
- } tmp;
-
- /*
- * Be careful about sign extension here.
- * Sign extension is done with the destination (trace) type.
- */
- switch (trace_type) {
- case LTT_TYPE_SIGNED_INT:
- switch (c_size) {
- case 1:
- tmp.v_ulong = (long)(int8_t)va_arg(*args, int);
- break;
- case 2:
- tmp.v_ulong = (long)(int16_t)va_arg(*args, int);
- break;
- case 4:
- tmp.v_ulong = (long)(int32_t)va_arg(*args, int);
- break;
- case 8:
- tmp.v_uint64 = va_arg(*args, int64_t);
- break;
- default:
- BUG();
- }
- break;
- case LTT_TYPE_UNSIGNED_INT:
- switch (c_size) {
- case 1:
- tmp.v_ulong = (unsigned long)(uint8_t)va_arg(*args, unsigned int);
- break;
- case 2:
- tmp.v_ulong = (unsigned long)(uint16_t)va_arg(*args, unsigned int);
- break;
- case 4:
- tmp.v_ulong = (unsigned long)(uint32_t)va_arg(*args, unsigned int);
- break;
- case 8:
- tmp.v_uint64 = va_arg(*args, uint64_t);
- break;
- default:
- BUG();
- }
- break;
- case LTT_TYPE_STRING:
- tmp.v_string.s = va_arg(*args, const char *);
- if ((unsigned long)tmp.v_string.s < PAGE_SIZE)
- tmp.v_string.s = "<NULL>";
- if (!buf) {
- /*
- * Reserve tracer stack entry.
- */
- tracer_stack_pos++;
- assert(tracer_stack_pos <= TRACER_STACK_LEN);
- cmm_barrier();
- tracer_stack[*stack_pos_ctx] =
- strlen(tmp.v_string.s) + 1;
- }
- tmp.v_string.len = tracer_stack[(*stack_pos_ctx)++];
- if (buf)
- ust_buffers_strncpy(buf, buf_offset, tmp.v_string.s,
- tmp.v_string.len);
- buf_offset += tmp.v_string.len;
- goto copydone;
- default:
- BUG();
- }
-
- /*
- * If trace_size is lower or equal to 4 bytes, there is no sign
- * extension to do because we are already encoded in a long. Therefore,
- * we can combine signed and unsigned ops. 4 bytes float also works
- * with this, because we do a simple copy of 4 bytes into 4 bytes
- * without manipulation (and we do not support conversion from integers
- * to floats).
- * It is also the case if c_size is 8 bytes, which is the largest
- * possible integer.
- */
- if (ltt_get_alignment()) {
- buf_offset += ltt_align(buf_offset, trace_size);
- if (largest_align)
- *largest_align = max_t(int, *largest_align, trace_size);
- }
- if (trace_size <= 4 || c_size == 8) {
- if (buf) {
- switch (trace_size) {
- case 1:
- if (c_size == 8)
- ust_buffers_write(buf, buf_offset,
- (uint8_t[]){ (uint8_t)tmp.v_uint64 },
- sizeof(uint8_t));
- else
- ust_buffers_write(buf, buf_offset,
- (uint8_t[]){ (uint8_t)tmp.v_ulong },
- sizeof(uint8_t));
- break;
- case 2:
- if (c_size == 8)
- ust_buffers_write(buf, buf_offset,
- (uint16_t[]){ (uint16_t)tmp.v_uint64 },
- sizeof(uint16_t));
- else
- ust_buffers_write(buf, buf_offset,
- (uint16_t[]){ (uint16_t)tmp.v_ulong },
- sizeof(uint16_t));
- break;
- case 4:
- if (c_size == 8)
- ust_buffers_write(buf, buf_offset,
- (uint32_t[]){ (uint32_t)tmp.v_uint64 },
- sizeof(uint32_t));
- else
- ust_buffers_write(buf, buf_offset,
- (uint32_t[]){ (uint32_t)tmp.v_ulong },
- sizeof(uint32_t));
- break;
- case 8:
- /*
- * c_size cannot be other than 8 here because
- * trace_size > 4.
- */
- ust_buffers_write(buf, buf_offset,
- (uint64_t[]){ (uint64_t)tmp.v_uint64 },
- sizeof(uint64_t));
- break;
- default:
- BUG();
- }
- }
- buf_offset += trace_size;
- goto copydone;
- } else {
- /*
- * Perform sign extension.
- */
- if (buf) {
- switch (trace_type) {
- case LTT_TYPE_SIGNED_INT:
- ust_buffers_write(buf, buf_offset,
- (int64_t[]){ (int64_t)tmp.v_ulong },
- sizeof(int64_t));
- break;
- case LTT_TYPE_UNSIGNED_INT:
- ust_buffers_write(buf, buf_offset,
- (uint64_t[]){ (uint64_t)tmp.v_ulong },
- sizeof(uint64_t));
- break;
- default:
- BUG();
- }
- }
- buf_offset += trace_size;
- goto copydone;
- }
-
-copydone:
- return buf_offset;
-}
-
-notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
- struct ltt_serialize_closure *closure,
- void *serialize_private,
- unsigned int stack_pos_ctx,
- int *largest_align,
- const char *fmt, va_list *args)
-{
- char trace_size = 0, c_size = 0; /*
- * 0 (unset), 1, 2, 4, 8 bytes.
- */
- enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE;
- unsigned long attributes = 0;
-
- for (; *fmt ; ++fmt) {
- switch (*fmt) {
- case '#':
- /* tracetypes (#) */
- ++fmt; /* skip first '#' */
- if (*fmt == '#') /* Escaped ## */
- break;
- attributes = 0;
- fmt = parse_trace_type(fmt, &trace_size, &trace_type,
- &attributes);
- break;
- case '%':
- /* c types (%) */
- ++fmt; /* skip first '%' */
- if (*fmt == '%') /* Escaped %% */
- break;
- fmt = parse_c_type(fmt, &c_size, &c_type, NULL);
- /*
- * Output c types if no trace types has been
- * specified.
- */
- if (!trace_size)
- trace_size = c_size;
- if (trace_type == LTT_TYPE_NONE)
- trace_type = c_type;
- if (c_type == LTT_TYPE_STRING)
- trace_type = LTT_TYPE_STRING;
- /* perform trace write */
- buf_offset = serialize_trace_data(buf,
- buf_offset, trace_size,
- trace_type, c_size, c_type,
- &stack_pos_ctx,
- largest_align,
- args);
- trace_size = 0;
- c_size = 0;
- trace_type = LTT_TYPE_NONE;
- c_type = LTT_TYPE_NONE;
- attributes = 0;
- break;
- /* default is to skip the text, doing nothing */
- }
- }
- return buf_offset;
-}
-
-/*
- * Calculate data size
- * Assume that the padding for alignment starts at a sizeof(void *) address.
- */
-static notrace size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
- void *serialize_private,
- unsigned int stack_pos_ctx, int *largest_align,
- const char *fmt, va_list *args)
-{
- ltt_serialize_cb cb = closure->callbacks[0];
- closure->cb_idx = 0;
- return (size_t)cb(NULL, 0, closure, serialize_private,
- stack_pos_ctx, largest_align, fmt, args);
-}
-
-static notrace
-void ltt_write_event_data(struct ust_buffer *buf, size_t buf_offset,
- struct ltt_serialize_closure *closure,
- void *serialize_private,
- unsigned int stack_pos_ctx,
- int largest_align,
- const char *fmt, va_list *args)
-{
- ltt_serialize_cb cb = closure->callbacks[0];
- closure->cb_idx = 0;
- buf_offset += ltt_align(buf_offset, largest_align);
- cb(buf, buf_offset, closure, serialize_private, stack_pos_ctx, NULL,
- fmt, args);
-}
-
-
-notrace void ltt_vtrace(const struct ust_marker *mdata, void *probe_data,
- void *call_data,
- const char *fmt, va_list *args)
-{
- int largest_align, ret;
- struct ltt_active_ust_marker *pdata;
- uint16_t eID;
- size_t data_size, slot_size;
- unsigned int chan_index;
- struct ust_channel *channel;
- struct ust_trace *trace, *dest_trace = NULL;
- struct ust_buffer *buf;
- u64 tsc;
- long buf_offset;
- va_list args_copy;
- struct ltt_serialize_closure closure;
- struct ltt_probe_private_data *private_data = call_data;
- void *serialize_private = NULL;
- int cpu;
- unsigned int rflags;
- unsigned int stack_pos_ctx;
-
- /*
- * This test is useful for quickly exiting static tracing when no trace
- * is active. We expect to have an active trace when we get here.
- */
- if (unlikely(ltt_traces.num_active_traces == 0))
- return;
-
- rcu_read_lock();
- cpu = ust_get_cpu();
-
- /* Force volatile access. */
- CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
- stack_pos_ctx = tracer_stack_pos;
- cmm_barrier();
-
- pdata = (struct ltt_active_ust_marker *)probe_data;
- eID = mdata->event_id;
- chan_index = mdata->channel_id;
- closure.callbacks = pdata->probe->callbacks;
-
- if (unlikely(private_data)) {
- dest_trace = private_data->trace;
- if (private_data->serializer)
- closure.callbacks = &private_data->serializer;
- serialize_private = private_data->serialize_private;
- }
-
- va_copy(args_copy, *args);
- /*
- * Assumes event payload to start on largest_align alignment.
- */
- largest_align = 1; /* must be non-zero for ltt_align */
- data_size = ltt_get_data_size(&closure, serialize_private,
- stack_pos_ctx, &largest_align,
- fmt, &args_copy);
- largest_align = min_t(int, largest_align, sizeof(void *));
- va_end(args_copy);
-
- /* Iterate on each trace */
- cds_list_for_each_entry_rcu(trace, <t_traces.head, list) {
- /*
- * Expect the filter to filter out events. If we get here,
- * we went through tracepoint activation as a first step.
- */
- if (unlikely(dest_trace && trace != dest_trace))
- continue;
- if (unlikely(!trace->active))
- continue;
- if (unlikely(!ltt_run_filter(trace, eID)))
- continue;
-#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
- rflags = LTT_RFLAG_ID_SIZE;
-#else
- if (unlikely(eID >= LTT_FREE_EVENTS))
- rflags = LTT_RFLAG_ID;
- else
- rflags = 0;
-#endif
- /*
- * Skip channels added after trace creation.
- */
- if (unlikely(chan_index >= trace->nr_channels))
- continue;
- channel = &trace->channels[chan_index];
- if (!channel->active)
- continue;
-
- /*
- * If a new cpu was plugged since the trace was started, we did
- * not add it to the trace, and therefore we write the event to
- * cpu 0.
- */
- if (cpu >= channel->n_cpus) {
- cpu = 0;
- }
-
- /* reserve space : header and data */
- ret = ltt_reserve_slot(channel, trace, data_size, largest_align,
- cpu, &buf, &slot_size, &buf_offset,
- &tsc, &rflags);
- if (unlikely(ret < 0))
- continue; /* buffer full */
-
- va_copy(args_copy, *args);
- /* FIXME : could probably encapsulate transport better. */
- buf = channel->buf[cpu];
- /* Out-of-order write : header and data */
- buf_offset = ltt_write_event_header(channel, buf, buf_offset,
- eID, data_size, tsc, rflags);
- ltt_write_event_data(buf, buf_offset, &closure,
- serialize_private,
- stack_pos_ctx, largest_align,
- fmt, &args_copy);
- va_end(args_copy);
- /* Out-of-order commit */
- ltt_commit_slot(channel, buf, buf_offset, data_size, slot_size);
- DBG("just commited event (%s/%s) at offset %ld and size %zd", mdata->channel, mdata->name, buf_offset, slot_size);
- }
-
- cmm_barrier();
- tracer_stack_pos = stack_pos_ctx;
- CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
-
- rcu_read_unlock();
-}
-
-notrace void ltt_trace(const struct ust_marker *mdata, void *probe_data,
- void *call_data,
- const char *fmt, ...)
-{
- va_list args;
-
- va_start(args, fmt);
- ltt_vtrace(mdata, probe_data, call_data, fmt, &args);
- va_end(args);
-}
-
-static notrace void skip_space(const char **ps)
-{
- while(**ps == ' ')
- (*ps)++;
-}
-
-static notrace void copy_token(char **out, const char **in)
-{
- while (**in != ' ' && **in != '\0') {
- **out = **in;
- (*out)++;
- (*in)++;
- }
-}
-
-/* serialize_to_text
- *
- * Given a format string and a va_list of arguments, convert them to a
- * human-readable string.
- *
- * @outbuf: the buffer to output the string to
- * @bufsize: the max size that can be used in outbuf
- * @fmt: the marker format string
- * @ap: a va_list that contains the arguments corresponding to fmt
- *
- * Return value: the number of chars that have been put in outbuf, excluding
- * the final \0, or, if the buffer was too small, the number of chars that
- * would have been written in outbuf if it had been large enough.
- *
- * outbuf may be NULL. The return value may then be used to allocate an
- * appropriate outbuf.
- *
- */
-
-notrace
-int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap)
-{
- int fmt_len = strlen(fmt);
- char *new_fmt = alloca(fmt_len + 1);
- const char *orig_fmt_p = fmt;
- char *new_fmt_p = new_fmt;
- char false_buf;
- int result;
- enum { none, cfmt, tracefmt, argname } prev_token = none;
-
- while (*orig_fmt_p != '\0') {
- if (*orig_fmt_p == '%') {
- prev_token = cfmt;
- copy_token(&new_fmt_p, &orig_fmt_p);
- }
- else if (*orig_fmt_p == '#') {
- prev_token = tracefmt;
- do {
- orig_fmt_p++;
- } while (*orig_fmt_p != ' ' && *orig_fmt_p != '\0');
- }
- else if (*orig_fmt_p == ' ') {
- if (prev_token == argname) {
- *new_fmt_p = '=';
- new_fmt_p++;
- }
- else if (prev_token == cfmt) {
- *new_fmt_p = ' ';
- new_fmt_p++;
- }
-
- skip_space(&orig_fmt_p);
- }
- else {
- prev_token = argname;
- copy_token(&new_fmt_p, &orig_fmt_p);
- }
- }
-
- *new_fmt_p = '\0';
-
- if (outbuf == NULL) {
- /* use this false_buffer for compatibility with pre-C99 */
- outbuf = &false_buf;
- bufsize = 1;
- }
- result = ust_safe_vsnprintf(outbuf, bufsize, new_fmt, ap);
-
- return result;
-}
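As a small worked example of the rewriting done above (the format string and values are illustrative): with fmt = "len #4u%lu path %s", the trace-type tokens are dropped and each argument name gains an '=', so the rewritten format becomes "len=%lu path=%s" and the output reads like "len=512 path=/tmp/x". A minimal usage sketch, assuming it is called with matching variadic arguments:

static int example_to_text(char *out, int outsize, ...)
{
	va_list ap;
	int len;

	va_start(ap, outsize);
	/* Rewrites "len #4u%lu path %s" into "len=%lu path=%s" internally. */
	len = serialize_to_text(out, outsize, "len #4u%lu path %s", ap);
	va_end(ap);
	return len;
}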
+++ /dev/null
-/* Copyright (C) 2009 Pierre-Marc Fournier
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/* This file contains the implementation of the UST listener thread, which
- * receives trace control commands. It also coordinates the initialization of
- * libust.
- */
-
-#define _GNU_SOURCE
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <pthread.h>
-#include <signal.h>
-#include <sys/epoll.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <regex.h>
-#include <urcu/uatomic.h>
-#include <urcu/list.h>
-
-#include <ust/marker.h>
-#include <ust/tracepoint.h>
-#include <ust/tracepoint-internal.h>
-#include <ust/tracectl.h>
-#include <ust/clock.h>
-#include "tracer.h"
-#include "usterr_signal_safe.h"
-#include "ustcomm.h"
-#include "buffers.h"
-#include "marker-control.h"
-
-/* This should only be accessed by the constructor, before the creation
- * of the listener, and then only by the listener.
- */
-s64 pidunique = -1LL;
-
-/* The process pid is used to detect a non-traceable fork
- * and allow the non-traceable fork to be ignored
- * by destructor sequences in libust
- */
-static pid_t processpid = 0;
-
-static struct ustcomm_header _receive_header;
-static struct ustcomm_header *receive_header = &_receive_header;
-static char receive_buffer[USTCOMM_BUFFER_SIZE];
-static char send_buffer[USTCOMM_BUFFER_SIZE];
-
-static int epoll_fd;
-
-/*
- * Listener thread data vs fork() protection mechanism. Ensures that no listener
- * thread mutexes and data structures are being concurrently modified or held by
- * other threads when fork() is executed.
- */
-static pthread_mutex_t listener_thread_data_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/* Mutex protecting listen_sock. Nests inside listener_thread_data_mutex. */
-static pthread_mutex_t listen_sock_mutex = PTHREAD_MUTEX_INITIALIZER;
-static struct ustcomm_sock *listen_sock;
-
-extern struct chan_info_struct chan_infos[];
-
-static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);
-
-/* Shared between the listener and the main thread; accessed with CMM_LOAD_SHARED()/CMM_STORE_SHARED(). */
-int buffers_to_export = 0;
-
-int ust_clock_source;
-
-static long long make_pidunique(void)
-{
- s64 retval;
- struct timeval tv;
-
- gettimeofday(&tv, NULL);
-
- retval = tv.tv_sec;
- retval <<= 32;
- retval |= tv.tv_usec;
-
- return retval;
-}
-
-static void print_ust_marker(FILE *fp)
-{
- struct ust_marker_iter iter;
-
- ust_marker_iter_reset(&iter);
- ust_marker_iter_start(&iter);
-
- while (iter.ust_marker) {
- fprintf(fp, "ust_marker: %s/%s %d \"%s\" %p\n",
- (*iter.ust_marker)->channel,
- (*iter.ust_marker)->name,
- (int)(*iter.ust_marker)->state,
- (*iter.ust_marker)->format,
- NULL); /*
- * location is null for now, will be added
- * to a different table.
- */
- ust_marker_iter_next(&iter);
- }
- ust_marker_iter_stop(&iter);
-}
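For reference, each line this helper writes follows the "ust_marker: %s/%s %d \"%s\" %p" format above; with a hypothetical marker it would look roughly as follows (the trailing pointer is the always-NULL location, printed as "(nil)" by glibc):

/* ust_marker: my_chan/my_event 1 "value %d" (nil) */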
-
-static void print_trace_events(FILE *fp)
-{
- struct trace_event_iter iter;
-
- trace_event_iter_reset(&iter);
- trace_event_iter_start(&iter);
-
- while (iter.trace_event) {
- fprintf(fp, "trace_event: %s\n", (*iter.trace_event)->name);
- trace_event_iter_next(&iter);
- }
- trace_event_iter_stop(&iter);
-}
-
-static int connect_ustconsumer(void)
-{
- int result, fd;
- char default_daemon_path[] = SOCK_DIR "/ustconsumer";
- char *explicit_daemon_path, *daemon_path;
-
- explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
- if (explicit_daemon_path) {
- daemon_path = explicit_daemon_path;
- } else {
- daemon_path = default_daemon_path;
- }
-
- DBG("Connecting to daemon_path %s", daemon_path);
-
- result = ustcomm_connect_path(daemon_path, &fd);
- if (result < 0) {
- WARN("connect_ustconsumer failed, daemon_path: %s",
- daemon_path);
- return result;
- }
-
- return fd;
-}
-
-
-static void request_buffer_consumer(int sock,
- const char *trace,
- const char *channel,
- int cpu)
-{
- struct ustcomm_header send_header, recv_header;
- struct ustcomm_buffer_info buf_inf;
- int result = 0;
-
- result = ustcomm_pack_buffer_info(&send_header,
- &buf_inf,
- trace,
- channel,
- cpu);
-
- if (result < 0) {
- ERR("failed to pack buffer info message %s_%d",
- channel, cpu);
- return;
- }
-
- buf_inf.pid = getpid();
- send_header.command = CONSUME_BUFFER;
-
- result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
- &recv_header, NULL);
- if (result <= 0) {
- PERROR("request for buffer consumer failed, is the daemon online?");
- }
-
- return;
-}
-
-/* Ask the daemon to collect a trace called trace_name, which is being
- * produced by this pid.
- *
- * The trace must be at least allocated. (It can also be started.)
- * This is because _ltt_trace_find is used.
- */
-
-static void inform_consumer_daemon(const char *trace_name)
-{
- int sock, i,j;
- struct ust_trace *trace;
- const char *ch_name;
-
- sock = connect_ustconsumer();
- if (sock < 0) {
- return;
- }
-
- DBG("Connected to ustconsumer");
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find(trace_name);
- if (trace == NULL) {
- WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
- goto unlock_traces;
- }
-
- for (i=0; i < trace->nr_channels; i++) {
- if (trace->channels[i].request_collection) {
- /* iterate on all cpus */
- for (j=0; j<trace->channels[i].n_cpus; j++) {
- ch_name = trace->channels[i].channel_name;
- request_buffer_consumer(sock, trace_name,
- ch_name, j);
- CMM_STORE_SHARED(buffers_to_export,
- CMM_LOAD_SHARED(buffers_to_export)+1);
- }
- }
- }
-
-unlock_traces:
- ltt_unlock_traces();
-
- close(sock);
-}
-
-static struct ust_channel *find_channel(const char *ch_name,
- struct ust_trace *trace)
-{
- int i;
-
- for (i=0; i<trace->nr_channels; i++) {
- if (!strcmp(trace->channels[i].channel_name, ch_name)) {
- return &trace->channels[i];
- }
- }
-
- return NULL;
-}
-
-static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
- int ch_cpu,
- int *buf_shmid,
- int *buf_struct_shmid,
- int *buf_pipe_fd)
-{
- struct ust_trace *trace;
- struct ust_channel *channel;
- struct ust_buffer *buf;
-
- DBG("get_buffer_shmid_pipe_fd");
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
- ltt_unlock_traces();
-
- if (trace == NULL) {
- ERR("cannot find trace!");
- return -ENODATA;
- }
-
- channel = find_channel(ch_name, trace);
- if (!channel) {
- ERR("cannot find channel %s!", ch_name);
- return -ENODATA;
- }
-
- buf = channel->buf[ch_cpu];
-
- *buf_shmid = buf->shmid;
- *buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
- *buf_pipe_fd = buf->data_ready_fd_read;
-
- return 0;
-}
-
-static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
- int *num, int *size)
-{
- struct ust_trace *trace;
- struct ust_channel *channel;
-
- DBG("get_subbuf_size");
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
- ltt_unlock_traces();
-
- if (!trace) {
- ERR("cannot find trace!");
- return -ENODATA;
- }
-
- channel = find_channel(ch_name, trace);
- if (!channel) {
- ERR("unable to find channel");
- return -ENODATA;
- }
-
- *num = channel->subbuf_cnt;
- *size = channel->subbuf_size;
-
- return 0;
-}
-
-/* Return the power of two which is equal to or higher than v */
-
-static unsigned int pow2_higher_or_eq(unsigned int v)
-{
- int hb = fls(v);
- int retval = 1<<(hb-1);
-
- if (v-retval == 0)
- return retval;
- else
- return retval<<1;
-}
-
-static int set_subbuf_size(const char *trace_name, const char *ch_name,
- unsigned int size)
-{
- unsigned int power;
- int retval = 0;
- struct ust_trace *trace;
- struct ust_channel *channel;
-
- DBG("set_subbuf_size");
-
- power = pow2_higher_or_eq(size);
- power = max_t(unsigned int, 2u, power);
- if (power != size) {
- WARN("using the next power of two for buffer size = %u\n", power);
- }
-
- ltt_lock_traces();
- trace = _ltt_trace_find_setup(trace_name);
- if (trace == NULL) {
- ERR("cannot find trace!");
- retval = -ENODATA;
- goto unlock_traces;
- }
-
- channel = find_channel(ch_name, trace);
- if (!channel) {
- ERR("unable to find channel");
- retval = -ENODATA;
- goto unlock_traces;
- }
-
- channel->subbuf_size = power;
- DBG("the set_subbuf_size for the requested channel is %zu", channel->subbuf_size);
-
-unlock_traces:
- ltt_unlock_traces();
-
- return retval;
-}
-
-static int set_subbuf_num(const char *trace_name, const char *ch_name,
- unsigned int num)
-{
- struct ust_trace *trace;
- struct ust_channel *channel;
- int retval = 0;
-
- DBG("set_subbuf_num");
-
- if (num < 2) {
- ERR("subbuffer count should be greater than 2");
- return -EINVAL;
- }
-
- ltt_lock_traces();
- trace = _ltt_trace_find_setup(trace_name);
- if (trace == NULL) {
- ERR("cannot find trace!");
- retval = -ENODATA;
- goto unlock_traces;
- }
-
- channel = find_channel(ch_name, trace);
- if (!channel) {
- ERR("unable to find channel");
- retval = -ENODATA;
- goto unlock_traces;
- }
-
- channel->subbuf_cnt = num;
- DBG("the set_subbuf_cnt for the requested channel is %u", channel->subbuf_cnt);
-
-unlock_traces:
- ltt_unlock_traces();
- return retval;
-}
-
-static int get_subbuffer(const char *trace_name, const char *ch_name,
- int ch_cpu, long *consumed_old)
-{
- int retval = 0;
- struct ust_trace *trace;
- struct ust_channel *channel;
- struct ust_buffer *buf;
-
- DBG("get_subbuf");
-
- *consumed_old = 0;
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
-
- if (!trace) {
- DBG("Cannot find trace. It was likely destroyed by the user.");
- retval = -ENODATA;
- goto unlock_traces;
- }
-
- channel = find_channel(ch_name, trace);
- if (!channel) {
- ERR("unable to find channel");
- retval = -ENODATA;
- goto unlock_traces;
- }
-
- buf = channel->buf[ch_cpu];
-
- retval = ust_buffers_get_subbuf(buf, consumed_old);
- if (retval < 0) {
- WARN("missed buffer?");
- }
-
-unlock_traces:
- ltt_unlock_traces();
-
- return retval;
-}
-
-
-static int notify_buffer_mapped(const char *trace_name,
- const char *ch_name,
- int ch_cpu)
-{
- int retval = 0;
- struct ust_trace *trace;
- struct ust_channel *channel;
- struct ust_buffer *buf;
-
- DBG("get_buffer_fd");
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
-
- if (!trace) {
- retval = -ENODATA;
- DBG("Cannot find trace. It was likely destroyed by the user.");
- goto unlock_traces;
- }
-
- channel = find_channel(ch_name, trace);
- if (!channel) {
- retval = -ENODATA;
- ERR("unable to find channel");
- goto unlock_traces;
- }
-
- buf = channel->buf[ch_cpu];
-
- /* Being here is the proof the daemon has mapped the buffer in its
- * memory. We may now decrement buffers_to_export.
- */
- if (uatomic_read(&buf->consumed) == 0) {
- DBG("decrementing buffers_to_export");
- CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
- }
-
-unlock_traces:
- ltt_unlock_traces();
-
- return retval;
-}
-
-static int put_subbuffer(const char *trace_name, const char *ch_name,
- int ch_cpu, long consumed_old)
-{
- int retval = 0;
- struct ust_trace *trace;
- struct ust_channel *channel;
- struct ust_buffer *buf;
-
- DBG("put_subbuf");
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
-
- if (!trace) {
- retval = -ENODATA;
- DBG("Cannot find trace. It was likely destroyed by the user.");
- goto unlock_traces;
- }
-
- channel = find_channel(ch_name, trace);
- if (!channel) {
- retval = -ENODATA;
- ERR("unable to find channel");
- goto unlock_traces;
- }
-
- buf = channel->buf[ch_cpu];
-
- retval = ust_buffers_put_subbuf(buf, consumed_old);
- if (retval < 0) {
- WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
- ch_name, ch_cpu);
- } else {
- DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
- ch_name, ch_cpu);
- }
-
-unlock_traces:
- ltt_unlock_traces();
-
- return retval;
-}
-
-static void release_listener_mutex(void *ptr)
-{
- pthread_mutex_unlock(&listener_thread_data_mutex);
-}
-
-static void listener_cleanup(void *ptr)
-{
- pthread_mutex_lock(&listen_sock_mutex);
- if (listen_sock) {
- ustcomm_del_named_sock(listen_sock, 0);
- listen_sock = NULL;
- }
- pthread_mutex_unlock(&listen_sock_mutex);
-}
-
-static int force_subbuf_switch(const char *trace_name)
-{
- struct ust_trace *trace;
- int i, j, retval = 0;
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
- if (!trace) {
- retval = -ENODATA;
- DBG("Cannot find trace. It was likely destroyed by the user.");
- goto unlock_traces;
- }
-
- for (i = 0; i < trace->nr_channels; i++) {
- for (j = 0; j < trace->channels[i].n_cpus; j++) {
- ltt_force_switch(trace->channels[i].buf[j],
- FORCE_FLUSH);
- }
- }
-
-unlock_traces:
- ltt_unlock_traces();
-
- return retval;
-}
-
-static int process_trace_cmd(int command, char *trace_name)
-{
- int result;
- char trace_type[] = "ustrelay";
-
- switch(command) {
- case START:
- /* start is an operation that sets up the trace, allocates it, and starts it */
- result = ltt_trace_setup(trace_name);
- if (result < 0) {
- ERR("ltt_trace_setup failed");
- return result;
- }
-
- result = ltt_trace_set_type(trace_name, trace_type);
- if (result < 0) {
- ERR("ltt_trace_set_type failed");
- return result;
- }
-
- result = ltt_trace_alloc(trace_name);
- if (result < 0) {
- ERR("ltt_trace_alloc failed");
- return result;
- }
-
- inform_consumer_daemon(trace_name);
-
- result = ltt_trace_start(trace_name);
- if (result < 0) {
- ERR("ltt_trace_start failed");
- return result;
- }
-
- return 0;
- case SETUP_TRACE:
- DBG("trace setup");
-
- result = ltt_trace_setup(trace_name);
- if (result < 0) {
- ERR("ltt_trace_setup failed");
- return result;
- }
-
- result = ltt_trace_set_type(trace_name, trace_type);
- if (result < 0) {
- ERR("ltt_trace_set_type failed");
- return result;
- }
-
- return 0;
- case ALLOC_TRACE:
- DBG("trace alloc");
-
- result = ltt_trace_alloc(trace_name);
- if (result < 0) {
- ERR("ltt_trace_alloc failed");
- return result;
- }
- inform_consumer_daemon(trace_name);
-
- return 0;
-
- case CREATE_TRACE:
- DBG("trace create");
-
- result = ltt_trace_setup(trace_name);
- if (result < 0) {
- ERR("ltt_trace_setup failed");
- return result;
- }
-
- result = ltt_trace_set_type(trace_name, trace_type);
- if (result < 0) {
- ERR("ltt_trace_set_type failed");
- return result;
- }
-
- return 0;
- case START_TRACE:
- DBG("trace start");
-
- result = ltt_trace_alloc(trace_name);
- if (result < 0) {
- ERR("ltt_trace_alloc failed");
- return result;
- }
- if (!result) {
- inform_consumer_daemon(trace_name);
- }
-
- result = ltt_trace_start(trace_name);
- if (result < 0) {
- ERR("ltt_trace_start failed");
- return result;
- }
-
- return 0;
- case STOP_TRACE:
- DBG("trace stop");
-
- result = ltt_trace_stop(trace_name);
- if (result < 0) {
- ERR("ltt_trace_stop failed");
- return result;
- }
-
- return 0;
- case DESTROY_TRACE:
- DBG("trace destroy");
-
- result = ltt_trace_destroy(trace_name, 0);
- if (result < 0) {
- ERR("ltt_trace_destroy failed");
- return result;
- }
- return 0;
- case FORCE_SUBBUF_SWITCH:
- DBG("force switch");
-
- result = force_subbuf_switch(trace_name);
- if (result < 0) {
- ERR("force_subbuf_switch failed");
- return result;
- }
- return 0;
- }
-
- return 0;
-}
-
-
-static void process_channel_cmd(int sock, int command,
- struct ustcomm_channel_info *ch_inf)
-{
- struct ustcomm_header _reply_header;
- struct ustcomm_header *reply_header = &_reply_header;
- struct ustcomm_channel_info *reply_msg =
- (struct ustcomm_channel_info *)send_buffer;
- int result, offset = 0, num, size;
-
- memset(reply_header, 0, sizeof(*reply_header));
-
- switch (command) {
- case GET_SUBBUF_NUM_SIZE:
- result = get_subbuf_num_size(ch_inf->trace,
- ch_inf->channel,
- &num, &size);
- if (result < 0) {
- reply_header->result = result;
- break;
- }
-
- reply_msg->channel = USTCOMM_POISON_PTR;
- reply_msg->subbuf_num = num;
- reply_msg->subbuf_size = size;
-
-
- reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
-
- break;
- case SET_SUBBUF_NUM:
- reply_header->result = set_subbuf_num(ch_inf->trace,
- ch_inf->channel,
- ch_inf->subbuf_num);
-
- break;
- case SET_SUBBUF_SIZE:
- reply_header->result = set_subbuf_size(ch_inf->trace,
- ch_inf->channel,
- ch_inf->subbuf_size);
-
-
- break;
- }
- if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
- ERR("ustcomm_send failed");
- }
-}
-
-static void process_buffer_cmd(int sock, int command,
- struct ustcomm_buffer_info *buf_inf)
-{
- struct ustcomm_header _reply_header;
- struct ustcomm_header *reply_header = &_reply_header;
- struct ustcomm_buffer_info *reply_msg =
- (struct ustcomm_buffer_info *)send_buffer;
- int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
- long consumed_old;
-
- memset(reply_header, 0, sizeof(*reply_header));
-
- switch (command) {
- case GET_BUF_SHMID_PIPE_FD:
- result = get_buffer_shmid_pipe_fd(buf_inf->trace,
- buf_inf->channel,
- buf_inf->ch_cpu,
- &buf_shmid,
- &buf_struct_shmid,
- &buf_pipe_fd);
- if (result < 0) {
- reply_header->result = result;
- break;
- }
-
- reply_msg->channel = USTCOMM_POISON_PTR;
- reply_msg->buf_shmid = buf_shmid;
- reply_msg->buf_struct_shmid = buf_struct_shmid;
-
- reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
- reply_header->fd_included = 1;
-
- if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
- &buf_pipe_fd) < 0) {
- ERR("ustcomm_send failed");
- }
- return;
-
- case NOTIFY_BUF_MAPPED:
- reply_header->result =
- notify_buffer_mapped(buf_inf->trace,
- buf_inf->channel,
- buf_inf->ch_cpu);
- break;
- case GET_SUBBUFFER:
- result = get_subbuffer(buf_inf->trace, buf_inf->channel,
- buf_inf->ch_cpu, &consumed_old);
- if (result < 0) {
- reply_header->result = result;
- break;
- }
-
- reply_msg->channel = USTCOMM_POISON_PTR;
- reply_msg->consumed_old = consumed_old;
-
- reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
-
- break;
- case PUT_SUBBUFFER:
- result = put_subbuffer(buf_inf->trace, buf_inf->channel,
- buf_inf->ch_cpu,
- buf_inf->consumed_old);
- reply_header->result = result;
-
- break;
- }
-
- if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
- ERR("ustcomm_send failed");
- }
-
-}
-
-static void process_ust_marker_cmd(int sock, int command,
- struct ustcomm_ust_marker_info *ust_marker_inf)
-{
- struct ustcomm_header _reply_header;
- struct ustcomm_header *reply_header = &_reply_header;
- int result = 0;
-
- memset(reply_header, 0, sizeof(*reply_header));
-
- switch(command) {
- case ENABLE_MARKER:
-
- result = ltt_ust_marker_connect(ust_marker_inf->channel,
- ust_marker_inf->ust_marker,
- "default");
- if (result < 0) {
- WARN("could not enable ust_marker; channel=%s,"
- " name=%s",
- ust_marker_inf->channel,
- ust_marker_inf->ust_marker);
-
- }
- break;
- case DISABLE_MARKER:
- result = ltt_ust_marker_disconnect(ust_marker_inf->channel,
- ust_marker_inf->ust_marker,
- "default");
- if (result < 0) {
- WARN("could not disable ust_marker; channel=%s,"
- " name=%s",
- ust_marker_inf->channel,
- ust_marker_inf->ust_marker);
- }
- break;
- }
-
- reply_header->result = result;
-
- if (ustcomm_send(sock, reply_header, NULL) < 0) {
- ERR("ustcomm_send failed");
- }
-
-}
-static void process_client_cmd(struct ustcomm_header *recv_header,
- char *recv_buf, int sock)
-{
- int result;
- struct ustcomm_header _reply_header;
- struct ustcomm_header *reply_header = &_reply_header;
- char *send_buf = send_buffer;
-
- memset(reply_header, 0, sizeof(*reply_header));
- memset(send_buf, 0, sizeof(send_buffer));
-
- switch(recv_header->command) {
- case GET_SUBBUF_NUM_SIZE:
- case SET_SUBBUF_NUM:
- case SET_SUBBUF_SIZE:
- {
- struct ustcomm_channel_info *ch_inf;
- ch_inf = (struct ustcomm_channel_info *)recv_buf;
- result = ustcomm_unpack_channel_info(ch_inf);
- if (result < 0) {
- ERR("couldn't unpack channel info");
- reply_header->result = -EINVAL;
- goto send_response;
- }
- process_channel_cmd(sock, recv_header->command, ch_inf);
- return;
- }
- case GET_BUF_SHMID_PIPE_FD:
- case NOTIFY_BUF_MAPPED:
- case GET_SUBBUFFER:
- case PUT_SUBBUFFER:
- {
- struct ustcomm_buffer_info *buf_inf;
- buf_inf = (struct ustcomm_buffer_info *)recv_buf;
- result = ustcomm_unpack_buffer_info(buf_inf);
- if (result < 0) {
- ERR("couldn't unpack buffer info");
- reply_header->result = -EINVAL;
- goto send_response;
- }
- process_buffer_cmd(sock, recv_header->command, buf_inf);
- return;
- }
- case ENABLE_MARKER:
- case DISABLE_MARKER:
- {
- struct ustcomm_ust_marker_info *ust_marker_inf;
- ust_marker_inf = (struct ustcomm_ust_marker_info *)recv_buf;
- result = ustcomm_unpack_ust_marker_info(ust_marker_inf);
- if (result < 0) {
- ERR("couldn't unpack ust_marker info");
- reply_header->result = -EINVAL;
- goto send_response;
- }
- process_ust_marker_cmd(sock, recv_header->command, ust_marker_inf);
- return;
- }
- case LIST_MARKERS:
- {
- char *ptr;
- size_t size;
- FILE *fp;
-
- fp = open_memstream(&ptr, &size);
- if (fp == NULL) {
- ERR("opening memstream failed");
- return;
- }
- print_ust_marker(fp);
- fclose(fp);
-
- reply_header->size = size + 1; /* Include final \0 */
-
- result = ustcomm_send(sock, reply_header, ptr);
-
- free(ptr);
-
- if (result < 0) {
- PERROR("failed to send ust_marker list");
- }
-
- break;
- }
- case LIST_TRACE_EVENTS:
- {
- char *ptr;
- size_t size;
- FILE *fp;
-
- fp = open_memstream(&ptr, &size);
- if (fp == NULL) {
- ERR("opening memstream failed");
- return;
- }
- print_trace_events(fp);
- fclose(fp);
-
- reply_header->size = size + 1; /* Include final \0 */
-
- result = ustcomm_send(sock, reply_header, ptr);
-
- free(ptr);
-
- if (result < 0) {
- ERR("list_trace_events failed");
- return;
- }
-
- break;
- }
- case LOAD_PROBE_LIB:
- {
- char *libfile;
-
- /* FIXME: No functionality at all... */
- libfile = recv_buf;
-
- DBG("load_probe_lib loading %s", libfile);
-
- break;
- }
- case GET_PIDUNIQUE:
- {
- struct ustcomm_pidunique *pid_msg;
- pid_msg = (struct ustcomm_pidunique *)send_buf;
-
- pid_msg->pidunique = pidunique;
- reply_header->size = sizeof(*pid_msg);
-
- goto send_response;
-
- }
- case GET_SOCK_PATH:
- {
- struct ustcomm_single_field *sock_msg;
- char *sock_path_env;
-
- sock_msg = (struct ustcomm_single_field *)send_buf;
-
- sock_path_env = getenv("UST_DAEMON_SOCKET");
-
- if (!sock_path_env) {
- result = ustcomm_pack_single_field(reply_header,
- sock_msg,
- SOCK_DIR "/ustconsumer");
-
- } else {
- result = ustcomm_pack_single_field(reply_header,
- sock_msg,
- sock_path_env);
- }
- reply_header->result = result;
-
- goto send_response;
- }
- case SET_SOCK_PATH:
- {
- struct ustcomm_single_field *sock_msg;
- sock_msg = (struct ustcomm_single_field *)recv_buf;
- result = ustcomm_unpack_single_field(sock_msg);
- if (result < 0) {
- reply_header->result = -EINVAL;
- goto send_response;
- }
-
- reply_header->result = setenv("UST_DAEMON_SOCKET",
- sock_msg->field, 1);
-
- goto send_response;
- }
- case START:
- case SETUP_TRACE:
- case ALLOC_TRACE:
- case CREATE_TRACE:
- case START_TRACE:
- case STOP_TRACE:
- case DESTROY_TRACE:
- case FORCE_SUBBUF_SWITCH:
- {
- struct ustcomm_single_field *trace_inf =
- (struct ustcomm_single_field *)recv_buf;
-
- result = ustcomm_unpack_single_field(trace_inf);
- if (result < 0) {
- ERR("couldn't unpack trace info");
- reply_header->result = -EINVAL;
- goto send_response;
- }
-
- reply_header->result =
- process_trace_cmd(recv_header->command,
- trace_inf->field);
- goto send_response;
-
- }
- default:
- reply_header->result = -EINVAL;
-
- goto send_response;
- }
-
- return;
-
-send_response:
- ustcomm_send(sock, reply_header, send_buf);
-}
-
-#define MAX_EVENTS 10
-
-void *listener_main(void *p)
-{
- struct ustcomm_sock *epoll_sock;
- struct epoll_event events[MAX_EVENTS];
- struct sockaddr addr;
- int accept_fd, nfds, result, i, addr_size;
-
- DBG("LISTENER");
-
- pthread_cleanup_push(listener_cleanup, NULL);
-
- for(;;) {
- nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
- if (nfds == -1) {
- PERROR("listener_main: epoll_wait failed");
- continue;
- }
-
- for (i = 0; i < nfds; i++) {
- pthread_mutex_lock(&listener_thread_data_mutex);
- pthread_cleanup_push(release_listener_mutex, NULL);
- epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
- if (epoll_sock == listen_sock) {
- addr_size = sizeof(struct sockaddr);
- accept_fd = accept(epoll_sock->fd,
- &addr,
- (socklen_t *)&addr_size);
- if (accept_fd == -1) {
- PERROR("listener_main: accept failed");
- continue;
- }
- ustcomm_init_sock(accept_fd, epoll_fd,
- &ust_socks);
- } else {
- memset(receive_header, 0,
- sizeof(*receive_header));
- memset(receive_buffer, 0,
- sizeof(receive_buffer));
- result = ustcomm_recv(epoll_sock->fd,
- receive_header,
- receive_buffer);
- if (result == 0) {
- ustcomm_del_sock(epoll_sock, 0);
- } else {
- process_client_cmd(receive_header,
- receive_buffer,
- epoll_sock->fd);
- }
- }
- pthread_cleanup_pop(1); /* release listener mutex */
- }
- }
-
- pthread_cleanup_pop(1);
-}
-
-/* These should only be accessed in the parent thread,
- * not the listener.
- */
-static volatile sig_atomic_t have_listener = 0;
-static pthread_t listener_thread;
-
-void create_listener(void)
-{
- int result;
- sigset_t sig_all_blocked;
- sigset_t orig_parent_mask;
-
- if (have_listener) {
- WARN("not creating listener because we already had one");
- return;
- }
-
- /* A new thread created by pthread_create inherits the signal mask
- * from the parent. To avoid any signal being received by the
- * listener thread, we block all signals temporarily in the parent,
- * while we create the listener thread.
- */
-
- sigfillset(&sig_all_blocked);
-
- result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
- if (result) {
- PERROR("pthread_sigmask: %s", strerror(result));
- }
-
- result = pthread_create(&listener_thread, NULL, listener_main, NULL);
- if (result) {
- ERR("pthread_create: %s", strerror(result));
- }
-
- /* Restore original signal mask in parent */
- result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
- if (result) {
- PERROR("pthread_sigmask: %s", strerror(result));
- } else {
- have_listener = 1;
- }
-}
-
-#define AUTOPROBE_DISABLED 0
-#define AUTOPROBE_ENABLE_ALL 1
-#define AUTOPROBE_ENABLE_REGEX 2
-static int autoprobe_method = AUTOPROBE_DISABLED;
-static regex_t autoprobe_regex;
-
-static void auto_probe_connect(struct ust_marker *m)
-{
- int result;
-
- char* concat_name = NULL;
- const char *probe_name = "default";
-
- if (autoprobe_method == AUTOPROBE_DISABLED) {
- return;
- } else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
- result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
- if (result == -1) {
- ERR("auto_probe_connect: asprintf failed (ust_marker %s/%s)",
- m->channel, m->name);
- return;
- }
- if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
- free(concat_name);
- return;
- }
- free(concat_name);
- }
-
- result = ltt_ust_marker_connect(m->channel, m->name, probe_name);
- if (result && result != -EEXIST)
- ERR("ltt_ust_marker_connect (ust_marker = %s/%s, errno = %d)", m->channel, m->name, -result);
-
- DBG("auto connected ust_marker %s (addr: %p) %s to probe default", m->channel, m, m->name);
-
-}
-
-static struct ustcomm_sock * init_app_socket(int epoll_fd)
-{
- char *dir_name, *sock_name;
- int result;
- struct ustcomm_sock *sock = NULL;
- time_t mtime;
-
- dir_name = ustcomm_user_sock_dir();
- if (!dir_name)
- return NULL;
-
- mtime = ustcomm_pid_st_mtime(getpid());
- if (!mtime) {
- goto free_dir_name;
- }
-
- result = asprintf(&sock_name, "%s/%d.%ld", dir_name,
- (int) getpid(), (long) mtime);
- if (result < 0) {
- ERR("string overflow allocating socket name, "
- "UST thread bailing");
- goto free_dir_name;
- }
-
- result = ensure_dir_exists(dir_name, S_IRWXU);
- if (result == -1) {
- ERR("Unable to create socket directory %s, UST thread bailing",
- dir_name);
- goto free_sock_name;
- }
-
- sock = ustcomm_init_named_socket(sock_name, epoll_fd);
- if (!sock) {
- ERR("Error initializing named socket (%s). Check that directory"
- "exists and that it is writable. UST thread bailing", sock_name);
- goto free_sock_name;
- }
-
-free_sock_name:
- free(sock_name);
-free_dir_name:
- free(dir_name);
-
- return sock;
-}
-
-static void __attribute__((constructor)) init()
-{
- struct timespec ts;
- int result;
- char* autoprobe_val = NULL;
- char* subbuffer_size_val = NULL;
- char* subbuffer_count_val = NULL;
- unsigned int subbuffer_size;
- unsigned int subbuffer_count;
- unsigned int power;
-
- /* Assign the pidunique, to be able to differentiate the processes with same
- * pid, (before and after an exec).
- */
- pidunique = make_pidunique();
- processpid = getpid();
-
- DBG("Tracectl constructor");
-
- /* Set up epoll */
- epoll_fd = epoll_create(MAX_EVENTS);
- if (epoll_fd == -1) {
- ERR("epoll_create failed, tracing shutting down");
- return;
- }
-
- /* Create the socket */
- listen_sock = init_app_socket(epoll_fd);
- if (!listen_sock) {
- ERR("failed to create application socket,"
- " tracing shutting down");
- return;
- }
-
- create_listener();
-
- /* Get the clock source type */
-
- /* Default clock source */
- ust_clock_source = CLOCK_TRACE;
- if (clock_gettime(ust_clock_source, &ts) != 0) {
- ust_clock_source = CLOCK_MONOTONIC;
- DBG("UST traces will not be synchronized with LTTng traces");
- }
-
- if (getenv("UST_TRACE") || getenv("UST_AUTOPROBE")) {
- /* Ensure ust_marker control is initialized */
- init_ust_marker_control();
- }
-
- autoprobe_val = getenv("UST_AUTOPROBE");
- if (autoprobe_val) {
- struct ust_marker_iter iter;
-
- DBG("Autoprobe enabled.");
-
- /* first, set the callback that will connect the
- * probe to newly registered ust_markers
- */
- if (autoprobe_val[0] == '/') {
- result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
- if (result) {
- char regexerr[150];
-
- regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
- ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
- /* don't crash the application just for this */
- } else {
- autoprobe_method = AUTOPROBE_ENABLE_REGEX;
- }
- } else {
- /* just enable all instrumentation */
- autoprobe_method = AUTOPROBE_ENABLE_ALL;
- }
-
- ust_marker_set_new_ust_marker_cb(auto_probe_connect);
-
- /* Now, connect the probes that were already registered. */
- ust_marker_iter_reset(&iter);
- ust_marker_iter_start(&iter);
-
- DBG("now iterating on ust_marker already registered");
- while (iter.ust_marker) {
- DBG("now iterating on ust_marker %s", (*iter.ust_marker)->name);
- auto_probe_connect(*iter.ust_marker);
- ust_marker_iter_next(&iter);
- }
- ust_marker_iter_stop(&iter);
- }
-
- if (getenv("UST_OVERWRITE")) {
- int val = atoi(getenv("UST_OVERWRITE"));
- if (val == 0 || val == 1) {
- CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
- } else {
- WARN("invalid value for UST_OVERWRITE");
- }
- }
-
- if (getenv("UST_AUTOCOLLECT")) {
- int val = atoi(getenv("UST_AUTOCOLLECT"));
- if (val == 0 || val == 1) {
- CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
- } else {
- WARN("invalid value for UST_AUTOCOLLECT");
- }
- }
-
- subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
- if (subbuffer_size_val) {
- sscanf(subbuffer_size_val, "%u", &subbuffer_size);
- power = pow2_higher_or_eq(subbuffer_size);
- if (power != subbuffer_size)
- WARN("using the next power of two for buffer size = %u\n", power);
- chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
- }
-
- subbuffer_count_val = getenv("UST_SUBBUF_NUM");
- if (subbuffer_count_val) {
- sscanf(subbuffer_count_val, "%u", &subbuffer_count);
- if (subbuffer_count < 2)
- subbuffer_count = 2;
- chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
- }
-
- if (getenv("UST_TRACE")) {
- char trace_name[] = "auto";
- char trace_type[] = "ustrelay";
-
- DBG("starting early tracing");
-
- /* Ensure buffers are initialized, for the transport to be available.
- * We are about to set a trace type and it will fail without this.
- */
- init_ustrelay_transport();
-
- /* FIXME: When starting early tracing (here), depending on the
- * order of constructors, it is very well possible some ust_marker
- * sections are not yet registered. Because of this, some
- * channels may not be registered. Yet, we are about to ask the
- * daemon to collect the channels. Channels which are not yet
- * registered will not be collected.
- *
- * Currently, in LTTng, there is no way to add a channel after
- * trace start. The reason for this is that it induces complex
- * concurrency issues on the trace structures, which can only
- * be resolved using RCU. This has not been done yet. As a
- * workaround, we are forcing the registration of the "ust"
- * channel here. This is the only channel (apart from metadata)
- * that can be reliably used in early tracing.
- *
- * Non-early tracing does not have this problem and can use
- * arbitrary channel names.
- */
- ltt_channels_register("ust");
-
- result = ltt_trace_setup(trace_name);
- if (result < 0) {
- ERR("ltt_trace_setup failed");
- return;
- }
-
- result = ltt_trace_set_type(trace_name, trace_type);
- if (result < 0) {
- ERR("ltt_trace_set_type failed");
- return;
- }
-
- result = ltt_trace_alloc(trace_name);
- if (result < 0) {
- ERR("ltt_trace_alloc failed");
- return;
- }
-
- result = ltt_trace_start(trace_name);
- if (result < 0) {
- ERR("ltt_trace_start failed");
- return;
- }
-
- /* Do this after the trace is started in order to avoid creating confusion
- * if the trace fails to start. */
- inform_consumer_daemon(trace_name);
- }
-
- return;
-
- /* FIXME: on error, should tear down whatever was already initialized */
-
-}
-
-/* This is only called if we terminate normally, not with an unhandled signal,
- * so we cannot rely on it. However, for now, LTTV requires that the header of
- * the last sub-buffer contain a valid end time for the trace. This is done
- * automatically only when the trace is properly stopped.
- *
- * If the traced program crashed, it is always possible to manually add the
- * right value in the header, or to open the trace in text mode.
- *
- * FIXME: Fix LTTV so it doesn't need this.
- */
-
-static void destroy_traces(void)
-{
- int result;
-
- /* if trace running, finish it */
-
- DBG("destructor stopping traces");
-
- result = ltt_trace_stop("auto");
- if (result < 0) {
- ERR("ltt_trace_stop error");
- }
-
- result = ltt_trace_destroy("auto", 0);
- if (result < 0) {
- ERR("ltt_trace_destroy error");
- }
-}
-
-static int trace_recording(void)
-{
- int retval = 0;
- struct ust_trace *trace;
-
- ltt_lock_traces();
-
- cds_list_for_each_entry(trace, <t_traces.head, list) {
- if (trace->active) {
- retval = 1;
- break;
- }
- }
-
- ltt_unlock_traces();
-
- return retval;
-}
-
-int restarting_usleep(useconds_t usecs)
-{
- struct timespec tv;
- int result;
-
- tv.tv_sec = 0;
- tv.tv_nsec = usecs * 1000;
-
- do {
- result = nanosleep(&tv, &tv);
- } while (result == -1 && errno == EINTR);
-
- return result;
-}
-
-static void stop_listener(void)
-{
- int result;
-
- if (!have_listener)
- return;
-
- result = pthread_cancel(listener_thread);
- if (result != 0) {
- ERR("pthread_cancel: %s", strerror(result));
- }
- result = pthread_join(listener_thread, NULL);
- if (result != 0) {
- ERR("pthread_join: %s", strerror(result));
- }
-}
-
-/* This destructor keeps the process alive for a few seconds in order
- * to leave time for ustconsumer to connect to its buffers. This is necessary
- * for programs whose execution is very short. It is also useful in all
- * programs when tracing is started close to the end of the program
- * execution.
- *
- * FIXME: For now, this only works for the first trace created in a
- * process.
- */
-
-static void __attribute__((destructor)) keepalive()
-{
- if (processpid != getpid()) {
- return;
- }
-
- if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
- int total = 0;
- DBG("Keeping process alive for consumer daemon...");
- while (CMM_LOAD_SHARED(buffers_to_export)) {
- const int interv = 200000;
- restarting_usleep(interv);
- total += interv;
-
- if (total >= 3000000) {
- WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
- break;
- }
- }
- DBG("Finally dying...");
- }
-
- destroy_traces();
-
- /* Ask the listener to stop and clean up. */
- stop_listener();
-}
-
-void ust_potential_exec(void)
-{
- ust_marker(potential_exec, UST_MARKER_NOARGS);
-
- DBG("test");
-
- keepalive();
-}
-
-/* Notify ust that there was a fork. This needs to be called inside
- * the new process, anytime a process whose memory is not shared with
- * the parent is created. If this function is not called, the events
- * of the new process will not be collected.
- *
- * Signals should be disabled before the fork and reenabled only after
- * this call in order to guarantee tracing is not started before ust_fork()
- * sanitizes the new process.
- */
-
-static void ust_fork(void)
-{
- struct ustcomm_sock *sock, *sock_tmp;
- struct ust_trace *trace, *trace_tmp;
- int result;
-
- /* FIXME: technically, the locks could have been taken before the fork */
- DBG("ust: forking");
-
- /* Get the pid of the new process */
- processpid = getpid();
-
- /*
- * FIXME: This could be prettier, we loop over the list twice and
- * following good locking practice should lock around the loop
- */
- cds_list_for_each_entry_safe(trace, trace_tmp, <t_traces.head, list) {
- ltt_trace_stop(trace->trace_name);
- }
-
- /* Delete all active connections, but leave them in the epoll set */
- cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
- ustcomm_del_sock(sock, 1);
- }
-
- /*
- * FIXME: This could be prettier, we loop over the list twice and
- * following good locking practice should lock around the loop
- */
- cds_list_for_each_entry_safe(trace, trace_tmp, <t_traces.head, list) {
- ltt_trace_destroy(trace->trace_name, 1);
- }
-
- /* Clean up the listener socket and epoll, keeping the socket file */
- if (listen_sock) {
- ustcomm_del_named_sock(listen_sock, 1);
- listen_sock = NULL;
- }
- close(epoll_fd);
-
- /* Re-start the launch sequence */
- CMM_STORE_SHARED(buffers_to_export, 0);
- have_listener = 0;
-
- /* Set up epoll */
- epoll_fd = epoll_create(MAX_EVENTS);
- if (epoll_fd == -1) {
- ERR("epoll_create failed, tracing shutting down");
- return;
- }
-
- /* Create the socket */
- listen_sock = init_app_socket(epoll_fd);
- if (!listen_sock) {
- ERR("failed to create application socket,"
- " tracing shutting down");
- return;
- }
- create_listener();
- ltt_trace_setup("auto");
- result = ltt_trace_set_type("auto", "ustrelay");
- if (result < 0) {
- ERR("ltt_trace_set_type failed");
- return;
- }
-
- ltt_trace_alloc("auto");
- ltt_trace_start("auto");
- inform_consumer_daemon("auto");
-}
-
-void ust_before_fork(ust_fork_info_t *fork_info)
-{
- /* Disable signals. This is to avoid that the child
- * intervenes before it is properly setup for tracing. It is
- * safer to disable all signals, because then we know we are not
- * breaking anything by restoring the original mask.
- */
- sigset_t all_sigs;
- int result;
-
- /* FIXME:
- - only do this if tracing is active
- */
-
- /* Disable signals */
- sigfillset(&all_sigs);
- result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
- if (result == -1) {
- PERROR("sigprocmask");
- return;
- }
-
- /*
- * Take the fork lock to make sure we are not in the middle of
- * something in the listener thread.
- */
- pthread_mutex_lock(&listener_thread_data_mutex);
- /*
- * Hold listen_sock_mutex to protect from listen_sock teardown.
- */
- pthread_mutex_lock(&listen_sock_mutex);
- rcu_bp_before_fork();
-}
-
-/* Don't call this function directly in a traced program */
-static void ust_after_fork_common(ust_fork_info_t *fork_info)
-{
- int result;
-
- pthread_mutex_unlock(&listen_sock_mutex);
- pthread_mutex_unlock(&listener_thread_data_mutex);
- /* Restore signals */
- result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
- if (result == -1) {
- PERROR("sigprocmask");
- return;
- }
-}
-
-void ust_after_fork_parent(ust_fork_info_t *fork_info)
-{
- rcu_bp_after_fork_parent();
- /* Release mutexes and reenable signals */
- ust_after_fork_common(fork_info);
-}
-
-void ust_after_fork_child(ust_fork_info_t *fork_info)
-{
- /* Release urcu mutexes */
- rcu_bp_after_fork_child();
-
- /* Sanitize the child */
- ust_fork();
-
- /* Release mutexes and reenable signals */
- ust_after_fork_common(fork_info);
-}
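
As a minimal sketch of the call sequence these hooks expect (annotation only, not part of the patch; the wrapper name fork_traced() is hypothetical), a traced program or a fork() interposer could drive ust_before_fork(), ust_after_fork_parent() and ust_after_fork_child() as follows:

#include <unistd.h>

/*
 * Hypothetical wrapper around fork(); ust_before_fork(),
 * ust_after_fork_parent(), ust_after_fork_child() and ust_fork_info_t
 * are the entry points defined above.
 */
pid_t fork_traced(void)
{
	ust_fork_info_t fork_info;
	pid_t pid;

	/* Blocks all signals and takes the listener mutexes. */
	ust_before_fork(&fork_info);

	pid = fork();

	if (pid == 0) {
		/* Child: tear down inherited tracing state and restart it. */
		ust_after_fork_child(&fork_info);
	} else {
		/* Parent (or failed fork): release mutexes, restore signals. */
		ust_after_fork_parent(&fork_info);
	}

	return pid;
}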
-
+++ /dev/null
-/*
- * tracer.c
- *
- * (C) Copyright 2005-2008 -
- * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Inspired from LTT :
- * Karim Yaghmour (karim@opersys.com)
- * Tom Zanussi (zanussi@us.ibm.com)
- * Bob Wisniewski (bob@watson.ibm.com)
- * And from K42 :
- * Bob Wisniewski (bob@watson.ibm.com)
- *
- * Changelog:
- * 22/09/06, Move to the marker/probes mechanism.
- * 19/10/05, Complete lockless mechanism.
- * 27/05/05, Modular redesign and rewrite.
- */
-
-#include <urcu-bp.h>
-#include <urcu/rculist.h>
-
-#include <ust/clock.h>
-
-#include "tracercore.h"
-#include "tracer.h"
-#include "usterr_signal_safe.h"
-
-struct chan_info_struct chan_infos[] = {
- [LTT_CHANNEL_METADATA] = {
- LTT_METADATA_CHANNEL,
- LTT_DEFAULT_SUBBUF_SIZE_LOW,
- LTT_DEFAULT_N_SUBBUFS_LOW,
- },
- [LTT_CHANNEL_UST] = {
- LTT_UST_CHANNEL,
- LTT_DEFAULT_SUBBUF_SIZE_HIGH,
- LTT_DEFAULT_N_SUBBUFS_HIGH,
- },
-};
-
-static enum ltt_channels get_channel_type_from_name(const char *name)
-{
- int i;
-
- if (!name)
- return LTT_CHANNEL_UST;
-
- for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
- if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
- return (enum ltt_channels)i;
-
- return LTT_CHANNEL_UST;
-}
-
-static CDS_LIST_HEAD(ltt_transport_list);
-/* transport mutex, nests inside traces mutex (ltt_lock_traces) */
-static DEFINE_MUTEX(ltt_transport_mutex);
-/**
- * ltt_transport_register - LTT transport registration
- * @transport: transport structure
- *
- * Registers a transport which can be used as output to extract the data out of
- * LTTng. The module calling this registration function must ensure that no
- * trap-inducing code will be executed by the transport functions. E.g.
- * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
- * is made visible to the transport function. This registration acts as a
- * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
- * after its registration must it synchronize the TLBs.
- */
-void ltt_transport_register(struct ltt_transport *transport)
-{
- pthread_mutex_lock(<t_transport_mutex);
- cds_list_add_tail(&transport->node, <t_transport_list);
- pthread_mutex_unlock(<t_transport_mutex);
-}
-
-/**
- * ltt_transport_unregister - LTT transport unregistration
- * @transport: transport structure
- */
-void ltt_transport_unregister(struct ltt_transport *transport)
-{
- pthread_mutex_lock(<t_transport_mutex);
- cds_list_del(&transport->node);
- pthread_mutex_unlock(<t_transport_mutex);
-}
-
-static inline int is_channel_overwrite(enum ltt_channels chan,
- enum trace_mode mode)
-{
- switch (mode) {
- case LTT_TRACE_NORMAL:
- return 0;
- case LTT_TRACE_FLIGHT:
- switch (chan) {
- case LTT_CHANNEL_METADATA:
- return 0;
- default:
- return 1;
- }
- case LTT_TRACE_HYBRID:
- switch (chan) {
- case LTT_CHANNEL_METADATA:
- return 0;
- default:
- return 1;
- }
- default:
- return 0;
- }
-}
-
-static void trace_async_wakeup(struct ust_trace *trace)
-{
- int i;
- struct ust_channel *chan;
-
- /* Must check each channel for pending read wakeup */
- for (i = 0; i < trace->nr_channels; i++) {
- chan = &trace->channels[i];
- if (chan->active)
- trace->ops->wakeup_channel(chan);
- }
-}
-
-/**
- * _ltt_trace_find - find a trace by given name.
- * trace_name: trace name
- *
- * Returns a pointer to the trace structure, NULL if not found.
- */
-struct ust_trace *_ltt_trace_find(const char *trace_name)
-{
- struct ust_trace *trace;
-
- cds_list_for_each_entry(trace, <t_traces.head, list)
- if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
- return trace;
-
- return NULL;
-}
-
-/* _ltt_trace_find_setup :
- * find a trace in setup list by given name.
- *
- * Returns a pointer to the trace structure, NULL if not found.
- */
-struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
-{
- struct ust_trace *trace;
-
- cds_list_for_each_entry(trace, <t_traces.setup_head, list)
- if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
- return trace;
-
- return NULL;
-}
-
-/**
- * ltt_release_transport - Release an LTT transport
- * @kref : reference count on the transport
- */
-void ltt_release_transport(struct urcu_ref *urcu_ref)
-{
- return;
-}
-
-/**
- * ltt_release_trace - Release a LTT trace
- * @kref : reference count on the trace
- */
-void ltt_release_trace(struct urcu_ref *urcu_ref)
-{
- struct ust_trace *trace = _ust_container_of(urcu_ref,
- struct ust_trace, urcu_ref);
- ltt_channels_trace_free(trace->channels);
- free(trace);
-}
-
-static inline void prepare_chan_size_num(unsigned int *subbuf_size,
- unsigned int *n_subbufs)
-{
- /* Make sure the subbuffer size is at least a page */
- *subbuf_size = max_t(unsigned int, *subbuf_size, PAGE_SIZE);
-
- /* round to next power of 2 */
- *subbuf_size = 1 << get_count_order(*subbuf_size);
- *n_subbufs = 1 << get_count_order(*n_subbufs);
-
- /* Subbuf size and number must both be power of two */
- WARN_ON(hweight32(*subbuf_size) != 1);
- WARN_ON(hweight32(*n_subbufs) != 1);
-}
-
-int _ltt_trace_setup(const char *trace_name)
-{
- int err = 0;
- struct ust_trace *new_trace = NULL;
- int metadata_index;
- unsigned int chan;
- enum ltt_channels chantype;
-
- if (_ltt_trace_find_setup(trace_name)) {
- ERR("Trace name %s already used", trace_name);
- err = -EEXIST;
- goto traces_error;
- }
-
- if (_ltt_trace_find(trace_name)) {
- ERR("Trace name %s already used", trace_name);
- err = -EEXIST;
- goto traces_error;
- }
-
- new_trace = zmalloc(sizeof(struct ust_trace));
- if (!new_trace) {
- ERR("Unable to allocate memory for trace %s", trace_name);
- err = -ENOMEM;
- goto traces_error;
- }
- strncpy(new_trace->trace_name, trace_name, NAME_MAX);
- new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
- ust_channels_overwrite_by_default,
- ust_channels_request_collection_by_default, 1);
- if (!new_trace->channels) {
- ERR("Unable to allocate memory for chaninfo %s\n", trace_name);
- err = -ENOMEM;
- goto trace_free;
- }
-
- /*
- * Force metadata channel to active, no overwrite.
- */
- metadata_index = ltt_channels_get_index_from_name("metadata");
- WARN_ON(metadata_index < 0);
- new_trace->channels[metadata_index].overwrite = 0;
- new_trace->channels[metadata_index].active = 1;
-
- /*
- * Set hardcoded tracer defaults for some channels
- */
- for (chan = 0; chan < new_trace->nr_channels; chan++) {
- if (!(new_trace->channels[chan].active))
- continue;
-
- chantype = get_channel_type_from_name(
- ltt_channels_get_name_from_index(chan));
- new_trace->channels[chan].subbuf_size =
- chan_infos[chantype].def_subbufsize;
- new_trace->channels[chan].subbuf_cnt =
- chan_infos[chantype].def_subbufcount;
- }
-
- cds_list_add(&new_trace->list, <t_traces.setup_head);
- return 0;
-
-trace_free:
- free(new_trace);
-traces_error:
- return err;
-}
-
-
-int ltt_trace_setup(const char *trace_name)
-{
- int ret;
- ltt_lock_traces();
- ret = _ltt_trace_setup(trace_name);
- ltt_unlock_traces();
- return ret;
-}
-
-/* must be called from within a traces lock. */
-static void _ltt_trace_free(struct ust_trace *trace)
-{
- cds_list_del(&trace->list);
- free(trace);
-}
-
-int ltt_trace_set_type(const char *trace_name, const char *trace_type)
-{
- int err = 0;
- struct ust_trace *trace;
- struct ltt_transport *tran_iter, *transport = NULL;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- ERR("Trace not found %s", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- pthread_mutex_lock(<t_transport_mutex);
- cds_list_for_each_entry(tran_iter, <t_transport_list, node) {
- if (!strcmp(tran_iter->name, trace_type)) {
- transport = tran_iter;
- break;
- }
- }
- pthread_mutex_unlock(<t_transport_mutex);
-
- if (!transport) {
- ERR("Transport %s is not present", trace_type);
- err = -EINVAL;
- goto traces_error;
- }
-
- trace->transport = transport;
-
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-
-int ltt_trace_set_channel_subbufsize(const char *trace_name,
- const char *channel_name, unsigned int size)
-{
- int err = 0;
- struct ust_trace *trace;
- int index;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- ERR("Trace not found %s", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- index = ltt_channels_get_index_from_name(channel_name);
- if (index < 0) {
- ERR("Channel %s not found", channel_name);
- err = -ENOENT;
- goto traces_error;
- }
- trace->channels[index].subbuf_size = size;
-
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-
-int ltt_trace_set_channel_subbufcount(const char *trace_name,
- const char *channel_name, unsigned int cnt)
-{
- int err = 0;
- struct ust_trace *trace;
- int index;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- ERR("Trace not found %s", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- index = ltt_channels_get_index_from_name(channel_name);
- if (index < 0) {
- ERR("Channel %s not found", channel_name);
- err = -ENOENT;
- goto traces_error;
- }
- trace->channels[index].subbuf_cnt = cnt;
-
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-
-int ltt_trace_set_channel_enable(const char *trace_name,
- const char *channel_name, unsigned int enable)
-{
- int err = 0;
- struct ust_trace *trace;
- int index;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- ERR("Trace not found %s", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- /*
- * Data in the metadata channel (marker info) is necessary to be able to
- * read the trace, so we always enable this channel.
- */
- if (!enable && !strcmp(channel_name, "metadata")) {
- ERR("Trying to disable metadata channel");
- err = -EINVAL;
- goto traces_error;
- }
-
- index = ltt_channels_get_index_from_name(channel_name);
- if (index < 0) {
- ERR("Channel %s not found", channel_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- trace->channels[index].active = enable;
-
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-
-int ltt_trace_set_channel_overwrite(const char *trace_name,
- const char *channel_name, unsigned int overwrite)
-{
- int err = 0;
- struct ust_trace *trace;
- int index;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- ERR("Trace not found %s", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- /*
- * Always put the metadata channel in non-overwrite mode:
- * it is a very low-traffic channel and cannot afford to have its
- * data overwritten, since this data (marker info) is necessary to be
- * able to read the trace.
- */
- if (overwrite && !strcmp(channel_name, "metadata")) {
- ERR("Trying to set metadata channel to overwrite mode");
- err = -EINVAL;
- goto traces_error;
- }
-
- index = ltt_channels_get_index_from_name(channel_name);
- if (index < 0) {
- ERR("Channel %s not found", channel_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- trace->channels[index].overwrite = overwrite;
-
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-
-int ltt_trace_alloc(const char *trace_name)
-{
- int err = 0;
- struct ust_trace *trace;
- unsigned int subbuf_size, subbuf_cnt;
- int chan;
- const char *channel_name;
-
- ltt_lock_traces();
-
- if (_ltt_trace_find(trace_name)) { /* Trace already allocated */
- err = 1;
- goto traces_error;
- }
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- ERR("Trace not found %s", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- urcu_ref_init(&trace->urcu_ref);
- urcu_ref_init(&trace->ltt_transport_urcu_ref);
- trace->active = 0;
- trace->freq_scale = trace_clock_freq_scale();
-
- if (!trace->transport) {
- ERR("Transport is not set");
- err = -EINVAL;
- goto transport_error;
- }
- trace->ops = &trace->transport->ops;
-
- trace->start_freq = trace_clock_frequency();
- trace->start_tsc = trace_clock_read64();
- gettimeofday(&trace->start_time, NULL); //ust// changed /* FIXME: is this ok? */
-
- for (chan = 0; chan < trace->nr_channels; chan++) {
- if (!(trace->channels[chan].active))
- continue;
-
- channel_name = ltt_channels_get_name_from_index(chan);
- WARN_ON(!channel_name);
- subbuf_size = trace->channels[chan].subbuf_size;
- subbuf_cnt = trace->channels[chan].subbuf_cnt;
- prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
- err = trace->ops->create_channel(trace_name, trace,
- channel_name,
- &trace->channels[chan],
- subbuf_size,
- subbuf_cnt,
- trace->channels[chan].overwrite);
- if (err != 0) {
- ERR("Cannot create channel %s", channel_name);
- goto create_channel_error;
- }
- }
-
- cds_list_del(&trace->list);
- cds_list_add_rcu(&trace->list, <t_traces.head);
-
- ltt_unlock_traces();
-
- return 0;
-
-create_channel_error:
- for (chan--; chan >= 0; chan--)
- if (trace->channels[chan].active)
- trace->ops->remove_channel(&trace->channels[chan]);
-
-transport_error:
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-
-/* Must be called while sure that trace is in the list. */
-static int _ltt_trace_destroy(struct ust_trace *trace)
-{
- int err = -EPERM;
-
- if (trace == NULL) {
- err = -ENOENT;
- goto traces_error;
- }
- if (trace->active) {
- ERR("Can't destroy trace %s : tracer is active", trace->trace_name);
- err = -EBUSY;
- goto active_error;
- }
-
- cds_list_del_rcu(&trace->list);
- synchronize_rcu();
-
- return 0;
-
-active_error:
-traces_error:
- return err;
-}
-
-/* Sleepable part of the destroy */
-static void __ltt_trace_destroy(struct ust_trace *trace, int drop)
-{
- int i;
- struct ust_channel *chan;
-
- if(!drop) {
- for (i = 0; i < trace->nr_channels; i++) {
- chan = &trace->channels[i];
- if (chan->active)
- trace->ops->finish_channel(chan);
- }
- }
-
- /*
- * The currently destroyed trace is not in the trace list anymore,
- * so it's safe to call the async wakeup ourself. It will deliver
- * the last subbuffers.
- */
- trace_async_wakeup(trace);
-
- for (i = 0; i < trace->nr_channels; i++) {
- chan = &trace->channels[i];
- if (chan->active)
- trace->ops->remove_channel(chan);
- }
-
- urcu_ref_put(&trace->ltt_transport_urcu_ref, ltt_release_transport);
-
- urcu_ref_put(&trace->urcu_ref, ltt_release_trace);
-}
-
-int ltt_trace_destroy(const char *trace_name, int drop)
-{
- int err = 0;
- struct ust_trace *trace;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find(trace_name);
- if (trace) {
- err = _ltt_trace_destroy(trace);
- if (err)
- goto error;
-
- ltt_unlock_traces();
-
- __ltt_trace_destroy(trace, drop);
-
- return 0;
- }
-
- trace = _ltt_trace_find_setup(trace_name);
- if (trace) {
- _ltt_trace_free(trace);
- ltt_unlock_traces();
- return 0;
- }
-
- err = -ENOENT;
-
-error:
- ltt_unlock_traces();
- return err;
-}
-
-/* must be called from within a traces lock. */
-static int _ltt_trace_start(struct ust_trace *trace)
-{
- int err = 0;
-
- if (trace == NULL) {
- err = -ENOENT;
- goto traces_error;
- }
- if (trace->active)
- DBG("Tracing already active for trace %s", trace->trace_name);
- trace->active = 1;
- /* Read by trace points without protection : be careful */
- ltt_traces.num_active_traces++;
- return err;
-
-traces_error:
- return err;
-}
-
-int ltt_trace_start(const char *trace_name)
-{
- int err = 0;
- struct ust_trace *trace;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find(trace_name);
- err = _ltt_trace_start(trace);
- if (err)
- goto no_trace;
-
- ltt_unlock_traces();
-
- /*
- * Call the process-wide state dump.
- * Notice that there is no protection on the trace : that's exactly
- * why we iterate on the list and check for trace equality instead of
- * directly using this trace handle inside the logging function: we want
- * to record events only in a single trace in the trace session list.
- */
-
- ltt_dump_ust_marker_state(trace);
-
- return err;
-
- /* Error handling */
-no_trace:
- ltt_unlock_traces();
- return err;
-}
-
-/* must be called from within traces lock */
-static int _ltt_trace_stop(struct ust_trace *trace)
-{
- int err = -EPERM;
-
- if (trace == NULL) {
- err = -ENOENT;
- goto traces_error;
- }
- if (!trace->active)
- DBG("LTT : Tracing not active for trace %s", trace->trace_name);
- if (trace->active) {
- trace->active = 0;
- ltt_traces.num_active_traces--;
- }
- return 0;
-
-traces_error:
- return err;
-}
-
-int ltt_trace_stop(const char *trace_name)
-{
- int err = 0;
- struct ust_trace *trace;
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
- err = _ltt_trace_stop(trace);
- ltt_unlock_traces();
- return err;
-}
+++ /dev/null
-/**
- * ltt-type-serializer.c
- *
- * LTTng specialized type serializer.
- *
- * Copyright Mathieu Desnoyers, 2008.
- *
- * Dual LGPL v2.1/GPL v2 license.
- */
-
-/* This file contains functions for tracepoint custom probes support. */
-
-#define _GNU_SOURCE
-#define _LGPL_SOURCE
-#include <urcu/rculist.h>
-#include <ust/core.h>
-#include <ust/clock.h>
-#include <urcu-bp.h>
-#include "tracer.h"
-#include "type-serializer.h"
-
-notrace
-void _ltt_specialized_trace(const struct ust_marker *mdata, void *probe_data,
- void *serialize_private, unsigned int data_size,
- unsigned int largest_align)
-{
- int ret;
- uint16_t eID;
- size_t slot_size;
- unsigned int chan_index;
- struct ust_buffer *buf;
- struct ust_channel *chan;
- struct ust_trace *trace;
- u64 tsc;
- long buf_offset;
- int cpu;
- unsigned int rflags;
-
- /*
- * If we get here, it's probably because we have useful work to do.
- */
- if (unlikely(ltt_traces.num_active_traces == 0))
- return;
-
- rcu_read_lock();
- cpu = ust_get_cpu();
-
- /* Force volatile access. */
- CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
-
- /*
- * asm volatile and "memory" clobber prevent the compiler from moving
- * instructions out of the ltt nesting count. This is required to ensure
- * that probe side-effects which can cause recursion (e.g. unforeseen
- * traps, divisions by 0, ...) are triggered within the incremented
- * nesting count section.
- */
- cmm_barrier();
- eID = mdata->event_id;
- chan_index = mdata->channel_id;
-
- /*
- * Iterate on each trace, typically small number of active traces,
- * list iteration with prefetch is usually slower.
- */
- cds_list_for_each_entry_rcu(trace, <t_traces.head, list) {
- if (unlikely(!trace->active))
- continue;
-//ust// if (unlikely(!ltt_run_filter(trace, eID)))
-//ust// continue;
-#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
- rflags = LTT_RFLAG_ID_SIZE;
-#else
- if (unlikely(eID >= LTT_FREE_EVENTS))
- rflags = LTT_RFLAG_ID;
- else
- rflags = 0;
-#endif
- /*
- * Skip channels added after trace creation.
- */
- if (unlikely(chan_index >= trace->nr_channels))
- continue;
- chan = &trace->channels[chan_index];
- if (!chan->active)
- continue;
-
- /* If a new cpu was plugged since the trace was started, we did
- * not add it to the trace, and therefore we write the event to
- * cpu 0.
- */
- if(cpu >= chan->n_cpus) {
- cpu = 0;
- }
-
- /* reserve space : header and data */
- ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
- cpu, &buf, &slot_size, &buf_offset, &tsc,
- &rflags);
- if (unlikely(ret < 0))
- continue; /* buffer full */
-
- /* Out-of-order write : header and data */
- buf_offset = ltt_write_event_header(chan, buf,
- buf_offset, eID, data_size,
- tsc, rflags);
- if (data_size) {
- buf_offset += ltt_align(buf_offset, largest_align);
- ust_buffers_write(buf, buf_offset,
- serialize_private, data_size);
- buf_offset += data_size;
- }
- /* Out-of-order commit */
- ltt_commit_slot(chan, buf, buf_offset, data_size, slot_size);
- }
- /*
- * asm volatile and "memory" clobber prevent the compiler from moving
- * instructions out of the ltt nesting count. This is required to ensure
- * that probe side-effects which can cause recursion (e.g. unforeseen
- * traps, divisions by 0, ...) are triggered within the incremented
- * nesting count section.
- */
- cmm_barrier();
- CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
- rcu_read_unlock();
-}
+++ /dev/null
-#ifndef _LTT_TYPE_SERIALIZER_H
-#define _LTT_TYPE_SERIALIZER_H
-
-#include <ust/marker.h>
-#include <ust/marker-internal.h>
-#include <ust/core.h>
-#include "buffers.h"
-
-/*
- * largest_align must be non-zero, equal to the minimum between the largest type
- * and sizeof(void *).
- */
-extern void _ltt_specialized_trace(const struct ust_marker *mdata, void *probe_data,
- void *serialize_private, unsigned int data_size,
- unsigned int largest_align);
-
-/*
- * Clamp largest_align so that 1 <= largest_align <= sizeof(void *), making the
- * interface dumb-proof: 0 is changed into 1, and e.g. an unsigned long long
- * alignment is changed into sizeof(void *) on 32-bit architectures.
- */
-static inline void ltt_specialized_trace(const struct ust_marker *mdata,
- void *probe_data,
- void *serialize_private, unsigned int data_size,
- unsigned int largest_align)
-{
- largest_align = min_t(unsigned int, largest_align, sizeof(void *));
- largest_align = max_t(unsigned int, largest_align, 1);
- _ltt_specialized_trace(mdata, probe_data, serialize_private, data_size,
- largest_align);
-}
-
-/*
- * Type serializer definitions.
- */
-
-/*
- * Return size of structure without end-of-structure padding.
- */
-#define serialize_sizeof(type) offsetof(typeof(type), end_field)
-
-struct serialize_long_int {
- unsigned long f1;
- unsigned int f2;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_int_int_long {
- unsigned int f1;
- unsigned int f2;
- unsigned long f3;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_int_int_short {
- unsigned int f1;
- unsigned int f2;
- unsigned short f3;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_long_long_long {
- unsigned long f1;
- unsigned long f2;
- unsigned long f3;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_long_long_int {
- unsigned long f1;
- unsigned long f2;
- unsigned int f3;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_long_long_short_char {
- unsigned long f1;
- unsigned long f2;
- unsigned short f3;
- unsigned char f4;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_long_long_short {
- unsigned long f1;
- unsigned long f2;
- unsigned short f3;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_long_short_char {
- unsigned long f1;
- unsigned short f2;
- unsigned char f3;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_long_short {
- unsigned long f1;
- unsigned short f2;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_long_char {
- unsigned long f1;
- unsigned char f2;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_sizet_int {
- size_t f1;
- unsigned int f2;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_long_long_sizet_int {
- unsigned long f1;
- unsigned long f2;
- size_t f3;
- unsigned int f4;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_long_long_sizet_int_int {
- unsigned long f1;
- unsigned long f2;
- size_t f3;
- unsigned int f4;
- unsigned int f5;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_l4421224411111 {
- unsigned long f1;
- uint32_t f2;
- uint32_t f3;
- uint16_t f4;
- uint8_t f5;
- uint16_t f6;
- uint16_t f7;
- uint32_t f8;
- uint32_t f9;
- uint8_t f10;
- uint8_t f11;
- uint8_t f12;
- uint8_t f13;
- uint8_t f14;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-struct serialize_l214421224411111 {
- unsigned long f1;
- uint16_t f2;
- uint8_t f3;
- uint32_t f4;
- uint32_t f5;
- uint16_t f6;
- uint8_t f7;
- uint16_t f8;
- uint16_t f9;
- uint32_t f10;
- uint32_t f11;
- uint8_t f12;
- uint8_t f13;
- uint8_t f14;
- uint8_t f15;
- uint8_t f16;
- uint8_t end_field[0];
-} LTT_ALIGN;
-
-struct serialize_l4412228 {
- unsigned long f1;
- uint32_t f2;
- uint32_t f3;
- uint8_t f4;
- uint16_t f5;
- uint16_t f6;
- uint16_t f7;
- uint64_t f8;
- unsigned char end_field[0];
-} LTT_ALIGN;
-
-#endif /* _LTT_TYPE_SERIALIZER_H */
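
As a minimal sketch of how the pieces above fit together (annotation only, not part of the patch; the probe name and field values are hypothetical), a custom probe could serialize a (long, int) pair with serialize_long_int and hand it to ltt_specialized_trace():

/* Hypothetical custom probe recording an (address, count) pair. */
static void example_probe(const struct ust_marker *mdata,
			  unsigned long addr, unsigned int count)
{
	struct serialize_long_int payload;

	payload.f1 = addr;
	payload.f2 = count;

	/*
	 * serialize_sizeof() excludes end-of-structure padding; the largest
	 * field is an unsigned long, so largest_align is sizeof(long).
	 */
	ltt_specialized_trace(mdata, NULL, &payload,
			      serialize_sizeof(payload), sizeof(long));
}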