-SUBDIRS = libmarkers libtracectl libtracing hello libmallocwrap
+SUBDIRS = libust hello libmallocwrap
#! /bin/sh
set -x
-aclocal -I config
+aclocal
libtoolize --force --copy
autoheader
automake --add-missing --copy
AC_CONFIG_AUX_DIR(make_scripts)
AM_INIT_AUTOMAKE([0.0 foreign])
AC_CONFIG_SRCDIR([ust/localerr.h])
-#AC_CONFIG_HEADERS([config.h])
+AC_CONFIG_HEADERS([config.h])
# Checks for programs.
AC_PROG_CC
# ust/Makefile
# ustd/Makefile])
-AC_CONFIG_FILES([Makefile libmarkers/Makefile libtracectl/Makefile libtracing/Makefile hello/Makefile libmallocwrap/Makefile])
+AC_CONFIG_FILES([Makefile libust/Makefile hello/Makefile libmallocwrap/Makefile])
AC_OUTPUT
bin_PROGRAMS = hello
-hello_SOURCES = hello.c tp.c tp.h $(top_builddir)/share/kref.c
-hello_LDADD = $(top_builddir)/libmarkers/libmarkers.la $(top_builddir)/libtracectl/libtracectl.la $(top_builddir)/libtracing/libtracing.la @URCU_PATH@/liburcu.so
-##hello_LDFLAGS = -L$(top_builddir)/libmarkers
+hello_SOURCES = hello.c tp.c tp.h
+hello_LDADD = $(top_builddir)/libust/libust.la @URCU_PATH@/liburcu.so
INCLUDES = -I$(top_builddir)/share
INCLUDES += -I@URCU_PATH@
INCLUDES += -I@KCOMPAT_PATH@
-INCLUDES += -I$(top_builddir)/libtracing
-INCLUDES += -I$(top_builddir)/libmarkers
-INCLUDES += -I$(top_builddir)/libtracectl
+INCLUDES += -I$(top_builddir)/libust
--- /dev/null
+lib_LTLIBRARIES = libmallocwrap.la
+libmallocwrap_la_SOURCES = mallocwrap.c
+noinst_SCRIPTS = run
+
+INCLUDES = -I$(top_builddir)/share
+INCLUDES += -I@URCU_PATH@
+INCLUDES += -I@KCOMPAT_PATH@
+INCLUDES += -I$(top_builddir)/libust
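
mallocwrap.c itself is not part of this hunk. As a rough sketch only (not the actual implementation), a marker-instrumented malloc wrapper matching the build rule above could use the usual dlsym(RTLD_NEXT) interposition technique; the channel/event names (ust, malloc) and the format string below are assumptions:

/* Sketch only: interpose malloc() and fire a marker on each call.
 * A real wrapper must also guard against recursion if the tracer
 * itself allocates memory. */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdlib.h>
#include "marker.h"

void *malloc(size_t size)
{
        static void *(*real_malloc)(size_t);
        void *ptr;

        if (!real_malloc)
                real_malloc = dlsym(RTLD_NEXT, "malloc");
        ptr = real_malloc(size);
        trace_mark(ust, malloc, "size %zu ptr %p", size, ptr);
        return ptr;
}
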
+++ /dev/null
-lib_LTLIBRARIES = libmarkers.la
-libmarkers_la_SOURCES = marker.c tracepoint.c immediate.h jhash.h rcupdate.h marker.h tracepoint.h
-libmarkers_la_LIBADD = @URCU_PATH@/liburcu.so
-
-INCLUDES = -I$(top_builddir)/share
-INCLUDES += -I@URCU_PATH@
-INCLUDES += -I@KCOMPAT_PATH@
-INCLUDES += -I$(top_builddir)/libtracing
+++ /dev/null
-#ifndef _LINUX_IMMEDIATE_H
-#define _LINUX_IMMEDIATE_H
-
-/*
- * Immediate values can be updated at runtime and save cache lines.
- *
- * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
- *
- * This file is released under the GPLv2.
- * See the file COPYING for more details.
- */
-
-#ifdef USE_IMMEDIATE
-
-#include <asm/immediate.h>
-
-/**
- * imv_set - set immediate variable (with locking)
- * @name: immediate value name
- * @i: required value
- *
- * Sets the value of @name, taking the module_mutex if required by
- * the architecture.
- */
-#define imv_set(name, i) \
- do { \
- name##__imv = (i); \
- core_imv_update(); \
- module_imv_update(); \
- } while (0)
-
-/*
- * Internal update functions.
- */
-extern void core_imv_update(void);
-extern void imv_update_range(const struct __imv *begin,
- const struct __imv *end);
-extern void imv_unref_core_init(void);
-extern void imv_unref(struct __imv *begin, struct __imv *end, void *start,
- unsigned long size);
-
-#else
-
-/*
- * Generic immediate values: a simple, standard, memory load.
- */
-
-/**
- * imv_read - read immediate variable
- * @name: immediate value name
- *
- * Reads the value of @name.
- */
-#define imv_read(name) _imv_read(name)
-
-/**
- * imv_set - set immediate variable (with locking)
- * @name: immediate value name
- * @i: required value
- *
- * Sets the value of @name, taking the module_mutex if required by
- * the architecture.
- */
-#define imv_set(name, i) (name##__imv = (i))
-
-static inline void core_imv_update(void) { }
-static inline void imv_unref_core_init(void) { }
-
-#endif
-
-#define DECLARE_IMV(type, name) extern __typeof__(type) name##__imv
-#define DEFINE_IMV(type, name) __typeof__(type) name##__imv
-
-#define EXPORT_IMV_SYMBOL(name) EXPORT_SYMBOL(name##__imv)
-#define EXPORT_IMV_SYMBOL_GPL(name) EXPORT_SYMBOL_GPL(name##__imv)
-
-/**
- * _imv_read - Read immediate value with standard memory load.
- * @name: immediate value name
- *
- * Force a plain data read of the immediate value instead of the immediate
- * value based mechanism. Useful for reading data in __init and __exit
- * sections.
- */
-#define _imv_read(name) (name##__imv)
-
-#endif
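
For reference, a minimal usage sketch of the API above, assuming the generic (memory-load) fallback; the flag name and the do_trace() helper are hypothetical:

extern void do_trace(void);             /* hypothetical slow path */

DEFINE_IMV(char, tracing_enabled);      /* defines tracing_enabled__imv */

void maybe_trace(void)
{
        /* Generic case: a plain load of tracing_enabled__imv. */
        if (imv_read(tracing_enabled))
                do_trace();
}

void enable_tracing(void)
{
        /* Generic case: a plain store. With USE_IMMEDIATE this would
         * also trigger core_imv_update()/module_imv_update() patching. */
        imv_set(tracing_enabled, 1);
}
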
+++ /dev/null
-/*
- * Copyright (C) 2007 Mathieu Desnoyers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-//ust// #include <linux/module.h>
-//ust// #include <linux/mutex.h>
-//ust// #include <linux/types.h>
-#include "jhash.h"
-#include "list.h"
-#include "rcupdate.h"
-//ust// #include <linux/marker.h>
-#include <errno.h>
-//ust// #include <linux/slab.h>
-//ust// #include <linux/immediate.h>
-//ust// #include <linux/sched.h>
-//ust// #include <linux/uaccess.h>
-//ust// #include <linux/user_marker.h>
-//ust// #include <linux/ltt-tracer.h>
-
-#include "marker.h"
-#include "kernelcompat.h"
-#include "usterr.h"
-#include "channels.h"
-#include "tracercore.h"
-#include "tracer.h"
-
-extern struct marker __start___markers[] __attribute__((visibility("hidden")));
-extern struct marker __stop___markers[] __attribute__((visibility("hidden")));
-
-/* Set to 1 to enable marker debug output */
-static const int marker_debug;
-
-/*
- * markers_mutex nests inside module_mutex. Markers mutex protects the builtin
- * and module markers and the hash table.
- */
-static DEFINE_MUTEX(markers_mutex);
-
-void lock_markers(void)
-{
- mutex_lock(&markers_mutex);
-}
-
-void unlock_markers(void)
-{
- mutex_unlock(&markers_mutex);
-}
-
-/*
- * Marker hash table, containing the active markers.
- * Protected by module_mutex.
- */
-#define MARKER_HASH_BITS 6
-#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
-static struct hlist_head marker_table[MARKER_TABLE_SIZE];
-
-/*
- * Note about RCU:
- * It is used to make sure every handler has finished using its private data
- * between two consecutive operations (add or remove) on a given marker. It is
- * also used to delay the free of the multiple-probes array until a quiescent
- * state is reached.
- * Marker entry modifications are protected by the markers_mutex.
- */
-struct marker_entry {
- struct hlist_node hlist;
- char *format;
- char *name;
- /* Probe wrapper */
- void (*call)(const struct marker *mdata, void *call_private, ...);
- struct marker_probe_closure single;
- struct marker_probe_closure *multi;
- int refcount; /* Number of times armed. 0 if disarmed. */
- struct rcu_head rcu;
- void *oldptr;
- int rcu_pending;
- u16 channel_id;
- u16 event_id;
- unsigned char ptype:1;
- unsigned char format_allocated:1;
- char channel[0]; /* Contains channel'\0'name'\0'format'\0' */
-};
-
-#ifdef CONFIG_MARKERS_USERSPACE
-static void marker_update_processes(void);
-#else
-static void marker_update_processes(void)
-{
-}
-#endif
-
-/**
- * __mark_empty_function - Empty probe callback
- * @mdata: marker data
- * @probe_private: probe private data
- * @call_private: call site private data
- * @fmt: format string
- * @...: variable argument list
- *
- * Empty callback provided as a probe to the markers. By providing this to a
- * disabled marker, we make sure the execution flow is always valid even
- * though the function pointer change and the marker enabling are two
- * distinct operations that modify the execution flow of preemptible code.
- */
-notrace void __mark_empty_function(const struct marker *mdata,
- void *probe_private, void *call_private, const char *fmt, va_list *args)
-{
-}
-//ust// EXPORT_SYMBOL_GPL(__mark_empty_function);
-
-/*
- * marker_probe_cb - Callback that prepares the variable argument list for probes.
- * @mdata: pointer of type struct marker
- * @call_private: caller site private data
- * @...: Variable argument list.
- *
- * Since we do not use "typical" pointer based RCU in the 1 argument case, we
- * need to put a full smp_rmb() in this branch. This is why we do not use
- * rcu_dereference() for the pointer read.
- */
-notrace void marker_probe_cb(const struct marker *mdata,
- void *call_private, ...)
-{
- va_list args;
- char ptype;
-
- /*
- * rcu_read_lock_sched does two things: it disables preemption to make
- * sure the teardown of the callbacks can be done correctly when they
- * are in modules, and it ensures RCU read coherency.
- */
-//ust// rcu_read_lock_sched_notrace();
- ptype = mdata->ptype;
- if (likely(!ptype)) {
- marker_probe_func *func;
- /* Must read the ptype before ptr. They are not data dependent,
- * so we put an explicit smp_rmb() here. */
- smp_rmb();
- func = mdata->single.func;
- /* Must read the ptr before private data. They are not data
- * dependent, so we put an explicit smp_rmb() here. */
- smp_rmb();
- va_start(args, call_private);
- func(mdata, mdata->single.probe_private, call_private,
- mdata->format, &args);
- va_end(args);
- } else {
- struct marker_probe_closure *multi;
- int i;
- /*
- * Read mdata->ptype before mdata->multi.
- */
- smp_rmb();
- multi = mdata->multi;
- /*
- * multi points to an array, therefore accessing the array
- * depends on reading multi. However, even in this case,
- * we must ensure that the pointer is read _before_ the array
- * data. Same as rcu_dereference, but we need a full smp_rmb()
- * in the fast path, so put the explicit barrier here.
- */
- smp_read_barrier_depends();
- for (i = 0; multi[i].func; i++) {
- va_start(args, call_private);
- multi[i].func(mdata, multi[i].probe_private,
- call_private, mdata->format, &args);
- va_end(args);
- }
- }
-//ust// rcu_read_unlock_sched_notrace();
-}
-//ust// EXPORT_SYMBOL_GPL(marker_probe_cb);
-
-/*
- * marker_probe_cb_noarg - Callback that does not prepare the variable argument list.
- * @mdata: pointer of type struct marker
- * @call_private: caller site private data
- * @...: Variable argument list.
- *
- * Should be connected to markers "MARK_NOARGS".
- */
-static notrace void marker_probe_cb_noarg(const struct marker *mdata,
- void *call_private, ...)
-{
- va_list args; /* not initialized */
- char ptype;
-
-//ust// rcu_read_lock_sched_notrace();
- ptype = mdata->ptype;
- if (likely(!ptype)) {
- marker_probe_func *func;
- /* Must read the ptype before ptr. They are not data dependent,
- * so we put an explicit smp_rmb() here. */
- smp_rmb();
- func = mdata->single.func;
- /* Must read the ptr before private data. They are not data
- * dependent, so we put an explicit smp_rmb() here. */
- smp_rmb();
- func(mdata, mdata->single.probe_private, call_private,
- mdata->format, &args);
- } else {
- struct marker_probe_closure *multi;
- int i;
- /*
- * Read mdata->ptype before mdata->multi.
- */
- smp_rmb();
- multi = mdata->multi;
- /*
- * multi points to an array, therefore accessing the array
- * depends on reading multi. However, even in this case,
- * we must ensure that the pointer is read _before_ the array
- * data. Same as rcu_dereference, but we need a full smp_rmb()
- * in the fast path, so put the explicit barrier here.
- */
- smp_read_barrier_depends();
- for (i = 0; multi[i].func; i++)
- multi[i].func(mdata, multi[i].probe_private,
- call_private, mdata->format, &args);
- }
-//ust// rcu_read_unlock_sched_notrace();
-}
-
-static void free_old_closure(struct rcu_head *head)
-{
- struct marker_entry *entry = container_of(head,
- struct marker_entry, rcu);
- kfree(entry->oldptr);
- /* Make sure we free the data before setting the pending flag to 0 */
- smp_wmb();
- entry->rcu_pending = 0;
-}
-
-static void debug_print_probes(struct marker_entry *entry)
-{
- int i;
-
- if (!marker_debug)
- return;
-
- if (!entry->ptype) {
- printk(KERN_DEBUG "Single probe : %p %p\n",
- entry->single.func,
- entry->single.probe_private);
- } else {
- for (i = 0; entry->multi[i].func; i++)
- printk(KERN_DEBUG "Multi probe %d : %p %p\n", i,
- entry->multi[i].func,
- entry->multi[i].probe_private);
- }
-}
-
-static struct marker_probe_closure *
-marker_entry_add_probe(struct marker_entry *entry,
- marker_probe_func *probe, void *probe_private)
-{
- int nr_probes = 0;
- struct marker_probe_closure *old, *new;
-
- WARN_ON(!probe);
-
- debug_print_probes(entry);
- old = entry->multi;
- if (!entry->ptype) {
- if (entry->single.func == probe &&
- entry->single.probe_private == probe_private)
- return ERR_PTR(-EBUSY);
- if (entry->single.func == __mark_empty_function) {
- /* 0 -> 1 probes */
- entry->single.func = probe;
- entry->single.probe_private = probe_private;
- entry->refcount = 1;
- entry->ptype = 0;
- debug_print_probes(entry);
- return NULL;
- } else {
- /* 1 -> 2 probes */
- nr_probes = 1;
- old = NULL;
- }
- } else {
- /* (N -> N+1), (N != 0, 1) probes */
- for (nr_probes = 0; old[nr_probes].func; nr_probes++)
- if (old[nr_probes].func == probe
- && old[nr_probes].probe_private
- == probe_private)
- return ERR_PTR(-EBUSY);
- }
- /* + 2 : one for new probe, one for NULL func */
- new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure),
- GFP_KERNEL);
- if (new == NULL)
- return ERR_PTR(-ENOMEM);
- if (!old)
- new[0] = entry->single;
- else
- memcpy(new, old,
- nr_probes * sizeof(struct marker_probe_closure));
- new[nr_probes].func = probe;
- new[nr_probes].probe_private = probe_private;
- entry->refcount = nr_probes + 1;
- entry->multi = new;
- entry->ptype = 1;
- debug_print_probes(entry);
- return old;
-}
-
-static struct marker_probe_closure *
-marker_entry_remove_probe(struct marker_entry *entry,
- marker_probe_func *probe, void *probe_private)
-{
- int nr_probes = 0, nr_del = 0, i;
- struct marker_probe_closure *old, *new;
-
- old = entry->multi;
-
- debug_print_probes(entry);
- if (!entry->ptype) {
- /* 0 -> N is an error */
- WARN_ON(entry->single.func == __mark_empty_function);
- /* 1 -> 0 probes */
- WARN_ON(probe && entry->single.func != probe);
- WARN_ON(entry->single.probe_private != probe_private);
- entry->single.func = __mark_empty_function;
- entry->refcount = 0;
- entry->ptype = 0;
- debug_print_probes(entry);
- return NULL;
- } else {
- /* (N -> M), (N > 1, M >= 0) probes */
- for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
- if ((!probe || old[nr_probes].func == probe)
- && old[nr_probes].probe_private
- == probe_private)
- nr_del++;
- }
- }
-
- if (nr_probes - nr_del == 0) {
- /* N -> 0, (N > 1) */
- entry->single.func = __mark_empty_function;
- entry->refcount = 0;
- entry->ptype = 0;
- } else if (nr_probes - nr_del == 1) {
- /* N -> 1, (N > 1) */
- for (i = 0; old[i].func; i++)
- if ((probe && old[i].func != probe) ||
- old[i].probe_private != probe_private)
- entry->single = old[i];
- entry->refcount = 1;
- entry->ptype = 0;
- } else {
- int j = 0;
- /* N -> M, (N > 1, M > 1) */
- /* + 1 for NULL */
- new = kzalloc((nr_probes - nr_del + 1)
- * sizeof(struct marker_probe_closure), GFP_KERNEL);
- if (new == NULL)
- return ERR_PTR(-ENOMEM);
- for (i = 0; old[i].func; i++)
- if ((probe && old[i].func != probe) ||
- old[i].probe_private != probe_private)
- new[j++] = old[i];
- entry->refcount = nr_probes - nr_del;
- entry->ptype = 1;
- entry->multi = new;
- }
- debug_print_probes(entry);
- return old;
-}
-
-/*
- * Get marker if the marker is present in the marker hash table.
- * Must be called with markers_mutex held.
- * Returns NULL if not present.
- */
-static struct marker_entry *get_marker(const char *channel, const char *name)
-{
- struct hlist_head *head;
- struct hlist_node *node;
- struct marker_entry *e;
- size_t channel_len = strlen(channel) + 1;
- size_t name_len = strlen(name) + 1;
- u32 hash;
-
- hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
- head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
- if (!strcmp(channel, e->channel) && !strcmp(name, e->name))
- return e;
- }
- return NULL;
-}
-
-/*
- * Add the marker to the marker hash table. Must be called with markers_mutex
- * held.
- */
-static struct marker_entry *add_marker(const char *channel, const char *name,
- const char *format)
-{
- struct hlist_head *head;
- struct hlist_node *node;
- struct marker_entry *e;
- size_t channel_len = strlen(channel) + 1;
- size_t name_len = strlen(name) + 1;
- size_t format_len = 0;
- u32 hash;
-
- hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
- if (format)
- format_len = strlen(format) + 1;
- head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
- if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
- printk(KERN_NOTICE
- "Marker %s.%s busy\n", channel, name);
- return ERR_PTR(-EBUSY); /* Already there */
- }
- }
- /*
- * Using kmalloc here to allocate a variable length element. Could
- * cause some memory fragmentation if overused.
- */
- e = kmalloc(sizeof(struct marker_entry)
- + channel_len + name_len + format_len,
- GFP_KERNEL);
- if (!e)
- return ERR_PTR(-ENOMEM);
- memcpy(e->channel, channel, channel_len);
- e->name = &e->channel[channel_len];
- memcpy(e->name, name, name_len);
- if (format) {
- e->format = &e->name[channel_len + name_len];
- memcpy(e->format, format, format_len);
- if (strcmp(e->format, MARK_NOARGS) == 0)
- e->call = marker_probe_cb_noarg;
- else
- e->call = marker_probe_cb;
- trace_mark(metadata, core_marker_format,
- "channel %s name %s format %s",
- e->channel, e->name, e->format);
- } else {
- e->format = NULL;
- e->call = marker_probe_cb;
- }
- e->single.func = __mark_empty_function;
- e->single.probe_private = NULL;
- e->multi = NULL;
- e->ptype = 0;
- e->format_allocated = 0;
- e->refcount = 0;
- e->rcu_pending = 0;
- hlist_add_head(&e->hlist, head);
- return e;
-}
-
-/*
- * Remove the marker from the marker hash table. Must be called with mutex_lock
- * held.
- */
-static int remove_marker(const char *channel, const char *name)
-{
- struct hlist_head *head;
- struct hlist_node *node;
- struct marker_entry *e;
- int found = 0;
- size_t channel_len = strlen(channel) + 1;
- size_t name_len = strlen(name) + 1;
- u32 hash;
- int ret;
-
- hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
- head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
- if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
- found = 1;
- break;
- }
- }
- if (!found)
- return -ENOENT;
- if (e->single.func != __mark_empty_function)
- return -EBUSY;
- hlist_del(&e->hlist);
- if (e->format_allocated)
- kfree(e->format);
- ret = ltt_channels_unregister(e->channel);
- WARN_ON(ret);
- /* Make sure the call_rcu has been executed */
- if (e->rcu_pending)
- rcu_barrier_sched();
- kfree(e);
- return 0;
-}
-
-/*
- * Set the mark_entry format to the format found in the element.
- */
-static int marker_set_format(struct marker_entry *entry, const char *format)
-{
- entry->format = kstrdup(format, GFP_KERNEL);
- if (!entry->format)
- return -ENOMEM;
- entry->format_allocated = 1;
-
- trace_mark(metadata, core_marker_format,
- "channel %s name %s format %s",
- entry->channel, entry->name, entry->format);
- return 0;
-}
-
-/*
- * Sets the probe callback corresponding to one marker.
- */
-static int set_marker(struct marker_entry *entry, struct marker *elem,
- int active)
-{
- int ret = 0;
- WARN_ON(strcmp(entry->name, elem->name) != 0);
-
- if (entry->format) {
- if (strcmp(entry->format, elem->format) != 0) {
- printk(KERN_NOTICE
- "Format mismatch for probe %s "
- "(%s), marker (%s)\n",
- entry->name,
- entry->format,
- elem->format);
- return -EPERM;
- }
- } else {
- ret = marker_set_format(entry, elem->format);
- if (ret)
- return ret;
- }
-
- /*
- * probe_cb setup (statically known) is done here. It is
- * asynchronous with the rest of execution, therefore we only
- * pass from a "safe" callback (with argument) to an "unsafe"
- * callback (does not set arguments).
- */
- elem->call = entry->call;
- elem->channel_id = entry->channel_id;
- elem->event_id = entry->event_id;
- /*
- * Sanity check :
- * We only update the single probe private data when the ptr is
- * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
- */
- WARN_ON(elem->single.func != __mark_empty_function
- && elem->single.probe_private != entry->single.probe_private
- && !elem->ptype);
- elem->single.probe_private = entry->single.probe_private;
- /*
- * Make sure the private data is valid when we update the
- * single probe ptr.
- */
- smp_wmb();
- elem->single.func = entry->single.func;
- /*
- * We also make sure that the new probe callbacks array is consistent
- * before setting a pointer to it.
- */
- rcu_assign_pointer(elem->multi, entry->multi);
- /*
- * Update the function or multi probe array pointer before setting the
- * ptype.
- */
- smp_wmb();
- elem->ptype = entry->ptype;
-
-//ust// if (elem->tp_name && (active ^ _imv_read(elem->state))) {
-//ust// WARN_ON(!elem->tp_cb);
-//ust// /*
-//ust// * It is ok to directly call the probe registration because type
-//ust// * checking has been done in the __trace_mark_tp() macro.
-//ust// */
-//ust//
-//ust// if (active) {
-//ust// /*
-//ust// * try_module_get should always succeed because we hold
-//ust// * markers_mutex to get the tp_cb address.
-//ust// */
-//ust// ret = try_module_get(__module_text_address(
-//ust// (unsigned long)elem->tp_cb));
-//ust// BUG_ON(!ret);
-//ust// ret = tracepoint_probe_register_noupdate(
-//ust// elem->tp_name,
-//ust// elem->tp_cb);
-//ust// } else {
-//ust// ret = tracepoint_probe_unregister_noupdate(
-//ust// elem->tp_name,
-//ust// elem->tp_cb);
-//ust// /*
-//ust// * tracepoint_probe_update_all() must be called
-//ust// * before the module containing tp_cb is unloaded.
-//ust// */
-//ust// module_put(__module_text_address(
-//ust// (unsigned long)elem->tp_cb));
-//ust// }
-//ust// }
- elem->state__imv = active;
-
- return ret;
-}
-
-/*
- * Disable a marker and its probe callback.
- * Note: only waiting an RCU period after setting elem->call to the empty
- * function ensures that the original callback is not used anymore. This is
- * ensured by rcu_read_lock_sched around the call site.
- */
-static void disable_marker(struct marker *elem)
-{
- int ret;
-
- /* leave "call" as is. It is known statically. */
-//ust// if (elem->tp_name && _imv_read(elem->state)) {
-//ust// WARN_ON(!elem->tp_cb);
-//ust// /*
-//ust// * It is ok to directly call the probe registration because type
-//ust// * checking has been done in the __trace_mark_tp() macro.
-//ust// */
-//ust// ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
-//ust// elem->tp_cb);
-//ust// WARN_ON(ret);
-//ust// /*
-//ust// * tracepoint_probe_update_all() must be called
-//ust// * before the module containing tp_cb is unloaded.
-//ust// */
-//ust// module_put(__module_text_address((unsigned long)elem->tp_cb));
-//ust// }
- elem->state__imv = 0;
- elem->single.func = __mark_empty_function;
- /* Update the function before setting the ptype */
- smp_wmb();
- elem->ptype = 0; /* single probe */
- /*
- * Leave the private data and channel_id/event_id there, because removal
- * is racy and should be done only after an RCU period. These are never
- * used until the next initialization anyway.
- */
-}
-
-/**
- * marker_update_probe_range - Update a probe range
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Updates the probe callback corresponding to a range of markers.
- */
-void marker_update_probe_range(struct marker *begin,
- struct marker *end)
-{
- struct marker *iter;
- struct marker_entry *mark_entry;
-
- mutex_lock(&markers_mutex);
- for (iter = begin; iter < end; iter++) {
- mark_entry = get_marker(iter->channel, iter->name);
- if (mark_entry) {
- set_marker(mark_entry, iter, !!mark_entry->refcount);
- /*
- * ignore error, continue
- */
-
- /* This is added for UST. We emit a core_marker_id event
- * for markers that are already registered to a probe
- * upon library load. Otherwise, no core_marker_id will
- * be generated for these markers. Is this the right thing
- * to do?
- */
- trace_mark(metadata, core_marker_id,
- "channel %s name %s event_id %hu "
- "int #1u%zu long #1u%zu pointer #1u%zu "
- "size_t #1u%zu alignment #1u%u",
- iter->channel, iter->name, mark_entry->event_id,
- sizeof(int), sizeof(long), sizeof(void *),
- sizeof(size_t), ltt_get_alignment());
- } else {
- disable_marker(iter);
- }
- }
- mutex_unlock(&markers_mutex);
-}
-
-/*
- * Update probes, removing the faulty probes.
- *
- * Internal callback only changed before the first probe is connected to it.
- * Single probe private data can only be changed on 0 -> 1 and 2 -> 1
- * transitions. All other transitions will leave the old private data valid.
- * This makes the non-atomicity of the callback/private data updates valid.
- *
- * "special case" updates :
- * 0 -> 1 callback
- * 1 -> 0 callback
- * 1 -> 2 callbacks
- * 2 -> 1 callbacks
- * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates.
- * Side effect: marker_set_format may delete the marker entry (creating a
- * replacement).
- */
-static void marker_update_probes(void)
-{
- /* Core kernel markers */
-//ust// marker_update_probe_range(__start___markers, __stop___markers);
- /* Markers in modules. */
-//ust// module_update_markers();
- lib_update_markers();
-//ust// tracepoint_probe_update_all();
- /* Update immediate values */
- core_imv_update();
-//ust// module_imv_update(); /* FIXME: need to port for libs? */
- marker_update_processes();
-}
-
-/**
- * marker_probe_register - Connect a probe to a marker
- * @channel: marker channel
- * @name: marker name
- * @format: format string
- * @probe: probe handler
- * @probe_private: probe private data
- *
- * The private data must be a valid allocated memory address, or NULL.
- * Returns 0 if ok, error value on error.
- * The probe address must at least be aligned on the architecture pointer size.
- */
-int marker_probe_register(const char *channel, const char *name,
- const char *format, marker_probe_func *probe,
- void *probe_private)
-{
- struct marker_entry *entry;
- int ret = 0, ret_err;
- struct marker_probe_closure *old;
- int first_probe = 0;
-
- mutex_lock(&markers_mutex);
- entry = get_marker(channel, name);
- if (!entry) {
- first_probe = 1;
- entry = add_marker(channel, name, format);
- if (IS_ERR(entry))
- ret = PTR_ERR(entry);
- if (ret)
- goto end;
- ret = ltt_channels_register(channel);
- if (ret)
- goto error_remove_marker;
- ret = ltt_channels_get_index_from_name(channel);
- if (ret < 0)
- goto error_unregister_channel;
- entry->channel_id = ret;
- ret = ltt_channels_get_event_id(channel, name);
- if (ret < 0)
- goto error_unregister_channel;
- entry->event_id = ret;
- ret = 0;
- trace_mark(metadata, core_marker_id,
- "channel %s name %s event_id %hu "
- "int #1u%zu long #1u%zu pointer #1u%zu "
- "size_t #1u%zu alignment #1u%u",
- channel, name, entry->event_id,
- sizeof(int), sizeof(long), sizeof(void *),
- sizeof(size_t), ltt_get_alignment());
- } else if (format) {
- if (!entry->format)
- ret = marker_set_format(entry, format);
- else if (strcmp(entry->format, format))
- ret = -EPERM;
- if (ret)
- goto end;
- }
-
- /*
- * If we detect that a call_rcu is pending for this marker,
- * make sure it's executed now.
- */
- if (entry->rcu_pending)
- rcu_barrier_sched();
- old = marker_entry_add_probe(entry, probe, probe_private);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- if (first_probe)
- goto error_unregister_channel;
- else
- goto end;
- }
- mutex_unlock(&markers_mutex);
-
- marker_update_probes();
-
- mutex_lock(&markers_mutex);
- entry = get_marker(channel, name);
- if (!entry)
- goto end;
- if (entry->rcu_pending)
- rcu_barrier_sched();
- entry->oldptr = old;
- entry->rcu_pending = 1;
- /* write rcu_pending before calling the RCU callback */
- smp_wmb();
- call_rcu_sched(&entry->rcu, free_old_closure);
- /*synchronize_rcu(); free_old_closure();*/
- goto end;
-
-error_unregister_channel:
- ret_err = ltt_channels_unregister(channel);
- WARN_ON(ret_err);
-error_remove_marker:
- ret_err = remove_marker(channel, name);
- WARN_ON(ret_err);
-end:
- mutex_unlock(&markers_mutex);
- return ret;
-}
-//ust// EXPORT_SYMBOL_GPL(marker_probe_register);
-
-/**
- * marker_probe_unregister - Disconnect a probe from a marker
- * @channel: marker channel
- * @name: marker name
- * @probe: probe function pointer
- * @probe_private: probe private data
- *
- * Returns 0 on success, or -ENOENT if the marker is not found.
- * We do not need to call a synchronize_sched to make sure the probes have
- * finished running before doing a module unload, because the module unload
- * itself uses stop_machine(), which ensures that every preempt-disabled
- * section has finished.
- */
-int marker_probe_unregister(const char *channel, const char *name,
- marker_probe_func *probe, void *probe_private)
-{
- struct marker_entry *entry;
- struct marker_probe_closure *old;
- int ret = -ENOENT;
-
- mutex_lock(&markers_mutex);
- entry = get_marker(channel, name);
- if (!entry)
- goto end;
- if (entry->rcu_pending)
- rcu_barrier_sched();
- old = marker_entry_remove_probe(entry, probe, probe_private);
- mutex_unlock(&markers_mutex);
-
- marker_update_probes();
-
- mutex_lock(&markers_mutex);
- entry = get_marker(channel, name);
- if (!entry)
- goto end;
- if (entry->rcu_pending)
- rcu_barrier_sched();
- entry->oldptr = old;
- entry->rcu_pending = 1;
- /* write rcu_pending before calling the RCU callback */
- smp_wmb();
- call_rcu_sched(&entry->rcu, free_old_closure);
- remove_marker(channel, name); /* Ignore busy error message */
- ret = 0;
-end:
- mutex_unlock(&markers_mutex);
- return ret;
-}
-//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister);
-
-static struct marker_entry *
-get_marker_from_private_data(marker_probe_func *probe, void *probe_private)
-{
- struct marker_entry *entry;
- unsigned int i;
- struct hlist_head *head;
- struct hlist_node *node;
-
- for (i = 0; i < MARKER_TABLE_SIZE; i++) {
- head = &marker_table[i];
- hlist_for_each_entry(entry, node, head, hlist) {
- if (!entry->ptype) {
- if (entry->single.func == probe
- && entry->single.probe_private
- == probe_private)
- return entry;
- } else {
- struct marker_probe_closure *closure;
- closure = entry->multi;
- for (i = 0; closure[i].func; i++) {
- if (closure[i].func == probe &&
- closure[i].probe_private
- == probe_private)
- return entry;
- }
- }
- }
- }
- return NULL;
-}
-
-/**
- * marker_probe_unregister_private_data - Disconnect a probe from a marker
- * @probe: probe function
- * @probe_private: probe private data
- *
- * Unregister a probe by providing the registered private data.
- * Only removes the first marker found in hash table.
- * Returns 0 on success or an error value.
- * We do not need to call a synchronize_sched to make sure the probes have
- * finished running before doing a module unload, because the module unload
- * itself uses stop_machine(), which ensures that every preempt-disabled
- * section has finished.
- */
-int marker_probe_unregister_private_data(marker_probe_func *probe,
- void *probe_private)
-{
- struct marker_entry *entry;
- int ret = 0;
- struct marker_probe_closure *old;
- const char *channel = NULL, *name = NULL;
-
- mutex_lock(&markers_mutex);
- entry = get_marker_from_private_data(probe, probe_private);
- if (!entry) {
- ret = -ENOENT;
- goto end;
- }
- if (entry->rcu_pending)
- rcu_barrier_sched();
- old = marker_entry_remove_probe(entry, NULL, probe_private);
- channel = kstrdup(entry->channel, GFP_KERNEL);
- name = kstrdup(entry->name, GFP_KERNEL);
- mutex_unlock(&markers_mutex);
-
- marker_update_probes();
-
- mutex_lock(&markers_mutex);
- entry = get_marker(channel, name);
- if (!entry)
- goto end;
- if (entry->rcu_pending)
- rcu_barrier_sched();
- entry->oldptr = old;
- entry->rcu_pending = 1;
- /* write rcu_pending before calling the RCU callback */
- smp_wmb();
- call_rcu_sched(&entry->rcu, free_old_closure);
- /* Ignore busy error message */
- remove_marker(channel, name);
-end:
- mutex_unlock(&markers_mutex);
- kfree(channel);
- kfree(name);
- return ret;
-}
-//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
-
-/**
- * marker_get_private_data - Get a marker's probe private data
- * @channel: marker channel
- * @name: marker name
- * @probe: probe to match
- * @num: get the nth matching probe's private data
- *
- * Returns the nth private data pointer (starting from 0) matching, or an
- * ERR_PTR.
- * The private data pointer should _only_ be dereferenced if the caller is the
- * owner of the data, or its content could vanish. This is mostly used to
- * confirm that a caller is the owner of a registered probe.
- */
-void *marker_get_private_data(const char *channel, const char *name,
- marker_probe_func *probe, int num)
-{
- struct hlist_head *head;
- struct hlist_node *node;
- struct marker_entry *e;
- size_t channel_len = strlen(channel) + 1;
- size_t name_len = strlen(name) + 1;
- int i;
- u32 hash;
-
- hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
- head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
- if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
- if (!e->ptype) {
- if (num == 0 && e->single.func == probe)
- return e->single.probe_private;
- } else {
- struct marker_probe_closure *closure;
- int match = 0;
- closure = e->multi;
- for (i = 0; closure[i].func; i++) {
- if (closure[i].func != probe)
- continue;
- if (match++ == num)
- return closure[i].probe_private;
- }
- }
- break;
- }
- }
- return ERR_PTR(-ENOENT);
-}
-//ust// EXPORT_SYMBOL_GPL(marker_get_private_data);
-
-/**
- * markers_compact_event_ids - Compact markers event IDs and reassign channels
- *
- * Called when no channel users are active by the channel infrastructure.
- * Called with lock_markers() and channel mutex held.
- */
-//ust// void markers_compact_event_ids(void)
-//ust// {
-//ust// struct marker_entry *entry;
-//ust// unsigned int i;
-//ust// struct hlist_head *head;
-//ust// struct hlist_node *node;
-//ust// int ret;
-//ust//
-//ust// for (i = 0; i < MARKER_TABLE_SIZE; i++) {
-//ust// head = &marker_table[i];
-//ust// hlist_for_each_entry(entry, node, head, hlist) {
-//ust// ret = ltt_channels_get_index_from_name(entry->channel);
-//ust// WARN_ON(ret < 0);
-//ust// entry->channel_id = ret;
-//ust// ret = _ltt_channels_get_event_id(entry->channel,
-//ust// entry->name);
-//ust// WARN_ON(ret < 0);
-//ust// entry->event_id = ret;
-//ust// }
-//ust// }
-//ust// }
-
-//ust//#ifdef CONFIG_MODULES
-
-/**
- * marker_get_iter_range - Get the next marker in a given range.
- * @marker: current markers (in), next marker (out)
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Returns whether a next marker has been found (1) or not (0).
- * Will return the first marker in the range if the input marker is NULL.
- */
-int marker_get_iter_range(struct marker **marker, struct marker *begin,
- struct marker *end)
-{
- if (!*marker && begin != end) {
- *marker = begin;
- return 1;
- }
- if (*marker >= begin && *marker < end)
- return 1;
- return 0;
-}
-//ust// EXPORT_SYMBOL_GPL(marker_get_iter_range);
-
-static void marker_get_iter(struct marker_iter *iter)
-{
- int found = 0;
-
- /* Core kernel markers */
- if (!iter->lib) {
- /* ust FIXME: how come we cannot disable the following line? we shouldn't need core stuff */
- found = marker_get_iter_range(&iter->marker,
- __start___markers, __stop___markers);
- if (found)
- goto end;
- }
- /* Markers in modules. */
- found = lib_get_iter_markers(iter);
-end:
- if (!found)
- marker_iter_reset(iter);
-}
-
-void marker_iter_start(struct marker_iter *iter)
-{
- marker_get_iter(iter);
-}
-//ust// EXPORT_SYMBOL_GPL(marker_iter_start);
-
-void marker_iter_next(struct marker_iter *iter)
-{
- iter->marker++;
- /*
- * iter->marker may be invalid because we blindly incremented it.
- * Make sure it is valid by walking the marker ranges, moving on to the
- * markers of the following modules/libraries if necessary.
- */
- marker_get_iter(iter);
-}
-//ust// EXPORT_SYMBOL_GPL(marker_iter_next);
-
-void marker_iter_stop(struct marker_iter *iter)
-{
-}
-//ust// EXPORT_SYMBOL_GPL(marker_iter_stop);
-
-void marker_iter_reset(struct marker_iter *iter)
-{
- iter->lib = NULL;
- iter->marker = NULL;
-}
-//ust// EXPORT_SYMBOL_GPL(marker_iter_reset);
-
-#ifdef CONFIG_MARKERS_USERSPACE
-/*
- * must be called with current->user_markers_mutex held
- */
-static void free_user_marker(char __user *state, struct hlist_head *head)
-{
- struct user_marker *umark;
- struct hlist_node *pos, *n;
-
- hlist_for_each_entry_safe(umark, pos, n, head, hlist) {
- if (umark->state == state) {
- hlist_del(&umark->hlist);
- kfree(umark);
- }
- }
-}
-
-//ust// asmlinkage long sys_marker(char __user *name, char __user *format,
-//ust// char __user *state, int reg)
-//ust// {
-//ust// struct user_marker *umark;
-//ust// long len;
-//ust// struct marker_entry *entry;
-//ust// int ret = 0;
-//ust//
-//ust// printk(KERN_DEBUG "Program %s %s marker [%p, %p]\n",
-//ust// current->comm, reg ? "registers" : "unregisters",
-//ust// name, state);
-//ust// if (reg) {
-//ust// umark = kmalloc(sizeof(struct user_marker), GFP_KERNEL);
-//ust// umark->name[MAX_USER_MARKER_NAME_LEN - 1] = '\0';
-//ust// umark->format[MAX_USER_MARKER_FORMAT_LEN - 1] = '\0';
-//ust// umark->state = state;
-//ust// len = strncpy_from_user(umark->name, name,
-//ust// MAX_USER_MARKER_NAME_LEN - 1);
-//ust// if (len < 0) {
-//ust// ret = -EFAULT;
-//ust// goto error;
-//ust// }
-//ust// len = strncpy_from_user(umark->format, format,
-//ust// MAX_USER_MARKER_FORMAT_LEN - 1);
-//ust// if (len < 0) {
-//ust// ret = -EFAULT;
-//ust// goto error;
-//ust// }
-//ust// printk(KERN_DEBUG "Marker name : %s, format : %s", umark->name,
-//ust// umark->format);
-//ust// mutex_lock(&markers_mutex);
-//ust// entry = get_marker("userspace", umark->name);
-//ust// if (entry) {
-//ust// if (entry->format &&
-//ust// strcmp(entry->format, umark->format) != 0) {
-//ust// printk(" error, wrong format in process %s",
-//ust// current->comm);
-//ust// ret = -EPERM;
-//ust// goto error_unlock;
-//ust// }
-//ust// printk(" %s", !!entry->refcount
-//ust// ? "enabled" : "disabled");
-//ust// if (put_user(!!entry->refcount, state)) {
-//ust// ret = -EFAULT;
-//ust// goto error_unlock;
-//ust// }
-//ust// printk("\n");
-//ust// } else {
-//ust// printk(" disabled\n");
-//ust// if (put_user(0, umark->state)) {
-//ust// printk(KERN_WARNING
-//ust// "Marker in %s caused a fault\n",
-//ust// current->comm);
-//ust// goto error_unlock;
-//ust// }
-//ust// }
-//ust// mutex_lock(&current->group_leader->user_markers_mutex);
-//ust// hlist_add_head(&umark->hlist,
-//ust// &current->group_leader->user_markers);
-//ust// current->group_leader->user_markers_sequence++;
-//ust// mutex_unlock(&current->group_leader->user_markers_mutex);
-//ust// mutex_unlock(&markers_mutex);
-//ust// } else {
-//ust// mutex_lock(&current->group_leader->user_markers_mutex);
-//ust// free_user_marker(state,
-//ust// &current->group_leader->user_markers);
-//ust// current->group_leader->user_markers_sequence++;
-//ust// mutex_unlock(&current->group_leader->user_markers_mutex);
-//ust// }
-//ust// goto end;
-//ust// error_unlock:
-//ust// mutex_unlock(&markers_mutex);
-//ust// error:
-//ust// kfree(umark);
-//ust// end:
-//ust// return ret;
-//ust// }
-//ust//
-//ust// /*
-//ust// * Types :
-//ust// * string : 0
-//ust// */
-//ust// asmlinkage long sys_trace(int type, uint16_t id,
-//ust// char __user *ubuf)
-//ust// {
-//ust// long ret = -EPERM;
-//ust// char *page;
-//ust// int len;
-//ust//
-//ust// switch (type) {
-//ust// case 0: /* String */
-//ust// ret = -ENOMEM;
-//ust// page = (char *)__get_free_page(GFP_TEMPORARY);
-//ust// if (!page)
-//ust// goto string_out;
-//ust// len = strncpy_from_user(page, ubuf, PAGE_SIZE);
-//ust// if (len < 0) {
-//ust// ret = -EFAULT;
-//ust// goto string_err;
-//ust// }
-//ust// trace_mark(userspace, string, "string %s", page);
-//ust// string_err:
-//ust// free_page((unsigned long) page);
-//ust// string_out:
-//ust// break;
-//ust// default:
-//ust// break;
-//ust// }
-//ust// return ret;
-//ust// }
-
-//ust// static void marker_update_processes(void)
-//ust// {
-//ust// struct task_struct *g, *t;
-//ust//
-//ust// /*
-//ust// * markers_mutex is taken to protect the p->user_markers read.
-//ust// */
-//ust// mutex_lock(&markers_mutex);
-//ust// read_lock(&tasklist_lock);
-//ust// for_each_process(g) {
-//ust// WARN_ON(!thread_group_leader(g));
-//ust// if (hlist_empty(&g->user_markers))
-//ust// continue;
-//ust// if (strcmp(g->comm, "testprog") == 0)
-//ust// printk(KERN_DEBUG "set update pending for testprog\n");
-//ust// t = g;
-//ust// do {
-//ust// /* TODO : implement this thread flag in each arch. */
-//ust// set_tsk_thread_flag(t, TIF_MARKER_PENDING);
-//ust// } while ((t = next_thread(t)) != g);
-//ust// }
-//ust// read_unlock(&tasklist_lock);
-//ust// mutex_unlock(&markers_mutex);
-//ust// }
-
-/*
- * Update current process.
- * Note that we have to wait a whole scheduler period before we are sure that
- * every running userspace thread has its markers updated.
- * (synchronize_sched() can be used to ensure this.)
- */
-void marker_update_process(void)
-{
- struct user_marker *umark;
- struct hlist_node *pos;
- struct marker_entry *entry;
-
- mutex_lock(&markers_mutex);
- mutex_lock(&current->group_leader->user_markers_mutex);
- if (strcmp(current->comm, "testprog") == 0)
- printk(KERN_DEBUG "do update pending for testprog\n");
- hlist_for_each_entry(umark, pos,
- &current->group_leader->user_markers, hlist) {
- printk(KERN_DEBUG "Updating marker %s in %s\n",
- umark->name, current->comm);
- entry = get_marker("userspace", umark->name);
- if (entry) {
- if (entry->format &&
- strcmp(entry->format, umark->format) != 0) {
- printk(KERN_WARNING
- " error, wrong format in process %s\n",
- current->comm);
- break;
- }
- if (put_user(!!entry->refcount, umark->state)) {
- printk(KERN_WARNING
- "Marker in %s caused a fault\n",
- current->comm);
- break;
- }
- } else {
- if (put_user(0, umark->state)) {
- printk(KERN_WARNING
- "Marker in %s caused a fault\n",
- current->comm);
- break;
- }
- }
- }
- clear_thread_flag(TIF_MARKER_PENDING);
- mutex_unlock(&current->group_leader->user_markers_mutex);
- mutex_unlock(&markers_mutex);
-}
-
-/*
- * Called at process exit and upon do_execve().
- * We assume that when the leader exits, no more references can be done to the
- * leader structure by the other threads.
- */
-void exit_user_markers(struct task_struct *p)
-{
- struct user_marker *umark;
- struct hlist_node *pos, *n;
-
- if (thread_group_leader(p)) {
- mutex_lock(&markers_mutex);
- mutex_lock(&p->user_markers_mutex);
- hlist_for_each_entry_safe(umark, pos, n, &p->user_markers,
- hlist)
- kfree(umark);
- INIT_HLIST_HEAD(&p->user_markers);
- p->user_markers_sequence++;
- mutex_unlock(&p->user_markers_mutex);
- mutex_unlock(&markers_mutex);
- }
-}
-
-int is_marker_enabled(const char *channel, const char *name)
-{
- struct marker_entry *entry;
-
- mutex_lock(&markers_mutex);
- entry = get_marker(channel, name);
- mutex_unlock(&markers_mutex);
-
- return entry && !!entry->refcount;
-}
-//ust// #endif
-
-int marker_module_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- struct module *mod = data;
-
- switch (val) {
- case MODULE_STATE_COMING:
- marker_update_probe_range(mod->markers,
- mod->markers + mod->num_markers);
- break;
- case MODULE_STATE_GOING:
- marker_update_probe_range(mod->markers,
- mod->markers + mod->num_markers);
- break;
- }
- return 0;
-}
-
-struct notifier_block marker_module_nb = {
- .notifier_call = marker_module_notify,
- .priority = 0,
-};
-
-//ust// static int init_markers(void)
-//ust// {
-//ust// return register_module_notifier(&marker_module_nb);
-//ust// }
-//ust// __initcall(init_markers);
-/* TODO: call marker_module_nb() when a library is linked at runtime (dlopen)? */
-
-#endif /* CONFIG_MODULES */
-
-void ltt_dump_marker_state(struct ltt_trace_struct *trace)
-{
- struct marker_iter iter;
- struct ltt_probe_private_data call_data;
- const char *channel;
-
- call_data.trace = trace;
- call_data.serializer = NULL;
-
- marker_iter_reset(&iter);
- marker_iter_start(&iter);
- for (; iter.marker != NULL; marker_iter_next(&iter)) {
- if (!_imv_read(iter.marker->state))
- continue;
- channel = ltt_channels_get_name_from_index(
- iter.marker->channel_id);
- __trace_mark(0, metadata, core_marker_id,
- &call_data,
- "channel %s name %s event_id %hu "
- "int #1u%zu long #1u%zu pointer #1u%zu "
- "size_t #1u%zu alignment #1u%u",
- channel,
- iter.marker->name,
- iter.marker->event_id,
- sizeof(int), sizeof(long),
- sizeof(void *), sizeof(size_t),
- ltt_get_alignment());
- if (iter.marker->format)
- __trace_mark(0, metadata,
- core_marker_format,
- &call_data,
- "channel %s name %s format %s",
- channel,
- iter.marker->name,
- iter.marker->format);
- }
- marker_iter_stop(&iter);
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_dump_marker_state);
-
-
-static LIST_HEAD(libs);
-
-/*
- * Returns 0 if current not found.
- * Returns 1 if current found.
- */
-int lib_get_iter_markers(struct marker_iter *iter)
-{
- struct lib *iter_lib;
- int found = 0;
-
-//ust// mutex_lock(&module_mutex);
- list_for_each_entry(iter_lib, &libs, list) {
- if (iter_lib < iter->lib)
- continue;
- else if (iter_lib > iter->lib)
- iter->marker = NULL;
- found = marker_get_iter_range(&iter->marker,
- iter_lib->markers_start,
- iter_lib->markers_start + iter_lib->markers_count);
- if (found) {
- iter->lib = iter_lib;
- break;
- }
- }
-//ust// mutex_unlock(&module_mutex);
- return found;
-}
-
-void lib_update_markers(void)
-{
- struct lib *lib;
-
-//ust// mutex_lock(&module_mutex);
- list_for_each_entry(lib, &libs, list)
- marker_update_probe_range(lib->markers_start,
- lib->markers_start + lib->markers_count);
-//ust// mutex_unlock(&module_mutex);
-}
-
-static void (*new_marker_cb)(struct marker *) = NULL;
-
-void marker_set_new_marker_cb(void (*cb)(struct marker *))
-{
- new_marker_cb = cb;
-}
-
-static void new_markers(struct marker *start, struct marker *end)
-{
- if (new_marker_cb) {
- struct marker *m;
- for (m = start; m < end; m++) {
- new_marker_cb(m);
- }
- }
-}
-
-int marker_register_lib(struct marker *markers_start, int markers_count)
-{
- struct lib *pl;
-
- pl = (struct lib *) malloc(sizeof(struct lib));
-
- pl->markers_start = markers_start;
- pl->markers_count = markers_count;
-
- /* FIXME: maybe protect this with its own mutex? */
- lock_markers();
- list_add(&pl->list, &libs);
- unlock_markers();
-
- new_markers(markers_start, markers_start + markers_count);
-
- /* FIXME: update just the loaded lib */
- lib_update_markers();
-
- DBG("just registered a markers section from %p and having %d markers", markers_start, markers_count);
-
- return 0;
-}
-
-int marker_unregister_lib(struct marker *markers_start, int markers_count)
-{
- /*FIXME: implement; but before implementing, marker_register_lib must
- have appropriate locking. */
-
- return 0;
-}
-
-static int initialized = 0;
-
-void __attribute__((constructor)) init_markers(void)
-{
- if (!initialized) {
- marker_register_lib(__start___markers, (((long)__stop___markers)-((long)__start___markers))/sizeof(struct marker));
- printf("markers_start: %p, markers_stop: %p\n", __start___markers, __stop___markers);
- initialized = 1;
- }
-}
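
To make the registration API above concrete, here is a minimal probe-side sketch, following the marker_probe_func signature declared in marker.h; the channel/marker names, the format string and the error handling are hypothetical:

#include <stdio.h>
#include <stdarg.h>
#include "marker.h"

/* Hypothetical probe: consumes the single int argument described by
 * the format string it was registered with. */
static void count_probe(const struct marker *mdata, void *probe_private,
                        void *call_private, const char *fmt, va_list *args)
{
        int count = va_arg(*args, int);

        printf("%s.%s: count=%d\n", mdata->channel, mdata->name, count);
}

void attach_probe(void)
{
        /* The format string must match the instrumentation site. */
        int ret = marker_probe_register("ust", "my_event", "count %d",
                                        count_probe, NULL);
        if (ret)
                fprintf(stderr, "marker_probe_register failed: %d\n", ret);
}

void detach_probe(void)
{
        marker_probe_unregister("ust", "my_event", count_probe, NULL);
        /* Wait for in-flight probe calls before freeing probe data. */
        marker_synchronize_unregister();
}
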
+++ /dev/null
-/*
- * Code markup for dynamic and static tracing.
- *
- * See Documentation/marker.txt.
- *
- * (C) Copyright 2006 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
- * (C) Copyright 2009 Pierre-Marc Fournier <pierre-marc dot fournier at polymtl dot ca>
- *
- * This file is released under the GPLv2.
- * See the file COPYING for more details.
- */
-
-#ifndef _LINUX_MARKER_H
-#define _LINUX_MARKER_H
-
-#include <stdarg.h>
-//ust// #include <linux/types.h>
-#include "immediate.h"
-//ust// #include <linux/ltt-channels.h>
-#include "kernelcompat.h"
-#include "compiler.h"
-#include "list.h"
-
-//ust// struct module;
-//ust// struct task_struct;
-struct marker;
-
-/**
- * marker_probe_func - Type of a marker probe function
- * @mdata: marker data
- * @probe_private: probe private data
- * @call_private: call site private data
- * @fmt: format string
- * @args: variable argument list pointer. A pointer is used because C
- * provides no portable way to pass a va_list around by value to the
- * callee.
- *
- * Type of marker probe functions. They receive the mdata and need to parse the
- * format string to recover the variable argument list.
- */
-typedef void marker_probe_func(const struct marker *mdata,
- void *probe_private, void *call_private,
- const char *fmt, va_list *args);
-
-struct marker_probe_closure {
- marker_probe_func *func; /* Callback */
- void *probe_private; /* Private probe data */
-};
-
-struct marker {
- const char *channel; /* Name of channel where to send data */
- const char *name; /* Marker name */
- const char *format; /* Marker format string, describing the
- * variable argument list.
- */
- DEFINE_IMV(char, state);/* Immediate value state. */
- char ptype; /* probe type : 0 : single, 1 : multi */
- /* Probe wrapper */
- u16 channel_id; /* Numeric channel identifier, dynamic */
- u16 event_id; /* Numeric event identifier, dynamic */
- void (*call)(const struct marker *mdata, void *call_private, ...);
- struct marker_probe_closure single;
- struct marker_probe_closure *multi;
- const char *tp_name; /* Optional tracepoint name */
- void *tp_cb; /* Optional tracepoint callback */
-} __attribute__((aligned(8)));
-
-//ust// #ifdef CONFIG_MARKERS
-
-#define _DEFINE_MARKER(channel, name, tp_name_str, tp_cb, format) \
- static const char __mstrtab_##channel##_##name[] \
- __attribute__((section("__markers_strings"))) \
- = #channel "\0" #name "\0" format; \
- static struct marker __mark_##channel##_##name \
- __attribute__((section("__markers"), aligned(8))) = \
- { __mstrtab_##channel##_##name, \
- &__mstrtab_##channel##_##name[sizeof(#channel)], \
- &__mstrtab_##channel##_##name[sizeof(#channel) + \
- sizeof(#name)], \
- 0, 0, 0, 0, marker_probe_cb, \
- { __mark_empty_function, NULL}, \
- NULL, tp_name_str, tp_cb }
-
-#define DEFINE_MARKER(channel, name, format) \
- _DEFINE_MARKER(channel, name, NULL, NULL, format)
-
-#define DEFINE_MARKER_TP(channel, name, tp_name, tp_cb, format) \
- _DEFINE_MARKER(channel, name, #tp_name, tp_cb, format)
-
-/*
- * Make sure the alignment of the structure in the __markers section will
- * not add unwanted padding between the beginning of the section and the
- * structure. Force alignment to the same alignment as the section start.
- *
- * The "generic" argument controls which marker enabling mechanism must be used.
- * If generic is true, a variable read is used.
- * If generic is false, immediate values are used.
- */
-#define __trace_mark(generic, channel, name, call_private, format, args...) \
- do { \
- DEFINE_MARKER(channel, name, format); \
- __mark_check_format(format, ## args); \
- if (!generic) { \
- if (unlikely(imv_read( \
- __mark_##channel##_##name.state))) \
- (*__mark_##channel##_##name.call) \
- (&__mark_##channel##_##name, \
- call_private, ## args); \
- } else { \
- if (unlikely(_imv_read( \
- __mark_##channel##_##name.state))) \
- (*__mark_##channel##_##name.call) \
- (&__mark_##channel##_##name, \
- call_private, ## args); \
- } \
- } while (0)
-
-#define __trace_mark_tp(channel, name, call_private, tp_name, tp_cb, \
- format, args...) \
- do { \
- void __check_tp_type(void) \
- { \
- register_trace_##tp_name(tp_cb); \
- } \
- DEFINE_MARKER_TP(channel, name, tp_name, tp_cb, format);\
- __mark_check_format(format, ## args); \
- (*__mark_##channel##_##name.call)(&__mark_##channel##_##name, \
- call_private, ## args); \
- } while (0)
-
-extern void marker_update_probe_range(struct marker *begin,
- struct marker *end);
-
-#define GET_MARKER(channel, name) (__mark_##channel##_##name)
-
-//ust// #else /* !CONFIG_MARKERS */
-//ust// #define DEFINE_MARKER(channel, name, tp_name, tp_cb, format)
-//ust// #define __trace_mark(generic, channel, name, call_private, format, args...) \
-//ust// __mark_check_format(format, ## args)
-//ust// #define __trace_mark_tp(channel, name, call_private, tp_name, tp_cb, \
-//ust// format, args...) \
-//ust// do { \
-//ust// void __check_tp_type(void) \
-//ust// { \
-//ust// register_trace_##tp_name(tp_cb); \
-//ust// } \
-//ust// __mark_check_format(format, ## args); \
-//ust// } while (0)
-//ust// static inline void marker_update_probe_range(struct marker *begin,
-//ust// struct marker *end)
-//ust// { }
-//ust// #define GET_MARKER(channel, name)
-//ust// #endif /* CONFIG_MARKERS */
-
-/**
- * trace_mark - Marker using code patching
- * @channel: marker channel (where to send the data), not quoted.
- * @name: marker name, not quoted.
- * @format: format string
- * @args...: variable argument list
- *
- * Places a marker using optimized code patching technique (imv_read())
- * to be enabled when immediate values are present.
- */
-#define trace_mark(channel, name, format, args...) \
- __trace_mark(0, channel, name, NULL, format, ## args)
-
-/**
- * _trace_mark - Marker using variable read
- * @channel: marker channel (where to send the data), not quoted.
- * @name: marker name, not quoted.
- * @format: format string
- * @args...: variable argument list
- *
- * Places a marker using a standard memory read (_imv_read()) to be
- * enabled. Should be used for markers in code paths where instruction
- * modification based enabling is not welcome. (__init and __exit functions,
- * lockdep, some traps, printk).
- */
-#define _trace_mark(channel, name, format, args...) \
- __trace_mark(1, channel, name, NULL, format, ## args)
-
-/**
- * trace_mark_tp - Marker in a tracepoint callback
- * @channel: marker channel (where to send the data), not quoted.
- * @name: marker name, not quoted.
- * @tp_name: tracepoint name, not quoted.
- * @tp_cb: tracepoint callback. Should have an associated global symbol so it
- * is not optimized away by the compiler (should not be static).
- * @format: format string
- * @args...: variable argument list
- *
- * Places a marker in a tracepoint callback.
- */
-#define trace_mark_tp(channel, name, tp_name, tp_cb, format, args...) \
- __trace_mark_tp(channel, name, NULL, tp_name, tp_cb, format, ## args)
-
-/**
- * MARK_NOARGS - Format string for a marker with no argument.
- */
-#define MARK_NOARGS " "
-
-extern void lock_markers(void);
-extern void unlock_markers(void);
-
-extern void markers_compact_event_ids(void);
-
-/* To be used for string format validity checking with gcc */
-static inline void __printf(1, 2) ___mark_check_format(const char *fmt, ...)
-{
-}
-
-#define __mark_check_format(format, args...) \
- do { \
- if (0) \
- ___mark_check_format(format, ## args); \
- } while (0)
-
-extern marker_probe_func __mark_empty_function;
-
-extern void marker_probe_cb(const struct marker *mdata,
- void *call_private, ...);
-
-/*
- * Connect a probe to a marker.
- * The private data pointer must be a valid allocated memory address, or NULL.
- */
-extern int marker_probe_register(const char *channel, const char *name,
- const char *format, marker_probe_func *probe, void *probe_private);
-
-/*
- * Disconnect a probe from a marker. Returns 0 on success.
- */
-extern int marker_probe_unregister(const char *channel, const char *name,
- marker_probe_func *probe, void *probe_private);
-/*
- * Unregister a marker by providing the registered private data.
- */
-extern int marker_probe_unregister_private_data(marker_probe_func *probe,
- void *probe_private);
-
-extern void *marker_get_private_data(const char *channel, const char *name,
- marker_probe_func *probe, int num);
-
-/*
- * marker_synchronize_unregister must be called between the last marker probe
- * unregistration and the first one of
- * - the end of module exit function
- * - the free of any resource used by the probes
- * to ensure the code and data are valid for any possibly running probes.
- */
-#define marker_synchronize_unregister() synchronize_sched()
-
-struct marker_iter {
-//ust// struct module *module;
- struct lib *lib;
- struct marker *marker;
-};
-
-extern void marker_iter_start(struct marker_iter *iter);
-extern void marker_iter_next(struct marker_iter *iter);
-extern void marker_iter_stop(struct marker_iter *iter);
-extern void marker_iter_reset(struct marker_iter *iter);
-extern int marker_get_iter_range(struct marker **marker, struct marker *begin,
- struct marker *end);
-
-extern void marker_update_process(void);
-extern int is_marker_enabled(const char *channel, const char *name);
-
-//ust// #ifdef CONFIG_MARKERS_USERSPACE
-//ust// extern void exit_user_markers(struct task_struct *p);
-//ust// #else
-//ust// static inline void exit_user_markers(struct task_struct *p)
-//ust// {
-//ust// }
-//ust// #endif
-
-
-struct lib {
- struct marker *markers_start;
- int markers_count;
- struct list_head list;
-};
-
-int marker_register_lib(struct marker *markers_start, int markers_count);
-
-#define MARKER_LIB \
-extern struct marker __start___markers[] __attribute__((visibility("hidden"))); \
-extern struct marker __stop___markers[] __attribute__((visibility("hidden"))); \
- \
-static void __attribute__((constructor)) __markers__init(void) \
-{ \
- marker_register_lib(__start___markers, (((long)__stop___markers)-((long)__start___markers))/sizeof(struct marker));\
-}
-
-void marker_set_new_marker_cb(void (*cb)(struct marker *));
-
-#endif
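
Usage note: a traced library pulls this header in and invokes the macro above
exactly once; a hedged sketch of the single line a library adds:

	#include "marker.h"

	/* Emits a constructor that hands this library's __markers section
	 * to marker_register_lib() at load time. */
	MARKER_LIB;
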
+++ /dev/null
-/*
- * Copyright (C) 2008 Mathieu Desnoyers
- * Copyright (C) 2009 Pierre-Marc Fournier
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Ported to userspace by Pierre-Marc Fournier.
- */
-
-//ust// #include <linux/module.h>
-//ust// #include <linux/mutex.h>
-//ust// #include <linux/types.h>
-//ust// #include <linux/jhash.h>
-//ust// #include <linux/list.h>
-//ust// #include <linux/rcupdate.h>
-//ust// #include <linux/tracepoint.h>
-//ust// #include <linux/err.h>
-//ust// #include <linux/slab.h>
-//ust// #include <linux/immediate.h>
-
-#include <errno.h>
-
-#include "kernelcompat.h"
-#include "tracepoint.h"
-#include "usterr.h"
-#include "list.h"
-
-//extern struct tracepoint __start___tracepoints[] __attribute__((visibility("hidden")));
-//extern struct tracepoint __stop___tracepoints[] __attribute__((visibility("hidden")));
-
-/* Set to 1 to enable tracepoint debug output */
-static const int tracepoint_debug;
-
-/* libraries that contain tracepoints (struct tracepoint_lib) */
-static LIST_HEAD(libs);
-
-/*
- * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
- * builtin and module tracepoints and the hash table.
- */
-static DEFINE_MUTEX(tracepoints_mutex);
-
-/*
- * Tracepoint hash table, containing the active tracepoints.
- * Protected by tracepoints_mutex.
- */
-#define TRACEPOINT_HASH_BITS 6
-#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
-static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
-
-/*
- * Note about RCU :
- * It is used to delay the freeing of old probe arrays until a quiescent
- * state is reached.
- * Modifications of tracepoint entries are protected by the tracepoints_mutex.
- */
-struct tracepoint_entry {
- struct hlist_node hlist;
- void **funcs;
- int refcount; /* Number of times armed. 0 if disarmed. */
- char name[0];
-};
-
-struct tp_probes {
- union {
-//ust// struct rcu_head rcu;
- struct list_head list;
- } u;
- void *probes[0];
-};
-
-static inline void *allocate_probes(int count)
-{
- struct tp_probes *p = kmalloc(count * sizeof(void *)
- + sizeof(struct tp_probes), GFP_KERNEL);
- return p == NULL ? NULL : p->probes;
-}
-
-//ust// static void rcu_free_old_probes(struct rcu_head *head)
-//ust// {
-//ust// kfree(container_of(head, struct tp_probes, u.rcu));
-//ust// }
-
-static inline void release_probes(void *old)
-{
- if (old) {
- struct tp_probes *tp_probes = container_of(old,
- struct tp_probes, probes[0]);
-//ust// call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
- synchronize_rcu();
- kfree(tp_probes);
- }
-}
-
-static void debug_print_probes(struct tracepoint_entry *entry)
-{
- int i;
-
- if (!tracepoint_debug || !entry->funcs)
- return;
-
- for (i = 0; entry->funcs[i]; i++)
- printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]);
-}
-
-static void *
-tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
-{
- int nr_probes = 0;
- void **old, **new;
-
- WARN_ON(!probe);
-
- debug_print_probes(entry);
- old = entry->funcs;
- if (old) {
- /* (N -> N+1), (N != 0, 1) probes */
- for (nr_probes = 0; old[nr_probes]; nr_probes++)
- if (old[nr_probes] == probe)
- return ERR_PTR(-EEXIST);
- }
- /* + 2 : one for new probe, one for NULL func */
- new = allocate_probes(nr_probes + 2);
- if (new == NULL)
- return ERR_PTR(-ENOMEM);
- if (old)
- memcpy(new, old, nr_probes * sizeof(void *));
- new[nr_probes] = probe;
- new[nr_probes + 1] = NULL;
- entry->refcount = nr_probes + 1;
- entry->funcs = new;
- debug_print_probes(entry);
- return old;
-}
-
-static void *
-tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
-{
- int nr_probes = 0, nr_del = 0, i;
- void **old, **new;
-
- old = entry->funcs;
-
- if (!old)
- return ERR_PTR(-ENOENT);
-
- debug_print_probes(entry);
- /* (N -> M), (N > 1, M >= 0) probes */
- for (nr_probes = 0; old[nr_probes]; nr_probes++) {
- if ((!probe || old[nr_probes] == probe))
- nr_del++;
- }
-
- if (nr_probes - nr_del == 0) {
- /* N -> 0, (N > 1) */
- entry->funcs = NULL;
- entry->refcount = 0;
- debug_print_probes(entry);
- return old;
- } else {
- int j = 0;
- /* N -> M, (N > 1, M > 0) */
- /* + 1 for NULL */
- new = allocate_probes(nr_probes - nr_del + 1);
- if (new == NULL)
- return ERR_PTR(-ENOMEM);
- for (i = 0; old[i]; i++)
- if ((probe && old[i] != probe))
- new[j++] = old[i];
- new[nr_probes - nr_del] = NULL;
- entry->refcount = nr_probes - nr_del;
- entry->funcs = new;
- }
- debug_print_probes(entry);
- return old;
-}
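
Both helpers above follow the same copy-update pattern: the live
NULL-terminated array is never modified in place; a fresh array is built and
the old one is returned so the caller can free it after a grace period. A
standalone sketch of the grow step, with hypothetical names and plain malloc:

	#include <stdlib.h>
	#include <string.h>

	/* Grow a NULL-terminated pointer array by one entry; the old array
	 * is left intact for deferred freeing by the caller. */
	static void **grow_probe_array(void **old, void *probe)
	{
		int n = 0;
		void **new;

		if (old)
			while (old[n])
				n++;
		new = malloc((n + 2) * sizeof(void *)); /* +probe, +NULL */
		if (!new)
			return NULL;
		if (old)
			memcpy(new, old, n * sizeof(void *));
		new[n] = probe;
		new[n + 1] = NULL;
		return new;
	}
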
-
-/*
- * Get tracepoint if the tracepoint is present in the tracepoint hash table.
- * Must be called with tracepoints_mutex held.
- * Returns NULL if not present.
- */
-static struct tracepoint_entry *get_tracepoint(const char *name)
-{
- struct hlist_head *head;
- struct hlist_node *node;
- struct tracepoint_entry *e;
- u32 hash = jhash(name, strlen(name), 0);
-
- head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
- hlist_for_each_entry(e, node, head, hlist) {
- if (!strcmp(name, e->name))
- return e;
- }
- return NULL;
-}
-
-/*
- * Add the tracepoint to the tracepoint hash table. Must be called with
- * tracepoints_mutex held.
- */
-static struct tracepoint_entry *add_tracepoint(const char *name)
-{
- struct hlist_head *head;
- struct hlist_node *node;
- struct tracepoint_entry *e;
- size_t name_len = strlen(name) + 1;
- u32 hash = jhash(name, name_len-1, 0);
-
- head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
- hlist_for_each_entry(e, node, head, hlist) {
- if (!strcmp(name, e->name)) {
- printk(KERN_NOTICE
- "tracepoint %s busy\n", name);
- return ERR_PTR(-EEXIST); /* Already there */
- }
- }
- /*
- * Using kmalloc here to allocate a variable length element. Could
- * cause some memory fragmentation if overused.
- */
- e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
- if (!e)
- return ERR_PTR(-ENOMEM);
- memcpy(&e->name[0], name, name_len);
- e->funcs = NULL;
- e->refcount = 0;
- hlist_add_head(&e->hlist, head);
- return e;
-}
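
Since TRACEPOINT_TABLE_SIZE is a power of two, both lookups above reduce
bucket selection to a masked Jenkins hash; a hypothetical helper capturing
the idiom:

	static struct hlist_head *tracepoint_bucket(const char *name)
	{
		u32 hash = jhash(name, strlen(name), 0);

		return &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	}
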
-
-/*
- * Remove the tracepoint from the tracepoint hash table. Must be called with
- * tracepoints_mutex held.
- */
-static inline void remove_tracepoint(struct tracepoint_entry *e)
-{
- hlist_del(&e->hlist);
- kfree(e);
-}
-
-/*
- * Sets the probe callback corresponding to one tracepoint.
- */
-static void set_tracepoint(struct tracepoint_entry **entry,
- struct tracepoint *elem, int active)
-{
- WARN_ON(strcmp((*entry)->name, elem->name) != 0);
-
- /*
- * rcu_assign_pointer has a smp_wmb() which makes sure that the new
- * probe callbacks array is consistent before setting a pointer to it.
- * This array is referenced by __DO_TRACE from tracepoint.h. A matching
- * smp_read_barrier_depends() is used.
- */
- rcu_assign_pointer(elem->funcs, (*entry)->funcs);
- elem->state__imv = active;
-}
-
-/*
- * Disable a tracepoint and its probe callback.
- * Note: only waiting for an RCU grace period after setting elem->funcs to
- * NULL ensures that the original callbacks are not used anymore. This is
- * guaranteed by the read-side critical section around the call site.
- */
-static void disable_tracepoint(struct tracepoint *elem)
-{
- elem->state__imv = 0;
- rcu_assign_pointer(elem->funcs, NULL);
-}
-
-/**
- * tracepoint_update_probe_range - Update a probe range
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Updates the probe callback corresponding to a range of tracepoints.
- */
-void tracepoint_update_probe_range(struct tracepoint *begin,
- struct tracepoint *end)
-{
- struct tracepoint *iter;
- struct tracepoint_entry *mark_entry;
-
- mutex_lock(&tracepoints_mutex);
- for (iter = begin; iter < end; iter++) {
- mark_entry = get_tracepoint(iter->name);
- if (mark_entry) {
- set_tracepoint(&mark_entry, iter,
- !!mark_entry->refcount);
- } else {
- disable_tracepoint(iter);
- }
- }
- mutex_unlock(&tracepoints_mutex);
-}
-
-/*
- * Update probes, removing the faulty probes.
- */
-static void tracepoint_update_probes(void)
-{
- /* Core kernel tracepoints */
-//ust// tracepoint_update_probe_range(__start___tracepoints,
-//ust// __stop___tracepoints);
- /* tracepoints in modules. */
- lib_update_tracepoints();
- /* Update immediate values */
- core_imv_update();
-//ust// module_imv_update();
-}
-
-static void *tracepoint_add_probe(const char *name, void *probe)
-{
- struct tracepoint_entry *entry;
- void *old;
-
- entry = get_tracepoint(name);
- if (!entry) {
- entry = add_tracepoint(name);
- if (IS_ERR(entry))
- return entry;
- }
- old = tracepoint_entry_add_probe(entry, probe);
- if (IS_ERR(old) && !entry->refcount)
- remove_tracepoint(entry);
- return old;
-}
-
-/**
- * tracepoint_probe_register - Connect a probe to a tracepoint
- * @name: tracepoint name
- * @probe: probe handler
- *
- * Returns 0 if ok, error value on error.
- * The probe address must at least be aligned on the architecture pointer size.
- */
-int tracepoint_probe_register(const char *name, void *probe)
-{
- void *old;
-
- mutex_lock(&tracepoints_mutex);
- old = tracepoint_add_probe(name, probe);
- mutex_unlock(&tracepoints_mutex);
- if (IS_ERR(old))
- return PTR_ERR(old);
-
- tracepoint_update_probes(); /* may update entry */
- release_probes(old);
- return 0;
-}
-//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register);
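
A hedged usage sketch: "foo" and foo_probe are made-up names, and the probe's
prototype must match the TPPROTO of the tracepoint declared elsewhere:

	/* For a tracepoint declared as
	 * DECLARE_TRACE(foo, TPPROTO(int v), TPARGS(v)). */
	static void foo_probe(int v)
	{
		/* record v */
	}

	static int attach_foo(void)
	{
		return tracepoint_probe_register("foo", (void *) foo_probe);
	}
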
-
-static void *tracepoint_remove_probe(const char *name, void *probe)
-{
- struct tracepoint_entry *entry;
- void *old;
-
- entry = get_tracepoint(name);
- if (!entry)
- return ERR_PTR(-ENOENT);
- old = tracepoint_entry_remove_probe(entry, probe);
- if (IS_ERR(old))
- return old;
- if (!entry->refcount)
- remove_tracepoint(entry);
- return old;
-}
-
-/**
- * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
- * @name: tracepoint name
- * @probe: probe function pointer
- *
- * We do not need to call a synchronize_sched to make sure the probes have
- * finished running before doing a module unload, because the module unload
- * itself uses stop_machine(), which ensures that every preempt-disabled
- * section has finished.
- */
-int tracepoint_probe_unregister(const char *name, void *probe)
-{
- void *old;
-
- mutex_lock(&tracepoints_mutex);
- old = tracepoint_remove_probe(name, probe);
- mutex_unlock(&tracepoints_mutex);
- if (IS_ERR(old))
- return PTR_ERR(old);
-
- tracepoint_update_probes(); /* may update entry */
- release_probes(old);
- return 0;
-}
-//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
-
-static LIST_HEAD(old_probes);
-static int need_update;
-
-static void tracepoint_add_old_probes(void *old)
-{
- need_update = 1;
- if (old) {
- struct tp_probes *tp_probes = container_of(old,
- struct tp_probes, probes[0]);
- list_add(&tp_probes->u.list, &old_probes);
- }
-}
-
-/**
- * tracepoint_probe_register_noupdate - register a probe but not connect
- * @name: tracepoint name
- * @probe: probe handler
- *
- * caller must call tracepoint_probe_update_all()
- */
-int tracepoint_probe_register_noupdate(const char *name, void *probe)
-{
- void *old;
-
- mutex_lock(&tracepoints_mutex);
- old = tracepoint_add_probe(name, probe);
- if (IS_ERR(old)) {
- mutex_unlock(&tracepoints_mutex);
- return PTR_ERR(old);
- }
- tracepoint_add_old_probes(old);
- mutex_unlock(&tracepoints_mutex);
- return 0;
-}
-//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
-
-/**
- * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect
- * @name: tracepoint name
- * @probe: probe function pointer
- *
- * caller must call tracepoint_probe_update_all()
- */
-int tracepoint_probe_unregister_noupdate(const char *name, void *probe)
-{
- void *old;
-
- mutex_lock(&tracepoints_mutex);
- old = tracepoint_remove_probe(name, probe);
- if (IS_ERR(old)) {
- mutex_unlock(&tracepoints_mutex);
- return PTR_ERR(old);
- }
- tracepoint_add_old_probes(old);
- mutex_unlock(&tracepoints_mutex);
- return 0;
-}
-//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
-
-/**
- * tracepoint_probe_update_all - update tracepoints
- */
-void tracepoint_probe_update_all(void)
-{
- LIST_HEAD(release_probes);
- struct tp_probes *pos, *next;
-
- mutex_lock(&tracepoints_mutex);
- if (!need_update) {
- mutex_unlock(&tracepoints_mutex);
- return;
- }
- if (!list_empty(&old_probes))
- list_replace_init(&old_probes, &release_probes);
- need_update = 0;
- mutex_unlock(&tracepoints_mutex);
-
- tracepoint_update_probes();
- list_for_each_entry_safe(pos, next, &release_probes, u.list) {
- list_del(&pos->u.list);
-//ust// call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
- synchronize_rcu();
- kfree(pos);
- }
-}
-//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
-
-/**
- * tracepoint_get_iter_range - Get the next tracepoint given a range.
- * @tracepoint: current tracepoint (in), next tracepoint (out)
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Returns whether a next tracepoint has been found (1) or not (0).
- * Will return the first tracepoint in the range if the input tracepoint is
- * NULL.
- */
-int tracepoint_get_iter_range(struct tracepoint **tracepoint,
- struct tracepoint *begin, struct tracepoint *end)
-{
- if (!*tracepoint && begin != end) {
- *tracepoint = begin;
- return 1;
- }
- if (*tracepoint >= begin && *tracepoint < end)
- return 1;
- return 0;
-}
-//ust// EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);
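
A sketch of walking a tracepoint range with this helper, assuming begin/end
delimit a __tracepoints section:

	#include <stdio.h>

	static void list_range(struct tracepoint *begin, struct tracepoint *end)
	{
		struct tracepoint *tp = NULL;

		while (tracepoint_get_iter_range(&tp, begin, end)) {
			printf("tracepoint: %s\n", tp->name);
			tp++;
		}
	}
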
-
-static void tracepoint_get_iter(struct tracepoint_iter *iter)
-{
- int found = 0;
-
-//ust// /* Core kernel tracepoints */
-//ust// if (!iter->module) {
-//ust// found = tracepoint_get_iter_range(&iter->tracepoint,
-//ust// __start___tracepoints, __stop___tracepoints);
-//ust// if (found)
-//ust// goto end;
-//ust// }
- /* tracepoints in libs. */
- found = lib_get_iter_tracepoints(iter);
-end:
- if (!found)
- tracepoint_iter_reset(iter);
-}
-
-void tracepoint_iter_start(struct tracepoint_iter *iter)
-{
- tracepoint_get_iter(iter);
-}
-//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_start);
-
-void tracepoint_iter_next(struct tracepoint_iter *iter)
-{
- iter->tracepoint++;
- /*
- * iter->tracepoint may be invalid because we blindly incremented it.
- * Make sure it is valid by marshalling on the tracepoints, getting the
- * tracepoints from following modules if necessary.
- */
- tracepoint_get_iter(iter);
-}
-//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_next);
-
-void tracepoint_iter_stop(struct tracepoint_iter *iter)
-{
-}
-//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
-
-void tracepoint_iter_reset(struct tracepoint_iter *iter)
-{
-//ust// iter->module = NULL;
- iter->tracepoint = NULL;
-}
-//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
-
-//ust// #ifdef CONFIG_MODULES
-
-//ust// int tracepoint_module_notify(struct notifier_block *self,
-//ust// unsigned long val, void *data)
-//ust// {
-//ust// struct module *mod = data;
-//ust//
-//ust// switch (val) {
-//ust// case MODULE_STATE_COMING:
-//ust// tracepoint_update_probe_range(mod->tracepoints,
-//ust// mod->tracepoints + mod->num_tracepoints);
-//ust// break;
-//ust// case MODULE_STATE_GOING:
-//ust// tracepoint_update_probe_range(mod->tracepoints,
-//ust// mod->tracepoints + mod->num_tracepoints);
-//ust// break;
-//ust// }
-//ust// return 0;
-//ust// }
-
-//ust// struct notifier_block tracepoint_module_nb = {
-//ust// .notifier_call = tracepoint_module_notify,
-//ust// .priority = 0,
-//ust// };
-
-//ust// static int init_tracepoints(void)
-//ust// {
-//ust// return register_module_notifier(&tracepoint_module_nb);
-//ust// }
-//ust// __initcall(init_tracepoints);
-
-//ust// #endif /* CONFIG_MODULES */
-
-/*
- * Returns 1 if the current tracepoint was found in the libs list, 0 otherwise.
- */
-int lib_get_iter_tracepoints(struct tracepoint_iter *iter)
-{
- struct tracepoint_lib *iter_lib;
- int found = 0;
-
-//ust// mutex_lock(&module_mutex);
- list_for_each_entry(iter_lib, &libs, list) {
- if (iter_lib < iter->lib)
- continue;
- else if (iter_lib > iter->lib)
- iter->tracepoint = NULL;
-		found = tracepoint_get_iter_range(&iter->tracepoint,
- iter_lib->tracepoints_start,
- iter_lib->tracepoints_start + iter_lib->tracepoints_count);
- if (found) {
- iter->lib = iter_lib;
- break;
- }
- }
-//ust// mutex_unlock(&module_mutex);
- return found;
-}
-
-void lib_update_tracepoints(void)
-{
- struct tracepoint_lib *lib;
-
-//ust// mutex_lock(&module_mutex);
- list_for_each_entry(lib, &libs, list)
- tracepoint_update_probe_range(lib->tracepoints_start,
- lib->tracepoints_start + lib->tracepoints_count);
-//ust// mutex_unlock(&module_mutex);
-}
-
-static void (*new_tracepoint_cb)(struct tracepoint *) = NULL;
-
-void tracepoint_set_new_tracepoint_cb(void (*cb)(struct tracepoint *))
-{
- new_tracepoint_cb = cb;
-}
-
-static void new_tracepoints(struct tracepoint *start, struct tracepoint *end)
-{
-	if (new_tracepoint_cb) {
-		struct tracepoint *t;
-
-		for (t = start; t < end; t++)
-			new_tracepoint_cb(t);
-	}
-}
-
-int tracepoint_register_lib(struct tracepoint *tracepoints_start, int tracepoints_count)
-{
- struct tracepoint_lib *pl;
-
-	pl = (struct tracepoint_lib *) malloc(sizeof(struct tracepoint_lib));
-	if (pl == NULL) {
-		ERR("malloc returned NULL");
-		return -ENOMEM;
-	}
-
- pl->tracepoints_start = tracepoints_start;
- pl->tracepoints_count = tracepoints_count;
-
- /* FIXME: maybe protect this with its own mutex? */
- mutex_lock(&tracepoints_mutex);
- list_add(&pl->list, &libs);
- mutex_unlock(&tracepoints_mutex);
-
- new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);
-
- /* FIXME: update just the loaded lib */
- lib_update_tracepoints();
-
-	DBG("registered a tracepoint section at %p containing %d tracepoints", tracepoints_start, tracepoints_count);
-
- return 0;
-}
-
-int tracepoint_unregister_lib(struct tracepoint *tracepoints_start, int tracepoints_count)
-{
- /*FIXME: implement; but before implementing, tracepoint_register_lib must
- have appropriate locking. */
-
- return 0;
-}
+++ /dev/null
-#ifndef _LINUX_TRACEPOINT_H
-#define _LINUX_TRACEPOINT_H
-
-/*
- * Copyright (C) 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
- * Copyright (C) 2009 Pierre-Marc Fournier
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Heavily inspired from the Linux Kernel Markers.
- *
- * Ported to userspace by Pierre-Marc Fournier.
- */
-
-//#include <linux/immediate.h>
-//#include <linux/types.h>
-//#include <linux/rcupdate.h>
-
-#include "immediate.h"
-#include "kernelcompat.h"
-
-struct module;
-struct tracepoint;
-
-struct tracepoint {
- const char *name; /* Tracepoint name */
- DEFINE_IMV(char, state); /* State. */
- void **funcs;
-} __attribute__((aligned(32))); /*
- * Aligned on 32 bytes because it is
- * globally visible and gcc happily
- * aligns these on the structure size.
- * Keep in sync with vmlinux.lds.h.
- */
-
-#define TPPROTO(args...) args
-#define TPARGS(args...) args
-
-//ust// #ifdef CONFIG_TRACEPOINTS
-
-/*
- * it_func[0] is never NULL because there is at least one element in the array
- * when the array itself is non-NULL.
- */
-#define __DO_TRACE(tp, proto, args) \
- do { \
- void **it_func; \
- \
- rcu_read_lock_sched_notrace(); \
- it_func = rcu_dereference((tp)->funcs); \
- if (it_func) { \
- do { \
- ((void(*)(proto))(*it_func))(args); \
- } while (*(++it_func)); \
- } \
- rcu_read_unlock_sched_notrace(); \
- } while (0)
-
-#define __CHECK_TRACE(name, generic, proto, args) \
- do { \
- if (!generic) { \
- if (unlikely(imv_read(__tracepoint_##name.state))) \
- __DO_TRACE(&__tracepoint_##name, \
- TPPROTO(proto), TPARGS(args)); \
- } else { \
- if (unlikely(_imv_read(__tracepoint_##name.state))) \
- __DO_TRACE(&__tracepoint_##name, \
- TPPROTO(proto), TPARGS(args)); \
- } \
- } while (0)
-
-/*
- * Make sure the alignment of the structure in the __tracepoints section will
- * not add unwanted padding between the beginning of the section and the
- * structure. Force alignment to the same alignment as the section start.
- *
- * The "generic" argument, passed to the declared __trace_##name inline
- * function controls which tracepoint enabling mechanism must be used.
- * If generic is true, a variable read is used.
- * If generic is false, immediate values are used.
- */
-#define DECLARE_TRACE(name, proto, args) \
- extern struct tracepoint __tracepoint_##name; \
- static inline void trace_##name(proto) \
- { \
- __CHECK_TRACE(name, 0, TPPROTO(proto), TPARGS(args)); \
- } \
- static inline void _trace_##name(proto) \
- { \
- __CHECK_TRACE(name, 1, TPPROTO(proto), TPARGS(args)); \
- } \
- static inline int register_trace_##name(void (*probe)(proto)) \
- { \
- return tracepoint_probe_register(#name, (void *)probe); \
- } \
- static inline int unregister_trace_##name(void (*probe)(proto)) \
- { \
- return tracepoint_probe_unregister(#name, (void *)probe);\
- }
-
-#define DEFINE_TRACE(name) \
- static const char __tpstrtab_##name[] \
- __attribute__((section("__tracepoints_strings"))) = #name; \
- struct tracepoint __tracepoint_##name \
- __attribute__((section("__tracepoints"), aligned(32))) = \
- { __tpstrtab_##name, 0, NULL }
-
-#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
- EXPORT_SYMBOL_GPL(__tracepoint_##name)
-#define EXPORT_TRACEPOINT_SYMBOL(name) \
- EXPORT_SYMBOL(__tracepoint_##name)
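
End to end, the macros above are meant to be combined like this; sched_event
and its arguments are made-up names for illustration:

	/* In a header shared by instrumented code and probes: */
	DECLARE_TRACE(sched_event, TPPROTO(int pid, int state),
		      TPARGS(pid, state));

	/* In exactly one .c file: */
	DEFINE_TRACE(sched_event);

	/* At the instrumentation site; nearly free while disarmed: */
	void on_switch(int pid, int state)
	{
		trace_sched_event(pid, state);
	}
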
-
-extern void tracepoint_update_probe_range(struct tracepoint *begin,
- struct tracepoint *end);
-
-//ust// #else /* !CONFIG_TRACEPOINTS */
-//ust// #define DECLARE_TRACE(name, proto, args) \
-//ust// static inline void trace_##name(proto) \
-//ust// { } \
-//ust// static inline void _trace_##name(proto) \
-//ust// { } \
-//ust// static inline int register_trace_##name(void (*probe)(proto)) \
-//ust// { \
-//ust// return -ENOSYS; \
-//ust// } \
-//ust// static inline int unregister_trace_##name(void (*probe)(proto)) \
-//ust// { \
-//ust// return -ENOSYS; \
-//ust// }
-//ust//
-//ust// #define DEFINE_TRACE(name)
-//ust// #define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
-//ust// #define EXPORT_TRACEPOINT_SYMBOL(name)
-//ust//
-//ust// static inline void tracepoint_update_probe_range(struct tracepoint *begin,
-//ust// struct tracepoint *end)
-//ust// { }
-//ust// #endif /* CONFIG_TRACEPOINTS */
-
-/*
- * Connect a probe to a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_register(const char *name, void *probe);
-
-/*
- * Disconnect a probe from a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_unregister(const char *name, void *probe);
-
-extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
-extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
-extern void tracepoint_probe_update_all(void);
-
-struct tracepoint_iter {
-//ust// struct module *module;
- struct tracepoint_lib *lib;
- struct tracepoint *tracepoint;
-};
-
-extern void tracepoint_iter_start(struct tracepoint_iter *iter);
-extern void tracepoint_iter_next(struct tracepoint_iter *iter);
-extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
-extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
- struct tracepoint *begin, struct tracepoint *end);
-
-/*
- * tracepoint_synchronize_unregister must be called between the last tracepoint
- * probe unregistration and the end of module exit to make sure there is no
- * caller executing a probe when it is freed.
- */
-static inline void tracepoint_synchronize_unregister(void)
-{
- synchronize_sched();
-}
-
-struct tracepoint_lib {
- struct tracepoint *tracepoints_start;
- int tracepoints_count;
- struct list_head list;
-};
-
-#define TRACEPOINT_LIB \
-extern struct tracepoint __start___tracepoints[] __attribute__((visibility("hidden"))); \
-extern struct tracepoint __stop___tracepoints[] __attribute__((visibility("hidden"))); \
- \
-static void __attribute__((constructor)) __tracepoints__init(void) \
-{ \
- tracepoint_register_lib(__start___tracepoints, (((long)__stop___tracepoints)-((long)__start___tracepoints))/sizeof(struct tracepoint));\
-}
-#endif
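
As with MARKER_LIB, one compilation unit of a tracepoint-defining library
invokes the macro so its section is registered when the library is loaded;
a minimal sketch:

	#include "tracepoint.h"

	/* Emits a constructor that passes this library's __tracepoints
	 * section to tracepoint_register_lib(). */
	TRACEPOINT_LIB;
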
+++ /dev/null
-lib_LTLIBRARIES = libtracectl.la
-libtracectl_la_SOURCES = marker-control.c localerr.h tracectl.c ../libustcomm/ustcomm.c ../libustcomm/ustcomm.h
-
-INCLUDES = -I$(top_builddir)/share
-INCLUDES += -I@URCU_PATH@
-INCLUDES += -I@KCOMPAT_PATH@
-INCLUDES += -I$(top_builddir)/libtracing
-INCLUDES += -I$(top_builddir)/libmarkers
-INCLUDES += -I$(top_builddir)/libustcomm
+++ /dev/null
-#include "usterr.h"
+++ /dev/null
-/*
- * Copyright (C) 2007 Mathieu Desnoyers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * LTT marker control module over /proc
- */
-
-//ust// #include <linux/proc_fs.h>
-//ust// #include <linux/module.h>
-//ust// #include <linux/stat.h>
-//ust// #include <linux/vmalloc.h>
-//ust// #include <linux/marker.h>
-//ust// #include <linux/ltt-tracer.h>
-//ust// #include <linux/uaccess.h>
-//ust// #include <linux/string.h>
-//ust// #include <linux/ctype.h>
-//ust// #include <linux/list.h>
-//ust// #include <linux/mutex.h>
-//ust// #include <linux/seq_file.h>
-//ust// #include <linux/slab.h>
-#include "kernelcompat.h"
-#include "list.h"
-#include "tracer.h"
-#include "localerr.h"
-
-#define DEFAULT_CHANNEL "cpu"
-#define DEFAULT_PROBE "default"
-
-LIST_HEAD(probes_list);
-
-/*
- * Mutex protecting the registered-probe lists.
- * Nests inside the traces mutex.
- */
-DEFINE_MUTEX(probes_mutex);
-
-struct ltt_available_probe default_probe = {
- .name = "default",
- .format = NULL,
- .probe_func = ltt_vtrace,
- .callbacks[0] = ltt_serialize_data,
-};
-
-//ust//static struct kmem_cache *markers_loaded_cachep;
-static LIST_HEAD(markers_loaded_list);
-/*
- * List sorted by name strcmp order.
- */
-static LIST_HEAD(probes_registered_list);
-
-//ust// static struct proc_dir_entry *pentry;
-
-//ust// static struct file_operations ltt_fops;
-
-static struct ltt_available_probe *get_probe_from_name(const char *pname)
-{
- struct ltt_available_probe *iter;
- int comparison, found = 0;
-
- if (!pname)
- pname = DEFAULT_PROBE;
- list_for_each_entry(iter, &probes_registered_list, node) {
- comparison = strcmp(pname, iter->name);
- if (!comparison)
- found = 1;
- if (comparison <= 0)
- break;
- }
- if (found)
- return iter;
- else
- return NULL;
-}
-
-static char *skip_spaces(char *buf)
-{
- while (*buf != '\0' && isspace(*buf))
- buf++;
- return buf;
-}
-
-static char *skip_nonspaces(char *buf)
-{
- while (*buf != '\0' && !isspace(*buf))
- buf++;
- return buf;
-}
-
-static void get_marker_string(char *buf, char **start,
- char **end)
-{
- *start = skip_spaces(buf);
- *end = skip_nonspaces(*start);
- **end = '\0';
-}
-
-int ltt_probe_register(struct ltt_available_probe *pdata)
-{
- int ret = 0;
- int comparison;
- struct ltt_available_probe *iter;
-
- mutex_lock(&probes_mutex);
- list_for_each_entry_reverse(iter, &probes_registered_list, node) {
- comparison = strcmp(pdata->name, iter->name);
- if (!comparison) {
- ret = -EBUSY;
- goto end;
- } else if (comparison > 0) {
- /* We belong to the location right after iter. */
- list_add(&pdata->node, &iter->node);
- goto end;
- }
- }
- /* Should be added at the head of the list */
- list_add(&pdata->node, &probes_registered_list);
-end:
- mutex_unlock(&probes_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ltt_probe_register);
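
A hedged sketch of registering an additional probe next to default_probe,
reusing the ltt_vtrace/ltt_serialize_data callbacks from this file; the probe
name is made up:

	static struct ltt_available_probe my_probe = {
		.name = "myprobe",
		.format = NULL,
		.probe_func = ltt_vtrace,
		.callbacks[0] = ltt_serialize_data,
	};

	static void register_my_probe(void)
	{
		int ret = ltt_probe_register(&my_probe);

		if (ret)
			ERR("ltt_probe_register returned %d", ret);
	}
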
-
-/*
- * Unregister a probe and disconnect all markers still using it.
- */
-int ltt_probe_unregister(struct ltt_available_probe *pdata)
-{
- int ret = 0;
- struct ltt_active_marker *amark, *tmp;
-
- mutex_lock(&probes_mutex);
- list_for_each_entry_safe(amark, tmp, &markers_loaded_list, node) {
- if (amark->probe == pdata) {
- ret = marker_probe_unregister_private_data(
- pdata->probe_func, amark);
- if (ret)
- goto end;
- list_del(&amark->node);
- free(amark);
- }
- }
- list_del(&pdata->node);
-end:
- mutex_unlock(&probes_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ltt_probe_unregister);
-
-/*
- * Connect marker "mname" to probe "pname".
- * Only one probe instance may be connected to a given marker.
- */
-int ltt_marker_connect(const char *channel, const char *mname,
- const char *pname)
-
-{
- int ret;
- struct ltt_active_marker *pdata;
- struct ltt_available_probe *probe;
-
- ltt_lock_traces();
- mutex_lock(&probes_mutex);
- probe = get_probe_from_name(pname);
- if (!probe) {
- ret = -ENOENT;
- goto end;
- }
- pdata = marker_get_private_data(channel, mname, probe->probe_func, 0);
- if (pdata && !IS_ERR(pdata)) {
- ret = -EEXIST;
- goto end;
- }
- pdata = zmalloc(sizeof(struct ltt_active_marker));
- if (!pdata) {
- ret = -ENOMEM;
- goto end;
- }
- pdata->probe = probe;
- /*
- * ID has priority over channel in case of conflict.
- */
- ret = marker_probe_register(channel, mname, NULL,
- probe->probe_func, pdata);
- if (ret)
- free(pdata);
- else
- list_add(&pdata->node, &markers_loaded_list);
-end:
- mutex_unlock(&probes_mutex);
- ltt_unlock_traces();
- return ret;
-}
-EXPORT_SYMBOL_GPL(ltt_marker_connect);
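
Usage sketch: connect a hypothetical marker "ust"/"my_event" to the default
probe; passing NULL as the probe name falls back to DEFAULT_PROBE in
get_probe_from_name():

	static void connect_my_marker(void)
	{
		int ret = ltt_marker_connect("ust", "my_event", NULL);

		if (ret)
			ERR("ltt_marker_connect returned %d", ret);
	}
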
-
-/*
- * Disconnect marker "mname", probe "pname".
- */
-int ltt_marker_disconnect(const char *channel, const char *mname,
- const char *pname)
-{
- struct ltt_active_marker *pdata;
- struct ltt_available_probe *probe;
- int ret = 0;
-
- mutex_lock(&probes_mutex);
- probe = get_probe_from_name(pname);
- if (!probe) {
- ret = -ENOENT;
- goto end;
- }
- pdata = marker_get_private_data(channel, mname, probe->probe_func, 0);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto end;
- } else if (!pdata) {
- /*
- * Not registered by us.
- */
- ret = -EPERM;
- goto end;
- }
- ret = marker_probe_unregister(channel, mname, probe->probe_func, pdata);
- if (ret)
- goto end;
- else {
- list_del(&pdata->node);
- free(pdata);
- }
-end:
- mutex_unlock(&probes_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ltt_marker_disconnect);
-
-/*
- * function handling proc entry write.
- *
- * connect <channel name> <marker name> [<probe name>]
- * disconnect <channel name> <marker name> [<probe name>]
- */
-//ust// static ssize_t ltt_write(struct file *file, const char __user *buffer,
-//ust// size_t count, loff_t *offset)
-//ust// {
-//ust// char *kbuf;
-//ust// char *iter, *marker_action, *arg[4];
-//ust// ssize_t ret;
-//ust// int i;
-//ust//
-//ust// if (!count)
-//ust// return -EINVAL;
-//ust//
-//ust// kbuf = vmalloc(count + 1);
-//ust// kbuf[count] = '\0'; /* Transform into a string */
-//ust// ret = copy_from_user(kbuf, buffer, count);
-//ust// if (ret) {
-//ust// ret = -EINVAL;
-//ust// goto end;
-//ust// }
-//ust// get_marker_string(kbuf, &marker_action, &iter);
-//ust// if (!marker_action || marker_action == iter) {
-//ust// ret = -EINVAL;
-//ust// goto end;
-//ust// }
-//ust// for (i = 0; i < 4; i++) {
-//ust// arg[i] = NULL;
-//ust// if (iter < kbuf + count) {
-//ust// iter++; /* skip the added '\0' */
-//ust// get_marker_string(iter, &arg[i], &iter);
-//ust// if (arg[i] == iter)
-//ust// arg[i] = NULL;
-//ust// }
-//ust// }
-//ust//
-//ust// if (!arg[0] || !arg[1]) {
-//ust// ret = -EINVAL;
-//ust// goto end;
-//ust// }
-//ust//
-//ust// if (!strcmp(marker_action, "connect")) {
-//ust// ret = ltt_marker_connect(arg[0], arg[1], arg[2]);
-//ust// if (ret)
-//ust// goto end;
-//ust// } else if (!strcmp(marker_action, "disconnect")) {
-//ust// ret = ltt_marker_disconnect(arg[0], arg[1], arg[2]);
-//ust// if (ret)
-//ust// goto end;
-//ust// }
-//ust// ret = count;
-//ust// end:
-//ust// vfree(kbuf);
-//ust// return ret;
-//ust// }
-//ust//
-//ust// static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-//ust// {
-//ust// struct marker_iter *iter = m->private;
-//ust//
-//ust// marker_iter_next(iter);
-//ust// if (!iter->marker) {
-//ust// /*
-//ust// * Setting the iter module to -1UL will make sure
-//ust// * that no module can possibly hold the current marker.
-//ust// */
-//ust// iter->module = (void *)-1UL;
-//ust// return NULL;
-//ust// }
-//ust// return iter->marker;
-//ust// }
-//ust//
-//ust// static void *s_start(struct seq_file *m, loff_t *pos)
-//ust// {
-//ust// struct marker_iter *iter = m->private;
-//ust//
-//ust// if (!*pos)
-//ust// marker_iter_reset(iter);
-//ust// marker_iter_start(iter);
-//ust// if (!iter->marker) {
-//ust// /*
-//ust// * Setting the iter module to -1UL will make sure
-//ust// * that no module can possibly hold the current marker.
-//ust// */
-//ust// iter->module = (void *)-1UL;
-//ust// return NULL;
-//ust// }
-//ust// return iter->marker;
-//ust// }
-//ust//
-//ust// static void s_stop(struct seq_file *m, void *p)
-//ust// {
-//ust// marker_iter_stop(m->private);
-//ust// }
-//ust//
-//ust// static int s_show(struct seq_file *m, void *p)
-//ust// {
-//ust// struct marker_iter *iter = m->private;
-//ust//
-//ust// seq_printf(m, "channel: %s marker: %s format: \"%s\" state: %d "
-//ust// "event_id: %hu call: 0x%p probe %s : 0x%p\n",
-//ust// iter->marker->channel,
-//ust// iter->marker->name, iter->marker->format,
-//ust// _imv_read(iter->marker->state),
-//ust// iter->marker->event_id,
-//ust// iter->marker->call,
-//ust// iter->marker->ptype ? "multi" : "single",
-//ust// iter->marker->ptype ?
-//ust// (void*)iter->marker->multi : (void*)iter->marker->single.func);
-//ust// return 0;
-//ust// }
-//ust//
-//ust// static const struct seq_operations ltt_seq_op = {
-//ust// .start = s_start,
-//ust// .next = s_next,
-//ust// .stop = s_stop,
-//ust// .show = s_show,
-//ust// };
-//ust//
-//ust// static int ltt_open(struct inode *inode, struct file *file)
-//ust// {
-//ust// /*
-//ust// * Iterator kept in m->private.
-//ust// * Restart iteration on all modules between reads because we do not lock
-//ust// * the module mutex between those.
-//ust// */
-//ust// int ret;
-//ust// struct marker_iter *iter;
-//ust//
-//ust// iter = kzalloc(sizeof(*iter), GFP_KERNEL);
-//ust// if (!iter)
-//ust// return -ENOMEM;
-//ust//
-//ust// ret = seq_open(file, <t_seq_op);
-//ust// if (ret == 0)
-//ust// ((struct seq_file *)file->private_data)->private = iter;
-//ust// else
-//ust// kfree(iter);
-//ust// return ret;
-//ust// }
-//ust//
-//ust// static struct file_operations ltt_fops = {
-//ust// .write = ltt_write,
-//ust// .open = ltt_open,
-//ust// .read = seq_read,
-//ust// .llseek = seq_lseek,
-//ust// .release = seq_release_private,
-//ust// };
-
-static void disconnect_all_markers(void)
-{
- struct ltt_active_marker *pdata, *tmp;
-
- list_for_each_entry_safe(pdata, tmp, &markers_loaded_list, node) {
- marker_probe_unregister_private_data(pdata->probe->probe_func,
- pdata);
- list_del(&pdata->node);
- free(pdata);
- }
-}
-
-static char initialized = 0;
-
-void __attribute__((constructor)) init_marker_control(void)
-{
- if(!initialized) {
- int ret;
-
-//ust// pentry = create_proc_entry("ltt", S_IRUSR|S_IWUSR, NULL);
-//ust// if (!pentry)
-//ust// return -EBUSY;
-//ust// markers_loaded_cachep = KMEM_CACHE(ltt_active_marker, 0);
-
- ret = ltt_probe_register(&default_probe);
- BUG_ON(ret);
- ret = ltt_marker_connect("metadata", "core_marker_format",
- DEFAULT_PROBE);
- BUG_ON(ret);
- ret = ltt_marker_connect("metadata", "core_marker_id", DEFAULT_PROBE);
- BUG_ON(ret);
-//ust// pentry->proc_fops = <t_fops;
-
- initialized = 1;
- }
-}
-//ust// module_init(marker_control_init);
-
-static void __exit marker_control_exit(void)
-{
- int ret;
-
-//ust// remove_proc_entry("ltt", NULL);
- ret = ltt_marker_disconnect("metadata", "core_marker_format",
- DEFAULT_PROBE);
- BUG_ON(ret);
- ret = ltt_marker_disconnect("metadata", "core_marker_id",
- DEFAULT_PROBE);
- BUG_ON(ret);
- ret = ltt_probe_unregister(&default_probe);
- BUG_ON(ret);
- disconnect_all_markers();
-//ust// kmem_cache_destroy(markers_loaded_cachep);
-//ust// marker_synchronize_unregister();
-}
-//ust// module_exit(marker_control_exit);
-
-//ust// MODULE_LICENSE("GPL");
-//ust// MODULE_AUTHOR("Mathieu Desnoyers");
-//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Marker Control");
+++ /dev/null
-#ifndef MARKER_CONTROL_H
-#define MARKER_CONTROL_H
-
-int marker_control_init(void);
-int ltt_probe_register(struct ltt_available_probe *pdata);
-
-#endif /* MARKER_CONTROL_H */
+++ /dev/null
-#include <stdio.h>
-#include <stdint.h>
-#include <signal.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <sched.h>
-#include <fcntl.h>
-#include <poll.h>
-
-#include "marker.h"
-#include "tracer.h"
-#include "localerr.h"
-#include "ustcomm.h"
-#include "relay.h" /* FIXME: remove */
-
-//#define USE_CLONE
-
-#define USTSIGNAL SIGIO
-
-#define MAX_MSG_SIZE (100)
-#define MSG_NOTIF 1
-#define MSG_REGISTER_NOTIF 2
-
-char consumer_stack[10000];
-
-struct list_head blocked_consumers = LIST_HEAD_INIT(blocked_consumers);
-
-static struct ustcomm_app ustcomm_app;
-
-struct tracecmd { /* no padding */
- uint32_t size;
- uint16_t command;
-};
-
-//struct listener_arg {
-// int pipe_fd;
-//};
-
-struct trctl_msg {
- /* size: the size of all the fields except size itself */
- uint32_t size;
- uint16_t type;
- /* Only the necessary part of the payload is transferred. It
- * may even be none of it.
- */
- char payload[94];
-};
-
-struct consumer_channel {
- int fd;
- struct ltt_channel_struct *chan;
-};
-
-struct blocked_consumer {
- int fd_consumer;
- int fd_producer;
- int tmp_poll_idx;
-
- /* args to ustcomm_send_reply */
- struct ustcomm_server server;
- struct ustcomm_source src;
-
- /* args to ltt_do_get_subbuf */
- struct rchan_buf *rbuf;
- struct ltt_channel_buf_struct *lttbuf;
-
- struct list_head list;
-};
-
-static void print_markers(void)
-{
- struct marker_iter iter;
-
- lock_markers();
- marker_iter_reset(&iter);
- marker_iter_start(&iter);
-
- while(iter.marker) {
- fprintf(stderr, "marker: %s_%s \"%s\"\n", iter.marker->channel, iter.marker->name, iter.marker->format);
- marker_iter_next(&iter);
- }
- unlock_markers();
-}
-
-void do_command(struct tracecmd *cmd)
-{
-}
-
-void receive_commands()
-{
-}
-
-int fd_notif = -1;
-void notif_cb(void)
-{
- int result;
- struct trctl_msg msg;
-
- /* FIXME: fd_notif should probably be protected by a spinlock */
-
- if(fd_notif == -1)
- return;
-
- msg.type = MSG_NOTIF;
- msg.size = sizeof(msg.type);
-
- /* FIXME: don't block here */
- result = write(fd_notif, &msg, msg.size+sizeof(msg.size));
- if(result == -1) {
- PERROR("write");
- return;
- }
-}
-
-static int inform_consumer_daemon(void)
-{
-	ustcomm_request_consumer(getpid(), "metadata");
-	ustcomm_request_consumer(getpid(), "ust");
-	return 0;
-}
-
-void process_blocked_consumers(void)
-{
- int n_fds = 0;
- struct pollfd *fds;
- struct blocked_consumer *bc;
- int idx = 0;
- char inbuf;
- int result;
-
- list_for_each_entry(bc, &blocked_consumers, list) {
- n_fds++;
- }
-
- fds = (struct pollfd *) malloc(n_fds * sizeof(struct pollfd));
- if(fds == NULL) {
- ERR("malloc returned NULL");
- return;
- }
-
- list_for_each_entry(bc, &blocked_consumers, list) {
- fds[idx].fd = bc->fd_producer;
- fds[idx].events = POLLIN;
- bc->tmp_poll_idx = idx;
- idx++;
- }
-
- result = poll(fds, n_fds, 0);
- if(result == -1) {
- PERROR("poll");
-		free(fds);
-		return;
- }
-
- list_for_each_entry(bc, &blocked_consumers, list) {
- if(fds[bc->tmp_poll_idx].revents) {
- long consumed_old = 0;
- char *reply;
-
- result = read(bc->fd_producer, &inbuf, 1);
- if(result == -1) {
- PERROR("read");
- continue;
- }
- if(result == 0) {
- DBG("PRODUCER END");
-
- close(bc->fd_producer);
-
- __list_del(bc->list.prev, bc->list.next);
-
- result = ustcomm_send_reply(&bc->server, "END", &bc->src);
- if(result < 0) {
- ERR("ustcomm_send_reply failed");
- continue;
- }
-
- continue;
- }
-
- result = ltt_do_get_subbuf(bc->rbuf, bc->lttbuf, &consumed_old);
- if(result == -EAGAIN) {
- WARN("missed buffer?");
- continue;
- }
- else if(result < 0) {
- DBG("ltt_do_get_subbuf: error: %s", strerror(-result));
- }
- asprintf(&reply, "%s %ld", "OK", consumed_old);
- result = ustcomm_send_reply(&bc->server, reply, &bc->src);
- if(result < 0) {
- ERR("ustcomm_send_reply failed");
- free(reply);
- continue;
- }
- free(reply);
-
- __list_del(bc->list.prev, bc->list.next);
- }
- }
-
-	free(fds);
-}
-
-int listener_main(void *p)
-{
- int result;
-
- DBG("LISTENER");
-
- for(;;) {
- uint32_t size;
- struct sockaddr_un addr;
- socklen_t addrlen = sizeof(addr);
- char trace_name[] = "auto";
- char trace_type[] = "ustrelay";
- char *recvbuf;
- int len;
- struct ustcomm_source src;
-
- process_blocked_consumers();
-
- result = ustcomm_app_recv_message(&ustcomm_app, &recvbuf, &src, 5);
- if(result < 0) {
- WARN("error in ustcomm_app_recv_message");
- continue;
- }
- else if(result == 0) {
- /* no message */
- continue;
- }
-
- DBG("received a message! it's: %s\n", recvbuf);
- len = strlen(recvbuf);
-
- if(!strcmp(recvbuf, "print_markers")) {
- print_markers();
- }
- else if(!strcmp(recvbuf, "trace_setup")) {
- DBG("trace setup");
-
- result = ltt_trace_setup(trace_name);
- if(result < 0) {
- ERR("ltt_trace_setup failed");
-			continue;
- }
-
- result = ltt_trace_set_type(trace_name, trace_type);
- if(result < 0) {
- ERR("ltt_trace_set_type failed");
-			continue;
- }
- }
- else if(!strcmp(recvbuf, "trace_alloc")) {
- DBG("trace alloc");
-
- result = ltt_trace_alloc(trace_name);
- if(result < 0) {
- ERR("ltt_trace_alloc failed");
-			continue;
- }
- }
- else if(!strcmp(recvbuf, "trace_start")) {
- DBG("trace start");
-
- result = ltt_trace_start(trace_name);
- if(result < 0) {
- ERR("ltt_trace_start failed");
- continue;
- }
- }
- else if(!strcmp(recvbuf, "trace_stop")) {
- DBG("trace stop");
-
- result = ltt_trace_stop(trace_name);
- if(result < 0) {
- ERR("ltt_trace_stop failed");
-			continue;
- }
- }
- else if(!strcmp(recvbuf, "trace_destroy")) {
-
- DBG("trace destroy");
-
- result = ltt_trace_destroy(trace_name);
- if(result < 0) {
- ERR("ltt_trace_destroy failed");
-			continue;
- }
- }
- else if(nth_token_is(recvbuf, "get_shmid", 0) == 1) {
- struct ltt_trace_struct *trace;
- char trace_name[] = "auto";
- int i;
- char *channel_name;
-
- DBG("get_shmid");
-
- channel_name = nth_token(recvbuf, 1);
- if(channel_name == NULL) {
- ERR("get_shmid: cannot parse channel");
- goto next_cmd;
- }
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
- ltt_unlock_traces();
-
- if(trace == NULL) {
- CPRINTF("cannot find trace!");
-			goto next_cmd;
- }
-
- for(i=0; i<trace->nr_channels; i++) {
- struct rchan *rchan = trace->channels[i].trans_channel_data;
- struct rchan_buf *rbuf = rchan->buf;
- struct ltt_channel_struct *ltt_channel = (struct ltt_channel_struct *)rchan->private_data;
- struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
-
- if(!strcmp(trace->channels[i].channel_name, channel_name)) {
- char *reply;
-
- DBG("the shmid for the requested channel is %d", rbuf->shmid);
- DBG("the shmid for its buffer structure is %d", ltt_channel->buf_shmid);
- asprintf(&reply, "%d %d", rbuf->shmid, ltt_channel->buf_shmid);
-
- result = ustcomm_send_reply(&ustcomm_app.server, reply, &src);
- if(result) {
- ERR("listener: get_shmid: ustcomm_send_reply failed");
- goto next_cmd;
- }
-
- free(reply);
-
- break;
- }
- }
- }
- else if(nth_token_is(recvbuf, "get_n_subbufs", 0) == 1) {
- struct ltt_trace_struct *trace;
- char trace_name[] = "auto";
- int i;
- char *channel_name;
-
- DBG("get_n_subbufs");
-
- channel_name = nth_token(recvbuf, 1);
- if(channel_name == NULL) {
- ERR("get_n_subbufs: cannot parse channel");
- goto next_cmd;
- }
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
- ltt_unlock_traces();
-
- if(trace == NULL) {
- CPRINTF("cannot find trace!");
-			goto next_cmd;
- }
-
- for(i=0; i<trace->nr_channels; i++) {
- struct rchan *rchan = trace->channels[i].trans_channel_data;
-
- if(!strcmp(trace->channels[i].channel_name, channel_name)) {
- char *reply;
-
- DBG("the n_subbufs for the requested channel is %d", rchan->n_subbufs);
- asprintf(&reply, "%d", rchan->n_subbufs);
-
- result = ustcomm_send_reply(&ustcomm_app.server, reply, &src);
- if(result) {
- ERR("listener: get_n_subbufs: ustcomm_send_reply failed");
- goto next_cmd;
- }
-
- free(reply);
-
- break;
- }
- }
- }
- else if(nth_token_is(recvbuf, "get_subbuf_size", 0) == 1) {
- struct ltt_trace_struct *trace;
- char trace_name[] = "auto";
- int i;
- char *channel_name;
-
- DBG("get_subbuf_size");
-
- channel_name = nth_token(recvbuf, 1);
- if(channel_name == NULL) {
- ERR("get_subbuf_size: cannot parse channel");
- goto next_cmd;
- }
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
- ltt_unlock_traces();
-
- if(trace == NULL) {
- CPRINTF("cannot find trace!");
-			goto next_cmd;
- }
-
- for(i=0; i<trace->nr_channels; i++) {
- struct rchan *rchan = trace->channels[i].trans_channel_data;
-
- if(!strcmp(trace->channels[i].channel_name, channel_name)) {
- char *reply;
-
- DBG("the subbuf_size for the requested channel is %d", rchan->subbuf_size);
- asprintf(&reply, "%d", rchan->subbuf_size);
-
- result = ustcomm_send_reply(&ustcomm_app.server, reply, &src);
- if(result) {
- ERR("listener: get_subbuf_size: ustcomm_send_reply failed");
- goto next_cmd;
- }
-
- free(reply);
-
- break;
- }
- }
- }
- else if(nth_token_is(recvbuf, "load_probe_lib", 0) == 1) {
- char *libfile;
-
- libfile = nth_token(recvbuf, 1);
-
- DBG("load_probe_lib loading %s", libfile);
- }
- else if(nth_token_is(recvbuf, "get_subbuffer", 0) == 1) {
- struct ltt_trace_struct *trace;
- char trace_name[] = "auto";
- int i;
- char *channel_name;
-
- DBG("get_subbuf");
-
- channel_name = nth_token(recvbuf, 1);
- if(channel_name == NULL) {
- ERR("get_subbuf: cannot parse channel");
- goto next_cmd;
- }
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
- ltt_unlock_traces();
-
- if(trace == NULL) {
- CPRINTF("cannot find trace!");
-			goto next_cmd;
- }
-
- for(i=0; i<trace->nr_channels; i++) {
- struct rchan *rchan = trace->channels[i].trans_channel_data;
-
- if(!strcmp(trace->channels[i].channel_name, channel_name)) {
- struct rchan_buf *rbuf = rchan->buf;
- struct ltt_channel_buf_struct *lttbuf = trace->channels[i].buf;
- struct blocked_consumer *bc;
-
- bc = (struct blocked_consumer *) malloc(sizeof(struct blocked_consumer));
- if(bc == NULL) {
- ERR("malloc returned NULL");
- goto next_cmd;
- }
- bc->fd_consumer = src.fd;
- bc->fd_producer = lttbuf->data_ready_fd_read;
- bc->rbuf = rbuf;
- bc->lttbuf = lttbuf;
- bc->src = src;
- bc->server = ustcomm_app.server;
-
- list_add(&bc->list, &blocked_consumers);
-
- break;
- }
- }
- }
- else if(nth_token_is(recvbuf, "put_subbuffer", 0) == 1) {
- struct ltt_trace_struct *trace;
- char trace_name[] = "auto";
- int i;
- char *channel_name;
- long consumed_old;
- char *consumed_old_str;
- char *endptr;
-
- DBG("put_subbuf");
-
- channel_name = strdup_malloc(nth_token(recvbuf, 1));
- if(channel_name == NULL) {
-			ERR("put_subbuf: cannot parse channel");
- goto next_cmd;
- }
-
- consumed_old_str = strdup_malloc(nth_token(recvbuf, 2));
- if(consumed_old_str == NULL) {
- ERR("put_subbuf: cannot parse consumed_old");
- goto next_cmd;
- }
- consumed_old = strtol(consumed_old_str, &endptr, 10);
- if(*endptr != '\0') {
- ERR("put_subbuf: invalid value for consumed_old");
- goto next_cmd;
- }
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
- ltt_unlock_traces();
-
- if(trace == NULL) {
- CPRINTF("cannot find trace!");
-			goto next_cmd;
- }
-
- for(i=0; i<trace->nr_channels; i++) {
- struct rchan *rchan = trace->channels[i].trans_channel_data;
-
- if(!strcmp(trace->channels[i].channel_name, channel_name)) {
- struct rchan_buf *rbuf = rchan->buf;
- struct ltt_channel_buf_struct *lttbuf = trace->channels[i].buf;
- char *reply;
-
- result = ltt_do_put_subbuf(rbuf, lttbuf, consumed_old);
- if(result < 0) {
- WARN("ltt_do_put_subbuf: error");
- }
- else {
- DBG("ltt_do_put_subbuf: success");
- }
-					asprintf(&reply, "%s %ld", "OK", consumed_old);
-
- result = ustcomm_send_reply(&ustcomm_app.server, reply, &src);
- if(result) {
- ERR("listener: put_subbuf: ustcomm_send_reply failed");
- goto next_cmd;
- }
-
- free(reply);
-
- break;
- }
- }
-
- free(channel_name);
- free(consumed_old_str);
- }
-// else if(nth_token_is(recvbuf, "get_notifications", 0) == 1) {
-// struct ltt_trace_struct *trace;
-// char trace_name[] = "auto";
-// int i;
-// char *channel_name;
-//
-// DBG("get_notifications");
-//
-// channel_name = strdup_malloc(nth_token(recvbuf, 1));
-// if(channel_name == NULL) {
-// ERR("put_subbuf_size: cannot parse channel");
-// goto next_cmd;
-// }
-//
-// ltt_lock_traces();
-// trace = _ltt_trace_find(trace_name);
-// ltt_unlock_traces();
-//
-// if(trace == NULL) {
-// CPRINTF("cannot find trace!");
-// return 1;
-// }
-//
-// for(i=0; i<trace->nr_channels; i++) {
-// struct rchan *rchan = trace->channels[i].trans_channel_data;
-// int fd;
-//
-// if(!strcmp(trace->channels[i].channel_name, channel_name)) {
-// struct rchan_buf *rbuf = rchan->buf;
-// struct ltt_channel_buf_struct *lttbuf = trace->channels[i].buf;
-//
-// result = fd = ustcomm_app_detach_client(&ustcomm_app, &src);
-// if(result == -1) {
-// ERR("ustcomm_app_detach_client failed");
-// goto next_cmd;
-// }
-//
-// lttbuf->wake_consumer_arg = (void *) fd;
-//
-// smp_wmb();
-//
-// lttbuf->call_wake_consumer = 1;
-//
-// break;
-// }
-// }
-//
-// free(channel_name);
-// }
- else {
- ERR("unable to parse message: %s", recvbuf);
- }
-
- next_cmd:
- free(recvbuf);
- }
-}
-
-void create_listener(void)
-{
- int result;
- static char listener_stack[16384];
- //char *listener_stack = malloc(16384);
-
-#ifdef USE_CLONE
- result = clone(listener_main, listener_stack+sizeof(listener_stack)-1, CLONE_FS | CLONE_FILES | CLONE_VM | CLONE_SIGHAND | CLONE_THREAD, NULL);
- if(result == -1) {
- perror("clone");
- }
-#else
-	pthread_t thread;
-
-	/* listener_main is declared to return int for the clone() path;
-	 * cast for pthread_create, which expects void *(*)(void *). */
-	pthread_create(&thread, NULL, (void *(*)(void *)) listener_main, NULL);
-#endif
-}
-
-/* The signal handler itself. Signals must be set up so there cannot be
-   nested signals. */
-
-void sighandler(int sig)
-{
- static char have_listener = 0;
- DBG("sighandler");
-
- if(!have_listener) {
- create_listener();
- have_listener = 1;
- }
-}
-
-/* Called by the app signal handler to chain it to us. */
-
-void chain_signal(void)
-{
- sighandler(USTSIGNAL);
-}
-
-static int init_socket(void)
-{
- return ustcomm_init_app(getpid(), &ustcomm_app);
-}
-
-static void destroy_socket(void)
-{
-// int result;
-//
-// if(mysocketfile[0] == '\0')
-// return;
-//
-// result = unlink(mysocketfile);
-// if(result == -1) {
-// PERROR("unlink");
-// }
-}
-
-static int init_signal_handler(void)
-{
-	/* Attempt to handle SIGIO. If the main program wants to
-	 * handle it, fine, it'll override us. Then it'll have to
- * use the chaining function.
- */
-
- int result;
- struct sigaction act;
-
- result = sigemptyset(&act.sa_mask);
- if(result == -1) {
- PERROR("sigemptyset");
- return -1;
- }
-
- act.sa_handler = sighandler;
- act.sa_flags = SA_RESTART;
-
- /* Only defer ourselves. Also, try to restart interrupted
- * syscalls to disturb the traced program as little as possible.
- */
- result = sigaction(SIGIO, &act, NULL);
- if(result == -1) {
- PERROR("sigaction");
- return -1;
- }
-
- return 0;
-}
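
Per the comment above, an application that later installs its own SIGIO
handler is expected to chain back into the library; a hedged sketch of the
application side:

	static void app_sigio_handler(int sig)
	{
		/* ... the application's own SIGIO work ... */

		/* Hand the signal to the tracing library too, so the
		 * listener thread still gets created. */
		chain_signal();
	}
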
-
-static void auto_probe_connect(struct marker *m)
-{
- int result;
-
- result = ltt_marker_connect(m->channel, m->name, "default");
- if(result)
-		ERR("ltt_marker_connect failed");
-
- DBG("just auto connected marker %s %s to probe default", m->channel, m->name);
-}
-
-static void __attribute__((constructor(101))) init0()
-{
- DBG("UST_AUTOPROBE constructor");
- if(getenv("UST_AUTOPROBE")) {
- marker_set_new_marker_cb(auto_probe_connect);
- }
-}
-
-static void fini(void);
-
-static void __attribute__((constructor(1000))) init()
-{
- int result;
-
- DBG("UST_TRACE constructor");
-
- /* Must create socket before signal handler to prevent races.
- */
- result = init_socket();
- if(result == -1) {
- ERR("init_socket error");
- return;
- }
- result = init_signal_handler();
- if(result == -1) {
- ERR("init_signal_handler error");
- return;
- }
-
- if(getenv("UST_TRACE")) {
- char trace_name[] = "auto";
- char trace_type[] = "ustrelay";
-
- DBG("starting early tracing");
-
- /* Ensure marker control is initialized */
- init_marker_control();
-
- /* Ensure relay is initialized */
- init_ustrelay_transport();
-
- /* Ensure markers are initialized */
- init_markers();
-
-		/* Make sure the "ust" channel is registered. */
- ltt_channels_register("ust");
-
- result = ltt_trace_setup(trace_name);
- if(result < 0) {
- ERR("ltt_trace_setup failed");
- return;
- }
-
- result = ltt_trace_set_type(trace_name, trace_type);
- if(result < 0) {
- ERR("ltt_trace_set_type failed");
- return;
- }
-
- result = ltt_trace_alloc(trace_name);
- if(result < 0) {
- ERR("ltt_trace_alloc failed");
- return;
- }
-
- result = ltt_trace_start(trace_name);
- if(result < 0) {
- ERR("ltt_trace_start failed");
- return;
- }
- //start_consumer();
- inform_consumer_daemon();
- }
-
-	/* FIXME: on error, the resources allocated above should be torn down
-	 * in reverse order. */
-}
-
-/* This is only called if we terminate normally, not with an unhandled signal,
- * so we cannot rely on it. */
-
-static void __attribute__((destructor)) fini()
-{
- int result;
-
- /* if trace running, finish it */
-
- DBG("destructor stopping traces");
-
- result = ltt_trace_stop("auto");
- if(result == -1) {
- ERR("ltt_trace_stop error");
- }
-
- result = ltt_trace_destroy("auto");
- if(result == -1) {
- ERR("ltt_trace_destroy error");
- }
-
- /* FIXME: wait for the consumer to be done */
- //DBG("waiting 5 sec for consume");
- //sleep(5);
-
- destroy_socket();
-}
+++ /dev/null
-lib_LTLIBRARIES = libtracing.la
-libtracing_la_SOURCES = channels.c channels.h relay.c relay.h serialize.c tracer.c tracer.h tracercore.c tracercore.h
-
-INCLUDES = -I$(top_builddir)/share
-INCLUDES += -I@URCU_PATH@
-INCLUDES += -I@KCOMPAT_PATH@
-INCLUDES += -I$(top_builddir)/libmarkers
+++ /dev/null
-/*
- * ltt/ltt-channels.c
- *
- * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * LTTng channel management.
- *
- * Author:
- * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- */
-
-//ust// #include <linux/module.h>
-//ust// #include <linux/ltt-channels.h>
-//ust// #include <linux/mutex.h>
-//ust// #include <linux/vmalloc.h>
-
-#include "kernelcompat.h"
-#include "channels.h"
-#include "usterr.h"
-
-/*
- * ltt_channel_mutex may be nested inside the LTT trace mutex.
- * ltt_channel_mutex mutex may be nested inside markers mutex.
- */
-static DEFINE_MUTEX(ltt_channel_mutex);
-static LIST_HEAD(ltt_channels);
-/*
- * Index of next channel in array. Makes sure that as long as a trace channel is
- * allocated, no array index will be re-used when a channel is freed and then
- * another channel is allocated. This index is cleared and the array indexes
- * get reassigned when the index_kref goes back to 0, which indicates that no
- * more trace channels are allocated.
- */
-static unsigned int free_index;
-static struct kref index_kref; /* Keeps track of allocated trace channels */
-
-static struct ltt_channel_setting *lookup_channel(const char *name)
-{
- struct ltt_channel_setting *iter;
-
-	list_for_each_entry(iter, &ltt_channels, list)
- if (strcmp(name, iter->name) == 0)
- return iter;
- return NULL;
-}
-
-/*
- * Must be called when channel refcount falls to 0 _and_ also when the last
- * trace is freed. This function is responsible for compacting the channel and
- * event IDs when no users are active.
- *
- * Called with lock_markers() and channels mutex held.
- */
-static void release_channel_setting(struct kref *kref)
-{
- struct ltt_channel_setting *setting = container_of(kref,
- struct ltt_channel_setting, kref);
- struct ltt_channel_setting *iter;
-
- if (atomic_read(&index_kref.refcount) == 0
- && atomic_read(&setting->kref.refcount) == 0) {
- list_del(&setting->list);
- kfree(setting);
-
- free_index = 0;
-		list_for_each_entry(iter, &ltt_channels, list) {
- iter->index = free_index++;
- iter->free_event_id = 0;
- }
-//ust// markers_compact_event_ids();
- }
-}
-
-/*
- * Perform channel index compaction when the last trace channel is freed.
- *
- * Called with lock_markers() and channels mutex held.
- */
-static void release_trace_channel(struct kref *kref)
-{
- struct ltt_channel_setting *iter, *n;
-
-	list_for_each_entry_safe(iter, n, &ltt_channels, list)
- release_channel_setting(&iter->kref);
-}
-
-/**
- * ltt_channels_register - Register a trace channel.
- * @name: channel name
- *
- * Uses refcounting.
- */
-int ltt_channels_register(const char *name)
-{
- struct ltt_channel_setting *setting;
- int ret = 0;
-
-	mutex_lock(&ltt_channel_mutex);
- setting = lookup_channel(name);
- if (setting) {
- if (atomic_read(&setting->kref.refcount) == 0)
- goto init_kref;
- else {
- kref_get(&setting->kref);
- goto end;
- }
- }
- setting = kzalloc(sizeof(*setting), GFP_KERNEL);
- if (!setting) {
- ret = -ENOMEM;
- goto end;
- }
-	list_add(&setting->list, &ltt_channels);
- strncpy(setting->name, name, PATH_MAX-1);
- setting->index = free_index++;
-init_kref:
- kref_init(&setting->kref);
-end:
-	mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_register);
-
-/**
- * ltt_channels_unregister - Unregister a trace channel.
- * @name: channel name
- *
- * Must be called with markers mutex held.
- */
-int ltt_channels_unregister(const char *name)
-{
- struct ltt_channel_setting *setting;
- int ret = 0;
-
-	mutex_lock(&ltt_channel_mutex);
- setting = lookup_channel(name);
- if (!setting || atomic_read(&setting->kref.refcount) == 0) {
- ret = -ENOENT;
- goto end;
- }
- kref_put(&setting->kref, release_channel_setting);
-end:
-	mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);
-
-/**
- * ltt_channels_set_default - Set channel default behavior.
- * @name: default channel name
- * @subbuf_size: size of the subbuffers
- * @subbuf_cnt: number of subbuffers
- */
-int ltt_channels_set_default(const char *name,
- unsigned int subbuf_size,
- unsigned int subbuf_cnt)
-{
- struct ltt_channel_setting *setting;
- int ret = 0;
-
-	mutex_lock(&ltt_channel_mutex);
- setting = lookup_channel(name);
- if (!setting || atomic_read(&setting->kref.refcount) == 0) {
- ret = -ENOENT;
- goto end;
- }
- setting->subbuf_size = subbuf_size;
- setting->subbuf_cnt = subbuf_cnt;
-end:
-	mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);
-
-/**
- * ltt_channels_get_name_from_index - get channel name from channel index
- * @index: channel index
- *
- * Looks up the channel name given its index. Done to keep the name
- * information outside of each trace channel instance.
- */
-const char *ltt_channels_get_name_from_index(unsigned int index)
-{
- struct ltt_channel_setting *iter;
-
-	list_for_each_entry(iter, &ltt_channels, list)
- if (iter->index == index && atomic_read(&iter->kref.refcount))
- return iter->name;
- return NULL;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);
-
-static struct ltt_channel_setting *
-ltt_channels_get_setting_from_name(const char *name)
-{
- struct ltt_channel_setting *iter;
-
-	list_for_each_entry(iter, &ltt_channels, list)
- if (!strcmp(iter->name, name)
- && atomic_read(&iter->kref.refcount))
- return iter;
- return NULL;
-}
-
-/**
- * ltt_channels_get_index_from_name - get channel index from channel name
- * @name: channel name
- *
- * Looks up the channel index given its name. Done to keep the name
- * information outside of each trace channel instance.
- * Returns -1 if not found.
- */
-int ltt_channels_get_index_from_name(const char *name)
-{
- struct ltt_channel_setting *setting;
-
- setting = ltt_channels_get_setting_from_name(name);
- if (setting)
- return setting->index;
- else
- return -1;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
-
-/**
- * ltt_channels_trace_alloc - Allocate channel structures for a trace
- * @subbuf_size: subbuffer size. 0 uses default.
- * @subbuf_cnt: number of subbuffers per per-cpu buffer; 0 uses the default.
- * @flags: Default channel flags
- *
- * Use the current channel list to allocate the channels for a trace.
- * Called with trace lock held. Does not perform the trace buffer allocation,
- * because we must let the user overwrite specific channel sizes.
- */
-struct ltt_channel_struct *ltt_channels_trace_alloc(unsigned int *nr_channels,
- int overwrite,
- int active)
-{
- struct ltt_channel_struct *channel = NULL;
- struct ltt_channel_setting *iter;
-
-	mutex_lock(&ltt_channel_mutex);
- if (!free_index) {
- WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
- goto end;
- }
- if (!atomic_read(&index_kref.refcount))
- kref_init(&index_kref);
- else
- kref_get(&index_kref);
- *nr_channels = free_index;
- channel = kzalloc(sizeof(struct ltt_channel_struct) * free_index,
- GFP_KERNEL);
- if (!channel) {
- WARN("ltt_channel_struct: channel null after alloc");
- goto end;
- }
-	list_for_each_entry(iter, &ltt_channels, list) {
- if (!atomic_read(&iter->kref.refcount))
- continue;
- channel[iter->index].subbuf_size = iter->subbuf_size;
- channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
- channel[iter->index].overwrite = overwrite;
- channel[iter->index].active = active;
- channel[iter->index].channel_name = iter->name;
- }
-end:
-	mutex_unlock(&ltt_channel_mutex);
- return channel;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
-
-/**
- * ltt_channels_trace_free - Free one trace's channels
- * @channels: channels to free
- *
- * Called with trace lock held. The actual channel buffers must be freed before
- * this function is called.
- */
-void ltt_channels_trace_free(struct ltt_channel_struct *channels)
-{
- lock_markers();
-	mutex_lock(&ltt_channel_mutex);
-	kfree(channels);
-	kref_put(&index_kref, release_trace_channel);
-	mutex_unlock(&ltt_channel_mutex);
- unlock_markers();
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
-
-/**
- * _ltt_channels_get_event_id - get next event ID for a marker
- * @channel: channel name
- * @name: event name
- *
- * Returns a unique event ID (for this channel) or < 0 on error.
- * Must be called with channels mutex held.
- */
-int _ltt_channels_get_event_id(const char *channel, const char *name)
-{
- struct ltt_channel_setting *setting;
- int ret;
-
- setting = ltt_channels_get_setting_from_name(channel);
- if (!setting) {
- ret = -ENOENT;
- goto end;
- }
- if (strcmp(channel, "metadata") == 0) {
- if (strcmp(name, "core_marker_id") == 0)
- ret = 0;
- else if (strcmp(name, "core_marker_format") == 0)
- ret = 1;
- else if (strcmp(name, "testev") == 0)
- ret = 2;
- else
- ret = -ENOENT;
- goto end;
- }
- if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
- ret = -ENOSPC;
- goto end;
- }
- ret = setting->free_event_id++;
-end:
- return ret;
-}
-
-/**
- * ltt_channels_get_event_id - get next event ID for a marker
- * @channel: channel name
- * @name: event name
- *
- * Returns a unique event ID (for this channel) or < 0 on error.
- */
-int ltt_channels_get_event_id(const char *channel, const char *name)
-{
- int ret;
-
-	mutex_lock(&ltt_channel_mutex);
-	ret = _ltt_channels_get_event_id(channel, name);
-	mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
-
-//ust// MODULE_LICENSE("GPL");
-//ust// MODULE_AUTHOR("Mathieu Desnoyers");
-//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");
+++ /dev/null
-#ifndef _LTT_CHANNELS_H
-#define _LTT_CHANNELS_H
-
-/*
- * Copyright (C) 2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * Dynamic tracer channel allocation.
- */
-
-#include <linux/limits.h>
-//ust// #include <linux/kref.h>
-//ust// #include <linux/list.h>
-#include <errno.h>
-
-#include "kernelcompat.h"
-#include "kref.h"
-#include "list.h"
-
-#define EVENTS_PER_CHANNEL 65536
-
-struct ltt_trace_struct;
-struct rchan_buf;
-
-struct ltt_channel_struct {
- /* First 32 bytes cache-hot cacheline */
- struct ltt_trace_struct *trace;
- void *buf;
- void *trans_channel_data;
- int overwrite:1;
- int active:1;
- unsigned int n_subbufs_order;
- unsigned long commit_count_mask; /*
- * Commit count mask, removing
- * the MSBs corresponding to
- * bits used to represent the
- * subbuffer index.
- */
- /* End of first 32 bytes cacheline */
-
- /*
- * buffer_begin - called on buffer-switch to a new sub-buffer
- * @buf: the channel buffer containing the new sub-buffer
- */
- void (*buffer_begin) (struct rchan_buf *buf,
- u64 tsc, unsigned int subbuf_idx);
- /*
- * buffer_end - called on buffer-switch to a new sub-buffer
- * @buf: the channel buffer containing the previous sub-buffer
- */
- void (*buffer_end) (struct rchan_buf *buf,
- u64 tsc, unsigned int offset, unsigned int subbuf_idx);
- struct kref kref; /* Channel transport reference count */
- unsigned int subbuf_size;
- unsigned int subbuf_cnt;
- const char *channel_name;
-
- int buf_shmid;
-} ____cacheline_aligned;
-
-struct ltt_channel_setting {
- unsigned int subbuf_size;
- unsigned int subbuf_cnt;
- struct kref kref; /* Number of references to structure content */
- struct list_head list;
- unsigned int index; /* index of channel in trace channel array */
- u16 free_event_id; /* Next event ID to allocate */
- char name[PATH_MAX];
-};
-
-int ltt_channels_register(const char *name);
-int ltt_channels_unregister(const char *name);
-int ltt_channels_set_default(const char *name,
- unsigned int subbuf_size,
- unsigned int subbuf_cnt);
-const char *ltt_channels_get_name_from_index(unsigned int index);
-int ltt_channels_get_index_from_name(const char *name);
-struct ltt_channel_struct *ltt_channels_trace_alloc(unsigned int *nr_channels,
- int overwrite,
- int active);
-void ltt_channels_trace_free(struct ltt_channel_struct *channels);
-int _ltt_channels_get_event_id(const char *channel, const char *name);
-int ltt_channels_get_event_id(const char *channel, const char *name);
-
-#endif /* _LTT_CHANNELS_H */
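A minimal usage sketch of the API declared above (a hypothetical caller; "example" is an arbitrary channel name and the subbuffer sizes are illustrative only):

	#include "channels.h"

	static int setup_example_channel(void)
	{
		int ret;

		ret = ltt_channels_register("example");	/* takes a reference */
		if (ret)
			return ret;

		/* 4 subbuffers of 4096 bytes each */
		ret = ltt_channels_set_default("example", 4096, 4);
		if (ret)
			goto error;

		if (ltt_channels_get_index_from_name("example") < 0) {
			ret = -ENOENT;
			goto error;
		}
		return 0;

	error:
		ltt_channels_unregister("example");	/* drops the reference */
		return ret;
	}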
+++ /dev/null
-/*
- * Public API and common code for kernel->userspace relay file support.
- *
- * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
- * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
- * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * Moved to kernel/relay.c by Paul Mundt, 2006.
- * November 2006 - CPU hotplug support by Mathieu Desnoyers
- * (mathieu.desnoyers@polymtl.ca)
- *
- * This file is released under the GPL.
- */
-//ust// #include <linux/errno.h>
-//ust// #include <linux/stddef.h>
-//ust// #include <linux/slab.h>
-//ust// #include <linux/module.h>
-//ust// #include <linux/string.h>
-//ust// #include <linux/ltt-relay.h>
-//ust// #include <linux/vmalloc.h>
-//ust// #include <linux/mm.h>
-//ust// #include <linux/cpu.h>
-//ust// #include <linux/splice.h>
-//ust// #include <linux/bitops.h>
-#include "kernelcompat.h"
-#include <sys/mman.h>
-#include <sys/ipc.h>
-#include <sys/shm.h>
-#include "list.h"
-#include "relay.h"
-#include "channels.h"
-#include "kref.h"
-#include "tracer.h"
-#include "tracercore.h"
-#include "usterr.h"
-
-/* list of open channels, for cpu hotplug */
-static DEFINE_MUTEX(relay_channels_mutex);
-static LIST_HEAD(relay_channels);
-
-
-static struct dentry *ltt_create_buf_file_callback(struct rchan_buf *buf);
-
-/**
- * relay_alloc_buf - allocate a channel buffer
- * @buf: the buffer struct
- * @size: total size of the buffer
- */
-//ust// static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
-//ust//{
-//ust// unsigned int i, n_pages;
-//ust// struct buf_page *buf_page, *n;
-//ust//
-//ust// *size = PAGE_ALIGN(*size);
-//ust// n_pages = *size >> PAGE_SHIFT;
-//ust//
-//ust// INIT_LIST_HEAD(&buf->pages);
-//ust//
-//ust// for (i = 0; i < n_pages; i++) {
-//ust// buf_page = kmalloc_node(sizeof(*buf_page), GFP_KERNEL,
-//ust// cpu_to_node(buf->cpu));
-//ust// if (unlikely(!buf_page))
-//ust// goto depopulate;
-//ust// buf_page->page = alloc_pages_node(cpu_to_node(buf->cpu),
-//ust// GFP_KERNEL | __GFP_ZERO, 0);
-//ust// if (unlikely(!buf_page->page)) {
-//ust// kfree(buf_page);
-//ust// goto depopulate;
-//ust// }
-//ust// list_add_tail(&buf_page->list, &buf->pages);
-//ust// buf_page->offset = (size_t)i << PAGE_SHIFT;
-//ust// buf_page->buf = buf;
-//ust// set_page_private(buf_page->page, (unsigned long)buf_page);
-//ust// if (i == 0) {
-//ust// buf->wpage = buf_page;
-//ust// buf->hpage[0] = buf_page;
-//ust// buf->hpage[1] = buf_page;
-//ust// buf->rpage = buf_page;
-//ust// }
-//ust// }
-//ust// buf->page_count = n_pages;
-//ust// return 0;
-//ust//
-//ust//depopulate:
-//ust// list_for_each_entry_safe(buf_page, n, &buf->pages, list) {
-//ust// list_del_init(&buf_page->list);
-//ust// __free_page(buf_page->page);
-//ust// kfree(buf_page);
-//ust// }
-//ust// return -ENOMEM;
-//ust//}
-
-static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
-{
-//ust// unsigned int n_pages;
-//ust// struct buf_page *buf_page, *n;
-
- void *ptr;
- int result;
-
- *size = PAGE_ALIGN(*size);
-
- result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
- if(buf->shmid == -1) {
- PERROR("shmget");
- return -1;
- }
-
- ptr = shmat(buf->shmid, NULL, 0);
- if(ptr == (void *) -1) {
- perror("shmat");
- goto destroy_shmem;
- }
-
- /* Already mark the shared memory for destruction. This will occur only
- * when all users have detached.
- */
- result = shmctl(buf->shmid, IPC_RMID, NULL);
- if(result == -1) {
- perror("shmctl");
- return -1;
- }
-
- buf->buf_data = ptr;
- buf->buf_size = *size;
-
- return 0;
-
- destroy_shmem:
- result = shmctl(buf->shmid, IPC_RMID, NULL);
- if(result == -1) {
- perror("shmctl");
- }
-
- return -1;
-}
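The shmget()/shmat()/shmctl(IPC_RMID) sequence above is a standard System V shared-memory idiom: marking the segment for removal immediately after creation means the kernel reclaims it as soon as the last attached process goes away, even if the tracer crashes. A self-contained illustration, independent of the tracer:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ipc.h>
	#include <sys/shm.h>

	int main(void)
	{
		char *p;
		int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | IPC_EXCL | 0700);

		if (shmid == -1) {
			perror("shmget");
			return 1;
		}
		p = shmat(shmid, NULL, 0);
		if (p == (void *) -1) {
			perror("shmat");
			shmctl(shmid, IPC_RMID, NULL);
			return 1;
		}
		/* From here on, the segment lives only while someone is attached. */
		if (shmctl(shmid, IPC_RMID, NULL) == -1)
			perror("shmctl");

		strcpy(p, "hello");
		printf("%s\n", p);
		shmdt(p);	/* last detach: the kernel frees the segment */
		return 0;
	}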
-
-/**
- * relay_create_buf - allocate and initialize a channel buffer
- * @chan: the relay channel
- *
- * Returns channel buffer if successful, %NULL otherwise.
- */
-static struct rchan_buf *relay_create_buf(struct rchan *chan)
-{
- int ret;
- struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
- if (!buf)
- return NULL;
-
-// buf->cpu = cpu;
- ret = relay_alloc_buf(buf, &chan->alloc_size);
- if (ret)
- goto free_buf;
-
- buf->chan = chan;
- kref_get(&buf->chan->kref);
- return buf;
-
-free_buf:
- kfree(buf);
- return NULL;
-}
-
-/**
- * relay_destroy_channel - free the channel struct
- * @kref: target kernel reference that contains the relay channel
- *
- * Should only be called from kref_put().
- */
-static void relay_destroy_channel(struct kref *kref)
-{
- struct rchan *chan = container_of(kref, struct rchan, kref);
- kfree(chan);
-}
-
-/**
- * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
- * @buf: the buffer struct
- */
-static void relay_destroy_buf(struct rchan_buf *buf)
-{
-	struct rchan *chan = buf->chan;
-	int result;
-
-	/* The buffer was attached with shmat(), so release it with shmdt(). */
-	result = shmdt(buf->buf_data);
-	if(result == -1) {
-		PERROR("shmdt");
-	}
-
-//ust// chan->buf[buf->cpu] = NULL;
- kfree(buf);
- kref_put(&chan->kref, relay_destroy_channel);
-}
-
-/**
- * relay_remove_buf - remove a channel buffer
- * @kref: target kernel reference that contains the relay buffer
- *
- * Removes the file from the filesystem, which also frees the
- * rchan_buf_struct and the channel buffer. Should only be called from
- * kref_put().
- */
-static void relay_remove_buf(struct kref *kref)
-{
- struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
-//ust// buf->chan->cb->remove_buf_file(buf);
- relay_destroy_buf(buf);
-}
-
-/*
- * High-level relay kernel API and associated functions.
- */
-
-/*
- * rchan_callback implementations defining default channel behavior. Used
- * in place of corresponding NULL values in client callback struct.
- */
-
-/*
- * create_buf_file_create() default callback. Does nothing.
- */
-static struct dentry *create_buf_file_default_callback(const char *filename,
- struct dentry *parent,
- int mode,
- struct rchan_buf *buf)
-{
- return NULL;
-}
-
-/*
- * remove_buf_file() default callback. Does nothing.
- */
-static int remove_buf_file_default_callback(struct dentry *dentry)
-{
- return -EINVAL;
-}
-
-/**
- * wakeup_readers - wake up readers waiting on a channel
- * @data: contains the channel buffer
- *
- * This is the timer function used to defer reader waking.
- */
-//ust// static void wakeup_readers(unsigned long data)
-//ust// {
-//ust// struct rchan_buf *buf = (struct rchan_buf *)data;
-//ust// wake_up_interruptible(&buf->read_wait);
-//ust// }
-
-/**
- * __relay_reset - reset a channel buffer
- * @buf: the channel buffer
- * @init: 1 if this is a first-time initialization
- *
- * See relay_reset() for description of effect.
- */
-static void __relay_reset(struct rchan_buf *buf, unsigned int init)
-{
- if (init) {
-//ust// init_waitqueue_head(&buf->read_wait);
- kref_init(&buf->kref);
-//ust// setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
-	}
-//ust//	else
-//ust//		del_timer_sync(&buf->timer);
-
- buf->finalized = 0;
-}
-
-/*
- * relay_open_buf - create a new relay channel buffer
- *
- * used by relay_open() and CPU hotplug.
- */
-static struct rchan_buf *relay_open_buf(struct rchan *chan)
-{
- struct rchan_buf *buf = NULL;
-//ust// char *tmpname;
-
-//ust// tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
-//ust// if (!tmpname)
-//ust// goto end;
-//ust// snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
-
- buf = relay_create_buf(chan);
- if (!buf)
- goto free_name;
-
- __relay_reset(buf, 1);
-
- /* Create file in fs */
-//ust// dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
-//ust// buf);
-
- ltt_create_buf_file_callback(buf); // ust //
-
-//ust// if (!dentry)
-//ust// goto free_buf;
-//ust//
-//ust// buf->dentry = dentry;
-
- goto free_name;
-
-free_buf:
- relay_destroy_buf(buf);
- buf = NULL;
-free_name:
-//ust// kfree(tmpname);
-end:
- return buf;
-}
-
-/**
- * relay_close_buf - close a channel buffer
- * @buf: channel buffer
- *
- * Marks the buffer finalized and restores the default callbacks.
- * The channel buffer and channel buffer data structure are then freed
- * automatically when the last reference is given up.
- */
-static void relay_close_buf(struct rchan_buf *buf)
-{
-//ust// del_timer_sync(&buf->timer);
- kref_put(&buf->kref, relay_remove_buf);
-}
-
-//ust// static void setup_callbacks(struct rchan *chan,
-//ust// struct rchan_callbacks *cb)
-//ust// {
-//ust// if (!cb) {
-//ust// chan->cb = &default_channel_callbacks;
-//ust// return;
-//ust// }
-//ust//
-//ust// if (!cb->create_buf_file)
-//ust// cb->create_buf_file = create_buf_file_default_callback;
-//ust// if (!cb->remove_buf_file)
-//ust// cb->remove_buf_file = remove_buf_file_default_callback;
-//ust// chan->cb = cb;
-//ust// }
-
-/**
- * relay_hotcpu_callback - CPU hotplug callback
- * @nb: notifier block
- * @action: hotplug action to take
- * @hcpu: CPU number
- *
- * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-//ust// static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
-//ust// unsigned long action,
-//ust// void *hcpu)
-//ust// {
-//ust// unsigned int hotcpu = (unsigned long)hcpu;
-//ust// struct rchan *chan;
-//ust//
-//ust// switch (action) {
-//ust// case CPU_UP_PREPARE:
-//ust// case CPU_UP_PREPARE_FROZEN:
-//ust// mutex_lock(&relay_channels_mutex);
-//ust// list_for_each_entry(chan, &relay_channels, list) {
-//ust// if (chan->buf[hotcpu])
-//ust// continue;
-//ust// chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
-//ust// if (!chan->buf[hotcpu]) {
-//ust// printk(KERN_ERR
-//ust// "relay_hotcpu_callback: cpu %d buffer "
-//ust// "creation failed\n", hotcpu);
-//ust// mutex_unlock(&relay_channels_mutex);
-//ust// return NOTIFY_BAD;
-//ust// }
-//ust// }
-//ust// mutex_unlock(&relay_channels_mutex);
-//ust// break;
-//ust// case CPU_DEAD:
-//ust// case CPU_DEAD_FROZEN:
-//ust// /* No need to flush the cpu : will be flushed upon
-//ust// * final relay_flush() call. */
-//ust// break;
-//ust// }
-//ust// return NOTIFY_OK;
-//ust// }
-
-/**
- * ltt_relay_open - create a new relay channel
- * @base_filename: base name of files to create
- * @parent: dentry of parent directory, %NULL for root directory
- * @subbuf_size: size of sub-buffers
- * @n_subbufs: number of sub-buffers
- * @cb: client callback functions
- * @private_data: user-defined data
- *
- * Returns channel pointer if successful, %NULL otherwise.
- *
- * Creates a channel buffer for each cpu using the sizes and
- * attributes specified. The created channel buffer files
- * will be named base_filename0...base_filenameN-1. File
- * permissions will be %S_IRUSR.
- */
-struct rchan *ltt_relay_open(const char *base_filename,
- struct dentry *parent,
- size_t subbuf_size,
- size_t n_subbufs,
- void *private_data)
-{
- unsigned int i;
- struct rchan *chan;
-//ust// if (!base_filename)
-//ust// return NULL;
-
- if (!(subbuf_size && n_subbufs))
- return NULL;
-
- chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
- if (!chan)
- return NULL;
-
- chan->version = LTT_RELAY_CHANNEL_VERSION;
- chan->n_subbufs = n_subbufs;
- chan->subbuf_size = subbuf_size;
- chan->subbuf_size_order = get_count_order(subbuf_size);
- chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
- chan->parent = parent;
- chan->private_data = private_data;
-//ust// strlcpy(chan->base_filename, base_filename, NAME_MAX);
-//ust// setup_callbacks(chan, cb);
- kref_init(&chan->kref);
-
- mutex_lock(&relay_channels_mutex);
-//ust// for_each_online_cpu(i) {
- chan->buf = relay_open_buf(chan);
- if (!chan->buf)
- goto error;
-//ust// }
- list_add(&chan->list, &relay_channels);
- mutex_unlock(&relay_channels_mutex);
-
- return chan;
-
-//ust//free_bufs:
-//ust// for_each_possible_cpu(i) {
-//ust// if (!chan->buf[i])
-//ust// break;
-//ust// relay_close_buf(chan->buf[i]);
-//ust// }
-
- error:
- kref_put(&chan->kref, relay_destroy_channel);
- mutex_unlock(&relay_channels_mutex);
- return NULL;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_relay_open);
-
-/**
- * ltt_relay_close - close the channel
- * @chan: the channel
- *
- * Closes all channel buffers and frees the channel.
- */
-void ltt_relay_close(struct rchan *chan)
-{
- unsigned int i;
-
- if (!chan)
- return;
-
- mutex_lock(&relay_channels_mutex);
-//ust// for_each_possible_cpu(i)
- if (chan->buf)
- relay_close_buf(chan->buf);
-
- list_del(&chan->list);
- kref_put(&chan->kref, relay_destroy_channel);
- mutex_unlock(&relay_channels_mutex);
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_relay_close);
-
-/*
- * Start iteration at the previous element. Skip the real list head.
- */
-//ust// struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
-//ust// struct buf_page *page, size_t offset, ssize_t diff_offset)
-//ust// {
-//ust// struct buf_page *iter;
-//ust// size_t orig_iter_off;
-//ust// unsigned int i = 0;
-//ust//
-//ust// orig_iter_off = page->offset;
-//ust// list_for_each_entry_reverse(iter, &page->list, list) {
-//ust// /*
-//ust// * Skip the real list head.
-//ust// */
-//ust// if (&iter->list == &buf->pages)
-//ust// continue;
-//ust// i++;
-//ust// if (offset >= iter->offset
-//ust// && offset < iter->offset + PAGE_SIZE) {
-//ust// #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
-//ust// if (i > 1) {
-//ust// printk(KERN_WARNING
-//ust// "Backward random access detected in "
-//ust// "ltt_relay. Iterations %u, "
-//ust// "offset %zu, orig iter->off %zu, "
-//ust// "iter->off %zu diff_offset %zd.\n", i,
-//ust// offset, orig_iter_off, iter->offset,
-//ust// diff_offset);
-//ust// WARN_ON(1);
-//ust// }
-//ust// #endif
-//ust// return iter;
-//ust// }
-//ust// }
-//ust// WARN_ON(1);
-//ust// return NULL;
-//ust// }
-//ust// EXPORT_SYMBOL_GPL(ltt_relay_find_prev_page);
-
-/*
- * Start iteration at the next element. Skip the real list head.
- */
-//ust// struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
-//ust// struct buf_page *page, size_t offset, ssize_t diff_offset)
-//ust// {
-//ust// struct buf_page *iter;
-//ust// unsigned int i = 0;
-//ust// size_t orig_iter_off;
-//ust//
-//ust// orig_iter_off = page->offset;
-//ust// list_for_each_entry(iter, &page->list, list) {
-//ust// /*
-//ust// * Skip the real list head.
-//ust// */
-//ust// if (&iter->list == &buf->pages)
-//ust// continue;
-//ust// i++;
-//ust// if (offset >= iter->offset
-//ust// && offset < iter->offset + PAGE_SIZE) {
-//ust// #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
-//ust// if (i > 1) {
-//ust// printk(KERN_WARNING
-//ust// "Forward random access detected in "
-//ust// "ltt_relay. Iterations %u, "
-//ust// "offset %zu, orig iter->off %zu, "
-//ust// "iter->off %zu diff_offset %zd.\n", i,
-//ust// offset, orig_iter_off, iter->offset,
-//ust// diff_offset);
-//ust// WARN_ON(1);
-//ust// }
-//ust// #endif
-//ust// return iter;
-//ust// }
-//ust// }
-//ust// WARN_ON(1);
-//ust// return NULL;
-//ust// }
-//ust// EXPORT_SYMBOL_GPL(ltt_relay_find_next_page);
-
-/**
- * ltt_relay_write - write data to a ltt_relay buffer.
- * @buf : buffer
- * @offset : offset within the buffer
- * @src : source address
- * @len : length to write
- * @page : cached buffer page
- * @pagecpy : page size copied so far
- */
-void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
- const void *src, size_t len, ssize_t cpy)
-{
- do {
- len -= cpy;
- src += cpy;
- offset += cpy;
- /*
- * Underlying layer should never ask for writes across
- * subbuffers.
- */
- WARN_ON(offset >= buf->buf_size);
-
- cpy = min_t(size_t, len, buf->buf_size - offset);
- ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
- } while (unlikely(len != cpy));
-}
-//ust// EXPORT_SYMBOL_GPL(_ltt_relay_write);
-
-/**
- * ltt_relay_read - read data from ltt_relay_buffer.
- * @buf : buffer
- * @offset : offset within the buffer
- * @dest : destination address
- * @len : length to write
- */
-//ust// int ltt_relay_read(struct rchan_buf *buf, size_t offset,
-//ust// void *dest, size_t len)
-//ust// {
-//ust// struct buf_page *page;
-//ust// ssize_t pagecpy, orig_len;
-//ust//
-//ust// orig_len = len;
-//ust// offset &= buf->chan->alloc_size - 1;
-//ust// page = buf->rpage;
-//ust// if (unlikely(!len))
-//ust// return 0;
-//ust// for (;;) {
-//ust// page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
-//ust// pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-//ust// memcpy(dest, page_address(page->page) + (offset & ~PAGE_MASK),
-//ust// pagecpy);
-//ust// len -= pagecpy;
-//ust// if (likely(!len))
-//ust// break;
-//ust// dest += pagecpy;
-//ust// offset += pagecpy;
-//ust// /*
-//ust// * Underlying layer should never ask for reads across
-//ust// * subbuffers.
-//ust// */
-//ust// WARN_ON(offset >= buf->chan->alloc_size);
-//ust// }
-//ust// return orig_len;
-//ust// }
-//ust// EXPORT_SYMBOL_GPL(ltt_relay_read);
-
-/**
- * ltt_relay_read_get_page - Get a whole page to read from
- * @buf : buffer
- * @offset : offset within the buffer
- */
-//ust// struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf, size_t offset)
-//ust// {
-//ust// struct buf_page *page;
-
-//ust// offset &= buf->chan->alloc_size - 1;
-//ust// page = buf->rpage;
-//ust// page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
-//ust// return page;
-//ust// }
-//ust// EXPORT_SYMBOL_GPL(ltt_relay_read_get_page);
-
-/**
- * ltt_relay_offset_address - get address of a location within the buffer
- * @buf : buffer
- * @offset : offset within the buffer.
- *
- * Return the address where a given offset is located.
- * Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to write directly to this address,
- * as long as the write is never bigger than a page size.
- */
-void *ltt_relay_offset_address(struct rchan_buf *buf, size_t offset)
-{
-//ust// struct buf_page *page;
-//ust// unsigned int odd;
-//ust//
-//ust// offset &= buf->chan->alloc_size - 1;
-//ust// odd = !!(offset & buf->chan->subbuf_size);
-//ust// page = buf->hpage[odd];
-//ust// if (offset < page->offset || offset >= page->offset + PAGE_SIZE)
-//ust// buf->hpage[odd] = page = buf->wpage;
-//ust// page = ltt_relay_cache_page(buf, &buf->hpage[odd], page, offset);
-//ust// return page_address(page->page) + (offset & ~PAGE_MASK);
-	return ((char *)buf->buf_data) + offset;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_relay_offset_address);
-
-/**
- * relay_file_open - open file op for relay files
- * @inode: the inode
- * @filp: the file
- *
- * Increments the channel buffer refcount.
- */
-//ust// static int relay_file_open(struct inode *inode, struct file *filp)
-//ust// {
-//ust// struct rchan_buf *buf = inode->i_private;
-//ust// kref_get(&buf->kref);
-//ust// filp->private_data = buf;
-//ust//
-//ust// return nonseekable_open(inode, filp);
-//ust// }
-
-/**
- * relay_file_release - release file op for relay files
- * @inode: the inode
- * @filp: the file
- *
- * Decrements the channel refcount, as the filesystem is
- * no longer using it.
- */
-//ust// static int relay_file_release(struct inode *inode, struct file *filp)
-//ust// {
-//ust// struct rchan_buf *buf = filp->private_data;
-//ust// kref_put(&buf->kref, relay_remove_buf);
-//ust//
-//ust// return 0;
-//ust// }
-
-//ust// const struct file_operations ltt_relay_file_operations = {
-//ust// .open = relay_file_open,
-//ust// .release = relay_file_release,
-//ust// };
-//ust// EXPORT_SYMBOL_GPL(ltt_relay_file_operations);
-
-//ust// static __init int relay_init(void)
-//ust// {
-//ust// hotcpu_notifier(relay_hotcpu_callback, 5);
-//ust// return 0;
-//ust// }
-
-//ust// module_init(relay_init);
-/*
- * ltt/ltt-relay.c
- *
- * (C) Copyright 2005-2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * LTTng lockless buffer space management (reader/writer).
- *
- * Author:
- * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * Inspired from LTT :
- * Karim Yaghmour (karim@opersys.com)
- * Tom Zanussi (zanussi@us.ibm.com)
- * Bob Wisniewski (bob@watson.ibm.com)
- * And from K42 :
- * Bob Wisniewski (bob@watson.ibm.com)
- *
- * Changelog:
- * 08/10/08, Cleanup.
- * 19/10/05, Complete lockless mechanism.
- * 27/05/05, Modular redesign and rewrite.
- *
- * Userspace reader semantic :
- * while (poll fd != POLLHUP) {
- * - ioctl RELAY_GET_SUBBUF_SIZE
- * while (1) {
- * - ioctl GET_SUBBUF
- * - splice 1 subbuffer worth of data to a pipe
- * - splice the data from pipe to disk/network
- * - ioctl PUT_SUBBUF, check error value
- * if err val < 0, previous subbuffer was corrupted.
- * }
- * }
- */
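Rendered as C, the reader loop sketched in the comment above looks roughly like this. The RELAY_* ioctls belong to the original kernel interface the comment describes; in this userspace port the equivalent handshake goes through ltt_do_get_subbuf()/ltt_do_put_subbuf() and a pipe, so treat this purely as a sketch (poll_says_not_hup() is a hypothetical placeholder for the poll step):

	/* fd: relay buffer file, pipefd: pipe toward disk/network */
	u32 consumed, subbuf_size;

	ioctl(fd, RELAY_GET_SUBBUF_SIZE, &subbuf_size);
	while (poll_says_not_hup(fd)) {
		if (ioctl(fd, RELAY_GET_SUBBUF, &consumed) < 0)
			continue;			/* no complete subbuffer yet */
		splice(fd, NULL, pipefd, NULL, subbuf_size, 0);	/* subbuf -> pipe */
		/* drain the pipe to disk/network here */
		if (ioctl(fd, RELAY_PUT_SUBBUF, consumed) < 0)
			;				/* previous subbuffer was corrupted */
	}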
-
-//ust// #include <linux/time.h>
-//ust// #include <linux/ltt-tracer.h>
-//ust// #include <linux/ltt-relay.h>
-//ust// #include <linux/module.h>
-//ust// #include <linux/string.h>
-//ust// #include <linux/slab.h>
-//ust// #include <linux/init.h>
-//ust// #include <linux/rcupdate.h>
-//ust// #include <linux/sched.h>
-//ust// #include <linux/bitops.h>
-//ust// #include <linux/fs.h>
-//ust// #include <linux/smp_lock.h>
-//ust// #include <linux/debugfs.h>
-//ust// #include <linux/stat.h>
-//ust// #include <linux/cpu.h>
-//ust// #include <linux/pipe_fs_i.h>
-//ust// #include <linux/splice.h>
-//ust// #include <asm/atomic.h>
-//ust// #include <asm/local.h>
-
-#if 0
-#define printk_dbg(fmt, args...) printk(fmt, args)
-#else
-#define printk_dbg(fmt, args...)
-#endif
-
-/*
- * Last TSC comparison functions. Check if the current TSC overflows
- * LTT_TSC_BITS bits from the last TSC read. Reads and writes last_tsc
- * atomically.
- */
-
-#if (BITS_PER_LONG == 32)
-static inline void save_last_tsc(struct ltt_channel_buf_struct *ltt_buf,
- u64 tsc)
-{
- ltt_buf->last_tsc = (unsigned long)(tsc >> LTT_TSC_BITS);
-}
-
-static inline int last_tsc_overflow(struct ltt_channel_buf_struct *ltt_buf,
- u64 tsc)
-{
- unsigned long tsc_shifted = (unsigned long)(tsc >> LTT_TSC_BITS);
-
- if (unlikely((tsc_shifted - ltt_buf->last_tsc)))
- return 1;
- else
- return 0;
-}
-#else
-static inline void save_last_tsc(struct ltt_channel_buf_struct *ltt_buf,
- u64 tsc)
-{
- ltt_buf->last_tsc = (unsigned long)tsc;
-}
-
-static inline int last_tsc_overflow(struct ltt_channel_buf_struct *ltt_buf,
- u64 tsc)
-{
- if (unlikely((tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS))
- return 1;
- else
- return 0;
-}
-#endif
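A quick numeric check of the 64-bit variant above, with LTT_TSC_BITS assumed to be 27 for the sake of the example (the real value comes from the tracer headers): a delta that fits in the low 27 bits is not an overflow; anything larger is, which is what forces the writer to record a full timestamp in the event header.

	#include <stdint.h>
	#include <stdio.h>

	#define EX_TSC_BITS 27	/* assumed value, for illustration only */

	static int ex_overflow(uint64_t last, uint64_t now)
	{
		return ((now - last) >> EX_TSC_BITS) != 0;
	}

	int main(void)
	{
		uint64_t last = 1000;

		printf("%d\n", ex_overflow(last, last + 1));			/* 0 */
		printf("%d\n", ex_overflow(last, last + (1ULL << 27)));	/* 1 */
		return 0;
	}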
-
-//ust// static struct file_operations ltt_file_operations;
-
-/*
- * A switch is done during tracing or as a final flush after tracing (so it
- * won't write in the new sub-buffer).
- */
-enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
-
-static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
- struct ltt_channel_struct *ltt_chan,
- struct rchan_buf *buf,
- unsigned int n_subbufs);
-
-static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan);
-
-static void ltt_force_switch(struct rchan_buf *buf,
- enum force_switch_mode mode);
-
-/*
- * Trace callbacks
- */
-static void ltt_buffer_begin_callback(struct rchan_buf *buf,
- u64 tsc, unsigned int subbuf_idx)
-{
- struct ltt_channel_struct *channel =
- (struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_subbuffer_header *header =
- (struct ltt_subbuffer_header *)
- ltt_relay_offset_address(buf,
- subbuf_idx * buf->chan->subbuf_size);
-
- header->cycle_count_begin = tsc;
- header->lost_size = 0xFFFFFFFF; /* for debugging */
- header->buf_size = buf->chan->subbuf_size;
- ltt_write_trace_header(channel->trace, header);
-}
-
-/*
- * offset is assumed to never be 0 here : never deliver a completely empty
- * subbuffer. The lost size is between 0 and subbuf_size-1.
- */
-static notrace void ltt_buffer_end_callback(struct rchan_buf *buf,
- u64 tsc, unsigned int offset, unsigned int subbuf_idx)
-{
- struct ltt_channel_struct *channel =
- (struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf = channel->buf;
- struct ltt_subbuffer_header *header =
- (struct ltt_subbuffer_header *)
- ltt_relay_offset_address(buf,
- subbuf_idx * buf->chan->subbuf_size);
-
- header->lost_size = SUBBUF_OFFSET((buf->chan->subbuf_size - offset),
- buf->chan);
- header->cycle_count_end = tsc;
-	header->events_lost = local_read(&ltt_buf->events_lost);
-	header->subbuf_corrupt = local_read(&ltt_buf->corrupted_subbuffers);
-
-}
-
-void (*wake_consumer)(void *, int) = NULL;
-
-void relay_set_wake_consumer(void (*wake)(void *, int))
-{
- wake_consumer = wake;
-}
-
-void relay_wake_consumer(void *arg, int finished)
-{
- if(wake_consumer)
- wake_consumer(arg, finished);
-}
-
-static notrace void ltt_deliver(struct rchan_buf *buf, unsigned int subbuf_idx,
- void *subbuf)
-{
- struct ltt_channel_struct *channel =
- (struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf = channel->buf;
- int result;
-
- result = write(ltt_buf->data_ready_fd_write, "1", 1);
- if(result == -1) {
-		PERROR("write (in ltt_deliver)");
- ERR("this should never happen!");
- }
-//ust//	atomic_set(&ltt_buf->wakeup_readers, 1);
-}
-
-static struct dentry *ltt_create_buf_file_callback(struct rchan_buf *buf)
-{
- struct ltt_channel_struct *ltt_chan;
- int err;
-//ust// struct dentry *dentry;
-
- ltt_chan = buf->chan->private_data;
- err = ltt_relay_create_buffer(ltt_chan->trace, ltt_chan, buf, buf->chan->n_subbufs);
- if (err)
- return ERR_PTR(err);
-
-//ust// dentry = debugfs_create_file(filename, mode, parent, buf,
-//ust//					&ltt_file_operations);
-//ust// if (!dentry)
-//ust// goto error;
-//ust// return dentry;
- return NULL; //ust//
-//ust//error:
- ltt_relay_destroy_buffer(ltt_chan);
- return NULL;
-}
-
-static int ltt_remove_buf_file_callback(struct rchan_buf *buf)
-{
-//ust// struct rchan_buf *buf = dentry->d_inode->i_private;
- struct ltt_channel_struct *ltt_chan = buf->chan->private_data;
-
-//ust// debugfs_remove(dentry);
- ltt_relay_destroy_buffer(ltt_chan);
-
- return 0;
-}
-
-/*
- * Wake writers :
- *
- * This must be done after the trace is removed from the RCU list so that there
- * are no stalled writers.
- */
-//ust// static void ltt_relay_wake_writers(struct ltt_channel_buf_struct *ltt_buf)
-//ust// {
-//ust//
-//ust//	if (waitqueue_active(&ltt_buf->write_wait))
-//ust//		wake_up_interruptible(&ltt_buf->write_wait);
-//ust// }
-
-/*
- * This function should not be called from NMI interrupt context
- */
-static notrace void ltt_buf_unfull(struct rchan_buf *buf,
- unsigned int subbuf_idx,
- long offset)
-{
-//ust// struct ltt_channel_struct *ltt_channel =
-//ust// (struct ltt_channel_struct *)buf->chan->private_data;
-//ust// struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
-//ust//
-//ust// ltt_relay_wake_writers(ltt_buf);
-}
-
-/**
- * ltt_open - open file op for ltt files
- * @inode: opened inode
- * @file: opened file
- *
- * Open implementation. Makes sure only one open instance of a buffer is
- * done at a given moment.
- */
-//ust// static int ltt_open(struct inode *inode, struct file *file)
-//ust// {
-//ust// struct rchan_buf *buf = inode->i_private;
-//ust// struct ltt_channel_struct *ltt_channel =
-//ust// (struct ltt_channel_struct *)buf->chan->private_data;
-//ust// struct ltt_channel_buf_struct *ltt_buf =
-//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
-//ust//
-//ust//	if (!atomic_long_add_unless(&ltt_buf->active_readers, 1, 1))
-//ust// return -EBUSY;
-//ust// return ltt_relay_file_operations.open(inode, file);
-//ust// }
-
-/**
- * ltt_release - release file op for ltt files
- * @inode: opened inode
- * @file: opened file
- *
- * Release implementation.
- */
-//ust// static int ltt_release(struct inode *inode, struct file *file)
-//ust// {
-//ust// struct rchan_buf *buf = inode->i_private;
-//ust// struct ltt_channel_struct *ltt_channel =
-//ust// (struct ltt_channel_struct *)buf->chan->private_data;
-//ust// struct ltt_channel_buf_struct *ltt_buf =
-//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
-//ust// int ret;
-//ust//
-//ust//	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
-//ust//	atomic_long_dec(&ltt_buf->active_readers);
-//ust// ret = ltt_relay_file_operations.release(inode, file);
-//ust// WARN_ON(ret);
-//ust// return ret;
-//ust// }
-
-/**
- * ltt_poll - file op for ltt files
- * @filp: the file
- * @wait: poll table
- *
- * Poll implementation.
- */
-//ust// static unsigned int ltt_poll(struct file *filp, poll_table *wait)
-//ust// {
-//ust// unsigned int mask = 0;
-//ust// struct inode *inode = filp->f_dentry->d_inode;
-//ust// struct rchan_buf *buf = inode->i_private;
-//ust// struct ltt_channel_struct *ltt_channel =
-//ust// (struct ltt_channel_struct *)buf->chan->private_data;
-//ust// struct ltt_channel_buf_struct *ltt_buf =
-//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
-//ust//
-//ust// if (filp->f_mode & FMODE_READ) {
-//ust// poll_wait_set_exclusive(wait);
-//ust// poll_wait(filp, &buf->read_wait, wait);
-//ust//
-//ust//		WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
-//ust//		if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
-//ust//				buf->chan)
-//ust//		  - SUBBUF_TRUNC(atomic_long_read(&ltt_buf->consumed),
-//ust// buf->chan)
-//ust// == 0) {
-//ust// if (buf->finalized)
-//ust// return POLLHUP;
-//ust// else
-//ust// return 0;
-//ust// } else {
-//ust// struct rchan *rchan =
-//ust// ltt_channel->trans_channel_data;
-//ust//			if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
-//ust//					buf->chan)
-//ust//			  - SUBBUF_TRUNC(atomic_long_read(
-//ust//						&ltt_buf->consumed),
-//ust// buf->chan)
-//ust// >= rchan->alloc_size)
-//ust// return POLLPRI | POLLRDBAND;
-//ust// else
-//ust// return POLLIN | POLLRDNORM;
-//ust// }
-//ust// }
-//ust// return mask;
-//ust// }
-
-int ltt_do_get_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, long *pconsumed_old)
-{
- struct ltt_channel_struct *ltt_channel = (struct ltt_channel_struct *)buf->chan->private_data;
- long consumed_old, consumed_idx, commit_count, write_offset;
-	consumed_old = atomic_long_read(&ltt_buf->consumed);
-	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
-	commit_count = local_read(&ltt_buf->commit_count[consumed_idx]);
-	/*
-	 * Make sure we read the commit count before reading the buffer
-	 * data and the write offset. Correct consumed offset ordering
-	 * wrt commit count is ensured by the use of cmpxchg to update
-	 * the consumed offset.
-	 */
-	smp_rmb();
-	write_offset = local_read(&ltt_buf->offset);
- /*
- * Check that the subbuffer we are trying to consume has been
- * already fully committed.
- */
- if (((commit_count - buf->chan->subbuf_size)
- & ltt_channel->commit_count_mask)
- - (BUFFER_TRUNC(consumed_old, buf->chan)
- >> ltt_channel->n_subbufs_order)
- != 0) {
- return -EAGAIN;
- }
- /*
- * Check that we are not about to read the same subbuffer in
- * which the writer head is.
- */
- if ((SUBBUF_TRUNC(write_offset, buf->chan)
- - SUBBUF_TRUNC(consumed_old, buf->chan))
- == 0) {
- return -EAGAIN;
- }
-
- *pconsumed_old = consumed_old;
- return 0;
-}
-
-int ltt_do_put_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, u32 uconsumed_old)
-{
- long consumed_new, consumed_old;
-
-	consumed_old = atomic_long_read(&ltt_buf->consumed);
- consumed_old = consumed_old & (~0xFFFFFFFFL);
- consumed_old = consumed_old | uconsumed_old;
- consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
-
-//ust//	spin_lock(&ltt_buf->full_lock);
-	if (atomic_long_cmpxchg(&ltt_buf->consumed, consumed_old,
- consumed_new)
- != consumed_old) {
- /* We have been pushed by the writer : the last
- * buffer read _is_ corrupted! It can also
- * happen if this is a buffer we never got. */
-//ust//		spin_unlock(&ltt_buf->full_lock);
- return -EIO;
- } else {
- /* tell the client that buffer is now unfull */
- int index;
- long data;
- index = SUBBUF_INDEX(consumed_old, buf->chan);
- data = BUFFER_OFFSET(consumed_old, buf->chan);
- ltt_buf_unfull(buf, index, data);
-//ust//		spin_unlock(&ltt_buf->full_lock);
- }
- return 0;
-}
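Taken together, ltt_do_get_subbuf() and ltt_do_put_subbuf() give a consumer the lockless handshake: read the commit count, claim a fully committed subbuffer, then publish the new consumed position with cmpxchg. A sketch of the calling pattern, with the wakeup coming from the data_ready pipe written in ltt_deliver() (buf and ltt_buf are assumed to be already looked up):

	char dummy;
	long consumed_old;

	/* block until the writer signals a complete subbuffer */
	read(ltt_buf->data_ready_fd_read, &dummy, 1);

	if (ltt_do_get_subbuf(buf, ltt_buf, &consumed_old) == 0) {
		/* ... copy one subbuffer starting at consumed_old ... */
		if (ltt_do_put_subbuf(buf, ltt_buf, (u32)consumed_old) == -EIO) {
			/* the writer pushed us: that subbuffer is corrupted */
		}
	}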
-
-/**
- * ltt_ioctl - control on the debugfs file
- *
- * @inode: the inode
- * @filp: the file
- * @cmd: the command
- * @arg: command arg
- *
- * This ioctl implements four commands necessary for a minimal
- * producer/consumer implementation :
- *	RELAY_GET_SUBBUF
- *		Get the next sub buffer that can be read. It never blocks.
- *	RELAY_PUT_SUBBUF
- *		Release the currently read sub-buffer. Parameter is the last
- *		put subbuffer (returned by GET_SUBBUF).
- *	RELAY_GET_N_SUBBUFS
- *		returns the number of sub buffers in the per cpu channel.
- *	RELAY_GET_SUBBUF_SIZE
- *		returns the size of the sub buffers.
- */
-//ust// static int ltt_ioctl(struct inode *inode, struct file *filp,
-//ust// unsigned int cmd, unsigned long arg)
-//ust// {
-//ust// struct rchan_buf *buf = inode->i_private;
-//ust// struct ltt_channel_struct *ltt_channel =
-//ust// (struct ltt_channel_struct *)buf->chan->private_data;
-//ust// struct ltt_channel_buf_struct *ltt_buf =
-//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
-//ust// u32 __user *argp = (u32 __user *)arg;
-//ust//
-//ust//	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
-//ust// switch (cmd) {
-//ust// case RELAY_GET_SUBBUF:
-//ust// {
-//ust// int ret;
-//ust// ret = ltt_do_get_subbuf(buf, ltt_buf, &consumed_old);
-//ust// if(ret < 0)
-//ust// return ret;
-//ust// return put_user((u32)consumed_old, argp);
-//ust// }
-//ust// case RELAY_PUT_SUBBUF:
-//ust// {
-//ust// int ret;
-//ust// u32 uconsumed_old;
-//ust// ret = get_user(uconsumed_old, argp);
-//ust// if (ret)
-//ust// return ret; /* will return -EFAULT */
-//ust// return ltt_do_put_subbuf(buf, ltt_buf, uconsumed_old);
-//ust// }
-//ust// case RELAY_GET_N_SUBBUFS:
-//ust// return put_user((u32)buf->chan->n_subbufs, argp);
-//ust// break;
-//ust// case RELAY_GET_SUBBUF_SIZE:
-//ust// return put_user((u32)buf->chan->subbuf_size, argp);
-//ust// break;
-//ust// default:
-//ust// return -ENOIOCTLCMD;
-//ust// }
-//ust// return 0;
-//ust// }
-
-//ust// #ifdef CONFIG_COMPAT
-//ust// static long ltt_compat_ioctl(struct file *file, unsigned int cmd,
-//ust// unsigned long arg)
-//ust// {
-//ust// long ret = -ENOIOCTLCMD;
-//ust//
-//ust// lock_kernel();
-//ust// ret = ltt_ioctl(file->f_dentry->d_inode, file, cmd, arg);
-//ust// unlock_kernel();
-//ust//
-//ust// return ret;
-//ust// }
-//ust// #endif
-
-//ust// static void ltt_relay_pipe_buf_release(struct pipe_inode_info *pipe,
-//ust// struct pipe_buffer *pbuf)
-//ust// {
-//ust// }
-//ust//
-//ust// static struct pipe_buf_operations ltt_relay_pipe_buf_ops = {
-//ust// .can_merge = 0,
-//ust// .map = generic_pipe_buf_map,
-//ust// .unmap = generic_pipe_buf_unmap,
-//ust// .confirm = generic_pipe_buf_confirm,
-//ust// .release = ltt_relay_pipe_buf_release,
-//ust// .steal = generic_pipe_buf_steal,
-//ust// .get = generic_pipe_buf_get,
-//ust// };
-
-//ust// static void ltt_relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
-//ust// {
-//ust// }
-
-/*
- * subbuf_splice_actor - splice up to one subbuf's worth of data
- */
-//ust// static int subbuf_splice_actor(struct file *in,
-//ust// loff_t *ppos,
-//ust// struct pipe_inode_info *pipe,
-//ust// size_t len,
-//ust// unsigned int flags)
-//ust// {
-//ust// struct rchan_buf *buf = in->private_data;
-//ust// struct ltt_channel_struct *ltt_channel =
-//ust// (struct ltt_channel_struct *)buf->chan->private_data;
-//ust// struct ltt_channel_buf_struct *ltt_buf =
-//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
-//ust// unsigned int poff, subbuf_pages, nr_pages;
-//ust// struct page *pages[PIPE_BUFFERS];
-//ust// struct partial_page partial[PIPE_BUFFERS];
-//ust// struct splice_pipe_desc spd = {
-//ust// .pages = pages,
-//ust// .nr_pages = 0,
-//ust// .partial = partial,
-//ust// .flags = flags,
-//ust//		.ops = &ltt_relay_pipe_buf_ops,
-//ust// .spd_release = ltt_relay_page_release,
-//ust// };
-//ust// long consumed_old, consumed_idx, roffset;
-//ust// unsigned long bytes_avail;
-//ust//
-//ust// /*
-//ust// * Check that a GET_SUBBUF ioctl has been done before.
-//ust// */
-//ust//	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
-//ust//	consumed_old = atomic_long_read(&ltt_buf->consumed);
-//ust// consumed_old += *ppos;
-//ust// consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
-//ust//
-//ust// /*
-//ust// * Adjust read len, if longer than what is available
-//ust// */
-//ust//	bytes_avail = SUBBUF_TRUNC(local_read(&ltt_buf->offset), buf->chan)
-//ust// - consumed_old;
-//ust// WARN_ON(bytes_avail > buf->chan->alloc_size);
-//ust// len = min_t(size_t, len, bytes_avail);
-//ust// subbuf_pages = bytes_avail >> PAGE_SHIFT;
-//ust// nr_pages = min_t(unsigned int, subbuf_pages, PIPE_BUFFERS);
-//ust// roffset = consumed_old & PAGE_MASK;
-//ust// poff = consumed_old & ~PAGE_MASK;
-//ust// printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
-//ust//		len, (ssize_t)*ppos, local_read(&ltt_buf->offset));
-//ust//
-//ust// for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
-//ust// unsigned int this_len;
-//ust// struct buf_page *page;
-//ust//
-//ust// if (!len)
-//ust// break;
-//ust// printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
-//ust// len, roffset);
-//ust//
-//ust// this_len = PAGE_SIZE - poff;
-//ust// page = ltt_relay_read_get_page(buf, roffset);
-//ust// spd.pages[spd.nr_pages] = page->page;
-//ust// spd.partial[spd.nr_pages].offset = poff;
-//ust// spd.partial[spd.nr_pages].len = this_len;
-//ust//
-//ust// poff = 0;
-//ust// roffset += PAGE_SIZE;
-//ust// len -= this_len;
-//ust// }
-//ust//
-//ust// if (!spd.nr_pages)
-//ust// return 0;
-//ust//
-//ust// return splice_to_pipe(pipe, &spd);
-//ust// }
-
-//ust// static ssize_t ltt_relay_file_splice_read(struct file *in,
-//ust// loff_t *ppos,
-//ust// struct pipe_inode_info *pipe,
-//ust// size_t len,
-//ust// unsigned int flags)
-//ust// {
-//ust// ssize_t spliced;
-//ust// int ret;
-//ust//
-//ust// ret = 0;
-//ust// spliced = 0;
-//ust//
-//ust// printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n",
-//ust// len, (ssize_t)*ppos);
-//ust// while (len && !spliced) {
-//ust// ret = subbuf_splice_actor(in, ppos, pipe, len, flags);
-//ust// printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
-//ust// if (ret < 0)
-//ust// break;
-//ust// else if (!ret) {
-//ust// if (flags & SPLICE_F_NONBLOCK)
-//ust// ret = -EAGAIN;
-//ust// break;
-//ust// }
-//ust//
-//ust// *ppos += ret;
-//ust// if (ret > len)
-//ust// len = 0;
-//ust// else
-//ust// len -= ret;
-//ust// spliced += ret;
-//ust// }
-//ust//
-//ust// if (spliced)
-//ust// return spliced;
-//ust//
-//ust// return ret;
-//ust// }
-
-static void ltt_relay_print_subbuffer_errors(
- struct ltt_channel_struct *ltt_chan,
- long cons_off)
-{
- struct rchan *rchan = ltt_chan->trans_channel_data;
- struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
- long cons_idx, commit_count, write_offset;
-
- cons_idx = SUBBUF_INDEX(cons_off, rchan);
-	commit_count = local_read(&ltt_buf->commit_count[cons_idx]);
- /*
- * No need to order commit_count and write_offset reads because we
- * execute after trace is stopped when there are no readers left.
- */
-	write_offset = local_read(&ltt_buf->offset);
- printk(KERN_WARNING
- "LTT : unread channel %s offset is %ld "
- "and cons_off : %ld\n",
- ltt_chan->channel_name, write_offset, cons_off);
- /* Check each sub-buffer for non filled commit count */
- if (((commit_count - rchan->subbuf_size) & ltt_chan->commit_count_mask)
- - (BUFFER_TRUNC(cons_off, rchan) >> ltt_chan->n_subbufs_order)
- != 0)
- printk(KERN_ALERT
- "LTT : %s : subbuffer %lu has non filled "
- "commit count %lu.\n",
- ltt_chan->channel_name, cons_idx, commit_count);
- printk(KERN_ALERT "LTT : %s : commit count : %lu, subbuf size %zd\n",
- ltt_chan->channel_name, commit_count,
- rchan->subbuf_size);
-}
-
-static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
- struct ltt_channel_struct *ltt_chan)
-{
- struct rchan *rchan = ltt_chan->trans_channel_data;
- struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
- long cons_off;
-
-	for (cons_off = atomic_long_read(&ltt_buf->consumed);
-			(SUBBUF_TRUNC(local_read(&ltt_buf->offset),
- rchan)
- - cons_off) > 0;
- cons_off = SUBBUF_ALIGN(cons_off, rchan))
- ltt_relay_print_subbuffer_errors(ltt_chan, cons_off);
-}
-
-static void ltt_relay_print_buffer_errors(struct ltt_channel_struct *ltt_chan)
-{
- struct ltt_trace_struct *trace = ltt_chan->trace;
- struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
-
-	if (local_read(&ltt_buf->events_lost))
-		printk(KERN_ALERT
-			"LTT : %s : %ld events lost "
-			"in %s channel.\n",
-			ltt_chan->channel_name,
-			local_read(&ltt_buf->events_lost),
-			ltt_chan->channel_name);
-	if (local_read(&ltt_buf->corrupted_subbuffers))
-		printk(KERN_ALERT
-			"LTT : %s : %ld corrupted subbuffers "
-			"in %s channel.\n",
-			ltt_chan->channel_name,
-			local_read(&ltt_buf->corrupted_subbuffers),
- ltt_chan->channel_name);
-
- ltt_relay_print_errors(trace, ltt_chan);
-}
-
-static void ltt_relay_remove_dirs(struct ltt_trace_struct *trace)
-{
-//ust// debugfs_remove(trace->dentry.trace_root);
-}
-
-static void ltt_relay_release_channel(struct kref *kref)
-{
- struct ltt_channel_struct *ltt_chan = container_of(kref,
- struct ltt_channel_struct, kref);
- free(ltt_chan->buf);
-}
-
-/*
- * Create ltt buffer.
- */
-//ust// static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
-//ust// struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
-//ust// unsigned int cpu, unsigned int n_subbufs)
-//ust// {
-//ust// struct ltt_channel_buf_struct *ltt_buf =
-//ust// percpu_ptr(ltt_chan->buf, cpu);
-//ust// unsigned int j;
-//ust//
-//ust// ltt_buf->commit_count =
-//ust// kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
-//ust// GFP_KERNEL, cpu_to_node(cpu));
-//ust// if (!ltt_buf->commit_count)
-//ust// return -ENOMEM;
-//ust// kref_get(&trace->kref);
-//ust// kref_get(&trace->ltt_transport_kref);
-//ust//	kref_get(&ltt_chan->kref);
-//ust//	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
-//ust//	atomic_long_set(&ltt_buf->consumed, 0);
-//ust//	atomic_long_set(&ltt_buf->active_readers, 0);
-//ust//	for (j = 0; j < n_subbufs; j++)
-//ust//		local_set(&ltt_buf->commit_count[j], 0);
-//ust//	init_waitqueue_head(&ltt_buf->write_wait);
-//ust//	atomic_set(&ltt_buf->wakeup_readers, 0);
-//ust//	spin_lock_init(&ltt_buf->full_lock);
-//ust//
-//ust//	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
-//ust//	/* atomic_add made on local variable on data that belongs to
-//ust//	 * various CPUs : ok because tracing not started (for this cpu). */
-//ust//	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
-//ust//
-//ust//	local_set(&ltt_buf->events_lost, 0);
-//ust//	local_set(&ltt_buf->corrupted_subbuffers, 0);
-//ust//
-//ust// return 0;
-//ust// }
-
-static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
- struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
- unsigned int n_subbufs)
-{
- struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
- unsigned int j;
- int fds[2];
- int result;
-
-//ust// ltt_buf->commit_count =
-//ust// zmalloc(sizeof(ltt_buf->commit_count) * n_subbufs);
-//ust// if (!ltt_buf->commit_count)
-//ust// return -ENOMEM;
- kref_get(&trace->kref);
- kref_get(&trace->ltt_transport_kref);
-	kref_get(&ltt_chan->kref);
-	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
-	atomic_long_set(&ltt_buf->consumed, 0);
-	atomic_long_set(&ltt_buf->active_readers, 0);
-	for (j = 0; j < n_subbufs; j++)
-		local_set(&ltt_buf->commit_count[j], 0);
-//ust//	init_waitqueue_head(&ltt_buf->write_wait);
-//ust//	atomic_set(&ltt_buf->wakeup_readers, 0);
-//ust//	spin_lock_init(&ltt_buf->full_lock);
-
-	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
-
-	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
-
-	local_set(&ltt_buf->events_lost, 0);
-	local_set(&ltt_buf->corrupted_subbuffers, 0);
-
- result = pipe(fds);
- if(result == -1) {
- PERROR("pipe");
- return -1;
- }
- ltt_buf->data_ready_fd_read = fds[0];
- ltt_buf->data_ready_fd_write = fds[1];
-
- return 0;
-}
-
-static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan)
-{
- struct ltt_trace_struct *trace = ltt_chan->trace;
- struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
-
-	kref_put(&ltt_chan->trace->ltt_transport_kref,
- ltt_release_transport);
- ltt_relay_print_buffer_errors(ltt_chan);
-//ust// kfree(ltt_buf->commit_count);
-//ust// ltt_buf->commit_count = NULL;
-	kref_put(&ltt_chan->kref, ltt_relay_release_channel);
- kref_put(&trace->kref, ltt_release_trace);
-//ust// wake_up_interruptible(&trace->kref_wq);
-}
-
-static int ltt_chan_alloc_ltt_buf(struct ltt_channel_struct *ltt_chan)
-{
- void *ptr;
- int result;
-
- /* Get one page */
- /* FIXME: increase size if we have a commit_count array that overflows the page */
- size_t size = PAGE_ALIGN(1);
-
- result = ltt_chan->buf_shmid = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
- if(ltt_chan->buf_shmid == -1) {
- PERROR("shmget");
- return -1;
- }
-
- ptr = shmat(ltt_chan->buf_shmid, NULL, 0);
- if(ptr == (void *) -1) {
- perror("shmat");
- goto destroy_shmem;
- }
-
- /* Already mark the shared memory for destruction. This will occur only
- * when all users have detached.
- */
- result = shmctl(ltt_chan->buf_shmid, IPC_RMID, NULL);
- if(result == -1) {
- perror("shmctl");
- return -1;
- }
-
- ltt_chan->buf = ptr;
-
- return 0;
-
- destroy_shmem:
- result = shmctl(ltt_chan->buf_shmid, IPC_RMID, NULL);
- if(result == -1) {
- perror("shmctl");
- }
-
- return -1;
-}
-
-/*
- * Create channel.
- */
-static int ltt_relay_create_channel(const char *trace_name,
- struct ltt_trace_struct *trace, struct dentry *dir,
- const char *channel_name, struct ltt_channel_struct *ltt_chan,
- unsigned int subbuf_size, unsigned int n_subbufs,
- int overwrite)
-{
- char *tmpname;
- unsigned int tmpname_len;
- int err = 0;
-
- tmpname = kmalloc(PATH_MAX, GFP_KERNEL);
- if (!tmpname)
- return EPERM;
- if (overwrite) {
- strncpy(tmpname, LTT_FLIGHT_PREFIX, PATH_MAX-1);
- strncat(tmpname, channel_name,
- PATH_MAX-1-sizeof(LTT_FLIGHT_PREFIX));
- } else {
- strncpy(tmpname, channel_name, PATH_MAX-1);
- }
- strncat(tmpname, "_", PATH_MAX-1-strlen(tmpname));
-
-	kref_init(&ltt_chan->kref);
-
- ltt_chan->trace = trace;
- ltt_chan->buffer_begin = ltt_buffer_begin_callback;
- ltt_chan->buffer_end = ltt_buffer_end_callback;
- ltt_chan->overwrite = overwrite;
- ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
- ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
-//ust// ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
-
- ltt_chan_alloc_ltt_buf(ltt_chan);
-
-//ust// ltt_chan->buf = malloc(sizeof(struct ltt_channel_buf_struct));
- if (!ltt_chan->buf)
- goto alloc_error;
- ltt_chan->trans_channel_data = ltt_relay_open(tmpname,
- dir,
- subbuf_size,
- n_subbufs,
- ltt_chan);
- tmpname_len = strlen(tmpname);
- if (tmpname_len > 0) {
- /* Remove final _ for pretty printing */
- tmpname[tmpname_len-1] = '\0';
- }
- if (ltt_chan->trans_channel_data == NULL) {
- printk(KERN_ERR "LTT : Can't open %s channel for trace %s\n",
- tmpname, trace_name);
- goto relay_open_error;
- }
-
- err = 0;
- goto end;
-
-relay_open_error:
-//ust// percpu_free(ltt_chan->buf);
-alloc_error:
- err = EPERM;
-end:
- kfree(tmpname);
- return err;
-}
-
-static int ltt_relay_create_dirs(struct ltt_trace_struct *new_trace)
-{
-//ust// new_trace->dentry.trace_root = debugfs_create_dir(new_trace->trace_name,
-//ust// get_ltt_root());
-//ust// if (new_trace->dentry.trace_root == NULL) {
-//ust// printk(KERN_ERR "LTT : Trace directory name %s already taken\n",
-//ust// new_trace->trace_name);
-//ust// return EEXIST;
-//ust// }
-
-//ust// new_trace->callbacks.create_buf_file = ltt_create_buf_file_callback;
-//ust// new_trace->callbacks.remove_buf_file = ltt_remove_buf_file_callback;
-
- return 0;
-}
-
-/*
- * LTTng channel flush function.
- *
- * Must be called when no tracing is active in the channel, because of
- * accesses across CPUs.
- */
-static notrace void ltt_relay_buffer_flush(struct rchan_buf *buf)
-{
- struct ltt_channel_struct *channel =
- (struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf = channel->buf;
- int result;
-
- buf->finalized = 1;
- ltt_force_switch(buf, FORCE_FLUSH);
-
- result = write(ltt_buf->data_ready_fd_write, "1", 1);
- if(result == -1) {
- PERROR("write (in ltt_relay_buffer_flush)");
- ERR("this should never happen!");
- }
-}
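The single byte written here pairs with a blocking read on the consumer side; closing the write end (see ltt_relay_finish_buffer below) then reads as EOF. A hedged sketch of the consumer's counterpart, assuming only the pipe fd from struct ltt_channel_buf_struct:

#include <unistd.h>

/* Illustrative consumer wait: returns 1 when data is ready, 0 on EOF
 * (the writer closed the pipe, so the buffer is finished), -1 on error. */
static int wait_for_data(int data_ready_fd_read)
{
	char dummy;
	ssize_t n = read(data_ready_fd_read, &dummy, 1);

	return n == 1 ? 1 : (n == 0 ? 0 : -1);
}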
-
-static void ltt_relay_async_wakeup_chan(struct ltt_channel_struct *ltt_channel)
-{
-//ust// unsigned int i;
-//ust// struct rchan *rchan = ltt_channel->trans_channel_data;
-//ust//
-//ust// for_each_possible_cpu(i) {
-//ust// struct ltt_channel_buf_struct *ltt_buf =
-//ust// percpu_ptr(ltt_channel->buf, i);
-//ust//
-//ust//		if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
-//ust//			atomic_set(&ltt_buf->wakeup_readers, 0);
-//ust// wake_up_interruptible(&rchan->buf[i]->read_wait);
-//ust// }
-//ust// }
-}
-
-static void ltt_relay_finish_buffer(struct ltt_channel_struct *ltt_channel)
-{
- struct rchan *rchan = ltt_channel->trans_channel_data;
- int result;
-
- if (rchan->buf) {
- struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
- ltt_relay_buffer_flush(rchan->buf);
-//ust// ltt_relay_wake_writers(ltt_buf);
- /* closing the pipe tells the consumer the buffer is finished */
-
- //result = write(ltt_buf->data_ready_fd_write, "D", 1);
- //if(result == -1) {
- // PERROR("write (in ltt_relay_finish_buffer)");
- // ERR("this should never happen!");
- //}
- close(ltt_buf->data_ready_fd_write);
- }
-}
-
-
-static void ltt_relay_finish_channel(struct ltt_channel_struct *ltt_channel)
-{
-//ust//	unsigned int i;
-
-//ust//	for_each_possible_cpu(i)
-	ltt_relay_finish_buffer(ltt_channel);
-}
-
-static void ltt_relay_remove_channel(struct ltt_channel_struct *channel)
-{
- struct rchan *rchan = channel->trans_channel_data;
-
- ltt_relay_close(rchan);
- kref_put(&channel->kref, ltt_relay_release_channel);
-}
-
-struct ltt_reserve_switch_offsets {
- long begin, end, old;
- long begin_switch, end_switch_current, end_switch_old;
- long commit_count, reserve_commit_diff;
- size_t before_hdr_pad, size;
-};
-
-/*
- * Returns :
- * 0 if ok
- * !0 if execution must be aborted.
- */
-static inline int ltt_relay_try_reserve(
- struct ltt_channel_struct *ltt_channel,
- struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
- struct rchan_buf *buf,
- struct ltt_reserve_switch_offsets *offsets, size_t data_size,
- u64 *tsc, unsigned int *rflags, int largest_align)
-{
-	offsets->begin = local_read(&ltt_buf->offset);
- offsets->old = offsets->begin;
- offsets->begin_switch = 0;
- offsets->end_switch_current = 0;
- offsets->end_switch_old = 0;
-
- *tsc = trace_clock_read64();
- if (last_tsc_overflow(ltt_buf, *tsc))
- *rflags = LTT_RFLAG_ID_SIZE_TSC;
-
- if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
- offsets->begin_switch = 1; /* For offsets->begin */
- } else {
- offsets->size = ltt_get_header_size(ltt_channel,
- offsets->begin, data_size,
- &offsets->before_hdr_pad, *rflags);
- offsets->size += ltt_align(offsets->begin + offsets->size,
- largest_align)
- + data_size;
- if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
- > buf->chan->subbuf_size) {
- offsets->end_switch_old = 1; /* For offsets->old */
- offsets->begin_switch = 1; /* For offsets->begin */
- }
- }
- if (offsets->begin_switch) {
- long subbuf_index;
-
- if (offsets->end_switch_old)
- offsets->begin = SUBBUF_ALIGN(offsets->begin,
- buf->chan);
- offsets->begin = offsets->begin + ltt_subbuffer_header_size();
- /* Test new buffer integrity */
- subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
- offsets->reserve_commit_diff =
- (BUFFER_TRUNC(offsets->begin, buf->chan)
- >> ltt_channel->n_subbufs_order)
-			- (local_read(&ltt_buf->commit_count[subbuf_index])
- & ltt_channel->commit_count_mask);
- if (offsets->reserve_commit_diff == 0) {
- /* Next buffer not corrupted. */
- if (!ltt_channel->overwrite &&
- (SUBBUF_TRUNC(offsets->begin, buf->chan)
- - SUBBUF_TRUNC(atomic_long_read(
-						&ltt_buf->consumed),
- buf->chan))
- >= rchan->alloc_size) {
- /*
- * We do not overwrite non consumed buffers
- * and we are full : event is lost.
- */
-				local_inc(&ltt_buf->events_lost);
- return -1;
- } else {
- /*
- * next buffer not corrupted, we are either in
- * overwrite mode or the buffer is not full.
- * It's safe to write in this new subbuffer.
- */
- }
- } else {
- /*
- * Next subbuffer corrupted. Force pushing reader even
- * in normal mode. It's safe to write in this new
- * subbuffer.
- */
- }
- offsets->size = ltt_get_header_size(ltt_channel,
- offsets->begin, data_size,
- &offsets->before_hdr_pad, *rflags);
- offsets->size += ltt_align(offsets->begin + offsets->size,
- largest_align)
- + data_size;
- if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
- > buf->chan->subbuf_size) {
- /*
- * Event too big for subbuffers, report error, don't
- * complete the sub-buffer switch.
- */
-			local_inc(&ltt_buf->events_lost);
- return -1;
- } else {
- /*
- * We just made a successful buffer switch and the event
- * fits in the new subbuffer. Let's write.
- */
- }
- } else {
- /*
- * Event fits in the current buffer and we are not on a switch
- * boundary. It's safe to write.
- */
- }
- offsets->end = offsets->begin + offsets->size;
-
- if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
- /*
- * The offset_end will fall at the very beginning of the next
- * subbuffer.
- */
- offsets->end_switch_current = 1; /* For offsets->begin */
- }
- return 0;
-}
-
-/*
- * Returns :
- * 0 if ok
- * !0 if execution must be aborted.
- */
-static inline int ltt_relay_try_switch(
- enum force_switch_mode mode,
- struct ltt_channel_struct *ltt_channel,
- struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
- struct rchan_buf *buf,
- struct ltt_reserve_switch_offsets *offsets,
- u64 *tsc)
-{
- long subbuf_index;
-
-	offsets->begin = local_read(&ltt_buf->offset);
- offsets->old = offsets->begin;
- offsets->begin_switch = 0;
- offsets->end_switch_old = 0;
-
- *tsc = trace_clock_read64();
-
- if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
- offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
- offsets->end_switch_old = 1;
- } else {
- /* we do not have to switch : buffer is empty */
- return -1;
- }
- if (mode == FORCE_ACTIVE)
- offsets->begin += ltt_subbuffer_header_size();
- /*
- * Always begin_switch in FORCE_ACTIVE mode.
- * Test new buffer integrity
- */
- subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
- offsets->reserve_commit_diff =
- (BUFFER_TRUNC(offsets->begin, buf->chan)
- >> ltt_channel->n_subbufs_order)
-		- (local_read(&ltt_buf->commit_count[subbuf_index])
- & ltt_channel->commit_count_mask);
- if (offsets->reserve_commit_diff == 0) {
- /* Next buffer not corrupted. */
- if (mode == FORCE_ACTIVE
- && !ltt_channel->overwrite
-		    && offsets->begin - atomic_long_read(&ltt_buf->consumed)
- >= rchan->alloc_size) {
- /*
- * We do not overwrite non consumed buffers and we are
- * full : ignore switch while tracing is active.
- */
- return -1;
- }
- } else {
- /*
- * Next subbuffer corrupted. Force pushing reader even in normal
- * mode
- */
- }
- offsets->end = offsets->begin;
- return 0;
-}
-
-static inline void ltt_reserve_push_reader(
- struct ltt_channel_struct *ltt_channel,
- struct ltt_channel_buf_struct *ltt_buf,
- struct rchan *rchan,
- struct rchan_buf *buf,
- struct ltt_reserve_switch_offsets *offsets)
-{
- long consumed_old, consumed_new;
-
- do {
-		consumed_old = atomic_long_read(&ltt_buf->consumed);
- /*
- * If buffer is in overwrite mode, push the reader consumed
- * count if the write position has reached it and we are not
- * at the first iteration (don't push the reader farther than
- * the writer). This operation can be done concurrently by many
- * writers in the same buffer, the writer being at the farthest
- * write position sub-buffer index in the buffer being the one
- * which will win this loop.
- * If the buffer is not in overwrite mode, pushing the reader
- * only happens if a sub-buffer is corrupted.
- */
- if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
- - SUBBUF_TRUNC(consumed_old, buf->chan))
- >= rchan->alloc_size)
- consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
- else {
- consumed_new = consumed_old;
- break;
- }
-	} while (atomic_long_cmpxchg(&ltt_buf->consumed, consumed_old,
- consumed_new) != consumed_old);
-
- if (consumed_old != consumed_new) {
- /*
- * Reader pushed : we are the winner of the push, we can
- * therefore reequilibrate reserve and commit. Atomic increment
- * of the commit count permits other writers to play around
- * with this variable before us. We keep track of
- * corrupted_subbuffers even in overwrite mode :
- * we never want to write over a non completely committed
- * sub-buffer : possible causes : the buffer size is too low
- * compared to the unordered data input, or there is a writer
- * that died between the reserve and the commit.
- */
- if (offsets->reserve_commit_diff) {
- /*
- * We have to alter the sub-buffer commit count.
- * We do not deliver the previous subbuffer, given it
- * was either corrupted or not consumed (overwrite
- * mode).
- */
- local_add(offsets->reserve_commit_diff,
-				&ltt_buf->commit_count[
- SUBBUF_INDEX(offsets->begin,
- buf->chan)]);
- if (!ltt_channel->overwrite
- || offsets->reserve_commit_diff
- != rchan->subbuf_size) {
- /*
- * The reserve commit diff was not subbuf_size :
- * it means the subbuffer was partly written to
- * and is therefore corrupted. If it is multiple
- * of subbuffer size and we are in flight
- * recorder mode, we are skipping over a whole
- * subbuffer.
- */
-				local_inc(&ltt_buf->corrupted_subbuffers);
- }
- }
- }
-}
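The loop above is the usual lockless read/compute/compare-and-swap pattern: each writer recomputes from a fresh read of the consumed count, and the writer holding the farthest write position wins the exchange. Stripped of the local_t/atomic_long wrappers, the skeleton looks like this (a sketch assuming a power-of-two sub-buffer size, using a GCC __sync builtin instead of the tree's primitives):

static void push_reader(long *consumed, long write_end, long subbuf_size,
			long buf_size)
{
	long old, new_pos;

	do {
		old = *consumed;
		/* same test as SUBBUF_TRUNC(end - 1) - SUBBUF_TRUNC(old) */
		if (((write_end - 1) & ~(subbuf_size - 1))
		    - (old & ~(subbuf_size - 1)) < buf_size)
			return;	/* reader not overrun: nothing to push */
		/* push the reader to the start of the next sub-buffer */
		new_pos = (old + subbuf_size) & ~(subbuf_size - 1);
	} while (__sync_val_compare_and_swap(consumed, old, new_pos) != old);
}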
-
-
-/*
- * ltt_reserve_switch_old_subbuf: switch old subbuffer
- *
- * Concurrency safe because we are the last and only thread to alter this
- * sub-buffer. As long as it is not delivered and read, no other thread can
- * alter the offset, alter the reserve_count or call the
- * client_buffer_end_callback on this sub-buffer.
- *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves. Not concurrency safe in overwrite mode.
- * We detect corrupted subbuffers with commit and reserve counts. We keep a
- * corrupted sub-buffers count and push the readers across these sub-buffers.
- *
- * Not concurrency safe if a writer is stalled in a subbuffer and another writer
- * switches in, finding out it's corrupted. The result will be that the old
- * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
- * will be declared corrupted too because of the commit count adjustment.
- *
- * Note : offset_old should never be 0 here.
- */
-static inline void ltt_reserve_switch_old_subbuf(
- struct ltt_channel_struct *ltt_channel,
- struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
- struct rchan_buf *buf,
- struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
- long oldidx = SUBBUF_INDEX(offsets->old - 1, rchan);
-
- ltt_channel->buffer_end(buf, *tsc, offsets->old, oldidx);
- /* Must write buffer end before incrementing commit count */
- smp_wmb();
- offsets->commit_count =
- local_add_return(rchan->subbuf_size
- - (SUBBUF_OFFSET(offsets->old - 1, rchan)
- + 1),
-			&ltt_buf->commit_count[oldidx]);
- if ((BUFFER_TRUNC(offsets->old - 1, rchan)
- >> ltt_channel->n_subbufs_order)
- - ((offsets->commit_count - rchan->subbuf_size)
- & ltt_channel->commit_count_mask) == 0)
- ltt_deliver(buf, oldidx, NULL);
-}
-
-/*
- * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
- *
- * This code can be executed unordered : writers may already have written to the
- * sub-buffer before this code gets executed, caution. The commit makes sure
- * that this code is executed before the deliver of this sub-buffer.
- */
-static /*inline*/ void ltt_reserve_switch_new_subbuf(
- struct ltt_channel_struct *ltt_channel,
- struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
- struct rchan_buf *buf,
- struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
- long beginidx = SUBBUF_INDEX(offsets->begin, rchan);
-
- ltt_channel->buffer_begin(buf, *tsc, beginidx);
-	/* Must write buffer begin before incrementing commit count */
- smp_wmb();
- offsets->commit_count = local_add_return(ltt_subbuffer_header_size(),
-			&ltt_buf->commit_count[beginidx]);
- /* Check if the written buffer has to be delivered */
- if ((BUFFER_TRUNC(offsets->begin, rchan)
- >> ltt_channel->n_subbufs_order)
- - ((offsets->commit_count - rchan->subbuf_size)
- & ltt_channel->commit_count_mask) == 0)
- ltt_deliver(buf, beginidx, NULL);
-}
-
-
-/*
- * ltt_reserve_end_switch_current: finish switching current subbuffer
- *
- * Concurrency safe because we are the last and only thread to alter this
- * sub-buffer. As long as it is not delivered and read, no other thread can
- * alter the offset, alter the reserve_count or call the
- * client_buffer_end_callback on this sub-buffer.
- *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves. Not concurrency safe in overwrite mode.
- * We detect corrupted subbuffers with commit and reserve counts. We keep a
- * corrupted sub-buffers count and push the readers across these sub-buffers.
- *
- * Not concurrency safe if a writer is stalled in a subbuffer and another writer
- * switches in, finding out it's corrupted. The result will be that the old
- * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
- * will be declared corrupted too because of the commit count adjustment.
- */
-static inline void ltt_reserve_end_switch_current(
- struct ltt_channel_struct *ltt_channel,
- struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
- struct rchan_buf *buf,
- struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
- long endidx = SUBBUF_INDEX(offsets->end - 1, rchan);
-
- ltt_channel->buffer_end(buf, *tsc, offsets->end, endidx);
-	/* Must write buffer end before incrementing commit count */
- smp_wmb();
- offsets->commit_count =
- local_add_return(rchan->subbuf_size
- - (SUBBUF_OFFSET(offsets->end - 1, rchan)
- + 1),
-			&ltt_buf->commit_count[endidx]);
- if ((BUFFER_TRUNC(offsets->end - 1, rchan)
- >> ltt_channel->n_subbufs_order)
- - ((offsets->commit_count - rchan->subbuf_size)
- & ltt_channel->commit_count_mask) == 0)
- ltt_deliver(buf, endidx, NULL);
-}
-
-/**
- * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer.
- * @trace: the trace structure to log to.
- * @ltt_channel: channel structure
- * @transport_data: data structure specific to ltt relay
- * @data_size: size of the variable length data to log.
- * @slot_size: pointer to total size of the slot (out)
- * @buf_offset : pointer to reserved buffer offset (out)
- * @tsc: pointer to the tsc at the slot reservation (out)
- * @cpu: cpuid
- *
- * Return : -ENOSPC if not enough space, else returns 0.
- * It will take care of sub-buffer switching.
- */
-static notrace int ltt_relay_reserve_slot(struct ltt_trace_struct *trace,
- struct ltt_channel_struct *ltt_channel, void **transport_data,
- size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
- unsigned int *rflags, int largest_align)
-{
- struct rchan *rchan = ltt_channel->trans_channel_data;
- struct rchan_buf *buf = *transport_data = rchan->buf;
- struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
- struct ltt_reserve_switch_offsets offsets;
-
- offsets.reserve_commit_diff = 0;
- offsets.size = 0;
-
- /*
- * Perform retryable operations.
- */
- if (ltt_nesting > 4) {
-		local_inc(&ltt_buf->events_lost);
- return -EPERM;
- }
- do {
- if (ltt_relay_try_reserve(ltt_channel, ltt_buf,
- rchan, buf, &offsets, data_size, tsc, rflags,
- largest_align))
- return -ENOSPC;
-	} while (local_cmpxchg(&ltt_buf->offset, offsets.old,
- offsets.end) != offsets.old);
-
- /*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * events, never the opposite (missing a full TSC event when it would be
- * needed).
- */
- save_last_tsc(ltt_buf, *tsc);
-
- /*
- * Push the reader if necessary
- */
- ltt_reserve_push_reader(ltt_channel, ltt_buf, rchan, buf, &offsets);
-
- /*
- * Switch old subbuffer if needed.
- */
- if (offsets.end_switch_old)
- ltt_reserve_switch_old_subbuf(ltt_channel, ltt_buf, rchan, buf,
- &offsets, tsc);
-
- /*
- * Populate new subbuffer.
- */
- if (offsets.begin_switch)
- ltt_reserve_switch_new_subbuf(ltt_channel, ltt_buf, rchan,
- buf, &offsets, tsc);
-
- if (offsets.end_switch_current)
- ltt_reserve_end_switch_current(ltt_channel, ltt_buf, rchan,
- buf, &offsets, tsc);
-
- *slot_size = offsets.size;
- *buf_offset = offsets.begin + offsets.before_hdr_pad;
- return 0;
-}
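From the caller's perspective the protocol is reserve, write, commit, with the write happening out of order between the two atomic steps; this is exactly what ltt_vtrace does further down. A condensed sketch of that calling sequence:

	ret = ltt_reserve_slot(trace, channel, &transport_data, data_size,
			       &slot_size, &buf_offset, &tsc, &rflags,
			       largest_align);
	if (ret >= 0) {
		/* out-of-order write: event header, then payload,
		 * starting at buf_offset */
		ltt_commit_slot(channel, &transport_data, buf_offset,
				slot_size);
	}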
-
-/*
- * Force a sub-buffer switch for a per-cpu buffer. This operation is
- * completely reentrant : can be called while tracing is active with
- * absolutely no lock held.
- *
- * Note, however, that as a local_cmpxchg is used for some atomic
- * operations, this function must be called from the CPU which owns the buffer
- * for a ACTIVE flush.
- */
-static notrace void ltt_force_switch(struct rchan_buf *buf,
- enum force_switch_mode mode)
-{
- struct ltt_channel_struct *ltt_channel =
- (struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
- struct rchan *rchan = ltt_channel->trans_channel_data;
- struct ltt_reserve_switch_offsets offsets;
- u64 tsc;
-
- offsets.reserve_commit_diff = 0;
- offsets.size = 0;
-
- /*
- * Perform retryable operations.
- */
- do {
- if (ltt_relay_try_switch(mode, ltt_channel, ltt_buf,
- rchan, buf, &offsets, &tsc))
- return;
-	} while (local_cmpxchg(&ltt_buf->offset, offsets.old,
- offsets.end) != offsets.old);
-
- /*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * events, never the opposite (missing a full TSC event when it would be
- * needed).
- */
- save_last_tsc(ltt_buf, tsc);
-
- /*
- * Push the reader if necessary
- */
- if (mode == FORCE_ACTIVE)
- ltt_reserve_push_reader(ltt_channel, ltt_buf, rchan,
- buf, &offsets);
-
- /*
- * Switch old subbuffer if needed.
- */
- if (offsets.end_switch_old)
- ltt_reserve_switch_old_subbuf(ltt_channel, ltt_buf, rchan, buf,
- &offsets, &tsc);
-
- /*
- * Populate new subbuffer.
- */
- if (mode == FORCE_ACTIVE)
- ltt_reserve_switch_new_subbuf(ltt_channel,
- ltt_buf, rchan, buf, &offsets, &tsc);
-}
-
-/*
- * for flight recording. must be called after relay_commit.
- * This function decrements the subbuffer's lost_size each time the commit count
- * catches up with the reserve offset (modulo subbuffer size). It is useful for
- * crash dump.
- * We use slot_size - 1 to make sure we deal correctly with the case where we
- * fill the subbuffer completely (so the subbuf index stays in the previous
- * subbuffer).
- */
-//ust// #ifdef CONFIG_LTT_VMCORE
-static /*inline*/ void ltt_write_commit_counter(struct rchan_buf *buf,
- long buf_offset, size_t slot_size)
-{
- struct ltt_channel_struct *ltt_channel =
- (struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
- struct ltt_subbuffer_header *header;
- long offset, subbuf_idx, commit_count;
- uint32_t lost_old, lost_new;
-
- subbuf_idx = SUBBUF_INDEX(buf_offset - 1, buf->chan);
- offset = buf_offset + slot_size;
- header = (struct ltt_subbuffer_header *)
- ltt_relay_offset_address(buf,
- subbuf_idx * buf->chan->subbuf_size);
- for (;;) {
- lost_old = header->lost_size;
- commit_count =
-			local_read(&ltt_buf->commit_count[subbuf_idx]);
- /* SUBBUF_OFFSET includes commit_count_mask */
- if (!SUBBUF_OFFSET(offset - commit_count, buf->chan)) {
- lost_new = (uint32_t)buf->chan->subbuf_size
- - SUBBUF_OFFSET(commit_count, buf->chan);
- lost_old = cmpxchg_local(&header->lost_size, lost_old,
- lost_new);
- if (lost_old <= lost_new)
- break;
- } else {
- break;
- }
- }
-}
-//ust// #else
-//ust// static inline void ltt_write_commit_counter(struct rchan_buf *buf,
-//ust// long buf_offset, size_t slot_size)
-//ust// {
-//ust// }
-//ust// #endif
-
-/*
- * Atomic unordered slot commit. Increments the commit count in the
- * specified sub-buffer, and delivers it if necessary.
- *
- * Parameters:
- *
- * @ltt_channel : channel structure
- * @transport_data: transport-specific data
- * @buf_offset : offset following the event header.
- * @slot_size : size of the reserved slot.
- */
-static notrace void ltt_relay_commit_slot(
- struct ltt_channel_struct *ltt_channel,
- void **transport_data, long buf_offset, size_t slot_size)
-{
- struct rchan_buf *buf = *transport_data;
- struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
- struct rchan *rchan = buf->chan;
- long offset_end = buf_offset;
- long endidx = SUBBUF_INDEX(offset_end - 1, rchan);
- long commit_count;
-
- /* Must write slot data before incrementing commit count */
- smp_wmb();
- commit_count = local_add_return(slot_size,
-		&ltt_buf->commit_count[endidx]);
- /* Check if all commits have been done */
- if ((BUFFER_TRUNC(offset_end - 1, rchan)
- >> ltt_channel->n_subbufs_order)
- - ((commit_count - rchan->subbuf_size)
- & ltt_channel->commit_count_mask) == 0)
- ltt_deliver(buf, endidx, NULL);
- /*
- * Update lost_size for each commit. It's needed only for extracting
- * ltt buffers from vmcore, after crash.
- */
- ltt_write_commit_counter(buf, buf_offset, slot_size);
-
-	DBG("committed slot. now commit count is %ld", commit_count);
-}
-
-/*
- * This is called with preemption disabled when user space has requested
- * blocking mode. If one of the active traces has free space below a
- * specific threshold value, we reenable preemption and block.
- */
-static int ltt_relay_user_blocking(struct ltt_trace_struct *trace,
- unsigned int chan_index, size_t data_size,
- struct user_dbg_data *dbg)
-{
-//ust// struct rchan *rchan;
-//ust// struct ltt_channel_buf_struct *ltt_buf;
-//ust// struct ltt_channel_struct *channel;
-//ust// struct rchan_buf *relay_buf;
-//ust// int cpu;
-//ust// DECLARE_WAITQUEUE(wait, current);
-//ust//
-//ust// channel = &trace->channels[chan_index];
-//ust// rchan = channel->trans_channel_data;
-//ust// cpu = smp_processor_id();
-//ust// relay_buf = rchan->buf[cpu];
-//ust// ltt_buf = percpu_ptr(channel->buf, cpu);
-//ust//
-//ust// /*
-//ust// * Check if data is too big for the channel : do not
-//ust// * block for it.
-//ust// */
-//ust// if (LTT_RESERVE_CRITICAL + data_size > relay_buf->chan->subbuf_size)
-//ust// return 0;
-//ust//
-//ust// /*
-//ust// * If free space too low, we block. We restart from the
-//ust// * beginning after we resume (cpu id may have changed
-//ust// * while preemption is active).
-//ust// */
-//ust//	spin_lock(&ltt_buf->full_lock);
-//ust//	if (!channel->overwrite) {
-//ust//		dbg->write = local_read(&ltt_buf->offset);
-//ust//		dbg->read = atomic_long_read(&ltt_buf->consumed);
-//ust//		dbg->avail_size = dbg->write + LTT_RESERVE_CRITICAL + data_size
-//ust//				  - SUBBUF_TRUNC(dbg->read,
-//ust//						 relay_buf->chan);
-//ust//		if (dbg->avail_size > rchan->alloc_size) {
-//ust//			__set_current_state(TASK_INTERRUPTIBLE);
-//ust//			add_wait_queue(&ltt_buf->write_wait, &wait);
-//ust//			spin_unlock(&ltt_buf->full_lock);
-//ust//			preempt_enable();
-//ust//			schedule();
-//ust//			__set_current_state(TASK_RUNNING);
-//ust//			remove_wait_queue(&ltt_buf->write_wait, &wait);
-//ust//			if (signal_pending(current))
-//ust//				return -ERESTARTSYS;
-//ust//			preempt_disable();
-//ust//			return 1;
-//ust//		}
-//ust//	}
-//ust//	spin_unlock(&ltt_buf->full_lock);
- return 0;
-}
-
-static void ltt_relay_print_user_errors(struct ltt_trace_struct *trace,
- unsigned int chan_index, size_t data_size,
- struct user_dbg_data *dbg)
-{
- struct rchan *rchan;
- struct ltt_channel_buf_struct *ltt_buf;
- struct ltt_channel_struct *channel;
- struct rchan_buf *relay_buf;
-
- channel = &trace->channels[chan_index];
- rchan = channel->trans_channel_data;
- relay_buf = rchan->buf;
- ltt_buf = channel->buf;
-
- printk(KERN_ERR "Error in LTT usertrace : "
- "buffer full : event lost in blocking "
- "mode. Increase LTT_RESERVE_CRITICAL.\n");
- printk(KERN_ERR "LTT nesting level is %u.\n", ltt_nesting);
- printk(KERN_ERR "LTT avail size %lu.\n",
- dbg->avail_size);
-	printk(KERN_ERR "avail write : %lu, read : %lu\n",
-			dbg->write, dbg->read);
-
-	dbg->write = local_read(&ltt_buf->offset);
-	dbg->read = atomic_long_read(&ltt_buf->consumed);
-
- printk(KERN_ERR "LTT cur size %lu.\n",
- dbg->write + LTT_RESERVE_CRITICAL + data_size
- - SUBBUF_TRUNC(dbg->read, relay_buf->chan));
- printk(KERN_ERR "cur write : %lu, read : %lu\n",
- dbg->write, dbg->read);
-}
-
-//ust// static struct ltt_transport ltt_relay_transport = {
-//ust// .name = "relay",
-//ust// .owner = THIS_MODULE,
-//ust// .ops = {
-//ust// .create_dirs = ltt_relay_create_dirs,
-//ust// .remove_dirs = ltt_relay_remove_dirs,
-//ust// .create_channel = ltt_relay_create_channel,
-//ust// .finish_channel = ltt_relay_finish_channel,
-//ust// .remove_channel = ltt_relay_remove_channel,
-//ust// .wakeup_channel = ltt_relay_async_wakeup_chan,
-//ust// .commit_slot = ltt_relay_commit_slot,
-//ust// .reserve_slot = ltt_relay_reserve_slot,
-//ust// .user_blocking = ltt_relay_user_blocking,
-//ust// .user_errors = ltt_relay_print_user_errors,
-//ust// },
-//ust// };
-
-static struct ltt_transport ust_relay_transport = {
- .name = "ustrelay",
-//ust// .owner = THIS_MODULE,
- .ops = {
- .create_dirs = ltt_relay_create_dirs,
- .remove_dirs = ltt_relay_remove_dirs,
- .create_channel = ltt_relay_create_channel,
- .finish_channel = ltt_relay_finish_channel,
- .remove_channel = ltt_relay_remove_channel,
- .wakeup_channel = ltt_relay_async_wakeup_chan,
- .commit_slot = ltt_relay_commit_slot,
- .reserve_slot = ltt_relay_reserve_slot,
- .user_blocking = ltt_relay_user_blocking,
- .user_errors = ltt_relay_print_user_errors,
- },
-};
-
-//ust// static int __init ltt_relay_init(void)
-//ust// {
-//ust// printk(KERN_INFO "LTT : ltt-relay init\n");
-//ust//
-//ust// ltt_file_operations = ltt_relay_file_operations;
-//ust// ltt_file_operations.owner = THIS_MODULE;
-//ust// ltt_file_operations.open = ltt_open;
-//ust// ltt_file_operations.release = ltt_release;
-//ust// ltt_file_operations.poll = ltt_poll;
-//ust// ltt_file_operations.splice_read = ltt_relay_file_splice_read,
-//ust// ltt_file_operations.ioctl = ltt_ioctl;
-//ust//#ifdef CONFIG_COMPAT
-//ust// ltt_file_operations.compat_ioctl = ltt_compat_ioctl;
-//ust//#endif
-//ust//
-//ust//	ltt_transport_register(&ltt_relay_transport);
-//ust//
-//ust// return 0;
-//ust// }
-
-static char initialized = 0;
-
-void __attribute__((constructor)) init_ustrelay_transport(void)
-{
- if(!initialized) {
- ltt_transport_register(&ust_relay_transport);
- initialized = 1;
- }
-}
-
-static void __exit ltt_relay_exit(void)
-{
-//ust// printk(KERN_INFO "LTT : ltt-relay exit\n");
-
- ltt_transport_unregister(&ust_relay_transport);
-}
-
-//ust// module_init(ltt_relay_init);
-//ust// module_exit(ltt_relay_exit);
-//ust//
-//ust// MODULE_LICENSE("GPL");
-//ust// MODULE_AUTHOR("Mathieu Desnoyers");
-//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Lockless Relay");
+++ /dev/null
-/*
- * linux/include/linux/ltt-relay.h
- *
- * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
- * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
- * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * CONFIG_RELAY definitions and declarations
- */
-
-#ifndef _LINUX_LTT_RELAY_H
-#define _LINUX_LTT_RELAY_H
-
-//ust// #include <linux/types.h>
-//ust// #include <linux/sched.h>
-//ust// #include <linux/timer.h>
-//ust// #include <linux/wait.h>
-//ust// #include <linux/list.h>
-//ust// #include <linux/fs.h>
-//ust// #include <linux/poll.h>
-//ust// #include <linux/kref.h>
-//ust// #include <linux/mm.h>
-//ust// #include <linux/ltt-core.h>
-#include "kref.h"
-#include "list.h"
-
-/* Needs a _much_ better name... */
-#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
-
-/*
- * Tracks changes to rchan/rchan_buf structs
- */
-#define LTT_RELAY_CHANNEL_VERSION 8
-
-struct rchan_buf;
-
-struct buf_page {
- struct page *page;
- struct rchan_buf *buf; /* buffer the page belongs to */
- size_t offset; /* page offset in the buffer */
- struct list_head list; /* buffer linked list */
-};
-
-/*
- * Per-cpu relay channel buffer
- */
-struct rchan_buf {
- struct rchan *chan; /* associated channel */
-//ust// wait_queue_head_t read_wait; /* reader wait queue */
-//ust// struct timer_list timer; /* reader wake-up timer */
-//ust// struct dentry *dentry; /* channel file dentry */
- struct kref kref; /* channel buffer refcount */
-//ust// struct list_head pages; /* list of buffer pages */
- void *buf_data; //ust//
- size_t buf_size;
-//ust// struct buf_page *wpage; /* current write page (cache) */
-//ust// struct buf_page *hpage[2]; /* current subbuf header page (cache) */
-//ust// struct buf_page *rpage; /* current subbuf read page (cache) */
-//ust// unsigned int page_count; /* number of current buffer pages */
- unsigned int finalized; /* buffer has been finalized */
-//ust// unsigned int cpu; /* this buf's cpu */
- int shmid; /* the shmid of the buffer data pages */
-} ____cacheline_aligned;
-
-/*
- * Relay channel data structure
- */
-struct rchan {
- u32 version; /* the version of this struct */
- size_t subbuf_size; /* sub-buffer size */
- size_t n_subbufs; /* number of sub-buffers per buffer */
- size_t alloc_size; /* total buffer size allocated */
- struct rchan_callbacks *cb; /* client callbacks */
- struct kref kref; /* channel refcount */
- void *private_data; /* for user-defined data */
-//ust// struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
- struct rchan_buf *buf;
- struct list_head list; /* for channel list */
- struct dentry *parent; /* parent dentry passed to open */
- int subbuf_size_order; /* order of sub-buffer size */
-//ust// char base_filename[NAME_MAX]; /* saved base filename */
-};
-
-/*
- * Relay channel client callbacks
- */
-struct rchan_callbacks {
- /*
- * subbuf_start - called on buffer-switch to a new sub-buffer
- * @buf: the channel buffer containing the new sub-buffer
- * @subbuf: the start of the new sub-buffer
- * @prev_subbuf: the start of the previous sub-buffer
- * @prev_padding: unused space at the end of previous sub-buffer
- *
- * The client should return 1 to continue logging, 0 to stop
- * logging.
- *
- * NOTE: subbuf_start will also be invoked when the buffer is
- * created, so that the first sub-buffer can be initialized
- * if necessary. In this case, prev_subbuf will be NULL.
- *
- * NOTE: the client can reserve bytes at the beginning of the new
- * sub-buffer by calling subbuf_start_reserve() in this callback.
- */
- int (*subbuf_start) (struct rchan_buf *buf,
- void *subbuf,
- void *prev_subbuf,
- size_t prev_padding);
-
- /*
- * create_buf_file - create file to represent a relay channel buffer
- * @filename: the name of the file to create
- * @parent: the parent of the file to create
- * @mode: the mode of the file to create
- * @buf: the channel buffer
- *
- * Called during relay_open(), once for each per-cpu buffer,
- * to allow the client to create a file to be used to
- * represent the corresponding channel buffer. If the file is
- * created outside of relay, the parent must also exist in
- * that filesystem.
- *
- * The callback should return the dentry of the file created
- * to represent the relay buffer.
- *
- * Setting the is_global outparam to a non-zero value will
- * cause relay_open() to create a single global buffer rather
- * than the default set of per-cpu buffers.
- *
- * See Documentation/filesystems/relayfs.txt for more info.
- */
- struct dentry *(*create_buf_file)(const char *filename,
- struct dentry *parent,
- int mode,
- struct rchan_buf *buf);
-
- /*
- * remove_buf_file - remove file representing a relay channel buffer
- * @dentry: the dentry of the file to remove
- *
- * Called during relay_close(), once for each per-cpu buffer,
- * to allow the client to remove a file used to represent a
- * channel buffer.
- *
- * The callback should return 0 if successful, negative if not.
- */
-//ust// int (*remove_buf_file)(struct rchan_buf *buf);
-};
-
-extern struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
- struct buf_page *page, size_t offset, ssize_t diff_offset);
-
-extern struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
- struct buf_page *page, size_t offset, ssize_t diff_offset);
-
-extern void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
- const void *src, size_t len, ssize_t cpy);
-
-extern int ltt_relay_read(struct rchan_buf *buf, size_t offset,
- void *dest, size_t len);
-
-extern struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf,
- size_t offset);
-
-/*
- * Return the address where a given offset is located.
- * Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to write directly to this address,
- * as long as the write is never bigger than a page size.
- */
-extern void *ltt_relay_offset_address(struct rchan_buf *buf,
- size_t offset);
-
-/*
- * Find the page containing "offset". Cache it if it is after the currently
- * cached page.
- */
-static inline struct buf_page *ltt_relay_cache_page(struct rchan_buf *buf,
- struct buf_page **page_cache,
- struct buf_page *page, size_t offset)
-{
- ssize_t diff_offset;
- ssize_t half_buf_size = buf->chan->alloc_size >> 1;
-
- /*
- * Make sure this is the page we want to write into. The current
- * page is changed concurrently by other writers. [wrh]page are
- * used as a cache remembering the last page written
- * to/read/looked up for header address. No synchronization;
-	 * could have to find the previous page if a nested write
-	 * occurred. Finding the right page is done by comparing the
- * dest_offset with the buf_page offsets.
- * When at the exact opposite of the buffer, bias towards forward search
- * because it will be cached.
- */
-
- diff_offset = (ssize_t)offset - (ssize_t)page->offset;
- if (diff_offset <= -(ssize_t)half_buf_size)
- diff_offset += buf->chan->alloc_size;
- else if (diff_offset > half_buf_size)
- diff_offset -= buf->chan->alloc_size;
-
- if (unlikely(diff_offset >= (ssize_t)PAGE_SIZE)) {
- page = ltt_relay_find_next_page(buf, page, offset, diff_offset);
- *page_cache = page;
- } else if (unlikely(diff_offset < 0)) {
- page = ltt_relay_find_prev_page(buf, page, offset, diff_offset);
- }
- return page;
-}
-
-//ust// #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
-{
- switch (len) {
- case 0: break;
- case 1: *(u8 *)dest = *(const u8 *)src;
- break;
- case 2: *(u16 *)dest = *(const u16 *)src;
- break;
- case 4: *(u32 *)dest = *(const u32 *)src;
- break;
-//ust// #if (BITS_PER_LONG == 64)
- case 8: *(u64 *)dest = *(const u64 *)src;
- break;
-//ust// #endif
- default:
- memcpy(dest, src, len);
- }
-}
-//ust// #else
-//ust// /*
-//ust// * Returns whether the dest and src addresses are aligned on
-//ust// * min(sizeof(void *), len). Call this with statically known len for efficiency.
-//ust// */
-//ust// static inline int addr_aligned(const void *dest, const void *src, size_t len)
-//ust// {
-//ust// if (ltt_align((size_t)dest, len))
-//ust// return 0;
-//ust// if (ltt_align((size_t)src, len))
-//ust// return 0;
-//ust// return 1;
-//ust// }
-//ust//
-//ust// static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
-//ust// {
-//ust// switch (len) {
-//ust// case 0: break;
-//ust// case 1: *(u8 *)dest = *(const u8 *)src;
-//ust// break;
-//ust// case 2: if (unlikely(!addr_aligned(dest, src, 2)))
-//ust// goto memcpy_fallback;
-//ust// *(u16 *)dest = *(const u16 *)src;
-//ust// break;
-//ust// case 4: if (unlikely(!addr_aligned(dest, src, 4)))
-//ust// goto memcpy_fallback;
-//ust// *(u32 *)dest = *(const u32 *)src;
-//ust// break;
-//ust// #if (BITS_PER_LONG == 64)
-//ust// case 8: if (unlikely(!addr_aligned(dest, src, 8)))
-//ust// goto memcpy_fallback;
-//ust// *(u64 *)dest = *(const u64 *)src;
-//ust// break;
-//ust// #endif
-//ust// default:
-//ust// goto memcpy_fallback;
-//ust// }
-//ust// return;
-//ust// memcpy_fallback:
-//ust// memcpy(dest, src, len);
-//ust// }
-//ust// #endif
-
-static inline int ltt_relay_write(struct rchan_buf *buf, size_t offset,
- const void *src, size_t len)
-{
-//ust// struct buf_page *page;
-//ust// ssize_t pagecpy;
-//ust//
-//ust// offset &= buf->chan->alloc_size - 1;
-//ust// page = buf->wpage;
-//ust//
-//ust// page = ltt_relay_cache_page(buf, &buf->wpage, page, offset);
-//ust// pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-//ust// ltt_relay_do_copy(page_address(page->page)
-//ust// + (offset & ~PAGE_MASK), src, pagecpy);
-//ust//
-//ust// if (unlikely(len != pagecpy))
-//ust// _ltt_relay_write(buf, offset, src, len, page, pagecpy);
-//ust// return len;
-
-
- size_t cpy;
- cpy = min_t(size_t, len, buf->buf_size - offset);
- ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
-
- if (unlikely(len != cpy))
- _ltt_relay_write(buf, offset, src, len, cpy);
- return len;
-}
-
-/*
- * CONFIG_LTT_RELAY kernel API, ltt/ltt-relay-alloc.c
- */
-
-struct rchan *ltt_relay_open(const char *base_filename,
- struct dentry *parent,
- size_t subbuf_size,
- size_t n_subbufs,
- void *private_data);
-extern void ltt_relay_close(struct rchan *chan);
-
-/*
- * exported ltt_relay file operations, ltt/ltt-relay-alloc.c
- */
-extern const struct file_operations ltt_relay_file_operations;
-
-
-/* LTTng lockless logging buffer info */
-struct ltt_channel_buf_struct {
- /* First 32 bytes cache-hot cacheline */
- local_t offset; /* Current offset in the buffer */
-//ust// local_t *commit_count; /* Commit count per sub-buffer */
- atomic_long_t consumed; /*
- * Current offset in the buffer
- * standard atomic access (shared)
- */
- unsigned long last_tsc; /*
- * Last timestamp written in the buffer.
- */
- /* End of first 32 bytes cacheline */
- atomic_long_t active_readers; /*
- * Active readers count
- * standard atomic access (shared)
- */
- local_t events_lost;
- local_t corrupted_subbuffers;
-//ust// spinlock_t full_lock; /*
-//ust// * buffer full condition spinlock, only
-//ust// * for userspace tracing blocking mode
-//ust// * synchronization with reader.
-//ust// */
-//ust// wait_queue_head_t write_wait; /*
-//ust// * Wait queue for blocking user space
-//ust// * writers
-//ust// */
-//ust// atomic_t wakeup_readers; /* Boolean : wakeup readers waiting ? */
- /* one byte is written to this pipe when data is available, in order
- to wake the consumer */
-	/* portability: Single byte writes must be as quick as possible. The kernel-side
-	   buffer must be large enough so the writer doesn't block. From the pipe(7)
-	   man page: Since Linux 2.6.11, the pipe capacity is 65536 bytes. */
- int data_ready_fd_write;
- /* the reading end of the pipe */
- int data_ready_fd_read;
-
- /* commit count per subbuffer; must be at end of struct */
- local_t commit_count[0] ____cacheline_aligned;
-} ____cacheline_aligned;
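commit_count[0] is a C89-style flexible array member, which is why the struct must be allocated with extra room for one counter per sub-buffer, and why the FIXME in ltt_chan_alloc_ltt_buf worries about the array overflowing its single page. A hedged allocation sketch (zmalloc stands in for whatever zeroing allocator is used):

	size_t size = sizeof(struct ltt_channel_buf_struct)
		      + n_subbufs * sizeof(local_t);
	struct ltt_channel_buf_struct *ltt_buf = zmalloc(size);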
-
-int ltt_do_get_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, long *pconsumed_old);
-
-int ltt_do_put_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, u32 uconsumed_old);
-
-
-#endif /* _LINUX_LTT_RELAY_H */
-
+++ /dev/null
-/*
- * LTTng serializing code.
- *
- * Copyright Mathieu Desnoyers, March 2007.
- *
- * Licensed under the GPLv2.
- *
- * See this discussion about weirdness about passing va_list and then va_list to
- * functions. (related to array argument passing). va_list seems to be
- * implemented as an array on x86_64, but not on i386... This is why we pass a
- * va_list * to ltt_vtrace.
- */
-
-#include <stdarg.h>
-//ust// #include <linux/ctype.h>
-//ust// #include <linux/string.h>
-//ust// #include <linux/module.h>
-//ust// #include <linux/ltt-tracer.h>
-#include <string.h>
-#include <stdint.h>
-#include "kernelcompat.h"
-#include "relay.h"
-#include "tracer.h"
-#include "list.h"
-#include "usterr.h"
-
-enum ltt_type {
- LTT_TYPE_SIGNED_INT,
- LTT_TYPE_UNSIGNED_INT,
- LTT_TYPE_STRING,
- LTT_TYPE_NONE,
-};
-
-#define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
-
-/*
- * Inspired from vsnprintf
- *
- * The serialization format string supports the basic printf format strings.
- * In addition, it defines new formats that can be used to serialize more
- * complex/non portable data structures.
- *
- * Typical use:
- *
- * field_name %ctype
- * field_name #tracetype %ctype
- * field_name #tracetype %ctype1 %ctype2 ...
- *
- * A conversion is performed between format string types supported by GCC and
- * the trace type requested. GCC type is used to perform type checking on format
- * strings. Trace type is used to specify the exact binary representation
- * in the trace. A mapping is done between one or more GCC types to one trace
- * type. Sign extension, if required by the conversion, is performed following
- * the trace type.
- *
- * If a gcc format is not declared with a trace format, the gcc format is
- * also used as binary representation in the trace.
- *
- * Strings are supported with %s.
- * A single tracetype (sequence) can take multiple c types as parameter.
- *
- * c types:
- *
- * see printf(3).
- *
- * Note: to write a uint32_t in a trace, the following expression is recommended
- * so that it is portable:
- *
- * ("#4u%lu", (unsigned long)var)
- *
- * trace types:
- *
- * Serialization specific formats :
- *
- * Fixed size integers
- * #1u writes uint8_t
- * #2u writes uint16_t
- * #4u writes uint32_t
- * #8u writes uint64_t
- * #1d writes int8_t
- * #2d writes int16_t
- * #4d writes int32_t
- * #8d writes int64_t
- * i.e.:
- * #1u%lu #2u%lu #4d%lu #8d%lu #llu%hu #d%lu
- *
- * * Attributes:
- *
- * n: (for network byte order)
- * #ntracetype%ctype
- * is written in the trace in network byte order.
- *
- * i.e.: #bn4u%lu, #n%lu, #b%u
- *
- * TODO (eventually)
- * Variable length sequence
- * #a #tracetype1 #tracetype2 %array_ptr %elem_size %num_elems
- * In the trace:
- * #a specifies that this is a sequence
- * #tracetype1 is the type of elements in the sequence
- * #tracetype2 is the type of the element count
- * GCC input:
- * array_ptr is a pointer to an array that contains members of size
- * elem_size.
- * num_elems is the number of elements in the array.
- * i.e.: #a #lu #lu %p %lu %u
- *
- * Callback
- * #k callback (taken from the probe data)
- *	The following % arguments are expected by the callback
- *
- * i.e.: #a #lu #lu #k %p
- *
- * Note: No conversion is done from floats to integers, nor from integers to
- * floats between c types and trace types. float conversion from double to float
- * or from float to double is also not supported.
- *
- * REMOVE
- * %*b expects sizeof(data), data
- * where sizeof(data) is 1, 2, 4 or 8
- *
- * Fixed length struct, union or array.
- * FIXME: unable to extract those sizes statically.
- * %*r expects sizeof(*ptr), ptr
- * %*.*r expects sizeof(*ptr), __alignof__(*ptr), ptr
- * struct and unions removed.
- * Fixed length array:
- * [%p]#a[len #tracetype]
- * i.e.: [%p]#a[12 #lu]
- *
- * Variable length sequence
- * %*.*:*v expects sizeof(*ptr), __alignof__(*ptr), elem_num, ptr
- * where elem_num is the number of elements in the sequence
- */
-static inline const char *parse_trace_type(const char *fmt,
- char *trace_size, enum ltt_type *trace_type,
- unsigned long *attributes)
-{
- int qualifier; /* 'h', 'l', or 'L' for integer fields */
- /* 'z' support added 23/7/1999 S.H. */
- /* 'z' changed to 'Z' --davidm 1/25/99 */
- /* 't' added for ptrdiff_t */
-
- /* parse attributes. */
-repeat:
- switch (*fmt) {
- case 'n':
- *attributes |= LTT_ATTRIBUTE_NETWORK_BYTE_ORDER;
- ++fmt;
- goto repeat;
- }
-
- /* get the conversion qualifier */
- qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
- *fmt == 'S' || *fmt == '1' || *fmt == '2' ||
-	    *fmt == '4' || *fmt == '8') {
- qualifier = *fmt;
- ++fmt;
- if (qualifier == 'l' && *fmt == 'l') {
- qualifier = 'L';
- ++fmt;
- }
- }
-
- switch (*fmt) {
- case 'c':
- *trace_type = LTT_TYPE_UNSIGNED_INT;
- *trace_size = sizeof(unsigned char);
- goto parse_end;
- case 's':
- *trace_type = LTT_TYPE_STRING;
- goto parse_end;
- case 'p':
- *trace_type = LTT_TYPE_UNSIGNED_INT;
- *trace_size = sizeof(void *);
- goto parse_end;
- case 'd':
- case 'i':
- *trace_type = LTT_TYPE_SIGNED_INT;
- break;
- case 'o':
- case 'u':
- case 'x':
- case 'X':
- *trace_type = LTT_TYPE_UNSIGNED_INT;
- break;
- default:
- if (!*fmt)
- --fmt;
- goto parse_end;
- }
- switch (qualifier) {
- case 'L':
- *trace_size = sizeof(long long);
- break;
- case 'l':
- *trace_size = sizeof(long);
- break;
- case 'Z':
- case 'z':
- *trace_size = sizeof(size_t);
- break;
-//ust// case 't':
-//ust// *trace_size = sizeof(ptrdiff_t);
-//ust// break;
- case 'h':
- *trace_size = sizeof(short);
- break;
- case '1':
- *trace_size = sizeof(uint8_t);
- break;
- case '2':
- *trace_size = sizeof(uint16_t);
- break;
- case '4':
- *trace_size = sizeof(uint32_t);
- break;
- case '8':
- *trace_size = sizeof(uint64_t);
- break;
- default:
- *trace_size = sizeof(int);
- }
-
-parse_end:
- return fmt;
-}
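Concretely, the parser above accepts format strings such as the one below, including the portable "#4u%lu" idiom recommended earlier. This assumes the kernel-style trace_mark(name, format, ...) macro from marker.h; the event and field names are made up:

	trace_mark(probe_example, "id #2u%hu count #4u%lu name %s",
		   id, (unsigned long)count, name);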
-
-/*
- * Restrictions:
- * Field width and precision are *not* supported.
- * %n not supported.
- */
-static inline const char *parse_c_type(const char *fmt,
- char *c_size, enum ltt_type *c_type)
-{
- int qualifier; /* 'h', 'l', or 'L' for integer fields */
- /* 'z' support added 23/7/1999 S.H. */
- /* 'z' changed to 'Z' --davidm 1/25/99 */
- /* 't' added for ptrdiff_t */
-
- /* process flags : ignore standard print formats for now. */
-repeat:
- switch (*fmt) {
- case '-':
- case '+':
- case ' ':
- case '#':
- case '0':
- ++fmt;
- goto repeat;
- }
-
- /* get the conversion qualifier */
- qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
- *fmt == 'S') {
- qualifier = *fmt;
- ++fmt;
- if (qualifier == 'l' && *fmt == 'l') {
- qualifier = 'L';
- ++fmt;
- }
- }
-
- switch (*fmt) {
- case 'c':
- *c_type = LTT_TYPE_UNSIGNED_INT;
- *c_size = sizeof(unsigned char);
- goto parse_end;
- case 's':
- *c_type = LTT_TYPE_STRING;
- goto parse_end;
- case 'p':
- *c_type = LTT_TYPE_UNSIGNED_INT;
- *c_size = sizeof(void *);
- goto parse_end;
- case 'd':
- case 'i':
- *c_type = LTT_TYPE_SIGNED_INT;
- break;
- case 'o':
- case 'u':
- case 'x':
- case 'X':
- *c_type = LTT_TYPE_UNSIGNED_INT;
- break;
- default:
- if (!*fmt)
- --fmt;
- goto parse_end;
- }
- switch (qualifier) {
- case 'L':
- *c_size = sizeof(long long);
- break;
- case 'l':
- *c_size = sizeof(long);
- break;
- case 'Z':
- case 'z':
- *c_size = sizeof(size_t);
- break;
-//ust// case 't':
-//ust// *c_size = sizeof(ptrdiff_t);
-//ust// break;
- case 'h':
- *c_size = sizeof(short);
- break;
- default:
- *c_size = sizeof(int);
- }
-
-parse_end:
- return fmt;
-}
-
-static inline size_t serialize_trace_data(struct rchan_buf *buf,
- size_t buf_offset,
- char trace_size, enum ltt_type trace_type,
- char c_size, enum ltt_type c_type,
- int *largest_align, va_list *args)
-{
- union {
- unsigned long v_ulong;
- uint64_t v_uint64;
- struct {
- const char *s;
- size_t len;
- } v_string;
- } tmp;
-
- /*
- * Be careful about sign extension here.
- * Sign extension is done with the destination (trace) type.
- */
- switch (trace_type) {
- case LTT_TYPE_SIGNED_INT:
- switch (c_size) {
- case 1:
- tmp.v_ulong = (long)(int8_t)va_arg(*args, int);
- break;
- case 2:
- tmp.v_ulong = (long)(int16_t)va_arg(*args, int);
- break;
- case 4:
- tmp.v_ulong = (long)(int32_t)va_arg(*args, int);
- break;
- case 8:
- tmp.v_uint64 = va_arg(*args, int64_t);
- break;
- default:
- BUG();
- }
- break;
- case LTT_TYPE_UNSIGNED_INT:
- switch (c_size) {
- case 1:
- tmp.v_ulong = (unsigned long)(uint8_t)
- va_arg(*args, unsigned int);
- break;
- case 2:
- tmp.v_ulong = (unsigned long)(uint16_t)
- va_arg(*args, unsigned int);
- break;
- case 4:
- tmp.v_ulong = (unsigned long)(uint32_t)
- va_arg(*args, unsigned int);
- break;
- case 8:
- tmp.v_uint64 = va_arg(*args, uint64_t);
- break;
- default:
- BUG();
- }
- break;
- case LTT_TYPE_STRING:
- tmp.v_string.s = va_arg(*args, const char *);
- if ((unsigned long)tmp.v_string.s < PAGE_SIZE)
- tmp.v_string.s = "<NULL>";
- tmp.v_string.len = strlen(tmp.v_string.s)+1;
- if (buf)
- ltt_relay_write(buf, buf_offset, tmp.v_string.s,
- tmp.v_string.len);
- buf_offset += tmp.v_string.len;
- goto copydone;
- default:
- BUG();
- }
-
- /*
- * If trace_size is lower or equal to 4 bytes, there is no sign
- * extension to do because we are already encoded in a long. Therefore,
- * we can combine signed and unsigned ops. 4 bytes float also works
- * with this, because we do a simple copy of 4 bytes into 4 bytes
- * without manipulation (and we do not support conversion from integers
- * to floats).
- * It is also the case if c_size is 8 bytes, which is the largest
- * possible integer.
- */
- if (ltt_get_alignment()) {
- buf_offset += ltt_align(buf_offset, trace_size);
- if (largest_align)
- *largest_align = max_t(int, *largest_align, trace_size);
- }
- if (trace_size <= 4 || c_size == 8) {
- if (buf) {
- switch (trace_size) {
- case 1:
- if (c_size == 8)
- ltt_relay_write(buf, buf_offset,
- (uint8_t[]){ (uint8_t)tmp.v_uint64 },
- sizeof(uint8_t));
- else
- ltt_relay_write(buf, buf_offset,
- (uint8_t[]){ (uint8_t)tmp.v_ulong },
- sizeof(uint8_t));
- break;
- case 2:
- if (c_size == 8)
- ltt_relay_write(buf, buf_offset,
- (uint16_t[]){ (uint16_t)tmp.v_uint64 },
- sizeof(uint16_t));
- else
- ltt_relay_write(buf, buf_offset,
- (uint16_t[]){ (uint16_t)tmp.v_ulong },
- sizeof(uint16_t));
- break;
- case 4:
- if (c_size == 8)
- ltt_relay_write(buf, buf_offset,
- (uint32_t[]){ (uint32_t)tmp.v_uint64 },
- sizeof(uint32_t));
- else
- ltt_relay_write(buf, buf_offset,
- (uint32_t[]){ (uint32_t)tmp.v_ulong },
- sizeof(uint32_t));
- break;
- case 8:
- /*
- * c_size cannot be other than 8 here because
- * trace_size > 4.
- */
- ltt_relay_write(buf, buf_offset,
- (uint64_t[]){ (uint64_t)tmp.v_uint64 },
- sizeof(uint64_t));
- break;
- default:
- BUG();
- }
- }
- buf_offset += trace_size;
- goto copydone;
- } else {
- /*
- * Perform sign extension.
- */
- if (buf) {
- switch (trace_type) {
- case LTT_TYPE_SIGNED_INT:
- ltt_relay_write(buf, buf_offset,
- (int64_t[]){ (int64_t)tmp.v_ulong },
- sizeof(int64_t));
- break;
- case LTT_TYPE_UNSIGNED_INT:
- ltt_relay_write(buf, buf_offset,
- (uint64_t[]){ (uint64_t)tmp.v_ulong },
- sizeof(uint64_t));
- break;
- default:
- BUG();
- }
- }
- buf_offset += trace_size;
- goto copydone;
- }
-
-copydone:
- return buf_offset;
-}
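The sign-extension rule (extend following the trace type, not the C type) is easy to check in isolation: serializing (int16_t)-1 under a #8d trace type must yield all-ones, which the cast chain above guarantees. A standalone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	long v = (long)(int16_t)-1;	/* the c_size == 2 capture above */
	int64_t t = (int64_t)v;		/* widened per LTT_TYPE_SIGNED_INT */

	assert(t == -1);		/* 0xffffffffffffffff, not 0xffff */
	return 0;
}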
-
-notrace size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
- struct ltt_serialize_closure *closure,
- void *serialize_private, int *largest_align,
- const char *fmt, va_list *args)
-{
- char trace_size = 0, c_size = 0; /*
- * 0 (unset), 1, 2, 4, 8 bytes.
- */
- enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE;
- unsigned long attributes = 0;
-
- for (; *fmt ; ++fmt) {
- switch (*fmt) {
- case '#':
- /* tracetypes (#) */
- ++fmt; /* skip first '#' */
- if (*fmt == '#') /* Escaped ## */
- break;
- attributes = 0;
- fmt = parse_trace_type(fmt, &trace_size, &trace_type,
- &attributes);
- break;
- case '%':
- /* c types (%) */
- ++fmt; /* skip first '%' */
- if (*fmt == '%') /* Escaped %% */
- break;
- fmt = parse_c_type(fmt, &c_size, &c_type);
- /*
- * Output c types if no trace types has been
- * specified.
- */
- if (!trace_size)
- trace_size = c_size;
- if (trace_type == LTT_TYPE_NONE)
- trace_type = c_type;
- if (c_type == LTT_TYPE_STRING)
- trace_type = LTT_TYPE_STRING;
- /* perform trace write */
- buf_offset = serialize_trace_data(buf,
- buf_offset, trace_size,
- trace_type, c_size, c_type,
- largest_align, args);
- trace_size = 0;
- c_size = 0;
- trace_type = LTT_TYPE_NONE;
-			c_type = LTT_TYPE_NONE;
- attributes = 0;
- break;
- /* default is to skip the text, doing nothing */
- }
- }
- return buf_offset;
-}
-EXPORT_SYMBOL_GPL(ltt_serialize_data);
-
-/*
- * Calculate data size
- * Assume that the padding for alignment starts at a sizeof(void *) address.
- */
-static notrace size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
- void *serialize_private, int *largest_align,
- const char *fmt, va_list *args)
-{
- ltt_serialize_cb cb = closure->callbacks[0];
- closure->cb_idx = 0;
- return (size_t)cb(NULL, 0, closure, serialize_private,
- largest_align, fmt, args);
-}
-
-static notrace
-void ltt_write_event_data(struct rchan_buf *buf, size_t buf_offset,
- struct ltt_serialize_closure *closure,
- void *serialize_private, int largest_align,
- const char *fmt, va_list *args)
-{
- ltt_serialize_cb cb = closure->callbacks[0];
- closure->cb_idx = 0;
- buf_offset += ltt_align(buf_offset, largest_align);
- cb(buf, buf_offset, closure, serialize_private, NULL, fmt, args);
-}
-
-
-notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
- void *call_data, const char *fmt, va_list *args)
-{
- int largest_align, ret;
- struct ltt_active_marker *pdata;
- uint16_t eID;
- size_t data_size, slot_size;
- unsigned int chan_index;
- struct ltt_channel_struct *channel;
- struct ltt_trace_struct *trace, *dest_trace = NULL;
- struct rchan_buf *buf;
- void *transport_data;
- u64 tsc;
- long buf_offset;
- va_list args_copy;
- struct ltt_serialize_closure closure;
- struct ltt_probe_private_data *private_data = call_data;
- void *serialize_private = NULL;
- int cpu;
- unsigned int rflags;
-
- /*
- * This test is useful for quickly exiting static tracing when no trace
- * is active. We expect to have an active trace when we get here.
- */
- if (unlikely(ltt_traces.num_active_traces == 0))
- return;
-
- rcu_read_lock_sched_notrace();
- cpu = smp_processor_id();
-//ust// __get_cpu_var(ltt_nesting)++;
- ltt_nesting++;
-
- pdata = (struct ltt_active_marker *)probe_data;
- eID = mdata->event_id;
- chan_index = mdata->channel_id;
- closure.callbacks = pdata->probe->callbacks;
-
- if (unlikely(private_data)) {
- dest_trace = private_data->trace;
- if (private_data->serializer)
- closure.callbacks = &private_data->serializer;
- serialize_private = private_data->serialize_private;
- }
-
- va_copy(args_copy, *args);
- /*
- * Assumes event payload to start on largest_align alignment.
- */
- largest_align = 1; /* must be non-zero for ltt_align */
- data_size = ltt_get_data_size(&closure, serialize_private,
- &largest_align, fmt, &args_copy);
- largest_align = min_t(int, largest_align, sizeof(void *));
- va_end(args_copy);
-
- /* Iterate on each trace */
-	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
- /*
- * Expect the filter to filter out events. If we get here,
- * we went through tracepoint activation as a first step.
- */
- if (unlikely(dest_trace && trace != dest_trace))
- continue;
- if (unlikely(!trace->active))
- continue;
- if (unlikely(!ltt_run_filter(trace, eID)))
- continue;
-#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
- rflags = LTT_RFLAG_ID_SIZE;
-#else
- if (unlikely(eID >= LTT_FREE_EVENTS))
- rflags = LTT_RFLAG_ID;
- else
- rflags = 0;
-#endif
- /*
- * Skip channels added after trace creation.
- */
- if (unlikely(chan_index >= trace->nr_channels))
- continue;
- channel = &trace->channels[chan_index];
- if (!channel->active)
- continue;
-
- /* reserve space : header and data */
- ret = ltt_reserve_slot(trace, channel, &transport_data,
- data_size, &slot_size, &buf_offset,
- &tsc, &rflags,
- largest_align);
- if (unlikely(ret < 0))
- continue; /* buffer full */
-
- va_copy(args_copy, *args);
- /* FIXME : could probably encapsulate transport better. */
-//ust// buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
- buf = ((struct rchan *)channel->trans_channel_data)->buf;
- /* Out-of-order write : header and data */
- buf_offset = ltt_write_event_header(trace,
- channel, buf, buf_offset,
- eID, data_size, tsc, rflags);
- ltt_write_event_data(buf, buf_offset, &closure,
- serialize_private,
- largest_align, fmt, &args_copy);
- va_end(args_copy);
- /* Out-of-order commit */
- ltt_commit_slot(channel, &transport_data, buf_offset,
- slot_size);
-		printf("just committed event at offset %ld and size %zu\n", buf_offset, slot_size);
- }
-//ust// __get_cpu_var(ltt_nesting)--;
- ltt_nesting--;
- rcu_read_unlock_sched_notrace();
-}
-EXPORT_SYMBOL_GPL(ltt_vtrace);
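
To summarize the fast path above, each active trace goes through a two-pass
scheme; a sketch with names abbreviated and error handling elided:

	/* pass 1: dry-run serialization (buf == NULL) sizes the payload */
	data_size = ltt_get_data_size(&closure, priv, &largest_align, fmt, &ap);
	/* pass 2: reserve a slot, write header then payload, then publish */
	ltt_reserve_slot(trace, chan, &tdata, data_size, &slot_size,
			 &buf_offset, &tsc, &rflags, largest_align);
	buf_offset = ltt_write_event_header(trace, chan, buf, buf_offset,
					    eID, data_size, tsc, rflags);
	ltt_write_event_data(buf, buf_offset, &closure, priv,
			     largest_align, fmt, &ap);
	ltt_commit_slot(chan, &tdata, buf_offset, slot_size);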
-
-notrace void ltt_trace(const struct marker *mdata, void *probe_data,
- void *call_data, const char *fmt, ...)
-{
- va_list args;
-
- va_start(args, fmt);
- ltt_vtrace(mdata, probe_data, call_data, fmt, &args);
- va_end(args);
-}
-EXPORT_SYMBOL_GPL(ltt_trace);
-
-//ust// MODULE_LICENSE("GPL");
-//ust// MODULE_AUTHOR("Mathieu Desnoyers");
-//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Serializer");
+++ /dev/null
-/*
- * ltt/ltt-tracer.c
- *
- * (C) Copyright 2005-2008 -
- * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * Tracing management internal kernel API. Trace buffer allocation/free, tracing
- * start/stop.
- *
- * Author:
- * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * Inspired from LTT :
- * Karim Yaghmour (karim@opersys.com)
- * Tom Zanussi (zanussi@us.ibm.com)
- * Bob Wisniewski (bob@watson.ibm.com)
- * And from K42 :
- * Bob Wisniewski (bob@watson.ibm.com)
- *
- * Changelog:
- * 22/09/06, Move to the marker/probes mechanism.
- * 19/10/05, Complete lockless mechanism.
- * 27/05/05, Modular redesign and rewrite.
- */
-
-//ust// #include <linux/time.h>
-//ust// #include <linux/ltt-tracer.h>
-//ust// #include <linux/module.h>
-//ust// #include <linux/string.h>
-//ust// #include <linux/slab.h>
-//ust// #include <linux/init.h>
-//ust// #include <linux/rcupdate.h>
-//ust// #include <linux/sched.h>
-//ust// #include <linux/bitops.h>
-//ust// #include <linux/fs.h>
-//ust// #include <linux/cpu.h>
-//ust// #include <linux/kref.h>
-//ust// #include <linux/delay.h>
-//ust// #include <linux/vmalloc.h>
-//ust// #include <asm/atomic.h>
-#include "kernelcompat.h"
-#include "tracercore.h"
-#include "tracer.h"
-#include "usterr.h"
-
-//ust// static void async_wakeup(unsigned long data);
-//ust//
-//ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
-
-/* Default callbacks for modules */
-notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
- struct ltt_trace_struct *trace)
-{
- return 0;
-}
-
-int ltt_statedump_default(struct ltt_trace_struct *trace)
-{
- return 0;
-}
-
-/* Callbacks for registered modules */
-
-int (*ltt_filter_control_functor)
- (enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
- ltt_filter_control_default;
-struct module *ltt_filter_control_owner;
-
-/* These function pointers are protected by a trace activation check */
-struct module *ltt_run_filter_owner;
-int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
- ltt_statedump_default;
-struct module *ltt_statedump_owner;
-
-struct chan_info_struct {
- const char *name;
- unsigned int def_subbufsize;
- unsigned int def_subbufcount;
-} chan_infos[] = {
- [LTT_CHANNEL_METADATA] = {
- LTT_METADATA_CHANNEL,
- LTT_DEFAULT_SUBBUF_SIZE_LOW,
- LTT_DEFAULT_N_SUBBUFS_LOW,
- },
- [LTT_CHANNEL_UST] = {
- LTT_UST_CHANNEL,
- LTT_DEFAULT_SUBBUF_SIZE_HIGH,
- LTT_DEFAULT_N_SUBBUFS_HIGH,
- },
-};
-
-static enum ltt_channels get_channel_type_from_name(const char *name)
-{
- int i;
-
- if (!name)
- return LTT_CHANNEL_UST;
-
- for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
- if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
- return (enum ltt_channels)i;
-
- return LTT_CHANNEL_UST;
-}
-
-/**
- * ltt_module_register - LTT module registration
- * @name: module type
- * @function: callback to register
- * @owner: module which owns the callback
- *
- * The module calling this registration function must ensure that no
- * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
- * must be called between a vmalloc and the moment the memory is made visible to
- * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
- * the module allocates virtual memory after its registration must it
- * synchronize the TLBs.
- */
-//ust// int ltt_module_register(enum ltt_module_function name, void *function,
-//ust// struct module *owner)
-//ust// {
-//ust// int ret = 0;
-//ust//
-//ust// /*
-//ust// * Make sure no page fault can be triggered by the module about to be
-//ust// * registered. We deal with this here so we don't have to call
-//ust// * vmalloc_sync_all() in each module's init.
-//ust// */
-//ust// vmalloc_sync_all();
-//ust//
-//ust// switch (name) {
-//ust// case LTT_FUNCTION_RUN_FILTER:
-//ust// if (ltt_run_filter_owner != NULL) {
-//ust// ret = -EEXIST;
-//ust// goto end;
-//ust// }
-//ust// ltt_filter_register((ltt_run_filter_functor)function);
-//ust// ltt_run_filter_owner = owner;
-//ust// break;
-//ust// case LTT_FUNCTION_FILTER_CONTROL:
-//ust// if (ltt_filter_control_owner != NULL) {
-//ust// ret = -EEXIST;
-//ust// goto end;
-//ust// }
-//ust// ltt_filter_control_functor =
-//ust// (int (*)(enum ltt_filter_control_msg,
-//ust// struct ltt_trace_struct *))function;
-//ust// ltt_filter_control_owner = owner;
-//ust// break;
-//ust// case LTT_FUNCTION_STATEDUMP:
-//ust// if (ltt_statedump_owner != NULL) {
-//ust// ret = -EEXIST;
-//ust// goto end;
-//ust// }
-//ust// ltt_statedump_functor =
-//ust// (int (*)(struct ltt_trace_struct *))function;
-//ust// ltt_statedump_owner = owner;
-//ust// break;
-//ust// }
-//ust//
-//ust// end:
-//ust//
-//ust// return ret;
-//ust// }
-//ust// EXPORT_SYMBOL_GPL(ltt_module_register);
-
-/**
- * ltt_module_unregister - LTT module unregistration
- * @name: module type
- */
-//ust// void ltt_module_unregister(enum ltt_module_function name)
-//ust// {
-//ust// switch (name) {
-//ust// case LTT_FUNCTION_RUN_FILTER:
-//ust// ltt_filter_unregister();
-//ust// ltt_run_filter_owner = NULL;
-//ust// /* Wait for preempt sections to finish */
-//ust// synchronize_sched();
-//ust// break;
-//ust// case LTT_FUNCTION_FILTER_CONTROL:
-//ust// ltt_filter_control_functor = ltt_filter_control_default;
-//ust// ltt_filter_control_owner = NULL;
-//ust// break;
-//ust// case LTT_FUNCTION_STATEDUMP:
-//ust// ltt_statedump_functor = ltt_statedump_default;
-//ust// ltt_statedump_owner = NULL;
-//ust// break;
-//ust// }
-//ust//
-//ust// }
-//ust// EXPORT_SYMBOL_GPL(ltt_module_unregister);
-
-static LIST_HEAD(ltt_transport_list);
-
-/**
- * ltt_transport_register - LTT transport registration
- * @transport: transport structure
- *
- * Registers a transport which can be used as output to extract the data out of
- * LTTng. The module calling this registration function must ensure that no
- * trap-inducing code will be executed by the transport functions. E.g.
- * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
- * is made visible to the transport function. This registration acts as a
- * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
- * after its registration must it synchronize the TLBs.
- */
-void ltt_transport_register(struct ltt_transport *transport)
-{
- /*
- * Make sure no page fault can be triggered by the module about to be
- * registered. We deal with this here so we don't have to call
- * vmalloc_sync_all() in each module's init.
- */
-//ust// vmalloc_sync_all();
-
- ltt_lock_traces();
-	list_add_tail(&transport->node, &ltt_transport_list);
- ltt_unlock_traces();
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_transport_register);
-
-/**
- * ltt_transport_unregister - LTT transport unregistration
- * @transport: transport structure
- */
-void ltt_transport_unregister(struct ltt_transport *transport)
-{
- ltt_lock_traces();
- list_del(&transport->node);
- ltt_unlock_traces();
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_transport_unregister);
-
-static inline int is_channel_overwrite(enum ltt_channels chan,
- enum trace_mode mode)
-{
- switch (mode) {
- case LTT_TRACE_NORMAL:
- return 0;
- case LTT_TRACE_FLIGHT:
- switch (chan) {
- case LTT_CHANNEL_METADATA:
- return 0;
- default:
- return 1;
- }
- case LTT_TRACE_HYBRID:
- switch (chan) {
- case LTT_CHANNEL_METADATA:
- return 0;
- default:
- return 1;
- }
- default:
- return 0;
- }
-}
-
-/**
- * ltt_write_trace_header - Write trace header
- * @trace: Trace information
- * @header: Memory address where the information must be written to
- */
-void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
- struct ltt_subbuffer_header *header)
-{
- header->magic_number = LTT_TRACER_MAGIC_NUMBER;
- header->major_version = LTT_TRACER_VERSION_MAJOR;
- header->minor_version = LTT_TRACER_VERSION_MINOR;
- header->arch_size = sizeof(void *);
- header->alignment = ltt_get_alignment();
- header->start_time_sec = trace->start_time.tv_sec;
- header->start_time_usec = trace->start_time.tv_usec;
- header->start_freq = trace->start_freq;
- header->freq_scale = trace->freq_scale;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_write_trace_header);
-
-static void trace_async_wakeup(struct ltt_trace_struct *trace)
-{
- int i;
- struct ltt_channel_struct *chan;
-
- /* Must check each channel for pending read wakeup */
- for (i = 0; i < trace->nr_channels; i++) {
- chan = &trace->channels[i];
- if (chan->active)
- trace->ops->wakeup_channel(chan);
- }
-}
-
-//ust// /* Timer to send async wakeups to the readers */
-//ust// static void async_wakeup(unsigned long data)
-//ust// {
-//ust// struct ltt_trace_struct *trace;
-//ust//
-//ust// /*
-//ust// * PREEMPT_RT does not allow spinlocks to be taken within preempt
-//ust// * disable sections (spinlock taken in wake_up). However, mainline won't
-//ust// * allow mutex to be taken in interrupt context. Ugly.
-//ust// * A proper way to do this would be to turn the timer into a
-//ust// * periodically woken up thread, but it adds to the footprint.
-//ust// */
-//ust// #ifndef CONFIG_PREEMPT_RT
-//ust// rcu_read_lock_sched();
-//ust// #else
-//ust// ltt_lock_traces();
-//ust// #endif
-//ust//	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
-//ust// trace_async_wakeup(trace);
-//ust// }
-//ust// #ifndef CONFIG_PREEMPT_RT
-//ust// rcu_read_unlock_sched();
-//ust// #else
-//ust// ltt_unlock_traces();
-//ust// #endif
-//ust//
-//ust//	mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
-//ust// }
-
-/**
- * _ltt_trace_find - find a trace by given name.
- * @trace_name: trace name
- *
- * Returns a pointer to the trace structure, NULL if not found.
- */
-struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
-{
- struct ltt_trace_struct *trace;
-
-	list_for_each_entry(trace, &ltt_traces.head, list)
- if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
- return trace;
-
- return NULL;
-}
-
-/**
- * _ltt_trace_find_setup - find a trace in the setup list by given name.
- * @trace_name: trace name
- *
- * Returns a pointer to the trace structure, NULL if not found.
- */
-struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
-{
- struct ltt_trace_struct *trace;
-
-	list_for_each_entry(trace, &ltt_traces.setup_head, list)
- if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
- return trace;
-
- return NULL;
-}
-//ust// EXPORT_SYMBOL_GPL(_ltt_trace_find_setup);
-
-/**
- * ltt_release_transport - Release an LTT transport
- * @kref : reference count on the transport
- */
-void ltt_release_transport(struct kref *kref)
-{
- struct ltt_trace_struct *trace = container_of(kref,
- struct ltt_trace_struct, ltt_transport_kref);
-//ust// trace->ops->remove_dirs(trace);
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_release_transport);
-
-/**
- * ltt_release_trace - Release an LTT trace
- * @kref : reference count on the trace
- */
-void ltt_release_trace(struct kref *kref)
-{
- struct ltt_trace_struct *trace = container_of(kref,
- struct ltt_trace_struct, kref);
- ltt_channels_trace_free(trace->channels);
- kfree(trace);
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_release_trace);
-
-static inline void prepare_chan_size_num(unsigned int *subbuf_size,
- unsigned int *n_subbufs)
-{
- *subbuf_size = 1 << get_count_order(*subbuf_size);
- *n_subbufs = 1 << get_count_order(*n_subbufs);
-
- /* Subbuf size and number must both be power of two */
- WARN_ON(hweight32(*subbuf_size) != 1);
- WARN_ON(hweight32(*n_subbufs) != 1);
-}
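
For example (a sketch; get_count_order() returns the next power-of-two
exponent):

	unsigned int size = 3000, cnt = 3;

	prepare_chan_size_num(&size, &cnt);
	/* now size == 4096 and cnt == 4: both rounded up to powers of two */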
-
-int _ltt_trace_setup(const char *trace_name)
-{
- int err = 0;
- struct ltt_trace_struct *new_trace = NULL;
- int metadata_index;
- unsigned int chan;
- enum ltt_channels chantype;
-
- if (_ltt_trace_find_setup(trace_name)) {
- printk(KERN_ERR "LTT : Trace name %s already used.\n",
- trace_name);
- err = -EEXIST;
- goto traces_error;
- }
-
- if (_ltt_trace_find(trace_name)) {
- printk(KERN_ERR "LTT : Trace name %s already used.\n",
- trace_name);
- err = -EEXIST;
- goto traces_error;
- }
-
- new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL);
- if (!new_trace) {
- printk(KERN_ERR
- "LTT : Unable to allocate memory for trace %s\n",
- trace_name);
- err = -ENOMEM;
- goto traces_error;
- }
- strncpy(new_trace->trace_name, trace_name, NAME_MAX);
- new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
- 0, 1);
- if (!new_trace->channels) {
- printk(KERN_ERR
- "LTT : Unable to allocate memory for chaninfo %s\n",
- trace_name);
- err = -ENOMEM;
- goto trace_free;
- }
-
- /*
- * Force metadata channel to active, no overwrite.
- */
- metadata_index = ltt_channels_get_index_from_name("metadata");
- WARN_ON(metadata_index < 0);
- new_trace->channels[metadata_index].overwrite = 0;
- new_trace->channels[metadata_index].active = 1;
-
- /*
- * Set hardcoded tracer defaults for some channels
- */
- for (chan = 0; chan < new_trace->nr_channels; chan++) {
- if (!(new_trace->channels[chan].active))
- continue;
-
- chantype = get_channel_type_from_name(
- ltt_channels_get_name_from_index(chan));
- new_trace->channels[chan].subbuf_size =
- chan_infos[chantype].def_subbufsize;
- new_trace->channels[chan].subbuf_cnt =
- chan_infos[chantype].def_subbufcount;
- }
-
-	list_add(&new_trace->list, &ltt_traces.setup_head);
- return 0;
-
-trace_free:
- kfree(new_trace);
-traces_error:
- return err;
-}
-//ust// EXPORT_SYMBOL_GPL(_ltt_trace_setup);
-
-
-int ltt_trace_setup(const char *trace_name)
-{
- int ret;
- ltt_lock_traces();
- ret = _ltt_trace_setup(trace_name);
- ltt_unlock_traces();
- return ret;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_trace_setup);
-
-/* must be called from within a traces lock. */
-static void _ltt_trace_free(struct ltt_trace_struct *trace)
-{
- list_del(&trace->list);
- kfree(trace);
-}
-
-int ltt_trace_set_type(const char *trace_name, const char *trace_type)
-{
- int err = 0;
- struct ltt_trace_struct *trace;
- struct ltt_transport *tran_iter, *transport = NULL;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
-	list_for_each_entry(tran_iter, &ltt_transport_list, node) {
- if (!strcmp(tran_iter->name, trace_type)) {
- transport = tran_iter;
- break;
- }
- }
- if (!transport) {
- printk(KERN_ERR "LTT : Transport %s is not present.\n",
- trace_type);
- err = -EINVAL;
- goto traces_error;
- }
-
- trace->transport = transport;
-
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_type);
-
-int ltt_trace_set_channel_subbufsize(const char *trace_name,
- const char *channel_name, unsigned int size)
-{
- int err = 0;
- struct ltt_trace_struct *trace;
- int index;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- index = ltt_channels_get_index_from_name(channel_name);
- if (index < 0) {
- printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
- err = -ENOENT;
- goto traces_error;
- }
- trace->channels[index].subbuf_size = size;
-
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize);
-
-int ltt_trace_set_channel_subbufcount(const char *trace_name,
- const char *channel_name, unsigned int cnt)
-{
- int err = 0;
- struct ltt_trace_struct *trace;
- int index;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- index = ltt_channels_get_index_from_name(channel_name);
- if (index < 0) {
- printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
- err = -ENOENT;
- goto traces_error;
- }
- trace->channels[index].subbuf_cnt = cnt;
-
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount);
-
-int ltt_trace_set_channel_enable(const char *trace_name,
- const char *channel_name, unsigned int enable)
-{
- int err = 0;
- struct ltt_trace_struct *trace;
- int index;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- /*
-	 * Data in the metadata channel (marker info) is necessary to be
-	 * able to read the trace, so we always keep this channel enabled.
- */
- if (!enable && !strcmp(channel_name, "metadata")) {
- printk(KERN_ERR "LTT : Trying to disable metadata channel\n");
- err = -EINVAL;
- goto traces_error;
- }
-
- index = ltt_channels_get_index_from_name(channel_name);
- if (index < 0) {
- printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- trace->channels[index].active = enable;
-
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable);
-
-int ltt_trace_set_channel_overwrite(const char *trace_name,
- const char *channel_name, unsigned int overwrite)
-{
- int err = 0;
- struct ltt_trace_struct *trace;
- int index;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- /*
-	 * Always put the metadata channel in non-overwrite mode :
-	 * this is a very low-traffic channel and it cannot afford to have its
-	 * data overwritten, since this data (marker info) is necessary to be
-	 * able to read the trace.
- */
- if (overwrite && !strcmp(channel_name, "metadata")) {
- printk(KERN_ERR "LTT : Trying to set metadata channel to "
- "overwrite mode\n");
- err = -EINVAL;
- goto traces_error;
- }
-
- index = ltt_channels_get_index_from_name(channel_name);
- if (index < 0) {
- printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- trace->channels[index].overwrite = overwrite;
-
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite);
-
-int ltt_trace_alloc(const char *trace_name)
-{
- int err = 0;
- struct ltt_trace_struct *trace;
-	unsigned int subbuf_size, subbuf_cnt;
- unsigned long flags;
- int chan;
- const char *channel_name;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find_setup(trace_name);
- if (!trace) {
- printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
- err = -ENOENT;
- goto traces_error;
- }
-
- kref_init(&trace->kref);
- kref_init(&trace->ltt_transport_kref);
-//ust// init_waitqueue_head(&trace->kref_wq);
- trace->active = 0;
-//ust// get_trace_clock();
- trace->freq_scale = trace_clock_freq_scale();
-
- if (!trace->transport) {
- printk(KERN_ERR "LTT : Transport is not set.\n");
- err = -EINVAL;
- goto transport_error;
- }
-//ust// if (!try_module_get(trace->transport->owner)) {
-//ust// printk(KERN_ERR "LTT : Can't lock transport module.\n");
-//ust// err = -ENODEV;
-//ust// goto transport_error;
-//ust// }
- trace->ops = &trace->transport->ops;
-
-//ust// err = trace->ops->create_dirs(trace);
-//ust// if (err) {
-//ust// printk(KERN_ERR "LTT : Can't create dir for trace %s.\n",
-//ust// trace_name);
-//ust// goto dirs_error;
-//ust// }
-
-//ust// local_irq_save(flags);
- trace->start_freq = trace_clock_frequency();
- trace->start_tsc = trace_clock_read64();
- gettimeofday(&trace->start_time, NULL); //ust// changed
-//ust// local_irq_restore(flags);
-
- for (chan = 0; chan < trace->nr_channels; chan++) {
- if (!(trace->channels[chan].active))
- continue;
-
- channel_name = ltt_channels_get_name_from_index(chan);
- WARN_ON(!channel_name);
- subbuf_size = trace->channels[chan].subbuf_size;
- subbuf_cnt = trace->channels[chan].subbuf_cnt;
- prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
- err = trace->ops->create_channel(trace_name, trace,
- trace->dentry.trace_root,
- channel_name,
- &trace->channels[chan],
- subbuf_size,
- subbuf_cnt,
- trace->channels[chan].overwrite);
- if (err != 0) {
- printk(KERN_ERR "LTT : Can't create channel %s.\n",
- channel_name);
- goto create_channel_error;
- }
- }
-
- list_del(&trace->list);
-//ust//	if (list_empty(&ltt_traces.head)) {
-//ust//		mod_timer(&ltt_async_wakeup_timer,
-//ust// jiffies + LTT_PERCPU_TIMER_INTERVAL);
-//ust// set_kernel_trace_flag_all_tasks();
-//ust// }
-	list_add_rcu(&trace->list, &ltt_traces.head);
-//ust// synchronize_sched();
-
- ltt_unlock_traces();
-
- return 0;
-
-create_channel_error:
- for (chan--; chan >= 0; chan--)
- if (trace->channels[chan].active)
- trace->ops->remove_channel(&trace->channels[chan]);
-
-dirs_error:
-//ust// module_put(trace->transport->owner);
-transport_error:
-//ust// put_trace_clock();
-traces_error:
- ltt_unlock_traces();
- return err;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_trace_alloc);
-
-/*
- * This works as a wrapper for the current version of ltt_control.ko.
- * We will eventually replace it with a new ltt_control based on debugfs,
- * controlling each channel's buffer.
- */
-static int ltt_trace_create(const char *trace_name, const char *trace_type,
- enum trace_mode mode,
- unsigned int subbuf_size_low, unsigned int n_subbufs_low,
- unsigned int subbuf_size_med, unsigned int n_subbufs_med,
- unsigned int subbuf_size_high, unsigned int n_subbufs_high)
-{
- int err = 0;
-
- err = ltt_trace_setup(trace_name);
- if (IS_ERR_VALUE(err))
- return err;
-
- err = ltt_trace_set_type(trace_name, trace_type);
- if (IS_ERR_VALUE(err))
- return err;
-
- err = ltt_trace_alloc(trace_name);
- if (IS_ERR_VALUE(err))
- return err;
-
- return err;
-}
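
Taken together, a minimal control sequence over this API looks as follows
(a sketch; the transport name "ustrelay" is an assumption and error handling
is elided):

	ltt_trace_setup("auto");		/* add to the setup list */
	ltt_trace_set_type("auto", "ustrelay");	/* pick a registered transport */
	ltt_trace_alloc("auto");		/* create channels and buffers */
	ltt_trace_start("auto");
	/* ... instrumented code runs, markers fire ... */
	ltt_trace_stop("auto");
	ltt_trace_destroy("auto");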
-
-/* Must be called while sure that trace is in the list. */
-static int _ltt_trace_destroy(struct ltt_trace_struct *trace)
-{
- int err = -EPERM;
-
- if (trace == NULL) {
- err = -ENOENT;
- goto traces_error;
- }
- if (trace->active) {
- printk(KERN_ERR
- "LTT : Can't destroy trace %s : tracer is active\n",
- trace->trace_name);
- err = -EBUSY;
- goto active_error;
- }
- /* Everything went fine */
-//ust// list_del_rcu(&trace->list);
-//ust// synchronize_sched();
-	if (list_empty(&ltt_traces.head)) {
-//ust// clear_kernel_trace_flag_all_tasks();
- /*
- * We stop the asynchronous delivery of reader wakeup, but
- * we must make one last check for reader wakeups pending
- * later in __ltt_trace_destroy.
- */
-//ust//		del_timer_sync(&ltt_async_wakeup_timer);
- }
- return 0;
-
- /* error handling */
-active_error:
-traces_error:
- return err;
-}
-
-/* Sleepable part of the destroy */
-static void __ltt_trace_destroy(struct ltt_trace_struct *trace)
-{
- int i;
- struct ltt_channel_struct *chan;
-
- for (i = 0; i < trace->nr_channels; i++) {
- chan = &trace->channels[i];
- if (chan->active)
- trace->ops->finish_channel(chan);
- }
-
- return; /* FIXME: temporary for ust */
-//ust// flush_scheduled_work();
-
- /*
- * The currently destroyed trace is not in the trace list anymore,
- * so it's safe to call the async wakeup ourselves. It will deliver
- * the last subbuffers.
- */
- trace_async_wakeup(trace);
-
- for (i = 0; i < trace->nr_channels; i++) {
- chan = &trace->channels[i];
- if (chan->active)
- trace->ops->remove_channel(chan);
- }
-
- kref_put(&trace->ltt_transport_kref, ltt_release_transport);
-
-//ust// module_put(trace->transport->owner);
-
- /*
- * Wait for lttd readers to release the files, therefore making sure
- * the last subbuffers have been read.
- */
-//ust// if (atomic_read(&trace->kref.refcount) > 1) {
-//ust// int ret = 0;
-//ust// __wait_event_interruptible(trace->kref_wq,
-//ust// (atomic_read(&trace->kref.refcount) == 1), ret);
-//ust// }
- kref_put(&trace->kref, ltt_release_trace);
-}
-
-int ltt_trace_destroy(const char *trace_name)
-{
- int err = 0;
- struct ltt_trace_struct *trace;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find(trace_name);
- if (trace) {
- err = _ltt_trace_destroy(trace);
- if (err)
- goto error;
-
- ltt_unlock_traces();
-
- __ltt_trace_destroy(trace);
-//ust// put_trace_clock();
-
- return 0;
- }
-
- trace = _ltt_trace_find_setup(trace_name);
- if (trace) {
- _ltt_trace_free(trace);
- ltt_unlock_traces();
- return 0;
- }
-
- err = -ENOENT;
-
- /* Error handling */
-error:
- ltt_unlock_traces();
- return err;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_trace_destroy);
-
-/* must be called from within a traces lock. */
-static int _ltt_trace_start(struct ltt_trace_struct *trace)
-{
- int err = 0;
-
- if (trace == NULL) {
- err = -ENOENT;
- goto traces_error;
- }
- if (trace->active)
- printk(KERN_INFO "LTT : Tracing already active for trace %s\n",
- trace->trace_name);
-//ust// if (!try_module_get(ltt_run_filter_owner)) {
-//ust// err = -ENODEV;
-//ust// printk(KERN_ERR "LTT : Can't lock filter module.\n");
-//ust// goto get_ltt_run_filter_error;
-//ust// }
- trace->active = 1;
- /* Read by trace points without protection : be careful */
- ltt_traces.num_active_traces++;
- return err;
-
- /* error handling */
-get_ltt_run_filter_error:
-traces_error:
- return err;
-}
-
-int ltt_trace_start(const char *trace_name)
-{
- int err = 0;
- struct ltt_trace_struct *trace;
-
- ltt_lock_traces();
-
- trace = _ltt_trace_find(trace_name);
- err = _ltt_trace_start(trace);
- if (err)
- goto no_trace;
-
- ltt_unlock_traces();
-
- /*
- * Call the kernel state dump.
- * Events will be mixed with real kernel events, it's ok.
- * Notice that there is no protection on the trace : that's exactly
- * why we iterate on the list and check for trace equality instead of
- * directly using this trace handle inside the logging function.
- */
-
- ltt_dump_marker_state(trace);
-
-//ust// if (!try_module_get(ltt_statedump_owner)) {
-//ust// err = -ENODEV;
-//ust// printk(KERN_ERR
-//ust// "LTT : Can't lock state dump module.\n");
-//ust// } else {
- ltt_statedump_functor(trace);
-//ust// module_put(ltt_statedump_owner);
-//ust// }
-
- return err;
-
- /* Error handling */
-no_trace:
- ltt_unlock_traces();
- return err;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_trace_start);
-
-/* must be called from within traces lock */
-static int _ltt_trace_stop(struct ltt_trace_struct *trace)
-{
- int err = -EPERM;
-
- if (trace == NULL) {
- err = -ENOENT;
- goto traces_error;
- }
- if (!trace->active)
- printk(KERN_INFO "LTT : Tracing not active for trace %s\n",
- trace->trace_name);
- if (trace->active) {
- trace->active = 0;
- ltt_traces.num_active_traces--;
-//ust// synchronize_sched(); /* Wait for each tracing to be finished */
- }
-//ust// module_put(ltt_run_filter_owner);
- /* Everything went fine */
- return 0;
-
- /* Error handling */
-traces_error:
- return err;
-}
-
-int ltt_trace_stop(const char *trace_name)
-{
- int err = 0;
- struct ltt_trace_struct *trace;
-
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
- err = _ltt_trace_stop(trace);
- ltt_unlock_traces();
- return err;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_trace_stop);
-
-/**
- * ltt_control - Trace control in-kernel API
- * @msg: Action to perform
- * @trace_name: Trace on which the action must be done
- * @trace_type: Type of trace (normal, flight, hybrid)
- * @args: Arguments specific to the action
- */
-//ust// int ltt_control(enum ltt_control_msg msg, const char *trace_name,
-//ust// const char *trace_type, union ltt_control_args args)
-//ust// {
-//ust// int err = -EPERM;
-//ust//
-//ust// printk(KERN_ALERT "ltt_control : trace %s\n", trace_name);
-//ust// switch (msg) {
-//ust// case LTT_CONTROL_START:
-//ust// printk(KERN_DEBUG "Start tracing %s\n", trace_name);
-//ust// err = ltt_trace_start(trace_name);
-//ust// break;
-//ust// case LTT_CONTROL_STOP:
-//ust// printk(KERN_DEBUG "Stop tracing %s\n", trace_name);
-//ust// err = ltt_trace_stop(trace_name);
-//ust// break;
-//ust// case LTT_CONTROL_CREATE_TRACE:
-//ust// printk(KERN_DEBUG "Creating trace %s\n", trace_name);
-//ust// err = ltt_trace_create(trace_name, trace_type,
-//ust// args.new_trace.mode,
-//ust// args.new_trace.subbuf_size_low,
-//ust// args.new_trace.n_subbufs_low,
-//ust// args.new_trace.subbuf_size_med,
-//ust// args.new_trace.n_subbufs_med,
-//ust// args.new_trace.subbuf_size_high,
-//ust// args.new_trace.n_subbufs_high);
-//ust// break;
-//ust// case LTT_CONTROL_DESTROY_TRACE:
-//ust// printk(KERN_DEBUG "Destroying trace %s\n", trace_name);
-//ust// err = ltt_trace_destroy(trace_name);
-//ust// break;
-//ust// }
-//ust// return err;
-//ust// }
-//ust// EXPORT_SYMBOL_GPL(ltt_control);
-
-/**
- * ltt_filter_control - Trace filter control in-kernel API
- * @msg: Action to perform on the filter
- * @trace_name: Trace on which the action must be done
- */
-int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
-{
- int err;
- struct ltt_trace_struct *trace;
-
- printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name);
- ltt_lock_traces();
- trace = _ltt_trace_find(trace_name);
- if (trace == NULL) {
- printk(KERN_ALERT
- "Trace does not exist. Cannot proxy control request\n");
- err = -ENOENT;
- goto trace_error;
- }
-//ust// if (!try_module_get(ltt_filter_control_owner)) {
-//ust// err = -ENODEV;
-//ust// goto get_module_error;
-//ust// }
- switch (msg) {
- case LTT_FILTER_DEFAULT_ACCEPT:
- printk(KERN_DEBUG
- "Proxy filter default accept %s\n", trace_name);
- err = (*ltt_filter_control_functor)(msg, trace);
- break;
- case LTT_FILTER_DEFAULT_REJECT:
- printk(KERN_DEBUG
- "Proxy filter default reject %s\n", trace_name);
- err = (*ltt_filter_control_functor)(msg, trace);
- break;
- default:
- err = -EPERM;
- }
-//ust// module_put(ltt_filter_control_owner);
-
-get_module_error:
-trace_error:
- ltt_unlock_traces();
- return err;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_filter_control);
-
-//ust// int __init ltt_init(void)
-//ust// {
-//ust// /* Make sure no page fault can be triggered by this module */
-//ust// vmalloc_sync_all();
-//ust// return 0;
-//ust// }
-
-//ust// module_init(ltt_init)
-
-//ust// static void __exit ltt_exit(void)
-//ust// {
-//ust// struct ltt_trace_struct *trace;
-//ust// struct list_head *pos, *n;
-//ust//
-//ust// ltt_lock_traces();
-//ust// /* Stop each trace, currently being read by RCU read-side */
-//ust//	list_for_each_entry_rcu(trace, &ltt_traces.head, list)
-//ust// _ltt_trace_stop(trace);
-//ust// /* Wait for quiescent state. Readers have preemption disabled. */
-//ust// synchronize_sched();
-//ust// /* Safe iteration is now permitted. It does not have to be RCU-safe
-//ust// * because no readers are left. */
-//ust//	list_for_each_safe(pos, n, &ltt_traces.head) {
-//ust// trace = container_of(pos, struct ltt_trace_struct, list);
-//ust// /* _ltt_trace_destroy does a synchronize_sched() */
-//ust// _ltt_trace_destroy(trace);
-//ust// __ltt_trace_destroy(trace);
-//ust// }
-//ust// /* free traces in pre-alloc status */
-//ust//	list_for_each_safe(pos, n, &ltt_traces.setup_head) {
-//ust// trace = container_of(pos, struct ltt_trace_struct, list);
-//ust// _ltt_trace_free(trace);
-//ust// }
-//ust//
-//ust// ltt_unlock_traces();
-//ust// }
-
-//ust// module_exit(ltt_exit)
-
-//ust// MODULE_LICENSE("GPL");
-//ust// MODULE_AUTHOR("Mathieu Desnoyers");
-//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");
+++ /dev/null
-/*
- * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- * Copyright (C) 2009 Pierre-Marc Fournier
- *
- * This contains the definitions for the Linux Trace Toolkit tracer.
- *
- * Ported to userspace by Pierre-Marc Fournier.
- *
- * This file is released under the GPLv2.
- */
-
-#ifndef _LTT_TRACER_H
-#define _LTT_TRACER_H
-
-#include <sys/types.h>
-#include <stdarg.h>
-#include "relay.h"
-#include "list.h"
-#include "kernelcompat.h"
-#include "channels.h"
-#include "tracercore.h"
-#include "marker.h"
-
-/* Number of bytes to log with a read/write event */
-#define LTT_LOG_RW_SIZE 32L
-
-/* Interval (in jiffies) at which the LTT per-CPU timer fires */
-#define LTT_PERCPU_TIMER_INTERVAL 1
-
-#ifndef LTT_ARCH_TYPE
-#define LTT_ARCH_TYPE LTT_ARCH_TYPE_UNDEFINED
-#endif
-
-#ifndef LTT_ARCH_VARIANT
-#define LTT_ARCH_VARIANT LTT_ARCH_VARIANT_NONE
-#endif
-
-struct ltt_active_marker;
-
-/* Maximum number of callbacks per marker */
-#define LTT_NR_CALLBACKS 10
-
-struct ltt_serialize_closure;
-struct ltt_probe_private_data;
-
-/* Serialization callback '%k' */
-typedef size_t (*ltt_serialize_cb)(struct rchan_buf *buf, size_t buf_offset,
- struct ltt_serialize_closure *closure,
- void *serialize_private, int *largest_align,
- const char *fmt, va_list *args);
-
-struct ltt_serialize_closure {
- ltt_serialize_cb *callbacks;
- long cb_args[LTT_NR_CALLBACKS];
- unsigned int cb_idx;
-};
-
-size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
- struct ltt_serialize_closure *closure,
- void *serialize_private,
- int *largest_align, const char *fmt, va_list *args);
-
-struct ltt_available_probe {
- const char *name; /* probe name */
- const char *format;
- marker_probe_func *probe_func;
- ltt_serialize_cb callbacks[LTT_NR_CALLBACKS];
- struct list_head node; /* registered probes list */
-};
-
-struct ltt_probe_private_data {
- struct ltt_trace_struct *trace; /*
- * Target trace, for metadata
- * or statedump.
- */
- ltt_serialize_cb serializer; /*
- * Serialization function override.
- */
- void *serialize_private; /*
- * Private data for serialization
- * functions.
- */
-};
-
-enum ltt_channels {
- LTT_CHANNEL_METADATA,
- LTT_CHANNEL_UST,
-};
-
-struct ltt_active_marker {
- struct list_head node; /* active markers list */
- const char *channel;
- const char *name;
- const char *format;
- struct ltt_available_probe *probe;
-};
-
-struct marker; //ust//
-extern void ltt_vtrace(const struct marker *mdata, void *probe_data,
- void *call_data, const char *fmt, va_list *args);
-extern void ltt_trace(const struct marker *mdata, void *probe_data,
- void *call_data, const char *fmt, ...);
-
-/*
- * Unique ID assigned to each registered probe.
- */
-enum marker_id {
- MARKER_ID_SET_MARKER_ID = 0, /* Static IDs available (range 0-7) */
- MARKER_ID_SET_MARKER_FORMAT,
- MARKER_ID_COMPACT, /* Compact IDs (range: 8-127) */
- MARKER_ID_DYNAMIC, /* Dynamic IDs (range: 128-65535) */
-};
-
-/* static ids 0-1 reserved for internal use. */
-#define MARKER_CORE_IDS 2
-static inline enum marker_id marker_id_type(uint16_t id)
-{
- if (id < MARKER_CORE_IDS)
- return (enum marker_id)id;
- else
- return MARKER_ID_DYNAMIC;
-}
-
-struct user_dbg_data {
- unsigned long avail_size;
- unsigned long write;
- unsigned long read;
-};
-
-struct ltt_trace_ops {
- /* First 32 bytes cache-hot cacheline */
- int (*reserve_slot) (struct ltt_trace_struct *trace,
- struct ltt_channel_struct *channel,
- void **transport_data, size_t data_size,
- size_t *slot_size, long *buf_offset, u64 *tsc,
- unsigned int *rflags,
- int largest_align);
- void (*commit_slot) (struct ltt_channel_struct *channel,
- void **transport_data, long buf_offset,
- size_t slot_size);
- void (*wakeup_channel) (struct ltt_channel_struct *ltt_channel);
- int (*user_blocking) (struct ltt_trace_struct *trace,
- unsigned int index, size_t data_size,
- struct user_dbg_data *dbg);
- /* End of first 32 bytes cacheline */
- int (*create_dirs) (struct ltt_trace_struct *new_trace);
- void (*remove_dirs) (struct ltt_trace_struct *new_trace);
- int (*create_channel) (const char *trace_name,
- struct ltt_trace_struct *trace,
- struct dentry *dir, const char *channel_name,
- struct ltt_channel_struct *ltt_chan,
- unsigned int subbuf_size,
- unsigned int n_subbufs, int overwrite);
- void (*finish_channel) (struct ltt_channel_struct *channel);
- void (*remove_channel) (struct ltt_channel_struct *channel);
- void (*user_errors) (struct ltt_trace_struct *trace,
- unsigned int index, size_t data_size,
- struct user_dbg_data *dbg);
-} ____cacheline_aligned;
-
-struct ltt_transport {
- char *name;
- struct module *owner;
- struct list_head node;
- struct ltt_trace_ops ops;
-};
-
-enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };
-
-#define CHANNEL_FLAG_ENABLE (1U<<0)
-#define CHANNEL_FLAG_OVERWRITE (1U<<1)
-
-/* Per-trace information - each trace/flight recorder represented by one */
-struct ltt_trace_struct {
- /* First 32 bytes cache-hot cacheline */
- struct list_head list;
- struct ltt_trace_ops *ops;
- int active;
- /* Second 32 bytes cache-hot cacheline */
- struct ltt_channel_struct *channels;
- unsigned int nr_channels;
- u32 freq_scale;
- u64 start_freq;
- u64 start_tsc;
- unsigned long long start_monotonic;
- struct timeval start_time;
- struct ltt_channel_setting *settings;
- struct {
- struct dentry *trace_root;
- } dentry;
- struct kref kref; /* Each channel has a kref of the trace struct */
- struct ltt_transport *transport;
- struct kref ltt_transport_kref;
- char trace_name[NAME_MAX];
-} ____cacheline_aligned;
-
-/* Hardcoded event headers
- *
- * Event header for a trace with an active heartbeat : 27-bit timestamps.
- *
- * Headers are 32-bit aligned. In order to ensure such alignment, a dynamic
- * per-trace alignment value must be computed.
- *
- * Remember that the C compiler aligns each member on a boundary equal to its
- * own size.
- *
- * As relay subbuffers are page-aligned, they are guaranteed to be 4- and
- * 8-byte aligned, so the buffer header and trace header are aligned.
- *
- * Event headers are aligned depending on the trace alignment option.
- *
- * Note the use of C structure bitfields, for cross-endianness and
- * portability concerns.
- */
-
-#define LTT_RESERVED_EVENTS 3
-#define LTT_EVENT_BITS 5
-#define LTT_FREE_EVENTS ((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
-#define LTT_TSC_BITS 27
-#define LTT_TSC_MASK ((1 << LTT_TSC_BITS) - 1)
-
-struct ltt_event_header {
- u32 id_time; /* 5 bits event id (MSB); 27 bits time (LSB) */
-};
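
A compact header therefore decodes as shown below; these helpers are
illustrative only, not part of this patch:

static inline u16 ltt_header_id(u32 id_time)
{
	return id_time >> LTT_TSC_BITS;	/* 5 MSBs: event ID */
}

static inline u32 ltt_header_tsc(u32 id_time)
{
	return id_time & LTT_TSC_MASK;	/* 27 LSBs: truncated timestamp */
}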
-
-/* Reservation flags */
-#define LTT_RFLAG_ID (1 << 0)
-#define LTT_RFLAG_ID_SIZE (1 << 1)
-#define LTT_RFLAG_ID_SIZE_TSC (1 << 2)
-
-/*
- * We use the cpu_khz/HZ variable from asm/timex.h here : we might have to
- * deal specifically with CPU frequency scaling someday, so using an
- * interpolation between the start and end of buffer values is not flexible
- * enough. Using an immediate frequency value permits direct calculation of
- * the times for the parts of a buffer that precede a frequency change.
- *
- * Keep the natural field alignment for _each field_ within this structure if
- * you ever add/remove a field from this header. Packed attribute is not used
- * because gcc generates poor code on at least powerpc and mips. Don't ever
- * let gcc add padding between the structure elements.
- */
-struct ltt_subbuffer_header {
- uint64_t cycle_count_begin; /* Cycle count at subbuffer start */
- uint64_t cycle_count_end; /* Cycle count at subbuffer end */
- uint32_t magic_number; /*
- * Trace magic number.
- * contains endianness information.
- */
- uint8_t major_version;
- uint8_t minor_version;
- uint8_t arch_size; /* Architecture pointer size */
- uint8_t alignment; /* LTT data alignment */
- uint64_t start_time_sec; /* NTP-corrected start time */
- uint64_t start_time_usec;
- uint64_t start_freq; /*
- * Frequency at trace start,
- * used all along the trace.
- */
- uint32_t freq_scale; /* Frequency scaling (divisor) */
- uint32_t lost_size; /* Size unused at end of subbuffer */
- uint32_t buf_size; /* Size of this subbuffer */
- uint32_t events_lost; /*
- * Events lost in this subbuffer since
- * the beginning of the trace.
- * (may overflow)
- */
- uint32_t subbuf_corrupt; /*
- * Corrupted (lost) subbuffers since
-					 * the beginning of the trace.
- * (may overflow)
- */
- uint8_t header_end[0]; /* End of header */
-};
-
-/**
- * ltt_subbuffer_header_size - called on buffer-switch to a new sub-buffer
- *
- * Return header size without padding after the structure. Don't use packed
- * structure because gcc generates inefficient code on some architectures
- * (powerpc, mips..)
- */
-static inline size_t ltt_subbuffer_header_size(void)
-{
- return offsetof(struct ltt_subbuffer_header, header_end);
-}
-
-/*
- * ltt_get_header_size
- *
- * Calculate alignment offset to 32-bits. This is the alignment offset of the
- * event header.
- *
- * Important note :
- * The event header must be 32-bits. The total offset calculated here :
- *
- * Alignment of header struct on 32 bits (min arch size, header size)
- * + sizeof(header struct) (32-bits)
- * + (opt) u16 (ext. event id)
- * + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
- * + (opt) u32 (ext. event size)
- * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
- *
- * The payload must itself determine its own alignment from the biggest type it
- * contains.
- */
-static inline unsigned char ltt_get_header_size(
- struct ltt_channel_struct *channel,
- size_t offset,
- size_t data_size,
- size_t *before_hdr_pad,
- unsigned int rflags)
-{
- size_t orig_offset = offset;
- size_t padding;
-
- padding = ltt_align(offset, sizeof(struct ltt_event_header));
- offset += padding;
- offset += sizeof(struct ltt_event_header);
-
- switch (rflags) {
- case LTT_RFLAG_ID_SIZE_TSC:
- offset += sizeof(u16) + sizeof(u16);
- if (data_size >= 0xFFFFU)
- offset += sizeof(u32);
- offset += ltt_align(offset, sizeof(u64));
- offset += sizeof(u64);
- break;
- case LTT_RFLAG_ID_SIZE:
- offset += sizeof(u16) + sizeof(u16);
- if (data_size >= 0xFFFFU)
- offset += sizeof(u32);
- break;
- case LTT_RFLAG_ID:
- offset += sizeof(u16);
- break;
- }
-
- *before_hdr_pad = padding;
- return offset - orig_offset;
-}
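
Worked example, assuming the CONFIG_LTT_ALIGNMENT variant of ltt_align(): for
offset == 6, rflags == LTT_RFLAG_ID_SIZE and data_size < 0xFFFFU:

	padding = ltt_align(6, 4) = 2		/* align header on 32 bits */
	offset  = 6 + 2 + 4 + 2 + 2 = 16	/* pad, header, u16 eID, u16 size */

so the function returns 16 - 6 = 10 and sets *before_hdr_pad to 2.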
-
-/*
- * ltt_write_event_header
- *
- * Writes the event header to the offset (already aligned on 32-bits).
- *
- * @trace : trace to write to.
- * @channel : pointer to the channel structure.
- * @buf : buffer to write to.
- * @buf_offset : buffer offset to write to (aligned on 32 bits).
- * @eID : event ID
- * @event_size : size of the event, excluding the event header.
- * @tsc : time stamp counter.
- * @rflags : reservation flags.
- *
- * returns : offset where the event data must be written.
- */
-static inline size_t ltt_write_event_header(struct ltt_trace_struct *trace,
- struct ltt_channel_struct *channel,
- struct rchan_buf *buf, long buf_offset,
- u16 eID, size_t event_size,
- u64 tsc, unsigned int rflags)
-{
- struct ltt_event_header header;
- size_t small_size;
-
- switch (rflags) {
- case LTT_RFLAG_ID_SIZE_TSC:
- header.id_time = 29 << LTT_TSC_BITS;
- break;
- case LTT_RFLAG_ID_SIZE:
- header.id_time = 30 << LTT_TSC_BITS;
- break;
- case LTT_RFLAG_ID:
- header.id_time = 31 << LTT_TSC_BITS;
- break;
- default:
- header.id_time = eID << LTT_TSC_BITS;
- break;
- }
- header.id_time |= (u32)tsc & LTT_TSC_MASK;
- ltt_relay_write(buf, buf_offset, &header, sizeof(header));
- buf_offset += sizeof(header);
-
- switch (rflags) {
- case LTT_RFLAG_ID_SIZE_TSC:
- small_size = min_t(size_t, event_size, 0xFFFFU);
- ltt_relay_write(buf, buf_offset,
- (u16[]){ (u16)eID }, sizeof(u16));
- buf_offset += sizeof(u16);
- ltt_relay_write(buf, buf_offset,
- (u16[]){ (u16)small_size }, sizeof(u16));
- buf_offset += sizeof(u16);
- if (small_size == 0xFFFFU) {
- ltt_relay_write(buf, buf_offset,
- (u32[]){ (u32)event_size }, sizeof(u32));
- buf_offset += sizeof(u32);
- }
- buf_offset += ltt_align(buf_offset, sizeof(u64));
- ltt_relay_write(buf, buf_offset,
- (u64[]){ (u64)tsc }, sizeof(u64));
- buf_offset += sizeof(u64);
- break;
- case LTT_RFLAG_ID_SIZE:
- small_size = min_t(size_t, event_size, 0xFFFFU);
- ltt_relay_write(buf, buf_offset,
- (u16[]){ (u16)eID }, sizeof(u16));
- buf_offset += sizeof(u16);
- ltt_relay_write(buf, buf_offset,
- (u16[]){ (u16)small_size }, sizeof(u16));
- buf_offset += sizeof(u16);
- if (small_size == 0xFFFFU) {
- ltt_relay_write(buf, buf_offset,
- (u32[]){ (u32)event_size }, sizeof(u32));
- buf_offset += sizeof(u32);
- }
- break;
- case LTT_RFLAG_ID:
- ltt_relay_write(buf, buf_offset,
- (u16[]){ (u16)eID }, sizeof(u16));
- buf_offset += sizeof(u16);
- break;
- default:
- break;
- }
-
- return buf_offset;
-}
-
-/* Lockless LTTng */
-
-/* Buffer offset macros */
-
-/*
- * BUFFER_TRUNC zeroes the subbuffer offset and the subbuffer number parts of
- * the offset, which leaves only the buffer number.
- */
-#define BUFFER_TRUNC(offset, chan) \
- ((offset) & (~((chan)->alloc_size-1)))
-#define BUFFER_OFFSET(offset, chan) ((offset) & ((chan)->alloc_size - 1))
-#define SUBBUF_OFFSET(offset, chan) ((offset) & ((chan)->subbuf_size - 1))
-#define SUBBUF_ALIGN(offset, chan) \
- (((offset) + (chan)->subbuf_size) & (~((chan)->subbuf_size - 1)))
-#define SUBBUF_TRUNC(offset, chan) \
- ((offset) & (~((chan)->subbuf_size - 1)))
-#define SUBBUF_INDEX(offset, chan) \
- (BUFFER_OFFSET((offset), chan) >> (chan)->subbuf_size_order)
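
Worked example: with subbuf_size = 4096 (subbuf_size_order = 12) and
alloc_size = 8192 (two subbuffers), offset 12345 decomposes as:

	BUFFER_OFFSET(12345, chan) = 12345 & 8191 = 4153
	SUBBUF_INDEX(12345, chan)  = 4153 >> 12   = 1	/* second subbuffer */
	SUBBUF_OFFSET(12345, chan) = 12345 & 4095 = 57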
-
-/*
- * ltt_reserve_slot
- *
- * Atomic slot reservation in an LTTng buffer. It will take care of
- * sub-buffer switching.
- *
- * Parameters:
- *
- * @trace : the trace structure to log to.
- * @channel : the channel to reserve space into.
- * @transport_data : specific transport data.
- * @data_size : size of the variable length data to log.
- * @slot_size : pointer to total size of the slot (out)
- * @buf_offset : pointer to reserve offset (out)
- * @tsc : pointer to the tsc at the slot reservation (out)
- * @rflags : reservation flags (header specificity)
- * @largest_align : largest alignment within the event payload
- *
- * Return : -ENOSPC if not enough space, else 0.
- */
-static inline int ltt_reserve_slot(
- struct ltt_trace_struct *trace,
- struct ltt_channel_struct *channel,
- void **transport_data,
- size_t data_size,
- size_t *slot_size,
- long *buf_offset,
- u64 *tsc,
- unsigned int *rflags,
- int largest_align)
-{
- return trace->ops->reserve_slot(trace, channel, transport_data,
- data_size, slot_size, buf_offset, tsc, rflags,
- largest_align);
-}
-
-
-/*
- * ltt_commit_slot
- *
- * Atomic unordered slot commit. Increments the commit count in the
- * specified sub-buffer, and delivers it if necessary.
- *
- * Parameters:
- *
- * @channel : the channel to reserve space into.
- * @transport_data : specific transport data.
- * @buf_offset : offset of beginning of reserved slot
- * @slot_size : size of the reserved slot.
- */
-static inline void ltt_commit_slot(
- struct ltt_channel_struct *channel,
- void **transport_data,
- long buf_offset,
- size_t slot_size)
-{
- struct ltt_trace_struct *trace = channel->trace;
-
- trace->ops->commit_slot(channel, transport_data, buf_offset, slot_size);
-}
-
-/*
- * Control channels :
- * control/metadata
- * control/interrupts
- * control/...
- *
- * cpu channel :
- * cpu
- */
-
-#define LTT_METADATA_CHANNEL "metadata_state"
-#define LTT_UST_CHANNEL "ust"
-
-#define LTT_FLIGHT_PREFIX "flight-"
-
-/* Tracer properties */
-//#define LTT_DEFAULT_SUBBUF_SIZE_LOW 65536
-#define LTT_DEFAULT_SUBBUF_SIZE_LOW 4096
-#define LTT_DEFAULT_N_SUBBUFS_LOW 2
-//#define LTT_DEFAULT_SUBBUF_SIZE_MED 262144
-#define LTT_DEFAULT_SUBBUF_SIZE_MED 4096
-#define LTT_DEFAULT_N_SUBBUFS_MED 2
-//#define LTT_DEFAULT_SUBBUF_SIZE_HIGH 1048576
-#define LTT_DEFAULT_SUBBUF_SIZE_HIGH 4096
-#define LTT_DEFAULT_N_SUBBUFS_HIGH 2
-#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
-#define LTT_TRACER_VERSION_MAJOR 2
-#define LTT_TRACER_VERSION_MINOR 3
-
-/*
- * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
- * nearly full buffer. User space won't use this last amount of space when in
- * blocking mode. This space also includes the event header that would be
- * written by this user space event.
- */
-#define LTT_RESERVE_CRITICAL 4096
-
-/* Register and unregister function pointers */
-
-enum ltt_module_function {
- LTT_FUNCTION_RUN_FILTER,
- LTT_FUNCTION_FILTER_CONTROL,
- LTT_FUNCTION_STATEDUMP
-};
-
-void ltt_transport_register(struct ltt_transport *transport);
-void ltt_transport_unregister(struct ltt_transport *transport);
-
-/* Exported control function */
-
-union ltt_control_args {
- struct {
- enum trace_mode mode;
- unsigned int subbuf_size_low;
- unsigned int n_subbufs_low;
- unsigned int subbuf_size_med;
- unsigned int n_subbufs_med;
- unsigned int subbuf_size_high;
- unsigned int n_subbufs_high;
- } new_trace;
-};
-
-int _ltt_trace_setup(const char *trace_name);
-int ltt_trace_setup(const char *trace_name);
-struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name);
-int ltt_trace_set_type(const char *trace_name, const char *trace_type);
-int ltt_trace_set_channel_subbufsize(const char *trace_name,
- const char *channel_name, unsigned int size);
-int ltt_trace_set_channel_subbufcount(const char *trace_name,
- const char *channel_name, unsigned int cnt);
-int ltt_trace_set_channel_enable(const char *trace_name,
- const char *channel_name, unsigned int enable);
-int ltt_trace_set_channel_overwrite(const char *trace_name,
- const char *channel_name, unsigned int overwrite);
-int ltt_trace_alloc(const char *trace_name);
-int ltt_trace_destroy(const char *trace_name);
-int ltt_trace_start(const char *trace_name);
-int ltt_trace_stop(const char *trace_name);
-
-enum ltt_filter_control_msg {
- LTT_FILTER_DEFAULT_ACCEPT,
- LTT_FILTER_DEFAULT_REJECT
-};
-
-extern int ltt_filter_control(enum ltt_filter_control_msg msg,
- const char *trace_name);
-
-extern struct dentry *get_filter_root(void);
-
-void ltt_write_trace_header(struct ltt_trace_struct *trace,
- struct ltt_subbuffer_header *header);
-extern void ltt_buffer_destroy(struct ltt_channel_struct *ltt_chan);
-
-void ltt_core_register(int (*function)(u8, void *));
-
-void ltt_core_unregister(void);
-
-void ltt_release_trace(struct kref *kref);
-void ltt_release_transport(struct kref *kref);
-
-extern int ltt_probe_register(struct ltt_available_probe *pdata);
-extern int ltt_probe_unregister(struct ltt_available_probe *pdata);
-extern int ltt_marker_connect(const char *channel, const char *mname,
- const char *pname);
-extern int ltt_marker_disconnect(const char *channel, const char *mname,
- const char *pname);
-extern void ltt_dump_marker_state(struct ltt_trace_struct *trace);
-
-void ltt_lock_traces(void);
-void ltt_unlock_traces(void);
-
-struct ltt_trace_struct *_ltt_trace_find(const char *trace_name);
-
-#endif /* _LTT_TRACER_H */
+++ /dev/null
-/*
- * LTT core in-kernel infrastructure.
- *
- * Copyright 2006 - Mathieu Desnoyers mathieu.desnoyers@polymtl.ca
- *
- * Distributed under the GPL license
- */
-
-//ust// #include <linux/ltt-core.h>
-//ust// #include <linux/percpu.h>
-//ust// #include <linux/module.h>
-//ust// #include <linux/debugfs.h>
-#include "kernelcompat.h"
-#include "tracercore.h"
-
-/* Traces structures */
-struct ltt_traces ltt_traces = {
- .setup_head = LIST_HEAD_INIT(ltt_traces.setup_head),
- .head = LIST_HEAD_INIT(ltt_traces.head),
-};
-//ust// EXPORT_SYMBOL(ltt_traces);
-
-/* Traces list writer locking */
-static DEFINE_MUTEX(ltt_traces_mutex);
-
-/* dentry of ltt's root dir */
-//ust// static struct dentry *ltt_root_dentry;
-//ust// struct dentry *get_ltt_root(void)
-//ust// {
-//ust// if (!ltt_root_dentry) {
-//ust// ltt_root_dentry = debugfs_create_dir(LTT_ROOT, NULL);
-//ust// if (!ltt_root_dentry)
-//ust// printk(KERN_ERR "LTT : create ltt root dir failed\n");
-//ust// }
-//ust// return ltt_root_dentry;
-//ust// }
-//ust// EXPORT_SYMBOL_GPL(get_ltt_root);
-
-void ltt_lock_traces(void)
-{
-	mutex_lock(&ltt_traces_mutex);
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_lock_traces);
-
-void ltt_unlock_traces(void)
-{
-	mutex_unlock(&ltt_traces_mutex);
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_unlock_traces);
-
-//ust// DEFINE_PER_CPU(unsigned int, ltt_nesting);
-//ust// EXPORT_PER_CPU_SYMBOL(ltt_nesting);
-unsigned int ltt_nesting;
-
-int ltt_run_filter_default(void *trace, uint16_t eID)
-{
- return 1;
-}
-
-/* This function pointer is protected by a trace activation check */
-ltt_run_filter_functor ltt_run_filter = ltt_run_filter_default;
-//ust// EXPORT_SYMBOL_GPL(ltt_run_filter);
-
-void ltt_filter_register(ltt_run_filter_functor func)
-{
- ltt_run_filter = func;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_filter_register);
-
-void ltt_filter_unregister(void)
-{
- ltt_run_filter = ltt_run_filter_default;
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_filter_unregister);
+++ /dev/null
-/*
- * Copyright (C) 2005,2006 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * This contains the core definitions for the Linux Trace Toolkit.
- */
-
-#ifndef LTT_CORE_H
-#define LTT_CORE_H
-
-#include "list.h"
-#include "kernelcompat.h"
-//ust// #include <linux/percpu.h>
-
-/* ltt's root dir in debugfs */
-#define LTT_ROOT "ltt"
-
-/*
- * All modifications of ltt_traces must be done by ltt-tracer.c, while holding
- * the semaphore. Only reading of this information can be done elsewhere, with
- * the RCU mechanism : preemption must be disabled while reading the
- * list.
- */
-struct ltt_traces {
- struct list_head setup_head; /* Pre-allocated traces list */
- struct list_head head; /* Allocated Traces list */
- unsigned int num_active_traces; /* Number of active traces */
-} ____cacheline_aligned;
-
-extern struct ltt_traces ltt_traces;
-
-/*
- * get dentry of ltt's root dir
- */
-struct dentry *get_ltt_root(void);
-
-/* Keep track of trap nesting inside LTT */
-//ust// DECLARE_PER_CPU(unsigned int, ltt_nesting);
-extern unsigned int ltt_nesting;
-
-typedef int (*ltt_run_filter_functor)(void *trace, uint16_t eID);
-//typedef int (*ltt_run_filter_functor)(void *, __u16);
-
-extern ltt_run_filter_functor ltt_run_filter;
-
-extern void ltt_filter_register(ltt_run_filter_functor func);
-extern void ltt_filter_unregister(void);
-
-#if defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT)
-
-/*
- * Calculate the offset needed to align the type.
- * size_of_type must be non-zero.
- */
-static inline unsigned int ltt_align(size_t align_drift, size_t size_of_type)
-{
- size_t alignment = min(sizeof(void *), size_of_type);
- return (alignment - align_drift) & (alignment - 1);
-}
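
Example: on a 64-bit host, ltt_align(5, 4) = (4 - 5) & 3 = 3, so a 4-byte
value written at offset 5 is preceded by 3 bytes of padding.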
-/* Default arch alignment */
-#define LTT_ALIGN
-
-static inline int ltt_get_alignment(void)
-{
- return sizeof(void *);
-}
-
-#else
-
-static inline unsigned int ltt_align(size_t align_drift,
- size_t size_of_type)
-{
- return 0;
-}
-
-#define LTT_ALIGN __attribute__((packed))
-
-static inline int ltt_get_alignment(void)
-{
- return 0;
-}
-#endif /* defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT) */
-
-#endif /* LTT_CORE_H */
--- /dev/null
+lib_LTLIBRARIES = libust.la
+libust_la_SOURCES = marker.c marker.h tracepoint.c tracepoint.h immediate.h channels.c channels.h marker-control.c marker-control.h relay.c relay.h tracer.c tracer.h tracercore.c tracercore.h serialize.c tracectl.c $(top_builddir)/libustcomm/ustcomm.c $(top_builddir)/share/kref.c $(top_builddir)/share/usterr.c
+libust_la_LIBADD = @URCU_PATH@/liburcu.so
+libust_la_LDFLAGS = -lpthread
+
+INCLUDES = -I$(top_builddir)/share
+INCLUDES += -I$(top_builddir)/libustcomm
+INCLUDES += -I@URCU_PATH@
+INCLUDES += -I@KCOMPAT_PATH@
--- /dev/null
+/*
+ * ltt/ltt-channels.c
+ *
+ * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * LTTng channel management.
+ *
+ * Author:
+ * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ */
+
+//ust// #include <linux/module.h>
+//ust// #include <linux/ltt-channels.h>
+//ust// #include <linux/mutex.h>
+//ust// #include <linux/vmalloc.h>
+
+#include "kernelcompat.h"
+#include "channels.h"
+#include "usterr.h"
+
+/*
+ * ltt_channel_mutex may be nested inside the LTT trace mutex.
+ * It may also be nested inside the markers mutex.
+ */
+static DEFINE_MUTEX(ltt_channel_mutex);
+static LIST_HEAD(ltt_channels);
+/*
+ * Index of next channel in array. Makes sure that as long as a trace channel is
+ * allocated, no array index will be re-used when a channel is freed and then
+ * another channel is allocated. This index is cleared and the array indexes
+ * get reassigned when the index_kref goes back to 0, which indicates that no
+ * more trace channels are allocated.
+ */
+static unsigned int free_index;
+static struct kref index_kref; /* Keeps track of allocated trace channels */
+
+static struct ltt_channel_setting *lookup_channel(const char *name)
+{
+ struct ltt_channel_setting *iter;
+
+ list_for_each_entry(iter, <t_channels, list)
+ if (strcmp(name, iter->name) == 0)
+ return iter;
+ return NULL;
+}
+
+/*
+ * Must be called when channel refcount falls to 0 _and_ also when the last
+ * trace is freed. This function is responsible for compacting the channel and
+ * event IDs when no users are active.
+ *
+ * Called with lock_markers() and channels mutex held.
+ */
+static void release_channel_setting(struct kref *kref)
+{
+ struct ltt_channel_setting *setting = container_of(kref,
+ struct ltt_channel_setting, kref);
+ struct ltt_channel_setting *iter;
+
+ if (atomic_read(&index_kref.refcount) == 0
+ && atomic_read(&setting->kref.refcount) == 0) {
+ list_del(&setting->list);
+ kfree(setting);
+
+ free_index = 0;
+ list_for_each_entry(iter, <t_channels, list) {
+ iter->index = free_index++;
+ iter->free_event_id = 0;
+ }
+//ust// markers_compact_event_ids();
+ }
+}
+
+/*
+ * Perform channel index compaction when the last trace channel is freed.
+ *
+ * Called with lock_markers() and channels mutex held.
+ */
+static void release_trace_channel(struct kref *kref)
+{
+ struct ltt_channel_setting *iter, *n;
+
+ list_for_each_entry_safe(iter, n, <t_channels, list)
+ release_channel_setting(&iter->kref);
+}
+
+/**
+ * ltt_channels_register - Register a trace channel.
+ * @name: channel name
+ *
+ * Uses refcounting.
+ */
+int ltt_channels_register(const char *name)
+{
+ struct ltt_channel_setting *setting;
+ int ret = 0;
+
+ mutex_lock(<t_channel_mutex);
+ setting = lookup_channel(name);
+ if (setting) {
+ if (atomic_read(&setting->kref.refcount) == 0)
+ goto init_kref;
+ else {
+ kref_get(&setting->kref);
+ goto end;
+ }
+ }
+ setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ if (!setting) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ list_add(&setting->list, <t_channels);
+ strncpy(setting->name, name, PATH_MAX-1);
+ setting->index = free_index++;
+init_kref:
+ kref_init(&setting->kref);
+end:
+ mutex_unlock(<t_channel_mutex);
+ return ret;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_channels_register);
+
+/**
+ * ltt_channels_unregister - Unregister a trace channel.
+ * @name: channel name
+ *
+ * Must be called with markers mutex held.
+ */
+int ltt_channels_unregister(const char *name)
+{
+ struct ltt_channel_setting *setting;
+ int ret = 0;
+
+ mutex_lock(<t_channel_mutex);
+ setting = lookup_channel(name);
+ if (!setting || atomic_read(&setting->kref.refcount) == 0) {
+ ret = -ENOENT;
+ goto end;
+ }
+ kref_put(&setting->kref, release_channel_setting);
+end:
+ mutex_unlock(<t_channel_mutex);
+ return ret;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);
+
+/**
+ * ltt_channels_set_default - Set channel default behavior.
+ * @name: default channel name
+ * @subbuf_size: size of the subbuffers
+ * @subbuf_cnt: number of subbuffers
+ */
+int ltt_channels_set_default(const char *name,
+ unsigned int subbuf_size,
+ unsigned int subbuf_cnt)
+{
+ struct ltt_channel_setting *setting;
+ int ret = 0;
+
+ mutex_lock(<t_channel_mutex);
+ setting = lookup_channel(name);
+ if (!setting || atomic_read(&setting->kref.refcount) == 0) {
+ ret = -ENOENT;
+ goto end;
+ }
+ setting->subbuf_size = subbuf_size;
+ setting->subbuf_cnt = subbuf_cnt;
+end:
+ mutex_unlock(<t_channel_mutex);
+ return ret;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);
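+
+/*
+ * Usage sketch (hypothetical caller, illustration only; the channel
+ * name and buffer geometry below are made up):
+ *
+ *	if (!ltt_channels_register("cpu"))
+ *		ltt_channels_set_default("cpu", 4096, 8);
+ *	...
+ *	ltt_channels_unregister("cpu");
+ */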
+
+/**
+ * ltt_channels_get_name_from_index - get channel name from channel index
+ * @index: channel index
+ *
+ * Allows looking up the channel name from its index. Done to keep the name
+ * information outside of each trace channel instance.
+ */
+const char *ltt_channels_get_name_from_index(unsigned int index)
+{
+ struct ltt_channel_setting *iter;
+
+ list_for_each_entry(iter, <t_channels, list)
+ if (iter->index == index && atomic_read(&iter->kref.refcount))
+ return iter->name;
+ return NULL;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);
+
+static struct ltt_channel_setting *
+ltt_channels_get_setting_from_name(const char *name)
+{
+ struct ltt_channel_setting *iter;
+
+ list_for_each_entry(iter, <t_channels, list)
+ if (!strcmp(iter->name, name)
+ && atomic_read(&iter->kref.refcount))
+ return iter;
+ return NULL;
+}
+
+/**
+ * ltt_channels_get_index_from_name - get channel index from channel name
+ * @name: channel name
+ *
+ * Allows looking up the channel index from its name. Done to keep the name
+ * information outside of each trace channel instance.
+ * Returns -1 if not found.
+ */
+int ltt_channels_get_index_from_name(const char *name)
+{
+ struct ltt_channel_setting *setting;
+
+ setting = ltt_channels_get_setting_from_name(name);
+ if (setting)
+ return setting->index;
+ else
+ return -1;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
+
+/**
+ * ltt_channels_trace_alloc - Allocate channel structures for a trace
+ * @nr_channels: out parameter, set to the number of channels allocated
+ * @overwrite: default overwrite mode for the channels
+ * @active: whether the channels start out active
+ *
+ * Use the current channel list to allocate the channels for a trace.
+ * Called with trace lock held. Does not perform the trace buffer allocation,
+ * because we must let the user overwrite specific channel sizes.
+ */
+struct ltt_channel_struct *ltt_channels_trace_alloc(unsigned int *nr_channels,
+ int overwrite,
+ int active)
+{
+ struct ltt_channel_struct *channel = NULL;
+ struct ltt_channel_setting *iter;
+
+ mutex_lock(<t_channel_mutex);
+ if (!free_index) {
+ WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
+ goto end;
+ }
+ if (!atomic_read(&index_kref.refcount))
+ kref_init(&index_kref);
+ else
+ kref_get(&index_kref);
+ *nr_channels = free_index;
+ channel = kzalloc(sizeof(struct ltt_channel_struct) * free_index,
+ GFP_KERNEL);
+ if (!channel) {
+		WARN("ltt_channels_trace_alloc: channel array allocation failed");
+ goto end;
+ }
+ list_for_each_entry(iter, <t_channels, list) {
+ if (!atomic_read(&iter->kref.refcount))
+ continue;
+ channel[iter->index].subbuf_size = iter->subbuf_size;
+ channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
+ channel[iter->index].overwrite = overwrite;
+ channel[iter->index].active = active;
+ channel[iter->index].channel_name = iter->name;
+ }
+end:
+ mutex_unlock(<t_channel_mutex);
+ return channel;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
+
+/**
+ * ltt_channels_trace_free - Free one trace's channels
+ * @channels: channels to free
+ *
+ * Called with trace lock held. The actual channel buffers must be freed before
+ * this function is called.
+ */
+void ltt_channels_trace_free(struct ltt_channel_struct *channels)
+{
+ lock_markers();
+ mutex_lock(<t_channel_mutex);
+ kfree(channels);
+ kref_put(&index_kref, release_trace_channel);
+ mutex_unlock(<t_channel_mutex);
+ unlock_markers();
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
+
+/**
+ * _ltt_channels_get_event_id - get next event ID for a marker
+ * @channel: channel name
+ * @name: event name
+ *
+ * Returns a unique event ID (for this channel) or < 0 on error.
+ * Must be called with channels mutex held.
+ */
+int _ltt_channels_get_event_id(const char *channel, const char *name)
+{
+ struct ltt_channel_setting *setting;
+ int ret;
+
+ setting = ltt_channels_get_setting_from_name(channel);
+ if (!setting) {
+ ret = -ENOENT;
+ goto end;
+ }
+ if (strcmp(channel, "metadata") == 0) {
+ if (strcmp(name, "core_marker_id") == 0)
+ ret = 0;
+ else if (strcmp(name, "core_marker_format") == 0)
+ ret = 1;
+ else if (strcmp(name, "testev") == 0)
+ ret = 2;
+ else
+ ret = -ENOENT;
+ goto end;
+ }
+ if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
+ ret = -ENOSPC;
+ goto end;
+ }
+ ret = setting->free_event_id++;
+end:
+ return ret;
+}
+
+/**
+ * ltt_channels_get_event_id - get next event ID for a marker
+ * @channel: channel name
+ * @name: event name
+ *
+ * Returns a unique event ID (for this channel) or < 0 on error.
+ */
+int ltt_channels_get_event_id(const char *channel, const char *name)
+{
+ int ret;
+
+ mutex_lock(<t_channel_mutex);
+ ret = _ltt_channels_get_event_id(channel, name);
+ mutex_unlock(<t_channel_mutex);
+ return ret;
+}
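+
+/*
+ * Allocation sketch (illustrative only): on the "metadata" channel the
+ * IDs are fixed; on any other registered channel successive calls hand
+ * out increasing IDs until the per-channel space runs out.
+ *
+ *	int id = ltt_channels_get_event_id("cpu", "my_event");
+ *	if (id < 0)
+ *		...;	-ENOENT: unknown channel, -ENOSPC: IDs exhausted
+ */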
+
+//ust// MODULE_LICENSE("GPL");
+//ust// MODULE_AUTHOR("Mathieu Desnoyers");
+//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");
--- /dev/null
+#ifndef _LTT_CHANNELS_H
+#define _LTT_CHANNELS_H
+
+/*
+ * Copyright (C) 2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * Dynamic tracer channel allocation.
+ */
+
+#include <linux/limits.h>
+//ust// #include <linux/kref.h>
+//ust// #include <linux/list.h>
+#include <errno.h>
+
+#include "kernelcompat.h"
+#include "kref.h"
+#include "list.h"
+
+#define EVENTS_PER_CHANNEL 65536
+
+struct ltt_trace_struct;
+struct rchan_buf;
+
+struct ltt_channel_struct {
+ /* First 32 bytes cache-hot cacheline */
+ struct ltt_trace_struct *trace;
+ void *buf;
+ void *trans_channel_data;
+ int overwrite:1;
+ int active:1;
+ unsigned int n_subbufs_order;
+ unsigned long commit_count_mask; /*
+ * Commit count mask, removing
+ * the MSBs corresponding to
+ * bits used to represent the
+ * subbuffer index.
+ */
+ /* End of first 32 bytes cacheline */
+
+ /*
+ * buffer_begin - called on buffer-switch to a new sub-buffer
+ * @buf: the channel buffer containing the new sub-buffer
+ */
+ void (*buffer_begin) (struct rchan_buf *buf,
+ u64 tsc, unsigned int subbuf_idx);
+ /*
+ * buffer_end - called on buffer-switch to a new sub-buffer
+ * @buf: the channel buffer containing the previous sub-buffer
+ */
+ void (*buffer_end) (struct rchan_buf *buf,
+ u64 tsc, unsigned int offset, unsigned int subbuf_idx);
+ struct kref kref; /* Channel transport reference count */
+ unsigned int subbuf_size;
+ unsigned int subbuf_cnt;
+ const char *channel_name;
+
+ int buf_shmid;
+} ____cacheline_aligned;
+
+struct ltt_channel_setting {
+ unsigned int subbuf_size;
+ unsigned int subbuf_cnt;
+ struct kref kref; /* Number of references to structure content */
+ struct list_head list;
+ unsigned int index; /* index of channel in trace channel array */
+ u16 free_event_id; /* Next event ID to allocate */
+ char name[PATH_MAX];
+};
+
+int ltt_channels_register(const char *name);
+int ltt_channels_unregister(const char *name);
+int ltt_channels_set_default(const char *name,
+ unsigned int subbuf_size,
+ unsigned int subbuf_cnt);
+const char *ltt_channels_get_name_from_index(unsigned int index);
+int ltt_channels_get_index_from_name(const char *name);
+struct ltt_channel_struct *ltt_channels_trace_alloc(unsigned int *nr_channels,
+ int overwrite,
+ int active);
+void ltt_channels_trace_free(struct ltt_channel_struct *channels);
+int _ltt_channels_get_event_id(const char *channel, const char *name);
+int ltt_channels_get_event_id(const char *channel, const char *name);
+
+#endif /* _LTT_CHANNELS_H */
--- /dev/null
+#ifndef _LINUX_IMMEDIATE_H
+#define _LINUX_IMMEDIATE_H
+
+/*
+ * Immediate values, can be updated at runtime and save cache lines.
+ *
+ * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#ifdef USE_IMMEDIATE
+
+#include <asm/immediate.h>
+
+/**
+ * imv_set - set immediate variable (with locking)
+ * @name: immediate value name
+ * @i: required value
+ *
+ * Sets the value of @name, taking the module_mutex if required by
+ * the architecture.
+ */
+#define imv_set(name, i) \
+ do { \
+ name##__imv = (i); \
+ core_imv_update(); \
+ module_imv_update(); \
+ } while (0)
+
+/*
+ * Internal update functions.
+ */
+extern void core_imv_update(void);
+extern void imv_update_range(const struct __imv *begin,
+ const struct __imv *end);
+extern void imv_unref_core_init(void);
+extern void imv_unref(struct __imv *begin, struct __imv *end, void *start,
+ unsigned long size);
+
+#else
+
+/*
+ * Generic immediate values: a simple, standard, memory load.
+ */
+
+/**
+ * imv_read - read immediate variable
+ * @name: immediate value name
+ *
+ * Reads the value of @name.
+ */
+#define imv_read(name) _imv_read(name)
+
+/**
+ * imv_set - set immediate variable (with locking)
+ * @name: immediate value name
+ * @i: required value
+ *
+ * Sets the value of @name, taking the module_mutex if required by
+ * the architecture.
+ */
+#define imv_set(name, i) (name##__imv = (i))
+
+static inline void core_imv_update(void) { }
+static inline void imv_unref_core_init(void) { }
+
+#endif
+
+#define DECLARE_IMV(type, name) extern __typeof__(type) name##__imv
+#define DEFINE_IMV(type, name) __typeof__(type) name##__imv
+
+#define EXPORT_IMV_SYMBOL(name) EXPORT_SYMBOL(name##__imv)
+#define EXPORT_IMV_SYMBOL_GPL(name) EXPORT_SYMBOL_GPL(name##__imv)
+
+/**
+ * _imv_read - Read immediate value with standard memory load.
+ * @name: immediate value name
+ *
+ * Force a data read of the immediate value instead of the immediate value
+ * based mechanism. Useful for __init and __exit section data read.
+ */
+#define _imv_read(name) (name##__imv)
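+
+/*
+ * Usage sketch (hypothetical variable, illustration only). Without
+ * USE_IMMEDIATE these macros reduce to plain loads and stores of the
+ * backing variable:
+ *
+ *	DEFINE_IMV(char, tracing_on);	defines tracing_on__imv
+ *	imv_set(tracing_on, 1);
+ *	if (imv_read(tracing_on))
+ *		...;
+ */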
+
+#endif
--- /dev/null
+#include "usterr.h"
--- /dev/null
+/*
+ * Copyright (C) 2007 Mathieu Desnoyers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * LTT marker control module over /proc
+ */
+
+//ust// #include <linux/proc_fs.h>
+//ust// #include <linux/module.h>
+//ust// #include <linux/stat.h>
+//ust// #include <linux/vmalloc.h>
+//ust// #include <linux/marker.h>
+//ust// #include <linux/ltt-tracer.h>
+//ust// #include <linux/uaccess.h>
+//ust// #include <linux/string.h>
+//ust// #include <linux/ctype.h>
+//ust// #include <linux/list.h>
+//ust// #include <linux/mutex.h>
+//ust// #include <linux/seq_file.h>
+//ust// #include <linux/slab.h>
+#include "kernelcompat.h"
+#include "list.h"
+#include "tracer.h"
+#include "localerr.h"
+
+#define DEFAULT_CHANNEL "cpu"
+#define DEFAULT_PROBE "default"
+
+LIST_HEAD(probes_list);
+
+/*
+ * Mutex protecting the probe slab cache.
+ * Nests inside the traces mutex.
+ */
+DEFINE_MUTEX(probes_mutex);
+
+struct ltt_available_probe default_probe = {
+ .name = "default",
+ .format = NULL,
+ .probe_func = ltt_vtrace,
+ .callbacks[0] = ltt_serialize_data,
+};
+
+//ust//static struct kmem_cache *markers_loaded_cachep;
+static LIST_HEAD(markers_loaded_list);
+/*
+ * List sorted by name strcmp order.
+ */
+static LIST_HEAD(probes_registered_list);
+
+//ust// static struct proc_dir_entry *pentry;
+
+//ust// static struct file_operations ltt_fops;
+
+static struct ltt_available_probe *get_probe_from_name(const char *pname)
+{
+ struct ltt_available_probe *iter;
+ int comparison, found = 0;
+
+ if (!pname)
+ pname = DEFAULT_PROBE;
+ list_for_each_entry(iter, &probes_registered_list, node) {
+ comparison = strcmp(pname, iter->name);
+ if (!comparison)
+ found = 1;
+ if (comparison <= 0)
+ break;
+ }
+ if (found)
+ return iter;
+ else
+ return NULL;
+}
+
+static char *skip_spaces(char *buf)
+{
+ while (*buf != '\0' && isspace(*buf))
+ buf++;
+ return buf;
+}
+
+static char *skip_nonspaces(char *buf)
+{
+ while (*buf != '\0' && !isspace(*buf))
+ buf++;
+ return buf;
+}
+
+static void get_marker_string(char *buf, char **start,
+ char **end)
+{
+ *start = skip_spaces(buf);
+ *end = skip_nonspaces(*start);
+ **end = '\0';
+}
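+
+/*
+ * Illustration: for buf = " connect cpu foo", get_marker_string() sets
+ * *start to "connect" (terminated in place) and leaves *end on the
+ * '\0' that replaced the space following it.
+ */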
+
+int ltt_probe_register(struct ltt_available_probe *pdata)
+{
+ int ret = 0;
+ int comparison;
+ struct ltt_available_probe *iter;
+
+ mutex_lock(&probes_mutex);
+ list_for_each_entry_reverse(iter, &probes_registered_list, node) {
+ comparison = strcmp(pdata->name, iter->name);
+ if (!comparison) {
+ ret = -EBUSY;
+ goto end;
+ } else if (comparison > 0) {
+ /* We belong to the location right after iter. */
+ list_add(&pdata->node, &iter->node);
+ goto end;
+ }
+ }
+ /* Should be added at the head of the list */
+ list_add(&pdata->node, &probes_registered_list);
+end:
+ mutex_unlock(&probes_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ltt_probe_register);
+
+/*
+ * Called when a probe does not want to be called anymore.
+ */
+int ltt_probe_unregister(struct ltt_available_probe *pdata)
+{
+ int ret = 0;
+ struct ltt_active_marker *amark, *tmp;
+
+ mutex_lock(&probes_mutex);
+ list_for_each_entry_safe(amark, tmp, &markers_loaded_list, node) {
+ if (amark->probe == pdata) {
+ ret = marker_probe_unregister_private_data(
+ pdata->probe_func, amark);
+ if (ret)
+ goto end;
+ list_del(&amark->node);
+ free(amark);
+ }
+ }
+ list_del(&pdata->node);
+end:
+ mutex_unlock(&probes_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ltt_probe_unregister);
+
+/*
+ * Connect marker "mname" to probe "pname".
+ * Only allow one probe instance to be connected to a given marker.
+ */
+int ltt_marker_connect(const char *channel, const char *mname,
+ const char *pname)
+
+{
+ int ret;
+ struct ltt_active_marker *pdata;
+ struct ltt_available_probe *probe;
+
+ ltt_lock_traces();
+ mutex_lock(&probes_mutex);
+ probe = get_probe_from_name(pname);
+ if (!probe) {
+ ret = -ENOENT;
+ goto end;
+ }
+ pdata = marker_get_private_data(channel, mname, probe->probe_func, 0);
+ if (pdata && !IS_ERR(pdata)) {
+ ret = -EEXIST;
+ goto end;
+ }
+ pdata = zmalloc(sizeof(struct ltt_active_marker));
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ pdata->probe = probe;
+ /*
+ * ID has priority over channel in case of conflict.
+ */
+ ret = marker_probe_register(channel, mname, NULL,
+ probe->probe_func, pdata);
+ if (ret)
+ free(pdata);
+ else
+ list_add(&pdata->node, &markers_loaded_list);
+end:
+ mutex_unlock(&probes_mutex);
+ ltt_unlock_traces();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ltt_marker_connect);
+
+/*
+ * Disconnect marker "mname", probe "pname".
+ */
+int ltt_marker_disconnect(const char *channel, const char *mname,
+ const char *pname)
+{
+ struct ltt_active_marker *pdata;
+ struct ltt_available_probe *probe;
+ int ret = 0;
+
+ mutex_lock(&probes_mutex);
+ probe = get_probe_from_name(pname);
+ if (!probe) {
+ ret = -ENOENT;
+ goto end;
+ }
+ pdata = marker_get_private_data(channel, mname, probe->probe_func, 0);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto end;
+ } else if (!pdata) {
+ /*
+ * Not registered by us.
+ */
+ ret = -EPERM;
+ goto end;
+ }
+ ret = marker_probe_unregister(channel, mname, probe->probe_func, pdata);
+ if (ret)
+ goto end;
+ else {
+ list_del(&pdata->node);
+ free(pdata);
+ }
+end:
+ mutex_unlock(&probes_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ltt_marker_disconnect);
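+
+/*
+ * Example (hypothetical channel/marker names, illustration only):
+ * attaching the built-in "default" probe to a marker, then detaching it.
+ *
+ *	ret = ltt_marker_connect("ust", "my_event", "default");
+ *	...
+ *	ret = ltt_marker_disconnect("ust", "my_event", "default");
+ */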
+
+/*
+ * Function handling proc entry writes.
+ *
+ * connect <channel name> <marker name> [<probe name>]
+ * disconnect <channel name> <marker name> [<probe name>]
+ */
+//ust// static ssize_t ltt_write(struct file *file, const char __user *buffer,
+//ust// size_t count, loff_t *offset)
+//ust// {
+//ust// char *kbuf;
+//ust// char *iter, *marker_action, *arg[4];
+//ust// ssize_t ret;
+//ust// int i;
+//ust//
+//ust// if (!count)
+//ust// return -EINVAL;
+//ust//
+//ust// kbuf = vmalloc(count + 1);
+//ust// kbuf[count] = '\0'; /* Transform into a string */
+//ust// ret = copy_from_user(kbuf, buffer, count);
+//ust// if (ret) {
+//ust// ret = -EINVAL;
+//ust// goto end;
+//ust// }
+//ust// get_marker_string(kbuf, &marker_action, &iter);
+//ust// if (!marker_action || marker_action == iter) {
+//ust// ret = -EINVAL;
+//ust// goto end;
+//ust// }
+//ust// for (i = 0; i < 4; i++) {
+//ust// arg[i] = NULL;
+//ust// if (iter < kbuf + count) {
+//ust// iter++; /* skip the added '\0' */
+//ust// get_marker_string(iter, &arg[i], &iter);
+//ust// if (arg[i] == iter)
+//ust// arg[i] = NULL;
+//ust// }
+//ust// }
+//ust//
+//ust// if (!arg[0] || !arg[1]) {
+//ust// ret = -EINVAL;
+//ust// goto end;
+//ust// }
+//ust//
+//ust// if (!strcmp(marker_action, "connect")) {
+//ust// ret = ltt_marker_connect(arg[0], arg[1], arg[2]);
+//ust// if (ret)
+//ust// goto end;
+//ust// } else if (!strcmp(marker_action, "disconnect")) {
+//ust// ret = ltt_marker_disconnect(arg[0], arg[1], arg[2]);
+//ust// if (ret)
+//ust// goto end;
+//ust// }
+//ust// ret = count;
+//ust// end:
+//ust// vfree(kbuf);
+//ust// return ret;
+//ust// }
+//ust//
+//ust// static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+//ust// {
+//ust// struct marker_iter *iter = m->private;
+//ust//
+//ust// marker_iter_next(iter);
+//ust// if (!iter->marker) {
+//ust// /*
+//ust// * Setting the iter module to -1UL will make sure
+//ust// * that no module can possibly hold the current marker.
+//ust// */
+//ust// iter->module = (void *)-1UL;
+//ust// return NULL;
+//ust// }
+//ust// return iter->marker;
+//ust// }
+//ust//
+//ust// static void *s_start(struct seq_file *m, loff_t *pos)
+//ust// {
+//ust// struct marker_iter *iter = m->private;
+//ust//
+//ust// if (!*pos)
+//ust// marker_iter_reset(iter);
+//ust// marker_iter_start(iter);
+//ust// if (!iter->marker) {
+//ust// /*
+//ust// * Setting the iter module to -1UL will make sure
+//ust// * that no module can possibly hold the current marker.
+//ust// */
+//ust// iter->module = (void *)-1UL;
+//ust// return NULL;
+//ust// }
+//ust// return iter->marker;
+//ust// }
+//ust//
+//ust// static void s_stop(struct seq_file *m, void *p)
+//ust// {
+//ust// marker_iter_stop(m->private);
+//ust// }
+//ust//
+//ust// static int s_show(struct seq_file *m, void *p)
+//ust// {
+//ust// struct marker_iter *iter = m->private;
+//ust//
+//ust// seq_printf(m, "channel: %s marker: %s format: \"%s\" state: %d "
+//ust// "event_id: %hu call: 0x%p probe %s : 0x%p\n",
+//ust// iter->marker->channel,
+//ust// iter->marker->name, iter->marker->format,
+//ust// _imv_read(iter->marker->state),
+//ust// iter->marker->event_id,
+//ust// iter->marker->call,
+//ust// iter->marker->ptype ? "multi" : "single",
+//ust// iter->marker->ptype ?
+//ust// (void*)iter->marker->multi : (void*)iter->marker->single.func);
+//ust// return 0;
+//ust// }
+//ust//
+//ust// static const struct seq_operations ltt_seq_op = {
+//ust// .start = s_start,
+//ust// .next = s_next,
+//ust// .stop = s_stop,
+//ust// .show = s_show,
+//ust// };
+//ust//
+//ust// static int ltt_open(struct inode *inode, struct file *file)
+//ust// {
+//ust// /*
+//ust// * Iterator kept in m->private.
+//ust// * Restart iteration on all modules between reads because we do not lock
+//ust// * the module mutex between those.
+//ust// */
+//ust// int ret;
+//ust// struct marker_iter *iter;
+//ust//
+//ust// iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+//ust// if (!iter)
+//ust// return -ENOMEM;
+//ust//
+//ust// ret = seq_open(file, <t_seq_op);
+//ust// if (ret == 0)
+//ust// ((struct seq_file *)file->private_data)->private = iter;
+//ust// else
+//ust// kfree(iter);
+//ust// return ret;
+//ust// }
+//ust//
+//ust// static struct file_operations ltt_fops = {
+//ust// .write = ltt_write,
+//ust// .open = ltt_open,
+//ust// .read = seq_read,
+//ust// .llseek = seq_lseek,
+//ust// .release = seq_release_private,
+//ust// };
+
+static void disconnect_all_markers(void)
+{
+ struct ltt_active_marker *pdata, *tmp;
+
+ list_for_each_entry_safe(pdata, tmp, &markers_loaded_list, node) {
+ marker_probe_unregister_private_data(pdata->probe->probe_func,
+ pdata);
+ list_del(&pdata->node);
+ free(pdata);
+ }
+}
+
+static char initialized = 0;
+
+void __attribute__((constructor)) init_marker_control(void)
+{
+	if (!initialized) {
+ int ret;
+
+//ust// pentry = create_proc_entry("ltt", S_IRUSR|S_IWUSR, NULL);
+//ust// if (!pentry)
+//ust// return -EBUSY;
+//ust// markers_loaded_cachep = KMEM_CACHE(ltt_active_marker, 0);
+
+ ret = ltt_probe_register(&default_probe);
+ BUG_ON(ret);
+ ret = ltt_marker_connect("metadata", "core_marker_format",
+ DEFAULT_PROBE);
+ BUG_ON(ret);
+ ret = ltt_marker_connect("metadata", "core_marker_id", DEFAULT_PROBE);
+ BUG_ON(ret);
+//ust// pentry->proc_fops = <t_fops;
+
+ initialized = 1;
+ }
+}
+//ust// module_init(marker_control_init);
+
+static void __exit marker_control_exit(void)
+{
+ int ret;
+
+//ust// remove_proc_entry("ltt", NULL);
+ ret = ltt_marker_disconnect("metadata", "core_marker_format",
+ DEFAULT_PROBE);
+ BUG_ON(ret);
+ ret = ltt_marker_disconnect("metadata", "core_marker_id",
+ DEFAULT_PROBE);
+ BUG_ON(ret);
+ ret = ltt_probe_unregister(&default_probe);
+ BUG_ON(ret);
+ disconnect_all_markers();
+//ust// kmem_cache_destroy(markers_loaded_cachep);
+//ust// marker_synchronize_unregister();
+}
+//ust// module_exit(marker_control_exit);
+
+//ust// MODULE_LICENSE("GPL");
+//ust// MODULE_AUTHOR("Mathieu Desnoyers");
+//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Marker Control");
--- /dev/null
+#ifndef MARKER_CONTROL_H
+#define MARKER_CONTROL_H
+
+int marker_control_init(void);
+int ltt_probe_register(struct ltt_available_probe *pdata);
+
+#endif /* MARKER_CONTROL_H */
--- /dev/null
+/*
+ * Copyright (C) 2007 Mathieu Desnoyers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+//ust// #include <linux/module.h>
+//ust// #include <linux/mutex.h>
+//ust// #include <linux/types.h>
+//#include "jhash.h"
+//#include "list.h"
+//#include "rcupdate.h"
+//ust// #include <linux/marker.h>
+#include <errno.h>
+//ust// #include <linux/slab.h>
+//ust// #include <linux/immediate.h>
+//ust// #include <linux/sched.h>
+//ust// #include <linux/uaccess.h>
+//ust// #include <linux/user_marker.h>
+//ust// #include <linux/ltt-tracer.h>
+
+#include "marker.h"
+#include "kernelcompat.h"
+#include "usterr.h"
+#include "channels.h"
+#include "tracercore.h"
+#include "tracer.h"
+
+extern struct marker __start___markers[] __attribute__((visibility("hidden")));
+extern struct marker __stop___markers[] __attribute__((visibility("hidden")));
+
+/* Set to 1 to enable marker debug output */
+static const int marker_debug;
+
+/*
+ * markers_mutex nests inside module_mutex. Markers mutex protects the builtin
+ * and module markers and the hash table.
+ */
+static DEFINE_MUTEX(markers_mutex);
+
+void lock_markers(void)
+{
+ mutex_lock(&markers_mutex);
+}
+
+void unlock_markers(void)
+{
+ mutex_unlock(&markers_mutex);
+}
+
+/*
+ * Marker hash table, containing the active markers.
+ * Protected by markers_mutex.
+ */
+#define MARKER_HASH_BITS 6
+#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
+static struct hlist_head marker_table[MARKER_TABLE_SIZE];
+
+/*
+ * Note about RCU:
+ * It is used to make sure every handler has finished using its private data
+ * between two consecutive operations (add or remove) on a given marker. It is
+ * also used to delay the freeing of multiple-probe arrays until a quiescent
+ * state is reached.
+ * Marker entry modifications are protected by markers_mutex.
+ */
+struct marker_entry {
+ struct hlist_node hlist;
+ char *format;
+ char *name;
+ /* Probe wrapper */
+ void (*call)(const struct marker *mdata, void *call_private, ...);
+ struct marker_probe_closure single;
+ struct marker_probe_closure *multi;
+ int refcount; /* Number of times armed. 0 if disarmed. */
+ struct rcu_head rcu;
+ void *oldptr;
+ int rcu_pending;
+ u16 channel_id;
+ u16 event_id;
+ unsigned char ptype:1;
+ unsigned char format_allocated:1;
+ char channel[0]; /* Contains channel'\0'name'\0'format'\0' */
+};
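+
+/*
+ * The flexible channel[] array stores the three strings back to back.
+ * For channel "cpu", name "foo", format "%d" (made-up values):
+ *
+ *	channel[] = "cpu\0foo\0%d\0"
+ *	name = &channel[4], format = &name[4]
+ */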
+
+#ifdef CONFIG_MARKERS_USERSPACE
+static void marker_update_processes(void);
+#else
+static void marker_update_processes(void)
+{
+}
+#endif
+
+/**
+ * __mark_empty_function - Empty probe callback
+ * @mdata: marker data
+ * @probe_private: probe private data
+ * @call_private: call site private data
+ * @fmt: format string
+ * @...: variable argument list
+ *
+ * Empty callback provided as a probe to the markers. By providing this to a
+ * disabled marker, we make sure the execution flow is always valid even
+ * though the function pointer change and the marker enabling are two distinct
+ * operations that modify the execution flow of preemptible code.
+ */
+notrace void __mark_empty_function(const struct marker *mdata,
+ void *probe_private, void *call_private, const char *fmt, va_list *args)
+{
+}
+//ust// EXPORT_SYMBOL_GPL(__mark_empty_function);
+
+/*
+ * marker_probe_cb - Callback that prepares the variable argument list for probes.
+ * @mdata: pointer of type struct marker
+ * @call_private: caller site private data
+ * @...: Variable argument list.
+ *
+ * Since we do not use "typical" pointer based RCU in the 1 argument case, we
+ * need to put a full smp_rmb() in this branch. This is why we do not use
+ * rcu_dereference() for the pointer read.
+ */
+notrace void marker_probe_cb(const struct marker *mdata,
+ void *call_private, ...)
+{
+ va_list args;
+ char ptype;
+
+ /*
+	 * rcu_read_lock_sched does two things: it disables preemption to make
+	 * sure the teardown of the callbacks can be done correctly when they
+	 * are in modules, and it ensures RCU read coherency.
+ */
+//ust// rcu_read_lock_sched_notrace();
+ ptype = mdata->ptype;
+ if (likely(!ptype)) {
+ marker_probe_func *func;
+		/* Must read the ptype before ptr. They are not data dependent,
+ * so we put an explicit smp_rmb() here. */
+ smp_rmb();
+ func = mdata->single.func;
+ /* Must read the ptr before private data. They are not data
+		 * dependent, so we put an explicit smp_rmb() here. */
+ smp_rmb();
+ va_start(args, call_private);
+ func(mdata, mdata->single.probe_private, call_private,
+ mdata->format, &args);
+ va_end(args);
+ } else {
+ struct marker_probe_closure *multi;
+ int i;
+ /*
+ * Read mdata->ptype before mdata->multi.
+ */
+ smp_rmb();
+ multi = mdata->multi;
+ /*
+ * multi points to an array, therefore accessing the array
+ * depends on reading multi. However, even in this case,
+		 * we must ensure that the pointer is read _before_ the array
+ * data. Same as rcu_dereference, but we need a full smp_rmb()
+ * in the fast path, so put the explicit barrier here.
+ */
+ smp_read_barrier_depends();
+ for (i = 0; multi[i].func; i++) {
+ va_start(args, call_private);
+ multi[i].func(mdata, multi[i].probe_private,
+ call_private, mdata->format, &args);
+ va_end(args);
+ }
+ }
+//ust// rcu_read_unlock_sched_notrace();
+}
+//ust// EXPORT_SYMBOL_GPL(marker_probe_cb);
+
+/*
+ * marker_probe_cb_noarg - Callback that does not prepare the variable argument list.
+ * @mdata: pointer of type struct marker
+ * @call_private: caller site private data
+ * @...: Variable argument list.
+ *
+ * Should be connected to markers "MARK_NOARGS".
+ */
+static notrace void marker_probe_cb_noarg(const struct marker *mdata,
+ void *call_private, ...)
+{
+ va_list args; /* not initialized */
+ char ptype;
+
+//ust// rcu_read_lock_sched_notrace();
+ ptype = mdata->ptype;
+ if (likely(!ptype)) {
+ marker_probe_func *func;
+		/* Must read the ptype before ptr. They are not data dependent,
+ * so we put an explicit smp_rmb() here. */
+ smp_rmb();
+ func = mdata->single.func;
+ /* Must read the ptr before private data. They are not data
+		 * dependent, so we put an explicit smp_rmb() here. */
+ smp_rmb();
+ func(mdata, mdata->single.probe_private, call_private,
+ mdata->format, &args);
+ } else {
+ struct marker_probe_closure *multi;
+ int i;
+ /*
+ * Read mdata->ptype before mdata->multi.
+ */
+ smp_rmb();
+ multi = mdata->multi;
+ /*
+ * multi points to an array, therefore accessing the array
+ * depends on reading multi. However, even in this case,
+		 * we must ensure that the pointer is read _before_ the array
+ * data. Same as rcu_dereference, but we need a full smp_rmb()
+ * in the fast path, so put the explicit barrier here.
+ */
+ smp_read_barrier_depends();
+ for (i = 0; multi[i].func; i++)
+ multi[i].func(mdata, multi[i].probe_private,
+ call_private, mdata->format, &args);
+ }
+//ust// rcu_read_unlock_sched_notrace();
+}
+
+static void free_old_closure(struct rcu_head *head)
+{
+ struct marker_entry *entry = container_of(head,
+ struct marker_entry, rcu);
+ kfree(entry->oldptr);
+ /* Make sure we free the data before setting the pending flag to 0 */
+ smp_wmb();
+ entry->rcu_pending = 0;
+}
+
+static void debug_print_probes(struct marker_entry *entry)
+{
+ int i;
+
+ if (!marker_debug)
+ return;
+
+ if (!entry->ptype) {
+ printk(KERN_DEBUG "Single probe : %p %p\n",
+ entry->single.func,
+ entry->single.probe_private);
+ } else {
+ for (i = 0; entry->multi[i].func; i++)
+ printk(KERN_DEBUG "Multi probe %d : %p %p\n", i,
+ entry->multi[i].func,
+ entry->multi[i].probe_private);
+ }
+}
+
+static struct marker_probe_closure *
+marker_entry_add_probe(struct marker_entry *entry,
+ marker_probe_func *probe, void *probe_private)
+{
+ int nr_probes = 0;
+ struct marker_probe_closure *old, *new;
+
+ WARN_ON(!probe);
+
+ debug_print_probes(entry);
+ old = entry->multi;
+ if (!entry->ptype) {
+ if (entry->single.func == probe &&
+ entry->single.probe_private == probe_private)
+ return ERR_PTR(-EBUSY);
+ if (entry->single.func == __mark_empty_function) {
+ /* 0 -> 1 probes */
+ entry->single.func = probe;
+ entry->single.probe_private = probe_private;
+ entry->refcount = 1;
+ entry->ptype = 0;
+ debug_print_probes(entry);
+ return NULL;
+ } else {
+ /* 1 -> 2 probes */
+ nr_probes = 1;
+ old = NULL;
+ }
+ } else {
+ /* (N -> N+1), (N != 0, 1) probes */
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++)
+ if (old[nr_probes].func == probe
+ && old[nr_probes].probe_private
+ == probe_private)
+ return ERR_PTR(-EBUSY);
+ }
+	/* + 2: one for the new probe, one for the NULL terminator */
+ new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure),
+ GFP_KERNEL);
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (!old)
+ new[0] = entry->single;
+ else
+ memcpy(new, old,
+ nr_probes * sizeof(struct marker_probe_closure));
+ new[nr_probes].func = probe;
+ new[nr_probes].probe_private = probe_private;
+ entry->refcount = nr_probes + 1;
+ entry->multi = new;
+ entry->ptype = 1;
+ debug_print_probes(entry);
+ return old;
+}
+
+static struct marker_probe_closure *
+marker_entry_remove_probe(struct marker_entry *entry,
+ marker_probe_func *probe, void *probe_private)
+{
+ int nr_probes = 0, nr_del = 0, i;
+ struct marker_probe_closure *old, *new;
+
+ old = entry->multi;
+
+ debug_print_probes(entry);
+ if (!entry->ptype) {
+ /* 0 -> N is an error */
+ WARN_ON(entry->single.func == __mark_empty_function);
+ /* 1 -> 0 probes */
+ WARN_ON(probe && entry->single.func != probe);
+ WARN_ON(entry->single.probe_private != probe_private);
+ entry->single.func = __mark_empty_function;
+ entry->refcount = 0;
+ entry->ptype = 0;
+ debug_print_probes(entry);
+ return NULL;
+ } else {
+ /* (N -> M), (N > 1, M >= 0) probes */
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
+ if ((!probe || old[nr_probes].func == probe)
+ && old[nr_probes].probe_private
+ == probe_private)
+ nr_del++;
+ }
+ }
+
+ if (nr_probes - nr_del == 0) {
+ /* N -> 0, (N > 1) */
+ entry->single.func = __mark_empty_function;
+ entry->refcount = 0;
+ entry->ptype = 0;
+ } else if (nr_probes - nr_del == 1) {
+ /* N -> 1, (N > 1) */
+ for (i = 0; old[i].func; i++)
+ if ((probe && old[i].func != probe) ||
+ old[i].probe_private != probe_private)
+ entry->single = old[i];
+ entry->refcount = 1;
+ entry->ptype = 0;
+ } else {
+ int j = 0;
+ /* N -> M, (N > 1, M > 1) */
+ /* + 1 for NULL */
+ new = kzalloc((nr_probes - nr_del + 1)
+ * sizeof(struct marker_probe_closure), GFP_KERNEL);
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ for (i = 0; old[i].func; i++)
+ if ((probe && old[i].func != probe) ||
+ old[i].probe_private != probe_private)
+ new[j++] = old[i];
+ entry->refcount = nr_probes - nr_del;
+ entry->ptype = 1;
+ entry->multi = new;
+ }
+ debug_print_probes(entry);
+ return old;
+}
+
+/*
+ * Get marker if the marker is present in the marker hash table.
+ * Must be called with markers_mutex held.
+ * Returns NULL if not present.
+ */
+static struct marker_entry *get_marker(const char *channel, const char *name)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct marker_entry *e;
+ size_t channel_len = strlen(channel) + 1;
+ size_t name_len = strlen(name) + 1;
+ u32 hash;
+
+ hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
+ head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
+ hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(channel, e->channel) && !strcmp(name, e->name))
+ return e;
+ }
+ return NULL;
+}
+
+/*
+ * Add the marker to the marker hash table. Must be called with markers_mutex
+ * held.
+ */
+static struct marker_entry *add_marker(const char *channel, const char *name,
+ const char *format)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct marker_entry *e;
+ size_t channel_len = strlen(channel) + 1;
+ size_t name_len = strlen(name) + 1;
+ size_t format_len = 0;
+ u32 hash;
+
+ hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
+ if (format)
+ format_len = strlen(format) + 1;
+ head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
+ hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
+ printk(KERN_NOTICE
+ "Marker %s.%s busy\n", channel, name);
+ return ERR_PTR(-EBUSY); /* Already there */
+ }
+ }
+ /*
+ * Using kmalloc here to allocate a variable length element. Could
+ * cause some memory fragmentation if overused.
+ */
+ e = kmalloc(sizeof(struct marker_entry)
+ + channel_len + name_len + format_len,
+ GFP_KERNEL);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+ memcpy(e->channel, channel, channel_len);
+ e->name = &e->channel[channel_len];
+ memcpy(e->name, name, name_len);
+ if (format) {
+		/* The format string is stored right after the name string. */
+		e->format = &e->name[name_len];
+ memcpy(e->format, format, format_len);
+ if (strcmp(e->format, MARK_NOARGS) == 0)
+ e->call = marker_probe_cb_noarg;
+ else
+ e->call = marker_probe_cb;
+ trace_mark(metadata, core_marker_format,
+ "channel %s name %s format %s",
+ e->channel, e->name, e->format);
+ } else {
+ e->format = NULL;
+ e->call = marker_probe_cb;
+ }
+ e->single.func = __mark_empty_function;
+ e->single.probe_private = NULL;
+ e->multi = NULL;
+ e->ptype = 0;
+ e->format_allocated = 0;
+ e->refcount = 0;
+ e->rcu_pending = 0;
+ hlist_add_head(&e->hlist, head);
+ return e;
+}
+
+/*
+ * Remove the marker from the marker hash table. Must be called with mutex_lock
+ * held.
+ */
+static int remove_marker(const char *channel, const char *name)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct marker_entry *e;
+ int found = 0;
+ size_t channel_len = strlen(channel) + 1;
+ size_t name_len = strlen(name) + 1;
+ u32 hash;
+ int ret;
+
+ hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
+ head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
+ hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ return -ENOENT;
+ if (e->single.func != __mark_empty_function)
+ return -EBUSY;
+ hlist_del(&e->hlist);
+ if (e->format_allocated)
+ kfree(e->format);
+ ret = ltt_channels_unregister(e->channel);
+ WARN_ON(ret);
+ /* Make sure the call_rcu has been executed */
+ if (e->rcu_pending)
+ rcu_barrier_sched();
+ kfree(e);
+ return 0;
+}
+
+/*
+ * Set the mark_entry format to the format found in the element.
+ */
+static int marker_set_format(struct marker_entry *entry, const char *format)
+{
+ entry->format = kstrdup(format, GFP_KERNEL);
+ if (!entry->format)
+ return -ENOMEM;
+ entry->format_allocated = 1;
+
+ trace_mark(metadata, core_marker_format,
+ "channel %s name %s format %s",
+ entry->channel, entry->name, entry->format);
+ return 0;
+}
+
+/*
+ * Sets the probe callback corresponding to one marker.
+ */
+static int set_marker(struct marker_entry *entry, struct marker *elem,
+ int active)
+{
+ int ret = 0;
+ WARN_ON(strcmp(entry->name, elem->name) != 0);
+
+ if (entry->format) {
+ if (strcmp(entry->format, elem->format) != 0) {
+ printk(KERN_NOTICE
+ "Format mismatch for probe %s "
+ "(%s), marker (%s)\n",
+ entry->name,
+ entry->format,
+ elem->format);
+ return -EPERM;
+ }
+ } else {
+ ret = marker_set_format(entry, elem->format);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * probe_cb setup (statically known) is done here. It is
+ * asynchronous with the rest of execution, therefore we only
+ * pass from a "safe" callback (with argument) to an "unsafe"
+ * callback (does not set arguments).
+ */
+ elem->call = entry->call;
+ elem->channel_id = entry->channel_id;
+ elem->event_id = entry->event_id;
+ /*
+ * Sanity check :
+ * We only update the single probe private data when the ptr is
+ * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
+ */
+ WARN_ON(elem->single.func != __mark_empty_function
+ && elem->single.probe_private != entry->single.probe_private
+ && !elem->ptype);
+ elem->single.probe_private = entry->single.probe_private;
+ /*
+ * Make sure the private data is valid when we update the
+ * single probe ptr.
+ */
+ smp_wmb();
+ elem->single.func = entry->single.func;
+ /*
+ * We also make sure that the new probe callbacks array is consistent
+ * before setting a pointer to it.
+ */
+ rcu_assign_pointer(elem->multi, entry->multi);
+ /*
+ * Update the function or multi probe array pointer before setting the
+ * ptype.
+ */
+ smp_wmb();
+ elem->ptype = entry->ptype;
+
+//ust// if (elem->tp_name && (active ^ _imv_read(elem->state))) {
+//ust// WARN_ON(!elem->tp_cb);
+//ust// /*
+//ust// * It is ok to directly call the probe registration because type
+//ust// * checking has been done in the __trace_mark_tp() macro.
+//ust// */
+//ust//
+//ust// if (active) {
+//ust// /*
+//ust// * try_module_get should always succeed because we hold
+//ust// * markers_mutex to get the tp_cb address.
+//ust// */
+//ust// ret = try_module_get(__module_text_address(
+//ust// (unsigned long)elem->tp_cb));
+//ust// BUG_ON(!ret);
+//ust// ret = tracepoint_probe_register_noupdate(
+//ust// elem->tp_name,
+//ust// elem->tp_cb);
+//ust// } else {
+//ust// ret = tracepoint_probe_unregister_noupdate(
+//ust// elem->tp_name,
+//ust// elem->tp_cb);
+//ust// /*
+//ust// * tracepoint_probe_update_all() must be called
+//ust// * before the module containing tp_cb is unloaded.
+//ust// */
+//ust// module_put(__module_text_address(
+//ust// (unsigned long)elem->tp_cb));
+//ust// }
+//ust// }
+ elem->state__imv = active;
+
+ return ret;
+}
+
+/*
+ * Disable a marker and its probe callback.
+ * Note: only waiting for an RCU period after setting elem->call to the empty
+ * function ensures that the original callback is not used anymore. This is
+ * ensured by rcu_read_lock_sched around the call site.
+ */
+static void disable_marker(struct marker *elem)
+{
+ int ret;
+
+ /* leave "call" as is. It is known statically. */
+//ust// if (elem->tp_name && _imv_read(elem->state)) {
+//ust// WARN_ON(!elem->tp_cb);
+//ust// /*
+//ust// * It is ok to directly call the probe registration because type
+//ust// * checking has been done in the __trace_mark_tp() macro.
+//ust// */
+//ust// ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
+//ust// elem->tp_cb);
+//ust// WARN_ON(ret);
+//ust// /*
+//ust// * tracepoint_probe_update_all() must be called
+//ust// * before the module containing tp_cb is unloaded.
+//ust// */
+//ust// module_put(__module_text_address((unsigned long)elem->tp_cb));
+//ust// }
+ elem->state__imv = 0;
+ elem->single.func = __mark_empty_function;
+ /* Update the function before setting the ptype */
+ smp_wmb();
+ elem->ptype = 0; /* single probe */
+ /*
+ * Leave the private data and channel_id/event_id there, because removal
+ * is racy and should be done only after an RCU period. These are never
+ * used until the next initialization anyway.
+ */
+}
+
+/**
+ * marker_update_probe_range - Update a probe range
+ * @begin: beginning of the range
+ * @end: end of the range
+ *
+ * Updates the probe callback corresponding to a range of markers.
+ */
+void marker_update_probe_range(struct marker *begin,
+ struct marker *end)
+{
+ struct marker *iter;
+ struct marker_entry *mark_entry;
+
+ mutex_lock(&markers_mutex);
+ for (iter = begin; iter < end; iter++) {
+ mark_entry = get_marker(iter->channel, iter->name);
+ if (mark_entry) {
+ set_marker(mark_entry, iter, !!mark_entry->refcount);
+ /*
+ * ignore error, continue
+ */
+
+ /* This is added for UST. We emit a core_marker_id event
+ * for markers that are already registered to a probe
+ * upon library load. Otherwise, no core_marker_id will
+ * be generated for these markers. Is this the right thing
+ * to do?
+ */
+ trace_mark(metadata, core_marker_id,
+ "channel %s name %s event_id %hu "
+ "int #1u%zu long #1u%zu pointer #1u%zu "
+ "size_t #1u%zu alignment #1u%u",
+ iter->channel, iter->name, mark_entry->event_id,
+ sizeof(int), sizeof(long), sizeof(void *),
+ sizeof(size_t), ltt_get_alignment());
+ } else {
+ disable_marker(iter);
+ }
+ }
+ mutex_unlock(&markers_mutex);
+}
+
+/*
+ * Update probes, removing the faulty probes.
+ *
+ * Internal callback only changed before the first probe is connected to it.
+ * Single probe private data can only be changed on 0 -> 1 and 2 -> 1
+ * transitions. All other transitions will leave the old private data valid.
+ * This makes the non-atomicity of the callback/private data updates valid.
+ *
+ * "special case" updates :
+ * 0 -> 1 callback
+ * 1 -> 0 callback
+ * 1 -> 2 callbacks
+ * 2 -> 1 callbacks
+ * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates.
+ * Side effect: marker_set_format may delete the marker entry (creating a
+ * replacement).
+ */
+static void marker_update_probes(void)
+{
+ /* Core kernel markers */
+//ust// marker_update_probe_range(__start___markers, __stop___markers);
+ /* Markers in modules. */
+//ust// module_update_markers();
+ lib_update_markers();
+//ust// tracepoint_probe_update_all();
+ /* Update immediate values */
+ core_imv_update();
+//ust// module_imv_update(); /* FIXME: need to port for libs? */
+ marker_update_processes();
+}
+
+/**
+ * marker_probe_register - Connect a probe to a marker
+ * @channel: marker channel
+ * @name: marker name
+ * @format: format string
+ * @probe: probe handler
+ * @probe_private: probe private data
+ *
+ * private data must be a valid allocated memory address, or NULL.
+ * Returns 0 if ok, error value on error.
+ * The probe address must at least be aligned on the architecture pointer size.
+ */
+int marker_probe_register(const char *channel, const char *name,
+ const char *format, marker_probe_func *probe,
+ void *probe_private)
+{
+ struct marker_entry *entry;
+ int ret = 0, ret_err;
+ struct marker_probe_closure *old;
+ int first_probe = 0;
+
+ mutex_lock(&markers_mutex);
+ entry = get_marker(channel, name);
+ if (!entry) {
+ first_probe = 1;
+ entry = add_marker(channel, name, format);
+ if (IS_ERR(entry))
+ ret = PTR_ERR(entry);
+ if (ret)
+ goto end;
+ ret = ltt_channels_register(channel);
+ if (ret)
+ goto error_remove_marker;
+ ret = ltt_channels_get_index_from_name(channel);
+ if (ret < 0)
+ goto error_unregister_channel;
+ entry->channel_id = ret;
+ ret = ltt_channels_get_event_id(channel, name);
+ if (ret < 0)
+ goto error_unregister_channel;
+ entry->event_id = ret;
+ ret = 0;
+ trace_mark(metadata, core_marker_id,
+ "channel %s name %s event_id %hu "
+ "int #1u%zu long #1u%zu pointer #1u%zu "
+ "size_t #1u%zu alignment #1u%u",
+ channel, name, entry->event_id,
+ sizeof(int), sizeof(long), sizeof(void *),
+ sizeof(size_t), ltt_get_alignment());
+ } else if (format) {
+ if (!entry->format)
+ ret = marker_set_format(entry, format);
+ else if (strcmp(entry->format, format))
+ ret = -EPERM;
+ if (ret)
+ goto end;
+ }
+
+ /*
+ * If we detect that a call_rcu is pending for this marker,
+ * make sure it's executed now.
+ */
+ if (entry->rcu_pending)
+ rcu_barrier_sched();
+ old = marker_entry_add_probe(entry, probe, probe_private);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ if (first_probe)
+ goto error_unregister_channel;
+ else
+ goto end;
+ }
+ mutex_unlock(&markers_mutex);
+
+ marker_update_probes();
+
+ mutex_lock(&markers_mutex);
+ entry = get_marker(channel, name);
+ if (!entry)
+ goto end;
+ if (entry->rcu_pending)
+ rcu_barrier_sched();
+ entry->oldptr = old;
+ entry->rcu_pending = 1;
+ /* write rcu_pending before calling the RCU callback */
+ smp_wmb();
+ call_rcu_sched(&entry->rcu, free_old_closure);
+ /*synchronize_rcu(); free_old_closure();*/
+ goto end;
+
+error_unregister_channel:
+ ret_err = ltt_channels_unregister(channel);
+ WARN_ON(ret_err);
+error_remove_marker:
+ ret_err = remove_marker(channel, name);
+ WARN_ON(ret_err);
+end:
+ mutex_unlock(&markers_mutex);
+ return ret;
+}
+//ust// EXPORT_SYMBOL_GPL(marker_probe_register);
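+
+/*
+ * Registration sketch (hypothetical probe and names, illustration
+ * only); the probe signature matches marker_probe_func:
+ *
+ *	static void my_probe(const struct marker *mdata, void *probe_private,
+ *			void *call_private, const char *fmt, va_list *args)
+ *	{
+ *		...
+ *	}
+ *
+ *	ret = marker_probe_register("ust", "my_event", "value %d",
+ *			my_probe, NULL);
+ */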
+
+/**
+ * marker_probe_unregister - Disconnect a probe from a marker
+ * @channel: marker channel
+ * @name: marker name
+ * @probe: probe function pointer
+ * @probe_private: probe private data
+ *
+ * Returns 0 on success, or -ENOENT if the marker was not found.
+ * We do not need to call a synchronize_sched to make sure the probes have
+ * finished running before doing a module unload, because the module unload
+ * itself uses stop_machine(), which ensures that every preempt-disabled
+ * section has finished.
+ */
+int marker_probe_unregister(const char *channel, const char *name,
+ marker_probe_func *probe, void *probe_private)
+{
+ struct marker_entry *entry;
+ struct marker_probe_closure *old;
+ int ret = -ENOENT;
+
+ mutex_lock(&markers_mutex);
+ entry = get_marker(channel, name);
+ if (!entry)
+ goto end;
+ if (entry->rcu_pending)
+ rcu_barrier_sched();
+ old = marker_entry_remove_probe(entry, probe, probe_private);
+ mutex_unlock(&markers_mutex);
+
+ marker_update_probes();
+
+ mutex_lock(&markers_mutex);
+ entry = get_marker(channel, name);
+ if (!entry)
+ goto end;
+ if (entry->rcu_pending)
+ rcu_barrier_sched();
+ entry->oldptr = old;
+ entry->rcu_pending = 1;
+ /* write rcu_pending before calling the RCU callback */
+ smp_wmb();
+ call_rcu_sched(&entry->rcu, free_old_closure);
+ remove_marker(channel, name); /* Ignore busy error message */
+ ret = 0;
+end:
+ mutex_unlock(&markers_mutex);
+ return ret;
+}
+//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister);
+
+static struct marker_entry *
+get_marker_from_private_data(marker_probe_func *probe, void *probe_private)
+{
+ struct marker_entry *entry;
+	unsigned int i, j;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ for (i = 0; i < MARKER_TABLE_SIZE; i++) {
+ head = &marker_table[i];
+ hlist_for_each_entry(entry, node, head, hlist) {
+ if (!entry->ptype) {
+ if (entry->single.func == probe
+ && entry->single.probe_private
+ == probe_private)
+ return entry;
+ } else {
+ struct marker_probe_closure *closure;
+ closure = entry->multi;
+				/* use j: i indexes the outer hash table loop */
+				for (j = 0; closure[j].func; j++) {
+					if (closure[j].func == probe &&
+						closure[j].probe_private
+						== probe_private)
+ return entry;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * marker_probe_unregister_private_data - Disconnect a probe from a marker
+ * @probe: probe function
+ * @probe_private: probe private data
+ *
+ * Unregister a probe by providing the registered private data.
+ * Only removes the first marker found in hash table.
+ * Return 0 on success or error value.
+ * We do not need to call a synchronize_sched to make sure the probes have
+ * finished running before doing a module unload, because the module unload
+ * itself uses stop_machine(), which ensures that every preempt-disabled
+ * section has finished.
+ */
+int marker_probe_unregister_private_data(marker_probe_func *probe,
+ void *probe_private)
+{
+ struct marker_entry *entry;
+ int ret = 0;
+ struct marker_probe_closure *old;
+ const char *channel = NULL, *name = NULL;
+
+ mutex_lock(&markers_mutex);
+ entry = get_marker_from_private_data(probe, probe_private);
+ if (!entry) {
+ ret = -ENOENT;
+ goto end;
+ }
+ if (entry->rcu_pending)
+ rcu_barrier_sched();
+ old = marker_entry_remove_probe(entry, NULL, probe_private);
+ channel = kstrdup(entry->channel, GFP_KERNEL);
+ name = kstrdup(entry->name, GFP_KERNEL);
+ mutex_unlock(&markers_mutex);
+
+ marker_update_probes();
+
+ mutex_lock(&markers_mutex);
+ entry = get_marker(channel, name);
+ if (!entry)
+ goto end;
+ if (entry->rcu_pending)
+ rcu_barrier_sched();
+ entry->oldptr = old;
+ entry->rcu_pending = 1;
+ /* write rcu_pending before calling the RCU callback */
+ smp_wmb();
+ call_rcu_sched(&entry->rcu, free_old_closure);
+ /* Ignore busy error message */
+ remove_marker(channel, name);
+end:
+ mutex_unlock(&markers_mutex);
+ kfree(channel);
+ kfree(name);
+ return ret;
+}
+//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
+
+/**
+ * marker_get_private_data - Get a marker's probe private data
+ * @channel: marker channel
+ * @name: marker name
+ * @probe: probe to match
+ * @num: get the nth matching probe's private data
+ *
+ * Returns the nth private data pointer (starting from 0) matching, or an
+ * ERR_PTR.
+ * The private data pointer should _only_ be dereferenced if the caller is the
+ * owner of the data, or its content could vanish. This is mostly used to
+ * confirm that a caller is the owner of a registered probe.
+ */
+void *marker_get_private_data(const char *channel, const char *name,
+ marker_probe_func *probe, int num)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct marker_entry *e;
+ size_t channel_len = strlen(channel) + 1;
+ size_t name_len = strlen(name) + 1;
+ int i;
+ u32 hash;
+
+ hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
+ head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
+ hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
+ if (!e->ptype) {
+ if (num == 0 && e->single.func == probe)
+ return e->single.probe_private;
+ } else {
+ struct marker_probe_closure *closure;
+ int match = 0;
+ closure = e->multi;
+ for (i = 0; closure[i].func; i++) {
+ if (closure[i].func != probe)
+ continue;
+ if (match++ == num)
+ return closure[i].probe_private;
+ }
+ }
+ break;
+ }
+ }
+ return ERR_PTR(-ENOENT);
+}
+//ust// EXPORT_SYMBOL_GPL(marker_get_private_data);
+
+/**
+ * markers_compact_event_ids - Compact markers event IDs and reassign channels
+ *
+ * Called when no channel users are active by the channel infrastructure.
+ * Called with lock_markers() and channel mutex held.
+ */
+//ust// void markers_compact_event_ids(void)
+//ust// {
+//ust// struct marker_entry *entry;
+//ust// unsigned int i;
+//ust// struct hlist_head *head;
+//ust// struct hlist_node *node;
+//ust// int ret;
+//ust//
+//ust// for (i = 0; i < MARKER_TABLE_SIZE; i++) {
+//ust// head = &marker_table[i];
+//ust// hlist_for_each_entry(entry, node, head, hlist) {
+//ust// ret = ltt_channels_get_index_from_name(entry->channel);
+//ust// WARN_ON(ret < 0);
+//ust// entry->channel_id = ret;
+//ust// ret = _ltt_channels_get_event_id(entry->channel,
+//ust// entry->name);
+//ust// WARN_ON(ret < 0);
+//ust// entry->event_id = ret;
+//ust// }
+//ust// }
+//ust// }
+
+//ust//#ifdef CONFIG_MODULES
+
+/**
+ * marker_get_iter_range - Get the next marker given a range.
+ * @marker: current marker (in), next marker (out)
+ * @begin: beginning of the range
+ * @end: end of the range
+ *
+ * Returns whether a next marker has been found (1) or not (0).
+ * Will return the first marker in the range if the input marker is NULL.
+ */
+int marker_get_iter_range(struct marker **marker, struct marker *begin,
+ struct marker *end)
+{
+ if (!*marker && begin != end) {
+ *marker = begin;
+ return 1;
+ }
+ if (*marker >= begin && *marker < end)
+ return 1;
+ return 0;
+}
+//ust// EXPORT_SYMBOL_GPL(marker_get_iter_range);
+
+static void marker_get_iter(struct marker_iter *iter)
+{
+ int found = 0;
+
+ /* Core kernel markers */
+ if (!iter->lib) {
+ /* ust FIXME: how come we cannot disable the following line? we shouldn't need core stuff */
+ found = marker_get_iter_range(&iter->marker,
+ __start___markers, __stop___markers);
+ if (found)
+ goto end;
+ }
+ /* Markers in modules. */
+ found = lib_get_iter_markers(iter);
+end:
+ if (!found)
+ marker_iter_reset(iter);
+}
+
+void marker_iter_start(struct marker_iter *iter)
+{
+ marker_get_iter(iter);
+}
+//ust// EXPORT_SYMBOL_GPL(marker_iter_start);
+
+void marker_iter_next(struct marker_iter *iter)
+{
+ iter->marker++;
+	/*
+	 * iter->marker may be invalid because we blindly incremented it.
+	 * Make sure it is valid by checking it against the marker sections,
+	 * moving on to the following library's markers if necessary.
+	 */
+ marker_get_iter(iter);
+}
+//ust// EXPORT_SYMBOL_GPL(marker_iter_next);
+
+void marker_iter_stop(struct marker_iter *iter)
+{
+}
+//ust// EXPORT_SYMBOL_GPL(marker_iter_stop);
+
+void marker_iter_reset(struct marker_iter *iter)
+{
+ iter->lib = NULL;
+ iter->marker = NULL;
+}
+//ust// EXPORT_SYMBOL_GPL(marker_iter_reset);
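+
+/*
+ * Example iterator usage (a sketch; this is the same pattern used by
+ * ltt_dump_marker_state() below):
+ *
+ *	struct marker_iter iter;
+ *
+ *	marker_iter_reset(&iter);
+ *	marker_iter_start(&iter);
+ *	for (; iter.marker != NULL; marker_iter_next(&iter))
+ *		DBG("marker %s/%s", iter.marker->channel, iter.marker->name);
+ *	marker_iter_stop(&iter);
+ */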
+
+#ifdef CONFIG_MARKERS_USERSPACE
+/*
+ * must be called with current->user_markers_mutex held
+ */
+static void free_user_marker(char __user *state, struct hlist_head *head)
+{
+ struct user_marker *umark;
+ struct hlist_node *pos, *n;
+
+ hlist_for_each_entry_safe(umark, pos, n, head, hlist) {
+ if (umark->state == state) {
+ hlist_del(&umark->hlist);
+ kfree(umark);
+ }
+ }
+}
+
+//ust// asmlinkage long sys_marker(char __user *name, char __user *format,
+//ust// char __user *state, int reg)
+//ust// {
+//ust// struct user_marker *umark;
+//ust// long len;
+//ust// struct marker_entry *entry;
+//ust// int ret = 0;
+//ust//
+//ust// printk(KERN_DEBUG "Program %s %s marker [%p, %p]\n",
+//ust// current->comm, reg ? "registers" : "unregisters",
+//ust// name, state);
+//ust// if (reg) {
+//ust// umark = kmalloc(sizeof(struct user_marker), GFP_KERNEL);
+//ust// umark->name[MAX_USER_MARKER_NAME_LEN - 1] = '\0';
+//ust// umark->format[MAX_USER_MARKER_FORMAT_LEN - 1] = '\0';
+//ust// umark->state = state;
+//ust// len = strncpy_from_user(umark->name, name,
+//ust// MAX_USER_MARKER_NAME_LEN - 1);
+//ust// if (len < 0) {
+//ust// ret = -EFAULT;
+//ust// goto error;
+//ust// }
+//ust// len = strncpy_from_user(umark->format, format,
+//ust// MAX_USER_MARKER_FORMAT_LEN - 1);
+//ust// if (len < 0) {
+//ust// ret = -EFAULT;
+//ust// goto error;
+//ust// }
+//ust// printk(KERN_DEBUG "Marker name : %s, format : %s", umark->name,
+//ust// umark->format);
+//ust// mutex_lock(&markers_mutex);
+//ust// entry = get_marker("userspace", umark->name);
+//ust// if (entry) {
+//ust// if (entry->format &&
+//ust// strcmp(entry->format, umark->format) != 0) {
+//ust// printk(" error, wrong format in process %s",
+//ust// current->comm);
+//ust// ret = -EPERM;
+//ust// goto error_unlock;
+//ust// }
+//ust// printk(" %s", !!entry->refcount
+//ust// ? "enabled" : "disabled");
+//ust// if (put_user(!!entry->refcount, state)) {
+//ust// ret = -EFAULT;
+//ust// goto error_unlock;
+//ust// }
+//ust// printk("\n");
+//ust// } else {
+//ust// printk(" disabled\n");
+//ust// if (put_user(0, umark->state)) {
+//ust// printk(KERN_WARNING
+//ust// "Marker in %s caused a fault\n",
+//ust// current->comm);
+//ust// goto error_unlock;
+//ust// }
+//ust// }
+//ust//		mutex_lock(&current->group_leader->user_markers_mutex);
+//ust//		hlist_add_head(&umark->hlist,
+//ust//			&current->group_leader->user_markers);
+//ust//		current->group_leader->user_markers_sequence++;
+//ust//		mutex_unlock(&current->group_leader->user_markers_mutex);
+//ust//		mutex_unlock(&markers_mutex);
+//ust//	} else {
+//ust//		mutex_lock(&current->group_leader->user_markers_mutex);
+//ust//		free_user_marker(state,
+//ust//			&current->group_leader->user_markers);
+//ust//		current->group_leader->user_markers_sequence++;
+//ust//		mutex_unlock(&current->group_leader->user_markers_mutex);
+//ust// }
+//ust// goto end;
+//ust// error_unlock:
+//ust// mutex_unlock(&markers_mutex);
+//ust// error:
+//ust// kfree(umark);
+//ust// end:
+//ust// return ret;
+//ust// }
+//ust//
+//ust// /*
+//ust// * Types :
+//ust// * string : 0
+//ust// */
+//ust// asmlinkage long sys_trace(int type, uint16_t id,
+//ust// char __user *ubuf)
+//ust// {
+//ust// long ret = -EPERM;
+//ust// char *page;
+//ust// int len;
+//ust//
+//ust// switch (type) {
+//ust// case 0: /* String */
+//ust// ret = -ENOMEM;
+//ust// page = (char *)__get_free_page(GFP_TEMPORARY);
+//ust// if (!page)
+//ust// goto string_out;
+//ust// len = strncpy_from_user(page, ubuf, PAGE_SIZE);
+//ust// if (len < 0) {
+//ust// ret = -EFAULT;
+//ust// goto string_err;
+//ust// }
+//ust// trace_mark(userspace, string, "string %s", page);
+//ust// string_err:
+//ust// free_page((unsigned long) page);
+//ust// string_out:
+//ust// break;
+//ust// default:
+//ust// break;
+//ust// }
+//ust// return ret;
+//ust// }
+
+//ust// static void marker_update_processes(void)
+//ust// {
+//ust// struct task_struct *g, *t;
+//ust//
+//ust// /*
+//ust// * markers_mutex is taken to protect the p->user_markers read.
+//ust// */
+//ust// mutex_lock(&markers_mutex);
+//ust// read_lock(&tasklist_lock);
+//ust// for_each_process(g) {
+//ust// WARN_ON(!thread_group_leader(g));
+//ust// if (hlist_empty(&g->user_markers))
+//ust// continue;
+//ust// if (strcmp(g->comm, "testprog") == 0)
+//ust// printk(KERN_DEBUG "set update pending for testprog\n");
+//ust// t = g;
+//ust// do {
+//ust// /* TODO : implement this thread flag in each arch. */
+//ust// set_tsk_thread_flag(t, TIF_MARKER_PENDING);
+//ust// } while ((t = next_thread(t)) != g);
+//ust// }
+//ust// read_unlock(&tasklist_lock);
+//ust// mutex_unlock(&markers_mutex);
+//ust// }
+
+/*
+ * Update the current process.
+ * Note that we have to wait a whole scheduler period before we are sure that
+ * every running userspace thread has had its markers updated.
+ * (synchronize_sched() can be used to ensure this.)
+ */
+void marker_update_process(void)
+{
+ struct user_marker *umark;
+ struct hlist_node *pos;
+ struct marker_entry *entry;
+
+ mutex_lock(&markers_mutex);
+	mutex_lock(&current->group_leader->user_markers_mutex);
+ if (strcmp(current->comm, "testprog") == 0)
+ printk(KERN_DEBUG "do update pending for testprog\n");
+	hlist_for_each_entry(umark, pos,
+			&current->group_leader->user_markers, hlist) {
+ printk(KERN_DEBUG "Updating marker %s in %s\n",
+ umark->name, current->comm);
+ entry = get_marker("userspace", umark->name);
+ if (entry) {
+ if (entry->format &&
+ strcmp(entry->format, umark->format) != 0) {
+ printk(KERN_WARNING
+ " error, wrong format in process %s\n",
+ current->comm);
+ break;
+ }
+ if (put_user(!!entry->refcount, umark->state)) {
+ printk(KERN_WARNING
+ "Marker in %s caused a fault\n",
+ current->comm);
+ break;
+ }
+ } else {
+ if (put_user(0, umark->state)) {
+ printk(KERN_WARNING
+ "Marker in %s caused a fault\n",
+ current->comm);
+ break;
+ }
+ }
+ }
+ clear_thread_flag(TIF_MARKER_PENDING);
+	mutex_unlock(&current->group_leader->user_markers_mutex);
+ mutex_unlock(&markers_mutex);
+}
+
+/*
+ * Called at process exit and upon do_execve().
+ * We assume that when the leader exits, the other threads can no longer
+ * reference the leader structure.
+ */
+void exit_user_markers(struct task_struct *p)
+{
+ struct user_marker *umark;
+ struct hlist_node *pos, *n;
+
+ if (thread_group_leader(p)) {
+ mutex_lock(&markers_mutex);
+ mutex_lock(&p->user_markers_mutex);
+ hlist_for_each_entry_safe(umark, pos, n, &p->user_markers,
+ hlist)
+ kfree(umark);
+ INIT_HLIST_HEAD(&p->user_markers);
+ p->user_markers_sequence++;
+ mutex_unlock(&p->user_markers_mutex);
+ mutex_unlock(&markers_mutex);
+ }
+}
+
+int is_marker_enabled(const char *channel, const char *name)
+{
+	struct marker_entry *entry;
+	int enabled;
+
+	mutex_lock(&markers_mutex);
+	entry = get_marker(channel, name);
+	/* Read the refcount while still holding the lock: the entry may be
+	 * freed once markers_mutex is released. */
+	enabled = entry && !!entry->refcount;
+	mutex_unlock(&markers_mutex);
+
+	return enabled;
+}
+//ust// #endif
+
+int marker_module_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct module *mod = data;
+
+ switch (val) {
+ case MODULE_STATE_COMING:
+ marker_update_probe_range(mod->markers,
+ mod->markers + mod->num_markers);
+ break;
+ case MODULE_STATE_GOING:
+ marker_update_probe_range(mod->markers,
+ mod->markers + mod->num_markers);
+ break;
+ }
+ return 0;
+}
+
+struct notifier_block marker_module_nb = {
+ .notifier_call = marker_module_notify,
+ .priority = 0,
+};
+
+//ust// static int init_markers(void)
+//ust// {
+//ust// return register_module_notifier(&marker_module_nb);
+//ust// }
+//ust// __initcall(init_markers);
+/* TODO: invoke marker_module_notify() when a library is linked at runtime (dlopen)? */
+
+#endif /* CONFIG_MODULES */
+
+void ltt_dump_marker_state(struct ltt_trace_struct *trace)
+{
+ struct marker_iter iter;
+ struct ltt_probe_private_data call_data;
+ const char *channel;
+
+ call_data.trace = trace;
+ call_data.serializer = NULL;
+
+ marker_iter_reset(&iter);
+ marker_iter_start(&iter);
+ for (; iter.marker != NULL; marker_iter_next(&iter)) {
+ if (!_imv_read(iter.marker->state))
+ continue;
+ channel = ltt_channels_get_name_from_index(
+ iter.marker->channel_id);
+ __trace_mark(0, metadata, core_marker_id,
+ &call_data,
+ "channel %s name %s event_id %hu "
+ "int #1u%zu long #1u%zu pointer #1u%zu "
+ "size_t #1u%zu alignment #1u%u",
+ channel,
+ iter.marker->name,
+ iter.marker->event_id,
+ sizeof(int), sizeof(long),
+ sizeof(void *), sizeof(size_t),
+ ltt_get_alignment());
+ if (iter.marker->format)
+ __trace_mark(0, metadata,
+ core_marker_format,
+ &call_data,
+ "channel %s name %s format %s",
+ channel,
+ iter.marker->name,
+ iter.marker->format);
+ }
+ marker_iter_stop(&iter);
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_dump_marker_state);
+
+
+static LIST_HEAD(libs);
+
+/*
+ * Returns 1 if the current marker was found, 0 otherwise.
+ */
+int lib_get_iter_markers(struct marker_iter *iter)
+{
+ struct lib *iter_lib;
+ int found = 0;
+
+//ust// mutex_lock(&module_mutex);
+ list_for_each_entry(iter_lib, &libs, list) {
+ if (iter_lib < iter->lib)
+ continue;
+ else if (iter_lib > iter->lib)
+ iter->marker = NULL;
+ found = marker_get_iter_range(&iter->marker,
+ iter_lib->markers_start,
+ iter_lib->markers_start + iter_lib->markers_count);
+ if (found) {
+ iter->lib = iter_lib;
+ break;
+ }
+ }
+//ust// mutex_unlock(&module_mutex);
+ return found;
+}
+
+void lib_update_markers(void)
+{
+ struct lib *lib;
+
+//ust// mutex_lock(&module_mutex);
+ list_for_each_entry(lib, &libs, list)
+ marker_update_probe_range(lib->markers_start,
+ lib->markers_start + lib->markers_count);
+//ust// mutex_unlock(&module_mutex);
+}
+
+static void (*new_marker_cb)(struct marker *) = NULL;
+
+void marker_set_new_marker_cb(void (*cb)(struct marker *))
+{
+ new_marker_cb = cb;
+}
+
+static void new_markers(struct marker *start, struct marker *end)
+{
+	struct marker *m;
+
+	if (new_marker_cb) {
+		for (m = start; m < end; m++)
+			new_marker_cb(m);
+	}
+}
+
+int marker_register_lib(struct marker *markers_start, int markers_count)
+{
+ struct lib *pl;
+
+	pl = (struct lib *) malloc(sizeof(struct lib));
+	if (!pl)
+		return -ENOMEM;
+
+ pl->markers_start = markers_start;
+ pl->markers_count = markers_count;
+
+ /* FIXME: maybe protect this with its own mutex? */
+ lock_markers();
+ list_add(&pl->list, &libs);
+ unlock_markers();
+
+ new_markers(markers_start, markers_start + markers_count);
+
+ /* FIXME: update just the loaded lib */
+ lib_update_markers();
+
+	DBG("just registered a markers section at %p containing %d markers", markers_start, markers_count);
+
+ return 0;
+}
+
+int marker_unregister_lib(struct marker *markers_start, int markers_count)
+{
+	/* FIXME: implement; but before implementing, marker_register_lib must
+	 * have appropriate locking. */
+
+ return 0;
+}
+
+static int initialized = 0;
+
+void __attribute__((constructor)) init_markers(void)
+{
+	if (!initialized) {
+		marker_register_lib(__start___markers,
+				__stop___markers - __start___markers);
+		DBG("markers_start: %p, markers_stop: %p", __start___markers, __stop___markers);
+		initialized = 1;
+	}
+}
--- /dev/null
+/*
+ * Code markup for dynamic and static tracing.
+ *
+ * See Documentation/marker.txt.
+ *
+ * (C) Copyright 2006 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * (C) Copyright 2009 Pierre-Marc Fournier <pierre-marc dot fournier at polymtl dot ca>
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#ifndef _LINUX_MARKER_H
+#define _LINUX_MARKER_H
+
+#include <stdarg.h>
+//ust// #include <linux/types.h>
+#include "immediate.h"
+//ust// #include <linux/ltt-channels.h>
+#include "kernelcompat.h"
+#include "compiler.h"
+#include "list.h"
+
+//ust// struct module;
+//ust// struct task_struct;
+struct marker;
+
+/**
+ * marker_probe_func - Type of a marker probe function
+ * @mdata: marker data
+ * @probe_private: probe private data
+ * @call_private: call site private data
+ * @fmt: format string
+ * @args: variable argument list pointer. A pointer is used because C offers
+ *	no portable way to hand a va_list to the callee otherwise.
+ *
+ * Type of marker probe functions. They receive the mdata and need to parse the
+ * format string to recover the variable argument list.
+ */
+typedef void marker_probe_func(const struct marker *mdata,
+ void *probe_private, void *call_private,
+ const char *fmt, va_list *args);
+
+struct marker_probe_closure {
+ marker_probe_func *func; /* Callback */
+ void *probe_private; /* Private probe data */
+};
+
+struct marker {
+ const char *channel; /* Name of channel where to send data */
+ const char *name; /* Marker name */
+ const char *format; /* Marker format string, describing the
+ * variable argument list.
+ */
+ DEFINE_IMV(char, state);/* Immediate value state. */
+ char ptype; /* probe type : 0 : single, 1 : multi */
+ /* Probe wrapper */
+ u16 channel_id; /* Numeric channel identifier, dynamic */
+ u16 event_id; /* Numeric event identifier, dynamic */
+ void (*call)(const struct marker *mdata, void *call_private, ...);
+ struct marker_probe_closure single;
+ struct marker_probe_closure *multi;
+ const char *tp_name; /* Optional tracepoint name */
+ void *tp_cb; /* Optional tracepoint callback */
+} __attribute__((aligned(8)));
+
+//ust// #ifdef CONFIG_MARKERS
+
+#define _DEFINE_MARKER(channel, name, tp_name_str, tp_cb, format) \
+ static const char __mstrtab_##channel##_##name[] \
+ __attribute__((section("__markers_strings"))) \
+ = #channel "\0" #name "\0" format; \
+ static struct marker __mark_##channel##_##name \
+ __attribute__((section("__markers"), aligned(8))) = \
+ { __mstrtab_##channel##_##name, \
+ &__mstrtab_##channel##_##name[sizeof(#channel)], \
+ &__mstrtab_##channel##_##name[sizeof(#channel) + \
+ sizeof(#name)], \
+ 0, 0, 0, 0, marker_probe_cb, \
+ { __mark_empty_function, NULL}, \
+ NULL, tp_name_str, tp_cb }
+
+#define DEFINE_MARKER(channel, name, format) \
+ _DEFINE_MARKER(channel, name, NULL, NULL, format)
+
+#define DEFINE_MARKER_TP(channel, name, tp_name, tp_cb, format) \
+ _DEFINE_MARKER(channel, name, #tp_name, tp_cb, format)
+
+/*
+ * Make sure the alignment of the structure in the __markers section will
+ * not add unwanted padding between the beginning of the section and the
+ * structure. Force alignment to the same alignment as the section start.
+ *
+ * The "generic" argument controls which marker enabling mechanism must be used.
+ * If generic is true, a variable read is used.
+ * If generic is false, immediate values are used.
+ */
+#define __trace_mark(generic, channel, name, call_private, format, args...) \
+ do { \
+ DEFINE_MARKER(channel, name, format); \
+ __mark_check_format(format, ## args); \
+ if (!generic) { \
+ if (unlikely(imv_read( \
+ __mark_##channel##_##name.state))) \
+ (*__mark_##channel##_##name.call) \
+ (&__mark_##channel##_##name, \
+ call_private, ## args); \
+ } else { \
+ if (unlikely(_imv_read( \
+ __mark_##channel##_##name.state))) \
+ (*__mark_##channel##_##name.call) \
+ (&__mark_##channel##_##name, \
+ call_private, ## args); \
+ } \
+ } while (0)
+
+#define __trace_mark_tp(channel, name, call_private, tp_name, tp_cb, \
+ format, args...) \
+ do { \
+ void __check_tp_type(void) \
+ { \
+ register_trace_##tp_name(tp_cb); \
+ } \
+ DEFINE_MARKER_TP(channel, name, tp_name, tp_cb, format);\
+ __mark_check_format(format, ## args); \
+ (*__mark_##channel##_##name.call)(&__mark_##channel##_##name, \
+ call_private, ## args); \
+ } while (0)
+
+extern void marker_update_probe_range(struct marker *begin,
+ struct marker *end);
+
+#define GET_MARKER(channel, name) (__mark_##channel##_##name)
+
+//ust// #else /* !CONFIG_MARKERS */
+//ust// #define DEFINE_MARKER(channel, name, tp_name, tp_cb, format)
+//ust// #define __trace_mark(generic, channel, name, call_private, format, args...) \
+//ust// __mark_check_format(format, ## args)
+//ust// #define __trace_mark_tp(channel, name, call_private, tp_name, tp_cb, \
+//ust// format, args...) \
+//ust// do { \
+//ust// void __check_tp_type(void) \
+//ust// { \
+//ust// register_trace_##tp_name(tp_cb); \
+//ust// } \
+//ust// __mark_check_format(format, ## args); \
+//ust// } while (0)
+//ust// static inline void marker_update_probe_range(struct marker *begin,
+//ust// struct marker *end)
+//ust// { }
+//ust// #define GET_MARKER(channel, name)
+//ust// #endif /* CONFIG_MARKERS */
+
+/**
+ * trace_mark - Marker using code patching
+ * @channel: marker channel (where to send the data), not quoted.
+ * @name: marker name, not quoted.
+ * @format: format string
+ * @args...: variable argument list
+ *
+ * Places a marker using optimized code patching technique (imv_read())
+ * to be enabled when immediate values are present.
+ */
+#define trace_mark(channel, name, format, args...) \
+ __trace_mark(0, channel, name, NULL, format, ## args)
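+
+/*
+ * Example (hypothetical channel and event names):
+ *
+ *	trace_mark(ust, myevent, "arg1 %d arg2 %s", v, str);
+ *
+ * This expands to a per-call-site struct marker in the __markers section plus
+ * a conditional call that stays a near-nop while the marker is disabled.
+ */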
+
+/**
+ * _trace_mark - Marker using variable read
+ * @channel: marker channel (where to send the data), not quoted.
+ * @name: marker name, not quoted.
+ * @format: format string
+ * @args...: variable argument list
+ *
+ * Places a marker using a standard memory read (_imv_read()) to be
+ * enabled. Should be used for markers in code paths where instruction
+ * modification based enabling is not welcome. (__init and __exit functions,
+ * lockdep, some traps, printk).
+ */
+#define _trace_mark(channel, name, format, args...) \
+ __trace_mark(1, channel, name, NULL, format, ## args)
+
+/**
+ * trace_mark_tp - Marker in a tracepoint callback
+ * @channel: marker channel (where to send the data), not quoted.
+ * @name: marker name, not quoted.
+ * @tp_name: tracepoint name, not quoted.
+ * @tp_cb: tracepoint callback. Should have an associated global symbol so it
+ * is not optimized away by the compiler (should not be static).
+ * @format: format string
+ * @args...: variable argument list
+ *
+ * Places a marker in a tracepoint callback.
+ */
+#define trace_mark_tp(channel, name, tp_name, tp_cb, format, args...) \
+ __trace_mark_tp(channel, name, NULL, tp_name, tp_cb, format, ## args)
+
+/**
+ * MARK_NOARGS - Format string for a marker with no argument.
+ */
+#define MARK_NOARGS " "
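+
+/*
+ * Example: a marker that carries no payload must still pass a format string,
+ * so MARK_NOARGS is used (hypothetical names):
+ *
+ *	trace_mark(ust, my_event, MARK_NOARGS);
+ */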
+
+extern void lock_markers(void);
+extern void unlock_markers(void);
+
+extern void markers_compact_event_ids(void);
+
+/* To be used for string format validity checking with gcc */
+static inline void __printf(1, 2) ___mark_check_format(const char *fmt, ...)
+{
+}
+
+#define __mark_check_format(format, args...) \
+ do { \
+ if (0) \
+ ___mark_check_format(format, ## args); \
+ } while (0)
+
+extern marker_probe_func __mark_empty_function;
+
+extern void marker_probe_cb(const struct marker *mdata,
+ void *call_private, ...);
+
+/*
+ * Connect a probe to a marker.
+ * private data pointer must be a valid allocated memory address, or NULL.
+ */
+extern int marker_probe_register(const char *channel, const char *name,
+ const char *format, marker_probe_func *probe, void *probe_private);
+
+/*
+ * Returns the private data given to marker_probe_register.
+ */
+extern int marker_probe_unregister(const char *channel, const char *name,
+ marker_probe_func *probe, void *probe_private);
+/*
+ * Unregister a marker by providing the registered private data.
+ */
+extern int marker_probe_unregister_private_data(marker_probe_func *probe,
+ void *probe_private);
+
+extern void *marker_get_private_data(const char *channel, const char *name,
+ marker_probe_func *probe, int num);
+
+/*
+ * marker_synchronize_unregister must be called between the last marker probe
+ * unregistration and the first one of
+ * - the end of module exit function
+ * - the free of any resource used by the probes
+ * to ensure the code and data are valid for any possibly running probes.
+ */
+#define marker_synchronize_unregister() synchronize_sched()
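+
+/*
+ * Example teardown sequence (a sketch; "my_probe" and "my_data" are
+ * hypothetical names):
+ *
+ *	marker_probe_unregister("ust", "my_event", my_probe, my_data);
+ *	marker_synchronize_unregister();
+ *	free(my_data);	(now safe: no probe invocation can still see it)
+ */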
+
+struct marker_iter {
+//ust// struct module *module;
+ struct lib *lib;
+ struct marker *marker;
+};
+
+extern void marker_iter_start(struct marker_iter *iter);
+extern void marker_iter_next(struct marker_iter *iter);
+extern void marker_iter_stop(struct marker_iter *iter);
+extern void marker_iter_reset(struct marker_iter *iter);
+extern int marker_get_iter_range(struct marker **marker, struct marker *begin,
+ struct marker *end);
+
+extern void marker_update_process(void);
+extern int is_marker_enabled(const char *channel, const char *name);
+
+//ust// #ifdef CONFIG_MARKERS_USERSPACE
+//ust// extern void exit_user_markers(struct task_struct *p);
+//ust// #else
+//ust// static inline void exit_user_markers(struct task_struct *p)
+//ust// {
+//ust// }
+//ust// #endif
+
+
+struct lib {
+ struct marker *markers_start;
+ int markers_count;
+ struct list_head list;
+};
+
+int marker_register_lib(struct marker *markers_start, int markers_count);
+
+#define MARKER_LIB \
+extern struct marker __start___markers[] __attribute__((visibility("hidden"))); \
+extern struct marker __stop___markers[] __attribute__((visibility("hidden"))); \
+ \
+static void __attribute__((constructor)) __markers__init(void) \
+{ \
+ marker_register_lib(__start___markers, (((long)__stop___markers)-((long)__start___markers))/sizeof(struct marker));\
+}
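+
+/*
+ * Example: an instrumented library places MARKER_LIB once in one of its .c
+ * files so that its __markers section is registered when it is loaded:
+ *
+ *	MARKER_LIB
+ */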
+
+void marker_set_new_marker_cb(void (*cb)(struct marker *));
+
+#endif
--- /dev/null
+/*
+ * Public API and common code for kernel->userspace relay file support.
+ *
+ * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
+ * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
+ * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * Moved to kernel/relay.c by Paul Mundt, 2006.
+ * November 2006 - CPU hotplug support by Mathieu Desnoyers
+ * (mathieu.desnoyers@polymtl.ca)
+ *
+ * This file is released under the GPL.
+ */
+//ust// #include <linux/errno.h>
+//ust// #include <linux/stddef.h>
+//ust// #include <linux/slab.h>
+//ust// #include <linux/module.h>
+//ust// #include <linux/string.h>
+//ust// #include <linux/ltt-relay.h>
+//ust// #include <linux/vmalloc.h>
+//ust// #include <linux/mm.h>
+//ust// #include <linux/cpu.h>
+//ust// #include <linux/splice.h>
+//ust// #include <linux/bitops.h>
+#include "kernelcompat.h"
+#include <sys/mman.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include "list.h"
+#include "relay.h"
+#include "channels.h"
+#include "kref.h"
+#include "tracer.h"
+#include "tracercore.h"
+#include "usterr.h"
+
+/* list of open channels, for cpu hotplug */
+static DEFINE_MUTEX(relay_channels_mutex);
+static LIST_HEAD(relay_channels);
+
+
+static struct dentry *ltt_create_buf_file_callback(struct rchan_buf *buf);
+
+/**
+ * relay_alloc_buf - allocate a channel buffer
+ * @buf: the buffer struct
+ * @size: total size of the buffer
+ */
+//ust// static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
+//ust//{
+//ust// unsigned int i, n_pages;
+//ust// struct buf_page *buf_page, *n;
+//ust//
+//ust// *size = PAGE_ALIGN(*size);
+//ust// n_pages = *size >> PAGE_SHIFT;
+//ust//
+//ust// INIT_LIST_HEAD(&buf->pages);
+//ust//
+//ust// for (i = 0; i < n_pages; i++) {
+//ust// buf_page = kmalloc_node(sizeof(*buf_page), GFP_KERNEL,
+//ust// cpu_to_node(buf->cpu));
+//ust// if (unlikely(!buf_page))
+//ust// goto depopulate;
+//ust// buf_page->page = alloc_pages_node(cpu_to_node(buf->cpu),
+//ust// GFP_KERNEL | __GFP_ZERO, 0);
+//ust// if (unlikely(!buf_page->page)) {
+//ust// kfree(buf_page);
+//ust// goto depopulate;
+//ust// }
+//ust// list_add_tail(&buf_page->list, &buf->pages);
+//ust// buf_page->offset = (size_t)i << PAGE_SHIFT;
+//ust// buf_page->buf = buf;
+//ust// set_page_private(buf_page->page, (unsigned long)buf_page);
+//ust// if (i == 0) {
+//ust// buf->wpage = buf_page;
+//ust// buf->hpage[0] = buf_page;
+//ust// buf->hpage[1] = buf_page;
+//ust// buf->rpage = buf_page;
+//ust// }
+//ust// }
+//ust// buf->page_count = n_pages;
+//ust// return 0;
+//ust//
+//ust//depopulate:
+//ust// list_for_each_entry_safe(buf_page, n, &buf->pages, list) {
+//ust// list_del_init(&buf_page->list);
+//ust// __free_page(buf_page->page);
+//ust// kfree(buf_page);
+//ust// }
+//ust// return -ENOMEM;
+//ust//}
+
+static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
+{
+//ust// unsigned int n_pages;
+//ust// struct buf_page *buf_page, *n;
+
+ void *ptr;
+ int result;
+
+ *size = PAGE_ALIGN(*size);
+
+	buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
+	if (buf->shmid == -1) {
+		PERROR("shmget");
+		return -1;
+	}
+
+	ptr = shmat(buf->shmid, NULL, 0);
+	if (ptr == (void *) -1) {
+		PERROR("shmat");
+		goto destroy_shmem;
+	}
+
+	/* Already mark the shared memory for destruction. This will occur only
+	 * when all users have detached.
+	 */
+	result = shmctl(buf->shmid, IPC_RMID, NULL);
+	if (result == -1) {
+		PERROR("shmctl");
+		return -1;
+	}
+
+	buf->buf_data = ptr;
+	buf->buf_size = *size;
+
+	return 0;
+
+ destroy_shmem:
+	result = shmctl(buf->shmid, IPC_RMID, NULL);
+	if (result == -1) {
+		PERROR("shmctl");
+	}
+
+	return -1;
+}
+
+/**
+ * relay_create_buf - allocate and initialize a channel buffer
+ * @chan: the relay channel
+ *
+ * Returns the channel buffer if successful, %NULL otherwise.
+ */
+static struct rchan_buf *relay_create_buf(struct rchan *chan)
+{
+ int ret;
+ struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+// buf->cpu = cpu;
+ ret = relay_alloc_buf(buf, &chan->alloc_size);
+ if (ret)
+ goto free_buf;
+
+ buf->chan = chan;
+ kref_get(&buf->chan->kref);
+ return buf;
+
+free_buf:
+ kfree(buf);
+ return NULL;
+}
+
+/**
+ * relay_destroy_channel - free the channel struct
+ * @kref: target kernel reference that contains the relay channel
+ *
+ * Should only be called from kref_put().
+ */
+static void relay_destroy_channel(struct kref *kref)
+{
+ struct rchan *chan = container_of(kref, struct rchan, kref);
+ kfree(chan);
+}
+
+/**
+ * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
+ * @buf: the buffer struct
+ */
+static void relay_destroy_buf(struct rchan_buf *buf)
+{
+	struct rchan *chan = buf->chan;
+	int result;
+
+	result = munmap(buf->buf_data, buf->buf_size);
+	if (result == -1) {
+		PERROR("munmap");
+	}
+
+//ust// chan->buf[buf->cpu] = NULL;
+ kfree(buf);
+ kref_put(&chan->kref, relay_destroy_channel);
+}
+
+/**
+ * relay_remove_buf - remove a channel buffer
+ * @kref: target kernel reference that contains the relay buffer
+ *
+ * Removes the file from the filesystem, which also frees the
+ * rchan_buf struct and the channel buffer. Should only be called from
+ * kref_put().
+ */
+static void relay_remove_buf(struct kref *kref)
+{
+ struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
+//ust// buf->chan->cb->remove_buf_file(buf);
+ relay_destroy_buf(buf);
+}
+
+/*
+ * High-level relay kernel API and associated functions.
+ */
+
+/*
+ * rchan_callback implementations defining default channel behavior. Used
+ * in place of corresponding NULL values in client callback struct.
+ */
+
+/*
+ * create_buf_file_create() default callback. Does nothing.
+ */
+static struct dentry *create_buf_file_default_callback(const char *filename,
+ struct dentry *parent,
+ int mode,
+ struct rchan_buf *buf)
+{
+ return NULL;
+}
+
+/*
+ * remove_buf_file() default callback. Does nothing.
+ */
+static int remove_buf_file_default_callback(struct dentry *dentry)
+{
+ return -EINVAL;
+}
+
+/**
+ * wakeup_readers - wake up readers waiting on a channel
+ * @data: contains the channel buffer
+ *
+ * This is the timer function used to defer reader waking.
+ */
+//ust// static void wakeup_readers(unsigned long data)
+//ust// {
+//ust// struct rchan_buf *buf = (struct rchan_buf *)data;
+//ust// wake_up_interruptible(&buf->read_wait);
+//ust// }
+
+/**
+ * __relay_reset - reset a channel buffer
+ * @buf: the channel buffer
+ * @init: 1 if this is a first-time initialization
+ *
+ * See relay_reset() for description of effect.
+ */
+static void __relay_reset(struct rchan_buf *buf, unsigned int init)
+{
+ if (init) {
+//ust// init_waitqueue_head(&buf->read_wait);
+ kref_init(&buf->kref);
+//ust// setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
+	}
+//ust//	else
+//ust//		del_timer_sync(&buf->timer);
+
+ buf->finalized = 0;
+}
+
+/*
+ * relay_open_buf - create a new relay channel buffer
+ *
+ * used by relay_open() and CPU hotplug.
+ */
+static struct rchan_buf *relay_open_buf(struct rchan *chan)
+{
+	struct rchan_buf *buf = NULL;
+//ust//	struct dentry *dentry;
+//ust//	char *tmpname;
+
+//ust// tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
+//ust// if (!tmpname)
+//ust// goto end;
+//ust// snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
+
+ buf = relay_create_buf(chan);
+ if (!buf)
+ goto free_name;
+
+ __relay_reset(buf, 1);
+
+ /* Create file in fs */
+//ust// dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
+//ust// buf);
+
+ ltt_create_buf_file_callback(buf); // ust //
+
+//ust// if (!dentry)
+//ust// goto free_buf;
+//ust//
+//ust// buf->dentry = dentry;
+
+ goto free_name;
+
+//ust//free_buf:
+//ust//	relay_destroy_buf(buf);
+//ust//	buf = NULL;
+free_name:
+//ust//	kfree(tmpname);
+//ust//end:
+ return buf;
+}
+
+/**
+ * relay_close_buf - close a channel buffer
+ * @buf: channel buffer
+ *
+ * Marks the buffer finalized and restores the default callbacks.
+ * The channel buffer and channel buffer data structure are then freed
+ * automatically when the last reference is given up.
+ */
+static void relay_close_buf(struct rchan_buf *buf)
+{
+//ust// del_timer_sync(&buf->timer);
+ kref_put(&buf->kref, relay_remove_buf);
+}
+
+//ust// static void setup_callbacks(struct rchan *chan,
+//ust// struct rchan_callbacks *cb)
+//ust// {
+//ust// if (!cb) {
+//ust// chan->cb = &default_channel_callbacks;
+//ust// return;
+//ust// }
+//ust//
+//ust// if (!cb->create_buf_file)
+//ust// cb->create_buf_file = create_buf_file_default_callback;
+//ust// if (!cb->remove_buf_file)
+//ust// cb->remove_buf_file = remove_buf_file_default_callback;
+//ust// chan->cb = cb;
+//ust// }
+
+/**
+ * relay_hotcpu_callback - CPU hotplug callback
+ * @nb: notifier block
+ * @action: hotplug action to take
+ * @hcpu: CPU number
+ *
+ * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
+ */
+//ust// static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
+//ust// unsigned long action,
+//ust// void *hcpu)
+//ust// {
+//ust// unsigned int hotcpu = (unsigned long)hcpu;
+//ust// struct rchan *chan;
+//ust//
+//ust// switch (action) {
+//ust// case CPU_UP_PREPARE:
+//ust// case CPU_UP_PREPARE_FROZEN:
+//ust// mutex_lock(&relay_channels_mutex);
+//ust// list_for_each_entry(chan, &relay_channels, list) {
+//ust// if (chan->buf[hotcpu])
+//ust// continue;
+//ust// chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
+//ust// if (!chan->buf[hotcpu]) {
+//ust// printk(KERN_ERR
+//ust// "relay_hotcpu_callback: cpu %d buffer "
+//ust// "creation failed\n", hotcpu);
+//ust// mutex_unlock(&relay_channels_mutex);
+//ust// return NOTIFY_BAD;
+//ust// }
+//ust// }
+//ust// mutex_unlock(&relay_channels_mutex);
+//ust// break;
+//ust// case CPU_DEAD:
+//ust// case CPU_DEAD_FROZEN:
+//ust// /* No need to flush the cpu : will be flushed upon
+//ust// * final relay_flush() call. */
+//ust// break;
+//ust// }
+//ust// return NOTIFY_OK;
+//ust// }
+
+/**
+ * ltt_relay_open - create a new relay channel
+ * @base_filename: base name of files to create
+ * @parent: dentry of parent directory, %NULL for root directory
+ * @subbuf_size: size of sub-buffers
+ * @n_subbufs: number of sub-buffers
+ * @private_data: user-defined data
+ *
+ * Returns channel pointer if successful, %NULL otherwise.
+ *
+ * Creates a channel buffer using the sizes and attributes specified. In
+ * this userspace port a single buffer is created per channel, rather than
+ * one per cpu.
+ */
+struct rchan *ltt_relay_open(const char *base_filename,
+ struct dentry *parent,
+ size_t subbuf_size,
+ size_t n_subbufs,
+ void *private_data)
+{
+//ust//	unsigned int i;
+ struct rchan *chan;
+//ust// if (!base_filename)
+//ust// return NULL;
+
+ if (!(subbuf_size && n_subbufs))
+ return NULL;
+
+ chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
+ if (!chan)
+ return NULL;
+
+ chan->version = LTT_RELAY_CHANNEL_VERSION;
+ chan->n_subbufs = n_subbufs;
+ chan->subbuf_size = subbuf_size;
+ chan->subbuf_size_order = get_count_order(subbuf_size);
+ chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
+ chan->parent = parent;
+ chan->private_data = private_data;
+//ust// strlcpy(chan->base_filename, base_filename, NAME_MAX);
+//ust// setup_callbacks(chan, cb);
+ kref_init(&chan->kref);
+
+ mutex_lock(&relay_channels_mutex);
+//ust// for_each_online_cpu(i) {
+ chan->buf = relay_open_buf(chan);
+ if (!chan->buf)
+ goto error;
+//ust// }
+ list_add(&chan->list, &relay_channels);
+ mutex_unlock(&relay_channels_mutex);
+
+ return chan;
+
+//ust//free_bufs:
+//ust// for_each_possible_cpu(i) {
+//ust// if (!chan->buf[i])
+//ust// break;
+//ust// relay_close_buf(chan->buf[i]);
+//ust// }
+
+ error:
+ kref_put(&chan->kref, relay_destroy_channel);
+ mutex_unlock(&relay_channels_mutex);
+ return NULL;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_open);
+
+/**
+ * ltt_relay_close - close the channel
+ * @chan: the channel
+ *
+ * Closes all channel buffers and frees the channel.
+ */
+void ltt_relay_close(struct rchan *chan)
+{
+//ust//	unsigned int i;
+
+ if (!chan)
+ return;
+
+ mutex_lock(&relay_channels_mutex);
+//ust// for_each_possible_cpu(i)
+ if (chan->buf)
+ relay_close_buf(chan->buf);
+
+ list_del(&chan->list);
+ kref_put(&chan->kref, relay_destroy_channel);
+ mutex_unlock(&relay_channels_mutex);
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_close);
+
+/*
+ * Start iteration at the previous element. Skip the real list head.
+ */
+//ust// struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
+//ust// struct buf_page *page, size_t offset, ssize_t diff_offset)
+//ust// {
+//ust// struct buf_page *iter;
+//ust// size_t orig_iter_off;
+//ust// unsigned int i = 0;
+//ust//
+//ust// orig_iter_off = page->offset;
+//ust// list_for_each_entry_reverse(iter, &page->list, list) {
+//ust// /*
+//ust// * Skip the real list head.
+//ust// */
+//ust// if (&iter->list == &buf->pages)
+//ust// continue;
+//ust// i++;
+//ust// if (offset >= iter->offset
+//ust// && offset < iter->offset + PAGE_SIZE) {
+//ust// #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
+//ust// if (i > 1) {
+//ust// printk(KERN_WARNING
+//ust// "Backward random access detected in "
+//ust// "ltt_relay. Iterations %u, "
+//ust// "offset %zu, orig iter->off %zu, "
+//ust// "iter->off %zu diff_offset %zd.\n", i,
+//ust// offset, orig_iter_off, iter->offset,
+//ust// diff_offset);
+//ust// WARN_ON(1);
+//ust// }
+//ust// #endif
+//ust// return iter;
+//ust// }
+//ust// }
+//ust// WARN_ON(1);
+//ust// return NULL;
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_find_prev_page);
+
+/*
+ * Start iteration at the next element. Skip the real list head.
+ */
+//ust// struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
+//ust// struct buf_page *page, size_t offset, ssize_t diff_offset)
+//ust// {
+//ust// struct buf_page *iter;
+//ust// unsigned int i = 0;
+//ust// size_t orig_iter_off;
+//ust//
+//ust// orig_iter_off = page->offset;
+//ust// list_for_each_entry(iter, &page->list, list) {
+//ust// /*
+//ust// * Skip the real list head.
+//ust// */
+//ust// if (&iter->list == &buf->pages)
+//ust// continue;
+//ust// i++;
+//ust// if (offset >= iter->offset
+//ust// && offset < iter->offset + PAGE_SIZE) {
+//ust// #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
+//ust// if (i > 1) {
+//ust// printk(KERN_WARNING
+//ust// "Forward random access detected in "
+//ust// "ltt_relay. Iterations %u, "
+//ust// "offset %zu, orig iter->off %zu, "
+//ust// "iter->off %zu diff_offset %zd.\n", i,
+//ust// offset, orig_iter_off, iter->offset,
+//ust// diff_offset);
+//ust// WARN_ON(1);
+//ust// }
+//ust// #endif
+//ust// return iter;
+//ust// }
+//ust// }
+//ust// WARN_ON(1);
+//ust// return NULL;
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_find_next_page);
+
+/**
+ * _ltt_relay_write - write data to a ltt_relay buffer.
+ * @buf : buffer
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @cpy : number of bytes already copied by the caller
+ */
+void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
+ const void *src, size_t len, ssize_t cpy)
+{
+ do {
+ len -= cpy;
+ src += cpy;
+ offset += cpy;
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ WARN_ON(offset >= buf->buf_size);
+
+ cpy = min_t(size_t, len, buf->buf_size - offset);
+ ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
+ } while (unlikely(len != cpy));
+}
+//ust// EXPORT_SYMBOL_GPL(_ltt_relay_write);
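+
+/*
+ * Example (a sketch, assuming the usual inline fast path in the header): the
+ * caller performs a first copy of "cpy" bytes and only falls into
+ * _ltt_relay_write() when the write does not complete in one step:
+ *
+ *	cpy = min_t(size_t, len, buf->buf_size - offset);
+ *	ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
+ *	if (cpy != len)
+ *		_ltt_relay_write(buf, offset, src, len, cpy);
+ */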
+
+/**
+ * ltt_relay_read - read data from ltt_relay_buffer.
+ * @buf : buffer
+ * @offset : offset within the buffer
+ * @dest : destination address
+ * @len : length to read
+ */
+//ust// int ltt_relay_read(struct rchan_buf *buf, size_t offset,
+//ust// void *dest, size_t len)
+//ust// {
+//ust// struct buf_page *page;
+//ust// ssize_t pagecpy, orig_len;
+//ust//
+//ust// orig_len = len;
+//ust// offset &= buf->chan->alloc_size - 1;
+//ust// page = buf->rpage;
+//ust// if (unlikely(!len))
+//ust// return 0;
+//ust// for (;;) {
+//ust// page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
+//ust// pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+//ust// memcpy(dest, page_address(page->page) + (offset & ~PAGE_MASK),
+//ust// pagecpy);
+//ust// len -= pagecpy;
+//ust// if (likely(!len))
+//ust// break;
+//ust// dest += pagecpy;
+//ust// offset += pagecpy;
+//ust// /*
+//ust// * Underlying layer should never ask for reads across
+//ust// * subbuffers.
+//ust// */
+//ust// WARN_ON(offset >= buf->chan->alloc_size);
+//ust// }
+//ust// return orig_len;
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_read);
+
+/**
+ * ltt_relay_read_get_page - Get a whole page to read from
+ * @buf : buffer
+ * @offset : offset within the buffer
+ */
+//ust// struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf, size_t offset)
+//ust// {
+//ust// struct buf_page *page;
+
+//ust// offset &= buf->chan->alloc_size - 1;
+//ust// page = buf->rpage;
+//ust// page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
+//ust// return page;
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_read_get_page);
+
+/**
+ * ltt_relay_offset_address - get address of a location within the buffer
+ * @buf : buffer
+ * @offset : offset within the buffer.
+ *
+ * Return the address where a given offset is located.
+ * Should be used to get the current subbuffer header pointer. Given we know
+ * it's never on a page boundary, it's safe to write directly to this address,
+ * as long as the write is never bigger than a page size.
+ */
+void *ltt_relay_offset_address(struct rchan_buf *buf, size_t offset)
+{
+//ust// struct buf_page *page;
+//ust// unsigned int odd;
+//ust//
+//ust// offset &= buf->chan->alloc_size - 1;
+//ust// odd = !!(offset & buf->chan->subbuf_size);
+//ust// page = buf->hpage[odd];
+//ust// if (offset < page->offset || offset >= page->offset + PAGE_SIZE)
+//ust// buf->hpage[odd] = page = buf->wpage;
+//ust// page = ltt_relay_cache_page(buf, &buf->hpage[odd], page, offset);
+//ust// return page_address(page->page) + (offset & ~PAGE_MASK);
+	return ((char *)buf->buf_data) + offset;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_offset_address);
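+
+/*
+ * Example (same pattern as the begin/end callbacks in ltt-relay.c below):
+ *
+ *	struct ltt_subbuffer_header *header =
+ *		(struct ltt_subbuffer_header *)
+ *		ltt_relay_offset_address(buf,
+ *			subbuf_idx * buf->chan->subbuf_size);
+ */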
+
+/**
+ * relay_file_open - open file op for relay files
+ * @inode: the inode
+ * @filp: the file
+ *
+ * Increments the channel buffer refcount.
+ */
+//ust// static int relay_file_open(struct inode *inode, struct file *filp)
+//ust// {
+//ust// struct rchan_buf *buf = inode->i_private;
+//ust// kref_get(&buf->kref);
+//ust// filp->private_data = buf;
+//ust//
+//ust// return nonseekable_open(inode, filp);
+//ust// }
+
+/**
+ * relay_file_release - release file op for relay files
+ * @inode: the inode
+ * @filp: the file
+ *
+ * Decrements the channel refcount, as the filesystem is
+ * no longer using it.
+ */
+//ust// static int relay_file_release(struct inode *inode, struct file *filp)
+//ust// {
+//ust// struct rchan_buf *buf = filp->private_data;
+//ust// kref_put(&buf->kref, relay_remove_buf);
+//ust//
+//ust// return 0;
+//ust// }
+
+//ust// const struct file_operations ltt_relay_file_operations = {
+//ust// .open = relay_file_open,
+//ust// .release = relay_file_release,
+//ust// };
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_file_operations);
+
+//ust// static __init int relay_init(void)
+//ust// {
+//ust// hotcpu_notifier(relay_hotcpu_callback, 5);
+//ust// return 0;
+//ust// }
+
+//ust// module_init(relay_init);
+/*
+ * ltt/ltt-relay.c
+ *
+ * (C) Copyright 2005-2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * LTTng lockless buffer space management (reader/writer).
+ *
+ * Author:
+ * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * Inspired from LTT :
+ * Karim Yaghmour (karim@opersys.com)
+ * Tom Zanussi (zanussi@us.ibm.com)
+ * Bob Wisniewski (bob@watson.ibm.com)
+ * And from K42 :
+ * Bob Wisniewski (bob@watson.ibm.com)
+ *
+ * Changelog:
+ * 08/10/08, Cleanup.
+ * 19/10/05, Complete lockless mechanism.
+ * 27/05/05, Modular redesign and rewrite.
+ *
+ * Userspace reader semantic :
+ * while (poll fd != POLLHUP) {
+ * - ioctl RELAY_GET_SUBBUF_SIZE
+ * while (1) {
+ * - ioctl GET_SUBBUF
+ * - splice 1 subbuffer worth of data to a pipe
+ * - splice the data from pipe to disk/network
+ * - ioctl PUT_SUBBUF, check error value
+ * if err val < 0, previous subbuffer was corrupted.
+ * }
+ * }
+ */
+
+//ust// #include <linux/time.h>
+//ust// #include <linux/ltt-tracer.h>
+//ust// #include <linux/ltt-relay.h>
+//ust// #include <linux/module.h>
+//ust// #include <linux/string.h>
+//ust// #include <linux/slab.h>
+//ust// #include <linux/init.h>
+//ust// #include <linux/rcupdate.h>
+//ust// #include <linux/sched.h>
+//ust// #include <linux/bitops.h>
+//ust// #include <linux/fs.h>
+//ust// #include <linux/smp_lock.h>
+//ust// #include <linux/debugfs.h>
+//ust// #include <linux/stat.h>
+//ust// #include <linux/cpu.h>
+//ust// #include <linux/pipe_fs_i.h>
+//ust// #include <linux/splice.h>
+//ust// #include <asm/atomic.h>
+//ust// #include <asm/local.h>
+
+#if 0
+#define printk_dbg(fmt, args...) printk(fmt, args)
+#else
+#define printk_dbg(fmt, args...)
+#endif
+
+/*
+ * Last TSC comparison functions. Check if the current TSC overflows
+ * LTT_TSC_BITS bits from the last TSC read. Reads and writes last_tsc
+ * atomically.
+ */
+
+#if (BITS_PER_LONG == 32)
+static inline void save_last_tsc(struct ltt_channel_buf_struct *ltt_buf,
+ u64 tsc)
+{
+ ltt_buf->last_tsc = (unsigned long)(tsc >> LTT_TSC_BITS);
+}
+
+static inline int last_tsc_overflow(struct ltt_channel_buf_struct *ltt_buf,
+ u64 tsc)
+{
+ unsigned long tsc_shifted = (unsigned long)(tsc >> LTT_TSC_BITS);
+
+ if (unlikely((tsc_shifted - ltt_buf->last_tsc)))
+ return 1;
+ else
+ return 0;
+}
+#else
+static inline void save_last_tsc(struct ltt_channel_buf_struct *ltt_buf,
+ u64 tsc)
+{
+ ltt_buf->last_tsc = (unsigned long)tsc;
+}
+
+static inline int last_tsc_overflow(struct ltt_channel_buf_struct *ltt_buf,
+ u64 tsc)
+{
+ if (unlikely((tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS))
+ return 1;
+ else
+ return 0;
+}
+#endif
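+
+/*
+ * Worked example (on a 64-bit build, and assuming LTT_TSC_BITS is, say, 27):
+ * an event whose TSC is more than 2^27 cycles after last_tsc makes
+ * (tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS non-zero, so the tracer must emit
+ * a full timestamp instead of a truncated one.
+ */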
+
+//ust// static struct file_operations ltt_file_operations;
+
+/*
+ * A switch is done during tracing or as a final flush after tracing (so it
+ * won't write in the new sub-buffer).
+ */
+enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
+
+static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
+ struct ltt_channel_struct *ltt_chan,
+ struct rchan_buf *buf,
+ unsigned int n_subbufs);
+
+static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan);
+
+static void ltt_force_switch(struct rchan_buf *buf,
+ enum force_switch_mode mode);
+
+/*
+ * Trace callbacks
+ */
+static void ltt_buffer_begin_callback(struct rchan_buf *buf,
+ u64 tsc, unsigned int subbuf_idx)
+{
+ struct ltt_channel_struct *channel =
+ (struct ltt_channel_struct *)buf->chan->private_data;
+ struct ltt_subbuffer_header *header =
+ (struct ltt_subbuffer_header *)
+ ltt_relay_offset_address(buf,
+ subbuf_idx * buf->chan->subbuf_size);
+
+ header->cycle_count_begin = tsc;
+ header->lost_size = 0xFFFFFFFF; /* for debugging */
+ header->buf_size = buf->chan->subbuf_size;
+ ltt_write_trace_header(channel->trace, header);
+}
+
+/*
+ * offset is assumed never to be 0 here: never deliver a completely empty
+ * subbuffer. The lost size is between 0 and subbuf_size-1.
+ */
+static notrace void ltt_buffer_end_callback(struct rchan_buf *buf,
+ u64 tsc, unsigned int offset, unsigned int subbuf_idx)
+{
+ struct ltt_channel_struct *channel =
+ (struct ltt_channel_struct *)buf->chan->private_data;
+ struct ltt_channel_buf_struct *ltt_buf = channel->buf;
+ struct ltt_subbuffer_header *header =
+ (struct ltt_subbuffer_header *)
+ ltt_relay_offset_address(buf,
+ subbuf_idx * buf->chan->subbuf_size);
+
+ header->lost_size = SUBBUF_OFFSET((buf->chan->subbuf_size - offset),
+ buf->chan);
+ header->cycle_count_end = tsc;
+	header->events_lost = local_read(&ltt_buf->events_lost);
+	header->subbuf_corrupt = local_read(&ltt_buf->corrupted_subbuffers);
+
+}
+
+void (*wake_consumer)(void *, int) = NULL;
+
+void relay_set_wake_consumer(void (*wake)(void *, int))
+{
+ wake_consumer = wake;
+}
+
+void relay_wake_consumer(void *arg, int finished)
+{
+	if (wake_consumer)
+ wake_consumer(arg, finished);
+}
+
+static notrace void ltt_deliver(struct rchan_buf *buf, unsigned int subbuf_idx,
+ void *subbuf)
+{
+ struct ltt_channel_struct *channel =
+ (struct ltt_channel_struct *)buf->chan->private_data;
+ struct ltt_channel_buf_struct *ltt_buf = channel->buf;
+ int result;
+
+	result = write(ltt_buf->data_ready_fd_write, "1", 1);
+	if (result == -1) {
+		PERROR("write (in ltt_deliver)");
+		ERR("this should never happen!");
+	}
+//ust//	atomic_set(&ltt_buf->wakeup_readers, 1);
+}
+
+static struct dentry *ltt_create_buf_file_callback(struct rchan_buf *buf)
+{
+ struct ltt_channel_struct *ltt_chan;
+ int err;
+//ust// struct dentry *dentry;
+
+ ltt_chan = buf->chan->private_data;
+ err = ltt_relay_create_buffer(ltt_chan->trace, ltt_chan, buf, buf->chan->n_subbufs);
+ if (err)
+ return ERR_PTR(err);
+
+//ust// dentry = debugfs_create_file(filename, mode, parent, buf,
+//ust//					&ltt_file_operations);
+//ust// if (!dentry)
+//ust// goto error;
+//ust// return dentry;
+ return NULL; //ust//
+//ust//error:
+ ltt_relay_destroy_buffer(ltt_chan);
+ return NULL;
+}
+
+static int ltt_remove_buf_file_callback(struct rchan_buf *buf)
+{
+//ust// struct rchan_buf *buf = dentry->d_inode->i_private;
+ struct ltt_channel_struct *ltt_chan = buf->chan->private_data;
+
+//ust// debugfs_remove(dentry);
+ ltt_relay_destroy_buffer(ltt_chan);
+
+ return 0;
+}
+
+/*
+ * Wake writers :
+ *
+ * This must be done after the trace is removed from the RCU list so that there
+ * are no stalled writers.
+ */
+//ust// static void ltt_relay_wake_writers(struct ltt_channel_buf_struct *ltt_buf)
+//ust// {
+//ust//
+//ust//	if (waitqueue_active(&ltt_buf->write_wait))
+//ust//		wake_up_interruptible(&ltt_buf->write_wait);
+//ust// }
+
+/*
+ * This function should not be called from NMI interrupt context
+ */
+static notrace void ltt_buf_unfull(struct rchan_buf *buf,
+ unsigned int subbuf_idx,
+ long offset)
+{
+//ust// struct ltt_channel_struct *ltt_channel =
+//ust// (struct ltt_channel_struct *)buf->chan->private_data;
+//ust// struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
+//ust//
+//ust// ltt_relay_wake_writers(ltt_buf);
+}
+
+/**
+ * ltt_open - open file op for ltt files
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Open implementation. Makes sure only one open instance of a buffer is
+ * done at a given moment.
+ */
+//ust// static int ltt_open(struct inode *inode, struct file *file)
+//ust// {
+//ust// struct rchan_buf *buf = inode->i_private;
+//ust// struct ltt_channel_struct *ltt_channel =
+//ust// (struct ltt_channel_struct *)buf->chan->private_data;
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
+//ust//
+//ust//	if (!atomic_long_add_unless(&ltt_buf->active_readers, 1, 1))
+//ust// return -EBUSY;
+//ust// return ltt_relay_file_operations.open(inode, file);
+//ust// }
+
+/**
+ * ltt_release - release file op for ltt files
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Release implementation.
+ */
+//ust// static int ltt_release(struct inode *inode, struct file *file)
+//ust// {
+//ust// struct rchan_buf *buf = inode->i_private;
+//ust// struct ltt_channel_struct *ltt_channel =
+//ust// (struct ltt_channel_struct *)buf->chan->private_data;
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
+//ust// int ret;
+//ust//
+//ust//	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
+//ust//	atomic_long_dec(&ltt_buf->active_readers);
+//ust// ret = ltt_relay_file_operations.release(inode, file);
+//ust// WARN_ON(ret);
+//ust// return ret;
+//ust// }
+
+/**
+ * ltt_poll - file op for ltt files
+ * @filp: the file
+ * @wait: poll table
+ *
+ * Poll implementation.
+ */
+//ust// static unsigned int ltt_poll(struct file *filp, poll_table *wait)
+//ust// {
+//ust// unsigned int mask = 0;
+//ust// struct inode *inode = filp->f_dentry->d_inode;
+//ust// struct rchan_buf *buf = inode->i_private;
+//ust// struct ltt_channel_struct *ltt_channel =
+//ust// (struct ltt_channel_struct *)buf->chan->private_data;
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
+//ust//
+//ust// if (filp->f_mode & FMODE_READ) {
+//ust// poll_wait_set_exclusive(wait);
+//ust// poll_wait(filp, &buf->read_wait, wait);
+//ust//
+//ust//		WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
+//ust//		if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
+//ust//				buf->chan)
+//ust//			- SUBBUF_TRUNC(atomic_long_read(&ltt_buf->consumed),
+//ust// buf->chan)
+//ust// == 0) {
+//ust// if (buf->finalized)
+//ust// return POLLHUP;
+//ust// else
+//ust// return 0;
+//ust// } else {
+//ust// struct rchan *rchan =
+//ust// ltt_channel->trans_channel_data;
+//ust//			if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
+//ust//					buf->chan)
+//ust//				- SUBBUF_TRUNC(atomic_long_read(
+//ust//					&ltt_buf->consumed),
+//ust// buf->chan)
+//ust// >= rchan->alloc_size)
+//ust// return POLLPRI | POLLRDBAND;
+//ust// else
+//ust// return POLLIN | POLLRDNORM;
+//ust// }
+//ust// }
+//ust// return mask;
+//ust// }
+
+int ltt_do_get_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, long *pconsumed_old)
+{
+ struct ltt_channel_struct *ltt_channel = (struct ltt_channel_struct *)buf->chan->private_data;
+ long consumed_old, consumed_idx, commit_count, write_offset;
+	consumed_old = atomic_long_read(&ltt_buf->consumed);
+	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
+	commit_count = local_read(&ltt_buf->commit_count[consumed_idx]);
+ /*
+ * Make sure we read the commit count before reading the buffer
+ * data and the write offset. Correct consumed offset ordering
+	 * wrt commit count is ensured by the use of cmpxchg to update
+ * the consumed offset.
+ */
+ smp_rmb();
+	write_offset = local_read(&ltt_buf->offset);
+ /*
+ * Check that the subbuffer we are trying to consume has been
+ * already fully committed.
+ */
+ if (((commit_count - buf->chan->subbuf_size)
+ & ltt_channel->commit_count_mask)
+ - (BUFFER_TRUNC(consumed_old, buf->chan)
+ >> ltt_channel->n_subbufs_order)
+ != 0) {
+ return -EAGAIN;
+ }
+ /*
+ * Check that we are not about to read the same subbuffer in
+ * which the writer head is.
+ */
+ if ((SUBBUF_TRUNC(write_offset, buf->chan)
+ - SUBBUF_TRUNC(consumed_old, buf->chan))
+ == 0) {
+ return -EAGAIN;
+ }
+
+ *pconsumed_old = consumed_old;
+ return 0;
+}
+
+int ltt_do_put_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, u32 uconsumed_old)
+{
+ long consumed_new, consumed_old;
+
+	consumed_old = atomic_long_read(&ltt_buf->consumed);
+ consumed_old = consumed_old & (~0xFFFFFFFFL);
+ consumed_old = consumed_old | uconsumed_old;
+ consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
+
+//ust//	spin_lock(&ltt_buf->full_lock);
+	if (atomic_long_cmpxchg(&ltt_buf->consumed, consumed_old,
+ consumed_new)
+ != consumed_old) {
+		/* We have been pushed by the writer: the last
+ * buffer read _is_ corrupted! It can also
+ * happen if this is a buffer we never got. */
+//ust//		spin_unlock(&ltt_buf->full_lock);
+ return -EIO;
+ } else {
+ /* tell the client that buffer is now unfull */
+ int index;
+ long data;
+ index = SUBBUF_INDEX(consumed_old, buf->chan);
+ data = BUFFER_OFFSET(consumed_old, buf->chan);
+ ltt_buf_unfull(buf, index, data);
+//ust//		spin_unlock(&ltt_buf->full_lock);
+ }
+ return 0;
+}
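+
+/*
+ * Example consumer loop built on the two helpers above (a sketch; the actual
+ * consumer lives outside this file and the read step is application-defined):
+ *
+ *	long consumed_old;
+ *
+ *	while (!ltt_do_get_subbuf(buf, ltt_buf, &consumed_old)) {
+ *		... read one subbuffer worth of data starting at consumed_old ...
+ *		if (ltt_do_put_subbuf(buf, ltt_buf, (u32)consumed_old) < 0)
+ *			... subbuffer was corrupted (reader pushed by writer) ...
+ *	}
+ */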
+
+/**
+ * ltt_ioctl - control on the debugfs file
+ *
+ * @inode: the inode
+ * @filp: the file
+ * @cmd: the command
+ * @arg: command arg
+ *
+ * This ioctl implements four commands necessary for a minimal
+ * producer/consumer implementation:
+ * RELAY_GET_SUBBUF
+ * Get the next sub buffer that can be read. It never blocks.
+ * RELAY_PUT_SUBBUF
+ * Release the currently read sub-buffer. Parameter is the last
+ * put subbuffer (returned by GET_SUBBUF).
+ *	RELAY_GET_N_SUBBUFS
+ * returns the number of sub buffers in the per cpu channel.
+ * RELAY_GET_SUBBUF_SIZE
+ * returns the size of the sub buffers.
+ */
+//ust// static int ltt_ioctl(struct inode *inode, struct file *filp,
+//ust// unsigned int cmd, unsigned long arg)
+//ust// {
+//ust// struct rchan_buf *buf = inode->i_private;
+//ust// struct ltt_channel_struct *ltt_channel =
+//ust// (struct ltt_channel_struct *)buf->chan->private_data;
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
+//ust// u32 __user *argp = (u32 __user *)arg;
+//ust//
+//ust//	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
+//ust// switch (cmd) {
+//ust// case RELAY_GET_SUBBUF:
+//ust// {
+//ust// int ret;
+//ust// ret = ltt_do_get_subbuf(buf, ltt_buf, &consumed_old);
+//ust// if(ret < 0)
+//ust// return ret;
+//ust// return put_user((u32)consumed_old, argp);
+//ust// }
+//ust// case RELAY_PUT_SUBBUF:
+//ust// {
+//ust// int ret;
+//ust// u32 uconsumed_old;
+//ust// ret = get_user(uconsumed_old, argp);
+//ust// if (ret)
+//ust// return ret; /* will return -EFAULT */
+//ust// return ltt_do_put_subbuf(buf, ltt_buf, uconsumed_old);
+//ust// }
+//ust// case RELAY_GET_N_SUBBUFS:
+//ust// return put_user((u32)buf->chan->n_subbufs, argp);
+//ust// break;
+//ust// case RELAY_GET_SUBBUF_SIZE:
+//ust// return put_user((u32)buf->chan->subbuf_size, argp);
+//ust// break;
+//ust// default:
+//ust// return -ENOIOCTLCMD;
+//ust// }
+//ust// return 0;
+//ust// }
+
+//ust// #ifdef CONFIG_COMPAT
+//ust// static long ltt_compat_ioctl(struct file *file, unsigned int cmd,
+//ust// unsigned long arg)
+//ust// {
+//ust// long ret = -ENOIOCTLCMD;
+//ust//
+//ust// lock_kernel();
+//ust// ret = ltt_ioctl(file->f_dentry->d_inode, file, cmd, arg);
+//ust// unlock_kernel();
+//ust//
+//ust// return ret;
+//ust// }
+//ust// #endif
+
+//ust// static void ltt_relay_pipe_buf_release(struct pipe_inode_info *pipe,
+//ust// struct pipe_buffer *pbuf)
+//ust// {
+//ust// }
+//ust//
+//ust// static struct pipe_buf_operations ltt_relay_pipe_buf_ops = {
+//ust// .can_merge = 0,
+//ust// .map = generic_pipe_buf_map,
+//ust// .unmap = generic_pipe_buf_unmap,
+//ust// .confirm = generic_pipe_buf_confirm,
+//ust// .release = ltt_relay_pipe_buf_release,
+//ust// .steal = generic_pipe_buf_steal,
+//ust// .get = generic_pipe_buf_get,
+//ust// };
+
+//ust// static void ltt_relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
+//ust// {
+//ust// }
+
+/*
+ * subbuf_splice_actor - splice up to one subbuf's worth of data
+ */
+//ust// static int subbuf_splice_actor(struct file *in,
+//ust// loff_t *ppos,
+//ust// struct pipe_inode_info *pipe,
+//ust// size_t len,
+//ust// unsigned int flags)
+//ust// {
+//ust// struct rchan_buf *buf = in->private_data;
+//ust// struct ltt_channel_struct *ltt_channel =
+//ust// (struct ltt_channel_struct *)buf->chan->private_data;
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
+//ust// unsigned int poff, subbuf_pages, nr_pages;
+//ust// struct page *pages[PIPE_BUFFERS];
+//ust// struct partial_page partial[PIPE_BUFFERS];
+//ust// struct splice_pipe_desc spd = {
+//ust// .pages = pages,
+//ust// .nr_pages = 0,
+//ust// .partial = partial,
+//ust// .flags = flags,
+//ust//		.ops = &ltt_relay_pipe_buf_ops,
+//ust// .spd_release = ltt_relay_page_release,
+//ust// };
+//ust// long consumed_old, consumed_idx, roffset;
+//ust// unsigned long bytes_avail;
+//ust//
+//ust// /*
+//ust// * Check that a GET_SUBBUF ioctl has been done before.
+//ust// */
+//ust//	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
+//ust//	consumed_old = atomic_long_read(&ltt_buf->consumed);
+//ust// consumed_old += *ppos;
+//ust// consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
+//ust//
+//ust// /*
+//ust// * Adjust read len, if longer than what is available
+//ust// */
+//ust//	bytes_avail = SUBBUF_TRUNC(local_read(&ltt_buf->offset), buf->chan)
+//ust// - consumed_old;
+//ust// WARN_ON(bytes_avail > buf->chan->alloc_size);
+//ust// len = min_t(size_t, len, bytes_avail);
+//ust// subbuf_pages = bytes_avail >> PAGE_SHIFT;
+//ust// nr_pages = min_t(unsigned int, subbuf_pages, PIPE_BUFFERS);
+//ust// roffset = consumed_old & PAGE_MASK;
+//ust// poff = consumed_old & ~PAGE_MASK;
+//ust// printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
+//ust//		len, (ssize_t)*ppos, local_read(&ltt_buf->offset));
+//ust//
+//ust// for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
+//ust// unsigned int this_len;
+//ust// struct buf_page *page;
+//ust//
+//ust// if (!len)
+//ust// break;
+//ust// printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
+//ust// len, roffset);
+//ust//
+//ust// this_len = PAGE_SIZE - poff;
+//ust// page = ltt_relay_read_get_page(buf, roffset);
+//ust// spd.pages[spd.nr_pages] = page->page;
+//ust// spd.partial[spd.nr_pages].offset = poff;
+//ust// spd.partial[spd.nr_pages].len = this_len;
+//ust//
+//ust// poff = 0;
+//ust// roffset += PAGE_SIZE;
+//ust// len -= this_len;
+//ust// }
+//ust//
+//ust// if (!spd.nr_pages)
+//ust// return 0;
+//ust//
+//ust// return splice_to_pipe(pipe, &spd);
+//ust// }
+
+//ust// static ssize_t ltt_relay_file_splice_read(struct file *in,
+//ust// loff_t *ppos,
+//ust// struct pipe_inode_info *pipe,
+//ust// size_t len,
+//ust// unsigned int flags)
+//ust// {
+//ust// ssize_t spliced;
+//ust// int ret;
+//ust//
+//ust// ret = 0;
+//ust// spliced = 0;
+//ust//
+//ust// printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n",
+//ust// len, (ssize_t)*ppos);
+//ust// while (len && !spliced) {
+//ust// ret = subbuf_splice_actor(in, ppos, pipe, len, flags);
+//ust// printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
+//ust// if (ret < 0)
+//ust// break;
+//ust// else if (!ret) {
+//ust// if (flags & SPLICE_F_NONBLOCK)
+//ust// ret = -EAGAIN;
+//ust// break;
+//ust// }
+//ust//
+//ust// *ppos += ret;
+//ust// if (ret > len)
+//ust// len = 0;
+//ust// else
+//ust// len -= ret;
+//ust// spliced += ret;
+//ust// }
+//ust//
+//ust// if (spliced)
+//ust// return spliced;
+//ust//
+//ust// return ret;
+//ust// }
+
+static void ltt_relay_print_subbuffer_errors(
+ struct ltt_channel_struct *ltt_chan,
+ long cons_off)
+{
+ struct rchan *rchan = ltt_chan->trans_channel_data;
+ struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
+ long cons_idx, commit_count, write_offset;
+
+ cons_idx = SUBBUF_INDEX(cons_off, rchan);
+	commit_count = local_read(&ltt_buf->commit_count[cons_idx]);
+ /*
+ * No need to order commit_count and write_offset reads because we
+ * execute after trace is stopped when there are no readers left.
+ */
+	write_offset = local_read(&ltt_buf->offset);
+ printk(KERN_WARNING
+ "LTT : unread channel %s offset is %ld "
+ "and cons_off : %ld\n",
+ ltt_chan->channel_name, write_offset, cons_off);
+ /* Check each sub-buffer for non filled commit count */
+ if (((commit_count - rchan->subbuf_size) & ltt_chan->commit_count_mask)
+ - (BUFFER_TRUNC(cons_off, rchan) >> ltt_chan->n_subbufs_order)
+ != 0)
+ printk(KERN_ALERT
+ "LTT : %s : subbuffer %lu has non filled "
+ "commit count %lu.\n",
+ ltt_chan->channel_name, cons_idx, commit_count);
+ printk(KERN_ALERT "LTT : %s : commit count : %lu, subbuf size %zd\n",
+ ltt_chan->channel_name, commit_count,
+ rchan->subbuf_size);
+}
+
+static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
+ struct ltt_channel_struct *ltt_chan)
+{
+ struct rchan *rchan = ltt_chan->trans_channel_data;
+ struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
+ long cons_off;
+
+	for (cons_off = atomic_long_read(&ltt_buf->consumed);
+			(SUBBUF_TRUNC(local_read(&ltt_buf->offset),
+ rchan)
+ - cons_off) > 0;
+ cons_off = SUBBUF_ALIGN(cons_off, rchan))
+ ltt_relay_print_subbuffer_errors(ltt_chan, cons_off);
+}
+
+static void ltt_relay_print_buffer_errors(struct ltt_channel_struct *ltt_chan)
+{
+ struct ltt_trace_struct *trace = ltt_chan->trace;
+ struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
+
+	if (local_read(&ltt_buf->events_lost))
+		printk(KERN_ALERT
+			"LTT : %s : %ld events lost "
+			"in %s channel.\n",
+			ltt_chan->channel_name,
+			local_read(&ltt_buf->events_lost),
+			ltt_chan->channel_name);
+	if (local_read(&ltt_buf->corrupted_subbuffers))
+		printk(KERN_ALERT
+			"LTT : %s : %ld corrupted subbuffers "
+			"in %s channel.\n",
+			ltt_chan->channel_name,
+			local_read(&ltt_buf->corrupted_subbuffers),
+			ltt_chan->channel_name);
+
+ ltt_relay_print_errors(trace, ltt_chan);
+}
+
+static void ltt_relay_remove_dirs(struct ltt_trace_struct *trace)
+{
+//ust// debugfs_remove(trace->dentry.trace_root);
+}
+
+static void ltt_relay_release_channel(struct kref *kref)
+{
+ struct ltt_channel_struct *ltt_chan = container_of(kref,
+ struct ltt_channel_struct, kref);
+ free(ltt_chan->buf);
+}
+
+/*
+ * Create ltt buffer.
+ */
+//ust// static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
+//ust// struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
+//ust// unsigned int cpu, unsigned int n_subbufs)
+//ust// {
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_chan->buf, cpu);
+//ust// unsigned int j;
+//ust//
+//ust// ltt_buf->commit_count =
+//ust// kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
+//ust// GFP_KERNEL, cpu_to_node(cpu));
+//ust// if (!ltt_buf->commit_count)
+//ust// return -ENOMEM;
+//ust// kref_get(&trace->kref);
+//ust// kref_get(&trace->ltt_transport_kref);
+//ust//	kref_get(&ltt_chan->kref);
+//ust//	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
+//ust//	atomic_long_set(&ltt_buf->consumed, 0);
+//ust//	atomic_long_set(&ltt_buf->active_readers, 0);
+//ust//	for (j = 0; j < n_subbufs; j++)
+//ust//		local_set(&ltt_buf->commit_count[j], 0);
+//ust//	init_waitqueue_head(&ltt_buf->write_wait);
+//ust//	atomic_set(&ltt_buf->wakeup_readers, 0);
+//ust//	spin_lock_init(&ltt_buf->full_lock);
+//ust//
+//ust// ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
+//ust// /* atomic_add made on local variable on data that belongs to
+//ust// * various CPUs : ok because tracing not started (for this cpu). */
+//ust//	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
+//ust//
+//ust//	local_set(&ltt_buf->events_lost, 0);
+//ust//	local_set(&ltt_buf->corrupted_subbuffers, 0);
+//ust//
+//ust// return 0;
+//ust// }
+
+static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
+ struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
+ unsigned int n_subbufs)
+{
+ struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
+ unsigned int j;
+ int fds[2];
+ int result;
+
+//ust// ltt_buf->commit_count =
+//ust// zmalloc(sizeof(ltt_buf->commit_count) * n_subbufs);
+//ust// if (!ltt_buf->commit_count)
+//ust// return -ENOMEM;
+ kref_get(&trace->kref);
+ kref_get(&trace->ltt_transport_kref);
+	kref_get(&ltt_chan->kref);
+	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
+	atomic_long_set(&ltt_buf->consumed, 0);
+	atomic_long_set(&ltt_buf->active_readers, 0);
+	for (j = 0; j < n_subbufs; j++)
+		local_set(&ltt_buf->commit_count[j], 0);
+//ust//	init_waitqueue_head(&ltt_buf->write_wait);
+//ust//	atomic_set(&ltt_buf->wakeup_readers, 0);
+//ust//	spin_lock_init(&ltt_buf->full_lock);
+
+ ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
+
+	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
+
+	local_set(&ltt_buf->events_lost, 0);
+	local_set(&ltt_buf->corrupted_subbuffers, 0);
+
+ result = pipe(fds);
+ if(result == -1) {
+ PERROR("pipe");
+ return -1;
+ }
+ ltt_buf->data_ready_fd_read = fds[0];
+ ltt_buf->data_ready_fd_write = fds[1];
+
+ return 0;
+}
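+
+/*
+ * The pipe created above replaces the kernel-side wait queue : the tracer
+ * writes one byte to data_ready_fd_write when a subbuffer is ready (see
+ * ltt_relay_buffer_flush below) and closes the write end when the buffer is
+ * finished, so a consumer can block on the read end. Sketch (hypothetical
+ * consumer that was handed the read end of the pipe):
+ *
+ *	char dummy;
+ *	ssize_t n = read(data_ready_fd_read, &dummy, 1);
+ *	if (n == 1)
+ *		; // data ready : try ltt_do_get_subbuf()
+ *	else if (n == 0)
+ *		; // EOF : writer closed the pipe, buffer is finished
+ */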
+
+static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan)
+{
+ struct ltt_trace_struct *trace = ltt_chan->trace;
+ struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
+
+	kref_put(&ltt_chan->trace->ltt_transport_kref,
+ ltt_release_transport);
+ ltt_relay_print_buffer_errors(ltt_chan);
+//ust// kfree(ltt_buf->commit_count);
+//ust// ltt_buf->commit_count = NULL;
+	kref_put(&ltt_chan->kref, ltt_relay_release_channel);
+ kref_put(&trace->kref, ltt_release_trace);
+//ust// wake_up_interruptible(&trace->kref_wq);
+}
+
+static int ltt_chan_alloc_ltt_buf(struct ltt_channel_struct *ltt_chan)
+{
+ void *ptr;
+ int result;
+
+ /* Get one page */
+ /* FIXME: increase size if we have a commit_count array that overflows the page */
+ size_t size = PAGE_ALIGN(1);
+
+ result = ltt_chan->buf_shmid = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
+ if(ltt_chan->buf_shmid == -1) {
+ PERROR("shmget");
+ return -1;
+ }
+
+ ptr = shmat(ltt_chan->buf_shmid, NULL, 0);
+ if(ptr == (void *) -1) {
+ perror("shmat");
+ goto destroy_shmem;
+ }
+
+ /* Already mark the shared memory for destruction. This will occur only
+ * when all users have detached.
+ */
+ result = shmctl(ltt_chan->buf_shmid, IPC_RMID, NULL);
+ if(result == -1) {
+ perror("shmctl");
+ return -1;
+ }
+
+ ltt_chan->buf = ptr;
+
+ return 0;
+
+ destroy_shmem:
+ result = shmctl(ltt_chan->buf_shmid, IPC_RMID, NULL);
+ if(result == -1) {
+ perror("shmctl");
+ }
+
+ return -1;
+}
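+
+/*
+ * Design note : calling shmctl(IPC_RMID) right after shmat() is the usual
+ * System V idiom for leak-free shared memory : the segment is destroyed only
+ * when the last process detaches, so the memory is reclaimed even if the
+ * traced process crashes. Linux (unlike some other systems) still allows
+ * attaching to a segment marked for removal, which is what lets a consumer
+ * map the same buffer. Consumer side (sketch, assuming buf_shmid was
+ * received over some control channel):
+ *
+ *	void *buf = shmat(buf_shmid, NULL, SHM_RDONLY);
+ *	if (buf == (void *)-1)
+ *		PERROR("shmat");
+ */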
+
+/*
+ * Create channel.
+ */
+static int ltt_relay_create_channel(const char *trace_name,
+ struct ltt_trace_struct *trace, struct dentry *dir,
+ const char *channel_name, struct ltt_channel_struct *ltt_chan,
+ unsigned int subbuf_size, unsigned int n_subbufs,
+ int overwrite)
+{
+ char *tmpname;
+ unsigned int tmpname_len;
+ int err = 0;
+
+ tmpname = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!tmpname)
+ return EPERM;
+ if (overwrite) {
+ strncpy(tmpname, LTT_FLIGHT_PREFIX, PATH_MAX-1);
+ strncat(tmpname, channel_name,
+ PATH_MAX-1-sizeof(LTT_FLIGHT_PREFIX));
+ } else {
+ strncpy(tmpname, channel_name, PATH_MAX-1);
+ }
+ strncat(tmpname, "_", PATH_MAX-1-strlen(tmpname));
+
+	kref_init(&ltt_chan->kref);
+
+ ltt_chan->trace = trace;
+ ltt_chan->buffer_begin = ltt_buffer_begin_callback;
+ ltt_chan->buffer_end = ltt_buffer_end_callback;
+ ltt_chan->overwrite = overwrite;
+ ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
+ ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
+//ust// ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
+
+ ltt_chan_alloc_ltt_buf(ltt_chan);
+
+//ust// ltt_chan->buf = malloc(sizeof(struct ltt_channel_buf_struct));
+ if (!ltt_chan->buf)
+ goto alloc_error;
+ ltt_chan->trans_channel_data = ltt_relay_open(tmpname,
+ dir,
+ subbuf_size,
+ n_subbufs,
+ ltt_chan);
+ tmpname_len = strlen(tmpname);
+ if (tmpname_len > 0) {
+ /* Remove final _ for pretty printing */
+ tmpname[tmpname_len-1] = '\0';
+ }
+ if (ltt_chan->trans_channel_data == NULL) {
+ printk(KERN_ERR "LTT : Can't open %s channel for trace %s\n",
+ tmpname, trace_name);
+ goto relay_open_error;
+ }
+
+ err = 0;
+ goto end;
+
+relay_open_error:
+//ust// percpu_free(ltt_chan->buf);
+alloc_error:
+ err = EPERM;
+end:
+ kfree(tmpname);
+ return err;
+}
+
+static int ltt_relay_create_dirs(struct ltt_trace_struct *new_trace)
+{
+//ust// new_trace->dentry.trace_root = debugfs_create_dir(new_trace->trace_name,
+//ust// get_ltt_root());
+//ust// if (new_trace->dentry.trace_root == NULL) {
+//ust// printk(KERN_ERR "LTT : Trace directory name %s already taken\n",
+//ust// new_trace->trace_name);
+//ust// return EEXIST;
+//ust// }
+
+//ust// new_trace->callbacks.create_buf_file = ltt_create_buf_file_callback;
+//ust// new_trace->callbacks.remove_buf_file = ltt_remove_buf_file_callback;
+
+ return 0;
+}
+
+/*
+ * LTTng channel flush function.
+ *
+ * Must be called when no tracing is active in the channel, because of
+ * accesses across CPUs.
+ */
+static notrace void ltt_relay_buffer_flush(struct rchan_buf *buf)
+{
+ struct ltt_channel_struct *channel =
+ (struct ltt_channel_struct *)buf->chan->private_data;
+ struct ltt_channel_buf_struct *ltt_buf = channel->buf;
+ int result;
+
+ buf->finalized = 1;
+ ltt_force_switch(buf, FORCE_FLUSH);
+
+ result = write(ltt_buf->data_ready_fd_write, "1", 1);
+ if(result == -1) {
+ PERROR("write (in ltt_relay_buffer_flush)");
+ ERR("this should never happen!");
+ }
+}
+
+static void ltt_relay_async_wakeup_chan(struct ltt_channel_struct *ltt_channel)
+{
+//ust// unsigned int i;
+//ust// struct rchan *rchan = ltt_channel->trans_channel_data;
+//ust//
+//ust// for_each_possible_cpu(i) {
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_channel->buf, i);
+//ust//
+//ust//		if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
+//ust//			atomic_set(&ltt_buf->wakeup_readers, 0);
+//ust// wake_up_interruptible(&rchan->buf[i]->read_wait);
+//ust// }
+//ust// }
+}
+
+static void ltt_relay_finish_buffer(struct ltt_channel_struct *ltt_channel)
+{
+ struct rchan *rchan = ltt_channel->trans_channel_data;
+ int result;
+
+ if (rchan->buf) {
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
+ ltt_relay_buffer_flush(rchan->buf);
+//ust// ltt_relay_wake_writers(ltt_buf);
+ /* closing the pipe tells the consumer the buffer is finished */
+
+ //result = write(ltt_buf->data_ready_fd_write, "D", 1);
+ //if(result == -1) {
+ // PERROR("write (in ltt_relay_finish_buffer)");
+ // ERR("this should never happen!");
+ //}
+ close(ltt_buf->data_ready_fd_write);
+ }
+}
+
+
+static void ltt_relay_finish_channel(struct ltt_channel_struct *ltt_channel)
+{
+ unsigned int i;
+
+//ust// for_each_possible_cpu(i)
+ ltt_relay_finish_buffer(ltt_channel);
+}
+
+static void ltt_relay_remove_channel(struct ltt_channel_struct *channel)
+{
+ struct rchan *rchan = channel->trans_channel_data;
+
+ ltt_relay_close(rchan);
+ kref_put(&channel->kref, ltt_relay_release_channel);
+}
+
+struct ltt_reserve_switch_offsets {
+ long begin, end, old;
+ long begin_switch, end_switch_current, end_switch_old;
+ long commit_count, reserve_commit_diff;
+ size_t before_hdr_pad, size;
+};
+
+/*
+ * Returns :
+ * 0 if ok
+ * !0 if execution must be aborted.
+ */
+static inline int ltt_relay_try_reserve(
+ struct ltt_channel_struct *ltt_channel,
+ struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
+ struct rchan_buf *buf,
+ struct ltt_reserve_switch_offsets *offsets, size_t data_size,
+ u64 *tsc, unsigned int *rflags, int largest_align)
+{
+	offsets->begin = local_read(&ltt_buf->offset);
+ offsets->old = offsets->begin;
+ offsets->begin_switch = 0;
+ offsets->end_switch_current = 0;
+ offsets->end_switch_old = 0;
+
+ *tsc = trace_clock_read64();
+ if (last_tsc_overflow(ltt_buf, *tsc))
+ *rflags = LTT_RFLAG_ID_SIZE_TSC;
+
+ if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
+ offsets->begin_switch = 1; /* For offsets->begin */
+ } else {
+ offsets->size = ltt_get_header_size(ltt_channel,
+ offsets->begin, data_size,
+ &offsets->before_hdr_pad, *rflags);
+ offsets->size += ltt_align(offsets->begin + offsets->size,
+ largest_align)
+ + data_size;
+ if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
+ > buf->chan->subbuf_size) {
+ offsets->end_switch_old = 1; /* For offsets->old */
+ offsets->begin_switch = 1; /* For offsets->begin */
+ }
+ }
+ if (offsets->begin_switch) {
+ long subbuf_index;
+
+ if (offsets->end_switch_old)
+ offsets->begin = SUBBUF_ALIGN(offsets->begin,
+ buf->chan);
+ offsets->begin = offsets->begin + ltt_subbuffer_header_size();
+ /* Test new buffer integrity */
+ subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
+ offsets->reserve_commit_diff =
+ (BUFFER_TRUNC(offsets->begin, buf->chan)
+ >> ltt_channel->n_subbufs_order)
+			- (local_read(&ltt_buf->commit_count[subbuf_index])
+ & ltt_channel->commit_count_mask);
+ if (offsets->reserve_commit_diff == 0) {
+ /* Next buffer not corrupted. */
+ if (!ltt_channel->overwrite &&
+ (SUBBUF_TRUNC(offsets->begin, buf->chan)
+ - SUBBUF_TRUNC(atomic_long_read(
+						&ltt_buf->consumed),
+ buf->chan))
+ >= rchan->alloc_size) {
+ /*
+ * We do not overwrite non consumed buffers
+ * and we are full : event is lost.
+ */
+				local_inc(&ltt_buf->events_lost);
+ return -1;
+ } else {
+ /*
+ * next buffer not corrupted, we are either in
+ * overwrite mode or the buffer is not full.
+ * It's safe to write in this new subbuffer.
+ */
+ }
+ } else {
+ /*
+ * Next subbuffer corrupted. Force pushing reader even
+ * in normal mode. It's safe to write in this new
+ * subbuffer.
+ */
+ }
+ offsets->size = ltt_get_header_size(ltt_channel,
+ offsets->begin, data_size,
+ &offsets->before_hdr_pad, *rflags);
+ offsets->size += ltt_align(offsets->begin + offsets->size,
+ largest_align)
+ + data_size;
+ if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
+ > buf->chan->subbuf_size) {
+ /*
+ * Event too big for subbuffers, report error, don't
+ * complete the sub-buffer switch.
+ */
+			local_inc(&ltt_buf->events_lost);
+ return -1;
+ } else {
+ /*
+ * We just made a successful buffer switch and the event
+ * fits in the new subbuffer. Let's write.
+ */
+ }
+ } else {
+ /*
+ * Event fits in the current buffer and we are not on a switch
+ * boundary. It's safe to write.
+ */
+ }
+ offsets->end = offsets->begin + offsets->size;
+
+ if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
+ /*
+ * The offset_end will fall at the very beginning of the next
+ * subbuffer.
+ */
+ offsets->end_switch_current = 1; /* For offsets->begin */
+ }
+ return 0;
+}
+
+/*
+ * Returns :
+ * 0 if ok
+ * !0 if execution must be aborted.
+ */
+static inline int ltt_relay_try_switch(
+ enum force_switch_mode mode,
+ struct ltt_channel_struct *ltt_channel,
+ struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
+ struct rchan_buf *buf,
+ struct ltt_reserve_switch_offsets *offsets,
+ u64 *tsc)
+{
+ long subbuf_index;
+
+	offsets->begin = local_read(&ltt_buf->offset);
+ offsets->old = offsets->begin;
+ offsets->begin_switch = 0;
+ offsets->end_switch_old = 0;
+
+ *tsc = trace_clock_read64();
+
+ if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
+ offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
+ offsets->end_switch_old = 1;
+ } else {
+ /* we do not have to switch : buffer is empty */
+ return -1;
+ }
+ if (mode == FORCE_ACTIVE)
+ offsets->begin += ltt_subbuffer_header_size();
+ /*
+ * Always begin_switch in FORCE_ACTIVE mode.
+ * Test new buffer integrity
+ */
+ subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
+ offsets->reserve_commit_diff =
+ (BUFFER_TRUNC(offsets->begin, buf->chan)
+ >> ltt_channel->n_subbufs_order)
+		- (local_read(&ltt_buf->commit_count[subbuf_index])
+ & ltt_channel->commit_count_mask);
+ if (offsets->reserve_commit_diff == 0) {
+ /* Next buffer not corrupted. */
+ if (mode == FORCE_ACTIVE
+ && !ltt_channel->overwrite
+	    && offsets->begin - atomic_long_read(&ltt_buf->consumed)
+ >= rchan->alloc_size) {
+ /*
+ * We do not overwrite non consumed buffers and we are
+ * full : ignore switch while tracing is active.
+ */
+ return -1;
+ }
+ } else {
+ /*
+ * Next subbuffer corrupted. Force pushing reader even in normal
+ * mode
+ */
+ }
+ offsets->end = offsets->begin;
+ return 0;
+}
+
+static inline void ltt_reserve_push_reader(
+ struct ltt_channel_struct *ltt_channel,
+ struct ltt_channel_buf_struct *ltt_buf,
+ struct rchan *rchan,
+ struct rchan_buf *buf,
+ struct ltt_reserve_switch_offsets *offsets)
+{
+ long consumed_old, consumed_new;
+
+ do {
+		consumed_old = atomic_long_read(&ltt_buf->consumed);
+ /*
+ * If buffer is in overwrite mode, push the reader consumed
+ * count if the write position has reached it and we are not
+ * at the first iteration (don't push the reader farther than
+ * the writer). This operation can be done concurrently by many
+ * writers in the same buffer, the writer being at the farthest
+ * write position sub-buffer index in the buffer being the one
+ * which will win this loop.
+ * If the buffer is not in overwrite mode, pushing the reader
+ * only happens if a sub-buffer is corrupted.
+ */
+ if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
+ - SUBBUF_TRUNC(consumed_old, buf->chan))
+ >= rchan->alloc_size)
+ consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
+ else {
+ consumed_new = consumed_old;
+ break;
+ }
+	} while (atomic_long_cmpxchg(&ltt_buf->consumed, consumed_old,
+ consumed_new) != consumed_old);
+
+ if (consumed_old != consumed_new) {
+ /*
+ * Reader pushed : we are the winner of the push, we can
+ * therefore reequilibrate reserve and commit. Atomic increment
+ * of the commit count permits other writers to play around
+ * with this variable before us. We keep track of
+ * corrupted_subbuffers even in overwrite mode :
+ * we never want to write over a non completely committed
+ * sub-buffer : possible causes : the buffer size is too low
+ * compared to the unordered data input, or there is a writer
+ * that died between the reserve and the commit.
+ */
+ if (offsets->reserve_commit_diff) {
+ /*
+ * We have to alter the sub-buffer commit count.
+ * We do not deliver the previous subbuffer, given it
+ * was either corrupted or not consumed (overwrite
+ * mode).
+ */
+ local_add(offsets->reserve_commit_diff,
+				  &ltt_buf->commit_count[
+ SUBBUF_INDEX(offsets->begin,
+ buf->chan)]);
+ if (!ltt_channel->overwrite
+ || offsets->reserve_commit_diff
+ != rchan->subbuf_size) {
+ /*
+ * The reserve commit diff was not subbuf_size :
+ * it means the subbuffer was partly written to
+ * and is therefore corrupted. If it is multiple
+ * of subbuffer size and we are in flight
+ * recorder mode, we are skipping over a whole
+ * subbuffer.
+ */
+				local_inc(&ltt_buf->corrupted_subbuffers);
+ }
+ }
+ }
+}
+
+
+/*
+ * ltt_reserve_switch_old_subbuf: switch old subbuffer
+ *
+ * Concurrency safe because we are the last and only thread to alter this
+ * sub-buffer. As long as it is not delivered and read, no other thread can
+ * alter the offset, alter the reserve_count or call the
+ * client_buffer_end_callback on this sub-buffer.
+ *
+ * The only remaining threads could be the ones with pending commits. They will
+ * have to do the deliver themselves. Not concurrency safe in overwrite mode.
+ * We detect corrupted subbuffers with commit and reserve counts. We keep a
+ * corrupted sub-buffers count and push the readers across these sub-buffers.
+ *
+ * Not concurrency safe if a writer is stalled in a subbuffer and another writer
+ * switches in, finding out it's corrupted. The result will be that the old
+ * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
+ * will be declared corrupted too because of the commit count adjustment.
+ *
+ * Note : offset_old should never be 0 here.
+ */
+static inline void ltt_reserve_switch_old_subbuf(
+ struct ltt_channel_struct *ltt_channel,
+ struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
+ struct rchan_buf *buf,
+ struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
+{
+ long oldidx = SUBBUF_INDEX(offsets->old - 1, rchan);
+
+ ltt_channel->buffer_end(buf, *tsc, offsets->old, oldidx);
+ /* Must write buffer end before incrementing commit count */
+ smp_wmb();
+ offsets->commit_count =
+ local_add_return(rchan->subbuf_size
+ - (SUBBUF_OFFSET(offsets->old - 1, rchan)
+ + 1),
+					&ltt_buf->commit_count[oldidx]);
+ if ((BUFFER_TRUNC(offsets->old - 1, rchan)
+ >> ltt_channel->n_subbufs_order)
+ - ((offsets->commit_count - rchan->subbuf_size)
+ & ltt_channel->commit_count_mask) == 0)
+ ltt_deliver(buf, oldidx, NULL);
+}
+
+/*
+ * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
+ *
+ * This code can be executed unordered : writers may already have written to the
+ * sub-buffer before this code gets executed, caution. The commit makes sure
+ * that this code is executed before the deliver of this sub-buffer.
+ */
+static /*inline*/ void ltt_reserve_switch_new_subbuf(
+ struct ltt_channel_struct *ltt_channel,
+ struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
+ struct rchan_buf *buf,
+ struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
+{
+ long beginidx = SUBBUF_INDEX(offsets->begin, rchan);
+
+ ltt_channel->buffer_begin(buf, *tsc, beginidx);
+	/* Must write buffer begin before incrementing commit count */
+ smp_wmb();
+ offsets->commit_count = local_add_return(ltt_subbuffer_header_size(),
+			&ltt_buf->commit_count[beginidx]);
+ /* Check if the written buffer has to be delivered */
+ if ((BUFFER_TRUNC(offsets->begin, rchan)
+ >> ltt_channel->n_subbufs_order)
+ - ((offsets->commit_count - rchan->subbuf_size)
+ & ltt_channel->commit_count_mask) == 0)
+ ltt_deliver(buf, beginidx, NULL);
+}
+
+
+/*
+ * ltt_reserve_end_switch_current: finish switching current subbuffer
+ *
+ * Concurrency safe because we are the last and only thread to alter this
+ * sub-buffer. As long as it is not delivered and read, no other thread can
+ * alter the offset, alter the reserve_count or call the
+ * client_buffer_end_callback on this sub-buffer.
+ *
+ * The only remaining threads could be the ones with pending commits. They will
+ * have to do the deliver themselves. Not concurrency safe in overwrite mode.
+ * We detect corrupted subbuffers with commit and reserve counts. We keep a
+ * corrupted sub-buffers count and push the readers across these sub-buffers.
+ *
+ * Not concurrency safe if a writer is stalled in a subbuffer and another writer
+ * switches in, finding out it's corrupted. The result will be that the old
+ * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
+ * will be declared corrupted too because of the commit count adjustment.
+ */
+static inline void ltt_reserve_end_switch_current(
+ struct ltt_channel_struct *ltt_channel,
+ struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
+ struct rchan_buf *buf,
+ struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
+{
+ long endidx = SUBBUF_INDEX(offsets->end - 1, rchan);
+
+ ltt_channel->buffer_end(buf, *tsc, offsets->end, endidx);
+	/* Must write buffer end before incrementing commit count */
+ smp_wmb();
+ offsets->commit_count =
+ local_add_return(rchan->subbuf_size
+ - (SUBBUF_OFFSET(offsets->end - 1, rchan)
+ + 1),
+					&ltt_buf->commit_count[endidx]);
+ if ((BUFFER_TRUNC(offsets->end - 1, rchan)
+ >> ltt_channel->n_subbufs_order)
+ - ((offsets->commit_count - rchan->subbuf_size)
+ & ltt_channel->commit_count_mask) == 0)
+ ltt_deliver(buf, endidx, NULL);
+}
+
+/**
+ * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer.
+ * @trace: the trace structure to log to.
+ * @ltt_channel: channel structure
+ * @transport_data: data structure specific to ltt relay
+ * @data_size: size of the variable length data to log.
+ * @slot_size: pointer to total size of the slot (out)
+ * @buf_offset : pointer to reserved buffer offset (out)
+ * @tsc: pointer to the tsc at the slot reservation (out)
+ * @cpu: cpuid
+ *
+ * Return : -ENOSPC if not enough space, else returns 0.
+ * It will take care of sub-buffer switching.
+ */
+static notrace int ltt_relay_reserve_slot(struct ltt_trace_struct *trace,
+ struct ltt_channel_struct *ltt_channel, void **transport_data,
+ size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
+ unsigned int *rflags, int largest_align)
+{
+ struct rchan *rchan = ltt_channel->trans_channel_data;
+ struct rchan_buf *buf = *transport_data = rchan->buf;
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
+ struct ltt_reserve_switch_offsets offsets;
+
+ offsets.reserve_commit_diff = 0;
+ offsets.size = 0;
+
+ /*
+ * Perform retryable operations.
+ */
+ if (ltt_nesting > 4) {
+		local_inc(&ltt_buf->events_lost);
+ return -EPERM;
+ }
+ do {
+ if (ltt_relay_try_reserve(ltt_channel, ltt_buf,
+ rchan, buf, &offsets, data_size, tsc, rflags,
+ largest_align))
+ return -ENOSPC;
+	} while (local_cmpxchg(&ltt_buf->offset, offsets.old,
+ offsets.end) != offsets.old);
+
+ /*
+ * Atomically update last_tsc. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full TSC
+ * events, never the opposite (missing a full TSC event when it would be
+ * needed).
+ */
+ save_last_tsc(ltt_buf, *tsc);
+
+ /*
+ * Push the reader if necessary
+ */
+ ltt_reserve_push_reader(ltt_channel, ltt_buf, rchan, buf, &offsets);
+
+ /*
+ * Switch old subbuffer if needed.
+ */
+ if (offsets.end_switch_old)
+ ltt_reserve_switch_old_subbuf(ltt_channel, ltt_buf, rchan, buf,
+ &offsets, tsc);
+
+ /*
+ * Populate new subbuffer.
+ */
+ if (offsets.begin_switch)
+ ltt_reserve_switch_new_subbuf(ltt_channel, ltt_buf, rchan,
+ buf, &offsets, tsc);
+
+ if (offsets.end_switch_current)
+ ltt_reserve_end_switch_current(ltt_channel, ltt_buf, rchan,
+ buf, &offsets, tsc);
+
+ *slot_size = offsets.size;
+ *buf_offset = offsets.begin + offsets.before_hdr_pad;
+ return 0;
+}
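+
+/*
+ * Usage sketch for the reserve/commit pair (hypothetical probe code; in
+ * practice the serializer drives these through the transport ops):
+ *
+ *	size_t slot_size;
+ *	long buf_offset;
+ *	u64 tsc;
+ *	unsigned int rflags = 0;
+ *
+ *	if (!ltt_relay_reserve_slot(trace, channel, &transport_data, data_size,
+ *			&slot_size, &buf_offset, &tsc, &rflags, largest_align)) {
+ *		// write the event header and payload at buf_offset,
+ *		// advancing buf_offset past what was written, then :
+ *		ltt_relay_commit_slot(channel, &transport_data, buf_offset,
+ *				slot_size);
+ *	}
+ */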
+
+/*
+ * Force a sub-buffer switch for a per-cpu buffer. This operation is
+ * completely reentrant : can be called while tracing is active with
+ * absolutely no lock held.
+ *
+ * Note, however, that as a local_cmpxchg is used for some atomic
+ * operations, this function must be called from the CPU which owns the buffer
+ * for an ACTIVE flush.
+ */
+static notrace void ltt_force_switch(struct rchan_buf *buf,
+ enum force_switch_mode mode)
+{
+ struct ltt_channel_struct *ltt_channel =
+ (struct ltt_channel_struct *)buf->chan->private_data;
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
+ struct rchan *rchan = ltt_channel->trans_channel_data;
+ struct ltt_reserve_switch_offsets offsets;
+ u64 tsc;
+
+ offsets.reserve_commit_diff = 0;
+ offsets.size = 0;
+
+ /*
+ * Perform retryable operations.
+ */
+ do {
+ if (ltt_relay_try_switch(mode, ltt_channel, ltt_buf,
+ rchan, buf, &offsets, &tsc))
+ return;
+	} while (local_cmpxchg(&ltt_buf->offset, offsets.old,
+ offsets.end) != offsets.old);
+
+ /*
+ * Atomically update last_tsc. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full TSC
+ * events, never the opposite (missing a full TSC event when it would be
+ * needed).
+ */
+ save_last_tsc(ltt_buf, tsc);
+
+ /*
+ * Push the reader if necessary
+ */
+ if (mode == FORCE_ACTIVE)
+ ltt_reserve_push_reader(ltt_channel, ltt_buf, rchan,
+ buf, &offsets);
+
+ /*
+ * Switch old subbuffer if needed.
+ */
+ if (offsets.end_switch_old)
+ ltt_reserve_switch_old_subbuf(ltt_channel, ltt_buf, rchan, buf,
+ &offsets, &tsc);
+
+ /*
+ * Populate new subbuffer.
+ */
+ if (mode == FORCE_ACTIVE)
+ ltt_reserve_switch_new_subbuf(ltt_channel,
+ ltt_buf, rchan, buf, &offsets, &tsc);
+}
+
+/*
+ * For flight recording. Must be called after relay_commit.
+ * This function decrements the subbuffer's lost_size each time the commit count
+ * reaches back the reserve offset (modulo subbuffer size). It is useful for
+ * crash dump.
+ * We use buf_offset - 1 to make sure we deal correctly with the case where we
+ * fill the subbuffer completely (so the subbuf index stays in the previous
+ * subbuffer).
+ */
+//ust// #ifdef CONFIG_LTT_VMCORE
+static /*inline*/ void ltt_write_commit_counter(struct rchan_buf *buf,
+ long buf_offset, size_t slot_size)
+{
+ struct ltt_channel_struct *ltt_channel =
+ (struct ltt_channel_struct *)buf->chan->private_data;
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
+ struct ltt_subbuffer_header *header;
+ long offset, subbuf_idx, commit_count;
+ uint32_t lost_old, lost_new;
+
+ subbuf_idx = SUBBUF_INDEX(buf_offset - 1, buf->chan);
+ offset = buf_offset + slot_size;
+ header = (struct ltt_subbuffer_header *)
+ ltt_relay_offset_address(buf,
+ subbuf_idx * buf->chan->subbuf_size);
+ for (;;) {
+ lost_old = header->lost_size;
+ commit_count =
+			local_read(&ltt_buf->commit_count[subbuf_idx]);
+ /* SUBBUF_OFFSET includes commit_count_mask */
+ if (!SUBBUF_OFFSET(offset - commit_count, buf->chan)) {
+ lost_new = (uint32_t)buf->chan->subbuf_size
+ - SUBBUF_OFFSET(commit_count, buf->chan);
+ lost_old = cmpxchg_local(&header->lost_size, lost_old,
+ lost_new);
+ if (lost_old <= lost_new)
+ break;
+ } else {
+ break;
+ }
+ }
+}
+//ust// #else
+//ust// static inline void ltt_write_commit_counter(struct rchan_buf *buf,
+//ust// long buf_offset, size_t slot_size)
+//ust// {
+//ust// }
+//ust// #endif
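+
+/*
+ * Worked example (assuming 4096-byte subbuffers) : when the last pending
+ * commit of a subbuffer brings commit_count back in line with the reserve
+ * offset, offset - commit_count becomes a multiple of 4096, and the header's
+ * lost_size is set to 4096 - SUBBUF_OFFSET(commit_count, chan), i.e. the
+ * padding at the end of the subbuffer that carries no event data. A
+ * crash-dump extractor then knows how much of the subbuffer to trust.
+ */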
+
+/*
+ * Atomic unordered slot commit. Increments the commit count in the
+ * specified sub-buffer, and delivers it if necessary.
+ *
+ * Parameters:
+ *
+ * @ltt_channel : channel structure
+ * @transport_data: transport-specific data
+ * @buf_offset : offset following the event header.
+ * @slot_size : size of the reserved slot.
+ */
+static notrace void ltt_relay_commit_slot(
+ struct ltt_channel_struct *ltt_channel,
+ void **transport_data, long buf_offset, size_t slot_size)
+{
+ struct rchan_buf *buf = *transport_data;
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
+ struct rchan *rchan = buf->chan;
+ long offset_end = buf_offset;
+ long endidx = SUBBUF_INDEX(offset_end - 1, rchan);
+ long commit_count;
+
+ /* Must write slot data before incrementing commit count */
+ smp_wmb();
+ commit_count = local_add_return(slot_size,
+		&ltt_buf->commit_count[endidx]);
+ /* Check if all commits have been done */
+ if ((BUFFER_TRUNC(offset_end - 1, rchan)
+ >> ltt_channel->n_subbufs_order)
+ - ((commit_count - rchan->subbuf_size)
+ & ltt_channel->commit_count_mask) == 0)
+ ltt_deliver(buf, endidx, NULL);
+ /*
+ * Update lost_size for each commit. It's needed only for extracting
+ * ltt buffers from vmcore, after crash.
+ */
+ ltt_write_commit_counter(buf, buf_offset, slot_size);
+
+	DBG("committed slot. now commit count is %ld", commit_count);
+}
+
+/*
+ * This is called with preemption disabled when user space has requested
+ * blocking mode. If one of the active traces has free space below a
+ * specific threshold value, we reenable preemption and block.
+ */
+static int ltt_relay_user_blocking(struct ltt_trace_struct *trace,
+ unsigned int chan_index, size_t data_size,
+ struct user_dbg_data *dbg)
+{
+//ust// struct rchan *rchan;
+//ust// struct ltt_channel_buf_struct *ltt_buf;
+//ust// struct ltt_channel_struct *channel;
+//ust// struct rchan_buf *relay_buf;
+//ust// int cpu;
+//ust// DECLARE_WAITQUEUE(wait, current);
+//ust//
+//ust// channel = &trace->channels[chan_index];
+//ust// rchan = channel->trans_channel_data;
+//ust// cpu = smp_processor_id();
+//ust// relay_buf = rchan->buf[cpu];
+//ust// ltt_buf = percpu_ptr(channel->buf, cpu);
+//ust//
+//ust// /*
+//ust// * Check if data is too big for the channel : do not
+//ust// * block for it.
+//ust// */
+//ust// if (LTT_RESERVE_CRITICAL + data_size > relay_buf->chan->subbuf_size)
+//ust// return 0;
+//ust//
+//ust// /*
+//ust// * If free space too low, we block. We restart from the
+//ust// * beginning after we resume (cpu id may have changed
+//ust// * while preemption is active).
+//ust// */
+//ust//	spin_lock(&ltt_buf->full_lock);
+//ust//	if (!channel->overwrite) {
+//ust//		dbg->write = local_read(&ltt_buf->offset);
+//ust//		dbg->read = atomic_long_read(&ltt_buf->consumed);
+//ust//		dbg->avail_size = dbg->write + LTT_RESERVE_CRITICAL + data_size
+//ust//			- SUBBUF_TRUNC(dbg->read,
+//ust//				       relay_buf->chan);
+//ust//		if (dbg->avail_size > rchan->alloc_size) {
+//ust//			__set_current_state(TASK_INTERRUPTIBLE);
+//ust//			add_wait_queue(&ltt_buf->write_wait, &wait);
+//ust//			spin_unlock(&ltt_buf->full_lock);
+//ust//			preempt_enable();
+//ust//			schedule();
+//ust//			__set_current_state(TASK_RUNNING);
+//ust//			remove_wait_queue(&ltt_buf->write_wait, &wait);
+//ust// if (signal_pending(current))
+//ust// return -ERESTARTSYS;
+//ust// preempt_disable();
+//ust// return 1;
+//ust// }
+//ust// }
+//ust//	spin_unlock(&ltt_buf->full_lock);
+ return 0;
+}
+
+static void ltt_relay_print_user_errors(struct ltt_trace_struct *trace,
+ unsigned int chan_index, size_t data_size,
+ struct user_dbg_data *dbg)
+{
+ struct rchan *rchan;
+ struct ltt_channel_buf_struct *ltt_buf;
+ struct ltt_channel_struct *channel;
+ struct rchan_buf *relay_buf;
+
+ channel = &trace->channels[chan_index];
+ rchan = channel->trans_channel_data;
+ relay_buf = rchan->buf;
+ ltt_buf = channel->buf;
+
+ printk(KERN_ERR "Error in LTT usertrace : "
+ "buffer full : event lost in blocking "
+ "mode. Increase LTT_RESERVE_CRITICAL.\n");
+ printk(KERN_ERR "LTT nesting level is %u.\n", ltt_nesting);
+ printk(KERN_ERR "LTT avail size %lu.\n",
+ dbg->avail_size);
+	printk(KERN_ERR "avail write : %lu, read : %lu\n",
+ dbg->write, dbg->read);
+
+	dbg->write = local_read(&ltt_buf->offset);
+	dbg->read = atomic_long_read(&ltt_buf->consumed);
+
+ printk(KERN_ERR "LTT cur size %lu.\n",
+ dbg->write + LTT_RESERVE_CRITICAL + data_size
+ - SUBBUF_TRUNC(dbg->read, relay_buf->chan));
+ printk(KERN_ERR "cur write : %lu, read : %lu\n",
+ dbg->write, dbg->read);
+}
+
+//ust// static struct ltt_transport ltt_relay_transport = {
+//ust// .name = "relay",
+//ust// .owner = THIS_MODULE,
+//ust// .ops = {
+//ust// .create_dirs = ltt_relay_create_dirs,
+//ust// .remove_dirs = ltt_relay_remove_dirs,
+//ust// .create_channel = ltt_relay_create_channel,
+//ust// .finish_channel = ltt_relay_finish_channel,
+//ust// .remove_channel = ltt_relay_remove_channel,
+//ust// .wakeup_channel = ltt_relay_async_wakeup_chan,
+//ust// .commit_slot = ltt_relay_commit_slot,
+//ust// .reserve_slot = ltt_relay_reserve_slot,
+//ust// .user_blocking = ltt_relay_user_blocking,
+//ust// .user_errors = ltt_relay_print_user_errors,
+//ust// },
+//ust// };
+
+static struct ltt_transport ust_relay_transport = {
+ .name = "ustrelay",
+//ust// .owner = THIS_MODULE,
+ .ops = {
+ .create_dirs = ltt_relay_create_dirs,
+ .remove_dirs = ltt_relay_remove_dirs,
+ .create_channel = ltt_relay_create_channel,
+ .finish_channel = ltt_relay_finish_channel,
+ .remove_channel = ltt_relay_remove_channel,
+ .wakeup_channel = ltt_relay_async_wakeup_chan,
+ .commit_slot = ltt_relay_commit_slot,
+ .reserve_slot = ltt_relay_reserve_slot,
+ .user_blocking = ltt_relay_user_blocking,
+ .user_errors = ltt_relay_print_user_errors,
+ },
+};
+
+//ust// static int __init ltt_relay_init(void)
+//ust// {
+//ust// printk(KERN_INFO "LTT : ltt-relay init\n");
+//ust//
+//ust// ltt_file_operations = ltt_relay_file_operations;
+//ust// ltt_file_operations.owner = THIS_MODULE;
+//ust// ltt_file_operations.open = ltt_open;
+//ust// ltt_file_operations.release = ltt_release;
+//ust// ltt_file_operations.poll = ltt_poll;
+//ust// ltt_file_operations.splice_read = ltt_relay_file_splice_read,
+//ust// ltt_file_operations.ioctl = ltt_ioctl;
+//ust//#ifdef CONFIG_COMPAT
+//ust// ltt_file_operations.compat_ioctl = ltt_compat_ioctl;
+//ust//#endif
+//ust//
+//ust//	ltt_transport_register(&ltt_relay_transport);
+//ust//
+//ust// return 0;
+//ust// }
+
+static char initialized = 0;
+
+void __attribute__((constructor)) init_ustrelay_transport(void)
+{
+ if(!initialized) {
+ ltt_transport_register(&ust_relay_transport);
+ initialized = 1;
+ }
+}
+
+static void __exit ltt_relay_exit(void)
+{
+//ust// printk(KERN_INFO "LTT : ltt-relay exit\n");
+
+ ltt_transport_unregister(&ust_relay_transport);
+}
+
+//ust// module_init(ltt_relay_init);
+//ust// module_exit(ltt_relay_exit);
+//ust//
+//ust// MODULE_LICENSE("GPL");
+//ust// MODULE_AUTHOR("Mathieu Desnoyers");
+//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Lockless Relay");
--- /dev/null
+/*
+ * linux/include/linux/ltt-relay.h
+ *
+ * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
+ * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
+ * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * CONFIG_RELAY definitions and declarations
+ */
+
+#ifndef _LINUX_LTT_RELAY_H
+#define _LINUX_LTT_RELAY_H
+
+//ust// #include <linux/types.h>
+//ust// #include <linux/sched.h>
+//ust// #include <linux/timer.h>
+//ust// #include <linux/wait.h>
+//ust// #include <linux/list.h>
+//ust// #include <linux/fs.h>
+//ust// #include <linux/poll.h>
+//ust// #include <linux/kref.h>
+//ust// #include <linux/mm.h>
+//ust// #include <linux/ltt-core.h>
+#include "kref.h"
+#include "list.h"
+
+/* Needs a _much_ better name... */
+#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
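+
+/*
+ * FIX_SIZE rounds a size up to a whole number of pages; e.g. with 4096-byte
+ * pages : FIX_SIZE(1) == 4096, FIX_SIZE(4096) == 4096, FIX_SIZE(4097) == 8192.
+ */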
+
+/*
+ * Tracks changes to rchan/rchan_buf structs
+ */
+#define LTT_RELAY_CHANNEL_VERSION 8
+
+struct rchan_buf;
+
+struct buf_page {
+ struct page *page;
+ struct rchan_buf *buf; /* buffer the page belongs to */
+ size_t offset; /* page offset in the buffer */
+ struct list_head list; /* buffer linked list */
+};
+
+/*
+ * Per-cpu relay channel buffer
+ */
+struct rchan_buf {
+ struct rchan *chan; /* associated channel */
+//ust// wait_queue_head_t read_wait; /* reader wait queue */
+//ust// struct timer_list timer; /* reader wake-up timer */
+//ust// struct dentry *dentry; /* channel file dentry */
+ struct kref kref; /* channel buffer refcount */
+//ust// struct list_head pages; /* list of buffer pages */
+ void *buf_data; //ust//
+ size_t buf_size;
+//ust// struct buf_page *wpage; /* current write page (cache) */
+//ust// struct buf_page *hpage[2]; /* current subbuf header page (cache) */
+//ust// struct buf_page *rpage; /* current subbuf read page (cache) */
+//ust// unsigned int page_count; /* number of current buffer pages */
+ unsigned int finalized; /* buffer has been finalized */
+//ust// unsigned int cpu; /* this buf's cpu */
+ int shmid; /* the shmid of the buffer data pages */
+} ____cacheline_aligned;
+
+/*
+ * Relay channel data structure
+ */
+struct rchan {
+ u32 version; /* the version of this struct */
+ size_t subbuf_size; /* sub-buffer size */
+ size_t n_subbufs; /* number of sub-buffers per buffer */
+ size_t alloc_size; /* total buffer size allocated */
+ struct rchan_callbacks *cb; /* client callbacks */
+ struct kref kref; /* channel refcount */
+ void *private_data; /* for user-defined data */
+//ust// struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
+ struct rchan_buf *buf;
+ struct list_head list; /* for channel list */
+ struct dentry *parent; /* parent dentry passed to open */
+ int subbuf_size_order; /* order of sub-buffer size */
+//ust// char base_filename[NAME_MAX]; /* saved base filename */
+};
+
+/*
+ * Relay channel client callbacks
+ */
+struct rchan_callbacks {
+ /*
+ * subbuf_start - called on buffer-switch to a new sub-buffer
+ * @buf: the channel buffer containing the new sub-buffer
+ * @subbuf: the start of the new sub-buffer
+ * @prev_subbuf: the start of the previous sub-buffer
+ * @prev_padding: unused space at the end of previous sub-buffer
+ *
+ * The client should return 1 to continue logging, 0 to stop
+ * logging.
+ *
+ * NOTE: subbuf_start will also be invoked when the buffer is
+ * created, so that the first sub-buffer can be initialized
+ * if necessary. In this case, prev_subbuf will be NULL.
+ *
+ * NOTE: the client can reserve bytes at the beginning of the new
+ * sub-buffer by calling subbuf_start_reserve() in this callback.
+ */
+ int (*subbuf_start) (struct rchan_buf *buf,
+ void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding);
+
+ /*
+ * create_buf_file - create file to represent a relay channel buffer
+ * @filename: the name of the file to create
+ * @parent: the parent of the file to create
+ * @mode: the mode of the file to create
+ * @buf: the channel buffer
+ *
+ * Called during relay_open(), once for each per-cpu buffer,
+ * to allow the client to create a file to be used to
+ * represent the corresponding channel buffer. If the file is
+ * created outside of relay, the parent must also exist in
+ * that filesystem.
+ *
+ * The callback should return the dentry of the file created
+ * to represent the relay buffer.
+ *
+ * Setting the is_global outparam to a non-zero value will
+ * cause relay_open() to create a single global buffer rather
+ * than the default set of per-cpu buffers.
+ *
+ * See Documentation/filesystems/relayfs.txt for more info.
+ */
+ struct dentry *(*create_buf_file)(const char *filename,
+ struct dentry *parent,
+ int mode,
+ struct rchan_buf *buf);
+
+ /*
+ * remove_buf_file - remove file representing a relay channel buffer
+ * @dentry: the dentry of the file to remove
+ *
+ * Called during relay_close(), once for each per-cpu buffer,
+ * to allow the client to remove a file used to represent a
+ * channel buffer.
+ *
+ * The callback should return 0 if successful, negative if not.
+ */
+//ust// int (*remove_buf_file)(struct rchan_buf *buf);
+};
+
+extern struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
+ struct buf_page *page, size_t offset, ssize_t diff_offset);
+
+extern struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
+ struct buf_page *page, size_t offset, ssize_t diff_offset);
+
+extern void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
+ const void *src, size_t len, ssize_t cpy);
+
+extern int ltt_relay_read(struct rchan_buf *buf, size_t offset,
+ void *dest, size_t len);
+
+extern struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf,
+ size_t offset);
+
+/*
+ * Return the address where a given offset is located.
+ * Should be used to get the current subbuffer header pointer. Given we know
+ * it's never on a page boundary, it's safe to write directly to this address,
+ * as long as the write is never bigger than a page size.
+ */
+extern void *ltt_relay_offset_address(struct rchan_buf *buf,
+ size_t offset);
+
+/*
+ * Find the page containing "offset". Cache it if it is after the currently
+ * cached page.
+ */
+static inline struct buf_page *ltt_relay_cache_page(struct rchan_buf *buf,
+ struct buf_page **page_cache,
+ struct buf_page *page, size_t offset)
+{
+ ssize_t diff_offset;
+ ssize_t half_buf_size = buf->chan->alloc_size >> 1;
+
+ /*
+ * Make sure this is the page we want to write into. The current
+ * page is changed concurrently by other writers. [wrh]page are
+ * used as a cache remembering the last page written
+ * to/read/looked up for header address. No synchronization;
+ * could have to find the previous page if a nested write
+ * occurred. Finding the right page is done by comparing the
+ * dest_offset with the buf_page offsets.
+ * When at the exact opposite of the buffer, bias towards forward search
+ * because it will be cached.
+ */
+
+ diff_offset = (ssize_t)offset - (ssize_t)page->offset;
+ if (diff_offset <= -(ssize_t)half_buf_size)
+ diff_offset += buf->chan->alloc_size;
+ else if (diff_offset > half_buf_size)
+ diff_offset -= buf->chan->alloc_size;
+
+ if (unlikely(diff_offset >= (ssize_t)PAGE_SIZE)) {
+ page = ltt_relay_find_next_page(buf, page, offset, diff_offset);
+ *page_cache = page;
+ } else if (unlikely(diff_offset < 0)) {
+ page = ltt_relay_find_prev_page(buf, page, offset, diff_offset);
+ }
+ return page;
+}
+
+//ust// #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
+{
+ switch (len) {
+ case 0: break;
+ case 1: *(u8 *)dest = *(const u8 *)src;
+ break;
+ case 2: *(u16 *)dest = *(const u16 *)src;
+ break;
+ case 4: *(u32 *)dest = *(const u32 *)src;
+ break;
+//ust// #if (BITS_PER_LONG == 64)
+ case 8: *(u64 *)dest = *(const u64 *)src;
+ break;
+//ust// #endif
+ default:
+ memcpy(dest, src, len);
+ }
+}
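+
+/*
+ * Rationale : event fields are almost always 1, 2, 4 or 8 bytes, and on
+ * targets with efficient unaligned accesses each such case compiles down to
+ * a single load/store instead of a memcpy() call, e.g. :
+ *
+ *	ltt_relay_do_copy(dest, src, sizeof(u32));	// one 32-bit store
+ */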
+//ust// #else
+//ust// /*
+//ust// * Returns whether the dest and src addresses are aligned on
+//ust// * min(sizeof(void *), len). Call this with statically known len for efficiency.
+//ust// */
+//ust// static inline int addr_aligned(const void *dest, const void *src, size_t len)
+//ust// {
+//ust// if (ltt_align((size_t)dest, len))
+//ust// return 0;
+//ust// if (ltt_align((size_t)src, len))
+//ust// return 0;
+//ust// return 1;
+//ust// }
+//ust//
+//ust// static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
+//ust// {
+//ust// switch (len) {
+//ust// case 0: break;
+//ust// case 1: *(u8 *)dest = *(const u8 *)src;
+//ust// break;
+//ust// case 2: if (unlikely(!addr_aligned(dest, src, 2)))
+//ust// goto memcpy_fallback;
+//ust// *(u16 *)dest = *(const u16 *)src;
+//ust// break;
+//ust// case 4: if (unlikely(!addr_aligned(dest, src, 4)))
+//ust// goto memcpy_fallback;
+//ust// *(u32 *)dest = *(const u32 *)src;
+//ust// break;
+//ust// #if (BITS_PER_LONG == 64)
+//ust// case 8: if (unlikely(!addr_aligned(dest, src, 8)))
+//ust// goto memcpy_fallback;
+//ust// *(u64 *)dest = *(const u64 *)src;
+//ust// break;
+//ust// #endif
+//ust// default:
+//ust// goto memcpy_fallback;
+//ust// }
+//ust// return;
+//ust// memcpy_fallback:
+//ust// memcpy(dest, src, len);
+//ust// }
+//ust// #endif
+
+static inline int ltt_relay_write(struct rchan_buf *buf, size_t offset,
+ const void *src, size_t len)
+{
+//ust// struct buf_page *page;
+//ust// ssize_t pagecpy;
+//ust//
+//ust// offset &= buf->chan->alloc_size - 1;
+//ust// page = buf->wpage;
+//ust//
+//ust// page = ltt_relay_cache_page(buf, &buf->wpage, page, offset);
+//ust// pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+//ust// ltt_relay_do_copy(page_address(page->page)
+//ust// + (offset & ~PAGE_MASK), src, pagecpy);
+//ust//
+//ust// if (unlikely(len != pagecpy))
+//ust// _ltt_relay_write(buf, offset, src, len, page, pagecpy);
+//ust// return len;
+
+
+ size_t cpy;
+ cpy = min_t(size_t, len, buf->buf_size - offset);
+ ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
+
+ if (unlikely(len != cpy))
+ _ltt_relay_write(buf, offset, src, len, cpy);
+ return len;
+}
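+
+/*
+ * Example use (sketch) : writing an 8-byte payload at a reserved offset.
+ * The fast path above copies what fits before the end of the flat buffer;
+ * when the copy would run past buf_size, the _ltt_relay_write() slow path
+ * finishes it.
+ *
+ *	u64 payload = 42;
+ *	ltt_relay_write(buf, buf_offset, &payload, sizeof(payload));
+ */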
+
+/*
+ * CONFIG_LTT_RELAY kernel API, ltt/ltt-relay-alloc.c
+ */
+
+struct rchan *ltt_relay_open(const char *base_filename,
+ struct dentry *parent,
+ size_t subbuf_size,
+ size_t n_subbufs,
+ void *private_data);
+extern void ltt_relay_close(struct rchan *chan);
+
+/*
+ * exported ltt_relay file operations, ltt/ltt-relay-alloc.c
+ */
+extern const struct file_operations ltt_relay_file_operations;
+
+
+/* LTTng lockless logging buffer info */
+struct ltt_channel_buf_struct {
+ /* First 32 bytes cache-hot cacheline */
+ local_t offset; /* Current offset in the buffer */
+//ust// local_t *commit_count; /* Commit count per sub-buffer */
+	atomic_long_t consumed;	/*
+					 * Consumed offset in the buffer
+					 * standard atomic access (shared)
+					 */
+ unsigned long last_tsc; /*
+ * Last timestamp written in the buffer.
+ */
+ /* End of first 32 bytes cacheline */
+ atomic_long_t active_readers; /*
+ * Active readers count
+ * standard atomic access (shared)
+ */
+ local_t events_lost;
+ local_t corrupted_subbuffers;
+//ust// spinlock_t full_lock; /*
+//ust// * buffer full condition spinlock, only
+//ust// * for userspace tracing blocking mode
+//ust// * synchronization with reader.
+//ust// */
+//ust// wait_queue_head_t write_wait; /*
+//ust// * Wait queue for blocking user space
+//ust// * writers
+//ust// */
+//ust// atomic_t wakeup_readers; /* Boolean : wakeup readers waiting ? */
+	/*
+	 * One byte is written to this pipe when data is available, in
+	 * order to wake the consumer.
+	 *
+	 * Portability : single-byte writes must be as quick as possible.
+	 * The kernel-side buffer must be large enough so the writer does
+	 * not block. From the pipe(7) man page : since Linux 2.6.11, the
+	 * pipe capacity is 65536 bytes.
+	 */
+ int data_ready_fd_write;
+ /* the reading end of the pipe */
+ int data_ready_fd_read;
+
+ /* commit count per subbuffer; must be at end of struct */
+ local_t commit_count[0] ____cacheline_aligned;
+} ____cacheline_aligned;
+
+int ltt_do_get_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, long *pconsumed_old);
+
+int ltt_do_put_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, u32 uconsumed_old);
+
+
+#endif /* _LINUX_LTT_RELAY_H */
+
--- /dev/null
+/*
+ * LTTng serializing code.
+ *
+ * Copyright Mathieu Desnoyers, March 2007.
+ *
+ * Licensed under the GPLv2.
+ *
+ * Note about va_list : it seems to be implemented as an array on x86_64, but
+ * not on i386, so passing a va_list down through functions (related to array
+ * argument passing) behaves differently across architectures. This is why we
+ * pass a va_list * to ltt_vtrace.
+ */
+
+#include <stdarg.h>
+//ust// #include <linux/ctype.h>
+//ust// #include <linux/string.h>
+//ust// #include <linux/module.h>
+//ust// #include <linux/ltt-tracer.h>
+#include <string.h>
+#include <stdint.h>
+#include "kernelcompat.h"
+#include "relay.h"
+#include "tracer.h"
+#include "list.h"
+#include "usterr.h"
+
+enum ltt_type {
+ LTT_TYPE_SIGNED_INT,
+ LTT_TYPE_UNSIGNED_INT,
+ LTT_TYPE_STRING,
+ LTT_TYPE_NONE,
+};
+
+#define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
+
+/*
+ * Inspired from vsnprintf
+ *
+ * The serialization format string supports the basic printf format strings.
+ * In addition, it defines new formats that can be used to serialize more
+ * complex/non portable data structures.
+ *
+ * Typical use:
+ *
+ * field_name %ctype
+ * field_name #tracetype %ctype
+ * field_name #tracetype %ctype1 %ctype2 ...
+ *
+ * A conversion is performed between format string types supported by GCC and
+ * the trace type requested. GCC type is used to perform type checking on format
+ * strings. Trace type is used to specify the exact binary representation
+ * in the trace. A mapping is done between one or more GCC types to one trace
+ * type. Sign extension, if required by the conversion, is performed following
+ * the trace type.
+ *
+ * If a gcc format is not declared with a trace format, the gcc format is
+ * also used as binary representation in the trace.
+ *
+ * Strings are supported with %s.
+ * A single tracetype (sequence) can take multiple c types as parameter.
+ *
+ * c types:
+ *
+ * see printf(3).
+ *
+ * Note: to write a uint32_t in a trace, the following expression is recommended
+ * so it can be portable:
+ *
+ * ("#4u%lu", (unsigned long)var)
+ *
+ * trace types:
+ *
+ * Serialization specific formats :
+ *
+ * Fixed size integers
+ * #1u writes uint8_t
+ * #2u writes uint16_t
+ * #4u writes uint32_t
+ * #8u writes uint64_t
+ * #1d writes int8_t
+ * #2d writes int16_t
+ * #4d writes int32_t
+ * #8d writes int64_t
+ * i.e.:
+ * #1u%lu #2u%lu #4d%lu #8d%lu #llu%hu #d%lu
+ *
+ * * Attributes:
+ *
+ * n: (for network byte order)
+ * #ntracetype%ctype
+ * is written in the trace in network byte order.
+ *
+ * i.e.: #bn4u%lu, #n%lu, #b%u
+ *
+ * TODO (eventually)
+ * Variable length sequence
+ * #a #tracetype1 #tracetype2 %array_ptr %elem_size %num_elems
+ * In the trace:
+ * #a specifies that this is a sequence
+ * #tracetype1 is the type of elements in the sequence
+ * #tracetype2 is the type of the element count
+ * GCC input:
+ * array_ptr is a pointer to an array that contains members of size
+ * elem_size.
+ * num_elems is the number of elements in the array.
+ * i.e.: #a #lu #lu %p %lu %u
+ *
+ * Callback
+ * #k callback (taken from the probe data)
+ * The following % arguments are expected by the callback
+ *
+ * i.e.: #a #lu #lu #k %p
+ *
+ * Note: no conversion is done from floats to integers, nor from integers to
+ * floats, between c types and trace types. Float conversion from double to
+ * float, or from float to double, is not supported either.
+ *
+ * REMOVE
+ * %*b expects sizeof(data), data
+ * where sizeof(data) is 1, 2, 4 or 8
+ *
+ * Fixed length struct, union or array.
+ * FIXME: unable to extract those sizes statically.
+ * %*r expects sizeof(*ptr), ptr
+ * %*.*r expects sizeof(*ptr), __alignof__(*ptr), ptr
+ * struct and unions removed.
+ * Fixed length array:
+ * [%p]#a[len #tracetype]
+ * i.e.: [%p]#a[12 #lu]
+ *
+ * Variable length sequence
+ * %*.*:*v expects sizeof(*ptr), __alignof__(*ptr), elem_num, ptr
+ * where elem_num is the number of elements in the sequence
+ */
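+/*
+ * Illustrative sketch (not in the original code): following the
+ * recommendation above, a portable uint32_t field pairs the trace type
+ * "#4u" with the gcc type "%lu", widening the argument to unsigned long:
+ *
+ *	uint32_t seq = 42;
+ *	trace_mark(ust, pkt_seq, "seq #4u%lu", (unsigned long)seq);
+ *
+ * parse_trace_type() then reports trace_size == 4 with
+ * LTT_TYPE_UNSIGNED_INT, parse_c_type() reports c_size == sizeof(long),
+ * and serialize_trace_data() truncates the value to 4 bytes in the trace.
+ */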
+static inline const char *parse_trace_type(const char *fmt,
+ char *trace_size, enum ltt_type *trace_type,
+ unsigned long *attributes)
+{
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+ /* 'z' support added 23/7/1999 S.H. */
+ /* 'z' changed to 'Z' --davidm 1/25/99 */
+ /* 't' added for ptrdiff_t */
+
+ /* parse attributes. */
+repeat:
+ switch (*fmt) {
+ case 'n':
+ *attributes |= LTT_ATTRIBUTE_NETWORK_BYTE_ORDER;
+ ++fmt;
+ goto repeat;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
+ *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
+ *fmt == 'S' || *fmt == '1' || *fmt == '2' ||
+ *fmt == '4' || *fmt == '8') {
+ qualifier = *fmt;
+ ++fmt;
+ if (qualifier == 'l' && *fmt == 'l') {
+ qualifier = 'L';
+ ++fmt;
+ }
+ }
+
+ switch (*fmt) {
+ case 'c':
+ *trace_type = LTT_TYPE_UNSIGNED_INT;
+ *trace_size = sizeof(unsigned char);
+ goto parse_end;
+ case 's':
+ *trace_type = LTT_TYPE_STRING;
+ goto parse_end;
+ case 'p':
+ *trace_type = LTT_TYPE_UNSIGNED_INT;
+ *trace_size = sizeof(void *);
+ goto parse_end;
+ case 'd':
+ case 'i':
+ *trace_type = LTT_TYPE_SIGNED_INT;
+ break;
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ *trace_type = LTT_TYPE_UNSIGNED_INT;
+ break;
+ default:
+ if (!*fmt)
+ --fmt;
+ goto parse_end;
+ }
+ switch (qualifier) {
+ case 'L':
+ *trace_size = sizeof(long long);
+ break;
+ case 'l':
+ *trace_size = sizeof(long);
+ break;
+ case 'Z':
+ case 'z':
+ *trace_size = sizeof(size_t);
+ break;
+//ust// case 't':
+//ust// *trace_size = sizeof(ptrdiff_t);
+//ust// break;
+ case 'h':
+ *trace_size = sizeof(short);
+ break;
+ case '1':
+ *trace_size = sizeof(uint8_t);
+ break;
+ case '2':
+ *trace_size = sizeof(uint16_t);
+ break;
+ case '4':
+ *trace_size = sizeof(uint32_t);
+ break;
+ case '8':
+ *trace_size = sizeof(uint64_t);
+ break;
+ default:
+ *trace_size = sizeof(int);
+ }
+
+parse_end:
+ return fmt;
+}
+
+/*
+ * Restrictions:
+ * Field width and precision are *not* supported.
+ * %n not supported.
+ */
+static inline const char *parse_c_type(const char *fmt,
+ char *c_size, enum ltt_type *c_type)
+{
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+ /* 'z' support added 23/7/1999 S.H. */
+ /* 'z' changed to 'Z' --davidm 1/25/99 */
+ /* 't' added for ptrdiff_t */
+
+ /* process flags : ignore standard print formats for now. */
+repeat:
+ switch (*fmt) {
+ case '-':
+ case '+':
+ case ' ':
+ case '#':
+ case '0':
+ ++fmt;
+ goto repeat;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
+ *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
+ *fmt == 'S') {
+ qualifier = *fmt;
+ ++fmt;
+ if (qualifier == 'l' && *fmt == 'l') {
+ qualifier = 'L';
+ ++fmt;
+ }
+ }
+
+ switch (*fmt) {
+ case 'c':
+ *c_type = LTT_TYPE_UNSIGNED_INT;
+ *c_size = sizeof(unsigned char);
+ goto parse_end;
+ case 's':
+ *c_type = LTT_TYPE_STRING;
+ goto parse_end;
+ case 'p':
+ *c_type = LTT_TYPE_UNSIGNED_INT;
+ *c_size = sizeof(void *);
+ goto parse_end;
+ case 'd':
+ case 'i':
+ *c_type = LTT_TYPE_SIGNED_INT;
+ break;
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ *c_type = LTT_TYPE_UNSIGNED_INT;
+ break;
+ default:
+ if (!*fmt)
+ --fmt;
+ goto parse_end;
+ }
+ switch (qualifier) {
+ case 'L':
+ *c_size = sizeof(long long);
+ break;
+ case 'l':
+ *c_size = sizeof(long);
+ break;
+ case 'Z':
+ case 'z':
+ *c_size = sizeof(size_t);
+ break;
+//ust// case 't':
+//ust// *c_size = sizeof(ptrdiff_t);
+//ust// break;
+ case 'h':
+ *c_size = sizeof(short);
+ break;
+ default:
+ *c_size = sizeof(int);
+ }
+
+parse_end:
+ return fmt;
+}
+
+static inline size_t serialize_trace_data(struct rchan_buf *buf,
+ size_t buf_offset,
+ char trace_size, enum ltt_type trace_type,
+ char c_size, enum ltt_type c_type,
+ int *largest_align, va_list *args)
+{
+ union {
+ unsigned long v_ulong;
+ uint64_t v_uint64;
+ struct {
+ const char *s;
+ size_t len;
+ } v_string;
+ } tmp;
+
+ /*
+ * Be careful about sign extension here.
+ * Sign extension is done with the destination (trace) type.
+ */
+ switch (trace_type) {
+ case LTT_TYPE_SIGNED_INT:
+ switch (c_size) {
+ case 1:
+ tmp.v_ulong = (long)(int8_t)va_arg(*args, int);
+ break;
+ case 2:
+ tmp.v_ulong = (long)(int16_t)va_arg(*args, int);
+ break;
+ case 4:
+ tmp.v_ulong = (long)(int32_t)va_arg(*args, int);
+ break;
+ case 8:
+ tmp.v_uint64 = va_arg(*args, int64_t);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case LTT_TYPE_UNSIGNED_INT:
+ switch (c_size) {
+ case 1:
+ tmp.v_ulong = (unsigned long)(uint8_t)
+ va_arg(*args, unsigned int);
+ break;
+ case 2:
+ tmp.v_ulong = (unsigned long)(uint16_t)
+ va_arg(*args, unsigned int);
+ break;
+ case 4:
+ tmp.v_ulong = (unsigned long)(uint32_t)
+ va_arg(*args, unsigned int);
+ break;
+ case 8:
+ tmp.v_uint64 = va_arg(*args, uint64_t);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case LTT_TYPE_STRING:
+ tmp.v_string.s = va_arg(*args, const char *);
+ if ((unsigned long)tmp.v_string.s < PAGE_SIZE)
+ tmp.v_string.s = "<NULL>";
+ tmp.v_string.len = strlen(tmp.v_string.s)+1;
+ if (buf)
+ ltt_relay_write(buf, buf_offset, tmp.v_string.s,
+ tmp.v_string.len);
+ buf_offset += tmp.v_string.len;
+ goto copydone;
+ default:
+ BUG();
+ }
+
+ /*
+ * If trace_size is less than or equal to 4 bytes, there is no sign
+ * extension to do, because the value is already encoded in a long.
+ * Therefore we can combine signed and unsigned ops. A 4-byte float also
+ * works, because we do a simple 4-byte copy without manipulation (and we
+ * do not support conversion between integers and floats).
+ * The same holds if c_size is 8 bytes, which is the largest possible
+ * integer.
+ */
+ if (ltt_get_alignment()) {
+ buf_offset += ltt_align(buf_offset, trace_size);
+ if (largest_align)
+ *largest_align = max_t(int, *largest_align, trace_size);
+ }
+ if (trace_size <= 4 || c_size == 8) {
+ if (buf) {
+ switch (trace_size) {
+ case 1:
+ if (c_size == 8)
+ ltt_relay_write(buf, buf_offset,
+ (uint8_t[]){ (uint8_t)tmp.v_uint64 },
+ sizeof(uint8_t));
+ else
+ ltt_relay_write(buf, buf_offset,
+ (uint8_t[]){ (uint8_t)tmp.v_ulong },
+ sizeof(uint8_t));
+ break;
+ case 2:
+ if (c_size == 8)
+ ltt_relay_write(buf, buf_offset,
+ (uint16_t[]){ (uint16_t)tmp.v_uint64 },
+ sizeof(uint16_t));
+ else
+ ltt_relay_write(buf, buf_offset,
+ (uint16_t[]){ (uint16_t)tmp.v_ulong },
+ sizeof(uint16_t));
+ break;
+ case 4:
+ if (c_size == 8)
+ ltt_relay_write(buf, buf_offset,
+ (uint32_t[]){ (uint32_t)tmp.v_uint64 },
+ sizeof(uint32_t));
+ else
+ ltt_relay_write(buf, buf_offset,
+ (uint32_t[]){ (uint32_t)tmp.v_ulong },
+ sizeof(uint32_t));
+ break;
+ case 8:
+ /*
+ * c_size cannot be other than 8 here because
+ * trace_size > 4.
+ */
+ ltt_relay_write(buf, buf_offset,
+ (uint64_t[]){ (uint64_t)tmp.v_uint64 },
+ sizeof(uint64_t));
+ break;
+ default:
+ BUG();
+ }
+ }
+ buf_offset += trace_size;
+ goto copydone;
+ } else {
+ /*
+ * Perform sign extension.
+ */
+ if (buf) {
+ switch (trace_type) {
+ case LTT_TYPE_SIGNED_INT:
+ ltt_relay_write(buf, buf_offset,
+ (int64_t[]){ (int64_t)tmp.v_ulong },
+ sizeof(int64_t));
+ break;
+ case LTT_TYPE_UNSIGNED_INT:
+ ltt_relay_write(buf, buf_offset,
+ (uint64_t[]){ (uint64_t)tmp.v_ulong },
+ sizeof(uint64_t));
+ break;
+ default:
+ BUG();
+ }
+ }
+ buf_offset += trace_size;
+ goto copydone;
+ }
+
+copydone:
+ return buf_offset;
+}
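+/*
+ * Walk-through (illustrative): for a field declared "ret #8d%d",
+ * parse_c_type() yields c_size == 4 with LTT_TYPE_SIGNED_INT and
+ * parse_trace_type() yields trace_size == 8. serialize_trace_data() reads
+ * the vararg as an int, sign-extends it into tmp.v_ulong through the
+ * (long)(int32_t) cast, and, since trace_size > 4 and c_size != 8, takes
+ * the sign extension branch to emit an int64_t in the trace (assuming a
+ * 64-bit long, as on x86_64).
+ */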
+
+notrace size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
+ struct ltt_serialize_closure *closure,
+ void *serialize_private, int *largest_align,
+ const char *fmt, va_list *args)
+{
+ char trace_size = 0, c_size = 0; /*
+ * 0 (unset), 1, 2, 4, 8 bytes.
+ */
+ enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE;
+ unsigned long attributes = 0;
+
+ for (; *fmt ; ++fmt) {
+ switch (*fmt) {
+ case '#':
+ /* tracetypes (#) */
+ ++fmt; /* skip first '#' */
+ if (*fmt == '#') /* Escaped ## */
+ break;
+ attributes = 0;
+ fmt = parse_trace_type(fmt, &trace_size, &trace_type,
+ &attributes);
+ break;
+ case '%':
+ /* c types (%) */
+ ++fmt; /* skip first '%' */
+ if (*fmt == '%') /* Escaped %% */
+ break;
+ fmt = parse_c_type(fmt, &c_size, &c_type);
+ /*
+ * Output the c type if no trace type has been
+ * specified.
+ */
+ if (!trace_size)
+ trace_size = c_size;
+ if (trace_type == LTT_TYPE_NONE)
+ trace_type = c_type;
+ if (c_type == LTT_TYPE_STRING)
+ trace_type = LTT_TYPE_STRING;
+ /* perform trace write */
+ buf_offset = serialize_trace_data(buf,
+ buf_offset, trace_size,
+ trace_type, c_size, c_type,
+ largest_align, args);
+ trace_size = 0;
+ c_size = 0;
+ trace_type = LTT_TYPE_NONE;
+ c_type = LTT_TYPE_NONE;
+ attributes = 0;
+ break;
+ /* default is to skip the text, doing nothing */
+ }
+ }
+ return buf_offset;
+}
+EXPORT_SYMBOL_GPL(ltt_serialize_data);
+
+/*
+ * Calculate data size
+ * Assume that the padding for alignment starts at a sizeof(void *) address.
+ */
+static notrace size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
+ void *serialize_private, int *largest_align,
+ const char *fmt, va_list *args)
+{
+ ltt_serialize_cb cb = closure->callbacks[0];
+ closure->cb_idx = 0;
+ return (size_t)cb(NULL, 0, closure, serialize_private,
+ largest_align, fmt, args);
+}
+
+static notrace
+void ltt_write_event_data(struct rchan_buf *buf, size_t buf_offset,
+ struct ltt_serialize_closure *closure,
+ void *serialize_private, int largest_align,
+ const char *fmt, va_list *args)
+{
+ ltt_serialize_cb cb = closure->callbacks[0];
+ closure->cb_idx = 0;
+ buf_offset += ltt_align(buf_offset, largest_align);
+ cb(buf, buf_offset, closure, serialize_private, NULL, fmt, args);
+}
+
+
+notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
+ void *call_data, const char *fmt, va_list *args)
+{
+ int largest_align, ret;
+ struct ltt_active_marker *pdata;
+ uint16_t eID;
+ size_t data_size, slot_size;
+ unsigned int chan_index;
+ struct ltt_channel_struct *channel;
+ struct ltt_trace_struct *trace, *dest_trace = NULL;
+ struct rchan_buf *buf;
+ void *transport_data;
+ u64 tsc;
+ long buf_offset;
+ va_list args_copy;
+ struct ltt_serialize_closure closure;
+ struct ltt_probe_private_data *private_data = call_data;
+ void *serialize_private = NULL;
+ int cpu;
+ unsigned int rflags;
+
+ /*
+ * This test is useful for quickly exiting static tracing when no trace
+ * is active. We expect to have an active trace when we get here.
+ */
+ if (unlikely(ltt_traces.num_active_traces == 0))
+ return;
+
+ rcu_read_lock_sched_notrace();
+ cpu = smp_processor_id();
+//ust// __get_cpu_var(ltt_nesting)++;
+ ltt_nesting++;
+
+ pdata = (struct ltt_active_marker *)probe_data;
+ eID = mdata->event_id;
+ chan_index = mdata->channel_id;
+ closure.callbacks = pdata->probe->callbacks;
+
+ if (unlikely(private_data)) {
+ dest_trace = private_data->trace;
+ if (private_data->serializer)
+ closure.callbacks = &private_data->serializer;
+ serialize_private = private_data->serialize_private;
+ }
+
+ va_copy(args_copy, *args);
+ /*
+ * Assumes event payload to start on largest_align alignment.
+ */
+ largest_align = 1; /* must be non-zero for ltt_align */
+ data_size = ltt_get_data_size(&closure, serialize_private,
+ &largest_align, fmt, &args_copy);
+ largest_align = min_t(int, largest_align, sizeof(void *));
+ va_end(args_copy);
+
+ /* Iterate on each trace */
+ list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+ /*
+ * Expect the filter to filter out events. If we get here,
+ * we went through tracepoint activation as a first step.
+ */
+ if (unlikely(dest_trace && trace != dest_trace))
+ continue;
+ if (unlikely(!trace->active))
+ continue;
+ if (unlikely(!ltt_run_filter(trace, eID)))
+ continue;
+#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
+ rflags = LTT_RFLAG_ID_SIZE;
+#else
+ if (unlikely(eID >= LTT_FREE_EVENTS))
+ rflags = LTT_RFLAG_ID;
+ else
+ rflags = 0;
+#endif
+ /*
+ * Skip channels added after trace creation.
+ */
+ if (unlikely(chan_index >= trace->nr_channels))
+ continue;
+ channel = &trace->channels[chan_index];
+ if (!channel->active)
+ continue;
+
+ /* reserve space : header and data */
+ ret = ltt_reserve_slot(trace, channel, &transport_data,
+ data_size, &slot_size, &buf_offset,
+ &tsc, &rflags,
+ largest_align);
+ if (unlikely(ret < 0))
+ continue; /* buffer full */
+
+ va_copy(args_copy, *args);
+ /* FIXME : could probably encapsulate transport better. */
+//ust// buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
+ buf = ((struct rchan *)channel->trans_channel_data)->buf;
+ /* Out-of-order write : header and data */
+ buf_offset = ltt_write_event_header(trace,
+ channel, buf, buf_offset,
+ eID, data_size, tsc, rflags);
+ ltt_write_event_data(buf, buf_offset, &closure,
+ serialize_private,
+ largest_align, fmt, &args_copy);
+ va_end(args_copy);
+ /* Out-of-order commit */
+ ltt_commit_slot(channel, &transport_data, buf_offset,
+ slot_size);
+ printf("just commited event at offset %d and size %d\n", buf_offset, slot_size);
+ }
+//ust// __get_cpu_var(ltt_nesting)--;
+ ltt_nesting--;
+ rcu_read_unlock_sched_notrace();
+}
+EXPORT_SYMBOL_GPL(ltt_vtrace);
+
+notrace void ltt_trace(const struct marker *mdata, void *probe_data,
+ void *call_data, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ ltt_vtrace(mdata, probe_data, call_data, fmt, &args);
+ va_end(args);
+}
+EXPORT_SYMBOL_GPL(ltt_trace);
+
+//ust// MODULE_LICENSE("GPL");
+//ust// MODULE_AUTHOR("Mathieu Desnoyers");
+//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Serializer");
--- /dev/null
+#include <stdio.h>
+#include <stdint.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sched.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <pthread.h>
+
+#include "marker.h"
+#include "tracer.h"
+#include "localerr.h"
+#include "ustcomm.h"
+#include "relay.h" /* FIXME: remove */
+
+//#define USE_CLONE
+
+#define USTSIGNAL SIGIO
+
+#define MAX_MSG_SIZE (100)
+#define MSG_NOTIF 1
+#define MSG_REGISTER_NOTIF 2
+
+char consumer_stack[10000];
+
+struct list_head blocked_consumers = LIST_HEAD_INIT(blocked_consumers);
+
+static struct ustcomm_app ustcomm_app;
+
+struct tracecmd { /* no padding */
+ uint32_t size;
+ uint16_t command;
+};
+
+//struct listener_arg {
+// int pipe_fd;
+//};
+
+struct trctl_msg {
+ /* size: the size of all the fields except size itself */
+ uint32_t size;
+ uint16_t type;
+ /* Only the necessary part of the payload is transferred. It
+ * may even be none of it.
+ */
+ char payload[94];
+};
+
+struct consumer_channel {
+ int fd;
+ struct ltt_channel_struct *chan;
+};
+
+struct blocked_consumer {
+ int fd_consumer;
+ int fd_producer;
+ int tmp_poll_idx;
+
+ /* args to ustcomm_send_reply */
+ struct ustcomm_server server;
+ struct ustcomm_source src;
+
+ /* args to ltt_do_get_subbuf */
+ struct rchan_buf *rbuf;
+ struct ltt_channel_buf_struct *lttbuf;
+
+ struct list_head list;
+};
+
+static void print_markers(void)
+{
+ struct marker_iter iter;
+
+ lock_markers();
+ marker_iter_reset(&iter);
+ marker_iter_start(&iter);
+
+ while(iter.marker) {
+ fprintf(stderr, "marker: %s_%s \"%s\"\n", iter.marker->channel, iter.marker->name, iter.marker->format);
+ marker_iter_next(&iter);
+ }
+ unlock_markers();
+}
+
+void do_command(struct tracecmd *cmd)
+{
+}
+
+void receive_commands()
+{
+}
+
+int fd_notif = -1;
+void notif_cb(void)
+{
+ int result;
+ struct trctl_msg msg;
+
+ /* FIXME: fd_notif should probably be protected by a spinlock */
+
+ if(fd_notif == -1)
+ return;
+
+ msg.type = MSG_NOTIF;
+ msg.size = sizeof(msg.type);
+
+ /* FIXME: don't block here */
+ result = write(fd_notif, &msg, msg.size+sizeof(msg.size));
+ if(result == -1) {
+ PERROR("write");
+ return;
+ }
+}
+
+static int inform_consumer_daemon(void)
+{
+ ustcomm_request_consumer(getpid(), "metadata");
+ ustcomm_request_consumer(getpid(), "ust");
+ return 0;
+}
+
+void process_blocked_consumers(void)
+{
+ int n_fds = 0;
+ struct pollfd *fds;
+ struct blocked_consumer *bc;
+ int idx = 0;
+ char inbuf;
+ int result;
+
+ list_for_each_entry(bc, &blocked_consumers, list) {
+ n_fds++;
+ }
+
+ fds = (struct pollfd *) malloc(n_fds * sizeof(struct pollfd));
+ if(fds == NULL) {
+ ERR("malloc returned NULL");
+ return;
+ }
+
+ list_for_each_entry(bc, &blocked_consumers, list) {
+ fds[idx].fd = bc->fd_producer;
+ fds[idx].events = POLLIN;
+ bc->tmp_poll_idx = idx;
+ idx++;
+ }
+
+ result = poll(fds, n_fds, 0);
+ if(result == -1) {
+ PERROR("poll");
+ free(fds);
+ return;
+ }
+
+ list_for_each_entry(bc, &blocked_consumers, list) {
+ if(fds[bc->tmp_poll_idx].revents) {
+ long consumed_old = 0;
+ char *reply;
+
+ result = read(bc->fd_producer, &inbuf, 1);
+ if(result == -1) {
+ PERROR("read");
+ continue;
+ }
+ if(result == 0) {
+ DBG("PRODUCER END");
+
+ close(bc->fd_producer);
+
+ __list_del(bc->list.prev, bc->list.next);
+
+ result = ustcomm_send_reply(&bc->server, "END", &bc->src);
+ if(result < 0) {
+ ERR("ustcomm_send_reply failed");
+ continue;
+ }
+
+ continue;
+ }
+
+ result = ltt_do_get_subbuf(bc->rbuf, bc->lttbuf, &consumed_old);
+ if(result == -EAGAIN) {
+ WARN("missed buffer?");
+ continue;
+ }
+ else if(result < 0) {
+ DBG("ltt_do_get_subbuf: error: %s", strerror(-result));
+ }
+ asprintf(&reply, "%s %ld", "OK", consumed_old);
+ result = ustcomm_send_reply(&bc->server, reply, &bc->src);
+ if(result < 0) {
+ ERR("ustcomm_send_reply failed");
+ free(reply);
+ continue;
+ }
+ free(reply);
+
+ __list_del(bc->list.prev, bc->list.next);
+ }
+ }
+
+ free(fds);
+}
+
+void *listener_main(void *p)
+{
+ int result;
+
+ DBG("LISTENER");
+
+ for(;;) {
+ uint32_t size;
+ struct sockaddr_un addr;
+ socklen_t addrlen = sizeof(addr);
+ char trace_name[] = "auto";
+ char trace_type[] = "ustrelay";
+ char *recvbuf;
+ int len;
+ struct ustcomm_source src;
+
+ process_blocked_consumers();
+
+ result = ustcomm_app_recv_message(&ustcomm_app, &recvbuf, &src, 5);
+ if(result < 0) {
+ WARN("error in ustcomm_app_recv_message");
+ continue;
+ }
+ else if(result == 0) {
+ /* no message */
+ continue;
+ }
+
+ DBG("received a message! it's: %s\n", recvbuf);
+ len = strlen(recvbuf);
+
+ if(!strcmp(recvbuf, "print_markers")) {
+ print_markers();
+ }
+ else if(!strcmp(recvbuf, "trace_setup")) {
+ DBG("trace setup");
+
+ result = ltt_trace_setup(trace_name);
+ if(result < 0) {
+ ERR("ltt_trace_setup failed");
+ return NULL;
+ }
+
+ result = ltt_trace_set_type(trace_name, trace_type);
+ if(result < 0) {
+ ERR("ltt_trace_set_type failed");
+ return NULL;
+ }
+ }
+ else if(!strcmp(recvbuf, "trace_alloc")) {
+ DBG("trace alloc");
+
+ result = ltt_trace_alloc(trace_name);
+ if(result < 0) {
+ ERR("ltt_trace_alloc failed");
+ return NULL;
+ }
+ }
+ else if(!strcmp(recvbuf, "trace_start")) {
+ DBG("trace start");
+
+ result = ltt_trace_start(trace_name);
+ if(result < 0) {
+ ERR("ltt_trace_start failed");
+ continue;
+ }
+ }
+ else if(!strcmp(recvbuf, "trace_stop")) {
+ DBG("trace stop");
+
+ result = ltt_trace_stop(trace_name);
+ if(result < 0) {
+ ERR("ltt_trace_stop failed");
+ return NULL;
+ }
+ }
+ else if(!strcmp(recvbuf, "trace_destroy")) {
+
+ DBG("trace destroy");
+
+ result = ltt_trace_destroy(trace_name);
+ if(result < 0) {
+ ERR("ltt_trace_destroy failed");
+ return NULL;
+ }
+ }
+ else if(nth_token_is(recvbuf, "get_shmid", 0) == 1) {
+ struct ltt_trace_struct *trace;
+ char trace_name[] = "auto";
+ int i;
+ char *channel_name;
+
+ DBG("get_shmid");
+
+ channel_name = nth_token(recvbuf, 1);
+ if(channel_name == NULL) {
+ ERR("get_shmid: cannot parse channel");
+ goto next_cmd;
+ }
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+ ltt_unlock_traces();
+
+ if(trace == NULL) {
+ CPRINTF("cannot find trace!");
+ return NULL;
+ }
+
+ for(i=0; i<trace->nr_channels; i++) {
+ struct rchan *rchan = trace->channels[i].trans_channel_data;
+ struct rchan_buf *rbuf = rchan->buf;
+ struct ltt_channel_struct *ltt_channel = (struct ltt_channel_struct *)rchan->private_data;
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
+
+ if(!strcmp(trace->channels[i].channel_name, channel_name)) {
+ char *reply;
+
+ DBG("the shmid for the requested channel is %d", rbuf->shmid);
+ DBG("the shmid for its buffer structure is %d", ltt_channel->buf_shmid);
+ asprintf(&reply, "%d %d", rbuf->shmid, ltt_channel->buf_shmid);
+
+ result = ustcomm_send_reply(&ustcomm_app.server, reply, &src);
+ if(result) {
+ ERR("listener: get_shmid: ustcomm_send_reply failed");
+ goto next_cmd;
+ }
+
+ free(reply);
+
+ break;
+ }
+ }
+ }
+ else if(nth_token_is(recvbuf, "get_n_subbufs", 0) == 1) {
+ struct ltt_trace_struct *trace;
+ char trace_name[] = "auto";
+ int i;
+ char *channel_name;
+
+ DBG("get_n_subbufs");
+
+ channel_name = nth_token(recvbuf, 1);
+ if(channel_name == NULL) {
+ ERR("get_n_subbufs: cannot parse channel");
+ goto next_cmd;
+ }
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+ ltt_unlock_traces();
+
+ if(trace == NULL) {
+ CPRINTF("cannot find trace!");
+ return NULL;
+ }
+
+ for(i=0; i<trace->nr_channels; i++) {
+ struct rchan *rchan = trace->channels[i].trans_channel_data;
+
+ if(!strcmp(trace->channels[i].channel_name, channel_name)) {
+ char *reply;
+
+ DBG("the n_subbufs for the requested channel is %d", rchan->n_subbufs);
+ asprintf(&reply, "%d", rchan->n_subbufs);
+
+ result = ustcomm_send_reply(&ustcomm_app.server, reply, &src);
+ if(result) {
+ ERR("listener: get_n_subbufs: ustcomm_send_reply failed");
+ goto next_cmd;
+ }
+
+ free(reply);
+
+ break;
+ }
+ }
+ }
+ else if(nth_token_is(recvbuf, "get_subbuf_size", 0) == 1) {
+ struct ltt_trace_struct *trace;
+ char trace_name[] = "auto";
+ int i;
+ char *channel_name;
+
+ DBG("get_subbuf_size");
+
+ channel_name = nth_token(recvbuf, 1);
+ if(channel_name == NULL) {
+ ERR("get_subbuf_size: cannot parse channel");
+ goto next_cmd;
+ }
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+ ltt_unlock_traces();
+
+ if(trace == NULL) {
+ CPRINTF("cannot find trace!");
+ return NULL;
+ }
+
+ for(i=0; i<trace->nr_channels; i++) {
+ struct rchan *rchan = trace->channels[i].trans_channel_data;
+
+ if(!strcmp(trace->channels[i].channel_name, channel_name)) {
+ char *reply;
+
+ DBG("the subbuf_size for the requested channel is %d", rchan->subbuf_size);
+ asprintf(&reply, "%d", rchan->subbuf_size);
+
+ result = ustcomm_send_reply(&ustcomm_app.server, reply, &src);
+ if(result) {
+ ERR("listener: get_subbuf_size: ustcomm_send_reply failed");
+ goto next_cmd;
+ }
+
+ free(reply);
+
+ break;
+ }
+ }
+ }
+ else if(nth_token_is(recvbuf, "load_probe_lib", 0) == 1) {
+ char *libfile;
+
+ libfile = nth_token(recvbuf, 1);
+
+ DBG("load_probe_lib loading %s", libfile);
+ }
+ else if(nth_token_is(recvbuf, "get_subbuffer", 0) == 1) {
+ struct ltt_trace_struct *trace;
+ char trace_name[] = "auto";
+ int i;
+ char *channel_name;
+
+ DBG("get_subbuf");
+
+ channel_name = nth_token(recvbuf, 1);
+ if(channel_name == NULL) {
+ ERR("get_subbuf: cannot parse channel");
+ goto next_cmd;
+ }
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+ ltt_unlock_traces();
+
+ if(trace == NULL) {
+ CPRINTF("cannot find trace!");
+ return NULL;
+ }
+
+ for(i=0; i<trace->nr_channels; i++) {
+ struct rchan *rchan = trace->channels[i].trans_channel_data;
+
+ if(!strcmp(trace->channels[i].channel_name, channel_name)) {
+ struct rchan_buf *rbuf = rchan->buf;
+ struct ltt_channel_buf_struct *lttbuf = trace->channels[i].buf;
+ struct blocked_consumer *bc;
+
+ bc = (struct blocked_consumer *) malloc(sizeof(struct blocked_consumer));
+ if(bc == NULL) {
+ ERR("malloc returned NULL");
+ goto next_cmd;
+ }
+ bc->fd_consumer = src.fd;
+ bc->fd_producer = lttbuf->data_ready_fd_read;
+ bc->rbuf = rbuf;
+ bc->lttbuf = lttbuf;
+ bc->src = src;
+ bc->server = ustcomm_app.server;
+
+ list_add(&bc->list, &blocked_consumers);
+
+ break;
+ }
+ }
+ }
+ else if(nth_token_is(recvbuf, "put_subbuffer", 0) == 1) {
+ struct ltt_trace_struct *trace;
+ char trace_name[] = "auto";
+ int i;
+ char *channel_name;
+ long consumed_old;
+ char *consumed_old_str;
+ char *endptr;
+
+ DBG("put_subbuf");
+
+ channel_name = strdup_malloc(nth_token(recvbuf, 1));
+ if(channel_name == NULL) {
+ ERR("put_subbuf_size: cannot parse channel");
+ goto next_cmd;
+ }
+
+ consumed_old_str = strdup_malloc(nth_token(recvbuf, 2));
+ if(consumed_old_str == NULL) {
+ ERR("put_subbuf: cannot parse consumed_old");
+ goto next_cmd;
+ }
+ consumed_old = strtol(consumed_old_str, &endptr, 10);
+ if(*endptr != '\0') {
+ ERR("put_subbuf: invalid value for consumed_old");
+ goto next_cmd;
+ }
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+ ltt_unlock_traces();
+
+ if(trace == NULL) {
+ CPRINTF("cannot find trace!");
+ return NULL;
+ }
+
+ for(i=0; i<trace->nr_channels; i++) {
+ struct rchan *rchan = trace->channels[i].trans_channel_data;
+
+ if(!strcmp(trace->channels[i].channel_name, channel_name)) {
+ struct rchan_buf *rbuf = rchan->buf;
+ struct ltt_channel_buf_struct *lttbuf = trace->channels[i].buf;
+ char *reply;
+
+ result = ltt_do_put_subbuf(rbuf, lttbuf, consumed_old);
+ if(result < 0) {
+ WARN("ltt_do_put_subbuf: error");
+ }
+ else {
+ DBG("ltt_do_put_subbuf: success");
+ }
+ asprintf(&reply, "%s", "OK", consumed_old);
+
+ result = ustcomm_send_reply(&ustcomm_app.server, reply, &src);
+ if(result) {
+ ERR("listener: put_subbuf: ustcomm_send_reply failed");
+ goto next_cmd;
+ }
+
+ free(reply);
+
+ break;
+ }
+ }
+
+ free(channel_name);
+ free(consumed_old_str);
+ }
+// else if(nth_token_is(recvbuf, "get_notifications", 0) == 1) {
+// struct ltt_trace_struct *trace;
+// char trace_name[] = "auto";
+// int i;
+// char *channel_name;
+//
+// DBG("get_notifications");
+//
+// channel_name = strdup_malloc(nth_token(recvbuf, 1));
+// if(channel_name == NULL) {
+// ERR("put_subbuf_size: cannot parse channel");
+// goto next_cmd;
+// }
+//
+// ltt_lock_traces();
+// trace = _ltt_trace_find(trace_name);
+// ltt_unlock_traces();
+//
+// if(trace == NULL) {
+// CPRINTF("cannot find trace!");
+// return 1;
+// }
+//
+// for(i=0; i<trace->nr_channels; i++) {
+// struct rchan *rchan = trace->channels[i].trans_channel_data;
+// int fd;
+//
+// if(!strcmp(trace->channels[i].channel_name, channel_name)) {
+// struct rchan_buf *rbuf = rchan->buf;
+// struct ltt_channel_buf_struct *lttbuf = trace->channels[i].buf;
+//
+// result = fd = ustcomm_app_detach_client(&ustcomm_app, &src);
+// if(result == -1) {
+// ERR("ustcomm_app_detach_client failed");
+// goto next_cmd;
+// }
+//
+// lttbuf->wake_consumer_arg = (void *) fd;
+//
+// smp_wmb();
+//
+// lttbuf->call_wake_consumer = 1;
+//
+// break;
+// }
+// }
+//
+// free(channel_name);
+// }
+ else {
+ ERR("unable to parse message: %s", recvbuf);
+ }
+
+ next_cmd:
+ free(recvbuf);
+ }
+}
+
+static char listener_stack[16384];
+
+void create_listener(void)
+{
+ int result;
+
+#ifdef USE_CLONE
+ result = clone((int (*)(void *)) listener_main, listener_stack+sizeof(listener_stack)-1, CLONE_FS | CLONE_FILES | CLONE_VM | CLONE_SIGHAND | CLONE_THREAD, NULL);
+ if(result == -1) {
+ perror("clone");
+ }
+#else
+ pthread_t thread;
+
+ result = pthread_create(&thread, NULL, listener_main, NULL);
+ if(result != 0) {
+ ERR("pthread_create failed");
+ }
+#endif
+}
+
+/* The signal handler itself. Signals must be set up so there cannot be
+ nested signals. */
+
+void sighandler(int sig)
+{
+ static char have_listener = 0;
+ DBG("sighandler");
+
+ if(!have_listener) {
+ create_listener();
+ have_listener = 1;
+ }
+}
+
+/* Called by the app signal handler to chain it to us. */
+
+void chain_signal(void)
+{
+ sighandler(USTSIGNAL);
+}
+
+static int init_socket(void)
+{
+ return ustcomm_init_app(getpid(), &ustcomm_app);
+}
+
+static void destroy_socket(void)
+{
+// int result;
+//
+// if(mysocketfile[0] == '\0')
+// return;
+//
+// result = unlink(mysocketfile);
+// if(result == -1) {
+// PERROR("unlink");
+// }
+}
+
+static int init_signal_handler(void)
+{
+ /* Attempt to handle SIGIO. If the main program wants to
+ * handle it, fine, it'll override us. Then it'll have to
+ * use the chaining function.
+ */
+
+ int result;
+ struct sigaction act;
+
+ result = sigemptyset(&act.sa_mask);
+ if(result == -1) {
+ PERROR("sigemptyset");
+ return -1;
+ }
+
+ act.sa_handler = sighandler;
+ act.sa_flags = SA_RESTART;
+
+ /* Only defer ourselves. Also, try to restart interrupted
+ * syscalls to disturb the traced program as little as possible.
+ */
+ result = sigaction(SIGIO, &act, NULL);
+ if(result == -1) {
+ PERROR("sigaction");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void auto_probe_connect(struct marker *m)
+{
+ int result;
+
+ result = ltt_marker_connect(m->channel, m->name, "default");
+ if(result)
+ ERR("ltt_marker_connect");
+
+ DBG("just auto connected marker %s %s to probe default", m->channel, m->name);
+}
+
+static void __attribute__((constructor(101))) init0()
+{
+ DBG("UST_AUTOPROBE constructor");
+ if(getenv("UST_AUTOPROBE")) {
+ marker_set_new_marker_cb(auto_probe_connect);
+ }
+}
+
+static void fini(void);
+
+static void __attribute__((constructor(1000))) init()
+{
+ int result;
+
+ DBG("UST_TRACE constructor");
+
+ /* Must create socket before signal handler to prevent races.
+ */
+ result = init_socket();
+ if(result == -1) {
+ ERR("init_socket error");
+ return;
+ }
+ result = init_signal_handler();
+ if(result == -1) {
+ ERR("init_signal_handler error");
+ return;
+ }
+
+ if(getenv("UST_TRACE")) {
+ char trace_name[] = "auto";
+ char trace_type[] = "ustrelay";
+
+ DBG("starting early tracing");
+
+ /* Ensure marker control is initialized */
+ init_marker_control();
+
+ /* Ensure relay is initialized */
+ init_ustrelay_transport();
+
+ /* Ensure markers are initialized */
+ init_markers();
+
+ /* In case. */
+ ltt_channels_register("ust");
+
+ result = ltt_trace_setup(trace_name);
+ if(result < 0) {
+ ERR("ltt_trace_setup failed");
+ return;
+ }
+
+ result = ltt_trace_set_type(trace_name, trace_type);
+ if(result < 0) {
+ ERR("ltt_trace_set_type failed");
+ return;
+ }
+
+ result = ltt_trace_alloc(trace_name);
+ if(result < 0) {
+ ERR("ltt_trace_alloc failed");
+ return;
+ }
+
+ result = ltt_trace_start(trace_name);
+ if(result < 0) {
+ ERR("ltt_trace_start failed");
+ return;
+ }
+ //start_consumer();
+ inform_consumer_daemon();
+ }
+
+
+ /* FIXME: should decrementally destroy stuff on error */
+}
+
+/* This is only called if we terminate normally, not with an unhandled signal,
+ * so we cannot rely on it. */
+
+static void __attribute__((destructor)) fini()
+{
+ int result;
+
+ /* if trace running, finish it */
+
+ DBG("destructor stopping traces");
+
+ result = ltt_trace_stop("auto");
+ if(result == -1) {
+ ERR("ltt_trace_stop error");
+ }
+
+ result = ltt_trace_destroy("auto");
+ if(result == -1) {
+ ERR("ltt_trace_destroy error");
+ }
+
+ /* FIXME: wait for the consumer to be done */
+ //DBG("waiting 5 sec for consume");
+ //sleep(5);
+
+ destroy_socket();
+}
--- /dev/null
+/*
+ * Copyright (C) 2008 Mathieu Desnoyers
+ * Copyright (C) 2009 Pierre-Marc Fournier
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Ported to userspace by Pierre-Marc Fournier.
+ */
+
+//ust// #include <linux/module.h>
+//ust// #include <linux/mutex.h>
+//ust// #include <linux/types.h>
+//ust// #include <linux/jhash.h>
+//ust// #include <linux/list.h>
+//ust// #include <linux/rcupdate.h>
+//ust// #include <linux/tracepoint.h>
+//ust// #include <linux/err.h>
+//ust// #include <linux/slab.h>
+//ust// #include <linux/immediate.h>
+
+#include <errno.h>
+#include <stdlib.h>
+
+#include "kernelcompat.h"
+#include "tracepoint.h"
+#include "usterr.h"
+#include "list.h"
+
+//extern struct tracepoint __start___tracepoints[] __attribute__((visibility("hidden")));
+//extern struct tracepoint __stop___tracepoints[] __attribute__((visibility("hidden")));
+
+/* Set to 1 to enable tracepoint debug output */
+static const int tracepoint_debug;
+
+/* libraries that contain tracepoints (struct tracepoint_lib) */
+static LIST_HEAD(libs);
+
+/*
+ * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
+ * builtin and module tracepoints and the hash table.
+ */
+static DEFINE_MUTEX(tracepoints_mutex);
+
+/*
+ * Tracepoint hash table, containing the active tracepoints.
+ * Protected by tracepoints_mutex.
+ */
+#define TRACEPOINT_HASH_BITS 6
+#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
+static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
+
+/*
+ * Note about RCU :
+ * It is used to to delay the free of multiple probes array until a quiescent
+ * state is reached.
+ * Tracepoint entries modifications are protected by the tracepoints_mutex.
+ */
+struct tracepoint_entry {
+ struct hlist_node hlist;
+ void **funcs;
+ int refcount; /* Number of times armed. 0 if disarmed. */
+ char name[0];
+};
+
+struct tp_probes {
+ union {
+//ust// struct rcu_head rcu;
+ struct list_head list;
+ } u;
+ void *probes[0];
+};
+
+static inline void *allocate_probes(int count)
+{
+ struct tp_probes *p = kmalloc(count * sizeof(void *)
+ + sizeof(struct tp_probes), GFP_KERNEL);
+ return p == NULL ? NULL : p->probes;
+}
+
+//ust// static void rcu_free_old_probes(struct rcu_head *head)
+//ust// {
+//ust// kfree(container_of(head, struct tp_probes, u.rcu));
+//ust// }
+
+static inline void release_probes(void *old)
+{
+ if (old) {
+ struct tp_probes *tp_probes = container_of(old,
+ struct tp_probes, probes[0]);
+//ust// call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
+ synchronize_rcu();
+ kfree(tp_probes);
+ }
+}
+
+static void debug_print_probes(struct tracepoint_entry *entry)
+{
+ int i;
+
+ if (!tracepoint_debug || !entry->funcs)
+ return;
+
+ for (i = 0; entry->funcs[i]; i++)
+ printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]);
+}
+
+static void *
+tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
+{
+ int nr_probes = 0;
+ void **old, **new;
+
+ WARN_ON(!probe);
+
+ debug_print_probes(entry);
+ old = entry->funcs;
+ if (old) {
+ /* (N -> N+1), (N != 0, 1) probes */
+ for (nr_probes = 0; old[nr_probes]; nr_probes++)
+ if (old[nr_probes] == probe)
+ return ERR_PTR(-EEXIST);
+ }
+ /* + 2 : one for new probe, one for NULL func */
+ new = allocate_probes(nr_probes + 2);
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (old)
+ memcpy(new, old, nr_probes * sizeof(void *));
+ new[nr_probes] = probe;
+ new[nr_probes + 1] = NULL;
+ entry->refcount = nr_probes + 1;
+ entry->funcs = new;
+ debug_print_probes(entry);
+ return old;
+}
+
+static void *
+tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
+{
+ int nr_probes = 0, nr_del = 0, i;
+ void **old, **new;
+
+ old = entry->funcs;
+
+ if (!old)
+ return ERR_PTR(-ENOENT);
+
+ debug_print_probes(entry);
+ /* (N -> M), (N > 1, M >= 0) probes */
+ for (nr_probes = 0; old[nr_probes]; nr_probes++) {
+ if ((!probe || old[nr_probes] == probe))
+ nr_del++;
+ }
+
+ if (nr_probes - nr_del == 0) {
+ /* N -> 0, (N > 1) */
+ entry->funcs = NULL;
+ entry->refcount = 0;
+ debug_print_probes(entry);
+ return old;
+ } else {
+ int j = 0;
+ /* N -> M, (N > 1, M > 0) */
+ /* + 1 for NULL */
+ new = allocate_probes(nr_probes - nr_del + 1);
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ for (i = 0; old[i]; i++)
+ if ((probe && old[i] != probe))
+ new[j++] = old[i];
+ new[nr_probes - nr_del] = NULL;
+ entry->refcount = nr_probes - nr_del;
+ entry->funcs = new;
+ }
+ debug_print_probes(entry);
+ return old;
+}
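+/*
+ * Example (illustrative): with entry->funcs == { probe_a, probe_b, NULL },
+ * removing probe_a allocates a new array { probe_b, NULL }, sets refcount
+ * to 1 and returns the old array, which the caller hands to
+ * release_probes() so it is freed only after an RCU quiescent state.
+ */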
+
+/*
+ * Get tracepoint if the tracepoint is present in the tracepoint hash table.
+ * Must be called with tracepoints_mutex held.
+ * Returns NULL if not present.
+ */
+static struct tracepoint_entry *get_tracepoint(const char *name)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct tracepoint_entry *e;
+ u32 hash = jhash(name, strlen(name), 0);
+
+ head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
+ hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(name, e->name))
+ return e;
+ }
+ return NULL;
+}
+
+/*
+ * Add the tracepoint to the tracepoint hash table. Must be called with
+ * tracepoints_mutex held.
+ */
+static struct tracepoint_entry *add_tracepoint(const char *name)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct tracepoint_entry *e;
+ size_t name_len = strlen(name) + 1;
+ u32 hash = jhash(name, name_len-1, 0);
+
+ head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
+ hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(name, e->name)) {
+ printk(KERN_NOTICE
+ "tracepoint %s busy\n", name);
+ return ERR_PTR(-EEXIST); /* Already there */
+ }
+ }
+ /*
+ * Using kmalloc here to allocate a variable length element. Could
+ * cause some memory fragmentation if overused.
+ */
+ e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+ memcpy(&e->name[0], name, name_len);
+ e->funcs = NULL;
+ e->refcount = 0;
+ hlist_add_head(&e->hlist, head);
+ return e;
+}
+
+/*
+ * Remove the tracepoint from the tracepoint hash table. Must be called with
+ * mutex_lock held.
+ */
+static inline void remove_tracepoint(struct tracepoint_entry *e)
+{
+ hlist_del(&e->hlist);
+ kfree(e);
+}
+
+/*
+ * Sets the probe callback corresponding to one tracepoint.
+ */
+static void set_tracepoint(struct tracepoint_entry **entry,
+ struct tracepoint *elem, int active)
+{
+ WARN_ON(strcmp((*entry)->name, elem->name) != 0);
+
+ /*
+ * rcu_assign_pointer has a smp_wmb() which makes sure that the new
+ * probe callbacks array is consistent before setting a pointer to it.
+ * This array is referenced by __DO_TRACE from
+ * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
+ * is used.
+ */
+ rcu_assign_pointer(elem->funcs, (*entry)->funcs);
+ elem->state__imv = active;
+}
+
+/*
+ * Disable a tracepoint and its probe callback.
+ * Note: only waiting for an RCU period after setting elem->call to the empty
+ * function ensures that the original callback is not used anymore. This is
+ * ensured by preempt_disable around the call site.
+ */
+static void disable_tracepoint(struct tracepoint *elem)
+{
+ elem->state__imv = 0;
+ rcu_assign_pointer(elem->funcs, NULL);
+}
+
+/**
+ * tracepoint_update_probe_range - Update a probe range
+ * @begin: beginning of the range
+ * @end: end of the range
+ *
+ * Updates the probe callback corresponding to a range of tracepoints.
+ */
+void tracepoint_update_probe_range(struct tracepoint *begin,
+ struct tracepoint *end)
+{
+ struct tracepoint *iter;
+ struct tracepoint_entry *mark_entry;
+
+ mutex_lock(&tracepoints_mutex);
+ for (iter = begin; iter < end; iter++) {
+ mark_entry = get_tracepoint(iter->name);
+ if (mark_entry) {
+ set_tracepoint(&mark_entry, iter,
+ !!mark_entry->refcount);
+ } else {
+ disable_tracepoint(iter);
+ }
+ }
+ mutex_unlock(&tracepoints_mutex);
+}
+
+/*
+ * Update probes, removing the faulty probes.
+ */
+static void tracepoint_update_probes(void)
+{
+ /* Core kernel tracepoints */
+//ust// tracepoint_update_probe_range(__start___tracepoints,
+//ust// __stop___tracepoints);
+ /* tracepoints in modules. */
+ lib_update_tracepoints();
+ /* Update immediate values */
+ core_imv_update();
+//ust// module_imv_update();
+}
+
+static void *tracepoint_add_probe(const char *name, void *probe)
+{
+ struct tracepoint_entry *entry;
+ void *old;
+
+ entry = get_tracepoint(name);
+ if (!entry) {
+ entry = add_tracepoint(name);
+ if (IS_ERR(entry))
+ return entry;
+ }
+ old = tracepoint_entry_add_probe(entry, probe);
+ if (IS_ERR(old) && !entry->refcount)
+ remove_tracepoint(entry);
+ return old;
+}
+
+/**
+ * tracepoint_probe_register - Connect a probe to a tracepoint
+ * @name: tracepoint name
+ * @probe: probe handler
+ *
+ * Returns 0 if ok, error value on error.
+ * The probe address must at least be aligned on the architecture pointer size.
+ */
+int tracepoint_probe_register(const char *name, void *probe)
+{
+ void *old;
+
+ mutex_lock(&tracepoints_mutex);
+ old = tracepoint_add_probe(name, probe);
+ mutex_unlock(&tracepoints_mutex);
+ if (IS_ERR(old))
+ return PTR_ERR(old);
+
+ tracepoint_update_probes(); /* may update entry */
+ release_probes(old);
+ return 0;
+}
+//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register);
+
+static void *tracepoint_remove_probe(const char *name, void *probe)
+{
+ struct tracepoint_entry *entry;
+ void *old;
+
+ entry = get_tracepoint(name);
+ if (!entry)
+ return ERR_PTR(-ENOENT);
+ old = tracepoint_entry_remove_probe(entry, probe);
+ if (IS_ERR(old))
+ return old;
+ if (!entry->refcount)
+ remove_tracepoint(entry);
+ return old;
+}
+
+/**
+ * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
+ * @name: tracepoint name
+ * @probe: probe function pointer
+ *
+ * We do not need to call a synchronize_sched to make sure the probes have
+ * finished running before doing a module unload, because the module unload
+ * itself uses stop_machine(), which insures that every preempt disabled section
+ * have finished.
+ */
+int tracepoint_probe_unregister(const char *name, void *probe)
+{
+ void *old;
+
+ mutex_lock(&tracepoints_mutex);
+ old = tracepoint_remove_probe(name, probe);
+ mutex_unlock(&tracepoints_mutex);
+ if (IS_ERR(old))
+ return PTR_ERR(old);
+
+ tracepoint_update_probes(); /* may update entry */
+ release_probes(old);
+ return 0;
+}
+//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
+
+static LIST_HEAD(old_probes);
+static int need_update;
+
+static void tracepoint_add_old_probes(void *old)
+{
+ need_update = 1;
+ if (old) {
+ struct tp_probes *tp_probes = container_of(old,
+ struct tp_probes, probes[0]);
+ list_add(&tp_probes->u.list, &old_probes);
+ }
+}
+
+/**
+ * tracepoint_probe_register_noupdate - register a probe but not connect
+ * @name: tracepoint name
+ * @probe: probe handler
+ *
+ * caller must call tracepoint_probe_update_all()
+ */
+int tracepoint_probe_register_noupdate(const char *name, void *probe)
+{
+ void *old;
+
+ mutex_lock(&tracepoints_mutex);
+ old = tracepoint_add_probe(name, probe);
+ if (IS_ERR(old)) {
+ mutex_unlock(&tracepoints_mutex);
+ return PTR_ERR(old);
+ }
+ tracepoint_add_old_probes(old);
+ mutex_unlock(&tracepoints_mutex);
+ return 0;
+}
+//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
+
+/**
+ * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect
+ * @name: tracepoint name
+ * @probe: probe function pointer
+ *
+ * caller must call tracepoint_probe_update_all()
+ */
+int tracepoint_probe_unregister_noupdate(const char *name, void *probe)
+{
+ void *old;
+
+ mutex_lock(&tracepoints_mutex);
+ old = tracepoint_remove_probe(name, probe);
+ if (IS_ERR(old)) {
+ mutex_unlock(&tracepoints_mutex);
+ return PTR_ERR(old);
+ }
+ tracepoint_add_old_probes(old);
+ mutex_unlock(&tracepoints_mutex);
+ return 0;
+}
+//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
+
+/**
+ * tracepoint_probe_update_all - update tracepoints
+ */
+void tracepoint_probe_update_all(void)
+{
+ LIST_HEAD(release_probes);
+ struct tp_probes *pos, *next;
+
+ mutex_lock(&tracepoints_mutex);
+ if (!need_update) {
+ mutex_unlock(&tracepoints_mutex);
+ return;
+ }
+ if (!list_empty(&old_probes))
+ list_replace_init(&old_probes, &release_probes);
+ need_update = 0;
+ mutex_unlock(&tracepoints_mutex);
+
+ tracepoint_update_probes();
+ list_for_each_entry_safe(pos, next, &release_probes, u.list) {
+ list_del(&pos->u.list);
+//ust// call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
+ synchronize_rcu();
+ kfree(pos);
+ }
+}
+//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
+
+/**
+ * tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
+ * @tracepoint: current tracepoints (in), next tracepoint (out)
+ * @begin: beginning of the range
+ * @end: end of the range
+ *
+ * Returns whether a next tracepoint has been found (1) or not (0).
+ * Will return the first tracepoint in the range if the input tracepoint is
+ * NULL.
+ */
+int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+ struct tracepoint *begin, struct tracepoint *end)
+{
+ if (!*tracepoint && begin != end) {
+ *tracepoint = begin;
+ return 1;
+ }
+ if (*tracepoint >= begin && *tracepoint < end)
+ return 1;
+ return 0;
+}
+//ust// EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);
+
+static void tracepoint_get_iter(struct tracepoint_iter *iter)
+{
+ int found = 0;
+
+//ust// /* Core kernel tracepoints */
+//ust// if (!iter->module) {
+//ust// found = tracepoint_get_iter_range(&iter->tracepoint,
+//ust// __start___tracepoints, __stop___tracepoints);
+//ust// if (found)
+//ust// goto end;
+//ust// }
+ /* tracepoints in libs. */
+ found = lib_get_iter_tracepoints(iter);
+//ust// end:
+ if (!found)
+ tracepoint_iter_reset(iter);
+}
+
+void tracepoint_iter_start(struct tracepoint_iter *iter)
+{
+ tracepoint_get_iter(iter);
+}
+//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_start);
+
+void tracepoint_iter_next(struct tracepoint_iter *iter)
+{
+ iter->tracepoint++;
+ /*
+ * iter->tracepoint may be invalid because we blindly incremented it.
+ * Make sure it is valid by marshalling on the tracepoints, getting the
+ * tracepoints from following modules if necessary.
+ */
+ tracepoint_get_iter(iter);
+}
+//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_next);
+
+void tracepoint_iter_stop(struct tracepoint_iter *iter)
+{
+}
+//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
+
+void tracepoint_iter_reset(struct tracepoint_iter *iter)
+{
+//ust// iter->module = NULL;
+ iter->lib = NULL;
+ iter->tracepoint = NULL;
+}
+//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
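+/*
+ * Usage sketch (illustrative, mirrors the marker_iter loop in
+ * print_markers()):
+ *
+ *	struct tracepoint_iter iter;
+ *
+ *	tracepoint_iter_reset(&iter);
+ *	tracepoint_iter_start(&iter);
+ *	while (iter.tracepoint) {
+ *		printf("tracepoint: %s\n", iter.tracepoint->name);
+ *		tracepoint_iter_next(&iter);
+ *	}
+ *	tracepoint_iter_stop(&iter);
+ */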
+
+//ust// #ifdef CONFIG_MODULES
+
+//ust// int tracepoint_module_notify(struct notifier_block *self,
+//ust// unsigned long val, void *data)
+//ust// {
+//ust// struct module *mod = data;
+//ust//
+//ust// switch (val) {
+//ust// case MODULE_STATE_COMING:
+//ust// tracepoint_update_probe_range(mod->tracepoints,
+//ust// mod->tracepoints + mod->num_tracepoints);
+//ust// break;
+//ust// case MODULE_STATE_GOING:
+//ust// tracepoint_update_probe_range(mod->tracepoints,
+//ust// mod->tracepoints + mod->num_tracepoints);
+//ust// break;
+//ust// }
+//ust// return 0;
+//ust// }
+
+//ust// struct notifier_block tracepoint_module_nb = {
+//ust// .notifier_call = tracepoint_module_notify,
+//ust// .priority = 0,
+//ust// };
+
+//ust// static int init_tracepoints(void)
+//ust// {
+//ust// return register_module_notifier(&tracepoint_module_nb);
+//ust// }
+//ust// __initcall(init_tracepoints);
+
+//ust// #endif /* CONFIG_MODULES */
+
+/*
+ * Returns 0 if current not found.
+ * Returns 1 if current found.
+ */
+int lib_get_iter_tracepoints(struct tracepoint_iter *iter)
+{
+ struct tracepoint_lib *iter_lib;
+ int found = 0;
+
+//ust// mutex_lock(&module_mutex);
+ list_for_each_entry(iter_lib, &libs, list) {
+ if (iter_lib < iter->lib)
+ continue;
+ else if (iter_lib > iter->lib)
+ iter->tracepoint = NULL;
+ found = tracepoint_get_iter_range(&iter->tracepoint,
+ iter_lib->tracepoints_start,
+ iter_lib->tracepoints_start + iter_lib->tracepoints_count);
+ if (found) {
+ iter->lib = iter_lib;
+ break;
+ }
+ }
+//ust// mutex_unlock(&module_mutex);
+ return found;
+}
+
+void lib_update_tracepoints(void)
+{
+ struct tracepoint_lib *lib;
+
+//ust// mutex_lock(&module_mutex);
+ list_for_each_entry(lib, &libs, list)
+ tracepoint_update_probe_range(lib->tracepoints_start,
+ lib->tracepoints_start + lib->tracepoints_count);
+//ust// mutex_unlock(&module_mutex);
+}
+
+static void (*new_tracepoint_cb)(struct tracepoint *) = NULL;
+
+void tracepoint_set_new_tracepoint_cb(void (*cb)(struct tracepoint *))
+{
+ new_tracepoint_cb = cb;
+}
+
+static void new_tracepoints(struct tracepoint *start, struct tracepoint *end)
+{
+ if(new_tracepoint_cb) {
+ struct tracepoint *t;
+ for(t=start; t < end; t++) {
+ new_tracepoint_cb(t);
+ }
+ }
+}
+
+int tracepoint_register_lib(struct tracepoint *tracepoints_start, int tracepoints_count)
+{
+ struct tracepoint_lib *pl;
+
+ pl = (struct tracepoint_lib *) malloc(sizeof(struct tracepoint_lib));
+ if(pl == NULL) {
+ ERR("malloc returned NULL");
+ return -ENOMEM;
+ }
+
+ pl->tracepoints_start = tracepoints_start;
+ pl->tracepoints_count = tracepoints_count;
+
+ /* FIXME: maybe protect this with its own mutex? */
+ mutex_lock(&tracepoints_mutex);
+ list_add(&pl->list, &libs);
+ mutex_unlock(&tracepoints_mutex);
+
+ new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);
+
+ /* FIXME: update just the loaded lib */
+ lib_update_tracepoints();
+
+ DBG("just registered a tracepoints section from %p and having %d tracepoints", tracepoints_start, tracepoints_count);
+
+ return 0;
+}
+
+int tracepoint_unregister_lib(struct tracepoint *tracepoints_start, int tracepoints_count)
+{
+ /*FIXME: implement; but before implementing, tracepoint_register_lib must
+ have appropriate locking. */
+
+ return 0;
+}
--- /dev/null
+#ifndef _LINUX_TRACEPOINT_H
+#define _LINUX_TRACEPOINT_H
+
+/*
+ * Copyright (C) 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (C) 2009 Pierre-Marc Fournier
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Heavily inspired from the Linux Kernel Markers.
+ *
+ * Ported to userspace by Pierre-Marc Fournier.
+ */
+
+//#include <linux/immediate.h>
+//#include <linux/types.h>
+//#include <linux/rcupdate.h>
+
+#include "immediate.h"
+#include "kernelcompat.h"
+
+struct module;
+struct tracepoint;
+
+struct tracepoint {
+ const char *name; /* Tracepoint name */
+ DEFINE_IMV(char, state); /* State. */
+ void **funcs;
+} __attribute__((aligned(32))); /*
+ * Aligned on 32 bytes because it is
+ * globally visible and gcc happily
+ * aligns these on the structure size.
+ * Keep in sync with vmlinux.lds.h.
+ */
+
+#define TPPROTO(args...) args
+#define TPARGS(args...) args
+
+//ust// #ifdef CONFIG_TRACEPOINTS
+
+/*
+ * it_func[0] is never NULL because there is at least one element in the array
+ * when the array itself is non NULL.
+ */
+#define __DO_TRACE(tp, proto, args) \
+ do { \
+ void **it_func; \
+ \
+ rcu_read_lock_sched_notrace(); \
+ it_func = rcu_dereference((tp)->funcs); \
+ if (it_func) { \
+ do { \
+ ((void(*)(proto))(*it_func))(args); \
+ } while (*(++it_func)); \
+ } \
+ rcu_read_unlock_sched_notrace(); \
+ } while (0)
+
+#define __CHECK_TRACE(name, generic, proto, args) \
+ do { \
+ if (!generic) { \
+ if (unlikely(imv_read(__tracepoint_##name.state))) \
+ __DO_TRACE(&__tracepoint_##name, \
+ TPPROTO(proto), TPARGS(args)); \
+ } else { \
+ if (unlikely(_imv_read(__tracepoint_##name.state))) \
+ __DO_TRACE(&__tracepoint_##name, \
+ TPPROTO(proto), TPARGS(args)); \
+ } \
+ } while (0)
+
+/*
+ * Make sure the alignment of the structure in the __tracepoints section will
+ * not add unwanted padding between the beginning of the section and the
+ * structure. Force alignment to the same alignment as the section start.
+ *
+ * The "generic" argument, passed to the declared __trace_##name inline
+ * function controls which tracepoint enabling mechanism must be used.
+ * If generic is true, a variable read is used.
+ * If generic is false, immediate values are used.
+ */
+#define DECLARE_TRACE(name, proto, args) \
+ extern struct tracepoint __tracepoint_##name; \
+ static inline void trace_##name(proto) \
+ { \
+ __CHECK_TRACE(name, 0, TPPROTO(proto), TPARGS(args)); \
+ } \
+ static inline void _trace_##name(proto) \
+ { \
+ __CHECK_TRACE(name, 1, TPPROTO(proto), TPARGS(args)); \
+ } \
+ static inline int register_trace_##name(void (*probe)(proto)) \
+ { \
+ return tracepoint_probe_register(#name, (void *)probe); \
+ } \
+ static inline int unregister_trace_##name(void (*probe)(proto)) \
+ { \
+ return tracepoint_probe_unregister(#name, (void *)probe);\
+ }
+
+#define DEFINE_TRACE(name) \
+ static const char __tpstrtab_##name[] \
+ __attribute__((section("__tracepoints_strings"))) = #name; \
+ struct tracepoint __tracepoint_##name \
+ __attribute__((section("__tracepoints"), aligned(32))) = \
+ { __tpstrtab_##name, 0, NULL }
+
+#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
+ EXPORT_SYMBOL_GPL(__tracepoint_##name)
+#define EXPORT_TRACEPOINT_SYMBOL(name) \
+ EXPORT_SYMBOL(__tracepoint_##name)
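+
+/*
+ * Usage sketch (illustrative; the tracepoint and probe names are
+ * hypothetical):
+ *
+ *	DECLARE_TRACE(foo_done, TPPROTO(int status), TPARGS(status));
+ *	DEFINE_TRACE(foo_done);
+ *
+ *	static void probe_foo_done(int status)
+ *	{
+ *		printf("foo done, status %d\n", status);
+ *	}
+ *
+ *	setup:     register_trace_foo_done(probe_foo_done);
+ *	call site: trace_foo_done(status);
+ *	teardown:  unregister_trace_foo_done(probe_foo_done);
+ */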
+
+extern void tracepoint_update_probe_range(struct tracepoint *begin,
+ struct tracepoint *end);
+
+//ust// #else /* !CONFIG_TRACEPOINTS */
+//ust// #define DECLARE_TRACE(name, proto, args) \
+//ust// static inline void trace_##name(proto) \
+//ust// { } \
+//ust// static inline void _trace_##name(proto) \
+//ust// { } \
+//ust// static inline int register_trace_##name(void (*probe)(proto)) \
+//ust// { \
+//ust// return -ENOSYS; \
+//ust// } \
+//ust// static inline int unregister_trace_##name(void (*probe)(proto)) \
+//ust// { \
+//ust// return -ENOSYS; \
+//ust// }
+//ust//
+//ust// #define DEFINE_TRACE(name)
+//ust// #define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
+//ust// #define EXPORT_TRACEPOINT_SYMBOL(name)
+//ust//
+//ust// static inline void tracepoint_update_probe_range(struct tracepoint *begin,
+//ust// struct tracepoint *end)
+//ust// { }
+//ust// #endif /* CONFIG_TRACEPOINTS */
+
+/*
+ * Connect a probe to a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_register(const char *name, void *probe);
+
+/*
+ * Disconnect a probe from a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_unregister(const char *name, void *probe);
+
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern void tracepoint_probe_update_all(void);
+
+struct tracepoint_iter {
+//ust// struct module *module;
+ struct tracepoint_lib *lib;
+ struct tracepoint *tracepoint;
+};
+
+extern void tracepoint_iter_start(struct tracepoint_iter *iter);
+extern void tracepoint_iter_next(struct tracepoint_iter *iter);
+extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
+extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
+extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+ struct tracepoint *begin, struct tracepoint *end);
+
+/*
+ * tracepoint_synchronize_unregister must be called between the last tracepoint
+ * probe unregistration and the end of module exit to make sure there is no
+ * caller executing a probe when it is freed.
+ */
+static inline void tracepoint_synchronize_unregister(void)
+{
+ synchronize_sched();
+}
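+
+/*
+ * Teardown sketch (illustrative): per the comment above, a library that
+ * unloads its probes would do
+ *
+ *	unregister_trace_foo_done(probe_foo_done);
+ *	tracepoint_synchronize_unregister();
+ *
+ * before the memory containing probe_foo_done can safely be freed.
+ */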
+
+struct tracepoint_lib {
+ struct tracepoint *tracepoints_start;
+ int tracepoints_count;
+ struct list_head list;
+};
+
+extern int tracepoint_register_lib(struct tracepoint *tracepoints_start,
+ int tracepoints_count);
+extern int tracepoint_unregister_lib(struct tracepoint *tracepoints_start,
+ int tracepoints_count);
+
+#define TRACEPOINT_LIB \
+extern struct tracepoint __start___tracepoints[] __attribute__((visibility("hidden"))); \
+extern struct tracepoint __stop___tracepoints[] __attribute__((visibility("hidden"))); \
+ \
+static void __attribute__((constructor)) __tracepoints__init(void) \
+{ \
+ tracepoint_register_lib(__start___tracepoints, (((long)__stop___tracepoints)-((long)__start___tracepoints))/sizeof(struct tracepoint));\
+}
+#endif /* _LINUX_TRACEPOINT_H */
--- /dev/null
+/*
+ * ltt/ltt-tracer.c
+ *
+ * (C) Copyright 2005-2008 -
+ * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * Tracing management internal kernel API. Trace buffer allocation/free, tracing
+ * start/stop.
+ *
+ * Author:
+ * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * Inspired from LTT :
+ * Karim Yaghmour (karim@opersys.com)
+ * Tom Zanussi (zanussi@us.ibm.com)
+ * Bob Wisniewski (bob@watson.ibm.com)
+ * And from K42 :
+ * Bob Wisniewski (bob@watson.ibm.com)
+ *
+ * Changelog:
+ * 22/09/06, Move to the marker/probes mechanism.
+ * 19/10/05, Complete lockless mechanism.
+ * 27/05/05, Modular redesign and rewrite.
+ */
+
+//ust// #include <linux/time.h>
+//ust// #include <linux/ltt-tracer.h>
+//ust// #include <linux/module.h>
+//ust// #include <linux/string.h>
+//ust// #include <linux/slab.h>
+//ust// #include <linux/init.h>
+//ust// #include <linux/rcupdate.h>
+//ust// #include <linux/sched.h>
+//ust// #include <linux/bitops.h>
+//ust// #include <linux/fs.h>
+//ust// #include <linux/cpu.h>
+//ust// #include <linux/kref.h>
+//ust// #include <linux/delay.h>
+//ust// #include <linux/vmalloc.h>
+//ust// #include <asm/atomic.h>
+#include "kernelcompat.h"
+#include "tracercore.h"
+#include "tracer.h"
+#include "usterr.h"
+
+//ust// static void async_wakeup(unsigned long data);
+//ust//
+//ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
+
+/* Default callbacks for modules */
+notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
+ struct ltt_trace_struct *trace)
+{
+ return 0;
+}
+
+int ltt_statedump_default(struct ltt_trace_struct *trace)
+{
+ return 0;
+}
+
+/* Callbacks for registered modules */
+
+int (*ltt_filter_control_functor)
+ (enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
+ ltt_filter_control_default;
+struct module *ltt_filter_control_owner;
+
+/* These function pointers are protected by a trace activation check */
+struct module *ltt_run_filter_owner;
+int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
+ ltt_statedump_default;
+struct module *ltt_statedump_owner;
+
+struct chan_info_struct {
+ const char *name;
+ unsigned int def_subbufsize;
+ unsigned int def_subbufcount;
+} chan_infos[] = {
+ [LTT_CHANNEL_METADATA] = {
+ LTT_METADATA_CHANNEL,
+ LTT_DEFAULT_SUBBUF_SIZE_LOW,
+ LTT_DEFAULT_N_SUBBUFS_LOW,
+ },
+ [LTT_CHANNEL_UST] = {
+ LTT_UST_CHANNEL,
+ LTT_DEFAULT_SUBBUF_SIZE_HIGH,
+ LTT_DEFAULT_N_SUBBUFS_HIGH,
+ },
+};
+
+static enum ltt_channels get_channel_type_from_name(const char *name)
+{
+ int i;
+
+ if (!name)
+ return LTT_CHANNEL_UST;
+
+ for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
+ if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
+ return (enum ltt_channels)i;
+
+ return LTT_CHANNEL_UST;
+}
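+
+/*
+ * For instance, "metadata_state" (LTT_METADATA_CHANNEL) maps to
+ * LTT_CHANNEL_METADATA, while a NULL or unrecognized name falls back to
+ * LTT_CHANNEL_UST.
+ */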
+
+/**
+ * ltt_module_register - LTT module registration
+ * @name: module type
+ * @function: callback to register
+ * @owner: module which owns the callback
+ *
+ * The module calling this registration function must ensure that no
+ * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
+ * must be called between a vmalloc and the moment the memory is made visible to
+ * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
+ * the module allocates virtual memory after its registration must it
+ * synchronize the TLBs.
+ */
+//ust// int ltt_module_register(enum ltt_module_function name, void *function,
+//ust// struct module *owner)
+//ust// {
+//ust// int ret = 0;
+//ust//
+//ust// /*
+//ust// * Make sure no page fault can be triggered by the module about to be
+//ust// * registered. We deal with this here so we don't have to call
+//ust// * vmalloc_sync_all() in each module's init.
+//ust// */
+//ust// vmalloc_sync_all();
+//ust//
+//ust// switch (name) {
+//ust// case LTT_FUNCTION_RUN_FILTER:
+//ust// if (ltt_run_filter_owner != NULL) {
+//ust// ret = -EEXIST;
+//ust// goto end;
+//ust// }
+//ust// ltt_filter_register((ltt_run_filter_functor)function);
+//ust// ltt_run_filter_owner = owner;
+//ust// break;
+//ust// case LTT_FUNCTION_FILTER_CONTROL:
+//ust// if (ltt_filter_control_owner != NULL) {
+//ust// ret = -EEXIST;
+//ust// goto end;
+//ust// }
+//ust// ltt_filter_control_functor =
+//ust// (int (*)(enum ltt_filter_control_msg,
+//ust// struct ltt_trace_struct *))function;
+//ust// ltt_filter_control_owner = owner;
+//ust// break;
+//ust// case LTT_FUNCTION_STATEDUMP:
+//ust// if (ltt_statedump_owner != NULL) {
+//ust// ret = -EEXIST;
+//ust// goto end;
+//ust// }
+//ust// ltt_statedump_functor =
+//ust// (int (*)(struct ltt_trace_struct *))function;
+//ust// ltt_statedump_owner = owner;
+//ust// break;
+//ust// }
+//ust//
+//ust// end:
+//ust//
+//ust// return ret;
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_module_register);
+
+/**
+ * ltt_module_unregister - LTT module unregistration
+ * @name: module type
+ */
+//ust// void ltt_module_unregister(enum ltt_module_function name)
+//ust// {
+//ust// switch (name) {
+//ust// case LTT_FUNCTION_RUN_FILTER:
+//ust// ltt_filter_unregister();
+//ust// ltt_run_filter_owner = NULL;
+//ust// /* Wait for preempt sections to finish */
+//ust// synchronize_sched();
+//ust// break;
+//ust// case LTT_FUNCTION_FILTER_CONTROL:
+//ust// ltt_filter_control_functor = ltt_filter_control_default;
+//ust// ltt_filter_control_owner = NULL;
+//ust// break;
+//ust// case LTT_FUNCTION_STATEDUMP:
+//ust// ltt_statedump_functor = ltt_statedump_default;
+//ust// ltt_statedump_owner = NULL;
+//ust// break;
+//ust// }
+//ust//
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_module_unregister);
+
+static LIST_HEAD(ltt_transport_list);
+
+/**
+ * ltt_transport_register - LTT transport registration
+ * @transport: transport structure
+ *
+ * Registers a transport which can be used as output to extract the data out of
+ * LTTng. The module calling this registration function must ensure that no
+ * trap-inducing code will be executed by the transport functions. E.g.
+ * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
+ * is made visible to the transport function. This registration acts as a
+ * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
+ * after its registration must it synchronize the TLBs.
+ */
+void ltt_transport_register(struct ltt_transport *transport)
+{
+ /*
+ * Make sure no page fault can be triggered by the module about to be
+ * registered. We deal with this here so we don't have to call
+ * vmalloc_sync_all() in each module's init.
+ */
+//ust// vmalloc_sync_all();
+
+ ltt_lock_traces();
+	list_add_tail(&transport->node, &ltt_transport_list);
+ ltt_unlock_traces();
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_transport_register);
+
+/**
+ * ltt_transport_unregister - LTT transport unregistration
+ * @transport: transport structure
+ */
+void ltt_transport_unregister(struct ltt_transport *transport)
+{
+ ltt_lock_traces();
+ list_del(&transport->node);
+ ltt_unlock_traces();
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_transport_unregister);
+
+static inline int is_channel_overwrite(enum ltt_channels chan,
+ enum trace_mode mode)
+{
+ switch (mode) {
+ case LTT_TRACE_NORMAL:
+ return 0;
+ case LTT_TRACE_FLIGHT:
+ switch (chan) {
+ case LTT_CHANNEL_METADATA:
+ return 0;
+ default:
+ return 1;
+ }
+ case LTT_TRACE_HYBRID:
+ switch (chan) {
+ case LTT_CHANNEL_METADATA:
+ return 0;
+ default:
+ return 1;
+ }
+ default:
+ return 0;
+ }
+}
+
+/**
+ * ltt_write_trace_header - Write trace header
+ * @trace: Trace information
+ * @header: Memory address where the information must be written to
+ */
+void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
+ struct ltt_subbuffer_header *header)
+{
+ header->magic_number = LTT_TRACER_MAGIC_NUMBER;
+ header->major_version = LTT_TRACER_VERSION_MAJOR;
+ header->minor_version = LTT_TRACER_VERSION_MINOR;
+ header->arch_size = sizeof(void *);
+ header->alignment = ltt_get_alignment();
+ header->start_time_sec = trace->start_time.tv_sec;
+ header->start_time_usec = trace->start_time.tv_usec;
+ header->start_freq = trace->start_freq;
+ header->freq_scale = trace->freq_scale;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_write_trace_header);
+
+static void trace_async_wakeup(struct ltt_trace_struct *trace)
+{
+ int i;
+ struct ltt_channel_struct *chan;
+
+ /* Must check each channel for pending read wakeup */
+ for (i = 0; i < trace->nr_channels; i++) {
+ chan = &trace->channels[i];
+ if (chan->active)
+ trace->ops->wakeup_channel(chan);
+ }
+}
+
+//ust// /* Timer to send async wakeups to the readers */
+//ust// static void async_wakeup(unsigned long data)
+//ust// {
+//ust// struct ltt_trace_struct *trace;
+//ust//
+//ust// /*
+//ust// * PREEMPT_RT does not allow spinlocks to be taken within preempt
+//ust// * disable sections (spinlock taken in wake_up). However, mainline won't
+//ust// * allow mutex to be taken in interrupt context. Ugly.
+//ust// * A proper way to do this would be to turn the timer into a
+//ust// * periodically woken up thread, but it adds to the footprint.
+//ust// */
+//ust// #ifndef CONFIG_PREEMPT_RT
+//ust// rcu_read_lock_sched();
+//ust// #else
+//ust// ltt_lock_traces();
+//ust// #endif
+//ust//	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+//ust// trace_async_wakeup(trace);
+//ust// }
+//ust// #ifndef CONFIG_PREEMPT_RT
+//ust// rcu_read_unlock_sched();
+//ust// #else
+//ust// ltt_unlock_traces();
+//ust// #endif
+//ust//
+//ust//	mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
+//ust// }
+
+/**
+ * _ltt_trace_find - find a trace by given name.
+ * @trace_name: trace name
+ *
+ * Returns a pointer to the trace structure, NULL if not found.
+ */
+struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
+{
+ struct ltt_trace_struct *trace;
+
+	list_for_each_entry(trace, &ltt_traces.head, list)
+ if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
+ return trace;
+
+ return NULL;
+}
+
+/**
+ * _ltt_trace_find_setup - find a trace in the setup list by given name.
+ * @trace_name: trace name
+ *
+ * Returns a pointer to the trace structure, NULL if not found.
+ */
+struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
+{
+ struct ltt_trace_struct *trace;
+
+	list_for_each_entry(trace, &ltt_traces.setup_head, list)
+ if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
+ return trace;
+
+ return NULL;
+}
+//ust// EXPORT_SYMBOL_GPL(_ltt_trace_find_setup);
+
+/**
+ * ltt_release_transport - Release an LTT transport
+ * @kref : reference count on the transport
+ */
+void ltt_release_transport(struct kref *kref)
+{
+ struct ltt_trace_struct *trace = container_of(kref,
+ struct ltt_trace_struct, ltt_transport_kref);
+//ust// trace->ops->remove_dirs(trace);
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_release_transport);
+
+/**
+ * ltt_release_trace - Release an LTT trace
+ * @kref : reference count on the trace
+ */
+void ltt_release_trace(struct kref *kref)
+{
+ struct ltt_trace_struct *trace = container_of(kref,
+ struct ltt_trace_struct, kref);
+ ltt_channels_trace_free(trace->channels);
+ kfree(trace);
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_release_trace);
+
+static inline void prepare_chan_size_num(unsigned int *subbuf_size,
+ unsigned int *n_subbufs)
+{
+ *subbuf_size = 1 << get_count_order(*subbuf_size);
+ *n_subbufs = 1 << get_count_order(*n_subbufs);
+
+	/* Subbuf size and number must both be powers of two */
+ WARN_ON(hweight32(*subbuf_size) != 1);
+ WARN_ON(hweight32(*n_subbufs) != 1);
+}
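+
+/*
+ * For instance (illustrative): a requested subbuf_size of 3000 is rounded up
+ * to 4096 (1 << get_count_order(3000)) and an n_subbufs of 3 to 4; the
+ * WARN_ONs then check that exactly one bit is set in each result.
+ */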
+
+int _ltt_trace_setup(const char *trace_name)
+{
+ int err = 0;
+ struct ltt_trace_struct *new_trace = NULL;
+ int metadata_index;
+ unsigned int chan;
+ enum ltt_channels chantype;
+
+ if (_ltt_trace_find_setup(trace_name)) {
+ printk(KERN_ERR "LTT : Trace name %s already used.\n",
+ trace_name);
+ err = -EEXIST;
+ goto traces_error;
+ }
+
+ if (_ltt_trace_find(trace_name)) {
+ printk(KERN_ERR "LTT : Trace name %s already used.\n",
+ trace_name);
+ err = -EEXIST;
+ goto traces_error;
+ }
+
+ new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL);
+ if (!new_trace) {
+ printk(KERN_ERR
+ "LTT : Unable to allocate memory for trace %s\n",
+ trace_name);
+ err = -ENOMEM;
+ goto traces_error;
+ }
+ strncpy(new_trace->trace_name, trace_name, NAME_MAX);
+ new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
+ 0, 1);
+ if (!new_trace->channels) {
+ printk(KERN_ERR
+ "LTT : Unable to allocate memory for chaninfo %s\n",
+ trace_name);
+ err = -ENOMEM;
+ goto trace_free;
+ }
+
+ /*
+ * Force metadata channel to active, no overwrite.
+ */
+ metadata_index = ltt_channels_get_index_from_name("metadata");
+ WARN_ON(metadata_index < 0);
+ new_trace->channels[metadata_index].overwrite = 0;
+ new_trace->channels[metadata_index].active = 1;
+
+ /*
+ * Set hardcoded tracer defaults for some channels
+ */
+ for (chan = 0; chan < new_trace->nr_channels; chan++) {
+ if (!(new_trace->channels[chan].active))
+ continue;
+
+ chantype = get_channel_type_from_name(
+ ltt_channels_get_name_from_index(chan));
+ new_trace->channels[chan].subbuf_size =
+ chan_infos[chantype].def_subbufsize;
+ new_trace->channels[chan].subbuf_cnt =
+ chan_infos[chantype].def_subbufcount;
+ }
+
+	list_add(&new_trace->list, &ltt_traces.setup_head);
+ return 0;
+
+trace_free:
+ kfree(new_trace);
+traces_error:
+ return err;
+}
+//ust// EXPORT_SYMBOL_GPL(_ltt_trace_setup);
+
+
+int ltt_trace_setup(const char *trace_name)
+{
+ int ret;
+ ltt_lock_traces();
+ ret = _ltt_trace_setup(trace_name);
+ ltt_unlock_traces();
+ return ret;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_trace_setup);
+
+/* must be called from within a traces lock. */
+static void _ltt_trace_free(struct ltt_trace_struct *trace)
+{
+ list_del(&trace->list);
+ kfree(trace);
+}
+
+int ltt_trace_set_type(const char *trace_name, const char *trace_type)
+{
+ int err = 0;
+ struct ltt_trace_struct *trace;
+ struct ltt_transport *tran_iter, *transport = NULL;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+	list_for_each_entry(tran_iter, &ltt_transport_list, node) {
+ if (!strcmp(tran_iter->name, trace_type)) {
+ transport = tran_iter;
+ break;
+ }
+ }
+ if (!transport) {
+ printk(KERN_ERR "LTT : Transport %s is not present.\n",
+ trace_type);
+ err = -EINVAL;
+ goto traces_error;
+ }
+
+ trace->transport = transport;
+
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_type);
+
+int ltt_trace_set_channel_subbufsize(const char *trace_name,
+ const char *channel_name, unsigned int size)
+{
+ int err = 0;
+ struct ltt_trace_struct *trace;
+ int index;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ index = ltt_channels_get_index_from_name(channel_name);
+ if (index < 0) {
+ printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+ trace->channels[index].subbuf_size = size;
+
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize);
+
+int ltt_trace_set_channel_subbufcount(const char *trace_name,
+ const char *channel_name, unsigned int cnt)
+{
+ int err = 0;
+ struct ltt_trace_struct *trace;
+ int index;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ index = ltt_channels_get_index_from_name(channel_name);
+ if (index < 0) {
+ printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+ trace->channels[index].subbuf_cnt = cnt;
+
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount);
+
+int ltt_trace_set_channel_enable(const char *trace_name,
+ const char *channel_name, unsigned int enable)
+{
+ int err = 0;
+ struct ltt_trace_struct *trace;
+ int index;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ /*
+	 * Data in the metadata channel (marker info) is necessary to be
+	 * able to read the trace; we therefore always keep this channel
+	 * enabled.
+ */
+ if (!enable && !strcmp(channel_name, "metadata")) {
+ printk(KERN_ERR "LTT : Trying to disable metadata channel\n");
+ err = -EINVAL;
+ goto traces_error;
+ }
+
+ index = ltt_channels_get_index_from_name(channel_name);
+ if (index < 0) {
+ printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ trace->channels[index].active = enable;
+
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable);
+
+int ltt_trace_set_channel_overwrite(const char *trace_name,
+ const char *channel_name, unsigned int overwrite)
+{
+ int err = 0;
+ struct ltt_trace_struct *trace;
+ int index;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ /*
+	 * Always keep the metadata channel in non-overwrite mode: it is a
+	 * very low-traffic channel that cannot afford to have its data
+	 * overwritten, since this data (marker info) is necessary to be
+	 * able to read the trace.
+ */
+ if (overwrite && !strcmp(channel_name, "metadata")) {
+ printk(KERN_ERR "LTT : Trying to set metadata channel to "
+ "overwrite mode\n");
+ err = -EINVAL;
+ goto traces_error;
+ }
+
+ index = ltt_channels_get_index_from_name(channel_name);
+ if (index < 0) {
+ printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ trace->channels[index].overwrite = overwrite;
+
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite);
+
+int ltt_trace_alloc(const char *trace_name)
+{
+ int err = 0;
+ struct ltt_trace_struct *trace;
+	unsigned int subbuf_size, subbuf_cnt;
+//ust//	unsigned long flags;
+ int chan;
+ const char *channel_name;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (!trace) {
+ printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+ err = -ENOENT;
+ goto traces_error;
+ }
+
+ kref_init(&trace->kref);
+ kref_init(&trace->ltt_transport_kref);
+//ust// init_waitqueue_head(&trace->kref_wq);
+ trace->active = 0;
+//ust// get_trace_clock();
+ trace->freq_scale = trace_clock_freq_scale();
+
+ if (!trace->transport) {
+ printk(KERN_ERR "LTT : Transport is not set.\n");
+ err = -EINVAL;
+ goto transport_error;
+ }
+//ust// if (!try_module_get(trace->transport->owner)) {
+//ust// printk(KERN_ERR "LTT : Can't lock transport module.\n");
+//ust// err = -ENODEV;
+//ust// goto transport_error;
+//ust// }
+ trace->ops = &trace->transport->ops;
+
+//ust// err = trace->ops->create_dirs(trace);
+//ust// if (err) {
+//ust// printk(KERN_ERR "LTT : Can't create dir for trace %s.\n",
+//ust// trace_name);
+//ust// goto dirs_error;
+//ust// }
+
+//ust// local_irq_save(flags);
+ trace->start_freq = trace_clock_frequency();
+ trace->start_tsc = trace_clock_read64();
+ gettimeofday(&trace->start_time, NULL); //ust// changed
+//ust// local_irq_restore(flags);
+
+ for (chan = 0; chan < trace->nr_channels; chan++) {
+ if (!(trace->channels[chan].active))
+ continue;
+
+ channel_name = ltt_channels_get_name_from_index(chan);
+ WARN_ON(!channel_name);
+ subbuf_size = trace->channels[chan].subbuf_size;
+ subbuf_cnt = trace->channels[chan].subbuf_cnt;
+ prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
+ err = trace->ops->create_channel(trace_name, trace,
+ trace->dentry.trace_root,
+ channel_name,
+ &trace->channels[chan],
+ subbuf_size,
+ subbuf_cnt,
+ trace->channels[chan].overwrite);
+ if (err != 0) {
+ printk(KERN_ERR "LTT : Can't create channel %s.\n",
+ channel_name);
+ goto create_channel_error;
+ }
+ }
+
+ list_del(&trace->list);
+//ust//	if (list_empty(&ltt_traces.head)) {
+//ust//		mod_timer(&ltt_async_wakeup_timer,
+//ust// jiffies + LTT_PERCPU_TIMER_INTERVAL);
+//ust// set_kernel_trace_flag_all_tasks();
+//ust// }
+	list_add_rcu(&trace->list, &ltt_traces.head);
+//ust// synchronize_sched();
+
+ ltt_unlock_traces();
+
+ return 0;
+
+create_channel_error:
+ for (chan--; chan >= 0; chan--)
+ if (trace->channels[chan].active)
+ trace->ops->remove_channel(&trace->channels[chan]);
+
+dirs_error:
+//ust// module_put(trace->transport->owner);
+transport_error:
+//ust// put_trace_clock();
+traces_error:
+ ltt_unlock_traces();
+ return err;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_trace_alloc);
+
+/*
+ * This works as a wrapper around the current version of ltt_control.ko.
+ * We will eventually replace it with a new debugfs-based ltt_control that
+ * controls each channel's buffer.
+ */
+static int ltt_trace_create(const char *trace_name, const char *trace_type,
+ enum trace_mode mode,
+ unsigned int subbuf_size_low, unsigned int n_subbufs_low,
+ unsigned int subbuf_size_med, unsigned int n_subbufs_med,
+ unsigned int subbuf_size_high, unsigned int n_subbufs_high)
+{
+ int err = 0;
+
+ err = ltt_trace_setup(trace_name);
+ if (IS_ERR_VALUE(err))
+ return err;
+
+ err = ltt_trace_set_type(trace_name, trace_type);
+ if (IS_ERR_VALUE(err))
+ return err;
+
+ err = ltt_trace_alloc(trace_name);
+ if (IS_ERR_VALUE(err))
+ return err;
+
+ return err;
+}
+
+/* Must be called while the trace is known to be in the list. */
+static int _ltt_trace_destroy(struct ltt_trace_struct *trace)
+{
+ int err = -EPERM;
+
+ if (trace == NULL) {
+ err = -ENOENT;
+ goto traces_error;
+ }
+ if (trace->active) {
+ printk(KERN_ERR
+ "LTT : Can't destroy trace %s : tracer is active\n",
+ trace->trace_name);
+ err = -EBUSY;
+ goto active_error;
+ }
+ /* Everything went fine */
+//ust// list_del_rcu(&trace->list);
+//ust// synchronize_sched();
+	if (list_empty(&ltt_traces.head)) {
+//ust// clear_kernel_trace_flag_all_tasks();
+ /*
+ * We stop the asynchronous delivery of reader wakeup, but
+ * we must make one last check for reader wakeups pending
+ * later in __ltt_trace_destroy.
+ */
+//ust//		del_timer_sync(&ltt_async_wakeup_timer);
+ }
+ return 0;
+
+ /* error handling */
+active_error:
+traces_error:
+ return err;
+}
+
+/* Sleepable part of the destroy */
+static void __ltt_trace_destroy(struct ltt_trace_struct *trace)
+{
+ int i;
+ struct ltt_channel_struct *chan;
+
+ for (i = 0; i < trace->nr_channels; i++) {
+ chan = &trace->channels[i];
+ if (chan->active)
+ trace->ops->finish_channel(chan);
+ }
+
+ return; /* FIXME: temporary for ust */
+//ust// flush_scheduled_work();
+
+ /*
+ * The currently destroyed trace is not in the trace list anymore,
+	 * so it's safe to call the async wakeup ourselves. It will deliver
+ * the last subbuffers.
+ */
+ trace_async_wakeup(trace);
+
+ for (i = 0; i < trace->nr_channels; i++) {
+ chan = &trace->channels[i];
+ if (chan->active)
+ trace->ops->remove_channel(chan);
+ }
+
+ kref_put(&trace->ltt_transport_kref, ltt_release_transport);
+
+//ust// module_put(trace->transport->owner);
+
+ /*
+ * Wait for lttd readers to release the files, therefore making sure
+ * the last subbuffers have been read.
+ */
+//ust// if (atomic_read(&trace->kref.refcount) > 1) {
+//ust// int ret = 0;
+//ust// __wait_event_interruptible(trace->kref_wq,
+//ust// (atomic_read(&trace->kref.refcount) == 1), ret);
+//ust// }
+ kref_put(&trace->kref, ltt_release_trace);
+}
+
+int ltt_trace_destroy(const char *trace_name)
+{
+ int err = 0;
+ struct ltt_trace_struct *trace;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find(trace_name);
+ if (trace) {
+ err = _ltt_trace_destroy(trace);
+ if (err)
+ goto error;
+
+ ltt_unlock_traces();
+
+ __ltt_trace_destroy(trace);
+//ust// put_trace_clock();
+
+ return 0;
+ }
+
+ trace = _ltt_trace_find_setup(trace_name);
+ if (trace) {
+ _ltt_trace_free(trace);
+ ltt_unlock_traces();
+ return 0;
+ }
+
+ err = -ENOENT;
+
+ /* Error handling */
+error:
+ ltt_unlock_traces();
+ return err;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_trace_destroy);
+
+/* must be called from within a traces lock. */
+static int _ltt_trace_start(struct ltt_trace_struct *trace)
+{
+ int err = 0;
+
+ if (trace == NULL) {
+ err = -ENOENT;
+ goto traces_error;
+ }
+ if (trace->active)
+ printk(KERN_INFO "LTT : Tracing already active for trace %s\n",
+ trace->trace_name);
+//ust// if (!try_module_get(ltt_run_filter_owner)) {
+//ust// err = -ENODEV;
+//ust// printk(KERN_ERR "LTT : Can't lock filter module.\n");
+//ust// goto get_ltt_run_filter_error;
+//ust// }
+ trace->active = 1;
+ /* Read by trace points without protection : be careful */
+ ltt_traces.num_active_traces++;
+ return err;
+
+ /* error handling */
+get_ltt_run_filter_error:
+traces_error:
+ return err;
+}
+
+int ltt_trace_start(const char *trace_name)
+{
+ int err = 0;
+ struct ltt_trace_struct *trace;
+
+ ltt_lock_traces();
+
+ trace = _ltt_trace_find(trace_name);
+ err = _ltt_trace_start(trace);
+ if (err)
+ goto no_trace;
+
+ ltt_unlock_traces();
+
+ /*
+ * Call the kernel state dump.
+	 * Events will be mixed with real kernel events; that is fine.
+	 * Notice that there is no protection on the trace: that is exactly
+	 * why we iterate over the list and check for trace equality instead
+	 * of using this trace handle directly inside the logging function.
+ */
+
+ ltt_dump_marker_state(trace);
+
+//ust// if (!try_module_get(ltt_statedump_owner)) {
+//ust// err = -ENODEV;
+//ust// printk(KERN_ERR
+//ust// "LTT : Can't lock state dump module.\n");
+//ust// } else {
+ ltt_statedump_functor(trace);
+//ust// module_put(ltt_statedump_owner);
+//ust// }
+
+ return err;
+
+ /* Error handling */
+no_trace:
+ ltt_unlock_traces();
+ return err;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_trace_start);
+
+/* must be called from within traces lock */
+static int _ltt_trace_stop(struct ltt_trace_struct *trace)
+{
+ int err = -EPERM;
+
+ if (trace == NULL) {
+ err = -ENOENT;
+ goto traces_error;
+ }
+ if (!trace->active)
+ printk(KERN_INFO "LTT : Tracing not active for trace %s\n",
+ trace->trace_name);
+ if (trace->active) {
+ trace->active = 0;
+ ltt_traces.num_active_traces--;
+//ust// synchronize_sched(); /* Wait for each tracing to be finished */
+ }
+//ust// module_put(ltt_run_filter_owner);
+ /* Everything went fine */
+ return 0;
+
+ /* Error handling */
+traces_error:
+ return err;
+}
+
+int ltt_trace_stop(const char *trace_name)
+{
+ int err = 0;
+ struct ltt_trace_struct *trace;
+
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+ err = _ltt_trace_stop(trace);
+ ltt_unlock_traces();
+ return err;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_trace_stop);
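+
+/*
+ * Illustrative sketch (hypothetical trace and transport names, error paths
+ * abbreviated): the lifecycle a controller would typically drive with the
+ * calls above, assuming a transport named "ustrelay" has been registered.
+ *
+ *	int run_trace_session(void)
+ *	{
+ *		int err;
+ *
+ *		err = ltt_trace_setup("auto");			// pre-allocate
+ *		if (err)
+ *			return err;
+ *		err = ltt_trace_set_type("auto", "ustrelay");	// pick transport
+ *		if (err)
+ *			return err;
+ *		err = ltt_trace_alloc("auto");			// create channels
+ *		if (err)
+ *			return err;
+ *		err = ltt_trace_start("auto");			// begin logging
+ *		if (err)
+ *			return err;
+ *		// ... tracing runs ...
+ *		ltt_trace_stop("auto");
+ *		return ltt_trace_destroy("auto");		// free buffers
+ *	}
+ */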
+
+/**
+ * ltt_control - Trace control in-kernel API
+ * @msg: Action to perform
+ * @trace_name: Trace on which the action must be done
+ * @trace_type: Type of trace (normal, flight, hybrid)
+ * @args: Arguments specific to the action
+ */
+//ust// int ltt_control(enum ltt_control_msg msg, const char *trace_name,
+//ust// const char *trace_type, union ltt_control_args args)
+//ust// {
+//ust// int err = -EPERM;
+//ust//
+//ust// printk(KERN_ALERT "ltt_control : trace %s\n", trace_name);
+//ust// switch (msg) {
+//ust// case LTT_CONTROL_START:
+//ust// printk(KERN_DEBUG "Start tracing %s\n", trace_name);
+//ust// err = ltt_trace_start(trace_name);
+//ust// break;
+//ust// case LTT_CONTROL_STOP:
+//ust// printk(KERN_DEBUG "Stop tracing %s\n", trace_name);
+//ust// err = ltt_trace_stop(trace_name);
+//ust// break;
+//ust// case LTT_CONTROL_CREATE_TRACE:
+//ust// printk(KERN_DEBUG "Creating trace %s\n", trace_name);
+//ust// err = ltt_trace_create(trace_name, trace_type,
+//ust// args.new_trace.mode,
+//ust// args.new_trace.subbuf_size_low,
+//ust// args.new_trace.n_subbufs_low,
+//ust// args.new_trace.subbuf_size_med,
+//ust// args.new_trace.n_subbufs_med,
+//ust// args.new_trace.subbuf_size_high,
+//ust// args.new_trace.n_subbufs_high);
+//ust// break;
+//ust// case LTT_CONTROL_DESTROY_TRACE:
+//ust// printk(KERN_DEBUG "Destroying trace %s\n", trace_name);
+//ust// err = ltt_trace_destroy(trace_name);
+//ust// break;
+//ust// }
+//ust// return err;
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_control);
+
+/**
+ * ltt_filter_control - Trace filter control in-kernel API
+ * @msg: Action to perform on the filter
+ * @trace_name: Trace on which the action must be done
+ */
+int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
+{
+ int err;
+ struct ltt_trace_struct *trace;
+
+ printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name);
+ ltt_lock_traces();
+ trace = _ltt_trace_find(trace_name);
+ if (trace == NULL) {
+ printk(KERN_ALERT
+ "Trace does not exist. Cannot proxy control request\n");
+ err = -ENOENT;
+ goto trace_error;
+ }
+//ust// if (!try_module_get(ltt_filter_control_owner)) {
+//ust// err = -ENODEV;
+//ust// goto get_module_error;
+//ust// }
+ switch (msg) {
+ case LTT_FILTER_DEFAULT_ACCEPT:
+ printk(KERN_DEBUG
+ "Proxy filter default accept %s\n", trace_name);
+ err = (*ltt_filter_control_functor)(msg, trace);
+ break;
+ case LTT_FILTER_DEFAULT_REJECT:
+ printk(KERN_DEBUG
+ "Proxy filter default reject %s\n", trace_name);
+ err = (*ltt_filter_control_functor)(msg, trace);
+ break;
+ default:
+ err = -EPERM;
+ }
+//ust// module_put(ltt_filter_control_owner);
+
+get_module_error:
+trace_error:
+ ltt_unlock_traces();
+ return err;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_filter_control);
+
+//ust// int __init ltt_init(void)
+//ust// {
+//ust// /* Make sure no page fault can be triggered by this module */
+//ust// vmalloc_sync_all();
+//ust// return 0;
+//ust// }
+
+//ust// module_init(ltt_init)
+
+//ust// static void __exit ltt_exit(void)
+//ust// {
+//ust// struct ltt_trace_struct *trace;
+//ust// struct list_head *pos, *n;
+//ust//
+//ust// ltt_lock_traces();
+//ust// /* Stop each trace, currently being read by RCU read-side */
+//ust//	list_for_each_entry_rcu(trace, &ltt_traces.head, list)
+//ust// _ltt_trace_stop(trace);
+//ust// /* Wait for quiescent state. Readers have preemption disabled. */
+//ust// synchronize_sched();
+//ust// /* Safe iteration is now permitted. It does not have to be RCU-safe
+//ust// * because no readers are left. */
+//ust//	list_for_each_safe(pos, n, &ltt_traces.head) {
+//ust// trace = container_of(pos, struct ltt_trace_struct, list);
+//ust// /* _ltt_trace_destroy does a synchronize_sched() */
+//ust// _ltt_trace_destroy(trace);
+//ust// __ltt_trace_destroy(trace);
+//ust// }
+//ust// /* free traces in pre-alloc status */
+//ust//	list_for_each_safe(pos, n, &ltt_traces.setup_head) {
+//ust// trace = container_of(pos, struct ltt_trace_struct, list);
+//ust// _ltt_trace_free(trace);
+//ust// }
+//ust//
+//ust// ltt_unlock_traces();
+//ust// }
+
+//ust// module_exit(ltt_exit)
+
+//ust// MODULE_LICENSE("GPL");
+//ust// MODULE_AUTHOR("Mathieu Desnoyers");
+//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");
--- /dev/null
+/*
+ * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ * Copyright (C) 2009 Pierre-Marc Fournier
+ *
+ * This contains the definitions for the Linux Trace Toolkit tracer.
+ *
+ * Ported to userspace by Pierre-Marc Fournier.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LTT_TRACER_H
+#define _LTT_TRACER_H
+
+#include <sys/types.h>
+#include <stdarg.h>
+#include "relay.h"
+#include "list.h"
+#include "kernelcompat.h"
+#include "channels.h"
+#include "tracercore.h"
+#include "marker.h"
+
+/* Number of bytes to log with a read/write event */
+#define LTT_LOG_RW_SIZE 32L
+
+/* Interval (in jiffies) at which the LTT per-CPU timer fires */
+#define LTT_PERCPU_TIMER_INTERVAL 1
+
+#ifndef LTT_ARCH_TYPE
+#define LTT_ARCH_TYPE LTT_ARCH_TYPE_UNDEFINED
+#endif
+
+#ifndef LTT_ARCH_VARIANT
+#define LTT_ARCH_VARIANT LTT_ARCH_VARIANT_NONE
+#endif
+
+struct ltt_active_marker;
+
+/* Maximum number of callbacks per marker */
+#define LTT_NR_CALLBACKS 10
+
+struct ltt_serialize_closure;
+struct ltt_probe_private_data;
+
+/* Serialization callback '%k' */
+typedef size_t (*ltt_serialize_cb)(struct rchan_buf *buf, size_t buf_offset,
+ struct ltt_serialize_closure *closure,
+ void *serialize_private, int *largest_align,
+ const char *fmt, va_list *args);
+
+struct ltt_serialize_closure {
+ ltt_serialize_cb *callbacks;
+ long cb_args[LTT_NR_CALLBACKS];
+ unsigned int cb_idx;
+};
+
+size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
+ struct ltt_serialize_closure *closure,
+ void *serialize_private,
+ int *largest_align, const char *fmt, va_list *args);
+
+struct ltt_available_probe {
+ const char *name; /* probe name */
+ const char *format;
+ marker_probe_func *probe_func;
+ ltt_serialize_cb callbacks[LTT_NR_CALLBACKS];
+ struct list_head node; /* registered probes list */
+};
+
+struct ltt_probe_private_data {
+ struct ltt_trace_struct *trace; /*
+ * Target trace, for metadata
+ * or statedump.
+ */
+ ltt_serialize_cb serializer; /*
+ * Serialization function override.
+ */
+ void *serialize_private; /*
+ * Private data for serialization
+ * functions.
+ */
+};
+
+enum ltt_channels {
+ LTT_CHANNEL_METADATA,
+ LTT_CHANNEL_UST,
+};
+
+struct ltt_active_marker {
+ struct list_head node; /* active markers list */
+ const char *channel;
+ const char *name;
+ const char *format;
+ struct ltt_available_probe *probe;
+};
+
+struct marker; //ust//
+extern void ltt_vtrace(const struct marker *mdata, void *probe_data,
+ void *call_data, const char *fmt, va_list *args);
+extern void ltt_trace(const struct marker *mdata, void *probe_data,
+ void *call_data, const char *fmt, ...);
+
+/*
+ * Unique ID assigned to each registered probe.
+ */
+enum marker_id {
+ MARKER_ID_SET_MARKER_ID = 0, /* Static IDs available (range 0-7) */
+ MARKER_ID_SET_MARKER_FORMAT,
+ MARKER_ID_COMPACT, /* Compact IDs (range: 8-127) */
+ MARKER_ID_DYNAMIC, /* Dynamic IDs (range: 128-65535) */
+};
+
+/* static ids 0-1 reserved for internal use. */
+#define MARKER_CORE_IDS 2
+static inline enum marker_id marker_id_type(uint16_t id)
+{
+ if (id < MARKER_CORE_IDS)
+ return (enum marker_id)id;
+ else
+ return MARKER_ID_DYNAMIC;
+}
+
+struct user_dbg_data {
+ unsigned long avail_size;
+ unsigned long write;
+ unsigned long read;
+};
+
+struct ltt_trace_ops {
+ /* First 32 bytes cache-hot cacheline */
+ int (*reserve_slot) (struct ltt_trace_struct *trace,
+ struct ltt_channel_struct *channel,
+ void **transport_data, size_t data_size,
+ size_t *slot_size, long *buf_offset, u64 *tsc,
+ unsigned int *rflags,
+ int largest_align);
+ void (*commit_slot) (struct ltt_channel_struct *channel,
+ void **transport_data, long buf_offset,
+ size_t slot_size);
+ void (*wakeup_channel) (struct ltt_channel_struct *ltt_channel);
+ int (*user_blocking) (struct ltt_trace_struct *trace,
+ unsigned int index, size_t data_size,
+ struct user_dbg_data *dbg);
+ /* End of first 32 bytes cacheline */
+ int (*create_dirs) (struct ltt_trace_struct *new_trace);
+ void (*remove_dirs) (struct ltt_trace_struct *new_trace);
+ int (*create_channel) (const char *trace_name,
+ struct ltt_trace_struct *trace,
+ struct dentry *dir, const char *channel_name,
+ struct ltt_channel_struct *ltt_chan,
+ unsigned int subbuf_size,
+ unsigned int n_subbufs, int overwrite);
+ void (*finish_channel) (struct ltt_channel_struct *channel);
+ void (*remove_channel) (struct ltt_channel_struct *channel);
+ void (*user_errors) (struct ltt_trace_struct *trace,
+ unsigned int index, size_t data_size,
+ struct user_dbg_data *dbg);
+} ____cacheline_aligned;
+
+struct ltt_transport {
+ char *name;
+ struct module *owner;
+ struct list_head node;
+ struct ltt_trace_ops ops;
+};
+
+enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };
+
+#define CHANNEL_FLAG_ENABLE (1U<<0)
+#define CHANNEL_FLAG_OVERWRITE (1U<<1)
+
+/* Per-trace information - each trace/flight recorder represented by one */
+struct ltt_trace_struct {
+ /* First 32 bytes cache-hot cacheline */
+ struct list_head list;
+ struct ltt_trace_ops *ops;
+ int active;
+ /* Second 32 bytes cache-hot cacheline */
+ struct ltt_channel_struct *channels;
+ unsigned int nr_channels;
+ u32 freq_scale;
+ u64 start_freq;
+ u64 start_tsc;
+ unsigned long long start_monotonic;
+ struct timeval start_time;
+ struct ltt_channel_setting *settings;
+ struct {
+ struct dentry *trace_root;
+ } dentry;
+ struct kref kref; /* Each channel has a kref of the trace struct */
+ struct ltt_transport *transport;
+ struct kref ltt_transport_kref;
+ char trace_name[NAME_MAX];
+} ____cacheline_aligned;
+
+/* Hardcoded event headers
+ *
+ * Event header for a trace with an active heartbeat: 27-bit timestamps.
+ *
+ * Headers are 32-bit aligned. To ensure such alignment, a dynamic per-trace
+ * alignment value must be computed.
+ *
+ * Remember that the C compiler aligns each member on a boundary equal to
+ * its own size.
+ *
+ * As relay subbuffers are aligned on pages, we are sure that they are 4- and
+ * 8-byte aligned, so the buffer header and trace header are aligned.
+ *
+ * Event headers are aligned depending on the trace alignment option.
+ *
+ * Note: C structure bitfields are not used here, for cross-endianness and
+ * portability concerns.
+ */
+
+#define LTT_RESERVED_EVENTS 3
+#define LTT_EVENT_BITS 5
+#define LTT_FREE_EVENTS ((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
+#define LTT_TSC_BITS 27
+#define LTT_TSC_MASK ((1 << LTT_TSC_BITS) - 1)
+
+struct ltt_event_header {
+ u32 id_time; /* 5 bits event id (MSB); 27 bits time (LSB) */
+};
+
+/* Reservation flags */
+#define LTT_RFLAG_ID (1 << 0)
+#define LTT_RFLAG_ID_SIZE (1 << 1)
+#define LTT_RFLAG_ID_SIZE_TSC (1 << 2)
+
+/*
+ * We use the cpu_khz/HZ variables from asm/timex.h here: we might have to
+ * deal specifically with CPU frequency scaling someday, so interpolating
+ * between the start- and end-of-buffer values is not flexible enough. Using
+ * an immediate frequency value makes it possible to directly calculate the
+ * times for the parts of a buffer that precede a frequency change.
+ *
+ * Keep the natural field alignment for _each field_ within this structure if
+ * you ever add/remove a field from this header. Packed attribute is not used
+ * because gcc generates poor code on at least powerpc and mips. Don't ever
+ * let gcc add padding between the structure elements.
+ */
+struct ltt_subbuffer_header {
+ uint64_t cycle_count_begin; /* Cycle count at subbuffer start */
+ uint64_t cycle_count_end; /* Cycle count at subbuffer end */
+ uint32_t magic_number; /*
+ * Trace magic number.
+ * contains endianness information.
+ */
+ uint8_t major_version;
+ uint8_t minor_version;
+ uint8_t arch_size; /* Architecture pointer size */
+ uint8_t alignment; /* LTT data alignment */
+ uint64_t start_time_sec; /* NTP-corrected start time */
+ uint64_t start_time_usec;
+ uint64_t start_freq; /*
+ * Frequency at trace start,
+ * used all along the trace.
+ */
+ uint32_t freq_scale; /* Frequency scaling (divisor) */
+ uint32_t lost_size; /* Size unused at end of subbuffer */
+ uint32_t buf_size; /* Size of this subbuffer */
+ uint32_t events_lost; /*
+ * Events lost in this subbuffer since
+ * the beginning of the trace.
+ * (may overflow)
+ */
+ uint32_t subbuf_corrupt; /*
+ * Corrupted (lost) subbuffers since
+					 * the beginning of the trace.
+ * (may overflow)
+ */
+ uint8_t header_end[0]; /* End of header */
+};
+
+/**
+ * ltt_subbuffer_header_size - called on buffer-switch to a new sub-buffer
+ *
+ * Return header size without padding after the structure. Don't use packed
+ * structure because gcc generates inefficient code on some architectures
+ * (powerpc, mips..)
+ */
+static inline size_t ltt_subbuffer_header_size(void)
+{
+ return offsetof(struct ltt_subbuffer_header, header_end);
+}
+
+/*
+ * ltt_get_header_size
+ *
+ * Calculate alignment offset to 32-bits. This is the alignment offset of the
+ * event header.
+ *
+ * Important note :
+ * The event header must be 32-bits. The total offset calculated here :
+ *
+ * Alignment of header struct on 32 bits (min arch size, header size)
+ * + sizeof(header struct) (32-bits)
+ * + (opt) u16 (ext. event id)
+ * + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
+ * + (opt) u32 (ext. event size)
+ * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
+ *
+ * The payload must itself determine its own alignment from the biggest type it
+ * contains.
+ */
+static inline unsigned char ltt_get_header_size(
+ struct ltt_channel_struct *channel,
+ size_t offset,
+ size_t data_size,
+ size_t *before_hdr_pad,
+ unsigned int rflags)
+{
+ size_t orig_offset = offset;
+ size_t padding;
+
+ padding = ltt_align(offset, sizeof(struct ltt_event_header));
+ offset += padding;
+ offset += sizeof(struct ltt_event_header);
+
+ switch (rflags) {
+ case LTT_RFLAG_ID_SIZE_TSC:
+ offset += sizeof(u16) + sizeof(u16);
+ if (data_size >= 0xFFFFU)
+ offset += sizeof(u32);
+ offset += ltt_align(offset, sizeof(u64));
+ offset += sizeof(u64);
+ break;
+ case LTT_RFLAG_ID_SIZE:
+ offset += sizeof(u16) + sizeof(u16);
+ if (data_size >= 0xFFFFU)
+ offset += sizeof(u32);
+ break;
+ case LTT_RFLAG_ID:
+ offset += sizeof(u16);
+ break;
+ }
+
+ *before_hdr_pad = padding;
+ return offset - orig_offset;
+}
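+
+/*
+ * Worked example (illustrative), following the cases above: with the
+ * LTT_RFLAG_ID_SIZE_TSC flag, an already 32-bit-aligned offset and a payload
+ * smaller than 0xFFFF bytes, the header takes 4 (struct ltt_event_header)
+ * + 2 (u16 event id) + 2 (u16 event size) = 8 bytes, then pads to the next
+ * 64-bit boundary (a no-op at offset 8) and adds an 8-byte full TSC, for a
+ * total of 16 bytes with *before_hdr_pad == 0.
+ */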
+
+/*
+ * ltt_write_event_header
+ *
+ * Writes the event header to the offset (already aligned on 32-bits).
+ *
+ * @trace : trace to write to.
+ * @channel : pointer to the channel structure.
+ * @buf : buffer to write to.
+ * @buf_offset : buffer offset to write to (aligned on 32 bits).
+ * @eID : event ID
+ * @event_size : size of the event, excluding the event header.
+ * @tsc : time stamp counter.
+ * @rflags : reservation flags.
+ *
+ * returns : offset where the event data must be written.
+ */
+static inline size_t ltt_write_event_header(struct ltt_trace_struct *trace,
+ struct ltt_channel_struct *channel,
+ struct rchan_buf *buf, long buf_offset,
+ u16 eID, size_t event_size,
+ u64 tsc, unsigned int rflags)
+{
+ struct ltt_event_header header;
+ size_t small_size;
+
+ switch (rflags) {
+ case LTT_RFLAG_ID_SIZE_TSC:
+ header.id_time = 29 << LTT_TSC_BITS;
+ break;
+ case LTT_RFLAG_ID_SIZE:
+ header.id_time = 30 << LTT_TSC_BITS;
+ break;
+ case LTT_RFLAG_ID:
+ header.id_time = 31 << LTT_TSC_BITS;
+ break;
+ default:
+ header.id_time = eID << LTT_TSC_BITS;
+ break;
+ }
+ header.id_time |= (u32)tsc & LTT_TSC_MASK;
+ ltt_relay_write(buf, buf_offset, &header, sizeof(header));
+ buf_offset += sizeof(header);
+
+ switch (rflags) {
+ case LTT_RFLAG_ID_SIZE_TSC:
+ small_size = min_t(size_t, event_size, 0xFFFFU);
+ ltt_relay_write(buf, buf_offset,
+ (u16[]){ (u16)eID }, sizeof(u16));
+ buf_offset += sizeof(u16);
+ ltt_relay_write(buf, buf_offset,
+ (u16[]){ (u16)small_size }, sizeof(u16));
+ buf_offset += sizeof(u16);
+ if (small_size == 0xFFFFU) {
+ ltt_relay_write(buf, buf_offset,
+ (u32[]){ (u32)event_size }, sizeof(u32));
+ buf_offset += sizeof(u32);
+ }
+ buf_offset += ltt_align(buf_offset, sizeof(u64));
+ ltt_relay_write(buf, buf_offset,
+ (u64[]){ (u64)tsc }, sizeof(u64));
+ buf_offset += sizeof(u64);
+ break;
+ case LTT_RFLAG_ID_SIZE:
+ small_size = min_t(size_t, event_size, 0xFFFFU);
+ ltt_relay_write(buf, buf_offset,
+ (u16[]){ (u16)eID }, sizeof(u16));
+ buf_offset += sizeof(u16);
+ ltt_relay_write(buf, buf_offset,
+ (u16[]){ (u16)small_size }, sizeof(u16));
+ buf_offset += sizeof(u16);
+ if (small_size == 0xFFFFU) {
+ ltt_relay_write(buf, buf_offset,
+ (u32[]){ (u32)event_size }, sizeof(u32));
+ buf_offset += sizeof(u32);
+ }
+ break;
+ case LTT_RFLAG_ID:
+ ltt_relay_write(buf, buf_offset,
+ (u16[]){ (u16)eID }, sizeof(u16));
+ buf_offset += sizeof(u16);
+ break;
+ default:
+ break;
+ }
+
+ return buf_offset;
+}
+
+/* Lockless LTTng */
+
+/* Buffer offset macros */
+
+/*
+ * BUFFER_TRUNC zeroes the subbuffer offset and the subbuffer number parts of
+ * the offset, which leaves only the buffer number.
+ */
+#define BUFFER_TRUNC(offset, chan) \
+ ((offset) & (~((chan)->alloc_size-1)))
+#define BUFFER_OFFSET(offset, chan) ((offset) & ((chan)->alloc_size - 1))
+#define SUBBUF_OFFSET(offset, chan) ((offset) & ((chan)->subbuf_size - 1))
+#define SUBBUF_ALIGN(offset, chan) \
+ (((offset) + (chan)->subbuf_size) & (~((chan)->subbuf_size - 1)))
+#define SUBBUF_TRUNC(offset, chan) \
+ ((offset) & (~((chan)->subbuf_size - 1)))
+#define SUBBUF_INDEX(offset, chan) \
+ (BUFFER_OFFSET((offset), chan) >> (chan)->subbuf_size_order)
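+
+/*
+ * Worked example (illustrative): for a channel with subbuf_size = 4096
+ * (subbuf_size_order = 12), 2 subbuffers and alloc_size = 8192, an offset
+ * of 12300 gives:
+ *	BUFFER_TRUNC(12300, chan)  = 8192	(buffer number part)
+ *	BUFFER_OFFSET(12300, chan) = 4108	(12300 mod 8192)
+ *	SUBBUF_INDEX(12300, chan)  = 1		(4108 >> 12)
+ *	SUBBUF_OFFSET(12300, chan) = 12		(12300 mod 4096)
+ *	SUBBUF_ALIGN(12300, chan)  = 16384	(start of the next subbuffer)
+ */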
+
+/*
+ * ltt_reserve_slot
+ *
+ * Atomic slot reservation in an LTTng buffer. It will take care of
+ * sub-buffer switching.
+ *
+ * Parameters:
+ *
+ * @trace : the trace structure to log to.
+ * @channel : the channel to reserve space into.
+ * @transport_data : specific transport data.
+ * @data_size : size of the variable length data to log.
+ * @slot_size : pointer to total size of the slot (out)
+ * @buf_offset : pointer to reserve offset (out)
+ * @tsc : pointer to the tsc at the slot reservation (out)
+ * @rflags : reservation flags (header specificity)
+ * @largest_align : largest alignment (in bytes) required by the payload
+ *
+ * Return : -ENOSPC if not enough space, else 0.
+ */
+static inline int ltt_reserve_slot(
+ struct ltt_trace_struct *trace,
+ struct ltt_channel_struct *channel,
+ void **transport_data,
+ size_t data_size,
+ size_t *slot_size,
+ long *buf_offset,
+ u64 *tsc,
+ unsigned int *rflags,
+ int largest_align)
+{
+ return trace->ops->reserve_slot(trace, channel, transport_data,
+ data_size, slot_size, buf_offset, tsc, rflags,
+ largest_align);
+}
+
+
+/*
+ * ltt_commit_slot
+ *
+ * Atomic unordered slot commit. Increments the commit count in the
+ * specified sub-buffer, and delivers it if necessary.
+ *
+ * Parameters:
+ *
+ * @channel : the channel to commit the slot into.
+ * @transport_data : specific transport data.
+ * @buf_offset : offset of beginning of reserved slot
+ * @slot_size : size of the reserved slot.
+ */
+static inline void ltt_commit_slot(
+ struct ltt_channel_struct *channel,
+ void **transport_data,
+ long buf_offset,
+ size_t slot_size)
+{
+ struct ltt_trace_struct *trace = channel->trace;
+
+ trace->ops->commit_slot(channel, transport_data, buf_offset, slot_size);
+}
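+
+/*
+ * Illustrative sketch (not a real probe; "buf" and "eID" are assumed to be
+ * supplied by the transport and the marker layer): the usual
+ * reserve / write-header / write-payload / commit sequence built from the
+ * two wrappers above.
+ *
+ *	void *transport_data;
+ *	size_t slot_size;
+ *	long buf_offset, slot_start;
+ *	u64 tsc;
+ *	unsigned int rflags = 0;
+ *	u32 payload = 42;
+ *
+ *	if (ltt_reserve_slot(trace, channel, &transport_data, sizeof(payload),
+ *			     &slot_size, &buf_offset, &tsc, &rflags,
+ *			     sizeof(u32)) < 0)
+ *		return;				// -ENOSPC: event dropped
+ *	slot_start = buf_offset;
+ *	buf_offset = ltt_write_event_header(trace, channel, buf, buf_offset,
+ *					    eID, sizeof(payload), tsc, rflags);
+ *	ltt_relay_write(buf, buf_offset, &payload, sizeof(payload));
+ *	ltt_commit_slot(channel, &transport_data, slot_start, slot_size);
+ */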
+
+/*
+ * Control channels :
+ * control/metadata
+ * control/interrupts
+ * control/...
+ *
+ * cpu channel :
+ * cpu
+ */
+
+#define LTT_METADATA_CHANNEL "metadata_state"
+#define LTT_UST_CHANNEL "ust"
+
+#define LTT_FLIGHT_PREFIX "flight-"
+
+/* Tracer properties */
+//#define LTT_DEFAULT_SUBBUF_SIZE_LOW 65536
+#define LTT_DEFAULT_SUBBUF_SIZE_LOW 4096
+#define LTT_DEFAULT_N_SUBBUFS_LOW 2
+//#define LTT_DEFAULT_SUBBUF_SIZE_MED 262144
+#define LTT_DEFAULT_SUBBUF_SIZE_MED 4096
+#define LTT_DEFAULT_N_SUBBUFS_MED 2
+//#define LTT_DEFAULT_SUBBUF_SIZE_HIGH 1048576
+#define LTT_DEFAULT_SUBBUF_SIZE_HIGH 4096
+#define LTT_DEFAULT_N_SUBBUFS_HIGH 2
+#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
+#define LTT_TRACER_VERSION_MAJOR 2
+#define LTT_TRACER_VERSION_MINOR 3
+
+/*
+ * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
+ * nearly full buffer. User space won't use this last amount of space when in
+ * blocking mode. This space also includes the event header that would be
+ * written by this user space event.
+ */
+#define LTT_RESERVE_CRITICAL 4096
+
+/* Register and unregister function pointers */
+
+enum ltt_module_function {
+ LTT_FUNCTION_RUN_FILTER,
+ LTT_FUNCTION_FILTER_CONTROL,
+ LTT_FUNCTION_STATEDUMP
+};
+
+void ltt_transport_register(struct ltt_transport *transport);
+void ltt_transport_unregister(struct ltt_transport *transport);
+
+/* Exported control function */
+
+union ltt_control_args {
+ struct {
+ enum trace_mode mode;
+ unsigned int subbuf_size_low;
+ unsigned int n_subbufs_low;
+ unsigned int subbuf_size_med;
+ unsigned int n_subbufs_med;
+ unsigned int subbuf_size_high;
+ unsigned int n_subbufs_high;
+ } new_trace;
+};
+
+int _ltt_trace_setup(const char *trace_name);
+int ltt_trace_setup(const char *trace_name);
+struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name);
+int ltt_trace_set_type(const char *trace_name, const char *trace_type);
+int ltt_trace_set_channel_subbufsize(const char *trace_name,
+ const char *channel_name, unsigned int size);
+int ltt_trace_set_channel_subbufcount(const char *trace_name,
+ const char *channel_name, unsigned int cnt);
+int ltt_trace_set_channel_enable(const char *trace_name,
+ const char *channel_name, unsigned int enable);
+int ltt_trace_set_channel_overwrite(const char *trace_name,
+ const char *channel_name, unsigned int overwrite);
+int ltt_trace_alloc(const char *trace_name);
+int ltt_trace_destroy(const char *trace_name);
+int ltt_trace_start(const char *trace_name);
+int ltt_trace_stop(const char *trace_name);
+
+enum ltt_filter_control_msg {
+ LTT_FILTER_DEFAULT_ACCEPT,
+ LTT_FILTER_DEFAULT_REJECT
+};
+
+extern int ltt_filter_control(enum ltt_filter_control_msg msg,
+ const char *trace_name);
+
+extern struct dentry *get_filter_root(void);
+
+void ltt_write_trace_header(struct ltt_trace_struct *trace,
+ struct ltt_subbuffer_header *header);
+extern void ltt_buffer_destroy(struct ltt_channel_struct *ltt_chan);
+
+void ltt_core_register(int (*function)(u8, void *));
+
+void ltt_core_unregister(void);
+
+void ltt_release_trace(struct kref *kref);
+void ltt_release_transport(struct kref *kref);
+
+extern int ltt_probe_register(struct ltt_available_probe *pdata);
+extern int ltt_probe_unregister(struct ltt_available_probe *pdata);
+extern int ltt_marker_connect(const char *channel, const char *mname,
+ const char *pname);
+extern int ltt_marker_disconnect(const char *channel, const char *mname,
+ const char *pname);
+extern void ltt_dump_marker_state(struct ltt_trace_struct *trace);
+
+void ltt_lock_traces(void);
+void ltt_unlock_traces(void);
+
+struct ltt_trace_struct *_ltt_trace_find(const char *trace_name);
+
+#endif /* _LTT_TRACER_H */
--- /dev/null
+/*
+ * LTT core in-kernel infrastructure.
+ *
+ * Copyright 2006 - Mathieu Desnoyers mathieu.desnoyers@polymtl.ca
+ *
+ * Distributed under the GPL license
+ */
+
+//ust// #include <linux/ltt-core.h>
+//ust// #include <linux/percpu.h>
+//ust// #include <linux/module.h>
+//ust// #include <linux/debugfs.h>
+#include "kernelcompat.h"
+#include "tracercore.h"
+
+/* Traces structures */
+struct ltt_traces ltt_traces = {
+ .setup_head = LIST_HEAD_INIT(ltt_traces.setup_head),
+ .head = LIST_HEAD_INIT(ltt_traces.head),
+};
+//ust// EXPORT_SYMBOL(ltt_traces);
+
+/* Traces list writer locking */
+static DEFINE_MUTEX(ltt_traces_mutex);
+
+/* dentry of ltt's root dir */
+//ust// static struct dentry *ltt_root_dentry;
+//ust// struct dentry *get_ltt_root(void)
+//ust// {
+//ust// if (!ltt_root_dentry) {
+//ust// ltt_root_dentry = debugfs_create_dir(LTT_ROOT, NULL);
+//ust// if (!ltt_root_dentry)
+//ust// printk(KERN_ERR "LTT : create ltt root dir failed\n");
+//ust// }
+//ust// return ltt_root_dentry;
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(get_ltt_root);
+
+void ltt_lock_traces(void)
+{
+	mutex_lock(&ltt_traces_mutex);
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_lock_traces);
+
+void ltt_unlock_traces(void)
+{
+	mutex_unlock(&ltt_traces_mutex);
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_unlock_traces);
+
+//ust// DEFINE_PER_CPU(unsigned int, ltt_nesting);
+//ust// EXPORT_PER_CPU_SYMBOL(ltt_nesting);
+unsigned int ltt_nesting;
+
+int ltt_run_filter_default(void *trace, uint16_t eID)
+{
+ return 1;
+}
+
+/* This function pointer is protected by a trace activation check */
+ltt_run_filter_functor ltt_run_filter = ltt_run_filter_default;
+//ust// EXPORT_SYMBOL_GPL(ltt_run_filter);
+
+void ltt_filter_register(ltt_run_filter_functor func)
+{
+ ltt_run_filter = func;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_filter_register);
+
+void ltt_filter_unregister(void)
+{
+ ltt_run_filter = ltt_run_filter_default;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_filter_unregister);
--- /dev/null
+/*
+ * Copyright (C) 2005,2006 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * This contains the core definitions for the Linux Trace Toolkit.
+ */
+
+#ifndef LTT_CORE_H
+#define LTT_CORE_H
+
+#include "list.h"
+#include "kernelcompat.h"
+//ust// #include <linux/percpu.h>
+
+/* ltt's root dir in debugfs */
+#define LTT_ROOT "ltt"
+
+/*
+ * All modifications of ltt_traces must be done by ltt-tracer.c, while holding
+ * the traces mutex. Only reading of this information can be done elsewhere,
+ * with the RCU mechanism: preemption must be disabled while reading the
+ * list.
+ */
+struct ltt_traces {
+ struct list_head setup_head; /* Pre-allocated traces list */
+ struct list_head head; /* Allocated Traces list */
+ unsigned int num_active_traces; /* Number of active traces */
+} ____cacheline_aligned;
+
+extern struct ltt_traces ltt_traces;
+
+/*
+ * get dentry of ltt's root dir
+ */
+struct dentry *get_ltt_root(void);
+
+/* Keep track of trap nesting inside LTT */
+//ust// DECLARE_PER_CPU(unsigned int, ltt_nesting);
+extern unsigned int ltt_nesting;
+
+typedef int (*ltt_run_filter_functor)(void *trace, uint16_t eID);
+//typedef int (*ltt_run_filter_functor)(void *, __u16);
+
+extern ltt_run_filter_functor ltt_run_filter;
+
+extern void ltt_filter_register(ltt_run_filter_functor func);
+extern void ltt_filter_unregister(void);
+
+#if defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT)
+
+/*
+ * Calculate the offset needed to align the type.
+ * size_of_type must be non-zero.
+ */
+static inline unsigned int ltt_align(size_t align_drift, size_t size_of_type)
+{
+ size_t alignment = min(sizeof(void *), size_of_type);
+ return (alignment - align_drift) & (alignment - 1);
+}
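+
+/*
+ * Worked example (illustrative): on a 64-bit build, aligning a u32
+ * (size_of_type = 4) at an align_drift of 5 gives alignment = min(8, 4) = 4
+ * and ltt_align(5, 4) = (4 - 5) & 3 = 3: three padding bytes bring the
+ * offset to the next 4-byte boundary.
+ */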
+/* Default arch alignment */
+#define LTT_ALIGN
+
+static inline int ltt_get_alignment(void)
+{
+ return sizeof(void *);
+}
+
+#else
+
+static inline unsigned int ltt_align(size_t align_drift,
+ size_t size_of_type)
+{
+ return 0;
+}
+
+#define LTT_ALIGN __attribute__((packed))
+
+static inline int ltt_get_alignment(void)
+{
+ return 0;
+}
+#endif /* defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT) */
+
+#endif /* LTT_CORE_H */