From: Mathieu Desnoyers Date: Mon, 17 Jan 2011 06:55:38 +0000 (-0500) Subject: Rename the "discard" directory to "deprecated" X-Git-Tag: v2.0-pre1~176 X-Git-Url: http://git.lttng.org./?a=commitdiff_plain;h=681cc3bb4f00fd63ad65920ea6e201fcaed300c0;p=lttng-modules.git Rename the "discard" directory to "deprecated" Signed-off-by: Mathieu Desnoyers --- diff --git a/deprecated/ltt-ascii.c b/deprecated/ltt-ascii.c new file mode 100644 index 00000000..b020fedb --- /dev/null +++ b/deprecated/ltt-ascii.c @@ -0,0 +1,583 @@ +/* + * LTT ascii binary buffer to ascii converter. + * + * Copyright 2008 - 2009 Lai Jiangshan (laijs@cn.fujitsu.com) + * Copyright 2009 - Mathieu Desnoyers mathieu.desnoyers@polymtl.ca + * + * Dual LGPL v2.1/GPL v2 license. + */ + +/* + * TODO + * + * Move to new switch behavior: Wait for data for the duration of the + * timer interval + safety, if none is coming, consider that no activity occured + * in the buffer. + * + * Fix case when having a text file open and destroying trace. + * + * - Automate periodical switch: + * + * The debugfs file "switch_timer" receives a timer period as parameter + * (e.g. echo 100 > switch_timer) to activate the timer per channel. This can + * also be accessed through the internal API _before the trace session starts_. + * This timer will insure that we periodically have subbuffers to read, and + * therefore that the merge-sort does not wait endlessly for a subbuffer. + * + * - If a channel is switched and read without data, make sure it is still + * considered afterward (not removed from the queue). + * + * - Create a ascii/tracename/ALL file to merge-sort all active channels. + * - Create a ascii/tracename/README file to contain the text output legend. + * - Remove leading zeroes from timestamps. + * - Enhance pretty-printing to make sure all types used for addesses output in + * the form 0xAB00000000 (not decimal). This is true for %p and 0x%...X. + * - Hotplug support + */ + + + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ltt-tracer.h" +#include "ltt-relay.h" +#include "ltt-relay-lockless.h" + +#if 0 +#define DEBUGP printk +#else +#define DEBUGP(fmt , a...) +#endif + +struct dentry *ltt_ascii_dir_dentry; +EXPORT_SYMBOL_GPL(ltt_ascii_dir_dentry); + +struct ltt_relay_iter; + +struct ltt_relay_cpu_iter { + /* cpu buffer information */ + struct ltt_chanbuf *buf; + struct ltt_relay_iter *iter; + int sb_ref; /* holding a reference to a subbuffer */ + long read_sb_offset; /* offset of the subbuffer read */ + + /* current event information */ + struct ltt_subbuffer_header *header; + long hdr_offset; /* event header offset */ + long payload_offset; /* event payload offset */ + u64 tsc; /* full 64-bits timestamp value */ + u32 data_size; + u16 chID; /* channel ID, const */ + u16 eID; +}; + +struct ltt_relay_iter { + struct ltt_relay_cpu_iter iter_cpu[NR_CPUS]; + struct ltt_chan *chan; + loff_t pos; + int cpu; + int nr_refs; +}; + +/* + * offset of 0 in subbuffer means "subbuf size" (filled subbuffer). 
+ */ +static int is_subbuffer_offset_end(struct ltt_relay_cpu_iter *citer, + long offset) +{ + struct ltt_chan *chan = container_of(citer->buf->a.chan, + struct ltt_chan, a); + long sub_offset = SUBBUF_OFFSET(offset - 1, chan) + 1; + + return (sub_offset <= citer->header->data_size); +} + +static u64 calculate_tsc(u64 pre_tsc, u64 read_tsc, unsigned int rflags) +{ + u64 new_tsc = read_tsc; + + if (rflags != LTT_RFLAG_ID_SIZE_TSC) { + BUG_ON(read_tsc >> LTT_TSC_BITS); + + new_tsc = (pre_tsc & ~LTT_TSC_MASK) + read_tsc; + if (read_tsc < (pre_tsc & LTT_TSC_MASK)) + new_tsc += 1UL << LTT_TSC_BITS; + } + + return new_tsc; +} + +/* + * calculate payload offset */ +static inline long calculate_payload_offset(long offset, u16 chID, u16 eID) +{ + const char *fmt; + + if (!ltt_get_alignment()) + return offset; + + fmt = marker_get_fmt_from_id(chID, eID); + BUG_ON(!fmt); + + return offset + ltt_fmt_largest_align(offset, fmt); +} + +static void update_new_event(struct ltt_relay_cpu_iter *citer, long hdr_offset) +{ + u64 read_tsc; + unsigned int rflags; + long tmp_offset; + + WARN_ON_ONCE(hdr_offset != citer->hdr_offset); + + tmp_offset = ltt_read_event_header(&citer->buf->a, hdr_offset, + &read_tsc, &citer->data_size, + &citer->eID, &rflags); + citer->payload_offset = calculate_payload_offset(tmp_offset, + citer->chID, + citer->eID); + + citer->tsc = calculate_tsc(citer->tsc, read_tsc, rflags); +} + +static void update_event_size(struct ltt_relay_cpu_iter *citer, long hdr_offset) +{ + char output[1]; + const char *fmt; + size_t data_size; + + if (citer->data_size != INT_MAX) + return; + + fmt = marker_get_fmt_from_id(citer->chID, citer->eID); + BUG_ON(!fmt); + ltt_serialize_printf(citer->buf, citer->payload_offset, + &data_size, output, 0, fmt); + citer->data_size = data_size; +} + +static void update_cpu_iter(struct ltt_relay_cpu_iter *citer, long hdr_offset) +{ + if (unlikely((!citer->sb_ref) + || is_subbuffer_offset_end(citer, hdr_offset))) { + citer->header = NULL; + return; + } + update_new_event(citer, hdr_offset); + update_event_size(citer, hdr_offset); +} + +/* + * returns 0 if we get a subbuffer reference. + * else, the buffer has not available data, try again later. 
+ */ +static int subbuffer_start(struct ltt_relay_cpu_iter *citer, long *offset) +{ + int ret; + struct ltt_relay_iter *iter = citer->iter; + + ret = ltt_chanbuf_get_subbuf(citer->buf, offset); + if (!ret) { + citer->header = ltt_relay_read_offset_address(&citer->buf->a, + *offset); + citer->hdr_offset = (*offset) + ltt_sb_header_size(); + citer->tsc = citer->header->cycle_count_begin; + iter->nr_refs++; + citer->sb_ref = 1; + return 0; + } else { + if (ltt_chanbuf_is_finalized(citer->buf)) + return -ENODATA; + else + return -EAGAIN; + } +} + +static void subbuffer_stop(struct ltt_relay_cpu_iter *citer, + long offset) +{ + int ret; + struct ltt_relay_iter *iter = citer->iter; + + WARN_ON_ONCE(!citer->sb_ref); + ret = ltt_chanbuf_put_subbuf(citer->buf, offset); + WARN_ON_ONCE(ret); + citer->sb_ref = 0; + iter->nr_refs--; +} + +static void ltt_relay_advance_cpu_iter(struct ltt_relay_cpu_iter *citer) +{ + long old_offset = citer->payload_offset; + long new_offset = citer->payload_offset; + int ret; + + /* find that whether we read all data in this subbuffer */ + if (unlikely(is_subbuffer_offset_end(citer, + old_offset + citer->data_size))) { + DEBUGP(KERN_DEBUG "LTT ASCII stop cpu %d offset %lX\n", + citer->buf->a.cpu, citer->read_sb_offset); + subbuffer_stop(citer, citer->read_sb_offset); + for (;;) { + ret = subbuffer_start(citer, &citer->read_sb_offset); + DEBUGP(KERN_DEBUG + "LTT ASCII start cpu %d ret %d offset %lX\n", + citer->buf->a.cpu, ret, citer->read_sb_offset); + if (!ret || ret == -ENODATA) { + break; /* got data, or finalized */ + } else { /* -EAGAIN */ + if (signal_pending(current)) + break; + schedule_timeout_interruptible(1); + //TODO: check for no-data delay. take ref. break + } + } + } else { + new_offset += citer->data_size; + citer->hdr_offset = new_offset + ltt_align(new_offset, sizeof(struct ltt_event_header)); + DEBUGP(KERN_DEBUG + "LTT ASCII old_offset %lX new_offset %lX cpu %d\n", + old_offset, new_offset, citer->buf->a.cpu); + } + + update_cpu_iter(citer, citer->hdr_offset); +} + +static int cpu_iter_eof(struct ltt_relay_cpu_iter *citer) +{ + return !citer->sb_ref; +} + +static int ltt_relay_iter_eof(struct ltt_relay_iter *iter) +{ + return iter->nr_refs == 0; +} + +static void ltt_relay_advance_iter(struct ltt_relay_iter *iter) +{ + int i; + struct ltt_relay_cpu_iter *curr, *min = NULL; + iter->cpu = -1; + + /* + * find the event with the minimum tsc. + * TODO: use min-heep for 4096CPUS + */ + for_each_possible_cpu(i) { + curr = &iter->iter_cpu[i]; + + if (!curr->buf->a.allocated || !curr->header) + continue; + + if (cpu_iter_eof(curr)) + continue; + + if (!min || curr->tsc < min->tsc) { + min = curr; + iter->cpu = i; + } + } + + /* update cpu_iter for next ltt_relay_advance_iter() */ + if (min) + ltt_relay_advance_cpu_iter(min); +} + +static void *ascii_next(struct seq_file *m, void *v, loff_t *ppos) +{ + struct ltt_relay_iter *iter = m->private; + + WARN_ON_ONCE(!iter->nr_refs); + BUG_ON(v != iter); + + ltt_relay_advance_iter(iter); + return (ltt_relay_iter_eof(iter) || signal_pending(current)) + ? NULL : iter; +} + +static void *ascii_start(struct seq_file *m, loff_t *ppos) +{ + struct ltt_relay_iter *iter = m->private; + + ltt_relay_advance_iter(iter); + return (ltt_relay_iter_eof(iter) || signal_pending(current)) + ? 
NULL : iter; +} + +static void ascii_stop(struct seq_file *m, void *v) +{ +} + +static +int seq_serialize(struct seq_file *m, struct ltt_chanbuf *buf, + size_t buf_offset, const char *fmt, size_t *data_size) +{ + int len; + + if (m->count < m->size) { + len = ltt_serialize_printf(buf, buf_offset, data_size, + m->buf + m->count, + m->size - m->count, fmt); + if (m->count + len < m->size) { + m->count += len; + return 0; + } + } + + m->count = m->size; + return -1; +} + +static int ascii_show(struct seq_file *m, void *v) +{ + struct ltt_relay_iter *iter = v; + struct ltt_relay_cpu_iter *citer; + const char *name; + const char *fmt; + unsigned long long tsc; + size_t data_size; + + if (iter->cpu == -1) + return 0; + + citer = &iter->iter_cpu[iter->cpu]; + WARN_ON_ONCE(!citer->sb_ref); + /* + * Nothing to show, we are at the end of the last subbuffer currently + * having data. + */ + if (!citer->header) + return 0; + + tsc = citer->tsc; + name = marker_get_name_from_id(citer->chID, citer->eID); + fmt = marker_get_fmt_from_id(citer->chID, citer->eID); + + if (!name || !fmt) + return 0; + + seq_printf(m, "event:%16.16s: cpu:%2d time:%20.20llu ", + name, iter->cpu, tsc); + seq_serialize(m, citer->buf, citer->payload_offset, fmt, &data_size); + seq_puts(m, "\n"); + if (citer->data_size == INT_MAX) + citer->data_size = data_size; + + return 0; +} + +static struct seq_operations ascii_seq_ops = { + .start = ascii_start, + .next = ascii_next, + .stop = ascii_stop, + .show = ascii_show, +}; + +/* FIXME : cpu hotplug support */ +static int ltt_relay_iter_open_channel(struct ltt_relay_iter *iter, + struct ltt_chan *chan) +{ + int i, ret; + u16 chID = ltt_channels_get_index_from_name(chan->a.filename); + + /* we don't need lock relay_channels_mutex */ + for_each_possible_cpu(i) { + struct ltt_relay_cpu_iter *citer = &iter->iter_cpu[i]; + + citer->buf = per_cpu_ptr(chan->a.buf, i); + if (!citer->buf->a.allocated) + continue; + + citer->iter = iter; /* easy lazy parent info */ + citer->chID = chID; + + ret = ltt_chanbuf_open_read(citer->buf); + if (ret) { + /* Failed to open a percpu buffer, close everything. 
*/ + citer->buf = NULL; + goto error; + } + + for (;;) { + ret = subbuffer_start(citer, + &citer->read_sb_offset); + DEBUGP(KERN_DEBUG + "LTT ASCII open start " + "cpu %d ret %d offset %lX\n", + citer->buf->a.cpu, ret, citer->read_sb_offset); + if (!ret || ret == -ENODATA) { + break; /* got data, or finalized */ + } else { /* -EAGAIN */ + if (signal_pending(current)) + break; + schedule_timeout_interruptible(1); + } + } + update_cpu_iter(citer, citer->hdr_offset); + } + if (!iter->nr_refs) { + ret = -ENODATA; + goto error; + } + + return 0; + +error: + for_each_possible_cpu(i) { + struct ltt_relay_cpu_iter *citer = &iter->iter_cpu[i]; + + if (!citer->buf) + break; + + if (citer->buf->a.allocated) + ltt_chanbuf_release_read(citer->buf); + } + return ret; +} + +/* FIXME : cpu hotplug support */ +static int ltt_relay_iter_release_channel(struct ltt_relay_iter *iter) +{ + int i; + + for_each_possible_cpu(i) { + struct ltt_relay_cpu_iter *citer = &iter->iter_cpu[i]; + + if (citer->sb_ref) { + WARN_ON_ONCE(!citer->buf->a.allocated); + DEBUGP(KERN_DEBUG + "LTT ASCII release stop cpu %d offset %lX\n", + citer->buf->a.cpu, citer->read_sb_offset); + subbuffer_stop(&iter->iter_cpu[i], + citer->read_sb_offset); + } + if (citer->buf->a.allocated) + ltt_chanbuf_release_read(citer->buf); + } + WARN_ON_ONCE(iter->nr_refs); + return 0; +} + +static int ltt_relay_ascii_open(struct inode *inode, struct file *file) +{ + int ret; + struct ltt_chan *chan = inode->i_private; + struct ltt_relay_iter *iter = kzalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return -ENOMEM; + + iter->chan = chan; + ret = ltt_relay_iter_open_channel(iter, chan); + if (ret) + goto error_free_alloc; + + ret = seq_open(file, &ascii_seq_ops); + if (ret) + goto error_release_channel; + ((struct seq_file *)file->private_data)->private = iter; + return 0; + +error_release_channel: + ltt_relay_iter_release_channel(iter); +error_free_alloc: + kfree(iter); + return ret; +} + +static int ltt_relay_ascii_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct ltt_relay_iter *iter = seq->private; + + ltt_relay_iter_release_channel(iter); + kfree(iter); + return 0; +} + +static struct file_operations ltt_ascii_fops = +{ + .read = seq_read, + .open = ltt_relay_ascii_open, + .release = ltt_relay_ascii_release, + .llseek = no_llseek, + .owner = THIS_MODULE, +}; + +int ltt_ascii_create(struct ltt_chan *chan) +{ + struct dentry *dentry; + + dentry = debugfs_create_file(chan->a.filename, + S_IRUSR | S_IRGRP, + chan->a.trace->dentry.ascii_root, + chan, <t_ascii_fops); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + if (!dentry) + return -EEXIST; + + chan->a.ascii_dentry = dentry; + dentry->d_inode->i_private = chan; + return 0; +} +EXPORT_SYMBOL_GPL(ltt_ascii_create); + +void ltt_ascii_remove(struct ltt_chan *chan) +{ + struct dentry *dentry; + + dentry = dget(chan->a.ascii_dentry); + debugfs_remove(dentry); + /* TODO: wait / wakeup instead */ + /* + * Wait for every reference to the dentry to be gone, + * except us. 
+ */ + while (atomic_read(&dentry->d_count) != 1) + msleep(100); + dput(dentry); +} +EXPORT_SYMBOL_GPL(ltt_ascii_remove); + +int ltt_ascii_create_dir(struct ltt_trace *new_trace) +{ + new_trace->dentry.ascii_root = debugfs_create_dir(new_trace->trace_name, + ltt_ascii_dir_dentry); + if (!new_trace->dentry.ascii_root) + return -EEXIST; + return 0; +} +EXPORT_SYMBOL_GPL(ltt_ascii_create_dir); + +void ltt_ascii_remove_dir(struct ltt_trace *trace) +{ + debugfs_remove(trace->dentry.ascii_root); +} +EXPORT_SYMBOL_GPL(ltt_ascii_remove_dir); + +__init int ltt_ascii_init(void) +{ + ltt_ascii_dir_dentry = debugfs_create_dir(LTT_ASCII, get_ltt_root()); + + return ltt_ascii_dir_dentry ? 0 : -EFAULT; +} + +__exit void ltt_ascii_exit(void) +{ + debugfs_remove(ltt_ascii_dir_dentry); + put_ltt_root(); +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Lai Jiangshan@FNST and Mathieu Desnoyers"); +MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Ascii Converter"); diff --git a/deprecated/ltt-channels.c b/deprecated/ltt-channels.c new file mode 100644 index 00000000..962c81a8 --- /dev/null +++ b/deprecated/ltt-channels.c @@ -0,0 +1,397 @@ +/* + * ltt/ltt-channels.c + * + * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca) + * + * LTTng channel management. + * + * Author: + * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca) + * + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include +#include +#include +#include "ltt-channels.h" + +/* + * ltt_channel_mutex may be nested inside the LTT trace mutex. + * ltt_channel_mutex mutex may be nested inside markers mutex. + */ +static DEFINE_MUTEX(ltt_channel_mutex); +static LIST_HEAD(ltt_channels); +/* + * Index of next channel in array. Makes sure that as long as a trace channel is + * allocated, no array index will be re-used when a channel is freed and then + * another channel is allocated. This index is cleared and the array indexeds + * get reassigned when the index_kref goes back to 0, which indicates that no + * more trace channels are allocated. + */ +static unsigned int free_index; +/* index_kref is protected by both ltt_channel_mutex and lock_markers */ +static struct kref index_kref; /* Keeps track of allocated trace channels */ + +static struct ltt_channel_setting *lookup_channel(const char *name) +{ + struct ltt_channel_setting *iter; + + list_for_each_entry(iter, <t_channels, list) + if (strcmp(name, iter->name) == 0) + return iter; + return NULL; +} + +/* + * Must be called when channel refcount falls to 0 _and_ also when the last + * trace is freed. This function is responsible for compacting the channel and + * event IDs when no users are active. + * + * Called with lock_markers() and channels mutex held. + */ +static void release_channel_setting(struct kref *kref) +{ + struct ltt_channel_setting *setting = container_of(kref, + struct ltt_channel_setting, kref); + struct ltt_channel_setting *iter; + + if (atomic_read(&index_kref.refcount) == 0 + && atomic_read(&setting->kref.refcount) == 0) { + list_del(&setting->list); + kfree(setting); + + free_index = 0; + list_for_each_entry(iter, <t_channels, list) { + iter->index = free_index++; + iter->free_event_id = 0; + } + } +} + +/* + * Perform channel index compaction when the last trace channel is freed. + * + * Called with lock_markers() and channels mutex held. 
+ */ +static void release_trace_channel(struct kref *kref) +{ + struct ltt_channel_setting *iter, *n; + + list_for_each_entry_safe(iter, n, <t_channels, list) + release_channel_setting(&iter->kref); + if (atomic_read(&index_kref.refcount) == 0) + markers_compact_event_ids(); +} + +/* + * ltt_channel_trace_ref : Is there an existing trace session ? + * + * Must be called with lock_markers() held. + */ +int ltt_channels_trace_ref(void) +{ + return !!atomic_read(&index_kref.refcount); +} +EXPORT_SYMBOL_GPL(ltt_channels_trace_ref); + +/** + * ltt_channels_register - Register a trace channel. + * @name: channel name + * + * Uses refcounting. + */ +int ltt_channels_register(const char *name) +{ + struct ltt_channel_setting *setting; + int ret = 0; + + mutex_lock(<t_channel_mutex); + setting = lookup_channel(name); + if (setting) { + if (atomic_read(&setting->kref.refcount) == 0) + goto init_kref; + else { + kref_get(&setting->kref); + goto end; + } + } + setting = kzalloc(sizeof(*setting), GFP_KERNEL); + if (!setting) { + ret = -ENOMEM; + goto end; + } + list_add(&setting->list, <t_channels); + strncpy(setting->name, name, PATH_MAX-1); + setting->index = free_index++; +init_kref: + kref_init(&setting->kref); +end: + mutex_unlock(<t_channel_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(ltt_channels_register); + +/** + * ltt_channels_unregister - Unregister a trace channel. + * @name: channel name + * @compacting: performing compaction + * + * Must be called with markers mutex held. + */ +int ltt_channels_unregister(const char *name, int compacting) +{ + struct ltt_channel_setting *setting; + int ret = 0; + + if (!compacting) + mutex_lock(<t_channel_mutex); + setting = lookup_channel(name); + if (!setting || atomic_read(&setting->kref.refcount) == 0) { + ret = -ENOENT; + goto end; + } + kref_put(&setting->kref, release_channel_setting); + if (!compacting && atomic_read(&index_kref.refcount) == 0) + markers_compact_event_ids(); +end: + if (!compacting) + mutex_unlock(<t_channel_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(ltt_channels_unregister); + +/** + * ltt_channels_set_default - Set channel default behavior. + * @name: default channel name + * @sb_size: size of the subbuffers + * @n_sb: number of subbuffers + */ +int ltt_channels_set_default(const char *name, + unsigned int sb_size, + unsigned int n_sb) +{ + struct ltt_channel_setting *setting; + int ret = 0; + + mutex_lock(<t_channel_mutex); + setting = lookup_channel(name); + if (!setting || atomic_read(&setting->kref.refcount) == 0) { + ret = -ENOENT; + goto end; + } + setting->sb_size = sb_size; + setting->n_sb = n_sb; +end: + mutex_unlock(<t_channel_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(ltt_channels_set_default); + +/** + * ltt_channels_get_name_from_index - get channel name from channel index + * @index: channel index + * + * Allows to lookup the channel name given its index. Done to keep the name + * information outside of each trace channel instance. 
+ */ +const char *ltt_channels_get_name_from_index(unsigned int index) +{ + struct ltt_channel_setting *iter; + + list_for_each_entry(iter, <t_channels, list) + if (iter->index == index && atomic_read(&iter->kref.refcount)) + return iter->name; + return NULL; +} +EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index); + +static struct ltt_channel_setting * +ltt_channels_get_setting_from_name(const char *name) +{ + struct ltt_channel_setting *iter; + + list_for_each_entry(iter, <t_channels, list) + if (!strcmp(iter->name, name) + && atomic_read(&iter->kref.refcount)) + return iter; + return NULL; +} + +/** + * ltt_channels_get_index_from_name - get channel index from channel name + * @name: channel name + * + * Allows to lookup the channel index given its name. Done to keep the name + * information outside of each trace channel instance. + * Returns -1 if not found. + */ +int ltt_channels_get_index_from_name(const char *name) +{ + struct ltt_channel_setting *setting; + + setting = ltt_channels_get_setting_from_name(name); + if (setting) + return setting->index; + else + return -1; +} +EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name); + +/** + * ltt_channels_trace_alloc - Allocate channel structures for a trace + * + * Use the current channel list to allocate the channels for a trace. + * Called with trace lock held. Does not perform the trace buffer allocation, + * because we must let the user overwrite specific channel sizes. + */ +int ltt_channels_trace_alloc(struct ltt_trace *trace, int overwrite) +{ + struct channel **chan = NULL; + struct ltt_channel_setting *chans, *iter; + int ret = 0; + + lock_markers(); + mutex_lock(<t_channel_mutex); + if (!free_index) + goto end; + if (!atomic_read(&index_kref.refcount)) + kref_init(&index_kref); + else + kref_get(&index_kref); + trace->nr_channels = free_index; + chan = kzalloc(sizeof(struct channel *) * free_index, GFP_KERNEL); + if (!chan) + goto end; + chans = kzalloc(sizeof(struct ltt_channel_setting) * free_index, + GFP_KERNEL); + if (!chan_settings) + goto free_chan; + list_for_each_entry(iter, <t_channels, list) { + if (!atomic_read(&iter->kref.refcount)) + continue; + chans[iter->index].sb_size = iter->sb_size; + chans[iter->index].n_sb = iter->n_sb; + chans[iter->index].overwrite = overwrite; + strncpy(chans[iter->index].filename, iter->name, + NAME_MAX - 1); + chans[iter->index].switch_timer_interval = 0; + chans[iter->index].read_timer_interval = LTT_READ_TIMER_INTERVAL; + } + trace->channels = chan; + trace->settings = chans; +end: + mutex_unlock(<t_channel_mutex); + unlock_markers(); + return ret; + +free_chan: + kfree(chan); + ret = -ENOMEM; + goto end; +} +EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc); + +/** + * ltt_channels_trace_free - Free one trace's channels + * @channels: channels to free + * + * Called with trace lock held. The actual channel buffers must be freed before + * this function is called. + */ +void ltt_channels_trace_free(struct ltt_trace *trace) +{ + lock_markers(); + mutex_lock(<t_channel_mutex); + kfree(trace->settings); + kfree(trace->channels); + kref_put(&index_kref, release_trace_channel); + mutex_unlock(<t_channel_mutex); + unlock_markers(); + marker_update_probes(); +} +EXPORT_SYMBOL_GPL(ltt_channels_trace_free); + +/** + * ltt_channels_trace_set_timer - set switch timer + * @channel: channel + * @interval: interval of timer interrupt, in jiffies. 0 inhibits timer. 
+ */ + +void ltt_channels_trace_set_timer(struct ltt_chan *chan, + unsigned long interval) +{ + chan->switch_timer_interval = interval; +} +EXPORT_SYMBOL_GPL(ltt_channels_trace_set_timer); + +/** + * _ltt_channels_get_event_id - get next event ID for a marker + * @channel: channel name + * @name: event name + * + * Returns a unique event ID (for this channel) or < 0 on error. + * Must be called with channels mutex held. + */ +int _ltt_channels_get_event_id(const char *channel, const char *name) +{ + struct ltt_channel_setting *setting; + int ret; + + setting = ltt_channels_get_setting_from_name(channel); + if (!setting) { + ret = -ENOENT; + goto end; + } + if (strcmp(channel, "metadata") == 0) { + if (strcmp(name, "core_marker_id") == 0) + ret = 0; + else if (strcmp(name, "core_marker_format") == 0) + ret = 1; + else + ret = -ENOENT; + goto end; + } + if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) { + ret = -ENOSPC; + goto end; + } + ret = setting->free_event_id++; +end: + return ret; +} + +/** + * ltt_channels_get_event_id - get next event ID for a marker + * @channel: channel name + * @name: event name + * + * Returns a unique event ID (for this channel) or < 0 on error. + */ +int ltt_channels_get_event_id(const char *channel, const char *name) +{ + int ret; + + mutex_lock(<t_channel_mutex); + ret = _ltt_channels_get_event_id(channel, name); + mutex_unlock(<t_channel_mutex); + return ret; +} + +/** + * ltt_channels_reset_event_ids - reset event IDs at compaction + * + * Called with lock marker and channel mutex held. + */ +void _ltt_channels_reset_event_ids(void) +{ + struct ltt_channel_setting *iter; + + list_for_each_entry(iter, <t_channels, list) + iter->free_event_id = 0; +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management"); diff --git a/deprecated/ltt-channels.h b/deprecated/ltt-channels.h new file mode 100644 index 00000000..9eb604ba --- /dev/null +++ b/deprecated/ltt-channels.h @@ -0,0 +1,83 @@ +#ifndef _LTT_CHANNELS_H +#define _LTT_CHANNELS_H + +/* + * Copyright (C) 2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca) + * + * Dynamic tracer channel allocation. + + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include +#include +#include +#include + +#define EVENTS_PER_CHANNEL 65536 + +#define LTT_READ_TIMER_INTERVAL 10000 /* us */ + +/* + * Forward declaration of locking-specific per-cpu buffer structure. + */ +struct ltt_trace; +struct ltt_serialize_closure; +struct ltt_probe_private_data; + +/* Serialization callback '%k' */ +typedef size_t (*ltt_serialize_cb)(struct ltt_chanbuf *buf, size_t buf_offset, + struct ltt_serialize_closure *closure, + void *serialize_private, + unsigned int stack_pos_ctx, + int *largest_align, + const char *fmt, va_list *args); + +struct ltt_probe_private_data { + struct ltt_trace *trace; /* + * Target trace, for metadata + * or statedump. + */ + ltt_serialize_cb serializer; /* + * Serialization function override. + */ + void *serialize_private; /* + * Private data for serialization + * functions. 
+ */ +}; + +struct ltt_channel_setting { + unsigned int sb_size; + unsigned int n_sb; + int overwrite; + unsigned long switch_timer_interval; + unsigned long read_timer_interval; + struct kref kref; /* Number of references to structure content */ + struct list_head list; + unsigned int index; /* index of channel in trace channel array */ + u16 free_event_id; /* Next event ID to allocate */ + char name[PATH_MAX]; +}; + +int ltt_channels_register(const char *name); +int ltt_channels_unregister(const char *name, int compacting); +int ltt_channels_set_default(const char *name, + unsigned int subbuf_size, + unsigned int subbuf_cnt); +const char *ltt_channels_get_name_from_index(unsigned int index); +int ltt_channels_get_index_from_name(const char *name); +int ltt_channels_trace_ref(void); +struct ltt_chan *ltt_channels_trace_alloc(unsigned int *nr_channels, + int overwrite, int active); +void ltt_channels_trace_free(struct ltt_chan *channels, + unsigned int nr_channels); +void ltt_channels_trace_set_timer(struct ltt_channel_setting *chan, + unsigned long interval); + +int _ltt_channels_get_event_id(const char *channel, const char *name); +int ltt_channels_get_event_id(const char *channel, const char *name); +void _ltt_channels_reset_event_ids(void); + +#endif /* _LTT_CHANNELS_H */ diff --git a/deprecated/ltt-filter.c b/deprecated/ltt-filter.c new file mode 100644 index 00000000..ec113af6 --- /dev/null +++ b/deprecated/ltt-filter.c @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2008 Mathieu Desnoyers + * + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include +#include +#include + +#include "ltt-tracer.h" + +#define LTT_FILTER_DIR "filter" + +/* + * Protects the ltt_filter_dir allocation. + */ +static DEFINE_MUTEX(ltt_filter_mutex); + +static struct dentry *ltt_filter_dir; + +struct dentry *get_filter_root(void) +{ + struct dentry *ltt_root_dentry; + + mutex_lock(<t_filter_mutex); + if (!ltt_filter_dir) { + ltt_root_dentry = get_ltt_root(); + if (!ltt_root_dentry) + goto err_no_root; + + ltt_filter_dir = debugfs_create_dir(LTT_FILTER_DIR, + ltt_root_dentry); + if (!ltt_filter_dir) + printk(KERN_ERR + "ltt_filter_init: failed to create dir %s\n", + LTT_FILTER_DIR); + } +err_no_root: + mutex_unlock(<t_filter_mutex); + return ltt_filter_dir; +} +EXPORT_SYMBOL_GPL(get_filter_root); + +static void __exit ltt_filter_exit(void) +{ + debugfs_remove(ltt_filter_dir); +} + +module_exit(ltt_filter_exit); + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers "); +MODULE_DESCRIPTION("Linux Trace Toolkit Filter"); diff --git a/deprecated/ltt-kprobes.c b/deprecated/ltt-kprobes.c new file mode 100644 index 00000000..7539381b --- /dev/null +++ b/deprecated/ltt-kprobes.c @@ -0,0 +1,493 @@ +/* + * (C) Copyright 2009 - + * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca) + * + * LTTng kprobes integration module. + * + * Dual LGPL v2.1/GPL v2 license. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ltt-type-serializer.h" +#include "ltt-tracer.h" + +#define LTT_KPROBES_DIR "kprobes" +#define LTT_KPROBES_ENABLE "enable" +#define LTT_KPROBES_DISABLE "disable" +#define LTT_KPROBES_LIST "list" + +/* Active LTTng kprobes hash table */ +static DEFINE_MUTEX(ltt_kprobes_mutex); + +#define LTT_KPROBE_HASH_BITS 6 +#define LTT_KPROBE_TABLE_SIZE (1 << LTT_KPROBE_HASH_BITS) +static struct hlist_head ltt_kprobe_table[LTT_KPROBE_TABLE_SIZE]; + +struct kprobe_entry { + struct hlist_node hlist; + struct kprobe kp; + char key[0]; +}; + +static struct dentry *ltt_kprobes_dir, + *ltt_kprobes_enable_dentry, + *ltt_kprobes_disable_dentry, + *ltt_kprobes_list_dentry; + +static int module_exit; + + +static void trace_kprobe_table_entry(void *call_data, struct kprobe_entry *e) +{ + unsigned long addr; + char *namebuf = (char *)__get_free_page(GFP_KERNEL); + + if (e->kp.addr) { + sprint_symbol(namebuf, (unsigned long)e->kp.addr); + addr = (unsigned long)e->kp.addr; + } else { + strncpy(namebuf, e->kp.symbol_name, PAGE_SIZE - 1); + /* TODO : add offset */ + addr = kallsyms_lookup_name(namebuf); + } + if (addr) + __trace_mark(0, kprobe_state, kprobe_table, call_data, + "ip 0x%lX symbol %s", addr, namebuf); + free_page((unsigned long)namebuf); +} + +DEFINE_MARKER(kernel, kprobe, "ip %lX"); + +static int ltt_kprobe_handler_pre(struct kprobe *p, struct pt_regs *regs) +{ + struct marker *marker; + unsigned long data; + + data = (unsigned long)p->addr; + marker = &GET_MARKER(kernel, kprobe); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, sizeof(data), sizeof(data)); + return 0; +} + +static int ltt_register_kprobe(const char *key) +{ + struct hlist_head *head; + struct hlist_node *node; + struct kprobe_entry *e = NULL; + char *symbol_name = NULL; + unsigned long addr; + unsigned int offset = 0; + u32 hash; + size_t key_len = strlen(key) + 1; + int ret; + + if (key_len == 1) + return -ENOENT; /* only \0 */ + + if (sscanf(key, "%li", &addr) != 1) + addr = 0; + + if (!addr) { + const char *symbol_end = NULL; + unsigned int symbol_len; /* includes final \0 */ + + symbol_end = strchr(key, ' '); + if (symbol_end) + symbol_len = symbol_end - key + 1; + else + symbol_len = key_len; + symbol_name = kmalloc(symbol_len, GFP_KERNEL); + if (!symbol_name) { + ret = -ENOMEM; + goto error; + } + memcpy(symbol_name, key, symbol_len - 1); + symbol_name[symbol_len-1] = '\0'; + if (symbol_end) { + symbol_end++; /* start of offset */ + if (sscanf(symbol_end, "%i", &offset) != 1) + offset = 0; + } + } + + hash = jhash(key, key_len-1, 0); + head = <t_kprobe_table[hash & ((1 << LTT_KPROBE_HASH_BITS)-1)]; + hlist_for_each_entry(e, node, head, hlist) { + if (!strcmp(key, e->key)) { + printk(KERN_NOTICE "Kprobe %s busy\n", key); + ret = -EBUSY; + goto error; + } + } + /* + * Using kzalloc here to allocate a variable length element. Could + * cause some memory fragmentation if overused. 
+ */ + e = kzalloc(sizeof(struct kprobe_entry) + key_len, GFP_KERNEL); + if (!e) { + ret = -ENOMEM; + goto error; + } + memcpy(e->key, key, key_len); + hlist_add_head(&e->hlist, head); + e->kp.pre_handler = ltt_kprobe_handler_pre; + e->kp.symbol_name = symbol_name; + e->kp.offset = offset; + e->kp.addr = (void *)addr; + ret = register_kprobe(&e->kp); + if (ret < 0) + goto error_list_del; + trace_kprobe_table_entry(NULL, e); + return 0; + +error_list_del: + hlist_del(&e->hlist); +error: + kfree(symbol_name); + kfree(e); + return ret; +} + +static int ltt_unregister_kprobe(const char *key) +{ + struct hlist_head *head; + struct hlist_node *node; + struct kprobe_entry *e; + int found = 0; + size_t key_len = strlen(key) + 1; + u32 hash; + + hash = jhash(key, key_len-1, 0); + head = <t_kprobe_table[hash & ((1 << LTT_KPROBE_HASH_BITS)-1)]; + hlist_for_each_entry(e, node, head, hlist) { + if (!strcmp(key, e->key)) { + found = 1; + break; + } + } + if (!found) + return -ENOENT; + hlist_del(&e->hlist); + unregister_kprobe(&e->kp); + kfree(e->kp.symbol_name); + kfree(e); + return 0; +} + +static void ltt_unregister_all_kprobes(void) +{ + struct kprobe_entry *e; + struct hlist_head *head; + struct hlist_node *node, *tmp; + unsigned int i; + + for (i = 0; i < LTT_KPROBE_TABLE_SIZE; i++) { + head = <t_kprobe_table[i]; + hlist_for_each_entry_safe(e, node, tmp, head, hlist) { + hlist_del(&e->hlist); + unregister_kprobe(&e->kp); + kfree(e->kp.symbol_name); + kfree(e); + } + } +} + +/* + * Allows to specify either + * - symbol + * - symbol offset + * - address + */ +static ssize_t enable_op_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int err, buf_size; + char *end; + char *buf = (char *)__get_free_page(GFP_KERNEL); + + mutex_lock(<t_kprobes_mutex); + if (module_exit) { + err = -EPERM; + goto error; + } + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto error; + buf[buf_size] = '\0'; + end = strchr(buf, '\n'); + if (end) + *end = '\0'; + err = ltt_register_kprobe(buf); + if (err) + goto error; + + mutex_unlock(<t_kprobes_mutex); + free_page((unsigned long)buf); + return count; +error: + mutex_unlock(<t_kprobes_mutex); + free_page((unsigned long)buf); + return err; +} + +static const struct file_operations ltt_kprobes_enable = { + .write = enable_op_write, +}; + +static ssize_t disable_op_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int err, buf_size; + char *end; + char *buf = (char *)__get_free_page(GFP_KERNEL); + + mutex_lock(<t_kprobes_mutex); + if (module_exit) + goto end; + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto error; + buf[buf_size] = '\0'; + end = strchr(buf, '\n'); + if (end) + *end = '\0'; + err = ltt_unregister_kprobe(buf); + if (err) + goto error; +end: + mutex_unlock(<t_kprobes_mutex); + free_page((unsigned long)buf); + return count; +error: + mutex_unlock(<t_kprobes_mutex); + free_page((unsigned long)buf); + return err; +} + +static const struct file_operations ltt_kprobes_disable = { + .write = disable_op_write, +}; + +/* + * This seqfile read is not perfectly safe, as a kprobe could be removed from + * the hash table between two reads. This will result in an incomplete output. 
+ */ +static struct kprobe_entry *ltt_find_next_kprobe(struct kprobe_entry *prev) +{ + struct kprobe_entry *e; + struct hlist_head *head; + struct hlist_node *node; + unsigned int i; + int found = 0; + + if (prev == (void *)-1UL) + return NULL; + + if (!prev) + found = 1; + + for (i = 0; i < LTT_KPROBE_TABLE_SIZE; i++) { + head = <t_kprobe_table[i]; + hlist_for_each_entry(e, node, head, hlist) { + if (found) + return e; + if (e == prev) + found = 1; + } + } + return NULL; +} + +static void *lk_next(struct seq_file *m, void *p, loff_t *pos) +{ + m->private = ltt_find_next_kprobe(m->private); + if (!m->private) { + m->private = (void *)-1UL; + return NULL; + } + return m->private; +} + +static void *lk_start(struct seq_file *m, loff_t *pos) +{ + mutex_lock(<t_kprobes_mutex); + if (!*pos) + m->private = NULL; + m->private = ltt_find_next_kprobe(m->private); + if (!m->private) { + m->private = (void *)-1UL; + return NULL; + } + return m->private; +} + +static void lk_stop(struct seq_file *m, void *p) +{ + mutex_unlock(<t_kprobes_mutex); +} + +static int lk_show(struct seq_file *m, void *p) +{ + struct kprobe_entry *e = m->private; + seq_printf(m, "%s\n", e->key); + return 0; +} + +static const struct seq_operations ltt_kprobes_list_op = { + .start = lk_start, + .next = lk_next, + .stop = lk_stop, + .show = lk_show, +}; + +static int ltt_kprobes_list_open(struct inode *inode, struct file *file) +{ + int ret; + + ret = seq_open(file, <t_kprobes_list_op); + if (ret == 0) + ((struct seq_file *)file->private_data)->private = NULL; + return ret; +} + +static int ltt_kprobes_list_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + + seq->private = NULL; + return seq_release(inode, file); +} + +static const struct file_operations ltt_kprobes_list = { + .open = ltt_kprobes_list_open, + .read = seq_read, + .llseek = seq_lseek, + .release = ltt_kprobes_list_release, +}; + +/* + * kprobes table dump. Callback invoked by ltt-statedump. ltt-statedump must + * take a reference to this module before calling this callback. 
+ */ +void ltt_dump_kprobes_table(void *call_data) +{ + struct kprobe_entry *e; + struct hlist_head *head; + struct hlist_node *node; + unsigned int i; + + for (i = 0; i < LTT_KPROBE_TABLE_SIZE; i++) { + head = <t_kprobe_table[i]; + hlist_for_each_entry(e, node, head, hlist) + trace_kprobe_table_entry(call_data, e); + } +} +EXPORT_SYMBOL_GPL(ltt_dump_kprobes_table); + +static int __init ltt_kprobes_init(void) +{ + struct dentry *ltt_root_dentry; + int ret = 0; + + printk(KERN_INFO "LTT : ltt-kprobes init\n"); + mutex_lock(<t_kprobes_mutex); + + ltt_root_dentry = get_ltt_root(); + if (!ltt_root_dentry) { + ret = -ENOENT; + goto err_no_root; + } + + ltt_kprobes_dir = debugfs_create_dir(LTT_KPROBES_DIR, ltt_root_dentry); + if (!ltt_kprobes_dir) { + printk(KERN_ERR + "ltt_kprobes_init: failed to create dir %s\n", + LTT_KPROBES_DIR); + ret = -ENOMEM; + goto err_no_dir; + } + + ltt_kprobes_enable_dentry = debugfs_create_file(LTT_KPROBES_ENABLE, + S_IWUSR, + ltt_kprobes_dir, NULL, + <t_kprobes_enable); + if (IS_ERR(ltt_kprobes_enable_dentry) || !ltt_kprobes_enable_dentry) { + printk(KERN_ERR + "ltt_kprobes_init: failed to create file %s\n", + LTT_KPROBES_ENABLE); + ret = -ENOMEM; + goto err_no_enable; + } + + ltt_kprobes_disable_dentry = debugfs_create_file(LTT_KPROBES_DISABLE, + S_IWUSR, + ltt_kprobes_dir, NULL, + <t_kprobes_disable); + if (IS_ERR(ltt_kprobes_disable_dentry) || !ltt_kprobes_disable_dentry) { + printk(KERN_ERR + "ltt_kprobes_init: failed to create file %s\n", + LTT_KPROBES_DISABLE); + ret = -ENOMEM; + goto err_no_disable; + } + + ltt_kprobes_list_dentry = debugfs_create_file(LTT_KPROBES_LIST, + S_IWUSR, ltt_kprobes_dir, + NULL, <t_kprobes_list); + if (IS_ERR(ltt_kprobes_list_dentry) || !ltt_kprobes_list_dentry) { + printk(KERN_ERR + "ltt_kprobes_init: failed to create file %s\n", + LTT_KPROBES_LIST); + ret = -ENOMEM; + goto err_no_list; + } + ltt_statedump_register_kprobes_dump(ltt_dump_kprobes_table); + + mutex_unlock(<t_kprobes_mutex); + return ret; + +err_no_list: + debugfs_remove(ltt_kprobes_disable_dentry); +err_no_disable: + debugfs_remove(ltt_kprobes_enable_dentry); +err_no_enable: + debugfs_remove(ltt_kprobes_dir); +err_no_dir: +err_no_root: + mutex_unlock(<t_kprobes_mutex); + return ret; +} +module_init(ltt_kprobes_init); + +static void __exit ltt_kprobes_exit(void) +{ + printk(KERN_INFO "LTT : ltt-kprobes exit\n"); + mutex_lock(<t_kprobes_mutex); + module_exit = 1; + ltt_statedump_unregister_kprobes_dump(ltt_dump_kprobes_table); + debugfs_remove(ltt_kprobes_list_dentry); + debugfs_remove(ltt_kprobes_disable_dentry); + debugfs_remove(ltt_kprobes_enable_dentry); + debugfs_remove(ltt_kprobes_dir); + ltt_unregister_all_kprobes(); + mutex_unlock(<t_kprobes_mutex); +} +module_exit(ltt_kprobes_exit); + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("Linux Trace Toolkit Kprobes Support"); diff --git a/deprecated/ltt-marker-control.c b/deprecated/ltt-marker-control.c new file mode 100644 index 00000000..2db5c4e9 --- /dev/null +++ b/deprecated/ltt-marker-control.c @@ -0,0 +1,253 @@ +/* + * Copyright (C) 2007 Mathieu Desnoyers + * + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ltt-tracer.h" + +#define DEFAULT_CHANNEL "cpu" +#define DEFAULT_PROBE "default" + +LIST_HEAD(probes_list); + +/* + * Mutex protecting the probe slab cache. + * Nests inside the traces mutex. 
+ */ +DEFINE_MUTEX(probes_mutex); + +struct ltt_available_probe default_probe = { + .name = "default", + .format = NULL, + .probe_func = ltt_vtrace, + .callbacks[0] = ltt_serialize_data, +}; + +static struct kmem_cache *markers_loaded_cachep; +static LIST_HEAD(markers_loaded_list); +/* + * List sorted by name strcmp order. + */ +static LIST_HEAD(probes_registered_list); + +static struct ltt_available_probe *get_probe_from_name(const char *pname) +{ + struct ltt_available_probe *iter; + int comparison, found = 0; + + if (!pname) + pname = DEFAULT_PROBE; + list_for_each_entry(iter, &probes_registered_list, node) { + comparison = strcmp(pname, iter->name); + if (!comparison) + found = 1; + if (comparison <= 0) + break; + } + if (found) + return iter; + else + return NULL; +} + +int ltt_probe_register(struct ltt_available_probe *pdata) +{ + int ret = 0; + int comparison; + struct ltt_available_probe *iter; + + mutex_lock(&probes_mutex); + list_for_each_entry_reverse(iter, &probes_registered_list, node) { + comparison = strcmp(pdata->name, iter->name); + if (!comparison) { + ret = -EBUSY; + goto end; + } else if (comparison > 0) { + /* We belong to the location right after iter. */ + list_add(&pdata->node, &iter->node); + goto end; + } + } + /* Should be added at the head of the list */ + list_add(&pdata->node, &probes_registered_list); +end: + mutex_unlock(&probes_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(ltt_probe_register); + +/* + * Called when a probe does not want to be called anymore. + */ +int ltt_probe_unregister(struct ltt_available_probe *pdata) +{ + int ret = 0; + struct ltt_active_marker *amark, *tmp; + + mutex_lock(&probes_mutex); + list_for_each_entry_safe(amark, tmp, &markers_loaded_list, node) { + if (amark->probe == pdata) { + ret = marker_probe_unregister_private_data( + pdata->probe_func, amark); + if (ret) + goto end; + list_del(&amark->node); + kmem_cache_free(markers_loaded_cachep, amark); + } + } + list_del(&pdata->node); +end: + mutex_unlock(&probes_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(ltt_probe_unregister); + +/* + * Connect marker "mname" to probe "pname". + * Only allow _only_ probe instance to be connected to a marker. + */ +int ltt_marker_connect(const char *channel, const char *mname, + const char *pname) +{ + int ret; + struct ltt_active_marker *pdata; + struct ltt_available_probe *probe; + + ltt_lock_traces(); + mutex_lock(&probes_mutex); + probe = get_probe_from_name(pname); + if (!probe) { + ret = -ENOENT; + goto end; + } + pdata = marker_get_private_data(channel, mname, probe->probe_func, 0); + if (pdata && !IS_ERR(pdata)) { + ret = -EEXIST; + goto end; + } + pdata = kmem_cache_zalloc(markers_loaded_cachep, GFP_KERNEL); + if (!pdata) { + ret = -ENOMEM; + goto end; + } + pdata->probe = probe; + /* + * ID has priority over channel in case of conflict. + */ + ret = marker_probe_register(channel, mname, NULL, + probe->probe_func, pdata); + if (ret) + kmem_cache_free(markers_loaded_cachep, pdata); + else + list_add(&pdata->node, &markers_loaded_list); +end: + mutex_unlock(&probes_mutex); + ltt_unlock_traces(); + return ret; +} +EXPORT_SYMBOL_GPL(ltt_marker_connect); + +/* + * Disconnect marker "mname", probe "pname". 
+ */ +int ltt_marker_disconnect(const char *channel, const char *mname, + const char *pname) +{ + struct ltt_active_marker *pdata; + struct ltt_available_probe *probe; + int ret = 0; + + mutex_lock(&probes_mutex); + probe = get_probe_from_name(pname); + if (!probe) { + ret = -ENOENT; + goto end; + } + pdata = marker_get_private_data(channel, mname, probe->probe_func, 0); + if (IS_ERR(pdata)) { + ret = PTR_ERR(pdata); + goto end; + } else if (!pdata) { + /* + * Not registered by us. + */ + ret = -EPERM; + goto end; + } + ret = marker_probe_unregister(channel, mname, probe->probe_func, pdata); + if (ret) + goto end; + else { + list_del(&pdata->node); + kmem_cache_free(markers_loaded_cachep, pdata); + } +end: + mutex_unlock(&probes_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(ltt_marker_disconnect); + +static void disconnect_all_markers(void) +{ + struct ltt_active_marker *pdata, *tmp; + + list_for_each_entry_safe(pdata, tmp, &markers_loaded_list, node) { + marker_probe_unregister_private_data(pdata->probe->probe_func, + pdata); + list_del(&pdata->node); + kmem_cache_free(markers_loaded_cachep, pdata); + } +} + +static int __init marker_control_init(void) +{ + int ret; + + markers_loaded_cachep = KMEM_CACHE(ltt_active_marker, 0); + + ret = ltt_probe_register(&default_probe); + BUG_ON(ret); + ret = ltt_marker_connect("metadata", "core_marker_format", + DEFAULT_PROBE); + BUG_ON(ret); + ret = ltt_marker_connect("metadata", "core_marker_id", DEFAULT_PROBE); + BUG_ON(ret); + + return 0; +} +module_init(marker_control_init); + +static void __exit marker_control_exit(void) +{ + int ret; + + ret = ltt_marker_disconnect("metadata", "core_marker_format", + DEFAULT_PROBE); + BUG_ON(ret); + ret = ltt_marker_disconnect("metadata", "core_marker_id", + DEFAULT_PROBE); + BUG_ON(ret); + ret = ltt_probe_unregister(&default_probe); + BUG_ON(ret); + disconnect_all_markers(); + kmem_cache_destroy(markers_loaded_cachep); + marker_synchronize_unregister(); +} +module_exit(marker_control_exit); + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("Linux Trace Toolkit Marker Control"); diff --git a/deprecated/ltt-serialize.c b/deprecated/ltt-serialize.c new file mode 100644 index 00000000..50d7132c --- /dev/null +++ b/deprecated/ltt-serialize.c @@ -0,0 +1,968 @@ +/* + * LTTng serializing code. + * + * Copyright Mathieu Desnoyers, March 2007. + * + * Dual LGPL v2.1/GPL v2 license. + * + * See this discussion about weirdness about passing va_list and then va_list to + * functions. (related to array argument passing). va_list seems to be + * implemented as an array on x86_64, but not on i386... This is why we pass a + * va_list * to ltt_vtrace. + */ + +#include +#include +#include +#include + +#include "ltt-tracer.h" + +enum ltt_type { + LTT_TYPE_SIGNED_INT, + LTT_TYPE_UNSIGNED_INT, + LTT_TYPE_STRING, + LTT_TYPE_NONE, +}; + +#define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1) + +/* + * Stack used to keep track of string length at size calculation, passed to + * string copy to handle racy input string updates. + * Can be used by any context; this is ensured by putting the stack position + * back to its original position after using it. + */ +#define TRACER_STACK_LEN (PAGE_SIZE / sizeof(unsigned long)) +static DEFINE_PER_CPU(unsigned long [TRACER_STACK_LEN], + tracer_stack); +static DEFINE_PER_CPU(unsigned int, tracer_stack_pos); + +/* + * Inspired from vsnprintf + * + * The serialization format string supports the basic printf format strings. 
+ * In addition, it defines new formats that can be used to serialize more + * complex/non portable data structures. + * + * Typical use: + * + * field_name %ctype + * field_name #tracetype %ctype + * field_name #tracetype %ctype1 %ctype2 ... + * + * A conversion is performed between format string types supported by GCC and + * the trace type requested. GCC type is used to perform type checking on format + * strings. Trace type is used to specify the exact binary representation + * in the trace. A mapping is done between one or more GCC types to one trace + * type. Sign extension, if required by the conversion, is performed following + * the trace type. + * + * If a gcc format is not declared with a trace format, the gcc format is + * also used as binary representation in the trace. + * + * Strings are supported with %s. + * A single tracetype (sequence) can take multiple c types as parameter. + * + * c types: + * + * see printf(3). + * + * Note: to write a uint32_t in a trace, the following expression is recommended + * si it can be portable: + * + * ("#4u%lu", (unsigned long)var) + * + * trace types: + * + * Serialization specific formats : + * + * Fixed size integers + * #1u writes uint8_t + * #2u writes uint16_t + * #4u writes uint32_t + * #8u writes uint64_t + * #1d writes int8_t + * #2d writes int16_t + * #4d writes int32_t + * #8d writes int64_t + * i.e.: + * #1u%lu #2u%lu #4d%lu #8d%lu #llu%hu #d%lu + * + * * Attributes: + * + * n: (for network byte order) + * #ntracetype%ctype + * is written in the trace in network byte order. + * + * i.e.: #bn4u%lu, #n%lu, #b%u + * + * TODO (eventually) + * Variable length sequence + * #a #tracetype1 #tracetype2 %array_ptr %elem_size %num_elems + * In the trace: + * #a specifies that this is a sequence + * #tracetype1 is the type of elements in the sequence + * #tracetype2 is the type of the element count + * GCC input: + * array_ptr is a pointer to an array that contains members of size + * elem_size. + * num_elems is the number of elements in the array. + * i.e.: #a #lu #lu %p %lu %u + * + * Callback + * #k callback (taken from the probe data) + * The following % arguments are exepected by the callback + * + * i.e.: #a #lu #lu #k %p + * + * Note: No conversion is done from floats to integers, nor from integers to + * floats between c types and trace types. float conversion from double to float + * or from float to double is also not supported. + * + * REMOVE + * %*b expects sizeof(data), data + * where sizeof(data) is 1, 2, 4 or 8 + * + * Fixed length struct, union or array. + * FIXME: unable to extract those sizes statically. + * %*r expects sizeof(*ptr), ptr + * %*.*r expects sizeof(*ptr), __alignof__(*ptr), ptr + * struct and unions removed. + * Fixed length array: + * [%p]#a[len #tracetype] + * i.e.: [%p]#a[12 #lu] + * + * Variable length sequence + * %*.*:*v expects sizeof(*ptr), __alignof__(*ptr), elem_num, ptr + * where elem_num is the number of elements in the sequence + */ +static inline +const char *parse_trace_type(const char *fmt, char *trace_size, + enum ltt_type *trace_type, + unsigned long *attributes) +{ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + /* 't' added for ptrdiff_t */ + + /* parse attributes. 
*/ +repeat: + switch (*fmt) { + case 'n': + *attributes |= LTT_ATTRIBUTE_NETWORK_BYTE_ORDER; + ++fmt; + goto repeat; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || + *fmt == 'Z' || *fmt == 'z' || *fmt == 't' || + *fmt == 'S' || *fmt == '1' || *fmt == '2' || + *fmt == '4' || *fmt == 8) { + qualifier = *fmt; + ++fmt; + if (qualifier == 'l' && *fmt == 'l') { + qualifier = 'L'; + ++fmt; + } + } + + switch (*fmt) { + case 'c': + *trace_type = LTT_TYPE_UNSIGNED_INT; + *trace_size = sizeof(unsigned char); + goto parse_end; + case 's': + *trace_type = LTT_TYPE_STRING; + goto parse_end; + case 'p': + *trace_type = LTT_TYPE_UNSIGNED_INT; + *trace_size = sizeof(void *); + goto parse_end; + case 'd': + case 'i': + *trace_type = LTT_TYPE_SIGNED_INT; + break; + case 'o': + case 'u': + case 'x': + case 'X': + *trace_type = LTT_TYPE_UNSIGNED_INT; + break; + default: + if (!*fmt) + --fmt; + goto parse_end; + } + switch (qualifier) { + case 'L': + *trace_size = sizeof(long long); + break; + case 'l': + *trace_size = sizeof(long); + break; + case 'Z': + case 'z': + *trace_size = sizeof(size_t); + break; + case 't': + *trace_size = sizeof(ptrdiff_t); + break; + case 'h': + *trace_size = sizeof(short); + break; + case '1': + *trace_size = sizeof(uint8_t); + break; + case '2': + *trace_size = sizeof(uint16_t); + break; + case '4': + *trace_size = sizeof(uint32_t); + break; + case '8': + *trace_size = sizeof(uint64_t); + break; + default: + *trace_size = sizeof(int); + } + +parse_end: + return fmt; +} + +/* + * Restrictions: + * Field width and precision are *not* supported. + * %n not supported. + */ +static inline +const char *parse_c_type(const char *fmt, char *c_size, enum ltt_type *c_type, + char *outfmt) +{ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + /* 't' added for ptrdiff_t */ + + /* process flags : ignore standard print formats for now. 
*/ +repeat: + switch (*fmt) { + case '-': + case '+': + case ' ': + case '#': + case '0': + ++fmt; + goto repeat; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || + *fmt == 'Z' || *fmt == 'z' || *fmt == 't' || + *fmt == 'S') { + qualifier = *fmt; + ++fmt; + if (qualifier == 'l' && *fmt == 'l') { + qualifier = 'L'; + ++fmt; + } + } + + if (outfmt) { + if (qualifier != -1) + *outfmt++ = (char)qualifier; + *outfmt++ = *fmt; + *outfmt = 0; + } + + switch (*fmt) { + case 'c': + *c_type = LTT_TYPE_UNSIGNED_INT; + *c_size = sizeof(unsigned char); + goto parse_end; + case 's': + *c_type = LTT_TYPE_STRING; + goto parse_end; + case 'p': + *c_type = LTT_TYPE_UNSIGNED_INT; + *c_size = sizeof(void *); + goto parse_end; + case 'd': + case 'i': + *c_type = LTT_TYPE_SIGNED_INT; + break; + case 'o': + case 'u': + case 'x': + case 'X': + *c_type = LTT_TYPE_UNSIGNED_INT; + break; + default: + if (!*fmt) + --fmt; + goto parse_end; + } + switch (qualifier) { + case 'L': + *c_size = sizeof(long long); + break; + case 'l': + *c_size = sizeof(long); + break; + case 'Z': + case 'z': + *c_size = sizeof(size_t); + break; + case 't': + *c_size = sizeof(ptrdiff_t); + break; + case 'h': + *c_size = sizeof(short); + break; + default: + *c_size = sizeof(int); + } + +parse_end: + return fmt; +} + +static inline +size_t serialize_trace_data(struct ltt_chanbuf *buf, size_t buf_offset, + char trace_size, enum ltt_type trace_type, + char c_size, enum ltt_type c_type, + unsigned int *stack_pos_ctx, + int *largest_align, + va_list *args) +{ + union { + unsigned long v_ulong; + uint64_t v_uint64; + struct { + const char *s; + size_t len; + } v_string; + } tmp; + + /* + * Be careful about sign extension here. + * Sign extension is done with the destination (trace) type. + */ + switch (trace_type) { + case LTT_TYPE_SIGNED_INT: + switch (c_size) { + case 1: + tmp.v_ulong = (long)(int8_t)va_arg(*args, int); + break; + case 2: + tmp.v_ulong = (long)(int16_t)va_arg(*args, int); + break; + case 4: + tmp.v_ulong = (long)(int32_t)va_arg(*args, int); + break; + case 8: + tmp.v_uint64 = va_arg(*args, int64_t); + break; + default: + BUG(); + } + break; + case LTT_TYPE_UNSIGNED_INT: + switch (c_size) { + case 1: + tmp.v_ulong = (unsigned long)(uint8_t)va_arg(*args, unsigned int); + break; + case 2: + tmp.v_ulong = (unsigned long)(uint16_t)va_arg(*args, unsigned int); + break; + case 4: + tmp.v_ulong = (unsigned long)(uint32_t)va_arg(*args, unsigned int); + break; + case 8: + tmp.v_uint64 = va_arg(*args, uint64_t); + break; + default: + BUG(); + } + break; + case LTT_TYPE_STRING: + tmp.v_string.s = va_arg(*args, const char *); + if ((unsigned long)tmp.v_string.s < PAGE_SIZE) + tmp.v_string.s = ""; + if (!buf) { + /* + * Reserve tracer stack entry. + */ + __get_cpu_var(tracer_stack_pos)++; + WARN_ON_ONCE(__get_cpu_var(tracer_stack_pos) + > TRACER_STACK_LEN); + barrier(); + __get_cpu_var(tracer_stack)[*stack_pos_ctx] = + strlen(tmp.v_string.s) + 1; + } + tmp.v_string.len = __get_cpu_var(tracer_stack) + [(*stack_pos_ctx)++]; + if (buf) + ltt_relay_strncpy(&buf->a, buf->a.chan, buf_offset, + tmp.v_string.s, tmp.v_string.len); + buf_offset += tmp.v_string.len; + goto copydone; + default: + BUG(); + } + + /* + * If trace_size is lower or equal to 4 bytes, there is no sign + * extension to do because we are already encoded in a long. Therefore, + * we can combine signed and unsigned ops. 
4 bytes float also works + * with this, because we do a simple copy of 4 bytes into 4 bytes + * without manipulation (and we do not support conversion from integers + * to floats). + * It is also the case if c_size is 8 bytes, which is the largest + * possible integer. + */ + if (ltt_get_alignment()) { + buf_offset += ltt_align(buf_offset, trace_size); + if (largest_align) + *largest_align = max_t(int, *largest_align, trace_size); + } + if (trace_size <= 4 || c_size == 8) { + if (buf) { + switch (trace_size) { + case 1: + if (c_size == 8) + ltt_relay_write(&buf->a, buf->a.chan, + buf_offset, + (uint8_t[]){ (uint8_t)tmp.v_uint64 }, + sizeof(uint8_t)); + else + ltt_relay_write(&buf->a, buf->a.chan, + buf_offset, + (uint8_t[]){ (uint8_t)tmp.v_ulong }, + sizeof(uint8_t)); + break; + case 2: + if (c_size == 8) + ltt_relay_write(&buf->a, buf->a.chan, + buf_offset, + (uint16_t[]){ (uint16_t)tmp.v_uint64 }, + sizeof(uint16_t)); + else + ltt_relay_write(&buf->a, buf->a.chan, + buf_offset, + (uint16_t[]){ (uint16_t)tmp.v_ulong }, + sizeof(uint16_t)); + break; + case 4: + if (c_size == 8) + ltt_relay_write(&buf->a, buf->a.chan, + buf_offset, + (uint32_t[]){ (uint32_t)tmp.v_uint64 }, + sizeof(uint32_t)); + else + ltt_relay_write(&buf->a, buf->a.chan, + buf_offset, + (uint32_t[]){ (uint32_t)tmp.v_ulong }, + sizeof(uint32_t)); + break; + case 8: + /* + * c_size cannot be other than 8 here because + * trace_size > 4. + */ + ltt_relay_write(&buf->a, buf->a.chan, buf_offset, + (uint64_t[]){ (uint64_t)tmp.v_uint64 }, + sizeof(uint64_t)); + break; + default: + BUG(); + } + } + buf_offset += trace_size; + goto copydone; + } else { + /* + * Perform sign extension. + */ + if (buf) { + switch (trace_type) { + case LTT_TYPE_SIGNED_INT: + ltt_relay_write(&buf->a, buf->a.chan, buf_offset, + (int64_t[]){ (int64_t)tmp.v_ulong }, + sizeof(int64_t)); + break; + case LTT_TYPE_UNSIGNED_INT: + ltt_relay_write(&buf->a, buf->a.chan, buf_offset, + (uint64_t[]){ (uint64_t)tmp.v_ulong }, + sizeof(uint64_t)); + break; + default: + BUG(); + } + } + buf_offset += trace_size; + goto copydone; + } + +copydone: + return buf_offset; +} + +notrace size_t +ltt_serialize_data(struct ltt_chanbuf *buf, size_t buf_offset, + struct ltt_serialize_closure *closure, + void *serialize_private, unsigned int stack_pos_ctx, + int *largest_align, const char *fmt, va_list *args) +{ + char trace_size = 0, c_size = 0; /* + * 0 (unset), 1, 2, 4, 8 bytes. + */ + enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE; + unsigned long attributes = 0; + + for (; *fmt ; ++fmt) { + switch (*fmt) { + case '#': + /* tracetypes (#) */ + ++fmt; /* skip first '#' */ + if (*fmt == '#') /* Escaped ## */ + break; + attributes = 0; + fmt = parse_trace_type(fmt, &trace_size, &trace_type, + &attributes); + break; + case '%': + /* c types (%) */ + ++fmt; /* skip first '%' */ + if (*fmt == '%') /* Escaped %% */ + break; + fmt = parse_c_type(fmt, &c_size, &c_type, NULL); + /* + * Output c types if no trace types has been + * specified. 
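+ * e.g. a plain "%d" is traced as a signed int, while a format such as
+ * "#n4u%lu" (used by the statedump markers) records a 4-byte unsigned
+ * integer flagged as network byte order, whatever C type is fetched
+ * from the va_list.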
+ */ + if (!trace_size) + trace_size = c_size; + if (trace_type == LTT_TYPE_NONE) + trace_type = c_type; + if (c_type == LTT_TYPE_STRING) + trace_type = LTT_TYPE_STRING; + /* perform trace write */ + buf_offset = serialize_trace_data(buf, buf_offset, + trace_size, + trace_type, c_size, + c_type, + &stack_pos_ctx, + largest_align, + args); + trace_size = 0; + c_size = 0; + trace_type = LTT_TYPE_NONE; + c_size = LTT_TYPE_NONE; + attributes = 0; + break; + /* default is to skip the text, doing nothing */ + } + } + return buf_offset; +} +EXPORT_SYMBOL_GPL(ltt_serialize_data); + +static inline +uint64_t unserialize_base_type(struct ltt_chanbuf *buf, + size_t *ppos, char trace_size, + enum ltt_type trace_type) +{ + uint64_t tmp; + + *ppos += ltt_align(*ppos, trace_size); + ltt_relay_read(&buf->a, *ppos, &tmp, trace_size); + *ppos += trace_size; + + switch (trace_type) { + case LTT_TYPE_SIGNED_INT: + switch (trace_size) { + case 1: + return (uint64_t)*(int8_t *)&tmp; + case 2: + return (uint64_t)*(int16_t *)&tmp; + case 4: + return (uint64_t)*(int32_t *)&tmp; + case 8: + return tmp; + } + break; + case LTT_TYPE_UNSIGNED_INT: + switch (trace_size) { + case 1: + return (uint64_t)*(uint8_t *)&tmp; + case 2: + return (uint64_t)*(uint16_t *)&tmp; + case 4: + return (uint64_t)*(uint32_t *)&tmp; + case 8: + return tmp; + } + break; + default: + break; + } + + BUG(); + return 0; +} + +static +int serialize_printf_data(struct ltt_chanbuf *buf, size_t *ppos, + char trace_size, enum ltt_type trace_type, + char c_size, enum ltt_type c_type, char *output, + ssize_t outlen, const char *outfmt) +{ + u64 value; + outlen = outlen < 0 ? 0 : outlen; + + if (trace_type == LTT_TYPE_STRING) { + size_t len = ltt_relay_read_cstr(&buf->a, *ppos, output, + outlen); + *ppos += len + 1; + return len; + } + + value = unserialize_base_type(buf, ppos, trace_size, trace_type); + + if (c_size == 8) + return snprintf(output, outlen, outfmt, value); + else + return snprintf(output, outlen, outfmt, (unsigned int)value); +} + +/** + * ltt_serialize_printf - Format a string and place it in a buffer + * @buf: The ltt-relay buffer that store binary data + * @buf_offset: binary data's offset in @buf (should be masked to use as offset) + * @msg_size: return message's length + * @output: The buffer to place the result into + * @outlen: The size of the buffer, including the trailing '\0' + * @fmt: The format string to use + * + * The return value is the number of characters which would + * be generated for the given input, excluding the trailing + * '\0', as per ISO C99. If the return is greater than or equal to @outlen, + * the resulting string is truncated. + */ +size_t ltt_serialize_printf(struct ltt_chanbuf *buf, unsigned long buf_offset, + size_t *msg_size, char *output, size_t outlen, + const char *fmt) +{ + char trace_size = 0, c_size = 0; /* + * 0 (unset), 1, 2, 4, 8 bytes. 
+ */ + enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE; + unsigned long attributes = 0; + char outfmt[4] = "%"; + size_t outpos = 0; + size_t len; + size_t msgpos = buf_offset; + + for (; *fmt ; ++fmt) { + switch (*fmt) { + case '#': + /* tracetypes (#) */ + ++fmt; /* skip first '#' */ + if (*fmt == '#') { /* Escaped ## */ + if (outpos < outlen) + output[outpos] = '#'; + outpos++; + break; + } + attributes = 0; + fmt = parse_trace_type(fmt, &trace_size, &trace_type, + &attributes); + break; + case '%': + /* c types (%) */ + ++fmt; /* skip first '%' */ + if (*fmt == '%') { /* Escaped %% */ + if (outpos < outlen) + output[outpos] = '%'; + outpos++; + break; + } + fmt = parse_c_type(fmt, &c_size, &c_type, outfmt + 1); + /* + * Output c types if no trace types has been + * specified. + */ + if (!trace_size) + trace_size = c_size; + if (trace_type == LTT_TYPE_NONE) + trace_type = c_type; + if (c_type == LTT_TYPE_STRING) + trace_type = LTT_TYPE_STRING; + + /* perform trace printf */ + len = serialize_printf_data(buf, &msgpos, trace_size, + trace_type, c_size, c_type, + output + outpos, + outlen - outpos, outfmt); + outpos += len; + trace_size = 0; + c_size = 0; + trace_type = LTT_TYPE_NONE; + c_size = LTT_TYPE_NONE; + attributes = 0; + break; + default: + if (outpos < outlen) + output[outpos] = *fmt; + outpos++; + break; + } + } + if (msg_size) + *msg_size = (size_t)(msgpos - buf_offset); + /* + * Make sure we end output with terminating \0 when truncated. + */ + if (outpos >= outlen + 1) + output[outlen] = '\0'; + return outpos; +} +EXPORT_SYMBOL_GPL(ltt_serialize_printf); + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + +unsigned int ltt_fmt_largest_align(size_t align_drift, const char *fmt) +{ + char trace_size = 0, c_size = 0; + enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE; + unsigned long attributes = 0; + int largest_align = 1; + + for (; *fmt ; ++fmt) { + switch (*fmt) { + case '#': + /* tracetypes (#) */ + ++fmt; /* skip first '#' */ + if (*fmt == '#') /* Escaped ## */ + break; + attributes = 0; + fmt = parse_trace_type(fmt, &trace_size, &trace_type, + &attributes); + + largest_align = max_t(int, largest_align, trace_size); + if (largest_align >= ltt_get_alignment()) + goto exit; + break; + case '%': + /* c types (%) */ + ++fmt; /* skip first '%' */ + if (*fmt == '%') /* Escaped %% */ + break; + fmt = parse_c_type(fmt, &c_size, &c_type, NULL); + /* + * Output c types if no trace types has been + * specified. + */ + if (!trace_size) + trace_size = c_size; + if (trace_type == LTT_TYPE_NONE) + trace_type = c_type; + if (c_type == LTT_TYPE_STRING) + trace_type = LTT_TYPE_STRING; + + largest_align = max_t(int, largest_align, trace_size); + if (largest_align >= ltt_get_alignment()) + goto exit; + + trace_size = 0; + c_size = 0; + trace_type = LTT_TYPE_NONE; + c_size = LTT_TYPE_NONE; + break; + } + } + +exit: + largest_align = min_t(int, largest_align, ltt_get_alignment()); + return (largest_align - align_drift) & (largest_align - 1); +} +EXPORT_SYMBOL_GPL(ltt_fmt_largest_align); + +#endif + +/* + * Calculate data size + * Assume that the padding for alignment starts at a sizeof(void *) address. 
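+ * The size is obtained by running the serialization callback with a NULL
+ * buffer: serialize_trace_data() then only accumulates alignment and field
+ * sizes (string lengths are pushed on the per-cpu tracer stack so the write
+ * pass can reuse them) and writes nothing. ltt_vtrace() later reserves a
+ * slot of that size and calls ltt_write_event_data() for the actual writes.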
+ */ +static notrace +size_t ltt_get_data_size(struct ltt_serialize_closure *closure, + void *serialize_private, unsigned int stack_pos_ctx, + int *largest_align, const char *fmt, va_list *args) +{ + ltt_serialize_cb cb = closure->callbacks[0]; + closure->cb_idx = 0; + return (size_t)cb(NULL, 0, closure, serialize_private, stack_pos_ctx, + largest_align, fmt, args); +} + +static notrace +void ltt_write_event_data(struct ltt_chanbuf *buf, size_t buf_offset, + struct ltt_serialize_closure *closure, + void *serialize_private, unsigned int stack_pos_ctx, + int largest_align, const char *fmt, va_list *args) +{ + ltt_serialize_cb cb = closure->callbacks[0]; + closure->cb_idx = 0; + buf_offset += ltt_align(buf_offset, largest_align); + cb(buf, buf_offset, closure, serialize_private, stack_pos_ctx, NULL, + fmt, args); +} + + +notrace +void ltt_vtrace(const struct marker *mdata, void *probe_data, void *call_data, + const char *fmt, va_list *args) +{ + int largest_align, ret; + struct ltt_active_marker *pdata; + uint16_t eID; + size_t data_size, slot_size; + unsigned int chan_index; + struct ltt_chanbuf *buf; + struct ltt_chan *chan; + struct ltt_trace *trace, *dest_trace = NULL; + uint64_t tsc; + long buf_offset; + va_list args_copy; + struct ltt_serialize_closure closure; + struct ltt_probe_private_data *private_data = call_data; + void *serialize_private = NULL; + int cpu; + unsigned int rflags; + unsigned int stack_pos_ctx; + + /* + * This test is useful for quickly exiting static tracing when no trace + * is active. We expect to have an active trace when we get here. + */ + if (unlikely(ltt_traces.num_active_traces == 0)) + return; + + rcu_read_lock_sched_notrace(); + cpu = smp_processor_id(); + __get_cpu_var(ltt_nesting)++; + stack_pos_ctx = __get_cpu_var(tracer_stack_pos); + /* + * asm volatile and "memory" clobber prevent the compiler from moving + * instructions out of the ltt nesting count. This is required to ensure + * that probe side-effects which can cause recursion (e.g. unforeseen + * traps, divisions by 0, ...) are triggered within the incremented + * nesting count section. + */ + barrier(); + pdata = (struct ltt_active_marker *)probe_data; + eID = mdata->event_id; + chan_index = mdata->channel_id; + closure.callbacks = pdata->probe->callbacks; + + if (unlikely(private_data)) { + dest_trace = private_data->trace; + if (private_data->serializer) + closure.callbacks = &private_data->serializer; + serialize_private = private_data->serialize_private; + } + + va_copy(args_copy, *args); + /* + * Assumes event payload to start on largest_align alignment. + */ + largest_align = 1; /* must be non-zero for ltt_align */ + data_size = ltt_get_data_size(&closure, serialize_private, + stack_pos_ctx, &largest_align, + fmt, &args_copy); + largest_align = min_t(int, largest_align, sizeof(void *)); + va_end(args_copy); + + /* Iterate on each trace */ + list_for_each_entry_rcu(trace, <t_traces.head, list) { + /* + * Expect the filter to filter out events. If we get here, + * we went through tracepoint activation as a first step. + */ + if (unlikely(dest_trace && trace != dest_trace)) + continue; + if (unlikely(!trace->active)) + continue; + if (unlikely(!ltt_run_filter(trace, eID))) + continue; +#ifdef LTT_DEBUG_EVENT_SIZE + rflags = LTT_RFLAG_ID_SIZE; +#else + if (unlikely(eID >= LTT_FREE_EVENTS)) + rflags = LTT_RFLAG_ID; + else + rflags = 0; +#endif + /* + * Skip channels added after trace creation. 
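+ * Such markers index past this trace's channels[] array, which was sized
+ * when the trace was created, so there is no buffer to write into.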
+ */ + if (unlikely(chan_index >= trace->nr_channels)) + continue; + chan = &trace->channels[chan_index]; + if (!chan->active) + continue; + + /* reserve space : header and data */ + ret = ltt_reserve_slot(chan, trace, data_size, largest_align, + cpu, &buf, &slot_size, &buf_offset, + &tsc, &rflags); + if (unlikely(ret < 0)) + continue; /* buffer full */ + + va_copy(args_copy, *args); + /* Out-of-order write : header and data */ + buf_offset = ltt_write_event_header(&buf->a, &chan->a, + buf_offset, eID, data_size, + tsc, rflags); + ltt_write_event_data(buf, buf_offset, &closure, + serialize_private, stack_pos_ctx, + largest_align, fmt, &args_copy); + va_end(args_copy); + /* Out-of-order commit */ + ltt_commit_slot(buf, chan, buf_offset, data_size, slot_size); + } + /* + * asm volatile and "memory" clobber prevent the compiler from moving + * instructions out of the ltt nesting count. This is required to ensure + * that probe side-effects which can cause recursion (e.g. unforeseen + * traps, divisions by 0, ...) are triggered within the incremented + * nesting count section. + */ + barrier(); + __get_cpu_var(tracer_stack_pos) = stack_pos_ctx; + __get_cpu_var(ltt_nesting)--; + rcu_read_unlock_sched_notrace(); +} +EXPORT_SYMBOL_GPL(ltt_vtrace); + +notrace +void ltt_trace(const struct marker *mdata, void *probe_data, void *call_data, + const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + ltt_vtrace(mdata, probe_data, call_data, fmt, &args); + va_end(args); +} +EXPORT_SYMBOL_GPL(ltt_trace); + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Serializer"); diff --git a/deprecated/ltt-statedump.c b/deprecated/ltt-statedump.c new file mode 100644 index 00000000..06ade69a --- /dev/null +++ b/deprecated/ltt-statedump.c @@ -0,0 +1,441 @@ +/* + * Linux Trace Toolkit Kernel State Dump + * + * Copyright 2005 - + * Jean-Hugues Deschenes + * + * Changes: + * Eric Clement: Add listing of network IP interface + * 2006, 2007 Mathieu Desnoyers Fix kernel threads + * Various updates + * + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ltt-tracer.h" + +#ifdef CONFIG_GENERIC_HARDIRQS +#include +#endif + +#define NB_PROC_CHUNK 20 + +/* + * Protected by the trace lock. 
+ */ +static struct delayed_work cpu_work[NR_CPUS]; +static DECLARE_WAIT_QUEUE_HEAD(statedump_wq); +static atomic_t kernel_threads_to_run; + +static void empty_cb(void *call_data) +{ +} + +static DEFINE_MUTEX(statedump_cb_mutex); +static void (*ltt_dump_kprobes_table_cb)(void *call_data) = empty_cb; + +enum lttng_thread_type { + LTTNG_USER_THREAD = 0, + LTTNG_KERNEL_THREAD = 1, +}; + +enum lttng_execution_mode { + LTTNG_USER_MODE = 0, + LTTNG_SYSCALL = 1, + LTTNG_TRAP = 2, + LTTNG_IRQ = 3, + LTTNG_SOFTIRQ = 4, + LTTNG_MODE_UNKNOWN = 5, +}; + +enum lttng_execution_submode { + LTTNG_NONE = 0, + LTTNG_UNKNOWN = 1, +}; + +enum lttng_process_status { + LTTNG_UNNAMED = 0, + LTTNG_WAIT_FORK = 1, + LTTNG_WAIT_CPU = 2, + LTTNG_EXIT = 3, + LTTNG_ZOMBIE = 4, + LTTNG_WAIT = 5, + LTTNG_RUN = 6, + LTTNG_DEAD = 7, +}; + +#ifdef CONFIG_INET +static void ltt_enumerate_device(struct ltt_probe_private_data *call_data, + struct net_device *dev) +{ + struct in_device *in_dev; + struct in_ifaddr *ifa; + + if (dev->flags & IFF_UP) { + in_dev = in_dev_get(dev); + if (in_dev) { + for (ifa = in_dev->ifa_list; ifa != NULL; + ifa = ifa->ifa_next) + __trace_mark(0, netif_state, + network_ipv4_interface, + call_data, + "name %s address #n4u%lu up %d", + dev->name, + (unsigned long)ifa->ifa_address, + 0); + in_dev_put(in_dev); + } + } else + __trace_mark(0, netif_state, network_ip_interface, + call_data, "name %s address #n4u%lu up %d", + dev->name, 0UL, 0); +} + +static inline int +ltt_enumerate_network_ip_interface(struct ltt_probe_private_data *call_data) +{ + struct net_device *dev; + + read_lock(&dev_base_lock); + for_each_netdev(&init_net, dev) + ltt_enumerate_device(call_data, dev); + read_unlock(&dev_base_lock); + + return 0; +} +#else /* CONFIG_INET */ +static inline int +ltt_enumerate_network_ip_interface(struct ltt_probe_private_data *call_data) +{ + return 0; +} +#endif /* CONFIG_INET */ + + +static inline void +ltt_enumerate_task_fd(struct ltt_probe_private_data *call_data, + struct task_struct *t, char *tmp) +{ + struct fdtable *fdt; + struct file *filp; + unsigned int i; + const unsigned char *path; + + if (!t->files) + return; + + spin_lock(&t->files->file_lock); + fdt = files_fdtable(t->files); + for (i = 0; i < fdt->max_fds; i++) { + filp = fcheck_files(t->files, i); + if (!filp) + continue; + path = d_path(&filp->f_path, tmp, PAGE_SIZE); + /* Make sure we give at least some info */ + __trace_mark(0, fd_state, file_descriptor, call_data, + "filename %s pid %d fd %u", + (IS_ERR(path))?(filp->f_dentry->d_name.name):(path), + t->pid, i); + } + spin_unlock(&t->files->file_lock); +} + +static inline int +ltt_enumerate_file_descriptors(struct ltt_probe_private_data *call_data) +{ + struct task_struct *t = &init_task; + char *tmp = (char *)__get_free_page(GFP_KERNEL); + + /* Enumerate active file descriptors */ + do { + read_lock(&tasklist_lock); + if (t != &init_task) + atomic_dec(&t->usage); + t = next_task(t); + atomic_inc(&t->usage); + read_unlock(&tasklist_lock); + task_lock(t); + ltt_enumerate_task_fd(call_data, t, tmp); + task_unlock(t); + } while (t != &init_task); + free_page((unsigned long)tmp); + return 0; +} + +static inline void +ltt_enumerate_task_vm_maps(struct ltt_probe_private_data *call_data, + struct task_struct *t) +{ + struct mm_struct *mm; + struct vm_area_struct *map; + unsigned long ino; + + /* get_task_mm does a task_lock... 
*/ + mm = get_task_mm(t); + if (!mm) + return; + + map = mm->mmap; + if (map) { + down_read(&mm->mmap_sem); + while (map) { + if (map->vm_file) + ino = map->vm_file->f_dentry->d_inode->i_ino; + else + ino = 0; + __trace_mark(0, vm_state, vm_map, call_data, + "pid %d start %lu end %lu flags %lu " + "pgoff %lu inode %lu", + t->pid, map->vm_start, map->vm_end, + map->vm_flags, map->vm_pgoff << PAGE_SHIFT, + ino); + map = map->vm_next; + } + up_read(&mm->mmap_sem); + } + mmput(mm); +} + +static inline int +ltt_enumerate_vm_maps(struct ltt_probe_private_data *call_data) +{ + struct task_struct *t = &init_task; + + do { + read_lock(&tasklist_lock); + if (t != &init_task) + atomic_dec(&t->usage); + t = next_task(t); + atomic_inc(&t->usage); + read_unlock(&tasklist_lock); + ltt_enumerate_task_vm_maps(call_data, t); + } while (t != &init_task); + return 0; +} + +#ifdef CONFIG_GENERIC_HARDIRQS +static inline void list_interrupts(struct ltt_probe_private_data *call_data) +{ + unsigned int irq; + unsigned long flags = 0; + struct irq_desc *desc; + + /* needs irq_desc */ + for_each_irq_desc(irq, desc) { + struct irqaction *action; + const char *irq_chip_name = + desc->chip->name ? : "unnamed_irq_chip"; + + local_irq_save(flags); + raw_spin_lock(&desc->lock); + for (action = desc->action; action; action = action->next) + __trace_mark(0, irq_state, interrupt, call_data, + "name %s action %s irq_id %u", + irq_chip_name, action->name, irq); + raw_spin_unlock(&desc->lock); + local_irq_restore(flags); + } +} +#else +static inline void list_interrupts(struct ltt_probe_private_data *call_data) +{ +} +#endif + +static inline int +ltt_enumerate_process_states(struct ltt_probe_private_data *call_data) +{ + struct task_struct *t = &init_task; + struct task_struct *p = t; + enum lttng_process_status status; + enum lttng_thread_type type; + enum lttng_execution_mode mode; + enum lttng_execution_submode submode; + + do { + mode = LTTNG_MODE_UNKNOWN; + submode = LTTNG_UNKNOWN; + + read_lock(&tasklist_lock); + if (t != &init_task) { + atomic_dec(&t->usage); + t = next_thread(t); + } + if (t == p) { + p = next_task(t); + t = p; + } + atomic_inc(&t->usage); + read_unlock(&tasklist_lock); + + task_lock(t); + + if (t->exit_state == EXIT_ZOMBIE) + status = LTTNG_ZOMBIE; + else if (t->exit_state == EXIT_DEAD) + status = LTTNG_DEAD; + else if (t->state == TASK_RUNNING) { + /* Is this a forked child that has not run yet? */ + if (list_empty(&t->rt.run_list)) + status = LTTNG_WAIT_FORK; + else + /* + * All tasks are considered as wait_cpu; + * the viewer will sort out if the task was + * really running at this time. + */ + status = LTTNG_WAIT_CPU; + } else if (t->state & + (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) { + /* Task is waiting for something to complete */ + status = LTTNG_WAIT; + } else + status = LTTNG_UNNAMED; + submode = LTTNG_NONE; + + /* + * Verification of t->mm is to filter out kernel threads; + * Viewer will further filter out if a user-space thread was + * in syscall mode or not. 
+ */ + if (t->mm) + type = LTTNG_USER_THREAD; + else + type = LTTNG_KERNEL_THREAD; + + __trace_mark(0, task_state, process_state, call_data, + "pid %d parent_pid %d name %s type %d mode %d " + "submode %d status %d tgid %d", + t->pid, t->parent->pid, t->comm, + type, mode, submode, status, t->tgid); + task_unlock(t); + } while (t != &init_task); + + return 0; +} + +void ltt_statedump_register_kprobes_dump(void (*callback)(void *call_data)) +{ + mutex_lock(&statedump_cb_mutex); + ltt_dump_kprobes_table_cb = callback; + mutex_unlock(&statedump_cb_mutex); +} +EXPORT_SYMBOL_GPL(ltt_statedump_register_kprobes_dump); + +void ltt_statedump_unregister_kprobes_dump(void (*callback)(void *call_data)) +{ + mutex_lock(&statedump_cb_mutex); + ltt_dump_kprobes_table_cb = empty_cb; + mutex_unlock(&statedump_cb_mutex); +} +EXPORT_SYMBOL_GPL(ltt_statedump_unregister_kprobes_dump); + +void ltt_statedump_work_func(struct work_struct *work) +{ + if (atomic_dec_and_test(&kernel_threads_to_run)) + /* If we are the last thread, wake up do_ltt_statedump */ + wake_up(&statedump_wq); +} + +static int do_ltt_statedump(struct ltt_probe_private_data *call_data) +{ + int cpu; + struct module *cb_owner; + + printk(KERN_DEBUG "LTT state dump thread start\n"); + ltt_enumerate_process_states(call_data); + ltt_enumerate_file_descriptors(call_data); + list_modules(call_data); + ltt_enumerate_vm_maps(call_data); + list_interrupts(call_data); + ltt_enumerate_network_ip_interface(call_data); + ltt_dump_swap_files(call_data); + ltt_dump_sys_call_table(call_data); + ltt_dump_softirq_vec(call_data); + ltt_dump_idt_table(call_data); + + mutex_lock(&statedump_cb_mutex); + + cb_owner = __module_address((unsigned long)ltt_dump_kprobes_table_cb); + __module_get(cb_owner); + ltt_dump_kprobes_table_cb(call_data); + module_put(cb_owner); + + mutex_unlock(&statedump_cb_mutex); + + /* + * Fire off a work queue on each CPU. Their sole purpose in life + * is to guarantee that each CPU has been in a state where is was in + * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ). + */ + get_online_cpus(); + atomic_set(&kernel_threads_to_run, num_online_cpus()); + for_each_online_cpu(cpu) { + INIT_DELAYED_WORK(&cpu_work[cpu], ltt_statedump_work_func); + schedule_delayed_work_on(cpu, &cpu_work[cpu], 0); + } + /* Wait for all threads to run */ + __wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) != 0)); + put_online_cpus(); + /* Our work is done */ + printk(KERN_DEBUG "LTT state dump end\n"); + __trace_mark(0, global_state, statedump_end, + call_data, MARK_NOARGS); + return 0; +} + +/* + * Called with trace lock held. 
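+ * ltt_statedump_start() is the entry point registered as the
+ * LTT_FUNCTION_STATEDUMP callback by statedump_init() below.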
+ */ +int ltt_statedump_start(struct ltt_trace *trace) +{ + struct ltt_probe_private_data call_data; + printk(KERN_DEBUG "LTT state dump begin\n"); + + call_data.trace = trace; + call_data.serializer = NULL; + return do_ltt_statedump(&call_data); +} + +static int __init statedump_init(void) +{ + int ret; + printk(KERN_DEBUG "LTT : State dump init\n"); + ret = ltt_module_register(LTT_FUNCTION_STATEDUMP, + ltt_statedump_start, THIS_MODULE); + return ret; +} + +static void __exit statedump_exit(void) +{ + printk(KERN_DEBUG "LTT : State dump exit\n"); + ltt_module_unregister(LTT_FUNCTION_STATEDUMP); +} + +module_init(statedump_init) +module_exit(statedump_exit) + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Jean-Hugues Deschenes"); +MODULE_DESCRIPTION("Linux Trace Toolkit Statedump"); diff --git a/deprecated/ltt-trace-control.c b/deprecated/ltt-trace-control.c new file mode 100644 index 00000000..0a02549d --- /dev/null +++ b/deprecated/ltt-trace-control.c @@ -0,0 +1,1426 @@ +/* + * LTT trace control module over debugfs. + * + * Copyright 2008 - Zhaolei + * + * Copyright 2009 - Gui Jianfeng + * Make mark-control work in debugfs + * + * Dual LGPL v2.1/GPL v2 license. + */ + +/* + * Todo: + * Impl read operations for control file to read attributes + * Create a README file in ltt control dir, for display help info + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ltt-tracer.h" + +#define LTT_CONTROL_DIR "control" +#define MARKERS_CONTROL_DIR "markers" +#define LTT_SETUP_TRACE_FILE "setup_trace" +#define LTT_DESTROY_TRACE_FILE "destroy_trace" + +#define LTT_WRITE_MAXLEN (128) + +struct dentry *ltt_control_dir, *ltt_setup_trace_file, *ltt_destroy_trace_file, + *markers_control_dir; + +/* + * the traces_lock nests inside control_lock. + * control_lock protects the consistency of directories presented in ltt + * directory. + */ +static DEFINE_MUTEX(control_lock); + +/* + * big note about locking for marker control files : + * If a marker control file is added/removed manually racing with module + * load/unload, there may be warning messages appearing, but those two + * operations should be able to execute concurrently without any lock + * synchronizing their operation one wrt another. + * Locking the marker mutex, module mutex and also keeping a mutex here + * from mkdir/rmdir _and_ from the notifier called from module load/unload makes + * life miserable and just asks for deadlocks. + */ + +/* + * lookup a file/dir in parent dir. + * only designed to work well for debugfs. 
+ * (although it maybe ok for other fs) + * + * return: + * file/dir's dentry on success + * NULL on failure + */ +static struct dentry *dir_lookup(struct dentry *parent, const char *name) +{ + struct qstr q; + struct dentry *d; + + q.name = name; + q.len = strlen(name); + q.hash = full_name_hash(q.name, q.len); + + d = d_lookup(parent, &q); + if (d) + dput(d); + + return d; +} + + +static ssize_t alloc_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + int err = 0; + int buf_size; + char *buf = (char *)__get_free_page(GFP_KERNEL); + char *cmd = (char *)__get_free_page(GFP_KERNEL); + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto err_copy_from_user; + buf[buf_size] = 0; + + if (sscanf(buf, "%s", cmd) != 1) { + err = -EPERM; + goto err_get_cmd; + } + + if ((cmd[0] != 'Y' && cmd[0] != 'y' && cmd[0] != '1') || cmd[1]) { + err = -EPERM; + goto err_bad_cmd; + } + + err = ltt_trace_alloc(file->f_dentry->d_parent->d_name.name); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR "alloc_write: ltt_trace_alloc failed: %d\n", + err); + goto err_alloc_trace; + } + + free_page((unsigned long)buf); + free_page((unsigned long)cmd); + return count; + +err_alloc_trace: +err_bad_cmd: +err_get_cmd: +err_copy_from_user: + free_page((unsigned long)buf); + free_page((unsigned long)cmd); + return err; +} + +static const struct file_operations ltt_alloc_operations = { + .write = alloc_write, +}; + + +static ssize_t enabled_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + int err = 0; + int buf_size; + char *buf = (char *)__get_free_page(GFP_KERNEL); + char *cmd = (char *)__get_free_page(GFP_KERNEL); + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto err_copy_from_user; + buf[buf_size] = 0; + + if (sscanf(buf, "%s", cmd) != 1) { + err = -EPERM; + goto err_get_cmd; + } + + if (cmd[1]) { + err = -EPERM; + goto err_bad_cmd; + } + + switch (cmd[0]) { + case 'Y': + case 'y': + case '1': + err = ltt_trace_start(file->f_dentry->d_parent->d_name.name); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR + "enabled_write: ltt_trace_start failed: %d\n", + err); + err = -EPERM; + goto err_start_trace; + } + break; + case 'N': + case 'n': + case '0': + err = ltt_trace_stop(file->f_dentry->d_parent->d_name.name); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR + "enabled_write: ltt_trace_stop failed: %d\n", + err); + err = -EPERM; + goto err_stop_trace; + } + break; + default: + err = -EPERM; + goto err_bad_cmd; + } + + free_page((unsigned long)buf); + free_page((unsigned long)cmd); + return count; + +err_stop_trace: +err_start_trace: +err_bad_cmd: +err_get_cmd: +err_copy_from_user: + free_page((unsigned long)buf); + free_page((unsigned long)cmd); + return err; +} + +static const struct file_operations ltt_enabled_operations = { + .write = enabled_write, +}; + + +static ssize_t trans_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buf = (char *)__get_free_page(GFP_KERNEL); + char *trans_name = (char *)__get_free_page(GFP_KERNEL); + int err = 0; + int buf_size; + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto err_copy_from_user; + buf[buf_size] = 0; + + if (sscanf(buf, "%s", trans_name) != 1) { + err = -EPERM; + goto err_get_transname; + } + + err = ltt_trace_set_type(file->f_dentry->d_parent->d_name.name, + 
trans_name); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR "trans_write: ltt_trace_set_type failed: %d\n", + err); + goto err_set_trans; + } + + free_page((unsigned long)buf); + free_page((unsigned long)trans_name); + return count; + +err_set_trans: +err_get_transname: +err_copy_from_user: + free_page((unsigned long)buf); + free_page((unsigned long)trans_name); + return err; +} + +static const struct file_operations ltt_trans_operations = { + .write = trans_write, +}; + + +static ssize_t channel_subbuf_num_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int err = 0; + int buf_size; + unsigned int num; + const char *channel_name; + const char *trace_name; + char *buf = (char *)__get_free_page(GFP_KERNEL); + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto err_copy_from_user; + buf[buf_size] = 0; + + if (sscanf(buf, "%u", &num) != 1) { + err = -EPERM; + goto err_get_number; + } + + channel_name = file->f_dentry->d_parent->d_name.name; + trace_name = file->f_dentry->d_parent->d_parent->d_parent->d_name.name; + + err = ltt_trace_set_channel_subbufcount(trace_name, channel_name, num); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR "channel_subbuf_num_write: " + "ltt_trace_set_channel_subbufcount failed: %d\n", err); + goto err_set_subbufcount; + } + + free_page((unsigned long)buf); + return count; + +err_set_subbufcount: +err_get_number: +err_copy_from_user: + free_page((unsigned long)buf); + return err; +} + +static const struct file_operations ltt_channel_subbuf_num_operations = { + .write = channel_subbuf_num_write, +}; + + +static +ssize_t channel_subbuf_size_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + int err = 0; + int buf_size; + unsigned int num; + const char *channel_name; + const char *trace_name; + char *buf = (char *)__get_free_page(GFP_KERNEL); + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto err_copy_from_user; + buf[buf_size] = 0; + + if (sscanf(buf, "%u", &num) != 1) { + err = -EPERM; + goto err_get_number; + } + + channel_name = file->f_dentry->d_parent->d_name.name; + trace_name = file->f_dentry->d_parent->d_parent->d_parent->d_name.name; + + err = ltt_trace_set_channel_subbufsize(trace_name, channel_name, num); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR "channel_subbuf_size_write: " + "ltt_trace_set_channel_subbufsize failed: %d\n", err); + goto err_set_subbufsize; + } + + free_page((unsigned long)buf); + return count; + +err_set_subbufsize: +err_get_number: +err_copy_from_user: + free_page((unsigned long)buf); + return err; +} + +static const struct file_operations ltt_channel_subbuf_size_operations = { + .write = channel_subbuf_size_write, +}; + +static +ssize_t channel_switch_timer_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + int err = 0; + int buf_size; + unsigned long num; + const char *channel_name; + const char *trace_name; + char *buf = (char *)__get_free_page(GFP_KERNEL); + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto err_copy_from_user; + buf[buf_size] = 0; + + if (sscanf(buf, "%lu", &num) != 1) { + err = -EPERM; + goto err_get_number; + } + + channel_name = file->f_dentry->d_parent->d_name.name; + trace_name = file->f_dentry->d_parent->d_parent->d_parent->d_name.name; + + /* Convert from ms to us */ + num *= 1000; + + err = 
ltt_trace_set_channel_switch_timer(trace_name, channel_name, num); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR "channel_switch_timer_write: " + "ltt_trace_set_channel_switch_timer failed: %d\n", err); + goto err_set_switch_timer; + } + + free_page((unsigned long)buf); + return count; + +err_set_switch_timer: +err_get_number: +err_copy_from_user: + free_page((unsigned long)buf); + return err; +} + +static struct file_operations ltt_channel_switch_timer_operations = { + .write = channel_switch_timer_write, +}; + +static +ssize_t channel_overwrite_write(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + int err = 0; + int buf_size; + const char *channel_name; + const char *trace_name; + char *buf = (char *)__get_free_page(GFP_KERNEL); + char *cmd = (char *)__get_free_page(GFP_KERNEL); + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto err_copy_from_user; + buf[buf_size] = 0; + + if (sscanf(buf, "%s", cmd) != 1) { + err = -EPERM; + goto err_get_cmd; + } + + if (cmd[1]) { + err = -EPERM; + goto err_bad_cmd; + } + + channel_name = file->f_dentry->d_parent->d_name.name; + trace_name = file->f_dentry->d_parent->d_parent->d_parent->d_name.name; + + switch (cmd[0]) { + case 'Y': + case 'y': + case '1': + err = ltt_trace_set_channel_overwrite(trace_name, channel_name, + 1); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR "channel_overwrite_write: " + "ltt_trace_set_channel_overwrite failed: %d\n", + err); + goto err_set_subbufsize; + } + break; + case 'N': + case 'n': + case '0': + err = ltt_trace_set_channel_overwrite(trace_name, channel_name, + 0); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR "channel_overwrite_write: " + "ltt_trace_set_channel_overwrite failed: %d\n", + err); + goto err_set_subbufsize; + } + break; + default: + err = -EPERM; + goto err_bad_cmd; + } + + free_page((unsigned long)buf); + free_page((unsigned long)cmd); + return count; + +err_set_subbufsize: +err_bad_cmd: +err_get_cmd: +err_copy_from_user: + free_page((unsigned long)buf); + free_page((unsigned long)cmd); + return err; +} + +static const struct file_operations ltt_channel_overwrite_operations = { + .write = channel_overwrite_write, +}; + + +static +ssize_t channel_enable_write(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + int err = 0; + int buf_size; + const char *channel_name; + const char *trace_name; + char *buf = (char *)__get_free_page(GFP_KERNEL); + char *cmd = (char *)__get_free_page(GFP_KERNEL); + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto err_copy_from_user; + buf[buf_size] = 0; + + if (sscanf(buf, "%s", cmd) != 1) { + err = -EPERM; + goto err_get_cmd; + } + + if (cmd[1]) { + err = -EPERM; + goto err_bad_cmd; + } + + channel_name = file->f_dentry->d_parent->d_name.name; + trace_name = file->f_dentry->d_parent->d_parent->d_parent->d_name.name; + + switch (cmd[0]) { + case 'Y': + case 'y': + case '1': + err = ltt_trace_set_channel_enable(trace_name, channel_name, + 1); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR "channel_enable_write: " + "ltt_trace_set_channel_enable failed: %d\n", + err); + goto err_set_subbufsize; + } + break; + case 'N': + case 'n': + case '0': + err = ltt_trace_set_channel_enable(trace_name, channel_name, + 0); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR "channel_enable_write: " + "ltt_trace_set_channel_enable failed: %d\n", + err); + goto err_set_subbufsize; + } + break; + 
default: + err = -EPERM; + goto err_bad_cmd; + } + + free_page((unsigned long)buf); + free_page((unsigned long)cmd); + return count; + +err_set_subbufsize: +err_bad_cmd: +err_get_cmd: +err_copy_from_user: + free_page((unsigned long)buf); + free_page((unsigned long)cmd); + return err; +} + +static const struct file_operations ltt_channel_enable_operations = { + .write = channel_enable_write, +}; + + +static int _create_trace_control_dir(const char *trace_name, + struct ltt_trace *trace) +{ + int err; + struct dentry *trace_root, *channel_root; + struct dentry *tmp_den; + int i; + + /* debugfs/control/trace_name */ + trace_root = debugfs_create_dir(trace_name, ltt_control_dir); + if (IS_ERR(trace_root) || !trace_root) { + printk(KERN_ERR "_create_trace_control_dir: " + "create control root dir of %s failed\n", trace_name); + err = -ENOMEM; + goto err_create_trace_root; + } + + /* debugfs/control/trace_name/alloc */ + tmp_den = debugfs_create_file("alloc", S_IWUSR, trace_root, NULL, + <t_alloc_operations); + if (IS_ERR(tmp_den) || !tmp_den) { + printk(KERN_ERR "_create_trace_control_dir: " + "create file of alloc failed\n"); + err = -ENOMEM; + goto err_create_subdir; + } + + /* debugfs/control/trace_name/trans */ + tmp_den = debugfs_create_file("trans", S_IWUSR, trace_root, NULL, + <t_trans_operations); + if (IS_ERR(tmp_den) || !tmp_den) { + printk(KERN_ERR "_create_trace_control_dir: " + "create file of trans failed\n"); + err = -ENOMEM; + goto err_create_subdir; + } + + /* debugfs/control/trace_name/enabled */ + tmp_den = debugfs_create_file("enabled", S_IWUSR, trace_root, NULL, + <t_enabled_operations); + if (IS_ERR(tmp_den) || !tmp_den) { + printk(KERN_ERR "_create_trace_control_dir: " + "create file of enabled failed\n"); + err = -ENOMEM; + goto err_create_subdir; + } + + /* debugfs/control/trace_name/channel/ */ + channel_root = debugfs_create_dir("channel", trace_root); + if (IS_ERR(channel_root) || !channel_root) { + printk(KERN_ERR "_create_trace_control_dir: " + "create dir of channel failed\n"); + err = -ENOMEM; + goto err_create_subdir; + } + + /* + * Create dir and files in debugfs/ltt/control/trace_name/channel/ + * Following things(without <>) will be created: + * `-- + * `-- + * `-- + * |-- + * | |-- enable + * | |-- overwrite + * | |-- subbuf_num + * | |-- subbuf_size + * | `-- switch_timer + * `-- ... 
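+ * i.e. debugfs/ltt/control/<trace_name>/channel/<channel_name>/{enable,
+ * overwrite, subbuf_num, subbuf_size, switch_timer}, one directory per
+ * active channel.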
+ */ + + for (i = 0; i < trace->nr_channels; i++) { + struct dentry *channel_den; + struct ltt_chan *chan; + + chan = &trace->channels[i]; + if (!chan->active) + continue; + channel_den = debugfs_create_dir(chan->a.filename, + channel_root); + if (IS_ERR(channel_den) || !channel_den) { + printk(KERN_ERR "_create_trace_control_dir: " + "create channel dir of %s failed\n", + chan->a.filename); + err = -ENOMEM; + goto err_create_subdir; + } + + tmp_den = debugfs_create_file("subbuf_num", S_IWUSR, + channel_den, NULL, + <t_channel_subbuf_num_operations); + if (IS_ERR(tmp_den) || !tmp_den) { + printk(KERN_ERR "_create_trace_control_dir: " + "create subbuf_num in %s failed\n", + chan->a.filename); + err = -ENOMEM; + goto err_create_subdir; + } + + tmp_den = debugfs_create_file("subbuf_size", S_IWUSR, + channel_den, NULL, + <t_channel_subbuf_size_operations); + if (IS_ERR(tmp_den) || !tmp_den) { + printk(KERN_ERR "_create_trace_control_dir: " + "create subbuf_size in %s failed\n", + chan->a.filename); + err = -ENOMEM; + goto err_create_subdir; + } + + tmp_den = debugfs_create_file("enable", S_IWUSR, channel_den, + NULL, + <t_channel_enable_operations); + if (IS_ERR(tmp_den) || !tmp_den) { + printk(KERN_ERR "_create_trace_control_dir: " + "create enable in %s failed\n", + chan->a.filename); + err = -ENOMEM; + goto err_create_subdir; + } + + tmp_den = debugfs_create_file("overwrite", S_IWUSR, channel_den, + NULL, + <t_channel_overwrite_operations); + if (IS_ERR(tmp_den) || !tmp_den) { + printk(KERN_ERR "_create_trace_control_dir: " + "create overwrite in %s failed\n", + chan->a.filename); + err = -ENOMEM; + goto err_create_subdir; + } + + tmp_den = debugfs_create_file("switch_timer", S_IWUSR, + channel_den, NULL, + <t_channel_switch_timer_operations); + if (IS_ERR(tmp_den) || !tmp_den) { + printk(KERN_ERR "_create_trace_control_dir: " + "create switch_timer in %s failed\n", + chan->a.filename); + err = -ENOMEM; + goto err_create_subdir; + } + } + + return 0; + +err_create_subdir: + debugfs_remove_recursive(trace_root); +err_create_trace_root: + return err; +} + +static +ssize_t setup_trace_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + int err = 0; + int buf_size; + struct ltt_trace *trace; + char *buf = (char *)__get_free_page(GFP_KERNEL); + char *trace_name = (char *)__get_free_page(GFP_KERNEL); + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto err_copy_from_user; + buf[buf_size] = 0; + + if (sscanf(buf, "%s", trace_name) != 1) { + err = -EPERM; + goto err_get_tracename; + } + + mutex_lock(&control_lock); + ltt_lock_traces(); + + err = _ltt_trace_setup(trace_name); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR + "setup_trace_write: ltt_trace_setup failed: %d\n", err); + goto err_setup_trace; + } + trace = _ltt_trace_find_setup(trace_name); + BUG_ON(!trace); + err = _create_trace_control_dir(trace_name, trace); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR "setup_trace_write: " + "_create_trace_control_dir failed: %d\n", err); + goto err_create_trace_control_dir; + } + + ltt_unlock_traces(); + mutex_unlock(&control_lock); + + free_page((unsigned long)buf); + free_page((unsigned long)trace_name); + return count; + +err_create_trace_control_dir: + ltt_trace_destroy(trace_name); +err_setup_trace: + ltt_unlock_traces(); + mutex_unlock(&control_lock); +err_get_tracename: +err_copy_from_user: + free_page((unsigned long)buf); + free_page((unsigned long)trace_name); + return err; +} + +static const 
struct file_operations ltt_setup_trace_operations = { + .write = setup_trace_write, +}; + +static +ssize_t destroy_trace_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *trace_den; + int buf_size; + int err = 0; + char *buf = (char *)__get_free_page(GFP_KERNEL); + char *trace_name = (char *)__get_free_page(GFP_KERNEL); + + buf_size = min_t(size_t, count, PAGE_SIZE - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto err_copy_from_user; + buf[buf_size] = 0; + + if (sscanf(buf, "%s", trace_name) != 1) { + err = -EPERM; + goto err_get_tracename; + } + + mutex_lock(&control_lock); + + err = ltt_trace_destroy(trace_name); + if (IS_ERR_VALUE(err)) { + printk(KERN_ERR + "destroy_trace_write: ltt_trace_destroy failed: %d\n", + err); + err = -EPERM; + goto err_destroy_trace; + } + + trace_den = dir_lookup(ltt_control_dir, trace_name); + if (!trace_den) { + printk(KERN_ERR + "destroy_trace_write: lookup for %s's dentry failed\n", + trace_name); + err = -ENOENT; + goto err_get_dentry; + } + + debugfs_remove_recursive(trace_den); + + mutex_unlock(&control_lock); + + free_page((unsigned long)buf); + free_page((unsigned long)trace_name); + return count; + +err_get_dentry: +err_destroy_trace: + mutex_unlock(&control_lock); +err_get_tracename: +err_copy_from_user: + free_page((unsigned long)buf); + free_page((unsigned long)trace_name); + return err; +} + +static const struct file_operations ltt_destroy_trace_operations = { + .write = destroy_trace_write, +}; + +static void init_marker_dir(struct dentry *dentry, + const struct inode_operations *opt) +{ + dentry->d_inode->i_op = opt; +} + +static +ssize_t marker_enable_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char *buf; + const char *channel, *marker; + int len, enabled, present; + + marker = filp->f_dentry->d_parent->d_name.name; + channel = filp->f_dentry->d_parent->d_parent->d_name.name; + + len = 0; + buf = (char *)__get_free_page(GFP_KERNEL); + + /* + * Note: we cannot take the marker lock to make these two checks + * atomic, because the marker mutex nests inside the module mutex, taken + * inside the marker present check. 
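+ * The value read back is therefore best effort: 1 means enabled and
+ * present, 2 means enabled but not currently present (pre-enabled),
+ * and 0 means disabled.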
+ */ + enabled = is_marker_enabled(channel, marker); + present = is_marker_present(channel, marker); + + if (enabled && present) + len = snprintf(buf, PAGE_SIZE, "%d\n", 1); + else if (enabled && !present) + len = snprintf(buf, PAGE_SIZE, "%d\n", 2); + else + len = snprintf(buf, PAGE_SIZE, "%d\n", 0); + + + if (len >= PAGE_SIZE) { + len = PAGE_SIZE; + buf[PAGE_SIZE] = '\0'; + } + len = simple_read_from_buffer(ubuf, cnt, ppos, buf, len); + free_page((unsigned long)buf); + + return len; +} + +static +ssize_t marker_enable_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char *buf = (char *)__get_free_page(GFP_KERNEL); + int buf_size; + ssize_t ret = 0; + const char *channel, *marker; + + marker = filp->f_dentry->d_parent->d_name.name; + channel = filp->f_dentry->d_parent->d_parent->d_name.name; + + buf_size = min_t(size_t, cnt, PAGE_SIZE - 1); + ret = copy_from_user(buf, ubuf, buf_size); + if (ret) + goto end; + + buf[buf_size] = 0; + + switch (buf[0]) { + case 'Y': + case 'y': + case '1': + ret = ltt_marker_connect(channel, marker, "default"); + if (ret) + goto end; + break; + case 'N': + case 'n': + case '0': + ret = ltt_marker_disconnect(channel, marker, "default"); + if (ret) + goto end; + break; + default: + ret = -EPERM; + goto end; + } + ret = cnt; +end: + free_page((unsigned long)buf); + return ret; +} + +static const struct file_operations enable_fops = { + .read = marker_enable_read, + .write = marker_enable_write, +}; + +/* + * In practice, the output size should never be larger than 4096 kB. If it + * ever happens, the output will simply be truncated. + */ +static +ssize_t marker_info_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char *buf; + const char *channel, *marker; + int len; + struct marker_iter iter; + + marker = filp->f_dentry->d_parent->d_name.name; + channel = filp->f_dentry->d_parent->d_parent->d_name.name; + + len = 0; + buf = (char *)__get_free_page(GFP_KERNEL); + + if (is_marker_enabled(channel, marker) && + !is_marker_present(channel, marker)) { + len += snprintf(buf + len, PAGE_SIZE - len, + "Marker Pre-enabled\n"); + goto out; + } + + marker_iter_reset(&iter); + marker_iter_start(&iter); + for (; iter.marker != NULL; marker_iter_next(&iter)) { + if (!strcmp(iter.marker->channel, channel) && + !strcmp(iter.marker->name, marker)) + len += snprintf(buf + len, PAGE_SIZE - len, + "Location: %s\n" + "format: \"%s\"\nstate: %d\n" + "event_id: %hu\n" + "call: 0x%p\n" + "probe %s : 0x%p\n\n", +#ifdef CONFIG_MODULES + iter.module ? iter.module->name : +#endif + "Core Kernel", + iter.marker->format, + _imv_read(iter.marker->state), + iter.marker->event_id, + iter.marker->call, + iter.marker->ptype ? + "multi" : "single", iter.marker->ptype ? 
+ (void *)iter.marker->multi : + (void *)iter.marker->single.func); + if (len >= PAGE_SIZE) + break; + } + marker_iter_stop(&iter); + +out: + if (len >= PAGE_SIZE) { + len = PAGE_SIZE; + buf[PAGE_SIZE] = '\0'; + } + + len = simple_read_from_buffer(ubuf, cnt, ppos, buf, len); + free_page((unsigned long)buf); + + return len; +} + +static const struct file_operations info_fops = { + .read = marker_info_read, +}; + +static int marker_mkdir(struct inode *dir, struct dentry *dentry, int mode) +{ + struct dentry *marker_d, *enable_d, *info_d, *channel_d; + int ret; + + ret = 0; + channel_d = (struct dentry *)dir->i_private; + mutex_unlock(&dir->i_mutex); + + marker_d = debugfs_create_dir(dentry->d_name.name, + channel_d); + if (IS_ERR(marker_d)) { + ret = PTR_ERR(marker_d); + goto out; + } + + enable_d = debugfs_create_file("enable", 0644, marker_d, + NULL, &enable_fops); + if (IS_ERR(enable_d) || !enable_d) { + printk(KERN_ERR + "%s: create file of %s failed\n", + __func__, "enable"); + ret = -ENOMEM; + goto remove_marker_dir; + } + + info_d = debugfs_create_file("info", 0644, marker_d, + NULL, &info_fops); + if (IS_ERR(info_d) || !info_d) { + printk(KERN_ERR + "%s: create file of %s failed\n", + __func__, "info"); + ret = -ENOMEM; + goto remove_enable_dir; + } + + goto out; + +remove_enable_dir: + debugfs_remove(enable_d); +remove_marker_dir: + debugfs_remove(marker_d); +out: + mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); + return ret; +} + +static int marker_rmdir(struct inode *dir, struct dentry *dentry) +{ + struct dentry *marker_d, *channel_d; + const char *channel, *name; + int ret, enabled, present; + + ret = 0; + + channel_d = (struct dentry *)dir->i_private; + channel = channel_d->d_name.name; + + marker_d = dir_lookup(channel_d, dentry->d_name.name); + + if (!marker_d) { + ret = -ENOENT; + goto out; + } + + name = marker_d->d_name.name; + + enabled = is_marker_enabled(channel, name); + present = is_marker_present(channel, name); + + if (present || (!present && enabled)) { + ret = -EPERM; + goto out; + } + + mutex_unlock(&dir->i_mutex); + mutex_unlock(&dentry->d_inode->i_mutex); + debugfs_remove_recursive(marker_d); + mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); + mutex_lock(&dentry->d_inode->i_mutex); +out: + return ret; +} + +const struct inode_operations channel_dir_opt = { + .lookup = simple_lookup, + .mkdir = marker_mkdir, + .rmdir = marker_rmdir, +}; + +static int channel_mkdir(struct inode *dir, struct dentry *dentry, int mode) +{ + struct dentry *channel_d; + int ret; + + ret = 0; + mutex_unlock(&dir->i_mutex); + + channel_d = debugfs_create_dir(dentry->d_name.name, + markers_control_dir); + if (IS_ERR(channel_d)) { + ret = PTR_ERR(channel_d); + goto out; + } + + channel_d->d_inode->i_private = (void *)channel_d; + init_marker_dir(channel_d, &channel_dir_opt); +out: + mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); + return ret; +} + +static int channel_rmdir(struct inode *dir, struct dentry *dentry) +{ + struct dentry *channel_d; + int ret; + + ret = 0; + + channel_d = dir_lookup(markers_control_dir, dentry->d_name.name); + if (!channel_d) { + ret = -ENOENT; + goto out; + } + + if (list_empty(&channel_d->d_subdirs)) { + mutex_unlock(&dir->i_mutex); + mutex_unlock(&dentry->d_inode->i_mutex); + debugfs_remove(channel_d); + mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); + mutex_lock(&dentry->d_inode->i_mutex); + } else + ret = -EPERM; + +out: + return ret; +} + +const struct inode_operations root_dir_opt = { + .lookup = simple_lookup, + .mkdir = channel_mkdir, + 
.rmdir = channel_rmdir +}; + +static int build_marker_file(struct marker *marker) +{ + struct dentry *channel_d, *marker_d, *enable_d, *info_d; + int err; + + channel_d = dir_lookup(markers_control_dir, marker->channel); + if (!channel_d) { + channel_d = debugfs_create_dir(marker->channel, + markers_control_dir); + if (IS_ERR(channel_d) || !channel_d) { + printk(KERN_ERR + "%s: build channel dir of %s failed\n", + __func__, marker->channel); + err = -ENOMEM; + goto err_build_fail; + } + channel_d->d_inode->i_private = (void *)channel_d; + init_marker_dir(channel_d, &channel_dir_opt); + } + + marker_d = dir_lookup(channel_d, marker->name); + if (!marker_d) { + marker_d = debugfs_create_dir(marker->name, channel_d); + if (IS_ERR(marker_d) || !marker_d) { + printk(KERN_ERR + "%s: marker dir of %s failed\n", + __func__, marker->name); + err = -ENOMEM; + goto err_build_fail; + } + } + + enable_d = dir_lookup(marker_d, "enable"); + if (!enable_d) { + enable_d = debugfs_create_file("enable", 0644, marker_d, + NULL, &enable_fops); + if (IS_ERR(enable_d) || !enable_d) { + printk(KERN_ERR + "%s: create file of %s failed\n", + __func__, "enable"); + err = -ENOMEM; + goto err_build_fail; + } + } + + info_d = dir_lookup(marker_d, "info"); + if (!info_d) { + info_d = debugfs_create_file("info", 0444, marker_d, + NULL, &info_fops); + if (IS_ERR(info_d) || !info_d) { + printk(KERN_ERR + "%s: create file of %s failed\n", + __func__, "enable"); + err = -ENOMEM; + goto err_build_fail; + } + } + + return 0; + +err_build_fail: + return err; +} + +static int build_marker_control_files(void) +{ + struct marker_iter iter; + int err; + + err = 0; + if (!markers_control_dir) + return -EEXIST; + + marker_iter_reset(&iter); + marker_iter_start(&iter); + for (; iter.marker != NULL; marker_iter_next(&iter)) { + err = build_marker_file(iter.marker); + if (err) + goto out; + } + marker_iter_stop(&iter); + +out: + return err; +} + +#ifdef CONFIG_MODULES +static int remove_marker_control_dir(struct module *mod, struct marker *marker) +{ + struct dentry *channel_d, *marker_d; + const char *channel, *name; + int count; + struct marker_iter iter; + + count = 0; + + channel_d = dir_lookup(markers_control_dir, marker->channel); + if (!channel_d) + return -ENOENT; + channel = channel_d->d_name.name; + + marker_d = dir_lookup(channel_d, marker->name); + if (!marker_d) + return -ENOENT; + name = marker_d->d_name.name; + + marker_iter_reset(&iter); + marker_iter_start(&iter); + for (; iter.marker != NULL; marker_iter_next(&iter)) { + if (!strcmp(iter.marker->channel, channel) && + !strcmp(iter.marker->name, name) && mod != iter.module) + count++; + } + + if (count > 0) + goto end; + + debugfs_remove_recursive(marker_d); + if (list_empty(&channel_d->d_subdirs)) + debugfs_remove(channel_d); + +end: + marker_iter_stop(&iter); + return 0; +} + +static void cleanup_control_dir(struct module *mod, struct marker *begin, + struct marker *end) +{ + struct marker *iter; + + if (!markers_control_dir) + return; + + for (iter = begin; iter < end; iter++) + remove_marker_control_dir(mod, iter); + + return; +} + +static void build_control_dir(struct module *mod, struct marker *begin, + struct marker *end) +{ + struct marker *iter; + int err; + + err = 0; + if (!markers_control_dir) + return; + + for (iter = begin; iter < end; iter++) { + err = build_marker_file(iter); + if (err) + goto err_build_fail; + } + + return; +err_build_fail: + cleanup_control_dir(mod, begin, end); +} + +static int module_notify(struct notifier_block *self, + unsigned 
long val, void *data) +{ + struct module *mod = data; + + switch (val) { + case MODULE_STATE_COMING: + build_control_dir(mod, mod->markers, + mod->markers + mod->num_markers); + break; + case MODULE_STATE_GOING: + cleanup_control_dir(mod, mod->markers, + mod->markers + mod->num_markers); + break; + } + return NOTIFY_DONE; +} +#else +static inline int module_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + return 0; +} +#endif + +static struct notifier_block module_nb = { + .notifier_call = module_notify, +}; + +static int __init ltt_trace_control_init(void) +{ + int err = 0; + struct dentry *ltt_root_dentry; + + ltt_root_dentry = get_ltt_root(); + if (!ltt_root_dentry) { + err = -ENOENT; + goto err_no_root; + } + + ltt_control_dir = debugfs_create_dir(LTT_CONTROL_DIR, ltt_root_dentry); + if (IS_ERR(ltt_control_dir) || !ltt_control_dir) { + printk(KERN_ERR + "ltt_channel_control_init: create dir of %s failed\n", + LTT_CONTROL_DIR); + err = -ENOMEM; + goto err_create_control_dir; + } + + ltt_setup_trace_file = debugfs_create_file(LTT_SETUP_TRACE_FILE, + S_IWUSR, ltt_root_dentry, + NULL, + <t_setup_trace_operations); + if (IS_ERR(ltt_setup_trace_file) || !ltt_setup_trace_file) { + printk(KERN_ERR + "ltt_channel_control_init: create file of %s failed\n", + LTT_SETUP_TRACE_FILE); + err = -ENOMEM; + goto err_create_setup_trace_file; + } + + ltt_destroy_trace_file = debugfs_create_file(LTT_DESTROY_TRACE_FILE, + S_IWUSR, ltt_root_dentry, + NULL, + <t_destroy_trace_operations); + if (IS_ERR(ltt_destroy_trace_file) || !ltt_destroy_trace_file) { + printk(KERN_ERR + "ltt_channel_control_init: create file of %s failed\n", + LTT_DESTROY_TRACE_FILE); + err = -ENOMEM; + goto err_create_destroy_trace_file; + } + + markers_control_dir = debugfs_create_dir(MARKERS_CONTROL_DIR, + ltt_root_dentry); + if (IS_ERR(markers_control_dir) || !markers_control_dir) { + printk(KERN_ERR + "ltt_channel_control_init: create dir of %s failed\n", + MARKERS_CONTROL_DIR); + err = -ENOMEM; + goto err_create_marker_control_dir; + } + + init_marker_dir(markers_control_dir, &root_dir_opt); + + if (build_marker_control_files()) + goto err_build_fail; + + if (!register_module_notifier(&module_nb)) + return 0; + +err_build_fail: + debugfs_remove_recursive(markers_control_dir); + markers_control_dir = NULL; +err_create_marker_control_dir: + debugfs_remove(ltt_destroy_trace_file); +err_create_destroy_trace_file: + debugfs_remove(ltt_setup_trace_file); +err_create_setup_trace_file: + debugfs_remove(ltt_control_dir); +err_create_control_dir: +err_no_root: + return err; +} + +static void __exit ltt_trace_control_exit(void) +{ + struct dentry *trace_dir; + + /* destory all traces */ + list_for_each_entry(trace_dir, <t_control_dir->d_subdirs, + d_u.d_child) { + ltt_trace_stop(trace_dir->d_name.name); + ltt_trace_destroy(trace_dir->d_name.name); + } + + /* clean dirs in debugfs */ + debugfs_remove(ltt_setup_trace_file); + debugfs_remove(ltt_destroy_trace_file); + debugfs_remove_recursive(ltt_control_dir); + debugfs_remove_recursive(markers_control_dir); + unregister_module_notifier(&module_nb); + put_ltt_root(); +} + +module_init(ltt_trace_control_init); +module_exit(ltt_trace_control_exit); + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Zhao Lei "); +MODULE_DESCRIPTION("Linux Trace Toolkit Trace Controller"); diff --git a/deprecated/ltt-tracer.c b/deprecated/ltt-tracer.c new file mode 100644 index 00000000..5cdea932 --- /dev/null +++ b/deprecated/ltt-tracer.c @@ -0,0 +1,1112 @@ +/* + * 
ltt/ltt-tracer.c + * + * Copyright (c) 2005-2010 - Mathieu Desnoyers + * + * Tracing management internal kernel API. Trace buffer allocation/free, tracing + * start/stop. + * + * Author: + * Mathieu Desnoyers + * + * Inspired from LTT : + * Karim Yaghmour (karim@opersys.com) + * Tom Zanussi (zanussi@us.ibm.com) + * Bob Wisniewski (bob@watson.ibm.com) + * And from K42 : + * Bob Wisniewski (bob@watson.ibm.com) + * + * Changelog: + * 22/09/06, Move to the marker/probes mechanism. + * 19/10/05, Complete lockless mechanism. + * 27/05/05, Modular redesign and rewrite. + * + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ltt-tracer.h" + +static void synchronize_trace(void) +{ + synchronize_sched(); +#ifdef CONFIG_PREEMPT_RT + synchronize_rcu(); +#endif +} + +static void async_wakeup(unsigned long data); + +static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0); + +/* Default callbacks for modules */ +notrace +int ltt_filter_control_default(enum ltt_filter_control_msg msg, + struct ltt_trace *trace) +{ + return 0; +} + +int ltt_statedump_default(struct ltt_trace *trace) +{ + return 0; +} + +/* Callbacks for registered modules */ + +int (*ltt_filter_control_functor) + (enum ltt_filter_control_msg msg, struct ltt_trace *trace) = + ltt_filter_control_default; +struct module *ltt_filter_control_owner; + +/* These function pointers are protected by a trace activation check */ +struct module *ltt_run_filter_owner; +int (*ltt_statedump_functor)(struct ltt_trace *trace) = ltt_statedump_default; +struct module *ltt_statedump_owner; + +struct chan_info_struct { + const char *name; + unsigned int def_sb_size; + unsigned int def_n_sb; +} chan_infos[] = { + [LTT_CHANNEL_METADATA] = { + LTT_METADATA_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_FD_STATE] = { + LTT_FD_STATE_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_GLOBAL_STATE] = { + LTT_GLOBAL_STATE_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_IRQ_STATE] = { + LTT_IRQ_STATE_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_MODULE_STATE] = { + LTT_MODULE_STATE_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_NETIF_STATE] = { + LTT_NETIF_STATE_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_SOFTIRQ_STATE] = { + LTT_SOFTIRQ_STATE_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_SWAP_STATE] = { + LTT_SWAP_STATE_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_SYSCALL_STATE] = { + LTT_SYSCALL_STATE_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_TASK_STATE] = { + LTT_TASK_STATE_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_VM_STATE] = { + LTT_VM_STATE_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_MED, + LTT_DEFAULT_N_SUBBUFS_MED, + }, + [LTT_CHANNEL_FS] = { + LTT_FS_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_MED, + LTT_DEFAULT_N_SUBBUFS_MED, + }, + [LTT_CHANNEL_INPUT] = { + LTT_INPUT_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_IPC] = { + LTT_IPC_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_LOW, + LTT_DEFAULT_N_SUBBUFS_LOW, + }, + [LTT_CHANNEL_KERNEL] = { + LTT_KERNEL_CHANNEL, + 
LTT_DEFAULT_SUBBUF_SIZE_HIGH, + LTT_DEFAULT_N_SUBBUFS_HIGH, + }, + [LTT_CHANNEL_MM] = { + LTT_MM_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_MED, + LTT_DEFAULT_N_SUBBUFS_MED, + }, + [LTT_CHANNEL_RCU] = { + LTT_RCU_CHANNEL, + LTT_DEFAULT_SUBBUF_SIZE_MED, + LTT_DEFAULT_N_SUBBUFS_MED, + }, + [LTT_CHANNEL_DEFAULT] = { + NULL, + LTT_DEFAULT_SUBBUF_SIZE_MED, + LTT_DEFAULT_N_SUBBUFS_MED, + }, +}; + +static enum ltt_channels get_channel_type_from_name(const char *name) +{ + int i; + + if (!name) + return LTT_CHANNEL_DEFAULT; + + for (i = 0; i < ARRAY_SIZE(chan_infos); i++) + if (chan_infos[i].name && !strcmp(name, chan_infos[i].name)) + return (enum ltt_channels)i; + + return LTT_CHANNEL_DEFAULT; +} + +/** + * ltt_module_register - LTT module registration + * @name: module type + * @function: callback to register + * @owner: module which owns the callback + * + * The module calling this registration function must ensure that no + * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all() + * must be called between a vmalloc and the moment the memory is made visible to + * "function". This registration acts as a vmalloc_sync_all. Therefore, only if + * the module allocates virtual memory after its registration must it + * synchronize the TLBs. + */ +int ltt_module_register(enum ltt_module_function name, void *function, + struct module *owner) +{ + int ret = 0; + + /* + * Make sure no page fault can be triggered by the module about to be + * registered. We deal with this here so we don't have to call + * vmalloc_sync_all() in each module's init. + */ + vmalloc_sync_all(); + + switch (name) { + case LTT_FUNCTION_RUN_FILTER: + if (ltt_run_filter_owner != NULL) { + ret = -EEXIST; + goto end; + } + ltt_filter_register((ltt_run_filter_functor)function); + ltt_run_filter_owner = owner; + break; + case LTT_FUNCTION_FILTER_CONTROL: + if (ltt_filter_control_owner != NULL) { + ret = -EEXIST; + goto end; + } + ltt_filter_control_functor = + (int (*)(enum ltt_filter_control_msg, + struct ltt_trace *))function; + ltt_filter_control_owner = owner; + break; + case LTT_FUNCTION_STATEDUMP: + if (ltt_statedump_owner != NULL) { + ret = -EEXIST; + goto end; + } + ltt_statedump_functor = + (int (*)(struct ltt_trace *))function; + ltt_statedump_owner = owner; + break; + } +end: + return ret; +} +EXPORT_SYMBOL_GPL(ltt_module_register); + +/** + * ltt_module_unregister - LTT module unregistration + * @name: module type + */ +void ltt_module_unregister(enum ltt_module_function name) +{ + switch (name) { + case LTT_FUNCTION_RUN_FILTER: + ltt_filter_unregister(); + ltt_run_filter_owner = NULL; + /* Wait for preempt sections to finish */ + synchronize_trace(); + break; + case LTT_FUNCTION_FILTER_CONTROL: + ltt_filter_control_functor = ltt_filter_control_default; + ltt_filter_control_owner = NULL; + break; + case LTT_FUNCTION_STATEDUMP: + ltt_statedump_functor = ltt_statedump_default; + ltt_statedump_owner = NULL; + break; + } + +} +EXPORT_SYMBOL_GPL(ltt_module_unregister); + +static LIST_HEAD(ltt_transport_list); + +/** + * ltt_transport_register - LTT transport registration + * @transport: transport structure + * + * Registers a transport which can be used as output to extract the data out of + * LTTng. The module calling this registration function must ensure that no + * trap-inducing code will be executed by the transport functions. E.g. + * vmalloc_sync_all() must be called between a vmalloc and the moment the memory + * is made visible to the transport function. This registration acts as a + * vmalloc_sync_all. 
Therefore, only if the module allocates virtual memory
+ * after its registration must it synchronize the TLBs.
+ */
+void ltt_transport_register(struct ltt_transport *transport)
+{
+	/*
+	 * Make sure no page fault can be triggered by the module about to be
+	 * registered. We deal with this here so we don't have to call
+	 * vmalloc_sync_all() in each module's init.
+	 */
+	vmalloc_sync_all();
+
+	ltt_lock_traces();
+	list_add_tail(&transport->node, &ltt_transport_list);
+	ltt_unlock_traces();
+}
+EXPORT_SYMBOL_GPL(ltt_transport_register);
+
+/**
+ * ltt_transport_unregister - LTT transport unregistration
+ * @transport: transport structure
+ */
+void ltt_transport_unregister(struct ltt_transport *transport)
+{
+	ltt_lock_traces();
+	list_del(&transport->node);
+	ltt_unlock_traces();
+}
+EXPORT_SYMBOL_GPL(ltt_transport_unregister);
+
+static inline
+int is_channel_overwrite(enum ltt_channels chan, enum trace_mode mode)
+{
+	switch (mode) {
+	case LTT_TRACE_NORMAL:
+		return 0;
+	case LTT_TRACE_FLIGHT:
+		switch (chan) {
+		case LTT_CHANNEL_METADATA:
+			return 0;
+		default:
+			return 1;
+		}
+	case LTT_TRACE_HYBRID:
+		switch (chan) {
+		case LTT_CHANNEL_KERNEL:
+		case LTT_CHANNEL_FS:
+		case LTT_CHANNEL_MM:
+		case LTT_CHANNEL_RCU:
+		case LTT_CHANNEL_IPC:
+		case LTT_CHANNEL_INPUT:
+			return 1;
+		default:
+			return 0;
+		}
+	default:
+		return 0;
+	}
+}
+
+/**
+ * _ltt_trace_find - find a trace by given name.
+ * trace_name: trace name
+ *
+ * Returns a pointer to the trace structure, NULL if not found.
+ */
+static struct ltt_trace *_ltt_trace_find(const char *trace_name)
+{
+	struct ltt_trace *trace;
+
+	list_for_each_entry(trace, &ltt_traces.head, list)
+		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
+			return trace;
+
+	return NULL;
+}
+
+/* _ltt_trace_find_setup :
+ * find a trace in setup list by given name.
+ *
+ * Returns a pointer to the trace structure, NULL if not found.
+ */ +struct ltt_trace *_ltt_trace_find_setup(const char *trace_name) +{ + struct ltt_trace *trace; + + list_for_each_entry(trace, <t_traces.setup_head, list) + if (!strncmp(trace->trace_name, trace_name, NAME_MAX)) + return trace; + + return NULL; +} +EXPORT_SYMBOL_GPL(_ltt_trace_find_setup); + +/** + * ltt_release_trace - Release a LTT trace + * @kref : reference count on the trace + */ +void ltt_release_trace(struct kref *kref) +{ + struct ltt_trace *trace = container_of(kref, struct ltt_trace, kref); + + trace->ops->remove_dirs(trace); + module_put(trace->transport->owner); + ltt_channels_trace_free(trace); + kfree(trace); +} +EXPORT_SYMBOL_GPL(ltt_release_trace); + +static inline void prepare_chan_size_num(unsigned int *subbuf_size, + unsigned int *n_subbufs) +{ + /* Make sure the subbuffer size is larger than a page */ + *subbuf_size = max_t(unsigned int, *subbuf_size, PAGE_SIZE); + + /* round to next power of 2 */ + *subbuf_size = 1 << get_count_order(*subbuf_size); + *n_subbufs = 1 << get_count_order(*n_subbufs); + + /* Subbuf size and number must both be power of two */ + WARN_ON(hweight32(*subbuf_size) != 1); + WARN_ON(hweight32(*n_subbufs) != 1); +} + +int _ltt_trace_setup(const char *trace_name) +{ + int err = 0; + struct ltt_trace *new_trace = NULL; + int metadata_index; + unsigned int chan; + enum ltt_channels chantype; + + if (_ltt_trace_find_setup(trace_name)) { + printk(KERN_ERR "LTT : Trace name %s already used.\n", + trace_name); + err = -EEXIST; + goto traces_error; + } + + if (_ltt_trace_find(trace_name)) { + printk(KERN_ERR "LTT : Trace name %s already used.\n", + trace_name); + err = -EEXIST; + goto traces_error; + } + + new_trace = kzalloc(sizeof(struct ltt_trace), GFP_KERNEL); + if (!new_trace) { + printk(KERN_ERR + "LTT : Unable to allocate memory for trace %s\n", + trace_name); + err = -ENOMEM; + goto traces_error; + } + strncpy(new_trace->trace_name, trace_name, NAME_MAX); + if (ltt_channels_trace_alloc(&new_trace->nr_channels, 0)) { + printk(KERN_ERR + "LTT : Unable to allocate memory for chaninfo %s\n", + trace_name); + err = -ENOMEM; + goto trace_free; + } + + /* + * Force metadata channel to no overwrite. + */ + metadata_index = ltt_channels_get_index_from_name("metadata"); + WARN_ON(metadata_index < 0); + new_trace->settings[metadata_index].overwrite = 0; + + /* + * Set hardcoded tracer defaults for some channels + */ + for (chan = 0; chan < new_trace->nr_channels; chan++) { + chantype = get_channel_type_from_name( + ltt_channels_get_name_from_index(chan)); + new_trace->settings[chan].sb_size = + chan_infos[chantype].def_sb_size; + new_trace->settings[chan].n_sb = + chan_infos[chantype].def_n_sb; + } + + list_add(&new_trace->list, <t_traces.setup_head); + return 0; + +trace_free: + kfree(new_trace); +traces_error: + return err; +} +EXPORT_SYMBOL_GPL(_ltt_trace_setup); + + +int ltt_trace_setup(const char *trace_name) +{ + int ret; + ltt_lock_traces(); + ret = _ltt_trace_setup(trace_name); + ltt_unlock_traces(); + return ret; +} +EXPORT_SYMBOL_GPL(ltt_trace_setup); + +/* must be called from within a traces lock. 
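+ *
+ * Worked example of the rounding done by prepare_chan_size_num() above,
+ * assuming 4 KiB pages; the input values are chosen for illustration:
+ *
+ *	unsigned int sb_size = 7000, n_sb = 3;
+ *
+ *	prepare_chan_size_num(&sb_size, &n_sb);
+ *	sb_size is now 8192 (rounded up to a power of two, never below
+ *	PAGE_SIZE) and n_sb is now 4, so both hweight32() checks pass.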
*/ +static void _ltt_trace_free(struct ltt_trace *trace) +{ + list_del(&trace->list); + kfree(trace); +} + +int ltt_trace_set_type(const char *trace_name, const char *trace_type) +{ + int err = 0; + struct ltt_trace *trace; + struct ltt_transport *tran_iter, *transport = NULL; + + ltt_lock_traces(); + + trace = _ltt_trace_find_setup(trace_name); + if (!trace) { + printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); + err = -ENOENT; + goto traces_error; + } + + list_for_each_entry(tran_iter, <t_transport_list, node) { + if (!strcmp(tran_iter->name, trace_type)) { + transport = tran_iter; + break; + } + } + if (!transport) { + printk(KERN_ERR "LTT : Transport %s is not present.\n", + trace_type); + err = -EINVAL; + goto traces_error; + } + + trace->transport = transport; + +traces_error: + ltt_unlock_traces(); + return err; +} +EXPORT_SYMBOL_GPL(ltt_trace_set_type); + +int ltt_trace_set_channel_subbufsize(const char *trace_name, + const char *channel_name, + unsigned int size) +{ + int err = 0; + struct ltt_trace *trace; + int index; + + ltt_lock_traces(); + + trace = _ltt_trace_find_setup(trace_name); + if (!trace) { + printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); + err = -ENOENT; + goto traces_error; + } + + index = ltt_channels_get_index_from_name(channel_name); + if (index < 0) { + printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); + err = -ENOENT; + goto traces_error; + } + trace->settings[index].sb_size = size; + +traces_error: + ltt_unlock_traces(); + return err; +} +EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize); + +int ltt_trace_set_channel_subbufcount(const char *trace_name, + const char *channel_name, + unsigned int cnt) +{ + int err = 0; + struct ltt_trace *trace; + int index; + + ltt_lock_traces(); + + trace = _ltt_trace_find_setup(trace_name); + if (!trace) { + printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); + err = -ENOENT; + goto traces_error; + } + + index = ltt_channels_get_index_from_name(channel_name); + if (index < 0) { + printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); + err = -ENOENT; + goto traces_error; + } + trace->settings[index].n_sb = cnt; + +traces_error: + ltt_unlock_traces(); + return err; +} +EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount); + +int ltt_trace_set_channel_switch_timer(const char *trace_name, + const char *channel_name, + unsigned long interval) +{ + int err = 0; + struct ltt_trace *trace; + int index; + + ltt_lock_traces(); + + trace = _ltt_trace_find_setup(trace_name); + if (!trace) { + printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); + err = -ENOENT; + goto traces_error; + } + + index = ltt_channels_get_index_from_name(channel_name); + if (index < 0) { + printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); + err = -ENOENT; + goto traces_error; + } + ltt_channels_trace_set_timer(&trace->settings[index], interval); + +traces_error: + ltt_unlock_traces(); + return err; +} +EXPORT_SYMBOL_GPL(ltt_trace_set_channel_switch_timer); + +int ltt_trace_set_channel_overwrite(const char *trace_name, + const char *channel_name, + unsigned int overwrite) +{ + int err = 0; + struct ltt_trace *trace; + int index; + + ltt_lock_traces(); + + trace = _ltt_trace_find_setup(trace_name); + if (!trace) { + printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); + err = -ENOENT; + goto traces_error; + } + + /* + * Always put the metadata channel in non-overwrite mode : + * This is a very low traffic channel and it can't afford to have its + * data overwritten : this data (marker 
info) is necessary to be + * able to read the trace. + */ + if (overwrite && !strcmp(channel_name, "metadata")) { + printk(KERN_ERR "LTT : Trying to set metadata channel to " + "overwrite mode\n"); + err = -EINVAL; + goto traces_error; + } + + index = ltt_channels_get_index_from_name(channel_name); + if (index < 0) { + printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); + err = -ENOENT; + goto traces_error; + } + + trace->settings[index].overwrite = overwrite; + +traces_error: + ltt_unlock_traces(); + return err; +} +EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite); + +int ltt_trace_alloc(const char *trace_name) +{ + int err = 0; + struct ltt_trace *trace; + int sb_size, n_sb; + unsigned long flags; + int chan; + const char *channel_name; + + ltt_lock_traces(); + + trace = _ltt_trace_find_setup(trace_name); + if (!trace) { + printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); + err = -ENOENT; + goto traces_error; + } + + kref_init(&trace->kref); + init_waitqueue_head(&trace->kref_wq); + trace->active = 0; + get_trace_clock(); + trace->freq_scale = trace_clock_freq_scale(); + + if (!trace->transport) { + printk(KERN_ERR "LTT : Transport is not set.\n"); + err = -EINVAL; + goto transport_error; + } + if (!try_module_get(trace->transport->owner)) { + printk(KERN_ERR "LTT : Can't lock transport module.\n"); + err = -ENODEV; + goto transport_error; + } + trace->ops = &trace->transport->ops; + + err = trace->ops->create_dirs(trace); + if (err) { + printk(KERN_ERR "LTT : Can't create dir for trace %s.\n", + trace_name); + goto dirs_error; + } + + local_irq_save(flags); + trace->start_freq = trace_clock_frequency(); + trace->start_tsc = trace_clock_read64(); + do_gettimeofday(&trace->start_time); + local_irq_restore(flags); + + for (chan = 0; chan < trace->nr_channels; chan++) { + channel_name = ltt_channels_get_name_from_index(chan); + WARN_ON(!channel_name); + /* + * note: sb_size and n_sb will be overwritten with updated + * values by channel creation. + */ + sb_size = trace->settings[chan].sb_size; + n_sb = trace->settings[chan].n_sb; + prepare_chan_size_num(&sb_size, &n_sb); + trace->channels[chan] = ltt_create_channel(channel_name, + trace, NULL, sb_size, n_sb, + trace->settings[chan].overwrite, + trace->settings[chan].switch_timer_interval, + trace->settings[chan].read_timer_interval); + if (err != 0) { + printk(KERN_ERR "LTT : Can't create channel %s.\n", + channel_name); + goto create_channel_error; + } + } + + list_del(&trace->list); + if (list_empty(<t_traces.head)) + set_kernel_trace_flag_all_tasks(); + list_add_rcu(&trace->list, <t_traces.head); + synchronize_trace(); + + ltt_unlock_traces(); + + return 0; + +create_channel_error: + for (chan--; chan >= 0; chan--) + ltt_channel_destroy(trace->channels[chan]); + trace->ops->remove_dirs(trace); + +dirs_error: + module_put(trace->transport->owner); +transport_error: + put_trace_clock(); +traces_error: + ltt_unlock_traces(); + return err; +} +EXPORT_SYMBOL_GPL(ltt_trace_alloc); + +/* + * It is worked as a wrapper for current version of ltt_control.ko. + * We will make a new ltt_control based on debugfs, and control each channel's + * buffer. 
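+ *
+ * A minimal sketch of the call sequence this wrapper drives, using only the
+ * functions defined in this file; the trace name and the "relay" transport
+ * name are assumptions for illustration:
+ *
+ *	err = ltt_trace_setup("mytrace");
+ *	if (!err)
+ *		err = ltt_trace_set_type("mytrace", "relay");
+ *	if (!err)
+ *		err = ltt_trace_alloc("mytrace");
+ *	if (!err)
+ *		err = ltt_trace_start("mytrace");
+ *	...
+ *	ltt_trace_stop("mytrace");
+ *	ltt_trace_destroy("mytrace");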
+ */ +static +int ltt_trace_create(const char *trace_name, const char *trace_type, + enum trace_mode mode, + unsigned int subbuf_size_low, unsigned int n_subbufs_low, + unsigned int subbuf_size_med, unsigned int n_subbufs_med, + unsigned int subbuf_size_high, unsigned int n_subbufs_high) +{ + int err = 0; + + err = ltt_trace_setup(trace_name); + if (IS_ERR_VALUE(err)) + return err; + + err = ltt_trace_set_type(trace_name, trace_type); + if (IS_ERR_VALUE(err)) + return err; + + err = ltt_trace_alloc(trace_name); + if (IS_ERR_VALUE(err)) + return err; + + return err; +} + +/* Must be called while sure that trace is in the list. */ +static int _ltt_trace_destroy(struct ltt_trace *trace) +{ + int err = -EPERM; + + if (trace == NULL) { + err = -ENOENT; + goto traces_error; + } + if (trace->active) { + printk(KERN_ERR + "LTT : Can't destroy trace %s : tracer is active\n", + trace->trace_name); + err = -EBUSY; + goto active_error; + } + /* Everything went fine */ + list_del_rcu(&trace->list); + synchronize_trace(); + if (list_empty(<t_traces.head)) { + clear_kernel_trace_flag_all_tasks(); + } + return 0; + + /* error handling */ +active_error: +traces_error: + return err; +} + +/* Sleepable part of the destroy */ +static void __ltt_trace_destroy(struct ltt_trace *trace) +{ + int i; + + for (i = 0; i < trace->nr_channels; i++) + ltt_channel_destroy(trace->channels[i]); + kref_put(&trace->kref, ltt_release_trace); +} + +int ltt_trace_destroy(const char *trace_name) +{ + int err = 0; + struct ltt_trace *trace; + + ltt_lock_traces(); + + trace = _ltt_trace_find(trace_name); + if (trace) { + err = _ltt_trace_destroy(trace); + if (err) + goto error; + + __ltt_trace_destroy(trace); + ltt_unlock_traces(); + put_trace_clock(); + + return 0; + } + + trace = _ltt_trace_find_setup(trace_name); + if (trace) { + _ltt_trace_free(trace); + ltt_unlock_traces(); + return 0; + } + + err = -ENOENT; + + /* Error handling */ +error: + ltt_unlock_traces(); + return err; +} +EXPORT_SYMBOL_GPL(ltt_trace_destroy); + +/* must be called from within a traces lock. */ +static int _ltt_trace_start(struct ltt_trace *trace) +{ + int err = 0; + + if (trace == NULL) { + err = -ENOENT; + goto traces_error; + } + if (trace->active) + printk(KERN_INFO "LTT : Tracing already active for trace %s\n", + trace->trace_name); + if (!try_module_get(ltt_run_filter_owner)) { + err = -ENODEV; + printk(KERN_ERR "LTT : Can't lock filter module.\n"); + goto get_ltt_run_filter_error; + } + trace->active = 1; + /* Read by trace points without protection : be careful */ + ltt_traces.num_active_traces++; + return err; + + /* error handling */ +get_ltt_run_filter_error: +traces_error: + return err; +} + +int ltt_trace_start(const char *trace_name) +{ + int err = 0; + struct ltt_trace *trace; + + ltt_lock_traces(); + + trace = _ltt_trace_find(trace_name); + err = _ltt_trace_start(trace); + if (err) + goto no_trace; + + ltt_unlock_traces(); + + /* + * Call the kernel state dump. + * Events will be mixed with real kernel events, it's ok. + * Notice that there is no protection on the trace : that's exactly + * why we iterate on the list and check for trace equality instead of + * directly using this trace handle inside the logging function. 
+ */ + + ltt_dump_marker_state(trace); + + if (!try_module_get(ltt_statedump_owner)) { + err = -ENODEV; + printk(KERN_ERR + "LTT : Can't lock state dump module.\n"); + } else { + ltt_statedump_functor(trace); + module_put(ltt_statedump_owner); + } + + return err; + + /* Error handling */ +no_trace: + ltt_unlock_traces(); + return err; +} +EXPORT_SYMBOL_GPL(ltt_trace_start); + +/* must be called from within traces lock */ +static int _ltt_trace_stop(struct ltt_trace *trace) +{ + int err = -EPERM; + + if (trace == NULL) { + err = -ENOENT; + goto traces_error; + } + if (!trace->active) + printk(KERN_INFO "LTT : Tracing not active for trace %s\n", + trace->trace_name); + if (trace->active) { + trace->active = 0; + ltt_traces.num_active_traces--; + synchronize_trace(); /* Wait for each tracing to be finished */ + } + module_put(ltt_run_filter_owner); + /* Everything went fine */ + return 0; + + /* Error handling */ +traces_error: + return err; +} + +int ltt_trace_stop(const char *trace_name) +{ + int err = 0; + struct ltt_trace *trace; + + ltt_lock_traces(); + trace = _ltt_trace_find(trace_name); + err = _ltt_trace_stop(trace); + ltt_unlock_traces(); + return err; +} +EXPORT_SYMBOL_GPL(ltt_trace_stop); + +/** + * ltt_control - Trace control in-kernel API + * @msg: Action to perform + * @trace_name: Trace on which the action must be done + * @trace_type: Type of trace (normal, flight, hybrid) + * @args: Arguments specific to the action + */ +int ltt_control(enum ltt_control_msg msg, const char *trace_name, + const char *trace_type, union ltt_control_args args) +{ + int err = -EPERM; + + printk(KERN_ALERT "ltt_control : trace %s\n", trace_name); + switch (msg) { + case LTT_CONTROL_START: + printk(KERN_DEBUG "Start tracing %s\n", trace_name); + err = ltt_trace_start(trace_name); + break; + case LTT_CONTROL_STOP: + printk(KERN_DEBUG "Stop tracing %s\n", trace_name); + err = ltt_trace_stop(trace_name); + break; + case LTT_CONTROL_CREATE_TRACE: + printk(KERN_DEBUG "Creating trace %s\n", trace_name); + err = ltt_trace_create(trace_name, trace_type, + args.new_trace.mode, + args.new_trace.subbuf_size_low, + args.new_trace.n_subbufs_low, + args.new_trace.subbuf_size_med, + args.new_trace.n_subbufs_med, + args.new_trace.subbuf_size_high, + args.new_trace.n_subbufs_high); + break; + case LTT_CONTROL_DESTROY_TRACE: + printk(KERN_DEBUG "Destroying trace %s\n", trace_name); + err = ltt_trace_destroy(trace_name); + break; + } + return err; +} +EXPORT_SYMBOL_GPL(ltt_control); + +/** + * ltt_filter_control - Trace filter control in-kernel API + * @msg: Action to perform on the filter + * @trace_name: Trace on which the action must be done + */ +int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name) +{ + int err; + struct ltt_trace *trace; + + printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name); + ltt_lock_traces(); + trace = _ltt_trace_find(trace_name); + if (trace == NULL) { + printk(KERN_ALERT + "Trace does not exist. 
Cannot proxy control request\n");
+		err = -ENOENT;
+		goto trace_error;
+	}
+	if (!try_module_get(ltt_filter_control_owner)) {
+		err = -ENODEV;
+		goto get_module_error;
+	}
+	switch (msg) {
+	case LTT_FILTER_DEFAULT_ACCEPT:
+		printk(KERN_DEBUG
+		       "Proxy filter default accept %s\n", trace_name);
+		err = (*ltt_filter_control_functor)(msg, trace);
+		break;
+	case LTT_FILTER_DEFAULT_REJECT:
+		printk(KERN_DEBUG
+		       "Proxy filter default reject %s\n", trace_name);
+		err = (*ltt_filter_control_functor)(msg, trace);
+		break;
+	default:
+		err = -EPERM;
+	}
+	module_put(ltt_filter_control_owner);
+
+get_module_error:
+trace_error:
+	ltt_unlock_traces();
+	return err;
+}
+EXPORT_SYMBOL_GPL(ltt_filter_control);
+
+int __init ltt_init(void)
+{
+	/* Make sure no page fault can be triggered by this module */
+	vmalloc_sync_all();
+	init_timer_deferrable(&ltt_async_wakeup_timer);
+	return 0;
+}
+
+module_init(ltt_init)
+
+static void __exit ltt_exit(void)
+{
+	struct ltt_trace *trace;
+	struct list_head *pos, *n;
+
+	ltt_lock_traces();
+	/* Stop each trace, currently being read by RCU read-side */
+	list_for_each_entry_rcu(trace, &ltt_traces.head, list)
+		_ltt_trace_stop(trace);
+	/* Wait for quiescent state. Readers have preemption disabled. */
+	synchronize_trace();
+	/* Safe iteration is now permitted. It does not have to be RCU-safe
+	 * because no readers are left. */
+	list_for_each_safe(pos, n, &ltt_traces.head) {
+		trace = container_of(pos, struct ltt_trace, list);
+		/* _ltt_trace_destroy does a synchronize_trace() */
+		_ltt_trace_destroy(trace);
+		__ltt_trace_destroy(trace);
+	}
+	/* free traces in pre-alloc status */
+	list_for_each_safe(pos, n, &ltt_traces.setup_head) {
+		trace = container_of(pos, struct ltt_trace, list);
+		_ltt_trace_free(trace);
+	}
+
+	ltt_unlock_traces();
+}
+
+module_exit(ltt_exit)
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers");
+MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");
diff --git a/deprecated/ltt-type-serializer.c b/deprecated/ltt-type-serializer.c
new file mode 100644
index 00000000..ed589c73
--- /dev/null
+++ b/deprecated/ltt-type-serializer.c
@@ -0,0 +1,94 @@
+/**
+ * ltt-type-serializer.c
+ *
+ * LTTng specialized type serializer.
+ *
+ * Copyright Mathieu Desnoyers, 2008.
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+#include
+
+#include "ltt-type-serializer.h"
+#include "ltt-relay-lockless.h"
+
+notrace
+void _ltt_specialized_trace(void *probe_data,
+		void *serialize_private, unsigned int data_size,
+		unsigned int largest_align)
+{
+	struct ltt_event *event = probe_data;
+	int ret;
+	uint16_t eID;
+	size_t slot_size;
+	struct ltt_chanbuf *buf;
+	struct ltt_channel *chan;
+	struct ltt_session *session;
+	uint64_t tsc;
+	long buf_offset;
+	int cpu;
+	unsigned int rflags;
+
+	/* disabled from tracepoints rcu_read_lock_sched_notrace(); */
+	cpu = smp_processor_id();
+	__get_cpu_var(ltt_nesting)++;
+	/*
+	 * asm volatile and "memory" clobber prevent the compiler from moving
+	 * instructions out of the ltt nesting count. This is required to ensure
+	 * that probe side-effects which can cause recursion (e.g. unforeseen
+	 * traps, divisions by 0, ...) are triggered within the incremented
+	 * nesting count section.
+ */ + barrier(); + eID = event->id; + chan = event->chan; + session = chan->session; + + if (unlikely(!session->active)) + goto skip; + if (unlikely(!ltt_run_filter(session, chan, event))) + goto skip; +#ifdef LTT_DEBUG_EVENT_SIZE + rflags = LTT_RFLAG_ID_SIZE; +#else + if (unlikely(eID >= LTT_FREE_EVENTS)) + rflags = LTT_RFLAG_ID; + else + rflags = 0; +#endif + /* reserve space : header and data */ + ret = ltt_reserve_slot(chan, trace, data_size, largest_align, + cpu, &buf, &slot_size, &buf_offset, &tsc, + &rflags); + if (unlikely(ret < 0)) + goto skip; /* buffer full */ + + /* Out-of-order write : header and data */ + buf_offset = ltt_write_event_header(&buf->a, &chan->a, + buf_offset, eID, data_size, + tsc, rflags); + if (data_size) { + buf_offset += ltt_align(buf_offset, largest_align); + ltt_relay_write(&buf->a, &chan->a, buf_offset, + serialize_private, data_size); + buf_offset += data_size; + } + /* Out-of-order commit */ + ltt_commit_slot(buf, chan, buf_offset, data_size, slot_size); +skip: + /* + * asm volatile and "memory" clobber prevent the compiler from moving + * instructions out of the ltt nesting count. This is required to ensure + * that probe side-effects which can cause recursion (e.g. unforeseen + * traps, divisions by 0, ...) are triggered within the incremented + * nesting count section. + */ + barrier(); + __get_cpu_var(ltt_nesting)--; + /* disabled from tracepoints rcu_read_unlock_sched_notrace(); */ +} +EXPORT_SYMBOL_GPL(_ltt_specialized_trace); + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("LTT type serializer"); diff --git a/deprecated/ltt-type-serializer.h b/deprecated/ltt-type-serializer.h new file mode 100644 index 00000000..fb870c8f --- /dev/null +++ b/deprecated/ltt-type-serializer.h @@ -0,0 +1,186 @@ +#ifndef _LTT_TYPE_SERIALIZER_H +#define _LTT_TYPE_SERIALIZER_H + +#include /* For IFNAMSIZ */ + +#include "ltt-tracer.h" + +/* + * largest_align must be non-zero, equal to the minimum between the largest type + * and sizeof(void *). + */ +extern void _ltt_specialized_trace(void *probe_data, + void *serialize_private, unsigned int data_size, + unsigned int largest_align); + +/* + * Statically check that 0 < largest_align < sizeof(void *) to make sure it is + * dumb-proof. It will make sure 0 is changed into 1 and unsigned long long is + * changed into sizeof(void *) on 32-bit architectures. + */ +static inline void ltt_specialized_trace(void *probe_data, + void *serialize_private, unsigned int data_size, + unsigned int largest_align) +{ + largest_align = min_t(unsigned int, largest_align, sizeof(void *)); + largest_align = max_t(unsigned int, largest_align, 1); + _ltt_specialized_trace(probe_data, serialize_private, data_size, + largest_align); +} + +/* + * Type serializer definitions. + */ + +/* + * Return size of structure without end-of-structure padding. 
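+ *
+ * Illustrative use from a probe, assuming a 64-bit kernel; "address" and
+ * "id" stand in for whatever the probe actually records:
+ *
+ *	struct serialize_long_int payload;
+ *
+ *	payload.f1 = address;
+ *	payload.f2 = id;
+ *	ltt_specialized_trace(probe_data, &payload,
+ *			      serialize_sizeof(payload), sizeof(long));
+ *
+ * Here serialize_sizeof(payload) is 12, while sizeof(payload) would be 16
+ * because of the trailing padding that end_field cuts off.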
+ */ +#define serialize_sizeof(type) offsetof(typeof(type), end_field) + +struct serialize_long_int { + unsigned long f1; + unsigned int f2; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_int_int_long { + unsigned int f1; + unsigned int f2; + unsigned long f3; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_int_int_short { + unsigned int f1; + unsigned int f2; + unsigned short f3; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_long_long_long { + unsigned long f1; + unsigned long f2; + unsigned long f3; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_long_long_int { + unsigned long f1; + unsigned long f2; + unsigned int f3; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_long_long_short_char { + unsigned long f1; + unsigned long f2; + unsigned short f3; + unsigned char f4; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_long_long_short { + unsigned long f1; + unsigned long f2; + unsigned short f3; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_long_short_char { + unsigned long f1; + unsigned short f2; + unsigned char f3; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_long_short { + unsigned long f1; + unsigned short f2; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_long_char { + unsigned long f1; + unsigned char f2; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_long_ifname { + unsigned long f1; + unsigned char f2[IFNAMSIZ]; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_sizet_int { + size_t f1; + unsigned int f2; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_long_long_sizet_int { + unsigned long f1; + unsigned long f2; + size_t f3; + unsigned int f4; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_long_long_sizet_int_int { + unsigned long f1; + unsigned long f2; + size_t f3; + unsigned int f4; + unsigned int f5; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_l4421224411111 { + unsigned long f1; + uint32_t f2; + uint32_t f3; + uint16_t f4; + uint8_t f5; + uint16_t f6; + uint16_t f7; + uint32_t f8; + uint32_t f9; + uint8_t f10; + uint8_t f11; + uint8_t f12; + uint8_t f13; + uint8_t f14; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_l214421224411111 { + unsigned long f1; + uint16_t f2; + uint8_t f3; + uint32_t f4; + uint32_t f5; + uint16_t f6; + uint8_t f7; + uint16_t f8; + uint16_t f9; + uint32_t f10; + uint32_t f11; + uint8_t f12; + uint8_t f13; + uint8_t f14; + uint8_t f15; + uint8_t f16; + uint8_t end_field[0]; +} RING_BUFFER_ALIGN_ATTR; + +struct serialize_l4412228 { + unsigned long f1; + uint32_t f2; + uint32_t f3; + uint8_t f4; + uint16_t f5; + uint16_t f6; + uint16_t f7; + uint64_t f8; + unsigned char end_field[0]; +} RING_BUFFER_ALIGN_ATTR; +#endif /* _LTT_TYPE_SERIALIZER_H */ diff --git a/deprecated/ltt-userspace-event.c b/deprecated/ltt-userspace-event.c new file mode 100644 index 00000000..c716d724 --- /dev/null +++ b/deprecated/ltt-userspace-event.c @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2008 Mathieu Desnoyers + * + * Dual LGPL v2.1/GPL v2 license. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ltt-type-serializer.h" + +#define LTT_WRITE_EVENT_FILE "write_event" + +DEFINE_MARKER(userspace, event, "string %s"); +static struct dentry *ltt_event_file; + +/** + * write_event - write a userspace string into the trace system + * @file: file pointer + * @user_buf: user string + * @count: length to copy, including the final NULL + * @ppos: unused + * + * Copy a string into a trace event, in channel "userspace", event "event". + * Copies until either \n or \0 is reached. + * On success, returns the number of bytes copied from the source, including the + * \n or \0 character (if there was one in the count range). It cannot return + * more than count. + * Inspired from tracing_mark_write implementation from Steven Rostedt and + * Ingo Molnar. + */ +static +ssize_t write_event(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct marker *marker; + char *buf, *end; + long copycount; + ssize_t ret; + + buf = kmalloc(count + 1, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto string_out; + } + copycount = strncpy_from_user(buf, user_buf, count); + if (copycount < 0) { + ret = -EFAULT; + goto string_err; + } + /* Cut from the first nil or newline. */ + buf[copycount] = '\0'; + end = strchr(buf, '\n'); + if (end) { + *end = '\0'; + copycount = end - buf; + } + /* Add final \0 to copycount */ + copycount++; + marker = &GET_MARKER(userspace, event); + ltt_specialized_trace(marker, marker->single.probe_private, buf, + copycount, sizeof(char)); + /* If there is no \0 nor \n in count, do not return a larger value */ + ret = min_t(size_t, copycount, count); +string_err: + kfree(buf); +string_out: + return ret; +} + +static const struct file_operations ltt_userspace_operations = { + .write = write_event, +}; + +static int __init ltt_userspace_init(void) +{ + struct dentry *ltt_root_dentry; + int err = 0; + + ltt_root_dentry = get_ltt_root(); + if (!ltt_root_dentry) { + err = -ENOENT; + goto err_no_root; + } + + ltt_event_file = debugfs_create_file(LTT_WRITE_EVENT_FILE, + S_IWUGO, + ltt_root_dentry, + NULL, + <t_userspace_operations); + if (IS_ERR(ltt_event_file) || !ltt_event_file) { + printk(KERN_ERR + "ltt_userspace_init: failed to create file %s\n", + LTT_WRITE_EVENT_FILE); + err = -EPERM; + goto err_no_file; + } + + return err; +err_no_file: + put_ltt_root(); +err_no_root: + return err; +} + +static void __exit ltt_userspace_exit(void) +{ + debugfs_remove(ltt_event_file); + put_ltt_root(); +} + +module_init(ltt_userspace_init); +module_exit(ltt_userspace_exit); + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers "); +MODULE_DESCRIPTION("Linux Trace Toolkit Userspace Event"); diff --git a/deprecated/probes/Makefile b/deprecated/probes/Makefile new file mode 100644 index 00000000..d8f1c403 --- /dev/null +++ b/deprecated/probes/Makefile @@ -0,0 +1,47 @@ +# LTTng tracing probes + +ifdef CONFIG_FTRACE +CFLAGS_REMOVE_kernel-trace.o = -pg +CFLAGS_REMOVE_mm-trace.o = -pg +CFLAGS_REMOVE_fs-trace.o = -pg +CFLAGS_REMOVE_ipc-trace.o = -pg +CFLAGS_REMOVE_lockdep-trace.o = -pg +CFLAGS_REMOVE_rcu-trace.o = -pg +CFLAGS_REMOVE_syscall-trace.o = -pg +CFLAGS_REMOVE_trap-trace.o = -pg +CFLAGS_REMOVE_pm-trace.o = -pg +endif + +obj-m += kernel-trace.o mm-trace.o fs-trace.o ipc-trace.o lockdep-trace.o \ + rcu-trace.o syscall-trace.o trap-trace.o pm-trace.o + +ifeq ($(CONFIG_NET),y) +ifdef CONFIG_FTRACE +CFLAGS_REMOVE_net-trace.o = -pg 
+CFLAGS_REMOVE_net-extended-trace.o = -pg +endif +obj-m += net-trace.o net-extended-trace.o +endif + +ifdef CONFIG_JBD2 +ifdef CONFIG_FTRACE +CFLAGS_REMOVE_jbd2-trace.o = -pg +endif +obj-m += jbd2-trace.o +endif + +#ifdef CONFIG_EXT4_FS +#ifdef CONFIG_FTRACE +#CFLAGS_REMOVE_ext4-trace.o = -pg +#endif +#obj-$(CONFIG_LTT_TRACEPROBES) += ext4-trace.o +#endif + +ifdef CONFIG_BLOCK +ifdef CONFIG_FTRACE +CFLAGS_REMOVE_block-trace.o = -pg +endif +obj-m += block-trace.o +endif + + diff --git a/deprecated/probes/block-trace.c b/deprecated/probes/block-trace.c new file mode 100644 index 00000000..51ae2cdd --- /dev/null +++ b/deprecated/probes/block-trace.c @@ -0,0 +1,309 @@ +/* + * ltt/probes/block-trace.c + * + * block layer tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. + */ + +#include + +#include + +/* + * Add rq cmd as a sequence. Needs new type. (size + binary blob) + */ + +void probe_block_rq_abort(void *data, struct request_queue *q, struct request *rq) +{ + int rw = rq->cmd_flags & 0x03; + + if (blk_discard_rq(rq)) + rw |= (1 << BIO_RW_DISCARD); + + if (blk_pc_request(rq)) { + trace_mark_tp(block, rq_abort_pc, block_rq_abort, + probe_block_rq_abort, + "data_len %u rw %d errors %d", + blk_rq_bytes(rq), rw, rq->errors); + } else { + /* + * FIXME Using a simple trace_mark for the second event + * possibility because tracepoints do not support multiple + * connections to the same probe yet. They should have some + * refcounting. Need to enable both rq_abort_pc and rq_abort_fs + * markers to have the rq_abort_fs marker enabled. + */ + trace_mark(block, rq_abort_fs, + "hard_sector %llu " + "rw %d errors %d", (unsigned long long)blk_rq_pos(rq), + rw, rq->errors); + } +} + +void probe_block_rq_insert(void *data, struct request_queue *q, struct request *rq) +{ + int rw = rq->cmd_flags & 0x03; + + if (blk_discard_rq(rq)) + rw |= (1 << BIO_RW_DISCARD); + + if (blk_pc_request(rq)) { + trace_mark_tp(block, rq_insert_pc, block_rq_insert, + probe_block_rq_insert, + "data_len %u rw %d errors %d", + blk_rq_bytes(rq), rw, rq->errors); + } else { + /* + * FIXME Using a simple trace_mark for the second event + * possibility because tracepoints do not support multiple + * connections to the same probe yet. They should have some + * refcounting. Need to enable both rq_insert_pc and + * rq_insert_fs markers to have the rq_insert_fs marker enabled. + */ + trace_mark(block, rq_insert_fs, + "hard_sector %llu " + "rw %d errors %d", (unsigned long long)blk_rq_pos(rq), + rw, rq->errors); + } +} + +void probe_block_rq_issue(void *data, struct request_queue *q, struct request *rq) +{ + int rw = rq->cmd_flags & 0x03; + + if (blk_discard_rq(rq)) + rw |= (1 << BIO_RW_DISCARD); + + if (blk_pc_request(rq)) { + trace_mark_tp(block, rq_issue_pc, block_rq_issue, + probe_block_rq_issue, + "data_len %u rw %d errors %d", + blk_rq_bytes(rq), rw, rq->errors); + } else { + /* + * FIXME Using a simple trace_mark for the second event + * possibility because tracepoints do not support multiple + * connections to the same probe yet. They should have some + * refcounting. Need to enable both rq_issue_pc and rq_issue_fs + * markers to have the rq_issue_fs marker enabled. 
+ */ + trace_mark(block, rq_issue_fs, + "hard_sector %llu " + "rw %d errors %d", (unsigned long long)blk_rq_pos(rq), + rw, rq->errors); + } +} + +void probe_block_rq_requeue(void *data, struct request_queue *q, struct request *rq) +{ + int rw = rq->cmd_flags & 0x03; + + if (blk_discard_rq(rq)) + rw |= (1 << BIO_RW_DISCARD); + + if (blk_pc_request(rq)) { + trace_mark_tp(block, rq_requeue_pc, block_rq_requeue, + probe_block_rq_requeue, + "data_len %u rw %d errors %d", + blk_rq_bytes(rq), rw, rq->errors); + } else { + /* + * FIXME Using a simple trace_mark for the second event + * possibility because tracepoints do not support multiple + * connections to the same probe yet. They should have some + * refcounting. Need to enable both rq_requeue_pc and + * rq_requeue_fs markers to have the rq_requeue_fs marker + * enabled. + */ + trace_mark(block, rq_requeue_fs, + "hard_sector %llu " + "rw %d errors %d", (unsigned long long)blk_rq_pos(rq), + rw, rq->errors); + } +} + +void probe_block_rq_complete(void *data, struct request_queue *q, struct request *rq) +{ + int rw = rq->cmd_flags & 0x03; + + if (blk_discard_rq(rq)) + rw |= (1 << BIO_RW_DISCARD); + + if (blk_pc_request(rq)) { + trace_mark_tp(block, rq_complete_pc, block_rq_complete, + probe_block_rq_complete, + "data_len %u rw %d errors %d", + blk_rq_bytes(rq), rw, rq->errors); + } else { + /* + * FIXME Using a simple trace_mark for the second event + * possibility because tracepoints do not support multiple + * connections to the same probe yet. They should have some + * refcounting. Need to enable both rq_complete_pc and + * rq_complete_fs markers to have the rq_complete_fs marker + * enabled. + */ + trace_mark(block, rq_complete_fs, + "hard_sector %llu " + "rw %d errors %d", (unsigned long long)blk_rq_pos(rq), + rw, rq->errors); + } +} + +void probe_block_bio_bounce(void *data, struct request_queue *q, struct bio *bio) +{ + trace_mark_tp(block, bio_bounce, block_bio_bounce, + probe_block_bio_bounce, + "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," + "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " + "not_uptodate #1u%d", + (unsigned long long)bio->bi_sector, bio->bi_size, + bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); +} + +void probe_block_bio_complete(void *data, struct request_queue *q, struct bio *bio) +{ + trace_mark_tp(block, bio_complete, block_bio_complete, + probe_block_bio_complete, + "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," + "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " + "not_uptodate #1u%d", + (unsigned long long)bio->bi_sector, bio->bi_size, + bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); +} + +void probe_block_bio_backmerge(void *data, struct request_queue *q, struct bio *bio) +{ + trace_mark_tp(block, bio_backmerge, block_bio_backmerge, + probe_block_bio_backmerge, + "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," + "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " + "not_uptodate #1u%d", + (unsigned long long)bio->bi_sector, bio->bi_size, + bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); +} + +void probe_block_bio_frontmerge(void *data, struct request_queue *q, struct bio *bio) +{ + trace_mark_tp(block, bio_frontmerge, block_bio_frontmerge, + probe_block_bio_frontmerge, + "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," + "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " + "not_uptodate #1u%d", + (unsigned long long)bio->bi_sector, bio->bi_size, + bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); +} + +void probe_block_bio_queue(void *data, 
struct request_queue *q, struct bio *bio) +{ + trace_mark_tp(block, bio_queue, block_bio_queue, + probe_block_bio_queue, + "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," + "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " + "not_uptodate #1u%d", + (unsigned long long)bio->bi_sector, bio->bi_size, + bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); +} + +void probe_block_getrq(void *data, struct request_queue *q, struct bio *bio, int rw) +{ + if (bio) { + trace_mark_tp(block, getrq_bio, block_getrq, + probe_block_getrq, + "sector %llu size %u " + "rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," + "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " + "not_uptodate #1u%d", + (unsigned long long)bio->bi_sector, bio->bi_size, + bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); + } else { + /* + * FIXME Using a simple trace_mark for the second event + * possibility because tracepoints do not support multiple + * connections to the same probe yet. They should have some + * refcounting. Need to enable both getrq_bio and getrq markers + * to have the getrq marker enabled. + */ + trace_mark(block, getrq, "rw %d", rw); + } +} + +void probe_block_sleeprq(void *data, struct request_queue *q, struct bio *bio, int rw) +{ + if (bio) { + trace_mark_tp(block, sleeprq_bio, block_sleeprq, + probe_block_sleeprq, + "sector %llu size %u " + "rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," + "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " + "not_uptodate #1u%d", + (unsigned long long)bio->bi_sector, bio->bi_size, + bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); + } else { + /* + * FIXME Using a simple trace_mark for the second event + * possibility because tracepoints do not support multiple + * connections to the same probe yet. They should have some + * refcounting. Need to enable both sleeprq_bio and sleeprq + * markers to have the sleeprq marker enabled. 
+ */ + trace_mark(block, sleeprq, "rw %d", rw); + } +} + +void probe_block_plug(void *data, struct request_queue *q) +{ + trace_mark_tp(block, plug, block_plug, probe_block_plug, + MARK_NOARGS); +} + +void probe_block_unplug_io(void *data, struct request_queue *q) +{ + unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; + + trace_mark_tp(block, unplug_io, block_unplug_io, probe_block_unplug_io, + "pdu %u", pdu); +} + +void probe_block_unplug_timer(void *data, struct request_queue *q) +{ + unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; + + trace_mark_tp(block, unplug_timer, block_unplug_timer, + probe_block_unplug_timer, + "pdu %u", pdu); +} + +void probe_block_split(void *data, struct request_queue *q, struct bio *bio, + unsigned int pdu) +{ + trace_mark_tp(block, split, block_split, + probe_block_split, + "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," + "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " + "not_uptodate #1u%d pdu %u", + (unsigned long long)bio->bi_sector, bio->bi_size, + bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE), pdu); +} + +void probe_block_remap(void *data, struct request_queue *q, struct bio *bio, + dev_t dev, sector_t from) +{ + trace_mark_tp(block, remap, block_remap, + probe_block_remap, + "device_from %lu sector_from %llu device_to %lu " + "size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," + "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " + "not_uptodate #1u%d", + (unsigned long)bio->bi_bdev->bd_dev, + (unsigned long long)from, + (unsigned long)dev, + bio->bi_size, bio->bi_rw, + !bio_flagged(bio, BIO_UPTODATE)); +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("Block Tracepoint Probes"); diff --git a/deprecated/probes/ext4-trace.c b/deprecated/probes/ext4-trace.c new file mode 100644 index 00000000..83683e70 --- /dev/null +++ b/deprecated/probes/ext4-trace.c @@ -0,0 +1,611 @@ +/* + * ltt/probes/ext4-trace.c + * + * ext4 tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include +#include +#include +#include +#include + +#include "../ltt-tracer.h" +#include "../../fs/ext4/mballoc.h" + +static struct dentry *ext4_filter_dentry, *ext4_filter_dev_dentry, + *ext4_filter_inode_dentry; +static DEFINE_MUTEX(ext4_filter_mutex); +/* Make sure we don't race between module exit and file write */ +static int module_exits; + +struct rcu_dev_filter { + struct rcu_head rcu; + char devname[NAME_MAX]; +}; + +static struct rcu_dev_filter *dev_filter; +/* ~0UL inode_filter enables all inodes */ +static unsigned long inode_filter = ~0UL; + +/* + * Probes are executed in rcu_sched read-side critical section. + */ + +static int do_dev_filter(const char *dev) +{ + struct rcu_dev_filter *ldev_filter = rcu_dereference(dev_filter); + + if (unlikely(ldev_filter)) + if (unlikely(strcmp(ldev_filter->devname, dev))) + return 0; + return 1; +} + +static int do_inode_filter(unsigned long ino) +{ + if (unlikely(inode_filter != ~0UL)) + if (unlikely(inode_filter != ino)) + return 0; + return 1; +} + +/* + * Logical AND between dev and inode filter. 
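+ *
+ * Behaviour sketch, with the device name and inode number chosen for
+ * illustration: once dev_filter holds "sda1" and inode_filter holds 5442,
+ *
+ *	do_filter("sda1", 5442) returns 1,
+ *	do_filter("sda1", 100) and do_filter("sdb2", 5442) return 0,
+ *
+ * while the defaults (no dev filter, inode_filter == ~0UL) let every event
+ * through.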
+ */ +static int do_filter(const char *dev, unsigned long ino) +{ + if (unlikely(!do_dev_filter(dev))) + return 0; + if (unlikely(!do_inode_filter(ino))) + return 0; + return 1; +} + + +void probe_ext4_free_inode(void *data, struct inode *inode) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) + return; + trace_mark_tp(ext4, free_inode, ext4_free_inode, + probe_ext4_free_inode, + "dev %s ino %lu mode %d uid %lu gid %lu blocks %llu", + inode->i_sb->s_id, inode->i_ino, inode->i_mode, + (unsigned long) inode->i_uid, (unsigned long) inode->i_gid, + (unsigned long long) inode->i_blocks); +} + +void probe_ext4_request_inode(void *data, struct inode *dir, int mode) +{ + if (unlikely(!do_filter(dir->i_sb->s_id, dir->i_ino))) + return; + trace_mark_tp(ext4, request_inode, ext4_request_inode, + probe_ext4_request_inode, + "dev %s dir %lu mode %d", + dir->i_sb->s_id, dir->i_ino, mode); +} + +void probe_ext4_allocate_inode(void *data, struct inode *inode, struct inode *dir, int mode) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino) + && !do_filter(dir->i_sb->s_id, dir->i_ino))) + return; + trace_mark_tp(ext4, allocate_inode, ext4_allocate_inode, + probe_ext4_allocate_inode, + "dev %s ino %lu dir %lu mode %d", + dir->i_sb->s_id, inode->i_ino, dir->i_ino, mode); +} + +void probe_ext4_write_begin(void *data, struct inode *inode, loff_t pos, unsigned int len, + unsigned int flags) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) + return; + trace_mark_tp(ext4, write_begin, ext4_write_begin, + probe_ext4_write_begin, + "dev %s ino %lu pos %llu len %u flags %u", + inode->i_sb->s_id, inode->i_ino, + (unsigned long long) pos, len, flags); +} + +void probe_ext4_ordered_write_end(void *data, struct inode *inode, loff_t pos, + unsigned int len, unsigned int copied) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) + return; + trace_mark_tp(ext4, ordered_write_end, ext4_ordered_write_end, + probe_ext4_ordered_write_end, + "dev %s ino %lu pos %llu len %u copied %u", + inode->i_sb->s_id, inode->i_ino, + (unsigned long long) pos, len, copied); +} + +void probe_ext4_writeback_write_end(void *data, struct inode *inode, loff_t pos, + unsigned int len, unsigned int copied) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) + return; + trace_mark_tp(ext4, writeback_write_end, ext4_writeback_write_end, + probe_ext4_writeback_write_end, + "dev %s ino %lu pos %llu len %u copied %u", + inode->i_sb->s_id, inode->i_ino, + (unsigned long long) pos, len, copied); +} + +void probe_ext4_journalled_write_end(void *data, struct inode *inode, loff_t pos, + unsigned int len, unsigned int copied) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) + return; + trace_mark_tp(ext4, journalled_write_end, ext4_journalled_write_end, + probe_ext4_journalled_write_end, + "dev %s ino %lu pos %llu len %u copied %u", + inode->i_sb->s_id, inode->i_ino, + (unsigned long long) pos, len, copied); +} + +/* + * note : wbc_flags will have to be decoded by userspace. + * #1x uses a single byte in the trace. Limits to 8 bits. 
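+ *
+ * Decoding sketch for an analyzer, matching the packing done just below;
+ * the variable names are chosen for illustration:
+ *
+ *	unsigned int wbc_flags;	value read from the event
+ *	int nonblocking  = (wbc_flags >> 3) & 1;
+ *	int for_kupdate  = (wbc_flags >> 2) & 1;
+ *	int for_reclaim  = (wbc_flags >> 1) & 1;
+ *	int range_cyclic = wbc_flags & 1;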
+ */ +void probe_ext4_da_writepages(void *data, struct inode *inode, + struct writeback_control *wbc) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) + return; + trace_mark_tp(ext4, da_writepages, ext4_da_writepages, + probe_ext4_da_writepages, + "dev %s ino %lu nr_to_write %ld " + "pages_skipped %ld range_start %llu range_end %llu " + "wbc_flags(nonblocking,for_kupdate," + "for_reclaim,range_cyclic) #1x%u", + inode->i_sb->s_id, inode->i_ino, wbc->nr_to_write, + wbc->pages_skipped, + (unsigned long long) wbc->range_start, + (unsigned long long) wbc->range_end, + (wbc->nonblocking << 3) + | (wbc->for_kupdate << 2) + | (wbc->for_reclaim << 1) + | wbc->range_cyclic); +} + +/* + * note : wbc_flags will have to be decoded by userspace. + * #1x uses a single byte in the trace. Limits to 8 bits. + */ +void probe_ext4_da_writepages_result(void *data, struct inode *inode, + struct writeback_control *wbc, + int ret, int pages_written) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) + return; + trace_mark_tp(ext4, da_writepages_result, ext4_da_writepages_result, + probe_ext4_da_writepages_result, + "dev %s ino %lu ret %d pages_written %d " + "pages_skipped %ld " + "wbc_flags(encountered_congestion," + "more_io,no_nrwrite_index_update) #1x%u", + inode->i_sb->s_id, inode->i_ino, ret, pages_written, + wbc->pages_skipped, + (wbc->encountered_congestion << 2) + | (wbc->more_io << 1) + | wbc->no_nrwrite_index_update); +} + +void probe_ext4_da_write_begin(void *data, struct inode *inode, loff_t pos, + unsigned int len, unsigned int flags) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) + return; + trace_mark_tp(ext4, da_write_begin, ext4_da_write_begin, + probe_ext4_da_write_begin, + "dev %s ino %lu pos %llu len %u flags %u", + inode->i_sb->s_id, inode->i_ino, + (unsigned long long) pos, len, flags); +} + +void probe_ext4_da_write_end(void *data, struct inode *inode, loff_t pos, + unsigned int len, unsigned int copied) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) + return; + trace_mark_tp(ext4, da_write_end, ext4_da_write_end, + probe_ext4_da_write_end, + "dev %s ino %lu pos %llu len %u copied %u", + inode->i_sb->s_id, inode->i_ino, + (unsigned long long) pos, len, copied); +} + +void probe_ext4_discard_blocks(void *data, struct super_block *sb, unsigned long long blk, + unsigned long long count) +{ + if (unlikely(!do_dev_filter(sb->s_id))) + return; + trace_mark_tp(ext4, discard_blocks, ext4_discard_blocks, + probe_ext4_discard_blocks, + "dev %s blk %llu count %llu", + sb->s_id, blk, count); +} + +void probe_ext4_mb_new_inode_pa(void *data, struct ext4_allocation_context *ac, + struct ext4_prealloc_space *pa) +{ + if (unlikely(!do_filter(ac->ac_sb->s_id, ac->ac_inode->i_ino))) + return; + trace_mark_tp(ext4, mb_new_inode_pa, ext4_mb_new_inode_pa, + probe_ext4_mb_new_inode_pa, + "dev %s ino %lu pstart %llu len %u lstart %u", + ac->ac_sb->s_id, ac->ac_inode->i_ino, pa->pa_pstart, + pa->pa_len, pa->pa_lstart); +} + +void probe_ext4_mb_new_group_pa(void *data, struct ext4_allocation_context *ac, + struct ext4_prealloc_space *pa) +{ + if (unlikely(!do_dev_filter(ac->ac_sb->s_id))) + return; + trace_mark_tp(ext4, mb_new_group_pa, ext4_mb_new_group_pa, + probe_ext4_mb_new_group_pa, + "dev %s pstart %llu len %u lstart %u", + ac->ac_sb->s_id, pa->pa_pstart, + pa->pa_len, pa->pa_lstart); +} + +void probe_ext4_mb_release_inode_pa(void *data, struct ext4_allocation_context *ac, + struct ext4_prealloc_space *pa, + unsigned long long block, + unsigned 
int count) +{ + if (unlikely(!do_filter(ac->ac_sb->s_id, ac->ac_inode->i_ino))) + return; + trace_mark_tp(ext4, mb_release_inode_pa, ext4_mb_release_inode_pa, + probe_ext4_mb_release_inode_pa, + "dev %s ino %lu block %llu count %u", + ac->ac_sb->s_id, pa->pa_inode->i_ino, block, count); +} + +void probe_ext4_mb_release_group_pa(void *data, struct ext4_allocation_context *ac, + struct ext4_prealloc_space *pa) +{ + if (unlikely(!do_dev_filter(ac->ac_sb->s_id))) + return; + trace_mark_tp(ext4, mb_release_group_pa, ext4_mb_release_group_pa, + probe_ext4_mb_release_group_pa, + "dev %s pstart %llu len %d", + ac->ac_sb->s_id, pa->pa_pstart, pa->pa_len); +} + +void probe_ext4_discard_preallocations(void *data, struct inode *inode) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) + return; + trace_mark_tp(ext4, discard_preallocations, + ext4_discard_preallocations, + probe_ext4_discard_preallocations, + "dev %s ino %lu", + inode->i_sb->s_id, inode->i_ino); +} + +void probe_ext4_mb_discard_preallocations(void *data, struct super_block *sb, int needed) +{ + if (unlikely(!do_dev_filter(sb->s_id))) + return; + trace_mark_tp(ext4, mb_discard_preallocations, + ext4_mb_discard_preallocations, + probe_ext4_mb_discard_preallocations, + "dev %s needed %d", + sb->s_id, needed); +} + +void probe_ext4_request_blocks(void *data, struct ext4_allocation_request *ar) +{ + if (ar->inode) { + if (unlikely(!do_filter(ar->inode->i_sb->s_id, + ar->inode->i_ino))) + return; + } else { + if (unlikely(!do_dev_filter(ar->inode->i_sb->s_id))) + return; + } + trace_mark_tp(ext4, request_blocks, ext4_request_blocks, + probe_ext4_request_blocks, + "dev %s flags %u len %u ino %lu " + "lblk %llu goal %llu lleft %llu lright %llu " + "pleft %llu pright %llu", + ar->inode->i_sb->s_id, ar->flags, ar->len, + ar->inode ? ar->inode->i_ino : 0, + (unsigned long long) ar->logical, + (unsigned long long) ar->goal, + (unsigned long long) ar->lleft, + (unsigned long long) ar->lright, + (unsigned long long) ar->pleft, + (unsigned long long) ar->pright); +} + +void probe_ext4_allocate_blocks(void *data, struct ext4_allocation_request *ar, + unsigned long long block) +{ + if (ar->inode) { + if (unlikely(!do_filter(ar->inode->i_sb->s_id, + ar->inode->i_ino))) + return; + } else { + if (unlikely(!do_dev_filter(ar->inode->i_sb->s_id))) + return; + } + trace_mark_tp(ext4, allocate_blocks, ext4_allocate_blocks, + probe_ext4_allocate_blocks, + "dev %s block %llu flags %u len %u ino %lu " + "logical %llu goal %llu lleft %llu lright %llu " + "pleft %llu pright %llu", + ar->inode->i_sb->s_id, (unsigned long long) block, + ar->flags, ar->len, ar->inode ? 
ar->inode->i_ino : 0, + (unsigned long long) ar->logical, + (unsigned long long) ar->goal, + (unsigned long long) ar->lleft, + (unsigned long long) ar->lright, + (unsigned long long) ar->pleft, + (unsigned long long) ar->pright); +} + +void probe_ext4_free_blocks(void *data, struct inode *inode, __u64 block, + unsigned long count, int metadata) +{ + if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) + return; + trace_mark_tp(ext4, free_blocks, ext4_free_blocks, + probe_ext4_free_blocks, + "dev %s block %llu count %lu metadata %d ino %lu", + inode->i_sb->s_id, (unsigned long long)block, + count, metadata, inode->i_ino); +} + +void probe_ext4_sync_file(void *data, struct file *file, struct dentry *dentry, + int datasync) +{ + if (unlikely(!do_dev_filter(dentry->d_inode->i_sb->s_id))) + return; + if (unlikely(!do_inode_filter(dentry->d_inode->i_ino) + && !do_inode_filter(dentry->d_parent->d_inode->i_ino))) + return; + trace_mark_tp(ext4, sync_file, ext4_sync_file, + probe_ext4_sync_file, + "dev %s datasync %d ino %ld parent %ld", + dentry->d_inode->i_sb->s_id, datasync, dentry->d_inode->i_ino, + dentry->d_parent->d_inode->i_ino); +} + +void probe_ext4_sync_fs(void *data, struct super_block *sb, int wait) +{ + if (unlikely(!do_dev_filter(sb->s_id))) + return; + trace_mark_tp(ext4, sync_fs, ext4_sync_fs, + probe_ext4_sync_fs, + "dev %s wait %d", + sb->s_id, wait); +} + +static void free_dev_filter(struct rcu_head *head) +{ + kfree(container_of(head, struct rcu_dev_filter, rcu)); +} + +static ssize_t dev_filter_op_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int err = 0; + char buf[NAME_MAX]; + int buf_size; + char name[NAME_MAX]; + struct rcu_dev_filter *new, *old; + + mutex_lock(&ext4_filter_mutex); + if (module_exits) { + err = -EPERM; + goto error; + } + buf_size = min(count, sizeof(buf) - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto error; + buf[buf_size] = 0; + + if (sscanf(buf, "%s", name) != 1) { + err = -EPERM; + goto error; + } + + old = dev_filter; + + /* Empty string or * means all active */ + if (name[0] == '\0' || (name[0] == '*' && name[1] == '\0')) { + new = NULL; + } else { + new = kmalloc(sizeof(*new), GFP_KERNEL); + strcpy(new->devname, name); + } + + rcu_assign_pointer(dev_filter, new); + if (old) + call_rcu_sched(&old->rcu, free_dev_filter); + + mutex_unlock(&ext4_filter_mutex); + return count; + +error: + mutex_unlock(&ext4_filter_mutex); + return err; +} + +static ssize_t dev_filter_op_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + ssize_t bcount; + const char *devname; + + mutex_lock(&ext4_filter_mutex); + if (!dev_filter) + devname = "*"; + else + devname = dev_filter->devname; + bcount = simple_read_from_buffer(buffer, count, ppos, + devname, strlen(devname)); + mutex_unlock(&ext4_filter_mutex); + return bcount; +} + +static struct file_operations ext4_dev_file_operations = { + .write = dev_filter_op_write, + .read = dev_filter_op_read, +}; + +static ssize_t inode_filter_op_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int err = 0; + char buf[NAME_MAX]; + int buf_size; + char name[NAME_MAX]; + unsigned long inode_num; + + mutex_lock(&ext4_filter_mutex); + if (module_exits) { + err = -EPERM; + goto error; + } + buf_size = min(count, sizeof(buf) - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto error; + buf[buf_size] = 0; + + if (sscanf(buf, "%s", name) != 1) { + err = -EPERM; + goto error; + } + + 
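/*
+	 * Accepted input: an empty string or "*" selects all inodes
+	 * (inode_filter = ~0UL); anything else is parsed as a decimal inode
+	 * number and only that inode is traced. For example, assuming the
+	 * directory returned by get_filter_root() is <filter-root>:
+	 *
+	 *   echo 1234 > <filter-root>/ext4/inode
+	 */
+	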
/* Empty string or * means all active */ + if (name[0] == '\0' || (name[0] == '*' && name[1] == '\0')) { + inode_filter = ~0UL; + } else { + if (sscanf(buf, "%lu", &inode_num) != 1) { + err = -EPERM; + goto error; + } + inode_filter = inode_num; + } + + mutex_unlock(&ext4_filter_mutex); + return count; + +error: + mutex_unlock(&ext4_filter_mutex); + return err; +} + +static ssize_t inode_filter_op_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + ssize_t bcount; + char inode_str[NAME_MAX]; + + mutex_lock(&ext4_filter_mutex); + if (inode_filter == ~0UL) + strcpy(inode_str, "*"); + else { + bcount = snprintf(inode_str, sizeof(inode_str), "%lu", + inode_filter); + if (bcount == sizeof(inode_str)) + bcount = -ENOSPC; + if (bcount < 0) + goto end; + } + bcount = simple_read_from_buffer(buffer, count, ppos, + inode_str, strlen(inode_str)); +end: + mutex_unlock(&ext4_filter_mutex); + return bcount; +} + +static struct file_operations ext4_inode_file_operations = { + .write = inode_filter_op_write, + .read = inode_filter_op_read, +}; + +static void release_filter_dev(void) +{ + struct rcu_dev_filter *old; + + mutex_lock(&ext4_filter_mutex); + module_exits = 1; + old = dev_filter; + rcu_assign_pointer(dev_filter, NULL); + if (old) + call_rcu_sched(&old->rcu, free_dev_filter); + mutex_unlock(&ext4_filter_mutex); +} + +static int __init filter_init(void) +{ + struct dentry *filter_root_dentry; + int err = 0; + + filter_root_dentry = get_filter_root(); + if (!filter_root_dentry) { + err = -ENOENT; + goto end; + } + + ext4_filter_dentry = debugfs_create_dir("ext4", filter_root_dentry); + + if (IS_ERR(ext4_filter_dentry) || !ext4_filter_dentry) { + printk(KERN_ERR "Failed to create ext4 filter file\n"); + err = -ENOMEM; + goto end; + } + + ext4_filter_dev_dentry = debugfs_create_file("dev", S_IWUSR, + ext4_filter_dentry, NULL, &ext4_dev_file_operations); + if (IS_ERR(ext4_filter_dev_dentry) || !ext4_filter_dev_dentry) { + printk(KERN_ERR "Failed to create ext4 dev filter file\n"); + err = -ENOMEM; + goto release_filter_dentry; + } + + ext4_filter_inode_dentry = debugfs_create_file("inode", S_IWUSR, + ext4_filter_dentry, NULL, &ext4_inode_file_operations); + if (IS_ERR(ext4_filter_inode_dentry) || !ext4_filter_inode_dentry) { + printk(KERN_ERR "Failed to create ext4 inode filter file\n"); + err = -ENOMEM; + goto release_filter_dev_dentry; + } + + goto end; + +release_filter_dev_dentry: + debugfs_remove(ext4_filter_dev_dentry); +release_filter_dentry: + debugfs_remove(ext4_filter_dentry); + release_filter_dev(); +end: + return err; +} + +static void __exit filter_exit(void) +{ + debugfs_remove(ext4_filter_dev_dentry); + debugfs_remove(ext4_filter_inode_dentry); + debugfs_remove(ext4_filter_dentry); + release_filter_dev(); +} + +module_init(filter_init); +module_exit(filter_exit); + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("ext4 Tracepoint Probes"); diff --git a/deprecated/probes/fs-trace.c b/deprecated/probes/fs-trace.c new file mode 100644 index 00000000..bca28275 --- /dev/null +++ b/deprecated/probes/fs-trace.c @@ -0,0 +1,158 @@ +/* + * ltt/probes/fs-trace.c + * + * FS tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. 
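+ *
+ * Most probes below log through trace_mark_tp(); the hot read and write
+ * paths (fs_read, fs_write) instead use pre-registered markers with
+ * ltt_specialized_trace() so the event payload (count, fd) is serialized
+ * with minimal overhead.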
+ */ + +#include +#include +#include + +#include "../ltt-type-serializer.h" + +void probe_fs_buffer_wait_start(void *_data, struct buffer_head *bh) +{ + trace_mark_tp(fs, buffer_wait_start, fs_buffer_wait_start, + probe_fs_buffer_wait_start, "bh %p", bh); +} + +void probe_fs_buffer_wait_end(void *_data, struct buffer_head *bh) +{ + trace_mark_tp(fs, buffer_wait_end, fs_buffer_wait_end, + probe_fs_buffer_wait_end, "bh %p", bh); +} + +void probe_fs_exec(void *_data, char *filename) +{ + trace_mark_tp(fs, exec, fs_exec, probe_fs_exec, "filename %s", + filename); +} + +void probe_fs_ioctl(void *_data, unsigned int fd, unsigned int cmd, unsigned long arg) +{ + trace_mark_tp(fs, ioctl, fs_ioctl, probe_fs_ioctl, + "fd %u cmd %u arg %lu", fd, cmd, arg); +} + +void probe_fs_open(void *_data, int fd, char *filename) +{ + trace_mark_tp(fs, open, fs_open, probe_fs_open, + "fd %d filename %s", fd, filename); +} + +void probe_fs_close(void *_data, unsigned int fd) +{ + trace_mark_tp(fs, close, fs_close, probe_fs_close, "fd %u", fd); +} + +void probe_fs_lseek(void *_data, unsigned int fd, long offset, unsigned int origin) +{ + trace_mark_tp(fs, lseek, fs_lseek, probe_fs_lseek, + "fd %u offset %ld origin %u", fd, offset, origin); +} + +void probe_fs_llseek(void *_data, unsigned int fd, loff_t offset, unsigned int origin) +{ + trace_mark_tp(fs, llseek, fs_llseek, probe_fs_llseek, + "fd %u offset %lld origin %u", fd, + (long long)offset, origin); +} + +void probe_fs_read(void *_data, unsigned int fd, char __user *buf, size_t count, + ssize_t ret); + +DEFINE_MARKER_TP(fs, read, fs_read, probe_fs_read, + "count %zu fd %u"); + +notrace void probe_fs_read(void *_data, unsigned int fd, char __user *buf, size_t count, + ssize_t ret) +{ + struct marker *marker; + struct serialize_sizet_int data; + + data.f1 = count; + data.f2 = fd; + + marker = &GET_MARKER(fs, read); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(size_t)); +} + +void probe_fs_write(void *_data, unsigned int fd, char __user *buf, size_t count, + ssize_t ret); + +DEFINE_MARKER_TP(fs, write, fs_write, probe_fs_write, + "count %zu fd %u"); + +notrace void probe_fs_write(void *_data, unsigned int fd, char __user *buf, size_t count, + ssize_t ret) +{ + struct marker *marker; + struct serialize_sizet_int data; + + data.f1 = count; + data.f2 = fd; + + marker = &GET_MARKER(fs, write); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(size_t)); +} + +void probe_fs_pread64(void *_data, unsigned int fd, char __user *buf, size_t count, + loff_t pos, ssize_t ret) +{ + trace_mark_tp(fs, pread64, fs_pread64, probe_fs_pread64, + "fd %u count %zu pos %llu", + fd, count, (unsigned long long)pos); +} + +void probe_fs_pwrite64(void *_data, unsigned int fd, const char __user *buf, + size_t count, loff_t pos, ssize_t ret) +{ + trace_mark_tp(fs, pwrite64, fs_pwrite64, probe_fs_pwrite64, + "fd %u count %zu pos %llu", + fd, count, (unsigned long long)pos); +} + +void probe_fs_readv(void *_data, unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, ssize_t ret) +{ + trace_mark_tp(fs, readv, fs_readv, probe_fs_readv, + "fd %lu vlen %lu", fd, vlen); +} + +void probe_fs_writev(void *_data, unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, ssize_t ret) +{ + trace_mark_tp(fs, writev, fs_writev, probe_fs_writev, + "fd %lu vlen %lu", fd, vlen); +} + +void probe_fs_select(void *_data, int fd, struct timespec *end_time) +{ + struct 
timespec tmptime; + + if (end_time) { + tmptime = *end_time; + } else { + tmptime.tv_sec = -1L; + tmptime.tv_nsec = -1L; + } + + trace_mark_tp(fs, select, fs_select, probe_fs_select, + "fd %d end_time_sec %ld end_time_nsec %ld", fd, + tmptime.tv_sec, tmptime.tv_nsec); +} + +void probe_fs_poll(void *_data, int fd) +{ + trace_mark_tp(fs, pollfd, fs_poll, probe_fs_poll, + "fd %d", fd); +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("FS Tracepoint Probes"); diff --git a/deprecated/probes/ipc-trace.c b/deprecated/probes/ipc-trace.c new file mode 100644 index 00000000..3a095252 --- /dev/null +++ b/deprecated/probes/ipc-trace.c @@ -0,0 +1,39 @@ +/* + * ltt/probes/ipc-trace.c + * + * IPC tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include + +void probe_ipc_msg_create(void *data, long id, int flags) +{ + trace_mark_tp(ipc, msg_create, ipc_msg_create, probe_ipc_msg_create, + "id %ld flags %d", id, flags); +} + +void probe_ipc_sem_create(void *data, long id, int flags) +{ + trace_mark_tp(ipc, sem_create, ipc_sem_create, probe_ipc_sem_create, + "id %ld flags %d", id, flags); +} + +void probe_ipc_shm_create(void *data, long id, int flags) +{ + trace_mark_tp(ipc, shm_create, ipc_shm_create, probe_ipc_shm_create, + "id %ld flags %d", id, flags); +} + +void probe_ipc_call(void *data, unsigned int call, unsigned int first) +{ + trace_mark_tp(ipc, call, ipc_call, probe_ipc_call, + "call %u first %d", call, first); +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("IPC Tracepoint Probes"); diff --git a/deprecated/probes/jbd2-trace.c b/deprecated/probes/jbd2-trace.c new file mode 100644 index 00000000..3da32cd4 --- /dev/null +++ b/deprecated/probes/jbd2-trace.c @@ -0,0 +1,208 @@ +/* + * ltt/probes/jbd2-trace.c + * + * JBD2 tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include +#include +#include +#include + +#include "../ltt-tracer.h" + +static struct dentry *jbd2_filter_dentry, *jbd2_filter_dev_dentry; +static DEFINE_MUTEX(jbd2_filter_mutex); +/* Make sure we don't race between module exit and file write */ +static int module_exits; + +struct rcu_dev_filter { + struct rcu_head rcu; + char devname[NAME_MAX]; +}; + +static struct rcu_dev_filter *dev_filter; + +/* + * Probes are executed in rcu_sched read-side critical section. 
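+ * The dev_filter pointer is therefore only read through rcu_dereference(),
+ * and an old filter is freed with call_rcu_sched() after it has been
+ * unpublished, so a probe can never dereference a freed filter.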
+ */ +static int do_filter(const char *dev) +{ + struct rcu_dev_filter *ldev_filter = rcu_dereference(dev_filter); + + if (unlikely(ldev_filter)) + if (unlikely(strcmp(ldev_filter->devname, dev))) + return 0; + return 1; +} + +void probe_jbd2_checkpoint(void *data, journal_t *journal, int result) +{ + if (unlikely(!do_filter(journal->j_devname))) + return; + trace_mark_tp(jbd2, checkpoint, jbd2_checkpoint, + probe_jbd2_checkpoint, "dev %s need_checkpoint %d", + journal->j_devname, result); +} + +void probe_jbd2_start_commit(void *data, journal_t *journal, + transaction_t *commit_transaction) +{ + if (unlikely(!do_filter(journal->j_devname))) + return; + trace_mark_tp(jbd2, start_commit, jbd2_start_commit, + probe_jbd2_start_commit, "dev %s transaction %d", + journal->j_devname, commit_transaction->t_tid); +} + +void probe_jbd2_end_commit(void *data, journal_t *journal, + transaction_t *commit_transaction) +{ + if (unlikely(!do_filter(journal->j_devname))) + return; + trace_mark_tp(jbd2, end_commit, jbd2_end_commit, + probe_jbd2_end_commit, "dev %s transaction %d head %d", + journal->j_devname, commit_transaction->t_tid, + journal->j_tail_sequence); +} + +static void free_dev_filter(struct rcu_head *head) +{ + kfree(container_of(head, struct rcu_dev_filter, rcu)); +} + +static ssize_t filter_op_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int err = 0; + char buf[NAME_MAX]; + int buf_size; + char name[NAME_MAX]; + struct rcu_dev_filter *new, *old; + + mutex_lock(&jbd2_filter_mutex); + if (module_exits) { + err = -EPERM; + goto error; + } + buf_size = min(count, sizeof(buf) - 1); + err = copy_from_user(buf, user_buf, buf_size); + if (err) + goto error; + buf[buf_size] = 0; + + if (sscanf(buf, "%s", name) != 1) { + err = -EPERM; + goto error; + } + + old = dev_filter; + + /* Empty string or * means all active */ + if (name[0] == '\0' || (name[0] == '*' && name[1] == '\0')) { + new = NULL; + } else { + new = kmalloc(sizeof(*new), GFP_KERNEL); + strcpy(new->devname, name); + } + + rcu_assign_pointer(dev_filter, new); + if (old) + call_rcu_sched(&old->rcu, free_dev_filter); + + mutex_unlock(&jbd2_filter_mutex); + return count; + +error: + mutex_unlock(&jbd2_filter_mutex); + return err; +} + +static ssize_t filter_op_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + ssize_t bcount; + const char *devname; + + mutex_lock(&jbd2_filter_mutex); + if (!dev_filter) + devname = "*"; + else + devname = dev_filter->devname; + bcount = simple_read_from_buffer(buffer, count, ppos, + devname, strlen(devname)); + mutex_unlock(&jbd2_filter_mutex); + return bcount; +} + +static struct file_operations jbd2_file_operations = { + .write = filter_op_write, + .read = filter_op_read, +}; + +static void release_filter_dev(void) +{ + struct rcu_dev_filter *old; + + mutex_lock(&jbd2_filter_mutex); + module_exits = 1; + old = dev_filter; + rcu_assign_pointer(dev_filter, NULL); + if (old) + call_rcu_sched(&old->rcu, free_dev_filter); + mutex_unlock(&jbd2_filter_mutex); +} + +static int __init filter_init(void) +{ + struct dentry *filter_root_dentry; + int err = 0; + + filter_root_dentry = get_filter_root(); + if (!filter_root_dentry) { + err = -ENOENT; + goto end; + } + + jbd2_filter_dentry = debugfs_create_dir("jbd2", filter_root_dentry); + + if (IS_ERR(jbd2_filter_dentry) || !jbd2_filter_dentry) { + printk(KERN_ERR "Failed to create jbd2 filter file\n"); + err = -ENOMEM; + goto end; + } + + jbd2_filter_dev_dentry = debugfs_create_file("dev", 
S_IWUSR,
+		jbd2_filter_dentry, NULL, &jbd2_file_operations);
+	if (IS_ERR(jbd2_filter_dev_dentry) || !jbd2_filter_dev_dentry) {
+		printk(KERN_ERR "Failed to create jbd2 dev filter file\n");
+		err = -ENOMEM;
+		goto release_filter_dentry;
+	}
+
+	goto end;
+
+release_filter_dentry:
+	debugfs_remove(jbd2_filter_dentry);
+	release_filter_dev();
+end:
+	return err;
+}
+
+static void __exit filter_exit(void)
+{
+	debugfs_remove(jbd2_filter_dev_dentry);
+	debugfs_remove(jbd2_filter_dentry);
+	release_filter_dev();
+}
+
+module_init(filter_init);
+module_exit(filter_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers");
+MODULE_DESCRIPTION("JBD2 Tracepoint Probes");
diff --git a/deprecated/probes/kernel-trace.c b/deprecated/probes/kernel-trace.c
new file mode 100644
index 00000000..cabe60e1
--- /dev/null
+++ b/deprecated/probes/kernel-trace.c
@@ -0,0 +1,581 @@
+/*
+ * ltt/probes/kernel-trace.c
+ *
+ * kernel tracepoint probes.
+ *
+ * (C) Copyright 2009 - Mathieu Desnoyers
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "../ltt-tracer.h"
+#include "../ltt-type-serializer.h"
+
+/*
+ * This should probably be added to s390.
+ */
+#ifdef CONFIG_S390
+static struct pt_regs *get_irq_regs(void)
+{
+	return task_pt_regs(current);
+}
+#endif
+
+/*
+ * FIXME :
+ * currently, the specialized tracepoint probes cannot call into other marker
+ * probes, such as ftrace enable/disable. Given we want them to be as fast as
+ * possible, it might not be so bad to lose this flexibility. But that means
+ * such probes would have to connect to tracepoints on their own.
+ */
+
+/* kernel_irq_entry specialized tracepoint probe */
+
+void probe_irq_entry(void *_data, unsigned int id, struct pt_regs *regs,
+	struct irqaction *action);
+
+DEFINE_MARKER_TP(kernel, irq_entry, irq_entry, probe_irq_entry,
+	"ip %lu handler %p irq_id #2u%u kernel_mode #1u%u");
+
+notrace void probe_irq_entry(void *_data, unsigned int id, struct pt_regs *regs,
+	struct irqaction *action)
+{
+	struct marker *marker;
+	struct serialize_long_long_short_char data;
+
+	if (unlikely(!regs))
+		regs = get_irq_regs();
+	if (likely(regs)) {
+		data.f1 = instruction_pointer(regs);
+		data.f4 = !user_mode(regs);
+	} else {
+		data.f1 = 0UL;
+		data.f4 = 1;
+	}
+	data.f2 = (unsigned long) (action ? action->handler : NULL);
+	data.f3 = id;
+
+	marker = &GET_MARKER(kernel, irq_entry);
+	ltt_specialized_trace(marker, marker->single.probe_private,
+		&data, serialize_sizeof(data), sizeof(long));
+}
+
+void probe_irq_next_handler(void *_data, unsigned int id, struct irqaction *action,
+	irqreturn_t prev_ret);
+
+DEFINE_MARKER_TP(kernel, irq_next_handler, irq_next_handler,
+	probe_irq_next_handler,
+	"handler %p prev_ret #1u%u");
+
+notrace void probe_irq_next_handler(void *_data, unsigned int id, struct irqaction *action,
+	irqreturn_t prev_ret)
+{
+	struct marker *marker;
+	struct serialize_long_char data;
+
+	data.f1 = (unsigned long) (action ?
action->handler : NULL); + data.f2 = prev_ret; + + marker = &GET_MARKER(kernel, irq_next_handler); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +/* kernel_irq_exit specialized tracepoint probe */ + +void probe_irq_exit(void *_data, irqreturn_t retval); + +DEFINE_MARKER_TP(kernel, irq_exit, irq_exit, probe_irq_exit, + "handled #1u%u"); + +notrace void probe_irq_exit(void *_data, irqreturn_t retval) +{ + struct marker *marker; + unsigned char data; + + data = IRQ_RETVAL(retval); + + marker = &GET_MARKER(kernel, irq_exit); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, sizeof(data), sizeof(data)); +} + +/* kernel_softirq_entry specialized tracepoint probe */ + +void probe_softirq_entry(void *_data, struct softirq_action *h, + struct softirq_action *softirq_vec); + +DEFINE_MARKER_TP(kernel, softirq_entry, softirq_entry, + probe_softirq_entry, "softirq_id #1u%lu"); + +notrace void probe_softirq_entry(void *_data, struct softirq_action *h, + struct softirq_action *softirq_vec) +{ + struct marker *marker; + unsigned char data; + + data = ((unsigned long)h - (unsigned long)softirq_vec) / sizeof(*h); + + marker = &GET_MARKER(kernel, softirq_entry); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, sizeof(data), sizeof(data)); +} + +/* kernel_softirq_exit specialized tracepoint probe */ + +void probe_softirq_exit(void *_data, struct softirq_action *h, + struct softirq_action *softirq_vec); + +DEFINE_MARKER_TP(kernel, softirq_exit, softirq_exit, + probe_softirq_exit, "softirq_id #1u%lu"); + +notrace void probe_softirq_exit(void *_data, struct softirq_action *h, + struct softirq_action *softirq_vec) +{ + struct marker *marker; + unsigned char data; + + data = ((unsigned long)h - (unsigned long)softirq_vec) / sizeof(*h); + + marker = &GET_MARKER(kernel, softirq_exit); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, sizeof(data), sizeof(data)); +} + +/* kernel_softirq_raise specialized tracepoint probe */ + +void probe_softirq_raise(void *_data, unsigned int nr); + +DEFINE_MARKER_TP(kernel, softirq_raise, softirq_raise, + probe_softirq_raise, "softirq_id #1u%u"); + +notrace void probe_softirq_raise(void *_data, unsigned int nr) +{ + struct marker *marker; + unsigned char data; + + data = nr; + + marker = &GET_MARKER(kernel, softirq_raise); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, sizeof(data), sizeof(data)); +} + +/* Standard probes */ +void probe_irq_tasklet_low_entry(void *_data, struct tasklet_struct *t) +{ + trace_mark_tp(kernel, tasklet_low_entry, irq_tasklet_low_entry, + probe_irq_tasklet_low_entry, "func %p data %lu", + t->func, t->data); +} + +void probe_irq_tasklet_low_exit(void *_data, struct tasklet_struct *t) +{ + trace_mark_tp(kernel, tasklet_low_exit, irq_tasklet_low_exit, + probe_irq_tasklet_low_exit, "func %p data %lu", + t->func, t->data); +} + +void probe_irq_tasklet_high_entry(void *_data, struct tasklet_struct *t) +{ + trace_mark_tp(kernel, tasklet_high_entry, irq_tasklet_high_entry, + probe_irq_tasklet_high_entry, "func %p data %lu", + t->func, t->data); +} + +void probe_irq_tasklet_high_exit(void *_data, struct tasklet_struct *t) +{ + trace_mark_tp(kernel, tasklet_high_exit, irq_tasklet_high_exit, + probe_irq_tasklet_high_exit, "func %p data %lu", + t->func, t->data); +} + +void probe_sched_kthread_stop(void *_data, struct task_struct *t) +{ + trace_mark_tp(kernel, kthread_stop, sched_kthread_stop, + 
probe_sched_kthread_stop, "pid %d", t->pid); +} + +void probe_sched_kthread_stop_ret(void *_data, int ret) +{ + trace_mark_tp(kernel, kthread_stop_ret, sched_kthread_stop_ret, + probe_sched_kthread_stop_ret, "ret %d", ret); +} + +void probe_sched_wait_task(void *_data, struct task_struct *p) +{ + trace_mark_tp(kernel, sched_wait_task, sched_wait_task, + probe_sched_wait_task, "pid %d state #2d%ld", + p->pid, p->state); +} + +/* kernel_sched_try_wakeup specialized tracepoint probe */ + +void probe_sched_wakeup(void *_data, struct task_struct *p, int success); + +DEFINE_MARKER_TP(kernel, sched_try_wakeup, sched_wakeup, + probe_sched_wakeup, "pid %d cpu_id %u state #2d%ld"); + +notrace void probe_sched_wakeup(void *_data, struct task_struct *p, int success) +{ + struct marker *marker; + struct serialize_int_int_short data; + + data.f1 = p->pid; + data.f2 = task_cpu(p); + data.f3 = p->state; + + marker = &GET_MARKER(kernel, sched_try_wakeup); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(int)); +} + +void probe_sched_wakeup_new(void *_data, struct task_struct *p, int success) +{ + trace_mark_tp(kernel, sched_wakeup_new_task, sched_wakeup_new, + probe_sched_wakeup_new, "pid %d state #2d%ld cpu_id %u", + p->pid, p->state, task_cpu(p)); +} + +/* kernel_sched_schedule specialized tracepoint probe */ + +void probe_sched_switch(void *_data, struct task_struct *prev, + struct task_struct *next); + +DEFINE_MARKER_TP(kernel, sched_schedule, sched_switch, probe_sched_switch, + "prev_pid %d next_pid %d prev_state #2d%ld"); + +notrace void probe_sched_switch(void *_data, struct task_struct *prev, + struct task_struct *next) +{ + struct marker *marker; + struct serialize_int_int_short data; + + data.f1 = prev->pid; + data.f2 = next->pid; + data.f3 = prev->state; + + marker = &GET_MARKER(kernel, sched_schedule); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(int)); +} + +void probe_sched_migrate_task(void *_data, struct task_struct *p, int dest_cpu) +{ + trace_mark_tp(kernel, sched_migrate_task, sched_migrate_task, + probe_sched_migrate_task, "pid %d state #2d%ld dest_cpu %d", + p->pid, p->state, dest_cpu); +} + +void probe_sched_signal_send(void *_data, int sig, struct siginfo *info, struct task_struct *t) +{ + trace_mark_tp(kernel, send_signal, signal_generate, + probe_sched_signal_send, "pid %d signal %d", t->pid, sig); +} + +void probe_sched_process_free(void *_data, struct task_struct *p) +{ + trace_mark_tp(kernel, process_free, sched_process_free, + probe_sched_process_free, "pid %d", p->pid); +} + +void probe_sched_process_exit(void *_data, struct task_struct *p) +{ + trace_mark_tp(kernel, process_exit, sched_process_exit, + probe_sched_process_exit, "pid %d", p->pid); +} + +void probe_sched_process_wait(void *_data, struct pid *pid) +{ + trace_mark_tp(kernel, process_wait, sched_process_wait, + probe_sched_process_wait, "pid %d", pid_nr(pid)); +} + +void probe_sched_process_fork(void *_data, struct task_struct *parent, + struct task_struct *child) +{ + trace_mark_tp(kernel, process_fork, sched_process_fork, + probe_sched_process_fork, + "parent_pid %d child_pid %d child_tgid %d", + parent->pid, child->pid, child->tgid); +} + +void probe_sched_kthread_create(void *_data, void *fn, int pid) +{ + trace_mark_tp(kernel, kthread_create, sched_kthread_create, + probe_sched_kthread_create, + "fn %p pid %d", fn, pid); +} + +void probe_timer_itimer_expired(void *_data, struct signal_struct *sig) +{ + 
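/* Log the pid of the thread group leader whose interval timer expired. */
+	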
trace_mark_tp(kernel, timer_itimer_expired, timer_itimer_expired, + probe_timer_itimer_expired, "pid %d", + pid_nr(sig->leader_pid)); +} + +void probe_timer_itimer_set(void *_data, int which, struct itimerval *value) +{ + trace_mark_tp(kernel, timer_itimer_set, + timer_itimer_set, probe_timer_itimer_set, + "which %d interval_sec %ld interval_usec %ld " + "value_sec %ld value_usec %ld", + which, + value->it_interval.tv_sec, + value->it_interval.tv_usec, + value->it_value.tv_sec, + value->it_value.tv_usec); +} + +/* kernel_timer_set specialized tracepoint probe */ + +void probe_timer_set(void *_data, struct timer_list *timer); + +DEFINE_MARKER_TP(kernel, timer_set, timer_set, probe_timer_set, + "expires %lu function %p data %lu"); + +notrace void probe_timer_set(void *_data, struct timer_list *timer) +{ + struct marker *marker; + struct serialize_long_long_long data; + + data.f1 = timer->expires; + data.f2 = (unsigned long)timer->function; + data.f3 = timer->data; + + marker = &GET_MARKER(kernel, timer_set); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +void probe_timer_update_time(void *_data, struct timespec *_xtime, + struct timespec *_wall_to_monotonic) +{ + trace_mark_tp(kernel, timer_update_time, timer_update_time, + probe_timer_update_time, + "jiffies #8u%llu xtime_sec %ld xtime_nsec %ld " + "walltomonotonic_sec %ld walltomonotonic_nsec %ld", + (unsigned long long)jiffies_64, _xtime->tv_sec, _xtime->tv_nsec, + _wall_to_monotonic->tv_sec, _wall_to_monotonic->tv_nsec); +} + +void probe_timer_timeout(void *_data, struct task_struct *p) +{ + trace_mark_tp(kernel, timer_timeout, timer_timeout, + probe_timer_timeout, "pid %d", p->pid); +} + +void probe_kernel_printk(void *_data, unsigned long retaddr) +{ + trace_mark_tp(kernel, printk, kernel_printk, + probe_kernel_printk, "ip 0x%lX", retaddr); +} + +void probe_kernel_vprintk(void *_data, unsigned long retaddr, char *buf, int len) +{ + if (len > 0) { + unsigned int loglevel; + int mark_len; + char *mark_buf; + char saved_char; + + if (buf[0] == '<' && buf[1] >= '0' && + buf[1] <= '7' && buf[2] == '>') { + loglevel = buf[1] - '0'; + mark_buf = &buf[3]; + mark_len = len - 3; + } else { + loglevel = default_message_loglevel; + mark_buf = buf; + mark_len = len; + } + if (mark_buf[mark_len - 1] == '\n') + mark_len--; + saved_char = mark_buf[mark_len]; + mark_buf[mark_len] = '\0'; + trace_mark_tp(kernel, vprintk, kernel_vprintk, + probe_kernel_vprintk, + "loglevel #1u%u string %s ip 0x%lX", + loglevel, mark_buf, retaddr); + mark_buf[mark_len] = saved_char; + } +} + +#ifdef CONFIG_MODULES +void probe_kernel_module_free(void *_data, struct module *mod) +{ + trace_mark_tp(kernel, module_free, kernel_module_free, + probe_kernel_module_free, "name %s", mod->name); +} + +void probe_kernel_module_load(void *_data, struct module *mod) +{ + trace_mark_tp(kernel, module_load, kernel_module_load, + probe_kernel_module_load, "name %s", mod->name); +} +#endif + +void probe_kernel_panic(void *_data, const char *fmt, va_list args) +{ + char info[64]; + vsnprintf(info, sizeof(info), fmt, args); + trace_mark_tp(kernel, panic, kernel_panic, probe_kernel_panic, + "info %s", info); +} + +void probe_kernel_kernel_kexec(void *_data, struct kimage *image) +{ + trace_mark_tp(kernel, kernel_kexec, kernel_kernel_kexec, + probe_kernel_kernel_kexec, "image %p", image); +} + +void probe_kernel_crash_kexec(void *_data, struct kimage *image, struct pt_regs *regs) +{ + trace_mark_tp(kernel, crash_kexec, 
kernel_crash_kexec, + probe_kernel_crash_kexec, "image %p ip %p", image, + regs ? (void *)instruction_pointer(regs) : NULL); +} + +/* kernel_page_fault_entry specialized tracepoint probe */ + +void probe_kernel_page_fault_entry(void *_data, struct pt_regs *regs, int trapnr, + struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, int write_access); + +DEFINE_MARKER_TP(kernel, page_fault_entry, page_fault_entry, + probe_kernel_page_fault_entry, + "ip #p%lu address #p%lu trap_id #2u%u write_access #1u%u"); + +notrace void probe_kernel_page_fault_entry(void *_data, struct pt_regs *regs, int trapnr, + struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, int write_access) +{ + struct marker *marker; + struct serialize_long_long_short_char data; + + if (likely(regs)) + data.f1 = instruction_pointer(regs); + else + data.f1 = 0UL; + data.f2 = address; + data.f3 = (unsigned short)trapnr; + data.f4 = (unsigned char)!!write_access; + + marker = &GET_MARKER(kernel, page_fault_entry); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +/* kernel_page_fault_exit specialized tracepoint probe */ + +void probe_kernel_page_fault_exit(void *_data, int res); + +DEFINE_MARKER_TP(kernel, page_fault_exit, page_fault_exit, + probe_kernel_page_fault_exit, + "res %d"); + +notrace void probe_kernel_page_fault_exit(void *_data, int res) +{ + struct marker *marker; + + marker = &GET_MARKER(kernel, page_fault_exit); + ltt_specialized_trace(marker, marker->single.probe_private, + &res, sizeof(res), sizeof(res)); +} + +/* kernel_page_fault_nosem_entry specialized tracepoint probe */ + +void probe_kernel_page_fault_nosem_entry(void *_data, struct pt_regs *regs, + int trapnr, unsigned long address); + +DEFINE_MARKER_TP(kernel, page_fault_nosem_entry, page_fault_nosem_entry, + probe_kernel_page_fault_nosem_entry, + "ip #p%lu address #p%lu trap_id #2u%u"); + +notrace void probe_kernel_page_fault_nosem_entry(void *_data, struct pt_regs *regs, + int trapnr, unsigned long address) +{ + struct marker *marker; + struct serialize_long_long_short data; + + if (likely(regs)) + data.f1 = instruction_pointer(regs); + else + data.f1 = 0UL; + data.f2 = address; + data.f3 = (unsigned short)trapnr; + + marker = &GET_MARKER(kernel, page_fault_nosem_entry); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +/* kernel_page_fault_nosem_exit specialized tracepoint probe */ + +void probe_kernel_page_fault_nosem_exit(void *_data, int res); + +DEFINE_MARKER_TP(kernel, page_fault_nosem_exit, page_fault_nosem_exit, + probe_kernel_page_fault_nosem_exit, + MARK_NOARGS); + +notrace void probe_kernel_page_fault_nosem_exit(void *_data, int res) +{ + struct marker *marker; + + marker = &GET_MARKER(kernel, page_fault_nosem_exit); + ltt_specialized_trace(marker, marker->single.probe_private, + NULL, 0, 0); +} + +/* kernel_page_fault_get_user_entry specialized tracepoint probe */ + +void probe_kernel_page_fault_get_user_entry(void *_data, struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, int write_access); + +DEFINE_MARKER_TP(kernel, page_fault_get_user_entry, page_fault_get_user_entry, + probe_kernel_page_fault_get_user_entry, + "address #p%lu write_access #1u%u"); + +notrace void probe_kernel_page_fault_get_user_entry(void *_data, struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, int write_access) +{ + struct marker *marker; + struct 
serialize_long_char data; + + data.f1 = address; + data.f2 = (unsigned char)!!write_access; + + marker = &GET_MARKER(kernel, page_fault_get_user_entry); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +/* kernel_page_fault_get_user_exit specialized tracepoint probe */ + +void probe_kernel_page_fault_get_user_exit(void *_data, int res); + +DEFINE_MARKER_TP(kernel, page_fault_get_user_exit, page_fault_get_user_exit, + probe_kernel_page_fault_get_user_exit, + "res %d"); + +notrace void probe_kernel_page_fault_get_user_exit(void *_data, int res) +{ + struct marker *marker; + + marker = &GET_MARKER(kernel, page_fault_get_user_exit); + ltt_specialized_trace(marker, marker->single.probe_private, + &res, sizeof(res), sizeof(res)); +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("kernel Tracepoint Probes"); diff --git a/deprecated/probes/lockdep-trace.c b/deprecated/probes/lockdep-trace.c new file mode 100644 index 00000000..a9a77344 --- /dev/null +++ b/deprecated/probes/lockdep-trace.c @@ -0,0 +1,60 @@ +/* + * ltt/probes/lockdep-trace.c + * + * lockdep tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include +#include + +void probe_lockdep_hardirqs_on(void *data, unsigned long retaddr) +{ + trace_mark_tp(lockdep, hardirqs_on, lockdep_hardirqs_on, + probe_lockdep_hardirqs_on, "retaddr 0x%lX", retaddr); +} + +void probe_lockdep_hardirqs_off(void *data, unsigned long retaddr) +{ + trace_mark_tp(lockdep, hardirqs_off, lockdep_hardirqs_off, + probe_lockdep_hardirqs_off, "retaddr 0x%lX", retaddr); +} + +void probe_lockdep_softirqs_on(void *data, unsigned long retaddr) +{ + trace_mark_tp(lockdep, softirqs_on, lockdep_softirqs_on, + probe_lockdep_softirqs_on, "retaddr 0x%lX", retaddr); +} + +void probe_lockdep_softirqs_off(void *data, unsigned long retaddr) +{ + trace_mark_tp(lockdep, softirqs_off, lockdep_softirqs_off, + probe_lockdep_softirqs_off, "retaddr 0x%lX", retaddr); +} + +void probe_lockdep_lock_acquire(void *data, unsigned long retaddr, + unsigned int subclass, struct lockdep_map *lock, int trylock, + int read, int hardirqs_off) +{ + trace_mark_tp(lockdep, lock_acquire, lockdep_lock_acquire, + probe_lockdep_lock_acquire, + "retaddr 0x%lX subclass %u lock %p trylock %d read %d " + "hardirqs_off %d", + retaddr, subclass, lock, trylock, read, hardirqs_off); +} + +void probe_lockdep_lock_release(void *data, unsigned long retaddr, + struct lockdep_map *lock, int nested) +{ + trace_mark_tp(lockdep, lock_release, lockdep_lock_release, + probe_lockdep_lock_release, + "retaddr 0x%lX lock %p nested %d", + retaddr, lock, nested); +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("lockdep Tracepoint Probes"); diff --git a/deprecated/probes/mm-trace.c b/deprecated/probes/mm-trace.c new file mode 100644 index 00000000..935e366c --- /dev/null +++ b/deprecated/probes/mm-trace.c @@ -0,0 +1,146 @@ +/* + * ltt/probes/mm-trace.c + * + * MM tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. 
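+ *
+ * page_alloc and page_free use specialized probes that record the page
+ * frame number and allocation order; the other memory events are logged
+ * with trace_mark_tp().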
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "../ltt-type-serializer.h" + +void probe_wait_on_page_start(void *_data, struct page *page, int bit_nr) +{ + trace_mark_tp(mm, wait_on_page_start, wait_on_page_start, + probe_wait_on_page_start, "pfn %lu bit_nr %d", + page_to_pfn(page), bit_nr); +} + +void probe_wait_on_page_end(void *_data, struct page *page, int bit_nr) +{ + trace_mark_tp(mm, wait_on_page_end, wait_on_page_end, + probe_wait_on_page_end, "pfn %lu bit_nr %d", + page_to_pfn(page), bit_nr); +} + +void probe_hugetlb_page_free(void *_data, struct page *page) +{ + trace_mark_tp(mm, huge_page_free, hugetlb_page_free, + probe_hugetlb_page_free, "pfn %lu", page_to_pfn(page)); +} + +void probe_hugetlb_page_alloc(void *_data, struct page *page) +{ + if (page) + trace_mark_tp(mm, huge_page_alloc, hugetlb_page_alloc, + probe_hugetlb_page_alloc, "pfn %lu", page_to_pfn(page)); +} + +/* mm_page_free specialized tracepoint probe */ + +void probe_page_free(void *_data, struct page *page, unsigned int order); + +DEFINE_MARKER_TP(mm, page_free, page_free, probe_page_free, + "pfn %lu order %u"); + +notrace void probe_page_free(void *_data, struct page *page, unsigned int order) +{ + struct marker *marker; + struct serialize_long_int data; + + data.f1 = page_to_pfn(page); + data.f2 = order; + + marker = &GET_MARKER(mm, page_free); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +/* mm_page_alloc specialized tracepoint probe */ + +void probe_page_alloc(void *_data, struct page *page, unsigned int order); + +DEFINE_MARKER_TP(mm, page_alloc, page_alloc, probe_page_alloc, + "pfn %lu order %u"); + +notrace void probe_page_alloc(void *_data, struct page *page, unsigned int order) +{ + struct marker *marker; + struct serialize_long_int data; + + if (unlikely(!page)) + return; + + data.f1 = page_to_pfn(page); + data.f2 = order; + + marker = &GET_MARKER(mm, page_alloc); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +#ifdef CONFIG_SWAP +void probe_swap_in(void *_data, struct page *page, swp_entry_t entry) +{ + trace_mark_tp(mm, swap_in, swap_in, probe_swap_in, + "pfn %lu filp %p offset %lu", + page_to_pfn(page), + get_swap_info_struct(swp_type(entry))->swap_file, + swp_offset(entry)); +} + +void probe_swap_out(void *_data, struct page *page) +{ + trace_mark_tp(mm, swap_out, swap_out, probe_swap_out, + "pfn %lu filp %p offset %lu", + page_to_pfn(page), + get_swap_info_struct(swp_type( + page_swp_entry(page)))->swap_file, + swp_offset(page_swp_entry(page))); +} + +void probe_swap_file_close(void *_data, struct file *file) +{ + trace_mark_tp(mm, swap_file_close, swap_file_close, + probe_swap_file_close, "filp %p", file); +} + +void probe_swap_file_open(void *_data, struct file *file, char *filename) +{ + trace_mark_tp(mm, swap_file_open, swap_file_open, + probe_swap_file_open, "filp %p filename %s", + file, filename); +} +#endif + +void probe_add_to_page_cache(void *_data, struct address_space *mapping, pgoff_t offset) +{ + trace_mark_tp(mm, add_to_page_cache, add_to_page_cache, + probe_add_to_page_cache, + "inode %lu sdev %u", + mapping->host->i_ino, mapping->host->i_sb->s_dev); +} + +void probe_remove_from_page_cache(void *_data, struct address_space *mapping) +{ + trace_mark_tp(mm, remove_from_page_cache, remove_from_page_cache, + probe_remove_from_page_cache, + "inode %lu sdev %u", + 
mapping->host->i_ino, mapping->host->i_sb->s_dev); +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("MM Tracepoint Probes"); diff --git a/deprecated/probes/net-extended-trace.c b/deprecated/probes/net-extended-trace.c new file mode 100644 index 00000000..15fc8109 --- /dev/null +++ b/deprecated/probes/net-extended-trace.c @@ -0,0 +1,146 @@ +/* + * ltt/probes/net-extended-trace.c + * + * Net tracepoint extended probes. + * + * These probes record many header fields from TCP and UDP messages. Here are + * the consequences of this: + * 1) it allows analyzing network traffic to provide some pcap-like + * functionality within LTTng + * 2) it allows offline synchronization of a group of concurrent traces + * recorded on different nodes + * 3) it increases tracing overhead + * + * You can leave out these probes or not activate them if you are not + * especially interested in the details of network traffic and do not wish to + * synchronize distributed traces. + * + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "../ltt-type-serializer.h" + +void probe_net_dev_xmit_extended(void *_data, struct sk_buff *skb); + +DEFINE_MARKER_TP(net, dev_xmit_extended, net_dev_xmit, + probe_net_dev_xmit_extended, "skb 0x%lX network_protocol #n2u%hu " + "transport_protocol #1u%u saddr #n4u%lu daddr #n4u%lu " + "tot_len #n2u%hu ihl #1u%u source #n2u%hu dest #n2u%hu seq #n4u%lu " + "ack_seq #n4u%lu doff #1u%u ack #1u%u rst #1u%u syn #1u%u fin #1u%u"); + +notrace void probe_net_dev_xmit_extended(void *_data, struct sk_buff *skb) +{ + struct marker *marker; + struct serialize_l214421224411111 data; + struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th = tcp_hdr(skb); + + data.f1 = (unsigned long)skb; + data.f2 = skb->protocol; + + if (ntohs(skb->protocol) == ETH_P_IP) { + data.f3 = ip_hdr(skb)->protocol; + data.f4 = iph->saddr; + data.f5 = iph->daddr; + data.f6 = iph->tot_len; + data.f7 = iph->ihl; + + if (data.f3 == IPPROTO_TCP) { + data.f8 = th->source; + data.f9 = th->dest; + data.f10 = th->seq; + data.f11 = th->ack_seq; + data.f12 = th->doff; + data.f13 = th->ack; + data.f14 = th->rst; + data.f15 = th->syn; + data.f16 = th->fin; + } + } + + marker = &GET_MARKER(net, dev_xmit_extended); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +void probe_tcpv4_rcv_extended(void *_data, struct sk_buff *skb); + +DEFINE_MARKER_TP(net, tcpv4_rcv_extended, net_tcpv4_rcv, + probe_tcpv4_rcv_extended, "skb 0x%lX saddr #n4u%lu daddr #n4u%lu " + "tot_len #n2u%hu ihl #1u%u source #n2u%hu dest #n2u%hu seq #n4u%lu " + "ack_seq #n4u%lu doff #1u%u ack #1u%u rst #1u%u syn #1u%u fin #1u%u"); + +notrace void probe_tcpv4_rcv_extended(void *_data, struct sk_buff *skb) +{ + struct marker *marker; + struct serialize_l4421224411111 data; + struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th = tcp_hdr(skb); + + data.f1 = (unsigned long)skb; + data.f2 = iph->saddr; + data.f3 = iph->daddr; + data.f4 = iph->tot_len; + data.f5 = iph->ihl; + data.f6 = th->source; + data.f7 = th->dest; + data.f8 = th->seq; + data.f9 = th->ack_seq; + data.f10 = th->doff; + data.f11 = th->ack; + data.f12 = th->rst; + data.f13 = th->syn; + data.f14 = th->fin; + + marker = &GET_MARKER(net, tcpv4_rcv_extended); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +void probe_udpv4_rcv_extended(void *_data, struct 
sk_buff *skb); + +DEFINE_MARKER_TP(net, udpv4_rcv_extended, net_udpv4_rcv, + probe_udpv4_rcv_extended, "skb 0x%lX saddr #n4u%lu daddr #n4u%lu " + "unicast #1u%u ulen #n2u%hu source #n2u%hu dest #n2u%hu " + "data_start #8u%lx"); + +notrace void probe_udpv4_rcv_extended(void *_data, struct sk_buff *skb) +{ + struct marker *marker; + struct serialize_l4412228 data; + struct iphdr *iph = ip_hdr(skb); + struct rtable *rt = skb_rtable(skb); + struct udphdr *uh = udp_hdr(skb); + + data.f1 = (unsigned long)skb; + data.f2 = iph->saddr; + data.f3 = iph->daddr; + data.f4 = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST) ? 0 : 1; + data.f5 = uh->len; + data.f6 = uh->source; + data.f7 = uh->dest; + /* UDP header has not been pulled from skb->data, read the first 8 + * bytes of UDP data if they are not in a fragment*/ + data.f8 = 0; + if (skb_headlen(skb) >= sizeof(struct udphdr) + 8) + data.f8 = *(unsigned long long *)(skb->data + sizeof(*uh)); + else if (skb_headlen(skb) >= sizeof(struct udphdr)) + memcpy(&data.f8, skb->data + sizeof(struct udphdr), + skb_headlen(skb) - sizeof(struct udphdr)); + + marker = &GET_MARKER(net, udpv4_rcv_extended); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(unsigned long long)); +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Benjamin Poirier"); +MODULE_DESCRIPTION("Net Tracepoint Extended Probes"); diff --git a/deprecated/probes/net-trace.c b/deprecated/probes/net-trace.c new file mode 100644 index 00000000..3124125d --- /dev/null +++ b/deprecated/probes/net-trace.c @@ -0,0 +1,406 @@ +/* + * ltt/probes/net-trace.c + * + * Net tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "../ltt-type-serializer.h" + +void probe_net_dev_xmit(void *_data, struct sk_buff *skb); + +DEFINE_MARKER_TP(net, dev_xmit, net_dev_xmit, probe_net_dev_xmit, + "skb %p protocol #n2u%hu"); + +notrace void probe_net_dev_xmit(void *_data, struct sk_buff *skb) +{ + struct marker *marker; + struct serialize_long_short data; + + data.f1 = (unsigned long)skb; + data.f2 = skb->protocol; + + marker = &GET_MARKER(net, dev_xmit); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +void probe_net_dev_receive(void *_data, struct sk_buff *skb); + +DEFINE_MARKER_TP(net, dev_receive, net_dev_receive, probe_net_dev_receive, + "skb %p protocol #n2u%hu"); + +notrace void probe_net_dev_receive(void *_data, struct sk_buff *skb) +{ + struct marker *marker; + struct serialize_long_short data; + + data.f1 = (unsigned long)skb; + data.f2 = skb->protocol; + + marker = &GET_MARKER(net, dev_receive); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +void probe_ipv4_addr_add(void *_data, struct in_ifaddr *ifa) +{ + trace_mark_tp(netif_state, insert_ifa_ipv4, ipv4_addr_add, + probe_ipv4_addr_add, "label %s address #4u%u", + ifa->ifa_label, (unsigned int)ifa->ifa_address); +} + +void probe_ipv4_addr_del(void *_data, struct in_ifaddr *ifa) +{ + trace_mark_tp(netif_state, del_ifa_ipv4, ipv4_addr_del, + probe_ipv4_addr_del, "label %s address #4u%u", + ifa->ifa_label, (unsigned int)ifa->ifa_address); +} + +void probe_ipv6_addr_add(void *_data, struct inet6_ifaddr *ifa) +{ + __u8 *addr = ifa->addr.s6_addr; + + trace_mark_tp(netif_state, insert_ifa_ipv6, ipv6_addr_add, + probe_ipv6_addr_add, + 
"label %s " + "a15 #1x%c a14 #1x%c a13 #1x%c a12 #1x%c " + "a11 #1x%c a10 #1x%c a9 #1x%c a8 #1x%c " + "a7 #1x%c a6 #1x%c a5 #1x%c a4 #1x%c " + "a3 #1x%c a2 #1x%c a1 #1x%c a0 #1x%c", + ifa->idev->dev->name, + addr[15], addr[14], addr[13], addr[12], + addr[11], addr[10], addr[9], addr[8], + addr[7], addr[6], addr[5], addr[4], + addr[3], addr[2], addr[1], addr[0]); +} + +void probe_ipv6_addr_del(void *_data, struct inet6_ifaddr *ifa) +{ + __u8 *addr = ifa->addr.s6_addr; + + trace_mark_tp(netif_state, insert_ifa_ipv6, ipv6_addr_del, + probe_ipv6_addr_del, + "label %s " + "a15 #1x%c a14 #1x%c a13 #1x%c a12 #1x%c " + "a11 #1x%c a10 #1x%c a9 #1x%c a8 #1x%c " + "a7 #1x%c a6 #1x%c a5 #1x%c a4 #1x%c " + "a3 #1x%c a2 #1x%c a1 #1x%c a0 #1x%c", + ifa->idev->dev->name, + addr[15], addr[14], addr[13], addr[12], + addr[11], addr[10], addr[9], addr[8], + addr[7], addr[6], addr[5], addr[4], + addr[3], addr[2], addr[1], addr[0]); +} + +void probe_socket_create(void *_data, int family, int type, int protocol, + struct socket *sock, int ret) +{ + trace_mark_tp(net, socket_create, socket_create, probe_socket_create, + "family %d type %d protocol %d sock %p ret %d", + family, type, protocol, sock, ret); +} + +void probe_socket_bind(void *_data, int fd, struct sockaddr __user *umyaddr, int addrlen, + int ret) +{ + trace_mark_tp(net, socket_bind, socket_bind, probe_socket_bind, + "fd %d umyaddr %p addrlen %d ret %d", + fd, umyaddr, addrlen, ret); +} + +void probe_socket_connect(void *_data, int fd, struct sockaddr __user *uservaddr, + int addrlen, int ret) +{ + trace_mark_tp(net, socket_connect, socket_connect, probe_socket_connect, + "fd %d uservaddr %p addrlen %d ret %d", + fd, uservaddr, addrlen, ret); +} + +void probe_socket_listen(void *_data, int fd, int backlog, int ret) +{ + trace_mark_tp(net, socket_listen, socket_listen, probe_socket_listen, + "fd %d backlog %d ret %d", + fd, backlog, ret); +} + +void probe_socket_accept(void *_data, int fd, struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags, int ret) +{ + trace_mark_tp(net, socket_accept, socket_accept, probe_socket_accept, + "fd %d upeer_sockaddr %p upeer_addrlen %p flags %d ret %d", + fd, upeer_sockaddr, upeer_addrlen, flags, ret); +} + +void probe_socket_getsockname(void *_data, int fd, struct sockaddr __user *usockaddr, + int __user *usockaddr_len, int ret) +{ + trace_mark_tp(net, socket_getsockname, socket_getsockname, + probe_socket_getsockname, + "fd %d usockaddr %p usockaddr_len %p ret %d", + fd, usockaddr, usockaddr_len, ret); +} + +void probe_socket_getpeername(void *_data, int fd, struct sockaddr __user *usockaddr, + int __user *usockaddr_len, int ret) +{ + trace_mark_tp(net, socket_getpeername, socket_getpeername, + probe_socket_getpeername, + "fd %d usockaddr %p usockaddr_len %p ret %d", + fd, usockaddr, usockaddr_len, ret); +} + +void probe_socket_socketpair(void *_data, int family, int type, int protocol, + int __user *usockvec, int ret) +{ + trace_mark_tp(net, socket_socketpair, socket_socketpair, + probe_socket_socketpair, + "family %d type %d protocol %d usockvec %p ret %d", + family, type, protocol, usockvec, ret); +} + +void probe_socket_sendmsg(void *_data, struct socket *sock, struct msghdr *msg, size_t size, + int ret); + +DEFINE_MARKER_TP(net, socket_sendmsg, net_socket_sendmsg, + probe_socket_sendmsg, + "sock %p msg %p size %zu ret %d"); + +notrace void probe_socket_sendmsg(void *_data, struct socket *sock, struct msghdr *msg, + size_t size, int ret) +{ + struct marker *marker; + struct 
serialize_long_long_sizet_int data; + + data.f1 = (unsigned long)sock; + data.f2 = (unsigned long)msg; + data.f3 = size; + data.f4 = ret; + + marker = &GET_MARKER(net, socket_sendmsg); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(size_t)); +} + +void probe_socket_recvmsg(void *_data, struct socket *sock, struct msghdr *msg, size_t size, + int flags, int ret); + +DEFINE_MARKER_TP(net, socket_recvmsg, net_socket_recvmsg, + probe_socket_recvmsg, + "sock %p msg %p size %zu flags %d ret %d"); + +notrace void probe_socket_recvmsg(void *_data, struct socket *sock, struct msghdr *msg, + size_t size, int flags, int ret) +{ + struct marker *marker; + struct serialize_long_long_sizet_int_int data; + + data.f1 = (unsigned long)sock; + data.f2 = (unsigned long)msg; + data.f3 = size; + data.f4 = flags; + data.f5 = ret; + + marker = &GET_MARKER(net, socket_recvmsg); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(size_t)); +} + +void probe_socket_setsockopt(void *_data, int fd, int level, int optname, + char __user *optval, int optlen, int ret) +{ + trace_mark_tp(net, socket_setsockopt, socket_setsockopt, + probe_socket_setsockopt, + "fd %d level %d optname %d optval %p optlen %d ret %d", + fd, level, optname, optval, optlen, ret); +} + +void probe_socket_getsockopt(void *_data, int fd, int level, int optname, + char __user *optval, int __user *optlen, int ret) +{ + trace_mark_tp(net, socket_getsockopt, socket_getsockopt, + probe_socket_getsockopt, + "fd %d level %d optname %d optval %p optlen %p ret %d", + fd, level, optname, optval, optlen, ret); +} + +void probe_socket_shutdown(void *_data, int fd, int how, int ret) +{ + trace_mark_tp(net, socket_shutdown, socket_shutdown, + probe_socket_shutdown, + "fd %d how %d ret %d", + fd, how, ret); +} + +void probe_socket_call(void *_data, int call, unsigned long a0) +{ + trace_mark_tp(net, socket_call, socket_call, probe_socket_call, + "call %d a0 %lu", call, a0); +} + +void probe_tcpv4_rcv(void *_data, struct sk_buff *skb); + +DEFINE_MARKER_TP(net, tcpv4_rcv, net_tcpv4_rcv, probe_tcpv4_rcv, + "skb %p"); + +notrace void probe_tcpv4_rcv(void *_data, struct sk_buff *skb) +{ + struct marker *marker; + + marker = &GET_MARKER(net, tcpv4_rcv); + ltt_specialized_trace(marker, marker->single.probe_private, + &skb, sizeof(skb), sizeof(skb)); +} + +void probe_udpv4_rcv(void *_data, struct sk_buff *skb); + +DEFINE_MARKER_TP(net, udpv4_rcv, net_udpv4_rcv, probe_udpv4_rcv, + "skb %p"); + +notrace void probe_udpv4_rcv(void *_data, struct sk_buff *skb) +{ + struct marker *marker; + + marker = &GET_MARKER(net, udpv4_rcv); + ltt_specialized_trace(marker, marker->single.probe_private, + &skb, sizeof(skb), sizeof(skb)); +} + +#ifdef CONFIG_NETPOLL +void probe_net_napi_schedule(void *_data, struct napi_struct *n); + +DEFINE_MARKER_TP(net, napi_schedule, net_napi_schedule, + probe_net_napi_schedule, + "napi_struct %p name %s"); + +notrace void probe_net_napi_schedule(void *_data, struct napi_struct *n) +{ + struct marker *marker; + struct serialize_long_ifname data; + size_t data_len = 0; + + data.f1 = (unsigned long)n; + data_len += sizeof(data.f1); + /* No need to align for strings */ + strcpy(data.f2, n->dev ? 
n->dev->name : ""); + data_len += strlen(data.f2) + 1; + + marker = &GET_MARKER(net, napi_schedule); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, data_len, sizeof(long)); +} + +void probe_net_napi_poll(void *_data, struct napi_struct *n); + +DEFINE_MARKER_TP(net, napi_poll, net_napi_poll, + probe_net_napi_poll, + "napi_struct %p name %s"); + +notrace void probe_net_napi_poll(void *_data, struct napi_struct *n) +{ + struct marker *marker; + struct serialize_long_ifname data; + size_t data_len = 0; + + data.f1 = (unsigned long)n; + data_len += sizeof(data.f1); + /* No need to align for strings */ + strcpy(data.f2, n->dev ? n->dev->name : ""); + data_len += strlen(data.f2) + 1; + + marker = &GET_MARKER(net, napi_poll); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, data_len, sizeof(long)); +} + +void probe_net_napi_complete(void *_data, struct napi_struct *n); + +DEFINE_MARKER_TP(net, napi_complete, net_napi_complete, + probe_net_napi_complete, + "napi_struct %p name %s"); + +notrace void probe_net_napi_complete(void *_data, struct napi_struct *n) +{ + struct marker *marker; + struct serialize_long_ifname data; + size_t data_len = 0; + + data.f1 = (unsigned long)n; + data_len += sizeof(data.f1); + /* No need to align for strings */ + strcpy(data.f2, n->dev ? n->dev->name : ""); + data_len += strlen(data.f2) + 1; + + marker = &GET_MARKER(net, napi_complete); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, data_len, sizeof(long)); +} +#else /* !CONFIG_NETPOLL */ +void probe_net_napi_schedule(void *_data, struct napi_struct *n); + +DEFINE_MARKER_TP(net, napi_schedule, net_napi_schedule, + probe_net_napi_schedule, + "napi_struct %p"); + +notrace void probe_net_napi_schedule(void *_data, struct napi_struct *n) +{ + struct marker *marker; + unsigned long data; + + data = (unsigned long)n; + + marker = &GET_MARKER(net, napi_schedule); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, sizeof(data), sizeof(data)); +} + +void probe_net_napi_poll(void *_data, struct napi_struct *n); + +DEFINE_MARKER_TP(net, napi_poll, net_napi_poll, + probe_net_napi_poll, + "napi_struct %p"); + +notrace void probe_net_napi_poll(void *_data, struct napi_struct *n) +{ + struct marker *marker; + unsigned long data; + + data = (unsigned long)n; + + marker = &GET_MARKER(net, napi_poll); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, sizeof(data), sizeof(data)); +} + +void probe_net_napi_complete(void *_data, struct napi_struct *n); + +DEFINE_MARKER_TP(net, napi_complete, net_napi_complete, + probe_net_napi_complete, + "napi_struct %p"); + +notrace void probe_net_napi_complete(void *_data, struct napi_struct *n) +{ + struct marker *marker; + unsigned long data; + + data = (unsigned long)n; + + marker = &GET_MARKER(net, napi_complete); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, sizeof(data), sizeof(data)); +} +#endif + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("Net Tracepoint Probes"); diff --git a/deprecated/probes/pm-trace.c b/deprecated/probes/pm-trace.c new file mode 100644 index 00000000..7abe8e37 --- /dev/null +++ b/deprecated/probes/pm-trace.c @@ -0,0 +1,43 @@ +/* + * ltt/probes/pm-trace.c + * + * Power Management tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. 
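+ *
+ * Every probe records a single "irqstate" field: the result of
+ * irqs_disabled() at the idle or suspend transition being traced.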
+ */ + +#include +#include + +void probe_pm_idle_entry(void *_data) +{ + trace_mark_tp(pm, idle_entry, pm_idle_entry, + probe_pm_idle_entry, "irqstate #1%d", + irqs_disabled()); +} + +void probe_pm_idle_exit(void *_data) +{ + trace_mark_tp(pm, idle_exit, pm_idle_exit, + probe_pm_idle_exit, "irqstate #1%d", + irqs_disabled()); +} + +void probe_pm_suspend_entry(void *_data) +{ + trace_mark_tp(pm, suspend_entry, pm_suspend_entry, + probe_pm_suspend_entry, "irqstate #1%d", + irqs_disabled()); +} + +void probe_pm_suspend_exit(void *_data) +{ + trace_mark_tp(pm, suspend_exit, pm_suspend_exit, + probe_pm_suspend_exit, "irqstate #1%d", + irqs_disabled()); +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("Power Management Tracepoint Probes"); diff --git a/deprecated/probes/rcu-trace.c b/deprecated/probes/rcu-trace.c new file mode 100644 index 00000000..cc164546 --- /dev/null +++ b/deprecated/probes/rcu-trace.c @@ -0,0 +1,36 @@ +/* + * ltt/probes/rcu-trace.c + * + * RCU tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include + +#ifdef CONFIG_TREE_RCU +void probe_rcu_tree_callback(void *data, struct rcu_head *head) +{ + trace_mark_tp(rcu, tree_callback, rcu_tree_callback, + probe_rcu_tree_callback, "func %p", head->func); +} + +void probe_rcu_tree_call_rcu(void *data, struct rcu_head *head, unsigned long ip) +{ + trace_mark_tp(rcu, tree_call_rcu, rcu_tree_call_rcu, + probe_rcu_tree_call_rcu, "func %p ip 0x%lX", head->func, ip); +} + +void probe_rcu_tree_call_rcu_bh(void *data, struct rcu_head *head, unsigned long ip) +{ + trace_mark_tp(rcu, tree_call_rcu_bh, rcu_tree_call_rcu_bh, + probe_rcu_tree_call_rcu_bh, "func %p ip 0x%lX", + head->func, ip); +} +#endif + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("RCU Tracepoint Probes"); diff --git a/deprecated/probes/syscall-trace.c b/deprecated/probes/syscall-trace.c new file mode 100644 index 00000000..9ae419fc --- /dev/null +++ b/deprecated/probes/syscall-trace.c @@ -0,0 +1,54 @@ +/* + * ltt/probes/syscall-trace.c + * + * System call tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. 
+ */ + +#include +#include + +#include "../ltt-type-serializer.h" + + +/* kernel_syscall_entry specialized tracepoint probe */ + +void probe_syscall_entry(void *_data, struct pt_regs *regs, long id); + +DEFINE_MARKER_TP(kernel, syscall_entry, syscall_entry, + probe_syscall_entry, "ip #p%ld syscall_id #2u%u"); + +notrace void probe_syscall_entry(void *_data, struct pt_regs *regs, long id) +{ + struct marker *marker; + struct serialize_long_short data; + + data.f1 = instruction_pointer(regs); + data.f2 = (unsigned short)id; + + marker = &GET_MARKER(kernel, syscall_entry); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +/* kernel_syscall_exit specialized tracepoint probe */ + +void probe_syscall_exit(void *_data, long ret); + +DEFINE_MARKER_TP(kernel, syscall_exit, syscall_exit, + probe_syscall_exit, "ret %ld"); + +notrace void probe_syscall_exit(void *_data, long ret) +{ + struct marker *marker; + + marker = &GET_MARKER(kernel, syscall_exit); + ltt_specialized_trace(marker, marker->single.probe_private, + &ret, sizeof(ret), sizeof(ret)); +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("syscall Tracepoint Probes"); diff --git a/deprecated/probes/trap-trace.c b/deprecated/probes/trap-trace.c new file mode 100644 index 00000000..397254cd --- /dev/null +++ b/deprecated/probes/trap-trace.c @@ -0,0 +1,56 @@ +/* + * ltt/probes/trap-trace.c + * + * Trap tracepoint probes. + * + * (C) Copyright 2009 - Mathieu Desnoyers + * Dual LGPL v2.1/GPL v2 license. + */ + +#include +#include + +#include "../ltt-type-serializer.h" + +/* kernel_trap_entry specialized tracepoint probe */ + +void probe_trap_entry(void *_data, struct pt_regs *regs, long id); + +DEFINE_MARKER_TP(kernel, trap_entry, trap_entry, + probe_trap_entry, "ip #p%ld trap_id #2u%u"); + +notrace void probe_trap_entry(void *_data, struct pt_regs *regs, long id) +{ + struct marker *marker; + struct serialize_long_short data; + + if (likely(regs)) + data.f1 = instruction_pointer(regs); + else + data.f1 = 0UL; + data.f2 = (unsigned short)id; + + marker = &GET_MARKER(kernel, trap_entry); + ltt_specialized_trace(marker, marker->single.probe_private, + &data, serialize_sizeof(data), sizeof(long)); +} + +/* kernel_syscall_exit specialized tracepoint probe */ + +void probe_trap_exit(void *_data); + +DEFINE_MARKER_TP(kernel, trap_exit, trap_exit, + probe_trap_exit, MARK_NOARGS); + +notrace void probe_trap_exit(void *_data) +{ + struct marker *marker; + + marker = &GET_MARKER(kernel, trap_exit); + ltt_specialized_trace(marker, marker->single.probe_private, + NULL, 0, 0); +} + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Mathieu Desnoyers"); +MODULE_DESCRIPTION("Trap Tracepoint Probes"); diff --git a/discard/ltt-ascii.c b/discard/ltt-ascii.c deleted file mode 100644 index b020fedb..00000000 --- a/discard/ltt-ascii.c +++ /dev/null @@ -1,583 +0,0 @@ -/* - * LTT ascii binary buffer to ascii converter. - * - * Copyright 2008 - 2009 Lai Jiangshan (laijs@cn.fujitsu.com) - * Copyright 2009 - Mathieu Desnoyers mathieu.desnoyers@polymtl.ca - * - * Dual LGPL v2.1/GPL v2 license. - */ - -/* - * TODO - * - * Move to new switch behavior: Wait for data for the duration of the - * timer interval + safety, if none is coming, consider that no activity occured - * in the buffer. - * - * Fix case when having a text file open and destroying trace. 
- * - * - Automate periodical switch: - * - * The debugfs file "switch_timer" receives a timer period as parameter - * (e.g. echo 100 > switch_timer) to activate the timer per channel. This can - * also be accessed through the internal API _before the trace session starts_. - * This timer will insure that we periodically have subbuffers to read, and - * therefore that the merge-sort does not wait endlessly for a subbuffer. - * - * - If a channel is switched and read without data, make sure it is still - * considered afterward (not removed from the queue). - * - * - Create a ascii/tracename/ALL file to merge-sort all active channels. - * - Create a ascii/tracename/README file to contain the text output legend. - * - Remove leading zeroes from timestamps. - * - Enhance pretty-printing to make sure all types used for addesses output in - * the form 0xAB00000000 (not decimal). This is true for %p and 0x%...X. - * - Hotplug support - */ - - - - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ltt-tracer.h" -#include "ltt-relay.h" -#include "ltt-relay-lockless.h" - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(fmt , a...) -#endif - -struct dentry *ltt_ascii_dir_dentry; -EXPORT_SYMBOL_GPL(ltt_ascii_dir_dentry); - -struct ltt_relay_iter; - -struct ltt_relay_cpu_iter { - /* cpu buffer information */ - struct ltt_chanbuf *buf; - struct ltt_relay_iter *iter; - int sb_ref; /* holding a reference to a subbuffer */ - long read_sb_offset; /* offset of the subbuffer read */ - - /* current event information */ - struct ltt_subbuffer_header *header; - long hdr_offset; /* event header offset */ - long payload_offset; /* event payload offset */ - u64 tsc; /* full 64-bits timestamp value */ - u32 data_size; - u16 chID; /* channel ID, const */ - u16 eID; -}; - -struct ltt_relay_iter { - struct ltt_relay_cpu_iter iter_cpu[NR_CPUS]; - struct ltt_chan *chan; - loff_t pos; - int cpu; - int nr_refs; -}; - -/* - * offset of 0 in subbuffer means "subbuf size" (filled subbuffer). 
- */ -static int is_subbuffer_offset_end(struct ltt_relay_cpu_iter *citer, - long offset) -{ - struct ltt_chan *chan = container_of(citer->buf->a.chan, - struct ltt_chan, a); - long sub_offset = SUBBUF_OFFSET(offset - 1, chan) + 1; - - return (sub_offset <= citer->header->data_size); -} - -static u64 calculate_tsc(u64 pre_tsc, u64 read_tsc, unsigned int rflags) -{ - u64 new_tsc = read_tsc; - - if (rflags != LTT_RFLAG_ID_SIZE_TSC) { - BUG_ON(read_tsc >> LTT_TSC_BITS); - - new_tsc = (pre_tsc & ~LTT_TSC_MASK) + read_tsc; - if (read_tsc < (pre_tsc & LTT_TSC_MASK)) - new_tsc += 1UL << LTT_TSC_BITS; - } - - return new_tsc; -} - -/* - * calculate payload offset */ -static inline long calculate_payload_offset(long offset, u16 chID, u16 eID) -{ - const char *fmt; - - if (!ltt_get_alignment()) - return offset; - - fmt = marker_get_fmt_from_id(chID, eID); - BUG_ON(!fmt); - - return offset + ltt_fmt_largest_align(offset, fmt); -} - -static void update_new_event(struct ltt_relay_cpu_iter *citer, long hdr_offset) -{ - u64 read_tsc; - unsigned int rflags; - long tmp_offset; - - WARN_ON_ONCE(hdr_offset != citer->hdr_offset); - - tmp_offset = ltt_read_event_header(&citer->buf->a, hdr_offset, - &read_tsc, &citer->data_size, - &citer->eID, &rflags); - citer->payload_offset = calculate_payload_offset(tmp_offset, - citer->chID, - citer->eID); - - citer->tsc = calculate_tsc(citer->tsc, read_tsc, rflags); -} - -static void update_event_size(struct ltt_relay_cpu_iter *citer, long hdr_offset) -{ - char output[1]; - const char *fmt; - size_t data_size; - - if (citer->data_size != INT_MAX) - return; - - fmt = marker_get_fmt_from_id(citer->chID, citer->eID); - BUG_ON(!fmt); - ltt_serialize_printf(citer->buf, citer->payload_offset, - &data_size, output, 0, fmt); - citer->data_size = data_size; -} - -static void update_cpu_iter(struct ltt_relay_cpu_iter *citer, long hdr_offset) -{ - if (unlikely((!citer->sb_ref) - || is_subbuffer_offset_end(citer, hdr_offset))) { - citer->header = NULL; - return; - } - update_new_event(citer, hdr_offset); - update_event_size(citer, hdr_offset); -} - -/* - * returns 0 if we get a subbuffer reference. - * else, the buffer has not available data, try again later. 
- */ -static int subbuffer_start(struct ltt_relay_cpu_iter *citer, long *offset) -{ - int ret; - struct ltt_relay_iter *iter = citer->iter; - - ret = ltt_chanbuf_get_subbuf(citer->buf, offset); - if (!ret) { - citer->header = ltt_relay_read_offset_address(&citer->buf->a, - *offset); - citer->hdr_offset = (*offset) + ltt_sb_header_size(); - citer->tsc = citer->header->cycle_count_begin; - iter->nr_refs++; - citer->sb_ref = 1; - return 0; - } else { - if (ltt_chanbuf_is_finalized(citer->buf)) - return -ENODATA; - else - return -EAGAIN; - } -} - -static void subbuffer_stop(struct ltt_relay_cpu_iter *citer, - long offset) -{ - int ret; - struct ltt_relay_iter *iter = citer->iter; - - WARN_ON_ONCE(!citer->sb_ref); - ret = ltt_chanbuf_put_subbuf(citer->buf, offset); - WARN_ON_ONCE(ret); - citer->sb_ref = 0; - iter->nr_refs--; -} - -static void ltt_relay_advance_cpu_iter(struct ltt_relay_cpu_iter *citer) -{ - long old_offset = citer->payload_offset; - long new_offset = citer->payload_offset; - int ret; - - /* find that whether we read all data in this subbuffer */ - if (unlikely(is_subbuffer_offset_end(citer, - old_offset + citer->data_size))) { - DEBUGP(KERN_DEBUG "LTT ASCII stop cpu %d offset %lX\n", - citer->buf->a.cpu, citer->read_sb_offset); - subbuffer_stop(citer, citer->read_sb_offset); - for (;;) { - ret = subbuffer_start(citer, &citer->read_sb_offset); - DEBUGP(KERN_DEBUG - "LTT ASCII start cpu %d ret %d offset %lX\n", - citer->buf->a.cpu, ret, citer->read_sb_offset); - if (!ret || ret == -ENODATA) { - break; /* got data, or finalized */ - } else { /* -EAGAIN */ - if (signal_pending(current)) - break; - schedule_timeout_interruptible(1); - //TODO: check for no-data delay. take ref. break - } - } - } else { - new_offset += citer->data_size; - citer->hdr_offset = new_offset + ltt_align(new_offset, sizeof(struct ltt_event_header)); - DEBUGP(KERN_DEBUG - "LTT ASCII old_offset %lX new_offset %lX cpu %d\n", - old_offset, new_offset, citer->buf->a.cpu); - } - - update_cpu_iter(citer, citer->hdr_offset); -} - -static int cpu_iter_eof(struct ltt_relay_cpu_iter *citer) -{ - return !citer->sb_ref; -} - -static int ltt_relay_iter_eof(struct ltt_relay_iter *iter) -{ - return iter->nr_refs == 0; -} - -static void ltt_relay_advance_iter(struct ltt_relay_iter *iter) -{ - int i; - struct ltt_relay_cpu_iter *curr, *min = NULL; - iter->cpu = -1; - - /* - * find the event with the minimum tsc. - * TODO: use min-heep for 4096CPUS - */ - for_each_possible_cpu(i) { - curr = &iter->iter_cpu[i]; - - if (!curr->buf->a.allocated || !curr->header) - continue; - - if (cpu_iter_eof(curr)) - continue; - - if (!min || curr->tsc < min->tsc) { - min = curr; - iter->cpu = i; - } - } - - /* update cpu_iter for next ltt_relay_advance_iter() */ - if (min) - ltt_relay_advance_cpu_iter(min); -} - -static void *ascii_next(struct seq_file *m, void *v, loff_t *ppos) -{ - struct ltt_relay_iter *iter = m->private; - - WARN_ON_ONCE(!iter->nr_refs); - BUG_ON(v != iter); - - ltt_relay_advance_iter(iter); - return (ltt_relay_iter_eof(iter) || signal_pending(current)) - ? NULL : iter; -} - -static void *ascii_start(struct seq_file *m, loff_t *ppos) -{ - struct ltt_relay_iter *iter = m->private; - - ltt_relay_advance_iter(iter); - return (ltt_relay_iter_eof(iter) || signal_pending(current)) - ? 
NULL : iter; -} - -static void ascii_stop(struct seq_file *m, void *v) -{ -} - -static -int seq_serialize(struct seq_file *m, struct ltt_chanbuf *buf, - size_t buf_offset, const char *fmt, size_t *data_size) -{ - int len; - - if (m->count < m->size) { - len = ltt_serialize_printf(buf, buf_offset, data_size, - m->buf + m->count, - m->size - m->count, fmt); - if (m->count + len < m->size) { - m->count += len; - return 0; - } - } - - m->count = m->size; - return -1; -} - -static int ascii_show(struct seq_file *m, void *v) -{ - struct ltt_relay_iter *iter = v; - struct ltt_relay_cpu_iter *citer; - const char *name; - const char *fmt; - unsigned long long tsc; - size_t data_size; - - if (iter->cpu == -1) - return 0; - - citer = &iter->iter_cpu[iter->cpu]; - WARN_ON_ONCE(!citer->sb_ref); - /* - * Nothing to show, we are at the end of the last subbuffer currently - * having data. - */ - if (!citer->header) - return 0; - - tsc = citer->tsc; - name = marker_get_name_from_id(citer->chID, citer->eID); - fmt = marker_get_fmt_from_id(citer->chID, citer->eID); - - if (!name || !fmt) - return 0; - - seq_printf(m, "event:%16.16s: cpu:%2d time:%20.20llu ", - name, iter->cpu, tsc); - seq_serialize(m, citer->buf, citer->payload_offset, fmt, &data_size); - seq_puts(m, "\n"); - if (citer->data_size == INT_MAX) - citer->data_size = data_size; - - return 0; -} - -static struct seq_operations ascii_seq_ops = { - .start = ascii_start, - .next = ascii_next, - .stop = ascii_stop, - .show = ascii_show, -}; - -/* FIXME : cpu hotplug support */ -static int ltt_relay_iter_open_channel(struct ltt_relay_iter *iter, - struct ltt_chan *chan) -{ - int i, ret; - u16 chID = ltt_channels_get_index_from_name(chan->a.filename); - - /* we don't need lock relay_channels_mutex */ - for_each_possible_cpu(i) { - struct ltt_relay_cpu_iter *citer = &iter->iter_cpu[i]; - - citer->buf = per_cpu_ptr(chan->a.buf, i); - if (!citer->buf->a.allocated) - continue; - - citer->iter = iter; /* easy lazy parent info */ - citer->chID = chID; - - ret = ltt_chanbuf_open_read(citer->buf); - if (ret) { - /* Failed to open a percpu buffer, close everything. 
*/ - citer->buf = NULL; - goto error; - } - - for (;;) { - ret = subbuffer_start(citer, - &citer->read_sb_offset); - DEBUGP(KERN_DEBUG - "LTT ASCII open start " - "cpu %d ret %d offset %lX\n", - citer->buf->a.cpu, ret, citer->read_sb_offset); - if (!ret || ret == -ENODATA) { - break; /* got data, or finalized */ - } else { /* -EAGAIN */ - if (signal_pending(current)) - break; - schedule_timeout_interruptible(1); - } - } - update_cpu_iter(citer, citer->hdr_offset); - } - if (!iter->nr_refs) { - ret = -ENODATA; - goto error; - } - - return 0; - -error: - for_each_possible_cpu(i) { - struct ltt_relay_cpu_iter *citer = &iter->iter_cpu[i]; - - if (!citer->buf) - break; - - if (citer->buf->a.allocated) - ltt_chanbuf_release_read(citer->buf); - } - return ret; -} - -/* FIXME : cpu hotplug support */ -static int ltt_relay_iter_release_channel(struct ltt_relay_iter *iter) -{ - int i; - - for_each_possible_cpu(i) { - struct ltt_relay_cpu_iter *citer = &iter->iter_cpu[i]; - - if (citer->sb_ref) { - WARN_ON_ONCE(!citer->buf->a.allocated); - DEBUGP(KERN_DEBUG - "LTT ASCII release stop cpu %d offset %lX\n", - citer->buf->a.cpu, citer->read_sb_offset); - subbuffer_stop(&iter->iter_cpu[i], - citer->read_sb_offset); - } - if (citer->buf->a.allocated) - ltt_chanbuf_release_read(citer->buf); - } - WARN_ON_ONCE(iter->nr_refs); - return 0; -} - -static int ltt_relay_ascii_open(struct inode *inode, struct file *file) -{ - int ret; - struct ltt_chan *chan = inode->i_private; - struct ltt_relay_iter *iter = kzalloc(sizeof(*iter), GFP_KERNEL); - if (!iter) - return -ENOMEM; - - iter->chan = chan; - ret = ltt_relay_iter_open_channel(iter, chan); - if (ret) - goto error_free_alloc; - - ret = seq_open(file, &ascii_seq_ops); - if (ret) - goto error_release_channel; - ((struct seq_file *)file->private_data)->private = iter; - return 0; - -error_release_channel: - ltt_relay_iter_release_channel(iter); -error_free_alloc: - kfree(iter); - return ret; -} - -static int ltt_relay_ascii_release(struct inode *inode, struct file *file) -{ - struct seq_file *seq = file->private_data; - struct ltt_relay_iter *iter = seq->private; - - ltt_relay_iter_release_channel(iter); - kfree(iter); - return 0; -} - -static struct file_operations ltt_ascii_fops = -{ - .read = seq_read, - .open = ltt_relay_ascii_open, - .release = ltt_relay_ascii_release, - .llseek = no_llseek, - .owner = THIS_MODULE, -}; - -int ltt_ascii_create(struct ltt_chan *chan) -{ - struct dentry *dentry; - - dentry = debugfs_create_file(chan->a.filename, - S_IRUSR | S_IRGRP, - chan->a.trace->dentry.ascii_root, - chan, <t_ascii_fops); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - if (!dentry) - return -EEXIST; - - chan->a.ascii_dentry = dentry; - dentry->d_inode->i_private = chan; - return 0; -} -EXPORT_SYMBOL_GPL(ltt_ascii_create); - -void ltt_ascii_remove(struct ltt_chan *chan) -{ - struct dentry *dentry; - - dentry = dget(chan->a.ascii_dentry); - debugfs_remove(dentry); - /* TODO: wait / wakeup instead */ - /* - * Wait for every reference to the dentry to be gone, - * except us. 
- */
- while (atomic_read(&dentry->d_count) != 1)
- msleep(100);
- dput(dentry);
-}
-EXPORT_SYMBOL_GPL(ltt_ascii_remove);
-
-int ltt_ascii_create_dir(struct ltt_trace *new_trace)
-{
- new_trace->dentry.ascii_root = debugfs_create_dir(new_trace->trace_name,
- ltt_ascii_dir_dentry);
- if (!new_trace->dentry.ascii_root)
- return -EEXIST;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ltt_ascii_create_dir);
-
-void ltt_ascii_remove_dir(struct ltt_trace *trace)
-{
- debugfs_remove(trace->dentry.ascii_root);
-}
-EXPORT_SYMBOL_GPL(ltt_ascii_remove_dir);
-
-__init int ltt_ascii_init(void)
-{
- ltt_ascii_dir_dentry = debugfs_create_dir(LTT_ASCII, get_ltt_root());
-
- return ltt_ascii_dir_dentry ? 0 : -EFAULT;
-}
-
-__exit void ltt_ascii_exit(void)
-{
- debugfs_remove(ltt_ascii_dir_dentry);
- put_ltt_root();
-}
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Lai Jiangshan@FNST and Mathieu Desnoyers");
-MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Ascii Converter");
diff --git a/discard/ltt-channels.c b/discard/ltt-channels.c
deleted file mode 100644
index 962c81a8..00000000
--- a/discard/ltt-channels.c
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * ltt/ltt-channels.c
- *
- * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * LTTng channel management.
- *
- * Author:
- * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * Dual LGPL v2.1/GPL v2 license.
- */
-
-#include
-#include
-#include
-#include
-#include "ltt-channels.h"
-
-/*
- * ltt_channel_mutex may be nested inside the LTT trace mutex.
- * ltt_channel_mutex may be nested inside the markers mutex.
- */
-static DEFINE_MUTEX(ltt_channel_mutex);
-static LIST_HEAD(ltt_channels);
-/*
- * Index of next channel in array. Makes sure that as long as a trace channel is
- * allocated, no array index will be re-used when a channel is freed and then
- * another channel is allocated. This index is cleared and the array indexes
- * get reassigned when the index_kref goes back to 0, which indicates that no
- * more trace channels are allocated.
- */
-static unsigned int free_index;
-/* index_kref is protected by both ltt_channel_mutex and lock_markers */
-static struct kref index_kref; /* Keeps track of allocated trace channels */
-
-static struct ltt_channel_setting *lookup_channel(const char *name)
-{
- struct ltt_channel_setting *iter;
-
- list_for_each_entry(iter, &ltt_channels, list)
- if (strcmp(name, iter->name) == 0)
- return iter;
- return NULL;
-}
-
-/*
- * Must be called when channel refcount falls to 0 _and_ also when the last
- * trace is freed. This function is responsible for compacting the channel and
- * event IDs when no users are active.
- *
- * Called with lock_markers() and channels mutex held.
- */
-static void release_channel_setting(struct kref *kref)
-{
- struct ltt_channel_setting *setting = container_of(kref,
- struct ltt_channel_setting, kref);
- struct ltt_channel_setting *iter;
-
- if (atomic_read(&index_kref.refcount) == 0
- && atomic_read(&setting->kref.refcount) == 0) {
- list_del(&setting->list);
- kfree(setting);
-
- free_index = 0;
- list_for_each_entry(iter, &ltt_channels, list) {
- iter->index = free_index++;
- iter->free_event_id = 0;
- }
- }
-}
-
-/*
- * Perform channel index compaction when the last trace channel is freed.
- *
- * Called with lock_markers() and channels mutex held.
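- *
- * Used below as the index_kref release function, invoked through
- * kref_put() in ltt_channels_trace_free().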
- */
-static void release_trace_channel(struct kref *kref)
-{
- struct ltt_channel_setting *iter, *n;
-
- list_for_each_entry_safe(iter, n, &ltt_channels, list)
- release_channel_setting(&iter->kref);
- if (atomic_read(&index_kref.refcount) == 0)
- markers_compact_event_ids();
-}
-
-/*
- * ltt_channels_trace_ref : Is there an existing trace session ?
- *
- * Must be called with lock_markers() held.
- */
-int ltt_channels_trace_ref(void)
-{
- return !!atomic_read(&index_kref.refcount);
-}
-EXPORT_SYMBOL_GPL(ltt_channels_trace_ref);
-
-/**
- * ltt_channels_register - Register a trace channel.
- * @name: channel name
- *
- * Uses refcounting.
- */
-int ltt_channels_register(const char *name)
-{
- struct ltt_channel_setting *setting;
- int ret = 0;
-
- mutex_lock(&ltt_channel_mutex);
- setting = lookup_channel(name);
- if (setting) {
- if (atomic_read(&setting->kref.refcount) == 0)
- goto init_kref;
- else {
- kref_get(&setting->kref);
- goto end;
- }
- }
- setting = kzalloc(sizeof(*setting), GFP_KERNEL);
- if (!setting) {
- ret = -ENOMEM;
- goto end;
- }
- list_add(&setting->list, &ltt_channels);
- strncpy(setting->name, name, PATH_MAX-1);
- setting->index = free_index++;
-init_kref:
- kref_init(&setting->kref);
-end:
- mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ltt_channels_register);
-
-/**
- * ltt_channels_unregister - Unregister a trace channel.
- * @name: channel name
- * @compacting: performing compaction
- *
- * Must be called with markers mutex held.
- */
-int ltt_channels_unregister(const char *name, int compacting)
-{
- struct ltt_channel_setting *setting;
- int ret = 0;
-
- if (!compacting)
- mutex_lock(&ltt_channel_mutex);
- setting = lookup_channel(name);
- if (!setting || atomic_read(&setting->kref.refcount) == 0) {
- ret = -ENOENT;
- goto end;
- }
- kref_put(&setting->kref, release_channel_setting);
- if (!compacting && atomic_read(&index_kref.refcount) == 0)
- markers_compact_event_ids();
-end:
- if (!compacting)
- mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ltt_channels_unregister);
-
-/**
- * ltt_channels_set_default - Set channel default behavior.
- * @name: default channel name
- * @sb_size: size of the subbuffers
- * @n_sb: number of subbuffers
- */
-int ltt_channels_set_default(const char *name,
- unsigned int sb_size,
- unsigned int n_sb)
-{
- struct ltt_channel_setting *setting;
- int ret = 0;
-
- mutex_lock(&ltt_channel_mutex);
- setting = lookup_channel(name);
- if (!setting || atomic_read(&setting->kref.refcount) == 0) {
- ret = -ENOENT;
- goto end;
- }
- setting->sb_size = sb_size;
- setting->n_sb = n_sb;
-end:
- mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ltt_channels_set_default);
-
-/**
- * ltt_channels_get_name_from_index - get channel name from channel index
- * @index: channel index
- *
- * Allows to lookup the channel name given its index. Done to keep the name
- * information outside of each trace channel instance.
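- *
- * Illustrative use (hypothetical caller; "idx" is assumed to be a valid
- * channel index):
- *
- *   const char *name = ltt_channels_get_name_from_index(idx);
- *   if (name)
- *     printk(KERN_DEBUG "channel %u is %s\n", idx, name);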
- */
-const char *ltt_channels_get_name_from_index(unsigned int index)
-{
- struct ltt_channel_setting *iter;
-
- list_for_each_entry(iter, &ltt_channels, list)
- if (iter->index == index && atomic_read(&iter->kref.refcount))
- return iter->name;
- return NULL;
-}
-EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);
-
-static struct ltt_channel_setting *
-ltt_channels_get_setting_from_name(const char *name)
-{
- struct ltt_channel_setting *iter;
-
- list_for_each_entry(iter, &ltt_channels, list)
- if (!strcmp(iter->name, name)
- && atomic_read(&iter->kref.refcount))
- return iter;
- return NULL;
-}
-
-/**
- * ltt_channels_get_index_from_name - get channel index from channel name
- * @name: channel name
- *
- * Allows to lookup the channel index given its name. Done to keep the name
- * information outside of each trace channel instance.
- * Returns -1 if not found.
- */
-int ltt_channels_get_index_from_name(const char *name)
-{
- struct ltt_channel_setting *setting;
-
- setting = ltt_channels_get_setting_from_name(name);
- if (setting)
- return setting->index;
- else
- return -1;
-}
-EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
-
-/**
- * ltt_channels_trace_alloc - Allocate channel structures for a trace
- *
- * Use the current channel list to allocate the channels for a trace.
- * Called with trace lock held. Does not perform the trace buffer allocation,
- * because we must let the user overwrite specific channel sizes.
- */
-int ltt_channels_trace_alloc(struct ltt_trace *trace, int overwrite)
-{
- struct channel **chan = NULL;
- struct ltt_channel_setting *chans, *iter;
- int ret = 0;
-
- lock_markers();
- mutex_lock(&ltt_channel_mutex);
- if (!free_index)
- goto end;
- if (!atomic_read(&index_kref.refcount))
- kref_init(&index_kref);
- else
- kref_get(&index_kref);
- trace->nr_channels = free_index;
- chan = kzalloc(sizeof(struct channel *) * free_index, GFP_KERNEL);
- if (!chan)
- goto end;
- chans = kzalloc(sizeof(struct ltt_channel_setting) * free_index,
- GFP_KERNEL);
- if (!chans)
- goto free_chan;
- list_for_each_entry(iter, &ltt_channels, list) {
- if (!atomic_read(&iter->kref.refcount))
- continue;
- chans[iter->index].sb_size = iter->sb_size;
- chans[iter->index].n_sb = iter->n_sb;
- chans[iter->index].overwrite = overwrite;
- strncpy(chans[iter->index].filename, iter->name,
- NAME_MAX - 1);
- chans[iter->index].switch_timer_interval = 0;
- chans[iter->index].read_timer_interval = LTT_READ_TIMER_INTERVAL;
- }
- trace->channels = chan;
- trace->settings = chans;
-end:
- mutex_unlock(&ltt_channel_mutex);
- unlock_markers();
- return ret;
-
-free_chan:
- kfree(chan);
- ret = -ENOMEM;
- goto end;
-}
-EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
-
-/**
- * ltt_channels_trace_free - Free one trace's channels
- * @trace: trace whose channels are to be freed
- *
- * Called with trace lock held. The actual channel buffers must be freed before
- * this function is called.
- */
-void ltt_channels_trace_free(struct ltt_trace *trace)
-{
- lock_markers();
- mutex_lock(&ltt_channel_mutex);
- kfree(trace->settings);
- kfree(trace->channels);
- kref_put(&index_kref, release_trace_channel);
- mutex_unlock(&ltt_channel_mutex);
- unlock_markers();
- marker_update_probes();
-}
-EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
-
-/**
- * ltt_channels_trace_set_timer - set switch timer
- * @chan: channel
- * @interval: interval of timer interrupt, in jiffies. 0 inhibits timer.
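- *
- * Sketch of intended use, assuming a 10 ms switch period for the example:
- *
- *   ltt_channels_trace_set_timer(chan, msecs_to_jiffies(10));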
- */
-
-void ltt_channels_trace_set_timer(struct ltt_chan *chan,
- unsigned long interval)
-{
- chan->switch_timer_interval = interval;
-}
-EXPORT_SYMBOL_GPL(ltt_channels_trace_set_timer);
-
-/**
- * _ltt_channels_get_event_id - get next event ID for a marker
- * @channel: channel name
- * @name: event name
- *
- * Returns a unique event ID (for this channel) or < 0 on error.
- * Must be called with channels mutex held.
- */
-int _ltt_channels_get_event_id(const char *channel, const char *name)
-{
- struct ltt_channel_setting *setting;
- int ret;
-
- setting = ltt_channels_get_setting_from_name(channel);
- if (!setting) {
- ret = -ENOENT;
- goto end;
- }
- if (strcmp(channel, "metadata") == 0) {
- if (strcmp(name, "core_marker_id") == 0)
- ret = 0;
- else if (strcmp(name, "core_marker_format") == 0)
- ret = 1;
- else
- ret = -ENOENT;
- goto end;
- }
- if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
- ret = -ENOSPC;
- goto end;
- }
- ret = setting->free_event_id++;
-end:
- return ret;
-}
-
-/**
- * ltt_channels_get_event_id - get next event ID for a marker
- * @channel: channel name
- * @name: event name
- *
- * Returns a unique event ID (for this channel) or < 0 on error.
- */
-int ltt_channels_get_event_id(const char *channel, const char *name)
-{
- int ret;
-
- mutex_lock(&ltt_channel_mutex);
- ret = _ltt_channels_get_event_id(channel, name);
- mutex_unlock(&ltt_channel_mutex);
- return ret;
-}
-
-/**
- * _ltt_channels_reset_event_ids - reset event IDs at compaction
- *
- * Called with lock_markers() and channels mutex held.
- */
-void _ltt_channels_reset_event_ids(void)
-{
- struct ltt_channel_setting *iter;
-
- list_for_each_entry(iter, &ltt_channels, list)
- iter->free_event_id = 0;
-}
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers");
-MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");
diff --git a/discard/ltt-channels.h b/discard/ltt-channels.h
deleted file mode 100644
index 9eb604ba..00000000
--- a/discard/ltt-channels.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#ifndef _LTT_CHANNELS_H
-#define _LTT_CHANNELS_H
-
-/*
- * Copyright (C) 2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * Dynamic tracer channel allocation.
-
- * Dual LGPL v2.1/GPL v2 license.
- */
-
-#include
-#include
-#include
-#include
-#include
-
-#define EVENTS_PER_CHANNEL 65536
-
-#define LTT_READ_TIMER_INTERVAL 10000 /* us */
-
-/*
- * Forward declaration of locking-specific per-cpu buffer structure.
- */
-struct ltt_trace;
-struct ltt_serialize_closure;
-struct ltt_probe_private_data;
-
-/* Serialization callback '%k' */
-typedef size_t (*ltt_serialize_cb)(struct ltt_chanbuf *buf, size_t buf_offset,
- struct ltt_serialize_closure *closure,
- void *serialize_private,
- unsigned int stack_pos_ctx,
- int *largest_align,
- const char *fmt, va_list *args);
-
-struct ltt_probe_private_data {
- struct ltt_trace *trace; /*
- * Target trace, for metadata
- * or statedump.
- */
- ltt_serialize_cb serializer; /*
- * Serialization function override.
- */
- void *serialize_private; /*
- * Private data for serialization
- * functions.
- */ -}; - -struct ltt_channel_setting { - unsigned int sb_size; - unsigned int n_sb; - int overwrite; - unsigned long switch_timer_interval; - unsigned long read_timer_interval; - struct kref kref; /* Number of references to structure content */ - struct list_head list; - unsigned int index; /* index of channel in trace channel array */ - u16 free_event_id; /* Next event ID to allocate */ - char name[PATH_MAX]; -}; - -int ltt_channels_register(const char *name); -int ltt_channels_unregister(const char *name, int compacting); -int ltt_channels_set_default(const char *name, - unsigned int subbuf_size, - unsigned int subbuf_cnt); -const char *ltt_channels_get_name_from_index(unsigned int index); -int ltt_channels_get_index_from_name(const char *name); -int ltt_channels_trace_ref(void); -struct ltt_chan *ltt_channels_trace_alloc(unsigned int *nr_channels, - int overwrite, int active); -void ltt_channels_trace_free(struct ltt_chan *channels, - unsigned int nr_channels); -void ltt_channels_trace_set_timer(struct ltt_channel_setting *chan, - unsigned long interval); - -int _ltt_channels_get_event_id(const char *channel, const char *name); -int ltt_channels_get_event_id(const char *channel, const char *name); -void _ltt_channels_reset_event_ids(void); - -#endif /* _LTT_CHANNELS_H */ diff --git a/discard/ltt-filter.c b/discard/ltt-filter.c deleted file mode 100644 index ec113af6..00000000 --- a/discard/ltt-filter.c +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (C) 2008 Mathieu Desnoyers - * - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include -#include -#include - -#include "ltt-tracer.h" - -#define LTT_FILTER_DIR "filter" - -/* - * Protects the ltt_filter_dir allocation. - */ -static DEFINE_MUTEX(ltt_filter_mutex); - -static struct dentry *ltt_filter_dir; - -struct dentry *get_filter_root(void) -{ - struct dentry *ltt_root_dentry; - - mutex_lock(<t_filter_mutex); - if (!ltt_filter_dir) { - ltt_root_dentry = get_ltt_root(); - if (!ltt_root_dentry) - goto err_no_root; - - ltt_filter_dir = debugfs_create_dir(LTT_FILTER_DIR, - ltt_root_dentry); - if (!ltt_filter_dir) - printk(KERN_ERR - "ltt_filter_init: failed to create dir %s\n", - LTT_FILTER_DIR); - } -err_no_root: - mutex_unlock(<t_filter_mutex); - return ltt_filter_dir; -} -EXPORT_SYMBOL_GPL(get_filter_root); - -static void __exit ltt_filter_exit(void) -{ - debugfs_remove(ltt_filter_dir); -} - -module_exit(ltt_filter_exit); - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers "); -MODULE_DESCRIPTION("Linux Trace Toolkit Filter"); diff --git a/discard/ltt-kprobes.c b/discard/ltt-kprobes.c deleted file mode 100644 index 7539381b..00000000 --- a/discard/ltt-kprobes.c +++ /dev/null @@ -1,493 +0,0 @@ -/* - * (C) Copyright 2009 - - * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca) - * - * LTTng kprobes integration module. - * - * Dual LGPL v2.1/GPL v2 license. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ltt-type-serializer.h" -#include "ltt-tracer.h" - -#define LTT_KPROBES_DIR "kprobes" -#define LTT_KPROBES_ENABLE "enable" -#define LTT_KPROBES_DISABLE "disable" -#define LTT_KPROBES_LIST "list" - -/* Active LTTng kprobes hash table */ -static DEFINE_MUTEX(ltt_kprobes_mutex); - -#define LTT_KPROBE_HASH_BITS 6 -#define LTT_KPROBE_TABLE_SIZE (1 << LTT_KPROBE_HASH_BITS) -static struct hlist_head ltt_kprobe_table[LTT_KPROBE_TABLE_SIZE]; - -struct kprobe_entry { - struct hlist_node hlist; - struct kprobe kp; - char key[0]; -}; - -static struct dentry *ltt_kprobes_dir, - *ltt_kprobes_enable_dentry, - *ltt_kprobes_disable_dentry, - *ltt_kprobes_list_dentry; - -static int module_exit; - - -static void trace_kprobe_table_entry(void *call_data, struct kprobe_entry *e) -{ - unsigned long addr; - char *namebuf = (char *)__get_free_page(GFP_KERNEL); - - if (e->kp.addr) { - sprint_symbol(namebuf, (unsigned long)e->kp.addr); - addr = (unsigned long)e->kp.addr; - } else { - strncpy(namebuf, e->kp.symbol_name, PAGE_SIZE - 1); - /* TODO : add offset */ - addr = kallsyms_lookup_name(namebuf); - } - if (addr) - __trace_mark(0, kprobe_state, kprobe_table, call_data, - "ip 0x%lX symbol %s", addr, namebuf); - free_page((unsigned long)namebuf); -} - -DEFINE_MARKER(kernel, kprobe, "ip %lX"); - -static int ltt_kprobe_handler_pre(struct kprobe *p, struct pt_regs *regs) -{ - struct marker *marker; - unsigned long data; - - data = (unsigned long)p->addr; - marker = &GET_MARKER(kernel, kprobe); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, sizeof(data), sizeof(data)); - return 0; -} - -static int ltt_register_kprobe(const char *key) -{ - struct hlist_head *head; - struct hlist_node *node; - struct kprobe_entry *e = NULL; - char *symbol_name = NULL; - unsigned long addr; - unsigned int offset = 0; - u32 hash; - size_t key_len = strlen(key) + 1; - int ret; - - if (key_len == 1) - return -ENOENT; /* only \0 */ - - if (sscanf(key, "%li", &addr) != 1) - addr = 0; - - if (!addr) { - const char *symbol_end = NULL; - unsigned int symbol_len; /* includes final \0 */ - - symbol_end = strchr(key, ' '); - if (symbol_end) - symbol_len = symbol_end - key + 1; - else - symbol_len = key_len; - symbol_name = kmalloc(symbol_len, GFP_KERNEL); - if (!symbol_name) { - ret = -ENOMEM; - goto error; - } - memcpy(symbol_name, key, symbol_len - 1); - symbol_name[symbol_len-1] = '\0'; - if (symbol_end) { - symbol_end++; /* start of offset */ - if (sscanf(symbol_end, "%i", &offset) != 1) - offset = 0; - } - } - - hash = jhash(key, key_len-1, 0); - head = <t_kprobe_table[hash & ((1 << LTT_KPROBE_HASH_BITS)-1)]; - hlist_for_each_entry(e, node, head, hlist) { - if (!strcmp(key, e->key)) { - printk(KERN_NOTICE "Kprobe %s busy\n", key); - ret = -EBUSY; - goto error; - } - } - /* - * Using kzalloc here to allocate a variable length element. Could - * cause some memory fragmentation if overused. 
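- *
- * For reference, the keys registered here (fed in through the "enable"
- * debugfs file) are parsed above as an address, a symbol, or a symbol
- * plus offset, e.g. (illustrative values):
- *   "0xc0123456", "do_fork", "do_fork 0x10"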
- */ - e = kzalloc(sizeof(struct kprobe_entry) + key_len, GFP_KERNEL); - if (!e) { - ret = -ENOMEM; - goto error; - } - memcpy(e->key, key, key_len); - hlist_add_head(&e->hlist, head); - e->kp.pre_handler = ltt_kprobe_handler_pre; - e->kp.symbol_name = symbol_name; - e->kp.offset = offset; - e->kp.addr = (void *)addr; - ret = register_kprobe(&e->kp); - if (ret < 0) - goto error_list_del; - trace_kprobe_table_entry(NULL, e); - return 0; - -error_list_del: - hlist_del(&e->hlist); -error: - kfree(symbol_name); - kfree(e); - return ret; -} - -static int ltt_unregister_kprobe(const char *key) -{ - struct hlist_head *head; - struct hlist_node *node; - struct kprobe_entry *e; - int found = 0; - size_t key_len = strlen(key) + 1; - u32 hash; - - hash = jhash(key, key_len-1, 0); - head = <t_kprobe_table[hash & ((1 << LTT_KPROBE_HASH_BITS)-1)]; - hlist_for_each_entry(e, node, head, hlist) { - if (!strcmp(key, e->key)) { - found = 1; - break; - } - } - if (!found) - return -ENOENT; - hlist_del(&e->hlist); - unregister_kprobe(&e->kp); - kfree(e->kp.symbol_name); - kfree(e); - return 0; -} - -static void ltt_unregister_all_kprobes(void) -{ - struct kprobe_entry *e; - struct hlist_head *head; - struct hlist_node *node, *tmp; - unsigned int i; - - for (i = 0; i < LTT_KPROBE_TABLE_SIZE; i++) { - head = <t_kprobe_table[i]; - hlist_for_each_entry_safe(e, node, tmp, head, hlist) { - hlist_del(&e->hlist); - unregister_kprobe(&e->kp); - kfree(e->kp.symbol_name); - kfree(e); - } - } -} - -/* - * Allows to specify either - * - symbol - * - symbol offset - * - address - */ -static ssize_t enable_op_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - int err, buf_size; - char *end; - char *buf = (char *)__get_free_page(GFP_KERNEL); - - mutex_lock(<t_kprobes_mutex); - if (module_exit) { - err = -EPERM; - goto error; - } - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto error; - buf[buf_size] = '\0'; - end = strchr(buf, '\n'); - if (end) - *end = '\0'; - err = ltt_register_kprobe(buf); - if (err) - goto error; - - mutex_unlock(<t_kprobes_mutex); - free_page((unsigned long)buf); - return count; -error: - mutex_unlock(<t_kprobes_mutex); - free_page((unsigned long)buf); - return err; -} - -static const struct file_operations ltt_kprobes_enable = { - .write = enable_op_write, -}; - -static ssize_t disable_op_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - int err, buf_size; - char *end; - char *buf = (char *)__get_free_page(GFP_KERNEL); - - mutex_lock(<t_kprobes_mutex); - if (module_exit) - goto end; - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto error; - buf[buf_size] = '\0'; - end = strchr(buf, '\n'); - if (end) - *end = '\0'; - err = ltt_unregister_kprobe(buf); - if (err) - goto error; -end: - mutex_unlock(<t_kprobes_mutex); - free_page((unsigned long)buf); - return count; -error: - mutex_unlock(<t_kprobes_mutex); - free_page((unsigned long)buf); - return err; -} - -static const struct file_operations ltt_kprobes_disable = { - .write = disable_op_write, -}; - -/* - * This seqfile read is not perfectly safe, as a kprobe could be removed from - * the hash table between two reads. This will result in an incomplete output. 
- */ -static struct kprobe_entry *ltt_find_next_kprobe(struct kprobe_entry *prev) -{ - struct kprobe_entry *e; - struct hlist_head *head; - struct hlist_node *node; - unsigned int i; - int found = 0; - - if (prev == (void *)-1UL) - return NULL; - - if (!prev) - found = 1; - - for (i = 0; i < LTT_KPROBE_TABLE_SIZE; i++) { - head = <t_kprobe_table[i]; - hlist_for_each_entry(e, node, head, hlist) { - if (found) - return e; - if (e == prev) - found = 1; - } - } - return NULL; -} - -static void *lk_next(struct seq_file *m, void *p, loff_t *pos) -{ - m->private = ltt_find_next_kprobe(m->private); - if (!m->private) { - m->private = (void *)-1UL; - return NULL; - } - return m->private; -} - -static void *lk_start(struct seq_file *m, loff_t *pos) -{ - mutex_lock(<t_kprobes_mutex); - if (!*pos) - m->private = NULL; - m->private = ltt_find_next_kprobe(m->private); - if (!m->private) { - m->private = (void *)-1UL; - return NULL; - } - return m->private; -} - -static void lk_stop(struct seq_file *m, void *p) -{ - mutex_unlock(<t_kprobes_mutex); -} - -static int lk_show(struct seq_file *m, void *p) -{ - struct kprobe_entry *e = m->private; - seq_printf(m, "%s\n", e->key); - return 0; -} - -static const struct seq_operations ltt_kprobes_list_op = { - .start = lk_start, - .next = lk_next, - .stop = lk_stop, - .show = lk_show, -}; - -static int ltt_kprobes_list_open(struct inode *inode, struct file *file) -{ - int ret; - - ret = seq_open(file, <t_kprobes_list_op); - if (ret == 0) - ((struct seq_file *)file->private_data)->private = NULL; - return ret; -} - -static int ltt_kprobes_list_release(struct inode *inode, struct file *file) -{ - struct seq_file *seq = file->private_data; - - seq->private = NULL; - return seq_release(inode, file); -} - -static const struct file_operations ltt_kprobes_list = { - .open = ltt_kprobes_list_open, - .read = seq_read, - .llseek = seq_lseek, - .release = ltt_kprobes_list_release, -}; - -/* - * kprobes table dump. Callback invoked by ltt-statedump. ltt-statedump must - * take a reference to this module before calling this callback. 
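- *
- * The callback is hooked up below in ltt_kprobes_init()/ltt_kprobes_exit()
- * via ltt_statedump_register_kprobes_dump() and
- * ltt_statedump_unregister_kprobes_dump().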
- */ -void ltt_dump_kprobes_table(void *call_data) -{ - struct kprobe_entry *e; - struct hlist_head *head; - struct hlist_node *node; - unsigned int i; - - for (i = 0; i < LTT_KPROBE_TABLE_SIZE; i++) { - head = <t_kprobe_table[i]; - hlist_for_each_entry(e, node, head, hlist) - trace_kprobe_table_entry(call_data, e); - } -} -EXPORT_SYMBOL_GPL(ltt_dump_kprobes_table); - -static int __init ltt_kprobes_init(void) -{ - struct dentry *ltt_root_dentry; - int ret = 0; - - printk(KERN_INFO "LTT : ltt-kprobes init\n"); - mutex_lock(<t_kprobes_mutex); - - ltt_root_dentry = get_ltt_root(); - if (!ltt_root_dentry) { - ret = -ENOENT; - goto err_no_root; - } - - ltt_kprobes_dir = debugfs_create_dir(LTT_KPROBES_DIR, ltt_root_dentry); - if (!ltt_kprobes_dir) { - printk(KERN_ERR - "ltt_kprobes_init: failed to create dir %s\n", - LTT_KPROBES_DIR); - ret = -ENOMEM; - goto err_no_dir; - } - - ltt_kprobes_enable_dentry = debugfs_create_file(LTT_KPROBES_ENABLE, - S_IWUSR, - ltt_kprobes_dir, NULL, - <t_kprobes_enable); - if (IS_ERR(ltt_kprobes_enable_dentry) || !ltt_kprobes_enable_dentry) { - printk(KERN_ERR - "ltt_kprobes_init: failed to create file %s\n", - LTT_KPROBES_ENABLE); - ret = -ENOMEM; - goto err_no_enable; - } - - ltt_kprobes_disable_dentry = debugfs_create_file(LTT_KPROBES_DISABLE, - S_IWUSR, - ltt_kprobes_dir, NULL, - <t_kprobes_disable); - if (IS_ERR(ltt_kprobes_disable_dentry) || !ltt_kprobes_disable_dentry) { - printk(KERN_ERR - "ltt_kprobes_init: failed to create file %s\n", - LTT_KPROBES_DISABLE); - ret = -ENOMEM; - goto err_no_disable; - } - - ltt_kprobes_list_dentry = debugfs_create_file(LTT_KPROBES_LIST, - S_IWUSR, ltt_kprobes_dir, - NULL, <t_kprobes_list); - if (IS_ERR(ltt_kprobes_list_dentry) || !ltt_kprobes_list_dentry) { - printk(KERN_ERR - "ltt_kprobes_init: failed to create file %s\n", - LTT_KPROBES_LIST); - ret = -ENOMEM; - goto err_no_list; - } - ltt_statedump_register_kprobes_dump(ltt_dump_kprobes_table); - - mutex_unlock(<t_kprobes_mutex); - return ret; - -err_no_list: - debugfs_remove(ltt_kprobes_disable_dentry); -err_no_disable: - debugfs_remove(ltt_kprobes_enable_dentry); -err_no_enable: - debugfs_remove(ltt_kprobes_dir); -err_no_dir: -err_no_root: - mutex_unlock(<t_kprobes_mutex); - return ret; -} -module_init(ltt_kprobes_init); - -static void __exit ltt_kprobes_exit(void) -{ - printk(KERN_INFO "LTT : ltt-kprobes exit\n"); - mutex_lock(<t_kprobes_mutex); - module_exit = 1; - ltt_statedump_unregister_kprobes_dump(ltt_dump_kprobes_table); - debugfs_remove(ltt_kprobes_list_dentry); - debugfs_remove(ltt_kprobes_disable_dentry); - debugfs_remove(ltt_kprobes_enable_dentry); - debugfs_remove(ltt_kprobes_dir); - ltt_unregister_all_kprobes(); - mutex_unlock(<t_kprobes_mutex); -} -module_exit(ltt_kprobes_exit); - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Linux Trace Toolkit Kprobes Support"); diff --git a/discard/ltt-marker-control.c b/discard/ltt-marker-control.c deleted file mode 100644 index 2db5c4e9..00000000 --- a/discard/ltt-marker-control.c +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Copyright (C) 2007 Mathieu Desnoyers - * - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ltt-tracer.h" - -#define DEFAULT_CHANNEL "cpu" -#define DEFAULT_PROBE "default" - -LIST_HEAD(probes_list); - -/* - * Mutex protecting the probe slab cache. - * Nests inside the traces mutex. 
- */ -DEFINE_MUTEX(probes_mutex); - -struct ltt_available_probe default_probe = { - .name = "default", - .format = NULL, - .probe_func = ltt_vtrace, - .callbacks[0] = ltt_serialize_data, -}; - -static struct kmem_cache *markers_loaded_cachep; -static LIST_HEAD(markers_loaded_list); -/* - * List sorted by name strcmp order. - */ -static LIST_HEAD(probes_registered_list); - -static struct ltt_available_probe *get_probe_from_name(const char *pname) -{ - struct ltt_available_probe *iter; - int comparison, found = 0; - - if (!pname) - pname = DEFAULT_PROBE; - list_for_each_entry(iter, &probes_registered_list, node) { - comparison = strcmp(pname, iter->name); - if (!comparison) - found = 1; - if (comparison <= 0) - break; - } - if (found) - return iter; - else - return NULL; -} - -int ltt_probe_register(struct ltt_available_probe *pdata) -{ - int ret = 0; - int comparison; - struct ltt_available_probe *iter; - - mutex_lock(&probes_mutex); - list_for_each_entry_reverse(iter, &probes_registered_list, node) { - comparison = strcmp(pdata->name, iter->name); - if (!comparison) { - ret = -EBUSY; - goto end; - } else if (comparison > 0) { - /* We belong to the location right after iter. */ - list_add(&pdata->node, &iter->node); - goto end; - } - } - /* Should be added at the head of the list */ - list_add(&pdata->node, &probes_registered_list); -end: - mutex_unlock(&probes_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(ltt_probe_register); - -/* - * Called when a probe does not want to be called anymore. - */ -int ltt_probe_unregister(struct ltt_available_probe *pdata) -{ - int ret = 0; - struct ltt_active_marker *amark, *tmp; - - mutex_lock(&probes_mutex); - list_for_each_entry_safe(amark, tmp, &markers_loaded_list, node) { - if (amark->probe == pdata) { - ret = marker_probe_unregister_private_data( - pdata->probe_func, amark); - if (ret) - goto end; - list_del(&amark->node); - kmem_cache_free(markers_loaded_cachep, amark); - } - } - list_del(&pdata->node); -end: - mutex_unlock(&probes_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(ltt_probe_unregister); - -/* - * Connect marker "mname" to probe "pname". - * Only allow _only_ probe instance to be connected to a marker. - */ -int ltt_marker_connect(const char *channel, const char *mname, - const char *pname) -{ - int ret; - struct ltt_active_marker *pdata; - struct ltt_available_probe *probe; - - ltt_lock_traces(); - mutex_lock(&probes_mutex); - probe = get_probe_from_name(pname); - if (!probe) { - ret = -ENOENT; - goto end; - } - pdata = marker_get_private_data(channel, mname, probe->probe_func, 0); - if (pdata && !IS_ERR(pdata)) { - ret = -EEXIST; - goto end; - } - pdata = kmem_cache_zalloc(markers_loaded_cachep, GFP_KERNEL); - if (!pdata) { - ret = -ENOMEM; - goto end; - } - pdata->probe = probe; - /* - * ID has priority over channel in case of conflict. - */ - ret = marker_probe_register(channel, mname, NULL, - probe->probe_func, pdata); - if (ret) - kmem_cache_free(markers_loaded_cachep, pdata); - else - list_add(&pdata->node, &markers_loaded_list); -end: - mutex_unlock(&probes_mutex); - ltt_unlock_traces(); - return ret; -} -EXPORT_SYMBOL_GPL(ltt_marker_connect); - -/* - * Disconnect marker "mname", probe "pname". 
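- *
- * Illustrative pairing with ltt_marker_connect(), using marker and probe
- * names that exist in this tree (hypothetical call site):
- *
- *   ltt_marker_connect("net", "socket_sendmsg", "default");
- *   ...
- *   ltt_marker_disconnect("net", "socket_sendmsg", "default");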
- */ -int ltt_marker_disconnect(const char *channel, const char *mname, - const char *pname) -{ - struct ltt_active_marker *pdata; - struct ltt_available_probe *probe; - int ret = 0; - - mutex_lock(&probes_mutex); - probe = get_probe_from_name(pname); - if (!probe) { - ret = -ENOENT; - goto end; - } - pdata = marker_get_private_data(channel, mname, probe->probe_func, 0); - if (IS_ERR(pdata)) { - ret = PTR_ERR(pdata); - goto end; - } else if (!pdata) { - /* - * Not registered by us. - */ - ret = -EPERM; - goto end; - } - ret = marker_probe_unregister(channel, mname, probe->probe_func, pdata); - if (ret) - goto end; - else { - list_del(&pdata->node); - kmem_cache_free(markers_loaded_cachep, pdata); - } -end: - mutex_unlock(&probes_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(ltt_marker_disconnect); - -static void disconnect_all_markers(void) -{ - struct ltt_active_marker *pdata, *tmp; - - list_for_each_entry_safe(pdata, tmp, &markers_loaded_list, node) { - marker_probe_unregister_private_data(pdata->probe->probe_func, - pdata); - list_del(&pdata->node); - kmem_cache_free(markers_loaded_cachep, pdata); - } -} - -static int __init marker_control_init(void) -{ - int ret; - - markers_loaded_cachep = KMEM_CACHE(ltt_active_marker, 0); - - ret = ltt_probe_register(&default_probe); - BUG_ON(ret); - ret = ltt_marker_connect("metadata", "core_marker_format", - DEFAULT_PROBE); - BUG_ON(ret); - ret = ltt_marker_connect("metadata", "core_marker_id", DEFAULT_PROBE); - BUG_ON(ret); - - return 0; -} -module_init(marker_control_init); - -static void __exit marker_control_exit(void) -{ - int ret; - - ret = ltt_marker_disconnect("metadata", "core_marker_format", - DEFAULT_PROBE); - BUG_ON(ret); - ret = ltt_marker_disconnect("metadata", "core_marker_id", - DEFAULT_PROBE); - BUG_ON(ret); - ret = ltt_probe_unregister(&default_probe); - BUG_ON(ret); - disconnect_all_markers(); - kmem_cache_destroy(markers_loaded_cachep); - marker_synchronize_unregister(); -} -module_exit(marker_control_exit); - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Linux Trace Toolkit Marker Control"); diff --git a/discard/ltt-serialize.c b/discard/ltt-serialize.c deleted file mode 100644 index 50d7132c..00000000 --- a/discard/ltt-serialize.c +++ /dev/null @@ -1,968 +0,0 @@ -/* - * LTTng serializing code. - * - * Copyright Mathieu Desnoyers, March 2007. - * - * Dual LGPL v2.1/GPL v2 license. - * - * See this discussion about weirdness about passing va_list and then va_list to - * functions. (related to array argument passing). va_list seems to be - * implemented as an array on x86_64, but not on i386... This is why we pass a - * va_list * to ltt_vtrace. - */ - -#include -#include -#include -#include - -#include "ltt-tracer.h" - -enum ltt_type { - LTT_TYPE_SIGNED_INT, - LTT_TYPE_UNSIGNED_INT, - LTT_TYPE_STRING, - LTT_TYPE_NONE, -}; - -#define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1) - -/* - * Stack used to keep track of string length at size calculation, passed to - * string copy to handle racy input string updates. - * Can be used by any context; this is ensured by putting the stack position - * back to its original position after using it. - */ -#define TRACER_STACK_LEN (PAGE_SIZE / sizeof(unsigned long)) -static DEFINE_PER_CPU(unsigned long [TRACER_STACK_LEN], - tracer_stack); -static DEFINE_PER_CPU(unsigned int, tracer_stack_pos); - -/* - * Inspired from vsnprintf - * - * The serialization format string supports the basic printf format strings. 
- * In addition, it defines new formats that can be used to serialize more - * complex/non portable data structures. - * - * Typical use: - * - * field_name %ctype - * field_name #tracetype %ctype - * field_name #tracetype %ctype1 %ctype2 ... - * - * A conversion is performed between format string types supported by GCC and - * the trace type requested. GCC type is used to perform type checking on format - * strings. Trace type is used to specify the exact binary representation - * in the trace. A mapping is done between one or more GCC types to one trace - * type. Sign extension, if required by the conversion, is performed following - * the trace type. - * - * If a gcc format is not declared with a trace format, the gcc format is - * also used as binary representation in the trace. - * - * Strings are supported with %s. - * A single tracetype (sequence) can take multiple c types as parameter. - * - * c types: - * - * see printf(3). - * - * Note: to write a uint32_t in a trace, the following expression is recommended - * si it can be portable: - * - * ("#4u%lu", (unsigned long)var) - * - * trace types: - * - * Serialization specific formats : - * - * Fixed size integers - * #1u writes uint8_t - * #2u writes uint16_t - * #4u writes uint32_t - * #8u writes uint64_t - * #1d writes int8_t - * #2d writes int16_t - * #4d writes int32_t - * #8d writes int64_t - * i.e.: - * #1u%lu #2u%lu #4d%lu #8d%lu #llu%hu #d%lu - * - * * Attributes: - * - * n: (for network byte order) - * #ntracetype%ctype - * is written in the trace in network byte order. - * - * i.e.: #bn4u%lu, #n%lu, #b%u - * - * TODO (eventually) - * Variable length sequence - * #a #tracetype1 #tracetype2 %array_ptr %elem_size %num_elems - * In the trace: - * #a specifies that this is a sequence - * #tracetype1 is the type of elements in the sequence - * #tracetype2 is the type of the element count - * GCC input: - * array_ptr is a pointer to an array that contains members of size - * elem_size. - * num_elems is the number of elements in the array. - * i.e.: #a #lu #lu %p %lu %u - * - * Callback - * #k callback (taken from the probe data) - * The following % arguments are exepected by the callback - * - * i.e.: #a #lu #lu #k %p - * - * Note: No conversion is done from floats to integers, nor from integers to - * floats between c types and trace types. float conversion from double to float - * or from float to double is also not supported. - * - * REMOVE - * %*b expects sizeof(data), data - * where sizeof(data) is 1, 2, 4 or 8 - * - * Fixed length struct, union or array. - * FIXME: unable to extract those sizes statically. - * %*r expects sizeof(*ptr), ptr - * %*.*r expects sizeof(*ptr), __alignof__(*ptr), ptr - * struct and unions removed. - * Fixed length array: - * [%p]#a[len #tracetype] - * i.e.: [%p]#a[12 #lu] - * - * Variable length sequence - * %*.*:*v expects sizeof(*ptr), __alignof__(*ptr), elem_num, ptr - * where elem_num is the number of elements in the sequence - */ -static inline -const char *parse_trace_type(const char *fmt, char *trace_size, - enum ltt_type *trace_type, - unsigned long *attributes) -{ - int qualifier; /* 'h', 'l', or 'L' for integer fields */ - /* 'z' support added 23/7/1999 S.H. */ - /* 'z' changed to 'Z' --davidm 1/25/99 */ - /* 't' added for ptrdiff_t */ - - /* parse attributes. 
*/
-repeat:
- switch (*fmt) {
- case 'n':
- *attributes |= LTT_ATTRIBUTE_NETWORK_BYTE_ORDER;
- ++fmt;
- goto repeat;
- }
-
- /* get the conversion qualifier */
- qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
- *fmt == 'S' || *fmt == '1' || *fmt == '2' ||
- *fmt == '4' || *fmt == '8') {
- qualifier = *fmt;
- ++fmt;
- if (qualifier == 'l' && *fmt == 'l') {
- qualifier = 'L';
- ++fmt;
- }
- }
-
- switch (*fmt) {
- case 'c':
- *trace_type = LTT_TYPE_UNSIGNED_INT;
- *trace_size = sizeof(unsigned char);
- goto parse_end;
- case 's':
- *trace_type = LTT_TYPE_STRING;
- goto parse_end;
- case 'p':
- *trace_type = LTT_TYPE_UNSIGNED_INT;
- *trace_size = sizeof(void *);
- goto parse_end;
- case 'd':
- case 'i':
- *trace_type = LTT_TYPE_SIGNED_INT;
- break;
- case 'o':
- case 'u':
- case 'x':
- case 'X':
- *trace_type = LTT_TYPE_UNSIGNED_INT;
- break;
- default:
- if (!*fmt)
- --fmt;
- goto parse_end;
- }
- switch (qualifier) {
- case 'L':
- *trace_size = sizeof(long long);
- break;
- case 'l':
- *trace_size = sizeof(long);
- break;
- case 'Z':
- case 'z':
- *trace_size = sizeof(size_t);
- break;
- case 't':
- *trace_size = sizeof(ptrdiff_t);
- break;
- case 'h':
- *trace_size = sizeof(short);
- break;
- case '1':
- *trace_size = sizeof(uint8_t);
- break;
- case '2':
- *trace_size = sizeof(uint16_t);
- break;
- case '4':
- *trace_size = sizeof(uint32_t);
- break;
- case '8':
- *trace_size = sizeof(uint64_t);
- break;
- default:
- *trace_size = sizeof(int);
- }
-
-parse_end:
- return fmt;
-}
-
-/*
- * Restrictions:
- * Field width and precision are *not* supported.
- * %n not supported.
- */
-static inline
-const char *parse_c_type(const char *fmt, char *c_size, enum ltt_type *c_type,
- char *outfmt)
-{
- int qualifier; /* 'h', 'l', or 'L' for integer fields */
- /* 'z' support added 23/7/1999 S.H. */
- /* 'z' changed to 'Z' --davidm 1/25/99 */
- /* 't' added for ptrdiff_t */
-
- /* process flags : ignore standard print formats for now.
*/ -repeat: - switch (*fmt) { - case '-': - case '+': - case ' ': - case '#': - case '0': - ++fmt; - goto repeat; - } - - /* get the conversion qualifier */ - qualifier = -1; - if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || - *fmt == 'Z' || *fmt == 'z' || *fmt == 't' || - *fmt == 'S') { - qualifier = *fmt; - ++fmt; - if (qualifier == 'l' && *fmt == 'l') { - qualifier = 'L'; - ++fmt; - } - } - - if (outfmt) { - if (qualifier != -1) - *outfmt++ = (char)qualifier; - *outfmt++ = *fmt; - *outfmt = 0; - } - - switch (*fmt) { - case 'c': - *c_type = LTT_TYPE_UNSIGNED_INT; - *c_size = sizeof(unsigned char); - goto parse_end; - case 's': - *c_type = LTT_TYPE_STRING; - goto parse_end; - case 'p': - *c_type = LTT_TYPE_UNSIGNED_INT; - *c_size = sizeof(void *); - goto parse_end; - case 'd': - case 'i': - *c_type = LTT_TYPE_SIGNED_INT; - break; - case 'o': - case 'u': - case 'x': - case 'X': - *c_type = LTT_TYPE_UNSIGNED_INT; - break; - default: - if (!*fmt) - --fmt; - goto parse_end; - } - switch (qualifier) { - case 'L': - *c_size = sizeof(long long); - break; - case 'l': - *c_size = sizeof(long); - break; - case 'Z': - case 'z': - *c_size = sizeof(size_t); - break; - case 't': - *c_size = sizeof(ptrdiff_t); - break; - case 'h': - *c_size = sizeof(short); - break; - default: - *c_size = sizeof(int); - } - -parse_end: - return fmt; -} - -static inline -size_t serialize_trace_data(struct ltt_chanbuf *buf, size_t buf_offset, - char trace_size, enum ltt_type trace_type, - char c_size, enum ltt_type c_type, - unsigned int *stack_pos_ctx, - int *largest_align, - va_list *args) -{ - union { - unsigned long v_ulong; - uint64_t v_uint64; - struct { - const char *s; - size_t len; - } v_string; - } tmp; - - /* - * Be careful about sign extension here. - * Sign extension is done with the destination (trace) type. - */ - switch (trace_type) { - case LTT_TYPE_SIGNED_INT: - switch (c_size) { - case 1: - tmp.v_ulong = (long)(int8_t)va_arg(*args, int); - break; - case 2: - tmp.v_ulong = (long)(int16_t)va_arg(*args, int); - break; - case 4: - tmp.v_ulong = (long)(int32_t)va_arg(*args, int); - break; - case 8: - tmp.v_uint64 = va_arg(*args, int64_t); - break; - default: - BUG(); - } - break; - case LTT_TYPE_UNSIGNED_INT: - switch (c_size) { - case 1: - tmp.v_ulong = (unsigned long)(uint8_t)va_arg(*args, unsigned int); - break; - case 2: - tmp.v_ulong = (unsigned long)(uint16_t)va_arg(*args, unsigned int); - break; - case 4: - tmp.v_ulong = (unsigned long)(uint32_t)va_arg(*args, unsigned int); - break; - case 8: - tmp.v_uint64 = va_arg(*args, uint64_t); - break; - default: - BUG(); - } - break; - case LTT_TYPE_STRING: - tmp.v_string.s = va_arg(*args, const char *); - if ((unsigned long)tmp.v_string.s < PAGE_SIZE) - tmp.v_string.s = ""; - if (!buf) { - /* - * Reserve tracer stack entry. - */ - __get_cpu_var(tracer_stack_pos)++; - WARN_ON_ONCE(__get_cpu_var(tracer_stack_pos) - > TRACER_STACK_LEN); - barrier(); - __get_cpu_var(tracer_stack)[*stack_pos_ctx] = - strlen(tmp.v_string.s) + 1; - } - tmp.v_string.len = __get_cpu_var(tracer_stack) - [(*stack_pos_ctx)++]; - if (buf) - ltt_relay_strncpy(&buf->a, buf->a.chan, buf_offset, - tmp.v_string.s, tmp.v_string.len); - buf_offset += tmp.v_string.len; - goto copydone; - default: - BUG(); - } - - /* - * If trace_size is lower or equal to 4 bytes, there is no sign - * extension to do because we are already encoded in a long. Therefore, - * we can combine signed and unsigned ops. 
4 bytes float also works - * with this, because we do a simple copy of 4 bytes into 4 bytes - * without manipulation (and we do not support conversion from integers - * to floats). - * It is also the case if c_size is 8 bytes, which is the largest - * possible integer. - */ - if (ltt_get_alignment()) { - buf_offset += ltt_align(buf_offset, trace_size); - if (largest_align) - *largest_align = max_t(int, *largest_align, trace_size); - } - if (trace_size <= 4 || c_size == 8) { - if (buf) { - switch (trace_size) { - case 1: - if (c_size == 8) - ltt_relay_write(&buf->a, buf->a.chan, - buf_offset, - (uint8_t[]){ (uint8_t)tmp.v_uint64 }, - sizeof(uint8_t)); - else - ltt_relay_write(&buf->a, buf->a.chan, - buf_offset, - (uint8_t[]){ (uint8_t)tmp.v_ulong }, - sizeof(uint8_t)); - break; - case 2: - if (c_size == 8) - ltt_relay_write(&buf->a, buf->a.chan, - buf_offset, - (uint16_t[]){ (uint16_t)tmp.v_uint64 }, - sizeof(uint16_t)); - else - ltt_relay_write(&buf->a, buf->a.chan, - buf_offset, - (uint16_t[]){ (uint16_t)tmp.v_ulong }, - sizeof(uint16_t)); - break; - case 4: - if (c_size == 8) - ltt_relay_write(&buf->a, buf->a.chan, - buf_offset, - (uint32_t[]){ (uint32_t)tmp.v_uint64 }, - sizeof(uint32_t)); - else - ltt_relay_write(&buf->a, buf->a.chan, - buf_offset, - (uint32_t[]){ (uint32_t)tmp.v_ulong }, - sizeof(uint32_t)); - break; - case 8: - /* - * c_size cannot be other than 8 here because - * trace_size > 4. - */ - ltt_relay_write(&buf->a, buf->a.chan, buf_offset, - (uint64_t[]){ (uint64_t)tmp.v_uint64 }, - sizeof(uint64_t)); - break; - default: - BUG(); - } - } - buf_offset += trace_size; - goto copydone; - } else { - /* - * Perform sign extension. - */ - if (buf) { - switch (trace_type) { - case LTT_TYPE_SIGNED_INT: - ltt_relay_write(&buf->a, buf->a.chan, buf_offset, - (int64_t[]){ (int64_t)tmp.v_ulong }, - sizeof(int64_t)); - break; - case LTT_TYPE_UNSIGNED_INT: - ltt_relay_write(&buf->a, buf->a.chan, buf_offset, - (uint64_t[]){ (uint64_t)tmp.v_ulong }, - sizeof(uint64_t)); - break; - default: - BUG(); - } - } - buf_offset += trace_size; - goto copydone; - } - -copydone: - return buf_offset; -} - -notrace size_t -ltt_serialize_data(struct ltt_chanbuf *buf, size_t buf_offset, - struct ltt_serialize_closure *closure, - void *serialize_private, unsigned int stack_pos_ctx, - int *largest_align, const char *fmt, va_list *args) -{ - char trace_size = 0, c_size = 0; /* - * 0 (unset), 1, 2, 4, 8 bytes. - */ - enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE; - unsigned long attributes = 0; - - for (; *fmt ; ++fmt) { - switch (*fmt) { - case '#': - /* tracetypes (#) */ - ++fmt; /* skip first '#' */ - if (*fmt == '#') /* Escaped ## */ - break; - attributes = 0; - fmt = parse_trace_type(fmt, &trace_size, &trace_type, - &attributes); - break; - case '%': - /* c types (%) */ - ++fmt; /* skip first '%' */ - if (*fmt == '%') /* Escaped %% */ - break; - fmt = parse_c_type(fmt, &c_size, &c_type, NULL); - /* - * Output c types if no trace types has been - * specified. 
- */ - if (!trace_size) - trace_size = c_size; - if (trace_type == LTT_TYPE_NONE) - trace_type = c_type; - if (c_type == LTT_TYPE_STRING) - trace_type = LTT_TYPE_STRING; - /* perform trace write */ - buf_offset = serialize_trace_data(buf, buf_offset, - trace_size, - trace_type, c_size, - c_type, - &stack_pos_ctx, - largest_align, - args); - trace_size = 0; - c_size = 0; - trace_type = LTT_TYPE_NONE; - c_size = LTT_TYPE_NONE; - attributes = 0; - break; - /* default is to skip the text, doing nothing */ - } - } - return buf_offset; -} -EXPORT_SYMBOL_GPL(ltt_serialize_data); - -static inline -uint64_t unserialize_base_type(struct ltt_chanbuf *buf, - size_t *ppos, char trace_size, - enum ltt_type trace_type) -{ - uint64_t tmp; - - *ppos += ltt_align(*ppos, trace_size); - ltt_relay_read(&buf->a, *ppos, &tmp, trace_size); - *ppos += trace_size; - - switch (trace_type) { - case LTT_TYPE_SIGNED_INT: - switch (trace_size) { - case 1: - return (uint64_t)*(int8_t *)&tmp; - case 2: - return (uint64_t)*(int16_t *)&tmp; - case 4: - return (uint64_t)*(int32_t *)&tmp; - case 8: - return tmp; - } - break; - case LTT_TYPE_UNSIGNED_INT: - switch (trace_size) { - case 1: - return (uint64_t)*(uint8_t *)&tmp; - case 2: - return (uint64_t)*(uint16_t *)&tmp; - case 4: - return (uint64_t)*(uint32_t *)&tmp; - case 8: - return tmp; - } - break; - default: - break; - } - - BUG(); - return 0; -} - -static -int serialize_printf_data(struct ltt_chanbuf *buf, size_t *ppos, - char trace_size, enum ltt_type trace_type, - char c_size, enum ltt_type c_type, char *output, - ssize_t outlen, const char *outfmt) -{ - u64 value; - outlen = outlen < 0 ? 0 : outlen; - - if (trace_type == LTT_TYPE_STRING) { - size_t len = ltt_relay_read_cstr(&buf->a, *ppos, output, - outlen); - *ppos += len + 1; - return len; - } - - value = unserialize_base_type(buf, ppos, trace_size, trace_type); - - if (c_size == 8) - return snprintf(output, outlen, outfmt, value); - else - return snprintf(output, outlen, outfmt, (unsigned int)value); -} - -/** - * ltt_serialize_printf - Format a string and place it in a buffer - * @buf: The ltt-relay buffer that store binary data - * @buf_offset: binary data's offset in @buf (should be masked to use as offset) - * @msg_size: return message's length - * @output: The buffer to place the result into - * @outlen: The size of the buffer, including the trailing '\0' - * @fmt: The format string to use - * - * The return value is the number of characters which would - * be generated for the given input, excluding the trailing - * '\0', as per ISO C99. If the return is greater than or equal to @outlen, - * the resulting string is truncated. - */ -size_t ltt_serialize_printf(struct ltt_chanbuf *buf, unsigned long buf_offset, - size_t *msg_size, char *output, size_t outlen, - const char *fmt) -{ - char trace_size = 0, c_size = 0; /* - * 0 (unset), 1, 2, 4, 8 bytes. 
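For a signed field wider than its C argument, sign extension is performed with the destination trace type, as serialize_trace_data() above does for the c_size == 4, trace_size == 8 case. A minimal fragment with illustrative values:

	/* What serialize_trace_data() does for a "#8d%d" field (sketch): */
	int input = -42;                  /* %d: read as a 4-byte C int    */
	long v = (long)(int32_t)input;    /* va_arg() path for c_size == 4 */
	int64_t stored = (int64_t)v;      /* sign extension done with the  */
	                                  /* 8-byte #8d trace type; written */
	                                  /* 8-byte aligned when            */
	                                  /* ltt_get_alignment() is set     */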
- */ - enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE; - unsigned long attributes = 0; - char outfmt[4] = "%"; - size_t outpos = 0; - size_t len; - size_t msgpos = buf_offset; - - for (; *fmt ; ++fmt) { - switch (*fmt) { - case '#': - /* tracetypes (#) */ - ++fmt; /* skip first '#' */ - if (*fmt == '#') { /* Escaped ## */ - if (outpos < outlen) - output[outpos] = '#'; - outpos++; - break; - } - attributes = 0; - fmt = parse_trace_type(fmt, &trace_size, &trace_type, - &attributes); - break; - case '%': - /* c types (%) */ - ++fmt; /* skip first '%' */ - if (*fmt == '%') { /* Escaped %% */ - if (outpos < outlen) - output[outpos] = '%'; - outpos++; - break; - } - fmt = parse_c_type(fmt, &c_size, &c_type, outfmt + 1); - /* - * Output c types if no trace types has been - * specified. - */ - if (!trace_size) - trace_size = c_size; - if (trace_type == LTT_TYPE_NONE) - trace_type = c_type; - if (c_type == LTT_TYPE_STRING) - trace_type = LTT_TYPE_STRING; - - /* perform trace printf */ - len = serialize_printf_data(buf, &msgpos, trace_size, - trace_type, c_size, c_type, - output + outpos, - outlen - outpos, outfmt); - outpos += len; - trace_size = 0; - c_size = 0; - trace_type = LTT_TYPE_NONE; - c_size = LTT_TYPE_NONE; - attributes = 0; - break; - default: - if (outpos < outlen) - output[outpos] = *fmt; - outpos++; - break; - } - } - if (msg_size) - *msg_size = (size_t)(msgpos - buf_offset); - /* - * Make sure we end output with terminating \0 when truncated. - */ - if (outpos >= outlen + 1) - output[outlen] = '\0'; - return outpos; -} -EXPORT_SYMBOL_GPL(ltt_serialize_printf); - -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - -unsigned int ltt_fmt_largest_align(size_t align_drift, const char *fmt) -{ - char trace_size = 0, c_size = 0; - enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE; - unsigned long attributes = 0; - int largest_align = 1; - - for (; *fmt ; ++fmt) { - switch (*fmt) { - case '#': - /* tracetypes (#) */ - ++fmt; /* skip first '#' */ - if (*fmt == '#') /* Escaped ## */ - break; - attributes = 0; - fmt = parse_trace_type(fmt, &trace_size, &trace_type, - &attributes); - - largest_align = max_t(int, largest_align, trace_size); - if (largest_align >= ltt_get_alignment()) - goto exit; - break; - case '%': - /* c types (%) */ - ++fmt; /* skip first '%' */ - if (*fmt == '%') /* Escaped %% */ - break; - fmt = parse_c_type(fmt, &c_size, &c_type, NULL); - /* - * Output c types if no trace types has been - * specified. - */ - if (!trace_size) - trace_size = c_size; - if (trace_type == LTT_TYPE_NONE) - trace_type = c_type; - if (c_type == LTT_TYPE_STRING) - trace_type = LTT_TYPE_STRING; - - largest_align = max_t(int, largest_align, trace_size); - if (largest_align >= ltt_get_alignment()) - goto exit; - - trace_size = 0; - c_size = 0; - trace_type = LTT_TYPE_NONE; - c_size = LTT_TYPE_NONE; - break; - } - } - -exit: - largest_align = min_t(int, largest_align, ltt_get_alignment()); - return (largest_align - align_drift) & (largest_align - 1); -} -EXPORT_SYMBOL_GPL(ltt_fmt_largest_align); - -#endif - -/* - * Calculate data size - * Assume that the padding for alignment starts at a sizeof(void *) address. 
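Read back, the same event can be turned into text with ltt_serialize_printf(), which follows snprintf() conventions for truncation. A sketch, assuming buf, buf_offset and fmt describe an already-serialized event in a channel buffer (buffer size illustrative):

	char out[128];
	size_t msg_size, len;

	len = ltt_serialize_printf(buf, buf_offset, &msg_size, out,
				   sizeof(out), fmt);
	/* len >= sizeof(out) means the text was truncated (snprintf rule); */
	/* msg_size holds the number of binary payload bytes consumed.      */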
- */ -static notrace -size_t ltt_get_data_size(struct ltt_serialize_closure *closure, - void *serialize_private, unsigned int stack_pos_ctx, - int *largest_align, const char *fmt, va_list *args) -{ - ltt_serialize_cb cb = closure->callbacks[0]; - closure->cb_idx = 0; - return (size_t)cb(NULL, 0, closure, serialize_private, stack_pos_ctx, - largest_align, fmt, args); -} - -static notrace -void ltt_write_event_data(struct ltt_chanbuf *buf, size_t buf_offset, - struct ltt_serialize_closure *closure, - void *serialize_private, unsigned int stack_pos_ctx, - int largest_align, const char *fmt, va_list *args) -{ - ltt_serialize_cb cb = closure->callbacks[0]; - closure->cb_idx = 0; - buf_offset += ltt_align(buf_offset, largest_align); - cb(buf, buf_offset, closure, serialize_private, stack_pos_ctx, NULL, - fmt, args); -} - - -notrace -void ltt_vtrace(const struct marker *mdata, void *probe_data, void *call_data, - const char *fmt, va_list *args) -{ - int largest_align, ret; - struct ltt_active_marker *pdata; - uint16_t eID; - size_t data_size, slot_size; - unsigned int chan_index; - struct ltt_chanbuf *buf; - struct ltt_chan *chan; - struct ltt_trace *trace, *dest_trace = NULL; - uint64_t tsc; - long buf_offset; - va_list args_copy; - struct ltt_serialize_closure closure; - struct ltt_probe_private_data *private_data = call_data; - void *serialize_private = NULL; - int cpu; - unsigned int rflags; - unsigned int stack_pos_ctx; - - /* - * This test is useful for quickly exiting static tracing when no trace - * is active. We expect to have an active trace when we get here. - */ - if (unlikely(ltt_traces.num_active_traces == 0)) - return; - - rcu_read_lock_sched_notrace(); - cpu = smp_processor_id(); - __get_cpu_var(ltt_nesting)++; - stack_pos_ctx = __get_cpu_var(tracer_stack_pos); - /* - * asm volatile and "memory" clobber prevent the compiler from moving - * instructions out of the ltt nesting count. This is required to ensure - * that probe side-effects which can cause recursion (e.g. unforeseen - * traps, divisions by 0, ...) are triggered within the incremented - * nesting count section. - */ - barrier(); - pdata = (struct ltt_active_marker *)probe_data; - eID = mdata->event_id; - chan_index = mdata->channel_id; - closure.callbacks = pdata->probe->callbacks; - - if (unlikely(private_data)) { - dest_trace = private_data->trace; - if (private_data->serializer) - closure.callbacks = &private_data->serializer; - serialize_private = private_data->serialize_private; - } - - va_copy(args_copy, *args); - /* - * Assumes event payload to start on largest_align alignment. - */ - largest_align = 1; /* must be non-zero for ltt_align */ - data_size = ltt_get_data_size(&closure, serialize_private, - stack_pos_ctx, &largest_align, - fmt, &args_copy); - largest_align = min_t(int, largest_align, sizeof(void *)); - va_end(args_copy); - - /* Iterate on each trace */ - list_for_each_entry_rcu(trace, <t_traces.head, list) { - /* - * Expect the filter to filter out events. If we get here, - * we went through tracepoint activation as a first step. - */ - if (unlikely(dest_trace && trace != dest_trace)) - continue; - if (unlikely(!trace->active)) - continue; - if (unlikely(!ltt_run_filter(trace, eID))) - continue; -#ifdef LTT_DEBUG_EVENT_SIZE - rflags = LTT_RFLAG_ID_SIZE; -#else - if (unlikely(eID >= LTT_FREE_EVENTS)) - rflags = LTT_RFLAG_ID; - else - rflags = 0; -#endif - /* - * Skip channels added after trace creation. 
- */ - if (unlikely(chan_index >= trace->nr_channels)) - continue; - chan = &trace->channels[chan_index]; - if (!chan->active) - continue; - - /* reserve space : header and data */ - ret = ltt_reserve_slot(chan, trace, data_size, largest_align, - cpu, &buf, &slot_size, &buf_offset, - &tsc, &rflags); - if (unlikely(ret < 0)) - continue; /* buffer full */ - - va_copy(args_copy, *args); - /* Out-of-order write : header and data */ - buf_offset = ltt_write_event_header(&buf->a, &chan->a, - buf_offset, eID, data_size, - tsc, rflags); - ltt_write_event_data(buf, buf_offset, &closure, - serialize_private, stack_pos_ctx, - largest_align, fmt, &args_copy); - va_end(args_copy); - /* Out-of-order commit */ - ltt_commit_slot(buf, chan, buf_offset, data_size, slot_size); - } - /* - * asm volatile and "memory" clobber prevent the compiler from moving - * instructions out of the ltt nesting count. This is required to ensure - * that probe side-effects which can cause recursion (e.g. unforeseen - * traps, divisions by 0, ...) are triggered within the incremented - * nesting count section. - */ - barrier(); - __get_cpu_var(tracer_stack_pos) = stack_pos_ctx; - __get_cpu_var(ltt_nesting)--; - rcu_read_unlock_sched_notrace(); -} -EXPORT_SYMBOL_GPL(ltt_vtrace); - -notrace -void ltt_trace(const struct marker *mdata, void *probe_data, void *call_data, - const char *fmt, ...) -{ - va_list args; - - va_start(args, fmt); - ltt_vtrace(mdata, probe_data, call_data, fmt, &args); - va_end(args); -} -EXPORT_SYMBOL_GPL(ltt_trace); - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Serializer"); diff --git a/discard/ltt-statedump.c b/discard/ltt-statedump.c deleted file mode 100644 index 06ade69a..00000000 --- a/discard/ltt-statedump.c +++ /dev/null @@ -1,441 +0,0 @@ -/* - * Linux Trace Toolkit Kernel State Dump - * - * Copyright 2005 - - * Jean-Hugues Deschenes - * - * Changes: - * Eric Clement: Add listing of network IP interface - * 2006, 2007 Mathieu Desnoyers Fix kernel threads - * Various updates - * - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ltt-tracer.h" - -#ifdef CONFIG_GENERIC_HARDIRQS -#include -#endif - -#define NB_PROC_CHUNK 20 - -/* - * Protected by the trace lock. 
- */ -static struct delayed_work cpu_work[NR_CPUS]; -static DECLARE_WAIT_QUEUE_HEAD(statedump_wq); -static atomic_t kernel_threads_to_run; - -static void empty_cb(void *call_data) -{ -} - -static DEFINE_MUTEX(statedump_cb_mutex); -static void (*ltt_dump_kprobes_table_cb)(void *call_data) = empty_cb; - -enum lttng_thread_type { - LTTNG_USER_THREAD = 0, - LTTNG_KERNEL_THREAD = 1, -}; - -enum lttng_execution_mode { - LTTNG_USER_MODE = 0, - LTTNG_SYSCALL = 1, - LTTNG_TRAP = 2, - LTTNG_IRQ = 3, - LTTNG_SOFTIRQ = 4, - LTTNG_MODE_UNKNOWN = 5, -}; - -enum lttng_execution_submode { - LTTNG_NONE = 0, - LTTNG_UNKNOWN = 1, -}; - -enum lttng_process_status { - LTTNG_UNNAMED = 0, - LTTNG_WAIT_FORK = 1, - LTTNG_WAIT_CPU = 2, - LTTNG_EXIT = 3, - LTTNG_ZOMBIE = 4, - LTTNG_WAIT = 5, - LTTNG_RUN = 6, - LTTNG_DEAD = 7, -}; - -#ifdef CONFIG_INET -static void ltt_enumerate_device(struct ltt_probe_private_data *call_data, - struct net_device *dev) -{ - struct in_device *in_dev; - struct in_ifaddr *ifa; - - if (dev->flags & IFF_UP) { - in_dev = in_dev_get(dev); - if (in_dev) { - for (ifa = in_dev->ifa_list; ifa != NULL; - ifa = ifa->ifa_next) - __trace_mark(0, netif_state, - network_ipv4_interface, - call_data, - "name %s address #n4u%lu up %d", - dev->name, - (unsigned long)ifa->ifa_address, - 0); - in_dev_put(in_dev); - } - } else - __trace_mark(0, netif_state, network_ip_interface, - call_data, "name %s address #n4u%lu up %d", - dev->name, 0UL, 0); -} - -static inline int -ltt_enumerate_network_ip_interface(struct ltt_probe_private_data *call_data) -{ - struct net_device *dev; - - read_lock(&dev_base_lock); - for_each_netdev(&init_net, dev) - ltt_enumerate_device(call_data, dev); - read_unlock(&dev_base_lock); - - return 0; -} -#else /* CONFIG_INET */ -static inline int -ltt_enumerate_network_ip_interface(struct ltt_probe_private_data *call_data) -{ - return 0; -} -#endif /* CONFIG_INET */ - - -static inline void -ltt_enumerate_task_fd(struct ltt_probe_private_data *call_data, - struct task_struct *t, char *tmp) -{ - struct fdtable *fdt; - struct file *filp; - unsigned int i; - const unsigned char *path; - - if (!t->files) - return; - - spin_lock(&t->files->file_lock); - fdt = files_fdtable(t->files); - for (i = 0; i < fdt->max_fds; i++) { - filp = fcheck_files(t->files, i); - if (!filp) - continue; - path = d_path(&filp->f_path, tmp, PAGE_SIZE); - /* Make sure we give at least some info */ - __trace_mark(0, fd_state, file_descriptor, call_data, - "filename %s pid %d fd %u", - (IS_ERR(path))?(filp->f_dentry->d_name.name):(path), - t->pid, i); - } - spin_unlock(&t->files->file_lock); -} - -static inline int -ltt_enumerate_file_descriptors(struct ltt_probe_private_data *call_data) -{ - struct task_struct *t = &init_task; - char *tmp = (char *)__get_free_page(GFP_KERNEL); - - /* Enumerate active file descriptors */ - do { - read_lock(&tasklist_lock); - if (t != &init_task) - atomic_dec(&t->usage); - t = next_task(t); - atomic_inc(&t->usage); - read_unlock(&tasklist_lock); - task_lock(t); - ltt_enumerate_task_fd(call_data, t, tmp); - task_unlock(t); - } while (t != &init_task); - free_page((unsigned long)tmp); - return 0; -} - -static inline void -ltt_enumerate_task_vm_maps(struct ltt_probe_private_data *call_data, - struct task_struct *t) -{ - struct mm_struct *mm; - struct vm_area_struct *map; - unsigned long ino; - - /* get_task_mm does a task_lock... 
*/ - mm = get_task_mm(t); - if (!mm) - return; - - map = mm->mmap; - if (map) { - down_read(&mm->mmap_sem); - while (map) { - if (map->vm_file) - ino = map->vm_file->f_dentry->d_inode->i_ino; - else - ino = 0; - __trace_mark(0, vm_state, vm_map, call_data, - "pid %d start %lu end %lu flags %lu " - "pgoff %lu inode %lu", - t->pid, map->vm_start, map->vm_end, - map->vm_flags, map->vm_pgoff << PAGE_SHIFT, - ino); - map = map->vm_next; - } - up_read(&mm->mmap_sem); - } - mmput(mm); -} - -static inline int -ltt_enumerate_vm_maps(struct ltt_probe_private_data *call_data) -{ - struct task_struct *t = &init_task; - - do { - read_lock(&tasklist_lock); - if (t != &init_task) - atomic_dec(&t->usage); - t = next_task(t); - atomic_inc(&t->usage); - read_unlock(&tasklist_lock); - ltt_enumerate_task_vm_maps(call_data, t); - } while (t != &init_task); - return 0; -} - -#ifdef CONFIG_GENERIC_HARDIRQS -static inline void list_interrupts(struct ltt_probe_private_data *call_data) -{ - unsigned int irq; - unsigned long flags = 0; - struct irq_desc *desc; - - /* needs irq_desc */ - for_each_irq_desc(irq, desc) { - struct irqaction *action; - const char *irq_chip_name = - desc->chip->name ? : "unnamed_irq_chip"; - - local_irq_save(flags); - raw_spin_lock(&desc->lock); - for (action = desc->action; action; action = action->next) - __trace_mark(0, irq_state, interrupt, call_data, - "name %s action %s irq_id %u", - irq_chip_name, action->name, irq); - raw_spin_unlock(&desc->lock); - local_irq_restore(flags); - } -} -#else -static inline void list_interrupts(struct ltt_probe_private_data *call_data) -{ -} -#endif - -static inline int -ltt_enumerate_process_states(struct ltt_probe_private_data *call_data) -{ - struct task_struct *t = &init_task; - struct task_struct *p = t; - enum lttng_process_status status; - enum lttng_thread_type type; - enum lttng_execution_mode mode; - enum lttng_execution_submode submode; - - do { - mode = LTTNG_MODE_UNKNOWN; - submode = LTTNG_UNKNOWN; - - read_lock(&tasklist_lock); - if (t != &init_task) { - atomic_dec(&t->usage); - t = next_thread(t); - } - if (t == p) { - p = next_task(t); - t = p; - } - atomic_inc(&t->usage); - read_unlock(&tasklist_lock); - - task_lock(t); - - if (t->exit_state == EXIT_ZOMBIE) - status = LTTNG_ZOMBIE; - else if (t->exit_state == EXIT_DEAD) - status = LTTNG_DEAD; - else if (t->state == TASK_RUNNING) { - /* Is this a forked child that has not run yet? */ - if (list_empty(&t->rt.run_list)) - status = LTTNG_WAIT_FORK; - else - /* - * All tasks are considered as wait_cpu; - * the viewer will sort out if the task was - * really running at this time. - */ - status = LTTNG_WAIT_CPU; - } else if (t->state & - (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) { - /* Task is waiting for something to complete */ - status = LTTNG_WAIT; - } else - status = LTTNG_UNNAMED; - submode = LTTNG_NONE; - - /* - * Verification of t->mm is to filter out kernel threads; - * Viewer will further filter out if a user-space thread was - * in syscall mode or not. 
- */ - if (t->mm) - type = LTTNG_USER_THREAD; - else - type = LTTNG_KERNEL_THREAD; - - __trace_mark(0, task_state, process_state, call_data, - "pid %d parent_pid %d name %s type %d mode %d " - "submode %d status %d tgid %d", - t->pid, t->parent->pid, t->comm, - type, mode, submode, status, t->tgid); - task_unlock(t); - } while (t != &init_task); - - return 0; -} - -void ltt_statedump_register_kprobes_dump(void (*callback)(void *call_data)) -{ - mutex_lock(&statedump_cb_mutex); - ltt_dump_kprobes_table_cb = callback; - mutex_unlock(&statedump_cb_mutex); -} -EXPORT_SYMBOL_GPL(ltt_statedump_register_kprobes_dump); - -void ltt_statedump_unregister_kprobes_dump(void (*callback)(void *call_data)) -{ - mutex_lock(&statedump_cb_mutex); - ltt_dump_kprobes_table_cb = empty_cb; - mutex_unlock(&statedump_cb_mutex); -} -EXPORT_SYMBOL_GPL(ltt_statedump_unregister_kprobes_dump); - -void ltt_statedump_work_func(struct work_struct *work) -{ - if (atomic_dec_and_test(&kernel_threads_to_run)) - /* If we are the last thread, wake up do_ltt_statedump */ - wake_up(&statedump_wq); -} - -static int do_ltt_statedump(struct ltt_probe_private_data *call_data) -{ - int cpu; - struct module *cb_owner; - - printk(KERN_DEBUG "LTT state dump thread start\n"); - ltt_enumerate_process_states(call_data); - ltt_enumerate_file_descriptors(call_data); - list_modules(call_data); - ltt_enumerate_vm_maps(call_data); - list_interrupts(call_data); - ltt_enumerate_network_ip_interface(call_data); - ltt_dump_swap_files(call_data); - ltt_dump_sys_call_table(call_data); - ltt_dump_softirq_vec(call_data); - ltt_dump_idt_table(call_data); - - mutex_lock(&statedump_cb_mutex); - - cb_owner = __module_address((unsigned long)ltt_dump_kprobes_table_cb); - __module_get(cb_owner); - ltt_dump_kprobes_table_cb(call_data); - module_put(cb_owner); - - mutex_unlock(&statedump_cb_mutex); - - /* - * Fire off a work queue on each CPU. Their sole purpose in life - * is to guarantee that each CPU has been in a state where is was in - * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ). - */ - get_online_cpus(); - atomic_set(&kernel_threads_to_run, num_online_cpus()); - for_each_online_cpu(cpu) { - INIT_DELAYED_WORK(&cpu_work[cpu], ltt_statedump_work_func); - schedule_delayed_work_on(cpu, &cpu_work[cpu], 0); - } - /* Wait for all threads to run */ - __wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) != 0)); - put_online_cpus(); - /* Our work is done */ - printk(KERN_DEBUG "LTT state dump end\n"); - __trace_mark(0, global_state, statedump_end, - call_data, MARK_NOARGS); - return 0; -} - -/* - * Called with trace lock held. 
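The kprobes table dump is pluggable through the callback registered above; a minimal sketch of a provider module, with a placeholder callback name and body:

	static void sample_dump_kprobes(void *call_data)
	{
		/* walk the kprobes table and emit one marker event per entry */
	}

	/* provider module init */
	ltt_statedump_register_kprobes_dump(sample_dump_kprobes);
	/* provider module exit */
	ltt_statedump_unregister_kprobes_dump(sample_dump_kprobes);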
- */ -int ltt_statedump_start(struct ltt_trace *trace) -{ - struct ltt_probe_private_data call_data; - printk(KERN_DEBUG "LTT state dump begin\n"); - - call_data.trace = trace; - call_data.serializer = NULL; - return do_ltt_statedump(&call_data); -} - -static int __init statedump_init(void) -{ - int ret; - printk(KERN_DEBUG "LTT : State dump init\n"); - ret = ltt_module_register(LTT_FUNCTION_STATEDUMP, - ltt_statedump_start, THIS_MODULE); - return ret; -} - -static void __exit statedump_exit(void) -{ - printk(KERN_DEBUG "LTT : State dump exit\n"); - ltt_module_unregister(LTT_FUNCTION_STATEDUMP); -} - -module_init(statedump_init) -module_exit(statedump_exit) - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Jean-Hugues Deschenes"); -MODULE_DESCRIPTION("Linux Trace Toolkit Statedump"); diff --git a/discard/ltt-trace-control.c b/discard/ltt-trace-control.c deleted file mode 100644 index 0a02549d..00000000 --- a/discard/ltt-trace-control.c +++ /dev/null @@ -1,1426 +0,0 @@ -/* - * LTT trace control module over debugfs. - * - * Copyright 2008 - Zhaolei - * - * Copyright 2009 - Gui Jianfeng - * Make mark-control work in debugfs - * - * Dual LGPL v2.1/GPL v2 license. - */ - -/* - * Todo: - * Impl read operations for control file to read attributes - * Create a README file in ltt control dir, for display help info - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "ltt-tracer.h" - -#define LTT_CONTROL_DIR "control" -#define MARKERS_CONTROL_DIR "markers" -#define LTT_SETUP_TRACE_FILE "setup_trace" -#define LTT_DESTROY_TRACE_FILE "destroy_trace" - -#define LTT_WRITE_MAXLEN (128) - -struct dentry *ltt_control_dir, *ltt_setup_trace_file, *ltt_destroy_trace_file, - *markers_control_dir; - -/* - * the traces_lock nests inside control_lock. - * control_lock protects the consistency of directories presented in ltt - * directory. - */ -static DEFINE_MUTEX(control_lock); - -/* - * big note about locking for marker control files : - * If a marker control file is added/removed manually racing with module - * load/unload, there may be warning messages appearing, but those two - * operations should be able to execute concurrently without any lock - * synchronizing their operation one wrt another. - * Locking the marker mutex, module mutex and also keeping a mutex here - * from mkdir/rmdir _and_ from the notifier called from module load/unload makes - * life miserable and just asks for deadlocks. - */ - -/* - * lookup a file/dir in parent dir. - * only designed to work well for debugfs. 
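For orientation, the control files created below are driven by plain writes from user space. A sketch of a typical session, relative to the LTT debugfs root returned by get_ltt_root() (the transport name, numeric values and channel names are illustrative):

	/*
	 *   echo trace1      > setup_trace
	 *   echo <transport> > control/trace1/trans
	 *   echo 4096        > control/trace1/channel/<chan>/subbuf_size
	 *   echo 8           > control/trace1/channel/<chan>/subbuf_num
	 *   echo 1           > control/trace1/alloc
	 *   echo 1           > control/trace1/enabled       start tracing
	 *   echo 0           > control/trace1/enabled       stop tracing
	 *   echo trace1      > destroy_trace
	 */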
- * (although it maybe ok for other fs) - * - * return: - * file/dir's dentry on success - * NULL on failure - */ -static struct dentry *dir_lookup(struct dentry *parent, const char *name) -{ - struct qstr q; - struct dentry *d; - - q.name = name; - q.len = strlen(name); - q.hash = full_name_hash(q.name, q.len); - - d = d_lookup(parent, &q); - if (d) - dput(d); - - return d; -} - - -static ssize_t alloc_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - int err = 0; - int buf_size; - char *buf = (char *)__get_free_page(GFP_KERNEL); - char *cmd = (char *)__get_free_page(GFP_KERNEL); - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto err_copy_from_user; - buf[buf_size] = 0; - - if (sscanf(buf, "%s", cmd) != 1) { - err = -EPERM; - goto err_get_cmd; - } - - if ((cmd[0] != 'Y' && cmd[0] != 'y' && cmd[0] != '1') || cmd[1]) { - err = -EPERM; - goto err_bad_cmd; - } - - err = ltt_trace_alloc(file->f_dentry->d_parent->d_name.name); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR "alloc_write: ltt_trace_alloc failed: %d\n", - err); - goto err_alloc_trace; - } - - free_page((unsigned long)buf); - free_page((unsigned long)cmd); - return count; - -err_alloc_trace: -err_bad_cmd: -err_get_cmd: -err_copy_from_user: - free_page((unsigned long)buf); - free_page((unsigned long)cmd); - return err; -} - -static const struct file_operations ltt_alloc_operations = { - .write = alloc_write, -}; - - -static ssize_t enabled_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - int err = 0; - int buf_size; - char *buf = (char *)__get_free_page(GFP_KERNEL); - char *cmd = (char *)__get_free_page(GFP_KERNEL); - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto err_copy_from_user; - buf[buf_size] = 0; - - if (sscanf(buf, "%s", cmd) != 1) { - err = -EPERM; - goto err_get_cmd; - } - - if (cmd[1]) { - err = -EPERM; - goto err_bad_cmd; - } - - switch (cmd[0]) { - case 'Y': - case 'y': - case '1': - err = ltt_trace_start(file->f_dentry->d_parent->d_name.name); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR - "enabled_write: ltt_trace_start failed: %d\n", - err); - err = -EPERM; - goto err_start_trace; - } - break; - case 'N': - case 'n': - case '0': - err = ltt_trace_stop(file->f_dentry->d_parent->d_name.name); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR - "enabled_write: ltt_trace_stop failed: %d\n", - err); - err = -EPERM; - goto err_stop_trace; - } - break; - default: - err = -EPERM; - goto err_bad_cmd; - } - - free_page((unsigned long)buf); - free_page((unsigned long)cmd); - return count; - -err_stop_trace: -err_start_trace: -err_bad_cmd: -err_get_cmd: -err_copy_from_user: - free_page((unsigned long)buf); - free_page((unsigned long)cmd); - return err; -} - -static const struct file_operations ltt_enabled_operations = { - .write = enabled_write, -}; - - -static ssize_t trans_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - char *buf = (char *)__get_free_page(GFP_KERNEL); - char *trans_name = (char *)__get_free_page(GFP_KERNEL); - int err = 0; - int buf_size; - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto err_copy_from_user; - buf[buf_size] = 0; - - if (sscanf(buf, "%s", trans_name) != 1) { - err = -EPERM; - goto err_get_transname; - } - - err = ltt_trace_set_type(file->f_dentry->d_parent->d_name.name, - 
trans_name); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR "trans_write: ltt_trace_set_type failed: %d\n", - err); - goto err_set_trans; - } - - free_page((unsigned long)buf); - free_page((unsigned long)trans_name); - return count; - -err_set_trans: -err_get_transname: -err_copy_from_user: - free_page((unsigned long)buf); - free_page((unsigned long)trans_name); - return err; -} - -static const struct file_operations ltt_trans_operations = { - .write = trans_write, -}; - - -static ssize_t channel_subbuf_num_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - int err = 0; - int buf_size; - unsigned int num; - const char *channel_name; - const char *trace_name; - char *buf = (char *)__get_free_page(GFP_KERNEL); - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto err_copy_from_user; - buf[buf_size] = 0; - - if (sscanf(buf, "%u", &num) != 1) { - err = -EPERM; - goto err_get_number; - } - - channel_name = file->f_dentry->d_parent->d_name.name; - trace_name = file->f_dentry->d_parent->d_parent->d_parent->d_name.name; - - err = ltt_trace_set_channel_subbufcount(trace_name, channel_name, num); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR "channel_subbuf_num_write: " - "ltt_trace_set_channel_subbufcount failed: %d\n", err); - goto err_set_subbufcount; - } - - free_page((unsigned long)buf); - return count; - -err_set_subbufcount: -err_get_number: -err_copy_from_user: - free_page((unsigned long)buf); - return err; -} - -static const struct file_operations ltt_channel_subbuf_num_operations = { - .write = channel_subbuf_num_write, -}; - - -static -ssize_t channel_subbuf_size_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - int err = 0; - int buf_size; - unsigned int num; - const char *channel_name; - const char *trace_name; - char *buf = (char *)__get_free_page(GFP_KERNEL); - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto err_copy_from_user; - buf[buf_size] = 0; - - if (sscanf(buf, "%u", &num) != 1) { - err = -EPERM; - goto err_get_number; - } - - channel_name = file->f_dentry->d_parent->d_name.name; - trace_name = file->f_dentry->d_parent->d_parent->d_parent->d_name.name; - - err = ltt_trace_set_channel_subbufsize(trace_name, channel_name, num); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR "channel_subbuf_size_write: " - "ltt_trace_set_channel_subbufsize failed: %d\n", err); - goto err_set_subbufsize; - } - - free_page((unsigned long)buf); - return count; - -err_set_subbufsize: -err_get_number: -err_copy_from_user: - free_page((unsigned long)buf); - return err; -} - -static const struct file_operations ltt_channel_subbuf_size_operations = { - .write = channel_subbuf_size_write, -}; - -static -ssize_t channel_switch_timer_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - int err = 0; - int buf_size; - unsigned long num; - const char *channel_name; - const char *trace_name; - char *buf = (char *)__get_free_page(GFP_KERNEL); - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto err_copy_from_user; - buf[buf_size] = 0; - - if (sscanf(buf, "%lu", &num) != 1) { - err = -EPERM; - goto err_get_number; - } - - channel_name = file->f_dentry->d_parent->d_name.name; - trace_name = file->f_dentry->d_parent->d_parent->d_parent->d_name.name; - - /* Convert from ms to us */ - num *= 1000; - - err = 
ltt_trace_set_channel_switch_timer(trace_name, channel_name, num); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR "channel_switch_timer_write: " - "ltt_trace_set_channel_switch_timer failed: %d\n", err); - goto err_set_switch_timer; - } - - free_page((unsigned long)buf); - return count; - -err_set_switch_timer: -err_get_number: -err_copy_from_user: - free_page((unsigned long)buf); - return err; -} - -static struct file_operations ltt_channel_switch_timer_operations = { - .write = channel_switch_timer_write, -}; - -static -ssize_t channel_overwrite_write(struct file *file, - const char __user *user_buf, size_t count, - loff_t *ppos) -{ - int err = 0; - int buf_size; - const char *channel_name; - const char *trace_name; - char *buf = (char *)__get_free_page(GFP_KERNEL); - char *cmd = (char *)__get_free_page(GFP_KERNEL); - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto err_copy_from_user; - buf[buf_size] = 0; - - if (sscanf(buf, "%s", cmd) != 1) { - err = -EPERM; - goto err_get_cmd; - } - - if (cmd[1]) { - err = -EPERM; - goto err_bad_cmd; - } - - channel_name = file->f_dentry->d_parent->d_name.name; - trace_name = file->f_dentry->d_parent->d_parent->d_parent->d_name.name; - - switch (cmd[0]) { - case 'Y': - case 'y': - case '1': - err = ltt_trace_set_channel_overwrite(trace_name, channel_name, - 1); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR "channel_overwrite_write: " - "ltt_trace_set_channel_overwrite failed: %d\n", - err); - goto err_set_subbufsize; - } - break; - case 'N': - case 'n': - case '0': - err = ltt_trace_set_channel_overwrite(trace_name, channel_name, - 0); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR "channel_overwrite_write: " - "ltt_trace_set_channel_overwrite failed: %d\n", - err); - goto err_set_subbufsize; - } - break; - default: - err = -EPERM; - goto err_bad_cmd; - } - - free_page((unsigned long)buf); - free_page((unsigned long)cmd); - return count; - -err_set_subbufsize: -err_bad_cmd: -err_get_cmd: -err_copy_from_user: - free_page((unsigned long)buf); - free_page((unsigned long)cmd); - return err; -} - -static const struct file_operations ltt_channel_overwrite_operations = { - .write = channel_overwrite_write, -}; - - -static -ssize_t channel_enable_write(struct file *file, - const char __user *user_buf, size_t count, - loff_t *ppos) -{ - int err = 0; - int buf_size; - const char *channel_name; - const char *trace_name; - char *buf = (char *)__get_free_page(GFP_KERNEL); - char *cmd = (char *)__get_free_page(GFP_KERNEL); - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto err_copy_from_user; - buf[buf_size] = 0; - - if (sscanf(buf, "%s", cmd) != 1) { - err = -EPERM; - goto err_get_cmd; - } - - if (cmd[1]) { - err = -EPERM; - goto err_bad_cmd; - } - - channel_name = file->f_dentry->d_parent->d_name.name; - trace_name = file->f_dentry->d_parent->d_parent->d_parent->d_name.name; - - switch (cmd[0]) { - case 'Y': - case 'y': - case '1': - err = ltt_trace_set_channel_enable(trace_name, channel_name, - 1); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR "channel_enable_write: " - "ltt_trace_set_channel_enable failed: %d\n", - err); - goto err_set_subbufsize; - } - break; - case 'N': - case 'n': - case '0': - err = ltt_trace_set_channel_enable(trace_name, channel_name, - 0); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR "channel_enable_write: " - "ltt_trace_set_channel_enable failed: %d\n", - err); - goto err_set_subbufsize; - } - break; - 
default: - err = -EPERM; - goto err_bad_cmd; - } - - free_page((unsigned long)buf); - free_page((unsigned long)cmd); - return count; - -err_set_subbufsize: -err_bad_cmd: -err_get_cmd: -err_copy_from_user: - free_page((unsigned long)buf); - free_page((unsigned long)cmd); - return err; -} - -static const struct file_operations ltt_channel_enable_operations = { - .write = channel_enable_write, -}; - - -static int _create_trace_control_dir(const char *trace_name, - struct ltt_trace *trace) -{ - int err; - struct dentry *trace_root, *channel_root; - struct dentry *tmp_den; - int i; - - /* debugfs/control/trace_name */ - trace_root = debugfs_create_dir(trace_name, ltt_control_dir); - if (IS_ERR(trace_root) || !trace_root) { - printk(KERN_ERR "_create_trace_control_dir: " - "create control root dir of %s failed\n", trace_name); - err = -ENOMEM; - goto err_create_trace_root; - } - - /* debugfs/control/trace_name/alloc */ - tmp_den = debugfs_create_file("alloc", S_IWUSR, trace_root, NULL, - <t_alloc_operations); - if (IS_ERR(tmp_den) || !tmp_den) { - printk(KERN_ERR "_create_trace_control_dir: " - "create file of alloc failed\n"); - err = -ENOMEM; - goto err_create_subdir; - } - - /* debugfs/control/trace_name/trans */ - tmp_den = debugfs_create_file("trans", S_IWUSR, trace_root, NULL, - <t_trans_operations); - if (IS_ERR(tmp_den) || !tmp_den) { - printk(KERN_ERR "_create_trace_control_dir: " - "create file of trans failed\n"); - err = -ENOMEM; - goto err_create_subdir; - } - - /* debugfs/control/trace_name/enabled */ - tmp_den = debugfs_create_file("enabled", S_IWUSR, trace_root, NULL, - <t_enabled_operations); - if (IS_ERR(tmp_den) || !tmp_den) { - printk(KERN_ERR "_create_trace_control_dir: " - "create file of enabled failed\n"); - err = -ENOMEM; - goto err_create_subdir; - } - - /* debugfs/control/trace_name/channel/ */ - channel_root = debugfs_create_dir("channel", trace_root); - if (IS_ERR(channel_root) || !channel_root) { - printk(KERN_ERR "_create_trace_control_dir: " - "create dir of channel failed\n"); - err = -ENOMEM; - goto err_create_subdir; - } - - /* - * Create dir and files in debugfs/ltt/control/trace_name/channel/ - * Following things(without <>) will be created: - * `-- - * `-- - * `-- - * |-- - * | |-- enable - * | |-- overwrite - * | |-- subbuf_num - * | |-- subbuf_size - * | `-- switch_timer - * `-- ... 
- */ - - for (i = 0; i < trace->nr_channels; i++) { - struct dentry *channel_den; - struct ltt_chan *chan; - - chan = &trace->channels[i]; - if (!chan->active) - continue; - channel_den = debugfs_create_dir(chan->a.filename, - channel_root); - if (IS_ERR(channel_den) || !channel_den) { - printk(KERN_ERR "_create_trace_control_dir: " - "create channel dir of %s failed\n", - chan->a.filename); - err = -ENOMEM; - goto err_create_subdir; - } - - tmp_den = debugfs_create_file("subbuf_num", S_IWUSR, - channel_den, NULL, - <t_channel_subbuf_num_operations); - if (IS_ERR(tmp_den) || !tmp_den) { - printk(KERN_ERR "_create_trace_control_dir: " - "create subbuf_num in %s failed\n", - chan->a.filename); - err = -ENOMEM; - goto err_create_subdir; - } - - tmp_den = debugfs_create_file("subbuf_size", S_IWUSR, - channel_den, NULL, - <t_channel_subbuf_size_operations); - if (IS_ERR(tmp_den) || !tmp_den) { - printk(KERN_ERR "_create_trace_control_dir: " - "create subbuf_size in %s failed\n", - chan->a.filename); - err = -ENOMEM; - goto err_create_subdir; - } - - tmp_den = debugfs_create_file("enable", S_IWUSR, channel_den, - NULL, - <t_channel_enable_operations); - if (IS_ERR(tmp_den) || !tmp_den) { - printk(KERN_ERR "_create_trace_control_dir: " - "create enable in %s failed\n", - chan->a.filename); - err = -ENOMEM; - goto err_create_subdir; - } - - tmp_den = debugfs_create_file("overwrite", S_IWUSR, channel_den, - NULL, - <t_channel_overwrite_operations); - if (IS_ERR(tmp_den) || !tmp_den) { - printk(KERN_ERR "_create_trace_control_dir: " - "create overwrite in %s failed\n", - chan->a.filename); - err = -ENOMEM; - goto err_create_subdir; - } - - tmp_den = debugfs_create_file("switch_timer", S_IWUSR, - channel_den, NULL, - <t_channel_switch_timer_operations); - if (IS_ERR(tmp_den) || !tmp_den) { - printk(KERN_ERR "_create_trace_control_dir: " - "create switch_timer in %s failed\n", - chan->a.filename); - err = -ENOMEM; - goto err_create_subdir; - } - } - - return 0; - -err_create_subdir: - debugfs_remove_recursive(trace_root); -err_create_trace_root: - return err; -} - -static -ssize_t setup_trace_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - int err = 0; - int buf_size; - struct ltt_trace *trace; - char *buf = (char *)__get_free_page(GFP_KERNEL); - char *trace_name = (char *)__get_free_page(GFP_KERNEL); - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto err_copy_from_user; - buf[buf_size] = 0; - - if (sscanf(buf, "%s", trace_name) != 1) { - err = -EPERM; - goto err_get_tracename; - } - - mutex_lock(&control_lock); - ltt_lock_traces(); - - err = _ltt_trace_setup(trace_name); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR - "setup_trace_write: ltt_trace_setup failed: %d\n", err); - goto err_setup_trace; - } - trace = _ltt_trace_find_setup(trace_name); - BUG_ON(!trace); - err = _create_trace_control_dir(trace_name, trace); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR "setup_trace_write: " - "_create_trace_control_dir failed: %d\n", err); - goto err_create_trace_control_dir; - } - - ltt_unlock_traces(); - mutex_unlock(&control_lock); - - free_page((unsigned long)buf); - free_page((unsigned long)trace_name); - return count; - -err_create_trace_control_dir: - ltt_trace_destroy(trace_name); -err_setup_trace: - ltt_unlock_traces(); - mutex_unlock(&control_lock); -err_get_tracename: -err_copy_from_user: - free_page((unsigned long)buf); - free_page((unsigned long)trace_name); - return err; -} - -static const 
struct file_operations ltt_setup_trace_operations = { - .write = setup_trace_write, -}; - -static -ssize_t destroy_trace_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct dentry *trace_den; - int buf_size; - int err = 0; - char *buf = (char *)__get_free_page(GFP_KERNEL); - char *trace_name = (char *)__get_free_page(GFP_KERNEL); - - buf_size = min_t(size_t, count, PAGE_SIZE - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto err_copy_from_user; - buf[buf_size] = 0; - - if (sscanf(buf, "%s", trace_name) != 1) { - err = -EPERM; - goto err_get_tracename; - } - - mutex_lock(&control_lock); - - err = ltt_trace_destroy(trace_name); - if (IS_ERR_VALUE(err)) { - printk(KERN_ERR - "destroy_trace_write: ltt_trace_destroy failed: %d\n", - err); - err = -EPERM; - goto err_destroy_trace; - } - - trace_den = dir_lookup(ltt_control_dir, trace_name); - if (!trace_den) { - printk(KERN_ERR - "destroy_trace_write: lookup for %s's dentry failed\n", - trace_name); - err = -ENOENT; - goto err_get_dentry; - } - - debugfs_remove_recursive(trace_den); - - mutex_unlock(&control_lock); - - free_page((unsigned long)buf); - free_page((unsigned long)trace_name); - return count; - -err_get_dentry: -err_destroy_trace: - mutex_unlock(&control_lock); -err_get_tracename: -err_copy_from_user: - free_page((unsigned long)buf); - free_page((unsigned long)trace_name); - return err; -} - -static const struct file_operations ltt_destroy_trace_operations = { - .write = destroy_trace_write, -}; - -static void init_marker_dir(struct dentry *dentry, - const struct inode_operations *opt) -{ - dentry->d_inode->i_op = opt; -} - -static -ssize_t marker_enable_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char *buf; - const char *channel, *marker; - int len, enabled, present; - - marker = filp->f_dentry->d_parent->d_name.name; - channel = filp->f_dentry->d_parent->d_parent->d_name.name; - - len = 0; - buf = (char *)__get_free_page(GFP_KERNEL); - - /* - * Note: we cannot take the marker lock to make these two checks - * atomic, because the marker mutex nests inside the module mutex, taken - * inside the marker present check. 
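Individual markers are controlled through the markers/ tree handled below; a sketch of the per-marker files and the values they accept and report (paths relative to the same debugfs root):

	/*
	 *   echo 1 > markers/<channel>/<marker>/enable   connect the "default" probe
	 *   echo 0 > markers/<channel>/<marker>/enable   disconnect it
	 *   cat      markers/<channel>/<marker>/enable   prints 0 (disabled),
	 *                                                1 (enabled and present) or
	 *                                                2 (pre-enabled, marker not
	 *                                                   present yet)
	 *   cat      markers/<channel>/<marker>/info     location, format string,
	 *                                                state, event id, probe type
	 */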
- */ - enabled = is_marker_enabled(channel, marker); - present = is_marker_present(channel, marker); - - if (enabled && present) - len = snprintf(buf, PAGE_SIZE, "%d\n", 1); - else if (enabled && !present) - len = snprintf(buf, PAGE_SIZE, "%d\n", 2); - else - len = snprintf(buf, PAGE_SIZE, "%d\n", 0); - - - if (len >= PAGE_SIZE) { - len = PAGE_SIZE; - buf[PAGE_SIZE] = '\0'; - } - len = simple_read_from_buffer(ubuf, cnt, ppos, buf, len); - free_page((unsigned long)buf); - - return len; -} - -static -ssize_t marker_enable_write(struct file *filp, const char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char *buf = (char *)__get_free_page(GFP_KERNEL); - int buf_size; - ssize_t ret = 0; - const char *channel, *marker; - - marker = filp->f_dentry->d_parent->d_name.name; - channel = filp->f_dentry->d_parent->d_parent->d_name.name; - - buf_size = min_t(size_t, cnt, PAGE_SIZE - 1); - ret = copy_from_user(buf, ubuf, buf_size); - if (ret) - goto end; - - buf[buf_size] = 0; - - switch (buf[0]) { - case 'Y': - case 'y': - case '1': - ret = ltt_marker_connect(channel, marker, "default"); - if (ret) - goto end; - break; - case 'N': - case 'n': - case '0': - ret = ltt_marker_disconnect(channel, marker, "default"); - if (ret) - goto end; - break; - default: - ret = -EPERM; - goto end; - } - ret = cnt; -end: - free_page((unsigned long)buf); - return ret; -} - -static const struct file_operations enable_fops = { - .read = marker_enable_read, - .write = marker_enable_write, -}; - -/* - * In practice, the output size should never be larger than 4096 kB. If it - * ever happens, the output will simply be truncated. - */ -static -ssize_t marker_info_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char *buf; - const char *channel, *marker; - int len; - struct marker_iter iter; - - marker = filp->f_dentry->d_parent->d_name.name; - channel = filp->f_dentry->d_parent->d_parent->d_name.name; - - len = 0; - buf = (char *)__get_free_page(GFP_KERNEL); - - if (is_marker_enabled(channel, marker) && - !is_marker_present(channel, marker)) { - len += snprintf(buf + len, PAGE_SIZE - len, - "Marker Pre-enabled\n"); - goto out; - } - - marker_iter_reset(&iter); - marker_iter_start(&iter); - for (; iter.marker != NULL; marker_iter_next(&iter)) { - if (!strcmp(iter.marker->channel, channel) && - !strcmp(iter.marker->name, marker)) - len += snprintf(buf + len, PAGE_SIZE - len, - "Location: %s\n" - "format: \"%s\"\nstate: %d\n" - "event_id: %hu\n" - "call: 0x%p\n" - "probe %s : 0x%p\n\n", -#ifdef CONFIG_MODULES - iter.module ? iter.module->name : -#endif - "Core Kernel", - iter.marker->format, - _imv_read(iter.marker->state), - iter.marker->event_id, - iter.marker->call, - iter.marker->ptype ? - "multi" : "single", iter.marker->ptype ? 
- (void *)iter.marker->multi : - (void *)iter.marker->single.func); - if (len >= PAGE_SIZE) - break; - } - marker_iter_stop(&iter); - -out: - if (len >= PAGE_SIZE) { - len = PAGE_SIZE; - buf[PAGE_SIZE] = '\0'; - } - - len = simple_read_from_buffer(ubuf, cnt, ppos, buf, len); - free_page((unsigned long)buf); - - return len; -} - -static const struct file_operations info_fops = { - .read = marker_info_read, -}; - -static int marker_mkdir(struct inode *dir, struct dentry *dentry, int mode) -{ - struct dentry *marker_d, *enable_d, *info_d, *channel_d; - int ret; - - ret = 0; - channel_d = (struct dentry *)dir->i_private; - mutex_unlock(&dir->i_mutex); - - marker_d = debugfs_create_dir(dentry->d_name.name, - channel_d); - if (IS_ERR(marker_d)) { - ret = PTR_ERR(marker_d); - goto out; - } - - enable_d = debugfs_create_file("enable", 0644, marker_d, - NULL, &enable_fops); - if (IS_ERR(enable_d) || !enable_d) { - printk(KERN_ERR - "%s: create file of %s failed\n", - __func__, "enable"); - ret = -ENOMEM; - goto remove_marker_dir; - } - - info_d = debugfs_create_file("info", 0644, marker_d, - NULL, &info_fops); - if (IS_ERR(info_d) || !info_d) { - printk(KERN_ERR - "%s: create file of %s failed\n", - __func__, "info"); - ret = -ENOMEM; - goto remove_enable_dir; - } - - goto out; - -remove_enable_dir: - debugfs_remove(enable_d); -remove_marker_dir: - debugfs_remove(marker_d); -out: - mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - return ret; -} - -static int marker_rmdir(struct inode *dir, struct dentry *dentry) -{ - struct dentry *marker_d, *channel_d; - const char *channel, *name; - int ret, enabled, present; - - ret = 0; - - channel_d = (struct dentry *)dir->i_private; - channel = channel_d->d_name.name; - - marker_d = dir_lookup(channel_d, dentry->d_name.name); - - if (!marker_d) { - ret = -ENOENT; - goto out; - } - - name = marker_d->d_name.name; - - enabled = is_marker_enabled(channel, name); - present = is_marker_present(channel, name); - - if (present || (!present && enabled)) { - ret = -EPERM; - goto out; - } - - mutex_unlock(&dir->i_mutex); - mutex_unlock(&dentry->d_inode->i_mutex); - debugfs_remove_recursive(marker_d); - mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - mutex_lock(&dentry->d_inode->i_mutex); -out: - return ret; -} - -const struct inode_operations channel_dir_opt = { - .lookup = simple_lookup, - .mkdir = marker_mkdir, - .rmdir = marker_rmdir, -}; - -static int channel_mkdir(struct inode *dir, struct dentry *dentry, int mode) -{ - struct dentry *channel_d; - int ret; - - ret = 0; - mutex_unlock(&dir->i_mutex); - - channel_d = debugfs_create_dir(dentry->d_name.name, - markers_control_dir); - if (IS_ERR(channel_d)) { - ret = PTR_ERR(channel_d); - goto out; - } - - channel_d->d_inode->i_private = (void *)channel_d; - init_marker_dir(channel_d, &channel_dir_opt); -out: - mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - return ret; -} - -static int channel_rmdir(struct inode *dir, struct dentry *dentry) -{ - struct dentry *channel_d; - int ret; - - ret = 0; - - channel_d = dir_lookup(markers_control_dir, dentry->d_name.name); - if (!channel_d) { - ret = -ENOENT; - goto out; - } - - if (list_empty(&channel_d->d_subdirs)) { - mutex_unlock(&dir->i_mutex); - mutex_unlock(&dentry->d_inode->i_mutex); - debugfs_remove(channel_d); - mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - mutex_lock(&dentry->d_inode->i_mutex); - } else - ret = -EPERM; - -out: - return ret; -} - -const struct inode_operations root_dir_opt = { - .lookup = simple_lookup, - .mkdir = channel_mkdir, - 
.rmdir = channel_rmdir -}; - -static int build_marker_file(struct marker *marker) -{ - struct dentry *channel_d, *marker_d, *enable_d, *info_d; - int err; - - channel_d = dir_lookup(markers_control_dir, marker->channel); - if (!channel_d) { - channel_d = debugfs_create_dir(marker->channel, - markers_control_dir); - if (IS_ERR(channel_d) || !channel_d) { - printk(KERN_ERR - "%s: build channel dir of %s failed\n", - __func__, marker->channel); - err = -ENOMEM; - goto err_build_fail; - } - channel_d->d_inode->i_private = (void *)channel_d; - init_marker_dir(channel_d, &channel_dir_opt); - } - - marker_d = dir_lookup(channel_d, marker->name); - if (!marker_d) { - marker_d = debugfs_create_dir(marker->name, channel_d); - if (IS_ERR(marker_d) || !marker_d) { - printk(KERN_ERR - "%s: marker dir of %s failed\n", - __func__, marker->name); - err = -ENOMEM; - goto err_build_fail; - } - } - - enable_d = dir_lookup(marker_d, "enable"); - if (!enable_d) { - enable_d = debugfs_create_file("enable", 0644, marker_d, - NULL, &enable_fops); - if (IS_ERR(enable_d) || !enable_d) { - printk(KERN_ERR - "%s: create file of %s failed\n", - __func__, "enable"); - err = -ENOMEM; - goto err_build_fail; - } - } - - info_d = dir_lookup(marker_d, "info"); - if (!info_d) { - info_d = debugfs_create_file("info", 0444, marker_d, - NULL, &info_fops); - if (IS_ERR(info_d) || !info_d) { - printk(KERN_ERR - "%s: create file of %s failed\n", - __func__, "enable"); - err = -ENOMEM; - goto err_build_fail; - } - } - - return 0; - -err_build_fail: - return err; -} - -static int build_marker_control_files(void) -{ - struct marker_iter iter; - int err; - - err = 0; - if (!markers_control_dir) - return -EEXIST; - - marker_iter_reset(&iter); - marker_iter_start(&iter); - for (; iter.marker != NULL; marker_iter_next(&iter)) { - err = build_marker_file(iter.marker); - if (err) - goto out; - } - marker_iter_stop(&iter); - -out: - return err; -} - -#ifdef CONFIG_MODULES -static int remove_marker_control_dir(struct module *mod, struct marker *marker) -{ - struct dentry *channel_d, *marker_d; - const char *channel, *name; - int count; - struct marker_iter iter; - - count = 0; - - channel_d = dir_lookup(markers_control_dir, marker->channel); - if (!channel_d) - return -ENOENT; - channel = channel_d->d_name.name; - - marker_d = dir_lookup(channel_d, marker->name); - if (!marker_d) - return -ENOENT; - name = marker_d->d_name.name; - - marker_iter_reset(&iter); - marker_iter_start(&iter); - for (; iter.marker != NULL; marker_iter_next(&iter)) { - if (!strcmp(iter.marker->channel, channel) && - !strcmp(iter.marker->name, name) && mod != iter.module) - count++; - } - - if (count > 0) - goto end; - - debugfs_remove_recursive(marker_d); - if (list_empty(&channel_d->d_subdirs)) - debugfs_remove(channel_d); - -end: - marker_iter_stop(&iter); - return 0; -} - -static void cleanup_control_dir(struct module *mod, struct marker *begin, - struct marker *end) -{ - struct marker *iter; - - if (!markers_control_dir) - return; - - for (iter = begin; iter < end; iter++) - remove_marker_control_dir(mod, iter); - - return; -} - -static void build_control_dir(struct module *mod, struct marker *begin, - struct marker *end) -{ - struct marker *iter; - int err; - - err = 0; - if (!markers_control_dir) - return; - - for (iter = begin; iter < end; iter++) { - err = build_marker_file(iter); - if (err) - goto err_build_fail; - } - - return; -err_build_fail: - cleanup_control_dir(mod, begin, end); -} - -static int module_notify(struct notifier_block *self, - unsigned 
long val, void *data) -{ - struct module *mod = data; - - switch (val) { - case MODULE_STATE_COMING: - build_control_dir(mod, mod->markers, - mod->markers + mod->num_markers); - break; - case MODULE_STATE_GOING: - cleanup_control_dir(mod, mod->markers, - mod->markers + mod->num_markers); - break; - } - return NOTIFY_DONE; -} -#else -static inline int module_notify(struct notifier_block *self, - unsigned long val, void *data) -{ - return 0; -} -#endif - -static struct notifier_block module_nb = { - .notifier_call = module_notify, -}; - -static int __init ltt_trace_control_init(void) -{ - int err = 0; - struct dentry *ltt_root_dentry; - - ltt_root_dentry = get_ltt_root(); - if (!ltt_root_dentry) { - err = -ENOENT; - goto err_no_root; - } - - ltt_control_dir = debugfs_create_dir(LTT_CONTROL_DIR, ltt_root_dentry); - if (IS_ERR(ltt_control_dir) || !ltt_control_dir) { - printk(KERN_ERR - "ltt_channel_control_init: create dir of %s failed\n", - LTT_CONTROL_DIR); - err = -ENOMEM; - goto err_create_control_dir; - } - - ltt_setup_trace_file = debugfs_create_file(LTT_SETUP_TRACE_FILE, - S_IWUSR, ltt_root_dentry, - NULL, - <t_setup_trace_operations); - if (IS_ERR(ltt_setup_trace_file) || !ltt_setup_trace_file) { - printk(KERN_ERR - "ltt_channel_control_init: create file of %s failed\n", - LTT_SETUP_TRACE_FILE); - err = -ENOMEM; - goto err_create_setup_trace_file; - } - - ltt_destroy_trace_file = debugfs_create_file(LTT_DESTROY_TRACE_FILE, - S_IWUSR, ltt_root_dentry, - NULL, - <t_destroy_trace_operations); - if (IS_ERR(ltt_destroy_trace_file) || !ltt_destroy_trace_file) { - printk(KERN_ERR - "ltt_channel_control_init: create file of %s failed\n", - LTT_DESTROY_TRACE_FILE); - err = -ENOMEM; - goto err_create_destroy_trace_file; - } - - markers_control_dir = debugfs_create_dir(MARKERS_CONTROL_DIR, - ltt_root_dentry); - if (IS_ERR(markers_control_dir) || !markers_control_dir) { - printk(KERN_ERR - "ltt_channel_control_init: create dir of %s failed\n", - MARKERS_CONTROL_DIR); - err = -ENOMEM; - goto err_create_marker_control_dir; - } - - init_marker_dir(markers_control_dir, &root_dir_opt); - - if (build_marker_control_files()) - goto err_build_fail; - - if (!register_module_notifier(&module_nb)) - return 0; - -err_build_fail: - debugfs_remove_recursive(markers_control_dir); - markers_control_dir = NULL; -err_create_marker_control_dir: - debugfs_remove(ltt_destroy_trace_file); -err_create_destroy_trace_file: - debugfs_remove(ltt_setup_trace_file); -err_create_setup_trace_file: - debugfs_remove(ltt_control_dir); -err_create_control_dir: -err_no_root: - return err; -} - -static void __exit ltt_trace_control_exit(void) -{ - struct dentry *trace_dir; - - /* destory all traces */ - list_for_each_entry(trace_dir, <t_control_dir->d_subdirs, - d_u.d_child) { - ltt_trace_stop(trace_dir->d_name.name); - ltt_trace_destroy(trace_dir->d_name.name); - } - - /* clean dirs in debugfs */ - debugfs_remove(ltt_setup_trace_file); - debugfs_remove(ltt_destroy_trace_file); - debugfs_remove_recursive(ltt_control_dir); - debugfs_remove_recursive(markers_control_dir); - unregister_module_notifier(&module_nb); - put_ltt_root(); -} - -module_init(ltt_trace_control_init); -module_exit(ltt_trace_control_exit); - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Zhao Lei "); -MODULE_DESCRIPTION("Linux Trace Toolkit Trace Controller"); diff --git a/discard/ltt-tracer.c b/discard/ltt-tracer.c deleted file mode 100644 index 5cdea932..00000000 --- a/discard/ltt-tracer.c +++ /dev/null @@ -1,1112 +0,0 @@ -/* - * ltt/ltt-tracer.c - 
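
The ltt_trace_control_init() path above follows the usual create-then-unwind debugfs idiom of that era. As a minimal standalone sketch (not part of this patch): the names "demo" and "ctl" and the empty file_operations are placeholders, and the IS_ERR()-or-NULL result checks mirror the convention used in the hunk above.

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *demo_dir, *demo_file;
static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,	/* no read/write hooks in this sketch */
};

static int __init demo_debugfs_init(void)
{
	int err = -ENOMEM;

	demo_dir = debugfs_create_dir("demo", NULL);
	if (IS_ERR(demo_dir) || !demo_dir)
		goto err_dir;

	demo_file = debugfs_create_file("ctl", 0644, demo_dir, NULL,
					&demo_fops);
	if (IS_ERR(demo_file) || !demo_file)
		goto err_file;

	return 0;

err_file:
	debugfs_remove(demo_dir);
err_dir:
	return err;
}

static void __exit demo_debugfs_exit(void)
{
	/* debugfs_remove() tolerates NULL, so teardown order is simple */
	debugfs_remove(demo_file);
	debugfs_remove(demo_dir);
}

module_init(demo_debugfs_init);
module_exit(demo_debugfs_exit);
MODULE_LICENSE("GPL");
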
* - * Copyright (c) 2005-2010 - Mathieu Desnoyers - * - * Tracing management internal kernel API. Trace buffer allocation/free, tracing - * start/stop. - * - * Author: - * Mathieu Desnoyers - * - * Inspired from LTT : - * Karim Yaghmour (karim@opersys.com) - * Tom Zanussi (zanussi@us.ibm.com) - * Bob Wisniewski (bob@watson.ibm.com) - * And from K42 : - * Bob Wisniewski (bob@watson.ibm.com) - * - * Changelog: - * 22/09/06, Move to the marker/probes mechanism. - * 19/10/05, Complete lockless mechanism. - * 27/05/05, Modular redesign and rewrite. - * - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ltt-tracer.h" - -static void synchronize_trace(void) -{ - synchronize_sched(); -#ifdef CONFIG_PREEMPT_RT - synchronize_rcu(); -#endif -} - -static void async_wakeup(unsigned long data); - -static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0); - -/* Default callbacks for modules */ -notrace -int ltt_filter_control_default(enum ltt_filter_control_msg msg, - struct ltt_trace *trace) -{ - return 0; -} - -int ltt_statedump_default(struct ltt_trace *trace) -{ - return 0; -} - -/* Callbacks for registered modules */ - -int (*ltt_filter_control_functor) - (enum ltt_filter_control_msg msg, struct ltt_trace *trace) = - ltt_filter_control_default; -struct module *ltt_filter_control_owner; - -/* These function pointers are protected by a trace activation check */ -struct module *ltt_run_filter_owner; -int (*ltt_statedump_functor)(struct ltt_trace *trace) = ltt_statedump_default; -struct module *ltt_statedump_owner; - -struct chan_info_struct { - const char *name; - unsigned int def_sb_size; - unsigned int def_n_sb; -} chan_infos[] = { - [LTT_CHANNEL_METADATA] = { - LTT_METADATA_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_FD_STATE] = { - LTT_FD_STATE_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_GLOBAL_STATE] = { - LTT_GLOBAL_STATE_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_IRQ_STATE] = { - LTT_IRQ_STATE_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_MODULE_STATE] = { - LTT_MODULE_STATE_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_NETIF_STATE] = { - LTT_NETIF_STATE_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_SOFTIRQ_STATE] = { - LTT_SOFTIRQ_STATE_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_SWAP_STATE] = { - LTT_SWAP_STATE_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_SYSCALL_STATE] = { - LTT_SYSCALL_STATE_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_TASK_STATE] = { - LTT_TASK_STATE_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_VM_STATE] = { - LTT_VM_STATE_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_MED, - LTT_DEFAULT_N_SUBBUFS_MED, - }, - [LTT_CHANNEL_FS] = { - LTT_FS_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_MED, - LTT_DEFAULT_N_SUBBUFS_MED, - }, - [LTT_CHANNEL_INPUT] = { - LTT_INPUT_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_IPC] = { - LTT_IPC_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_LOW, - LTT_DEFAULT_N_SUBBUFS_LOW, - }, - [LTT_CHANNEL_KERNEL] = { - LTT_KERNEL_CHANNEL, - 
LTT_DEFAULT_SUBBUF_SIZE_HIGH, - LTT_DEFAULT_N_SUBBUFS_HIGH, - }, - [LTT_CHANNEL_MM] = { - LTT_MM_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_MED, - LTT_DEFAULT_N_SUBBUFS_MED, - }, - [LTT_CHANNEL_RCU] = { - LTT_RCU_CHANNEL, - LTT_DEFAULT_SUBBUF_SIZE_MED, - LTT_DEFAULT_N_SUBBUFS_MED, - }, - [LTT_CHANNEL_DEFAULT] = { - NULL, - LTT_DEFAULT_SUBBUF_SIZE_MED, - LTT_DEFAULT_N_SUBBUFS_MED, - }, -}; - -static enum ltt_channels get_channel_type_from_name(const char *name) -{ - int i; - - if (!name) - return LTT_CHANNEL_DEFAULT; - - for (i = 0; i < ARRAY_SIZE(chan_infos); i++) - if (chan_infos[i].name && !strcmp(name, chan_infos[i].name)) - return (enum ltt_channels)i; - - return LTT_CHANNEL_DEFAULT; -} - -/** - * ltt_module_register - LTT module registration - * @name: module type - * @function: callback to register - * @owner: module which owns the callback - * - * The module calling this registration function must ensure that no - * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all() - * must be called between a vmalloc and the moment the memory is made visible to - * "function". This registration acts as a vmalloc_sync_all. Therefore, only if - * the module allocates virtual memory after its registration must it - * synchronize the TLBs. - */ -int ltt_module_register(enum ltt_module_function name, void *function, - struct module *owner) -{ - int ret = 0; - - /* - * Make sure no page fault can be triggered by the module about to be - * registered. We deal with this here so we don't have to call - * vmalloc_sync_all() in each module's init. - */ - vmalloc_sync_all(); - - switch (name) { - case LTT_FUNCTION_RUN_FILTER: - if (ltt_run_filter_owner != NULL) { - ret = -EEXIST; - goto end; - } - ltt_filter_register((ltt_run_filter_functor)function); - ltt_run_filter_owner = owner; - break; - case LTT_FUNCTION_FILTER_CONTROL: - if (ltt_filter_control_owner != NULL) { - ret = -EEXIST; - goto end; - } - ltt_filter_control_functor = - (int (*)(enum ltt_filter_control_msg, - struct ltt_trace *))function; - ltt_filter_control_owner = owner; - break; - case LTT_FUNCTION_STATEDUMP: - if (ltt_statedump_owner != NULL) { - ret = -EEXIST; - goto end; - } - ltt_statedump_functor = - (int (*)(struct ltt_trace *))function; - ltt_statedump_owner = owner; - break; - } -end: - return ret; -} -EXPORT_SYMBOL_GPL(ltt_module_register); - -/** - * ltt_module_unregister - LTT module unregistration - * @name: module type - */ -void ltt_module_unregister(enum ltt_module_function name) -{ - switch (name) { - case LTT_FUNCTION_RUN_FILTER: - ltt_filter_unregister(); - ltt_run_filter_owner = NULL; - /* Wait for preempt sections to finish */ - synchronize_trace(); - break; - case LTT_FUNCTION_FILTER_CONTROL: - ltt_filter_control_functor = ltt_filter_control_default; - ltt_filter_control_owner = NULL; - break; - case LTT_FUNCTION_STATEDUMP: - ltt_statedump_functor = ltt_statedump_default; - ltt_statedump_owner = NULL; - break; - } - -} -EXPORT_SYMBOL_GPL(ltt_module_unregister); - -static LIST_HEAD(ltt_transport_list); - -/** - * ltt_transport_register - LTT transport registration - * @transport: transport structure - * - * Registers a transport which can be used as output to extract the data out of - * LTTng. The module calling this registration function must ensure that no - * trap-inducing code will be executed by the transport functions. E.g. - * vmalloc_sync_all() must be called between a vmalloc and the moment the memory - * is made visible to the transport function. This registration acts as a - * vmalloc_sync_all. 
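
As a usage sketch for ltt_module_register() above (not part of the patch): a state-dump provider hands its callback in under LTT_FUNCTION_STATEDUMP and drops it again on unload. The module and function names below are placeholders; the callback signature follows ltt_statedump_default().

#include <linux/module.h>

#include "ltt-tracer.h"

/* hypothetical state-dump provider */
static int demo_statedump(struct ltt_trace *trace)
{
	/* emit whatever initial-state events this module cares about */
	return 0;
}

static int __init demo_statedump_init(void)
{
	return ltt_module_register(LTT_FUNCTION_STATEDUMP,
				   demo_statedump, THIS_MODULE);
}

static void __exit demo_statedump_exit(void)
{
	ltt_module_unregister(LTT_FUNCTION_STATEDUMP);
}

module_init(demo_statedump_init);
module_exit(demo_statedump_exit);
MODULE_LICENSE("GPL");
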
Therefore, only if the module allocates virtual memory - * after its registration must it synchronize the TLBs. - */ -void ltt_transport_register(struct ltt_transport *transport) -{ - /* - * Make sure no page fault can be triggered by the module about to be - * registered. We deal with this here so we don't have to call - * vmalloc_sync_all() in each module's init. - */ - vmalloc_sync_all(); - - ltt_lock_traces(); - list_add_tail(&transport->node, <t_transport_list); - ltt_unlock_traces(); -} -EXPORT_SYMBOL_GPL(ltt_transport_register); - -/** - * ltt_transport_unregister - LTT transport unregistration - * @transport: transport structure - */ -void ltt_transport_unregister(struct ltt_transport *transport) -{ - ltt_lock_traces(); - list_del(&transport->node); - ltt_unlock_traces(); -} -EXPORT_SYMBOL_GPL(ltt_transport_unregister); - -static inline -int is_channel_overwrite(enum ltt_channels chan, enum trace_mode mode) -{ - switch (mode) { - case LTT_TRACE_NORMAL: - return 0; - case LTT_TRACE_FLIGHT: - switch (chan) { - case LTT_CHANNEL_METADATA: - return 0; - default: - return 1; - } - case LTT_TRACE_HYBRID: - switch (chan) { - case LTT_CHANNEL_KERNEL: - case LTT_CHANNEL_FS: - case LTT_CHANNEL_MM: - case LTT_CHANNEL_RCU: - case LTT_CHANNEL_IPC: - case LTT_CHANNEL_INPUT: - return 1; - default: - return 0; - } - default: - return 0; - } -} - -/** - * _ltt_trace_find - find a trace by given name. - * trace_name: trace name - * - * Returns a pointer to the trace structure, NULL if not found. - */ -static struct ltt_trace *_ltt_trace_find(const char *trace_name) -{ - struct ltt_trace *trace; - - list_for_each_entry(trace, <t_traces.head, list) - if (!strncmp(trace->trace_name, trace_name, NAME_MAX)) - return trace; - - return NULL; -} - -/* _ltt_trace_find_setup : - * find a trace in setup list by given name. - * - * Returns a pointer to the trace structure, NULL if not found. 
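
A transport module plugs into ltt_transport_register()/ltt_transport_unregister() roughly as sketched below. The transport name "demo-transport" is a placeholder and must match the string later passed to ltt_trace_set_type(); the .ops initializer is elided because only create_dirs()/remove_dirs() are visible in this file and the full ltt_trace_ops layout is not shown here.

#include <linux/module.h>

#include "ltt-tracer.h"

static struct ltt_transport demo_transport = {
	.name	= "demo-transport",
	.owner	= THIS_MODULE,
	/* .ops = { .create_dirs = ..., .remove_dirs = ..., ... }, */
};

static int __init demo_transport_init(void)
{
	ltt_transport_register(&demo_transport);
	return 0;
}

static void __exit demo_transport_exit(void)
{
	ltt_transport_unregister(&demo_transport);
}

module_init(demo_transport_init);
module_exit(demo_transport_exit);
MODULE_LICENSE("GPL");
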
- */ -struct ltt_trace *_ltt_trace_find_setup(const char *trace_name) -{ - struct ltt_trace *trace; - - list_for_each_entry(trace, <t_traces.setup_head, list) - if (!strncmp(trace->trace_name, trace_name, NAME_MAX)) - return trace; - - return NULL; -} -EXPORT_SYMBOL_GPL(_ltt_trace_find_setup); - -/** - * ltt_release_trace - Release a LTT trace - * @kref : reference count on the trace - */ -void ltt_release_trace(struct kref *kref) -{ - struct ltt_trace *trace = container_of(kref, struct ltt_trace, kref); - - trace->ops->remove_dirs(trace); - module_put(trace->transport->owner); - ltt_channels_trace_free(trace); - kfree(trace); -} -EXPORT_SYMBOL_GPL(ltt_release_trace); - -static inline void prepare_chan_size_num(unsigned int *subbuf_size, - unsigned int *n_subbufs) -{ - /* Make sure the subbuffer size is larger than a page */ - *subbuf_size = max_t(unsigned int, *subbuf_size, PAGE_SIZE); - - /* round to next power of 2 */ - *subbuf_size = 1 << get_count_order(*subbuf_size); - *n_subbufs = 1 << get_count_order(*n_subbufs); - - /* Subbuf size and number must both be power of two */ - WARN_ON(hweight32(*subbuf_size) != 1); - WARN_ON(hweight32(*n_subbufs) != 1); -} - -int _ltt_trace_setup(const char *trace_name) -{ - int err = 0; - struct ltt_trace *new_trace = NULL; - int metadata_index; - unsigned int chan; - enum ltt_channels chantype; - - if (_ltt_trace_find_setup(trace_name)) { - printk(KERN_ERR "LTT : Trace name %s already used.\n", - trace_name); - err = -EEXIST; - goto traces_error; - } - - if (_ltt_trace_find(trace_name)) { - printk(KERN_ERR "LTT : Trace name %s already used.\n", - trace_name); - err = -EEXIST; - goto traces_error; - } - - new_trace = kzalloc(sizeof(struct ltt_trace), GFP_KERNEL); - if (!new_trace) { - printk(KERN_ERR - "LTT : Unable to allocate memory for trace %s\n", - trace_name); - err = -ENOMEM; - goto traces_error; - } - strncpy(new_trace->trace_name, trace_name, NAME_MAX); - if (ltt_channels_trace_alloc(&new_trace->nr_channels, 0)) { - printk(KERN_ERR - "LTT : Unable to allocate memory for chaninfo %s\n", - trace_name); - err = -ENOMEM; - goto trace_free; - } - - /* - * Force metadata channel to no overwrite. - */ - metadata_index = ltt_channels_get_index_from_name("metadata"); - WARN_ON(metadata_index < 0); - new_trace->settings[metadata_index].overwrite = 0; - - /* - * Set hardcoded tracer defaults for some channels - */ - for (chan = 0; chan < new_trace->nr_channels; chan++) { - chantype = get_channel_type_from_name( - ltt_channels_get_name_from_index(chan)); - new_trace->settings[chan].sb_size = - chan_infos[chantype].def_sb_size; - new_trace->settings[chan].n_sb = - chan_infos[chantype].def_n_sb; - } - - list_add(&new_trace->list, <t_traces.setup_head); - return 0; - -trace_free: - kfree(new_trace); -traces_error: - return err; -} -EXPORT_SYMBOL_GPL(_ltt_trace_setup); - - -int ltt_trace_setup(const char *trace_name) -{ - int ret; - ltt_lock_traces(); - ret = _ltt_trace_setup(trace_name); - ltt_unlock_traces(); - return ret; -} -EXPORT_SYMBOL_GPL(ltt_trace_setup); - -/* must be called from within a traces lock. 
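
prepare_chan_size_num() above clamps the sub-buffer size to at least one page and rounds both the size and the count up to powers of two. A userspace mirror of that arithmetic, assuming a 4 KiB page purely for illustration:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096U	/* assumed page size for the demo */

static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int sb_size = 100000, n_sb = 3;

	if (sb_size < DEMO_PAGE_SIZE)
		sb_size = DEMO_PAGE_SIZE;
	sb_size = roundup_pow_of_two(sb_size);
	n_sb = roundup_pow_of_two(n_sb);

	printf("sb_size=%u n_sb=%u\n", sb_size, n_sb);	/* 131072, 4 */
	return 0;
}
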
*/ -static void _ltt_trace_free(struct ltt_trace *trace) -{ - list_del(&trace->list); - kfree(trace); -} - -int ltt_trace_set_type(const char *trace_name, const char *trace_type) -{ - int err = 0; - struct ltt_trace *trace; - struct ltt_transport *tran_iter, *transport = NULL; - - ltt_lock_traces(); - - trace = _ltt_trace_find_setup(trace_name); - if (!trace) { - printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); - err = -ENOENT; - goto traces_error; - } - - list_for_each_entry(tran_iter, <t_transport_list, node) { - if (!strcmp(tran_iter->name, trace_type)) { - transport = tran_iter; - break; - } - } - if (!transport) { - printk(KERN_ERR "LTT : Transport %s is not present.\n", - trace_type); - err = -EINVAL; - goto traces_error; - } - - trace->transport = transport; - -traces_error: - ltt_unlock_traces(); - return err; -} -EXPORT_SYMBOL_GPL(ltt_trace_set_type); - -int ltt_trace_set_channel_subbufsize(const char *trace_name, - const char *channel_name, - unsigned int size) -{ - int err = 0; - struct ltt_trace *trace; - int index; - - ltt_lock_traces(); - - trace = _ltt_trace_find_setup(trace_name); - if (!trace) { - printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); - err = -ENOENT; - goto traces_error; - } - - index = ltt_channels_get_index_from_name(channel_name); - if (index < 0) { - printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); - err = -ENOENT; - goto traces_error; - } - trace->settings[index].sb_size = size; - -traces_error: - ltt_unlock_traces(); - return err; -} -EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize); - -int ltt_trace_set_channel_subbufcount(const char *trace_name, - const char *channel_name, - unsigned int cnt) -{ - int err = 0; - struct ltt_trace *trace; - int index; - - ltt_lock_traces(); - - trace = _ltt_trace_find_setup(trace_name); - if (!trace) { - printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); - err = -ENOENT; - goto traces_error; - } - - index = ltt_channels_get_index_from_name(channel_name); - if (index < 0) { - printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); - err = -ENOENT; - goto traces_error; - } - trace->settings[index].n_sb = cnt; - -traces_error: - ltt_unlock_traces(); - return err; -} -EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount); - -int ltt_trace_set_channel_switch_timer(const char *trace_name, - const char *channel_name, - unsigned long interval) -{ - int err = 0; - struct ltt_trace *trace; - int index; - - ltt_lock_traces(); - - trace = _ltt_trace_find_setup(trace_name); - if (!trace) { - printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); - err = -ENOENT; - goto traces_error; - } - - index = ltt_channels_get_index_from_name(channel_name); - if (index < 0) { - printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); - err = -ENOENT; - goto traces_error; - } - ltt_channels_trace_set_timer(&trace->settings[index], interval); - -traces_error: - ltt_unlock_traces(); - return err; -} -EXPORT_SYMBOL_GPL(ltt_trace_set_channel_switch_timer); - -int ltt_trace_set_channel_overwrite(const char *trace_name, - const char *channel_name, - unsigned int overwrite) -{ - int err = 0; - struct ltt_trace *trace; - int index; - - ltt_lock_traces(); - - trace = _ltt_trace_find_setup(trace_name); - if (!trace) { - printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); - err = -ENOENT; - goto traces_error; - } - - /* - * Always put the metadata channel in non-overwrite mode : - * This is a very low traffic channel and it can't afford to have its - * data overwritten : this data (marker 
info) is necessary to be - * able to read the trace. - */ - if (overwrite && !strcmp(channel_name, "metadata")) { - printk(KERN_ERR "LTT : Trying to set metadata channel to " - "overwrite mode\n"); - err = -EINVAL; - goto traces_error; - } - - index = ltt_channels_get_index_from_name(channel_name); - if (index < 0) { - printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); - err = -ENOENT; - goto traces_error; - } - - trace->settings[index].overwrite = overwrite; - -traces_error: - ltt_unlock_traces(); - return err; -} -EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite); - -int ltt_trace_alloc(const char *trace_name) -{ - int err = 0; - struct ltt_trace *trace; - int sb_size, n_sb; - unsigned long flags; - int chan; - const char *channel_name; - - ltt_lock_traces(); - - trace = _ltt_trace_find_setup(trace_name); - if (!trace) { - printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); - err = -ENOENT; - goto traces_error; - } - - kref_init(&trace->kref); - init_waitqueue_head(&trace->kref_wq); - trace->active = 0; - get_trace_clock(); - trace->freq_scale = trace_clock_freq_scale(); - - if (!trace->transport) { - printk(KERN_ERR "LTT : Transport is not set.\n"); - err = -EINVAL; - goto transport_error; - } - if (!try_module_get(trace->transport->owner)) { - printk(KERN_ERR "LTT : Can't lock transport module.\n"); - err = -ENODEV; - goto transport_error; - } - trace->ops = &trace->transport->ops; - - err = trace->ops->create_dirs(trace); - if (err) { - printk(KERN_ERR "LTT : Can't create dir for trace %s.\n", - trace_name); - goto dirs_error; - } - - local_irq_save(flags); - trace->start_freq = trace_clock_frequency(); - trace->start_tsc = trace_clock_read64(); - do_gettimeofday(&trace->start_time); - local_irq_restore(flags); - - for (chan = 0; chan < trace->nr_channels; chan++) { - channel_name = ltt_channels_get_name_from_index(chan); - WARN_ON(!channel_name); - /* - * note: sb_size and n_sb will be overwritten with updated - * values by channel creation. - */ - sb_size = trace->settings[chan].sb_size; - n_sb = trace->settings[chan].n_sb; - prepare_chan_size_num(&sb_size, &n_sb); - trace->channels[chan] = ltt_create_channel(channel_name, - trace, NULL, sb_size, n_sb, - trace->settings[chan].overwrite, - trace->settings[chan].switch_timer_interval, - trace->settings[chan].read_timer_interval); - if (err != 0) { - printk(KERN_ERR "LTT : Can't create channel %s.\n", - channel_name); - goto create_channel_error; - } - } - - list_del(&trace->list); - if (list_empty(<t_traces.head)) - set_kernel_trace_flag_all_tasks(); - list_add_rcu(&trace->list, <t_traces.head); - synchronize_trace(); - - ltt_unlock_traces(); - - return 0; - -create_channel_error: - for (chan--; chan >= 0; chan--) - ltt_channel_destroy(trace->channels[chan]); - trace->ops->remove_dirs(trace); - -dirs_error: - module_put(trace->transport->owner); -transport_error: - put_trace_clock(); -traces_error: - ltt_unlock_traces(); - return err; -} -EXPORT_SYMBOL_GPL(ltt_trace_alloc); - -/* - * It is worked as a wrapper for current version of ltt_control.ko. - * We will make a new ltt_control based on debugfs, and control each channel's - * buffer. 
- */ -static -int ltt_trace_create(const char *trace_name, const char *trace_type, - enum trace_mode mode, - unsigned int subbuf_size_low, unsigned int n_subbufs_low, - unsigned int subbuf_size_med, unsigned int n_subbufs_med, - unsigned int subbuf_size_high, unsigned int n_subbufs_high) -{ - int err = 0; - - err = ltt_trace_setup(trace_name); - if (IS_ERR_VALUE(err)) - return err; - - err = ltt_trace_set_type(trace_name, trace_type); - if (IS_ERR_VALUE(err)) - return err; - - err = ltt_trace_alloc(trace_name); - if (IS_ERR_VALUE(err)) - return err; - - return err; -} - -/* Must be called while sure that trace is in the list. */ -static int _ltt_trace_destroy(struct ltt_trace *trace) -{ - int err = -EPERM; - - if (trace == NULL) { - err = -ENOENT; - goto traces_error; - } - if (trace->active) { - printk(KERN_ERR - "LTT : Can't destroy trace %s : tracer is active\n", - trace->trace_name); - err = -EBUSY; - goto active_error; - } - /* Everything went fine */ - list_del_rcu(&trace->list); - synchronize_trace(); - if (list_empty(<t_traces.head)) { - clear_kernel_trace_flag_all_tasks(); - } - return 0; - - /* error handling */ -active_error: -traces_error: - return err; -} - -/* Sleepable part of the destroy */ -static void __ltt_trace_destroy(struct ltt_trace *trace) -{ - int i; - - for (i = 0; i < trace->nr_channels; i++) - ltt_channel_destroy(trace->channels[i]); - kref_put(&trace->kref, ltt_release_trace); -} - -int ltt_trace_destroy(const char *trace_name) -{ - int err = 0; - struct ltt_trace *trace; - - ltt_lock_traces(); - - trace = _ltt_trace_find(trace_name); - if (trace) { - err = _ltt_trace_destroy(trace); - if (err) - goto error; - - __ltt_trace_destroy(trace); - ltt_unlock_traces(); - put_trace_clock(); - - return 0; - } - - trace = _ltt_trace_find_setup(trace_name); - if (trace) { - _ltt_trace_free(trace); - ltt_unlock_traces(); - return 0; - } - - err = -ENOENT; - - /* Error handling */ -error: - ltt_unlock_traces(); - return err; -} -EXPORT_SYMBOL_GPL(ltt_trace_destroy); - -/* must be called from within a traces lock. */ -static int _ltt_trace_start(struct ltt_trace *trace) -{ - int err = 0; - - if (trace == NULL) { - err = -ENOENT; - goto traces_error; - } - if (trace->active) - printk(KERN_INFO "LTT : Tracing already active for trace %s\n", - trace->trace_name); - if (!try_module_get(ltt_run_filter_owner)) { - err = -ENODEV; - printk(KERN_ERR "LTT : Can't lock filter module.\n"); - goto get_ltt_run_filter_error; - } - trace->active = 1; - /* Read by trace points without protection : be careful */ - ltt_traces.num_active_traces++; - return err; - - /* error handling */ -get_ltt_run_filter_error: -traces_error: - return err; -} - -int ltt_trace_start(const char *trace_name) -{ - int err = 0; - struct ltt_trace *trace; - - ltt_lock_traces(); - - trace = _ltt_trace_find(trace_name); - err = _ltt_trace_start(trace); - if (err) - goto no_trace; - - ltt_unlock_traces(); - - /* - * Call the kernel state dump. - * Events will be mixed with real kernel events, it's ok. - * Notice that there is no protection on the trace : that's exactly - * why we iterate on the list and check for trace equality instead of - * directly using this trace handle inside the logging function. 
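
Taken together, ltt_trace_setup(), ltt_trace_set_type(), the per-channel setters shown earlier, ltt_trace_alloc() and ltt_trace_start()/ltt_trace_stop()/ltt_trace_destroy() form the in-kernel control sequence that ltt_trace_create() wraps. A hedged sketch of a consumer module driving that sequence: the trace, transport and channel names ("demo", "demo-transport", "kernel") are placeholders, and error handling is collapsed into a single bail-out label.

#include <linux/module.h>

#include "ltt-tracer.h"

static int __init demo_trace_init(void)
{
	int err;

	err = ltt_trace_setup("demo");
	if (err)
		return err;

	err = ltt_trace_set_type("demo", "demo-transport");
	if (err)
		goto destroy;

	/* channel tuning must happen between setup and alloc */
	err = ltt_trace_set_channel_subbufsize("demo", "kernel", 262144);
	if (err)
		goto destroy;
	err = ltt_trace_set_channel_subbufcount("demo", "kernel", 8);
	if (err)
		goto destroy;
	err = ltt_trace_set_channel_overwrite("demo", "kernel", 1);
	if (err)
		goto destroy;

	err = ltt_trace_alloc("demo");
	if (err)
		goto destroy;

	return ltt_trace_start("demo");

destroy:
	/* handles both the setup-list and the allocated cases */
	ltt_trace_destroy("demo");
	return err;
}

static void __exit demo_trace_exit(void)
{
	ltt_trace_stop("demo");
	ltt_trace_destroy("demo");
}

module_init(demo_trace_init);
module_exit(demo_trace_exit);
MODULE_LICENSE("GPL");
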
- */ - - ltt_dump_marker_state(trace); - - if (!try_module_get(ltt_statedump_owner)) { - err = -ENODEV; - printk(KERN_ERR - "LTT : Can't lock state dump module.\n"); - } else { - ltt_statedump_functor(trace); - module_put(ltt_statedump_owner); - } - - return err; - - /* Error handling */ -no_trace: - ltt_unlock_traces(); - return err; -} -EXPORT_SYMBOL_GPL(ltt_trace_start); - -/* must be called from within traces lock */ -static int _ltt_trace_stop(struct ltt_trace *trace) -{ - int err = -EPERM; - - if (trace == NULL) { - err = -ENOENT; - goto traces_error; - } - if (!trace->active) - printk(KERN_INFO "LTT : Tracing not active for trace %s\n", - trace->trace_name); - if (trace->active) { - trace->active = 0; - ltt_traces.num_active_traces--; - synchronize_trace(); /* Wait for each tracing to be finished */ - } - module_put(ltt_run_filter_owner); - /* Everything went fine */ - return 0; - - /* Error handling */ -traces_error: - return err; -} - -int ltt_trace_stop(const char *trace_name) -{ - int err = 0; - struct ltt_trace *trace; - - ltt_lock_traces(); - trace = _ltt_trace_find(trace_name); - err = _ltt_trace_stop(trace); - ltt_unlock_traces(); - return err; -} -EXPORT_SYMBOL_GPL(ltt_trace_stop); - -/** - * ltt_control - Trace control in-kernel API - * @msg: Action to perform - * @trace_name: Trace on which the action must be done - * @trace_type: Type of trace (normal, flight, hybrid) - * @args: Arguments specific to the action - */ -int ltt_control(enum ltt_control_msg msg, const char *trace_name, - const char *trace_type, union ltt_control_args args) -{ - int err = -EPERM; - - printk(KERN_ALERT "ltt_control : trace %s\n", trace_name); - switch (msg) { - case LTT_CONTROL_START: - printk(KERN_DEBUG "Start tracing %s\n", trace_name); - err = ltt_trace_start(trace_name); - break; - case LTT_CONTROL_STOP: - printk(KERN_DEBUG "Stop tracing %s\n", trace_name); - err = ltt_trace_stop(trace_name); - break; - case LTT_CONTROL_CREATE_TRACE: - printk(KERN_DEBUG "Creating trace %s\n", trace_name); - err = ltt_trace_create(trace_name, trace_type, - args.new_trace.mode, - args.new_trace.subbuf_size_low, - args.new_trace.n_subbufs_low, - args.new_trace.subbuf_size_med, - args.new_trace.n_subbufs_med, - args.new_trace.subbuf_size_high, - args.new_trace.n_subbufs_high); - break; - case LTT_CONTROL_DESTROY_TRACE: - printk(KERN_DEBUG "Destroying trace %s\n", trace_name); - err = ltt_trace_destroy(trace_name); - break; - } - return err; -} -EXPORT_SYMBOL_GPL(ltt_control); - -/** - * ltt_filter_control - Trace filter control in-kernel API - * @msg: Action to perform on the filter - * @trace_name: Trace on which the action must be done - */ -int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name) -{ - int err; - struct ltt_trace *trace; - - printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name); - ltt_lock_traces(); - trace = _ltt_trace_find(trace_name); - if (trace == NULL) { - printk(KERN_ALERT - "Trace does not exist. 
Cannot proxy control request\n"); - err = -ENOENT; - goto trace_error; - } - if (!try_module_get(ltt_filter_control_owner)) { - err = -ENODEV; - goto get_module_error; - } - switch (msg) { - case LTT_FILTER_DEFAULT_ACCEPT: - printk(KERN_DEBUG - "Proxy filter default accept %s\n", trace_name); - err = (*ltt_filter_control_functor)(msg, trace); - break; - case LTT_FILTER_DEFAULT_REJECT: - printk(KERN_DEBUG - "Proxy filter default reject %s\n", trace_name); - err = (*ltt_filter_control_functor)(msg, trace); - break; - default: - err = -EPERM; - } - module_put(ltt_filter_control_owner); - -get_module_error: -trace_error: - ltt_unlock_traces(); - return err; -} -EXPORT_SYMBOL_GPL(ltt_filter_control); - -int __init ltt_init(void) -{ - /* Make sure no page fault can be triggered by this module */ - vmalloc_sync_all(); - init_timer_deferrable(<t_async_wakeup_timer); - return 0; -} - -module_init(ltt_init) - -static void __exit ltt_exit(void) -{ - struct ltt_trace *trace; - struct list_head *pos, *n; - - ltt_lock_traces(); - /* Stop each trace, currently being read by RCU read-side */ - list_for_each_entry_rcu(trace, <t_traces.head, list) - _ltt_trace_stop(trace); - /* Wait for quiescent state. Readers have preemption disabled. */ - synchronize_trace(); - /* Safe iteration is now permitted. It does not have to be RCU-safe - * because no readers are left. */ - list_for_each_safe(pos, n, <t_traces.head) { - trace = container_of(pos, struct ltt_trace, list); - /* _ltt_trace_destroy does a synchronize_trace() */ - _ltt_trace_destroy(trace); - __ltt_trace_destroy(trace); - } - /* free traces in pre-alloc status */ - list_for_each_safe(pos, n, <t_traces.setup_head) { - trace = container_of(pos, struct ltt_trace, list); - _ltt_trace_free(trace); - } - - ltt_unlock_traces(); -} - -module_exit(ltt_exit) - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API"); diff --git a/discard/ltt-type-serializer.c b/discard/ltt-type-serializer.c deleted file mode 100644 index ed589c73..00000000 --- a/discard/ltt-type-serializer.c +++ /dev/null @@ -1,94 +0,0 @@ -/** - * ltt-type-serializer.c - * - * LTTng specialized type serializer. - * - * Copyright Mathieu Desnoyers, 2008. - * - * Dual LGPL v2.1/GPL v2 license. - */ -#include - -#include "ltt-type-serializer.h" -#include "ltt-relay-lockless.h" - -notrace -void _ltt_specialized_trace(void *probe_data, - void *serialize_private, unsigned int data_size, - unsigned int largest_align) -{ - struct ltt_event *event = probe_data; - int ret; - uint16_t eID; - size_t slot_size; - struct ltt_chanbuf *buf; - struct ltt_channel *chan; - struct ltt_session *session; - uint64_t tsc; - long buf_offset; - int cpu; - unsigned int rflags; - - /* disabled from tracepoints rcu_read_lock_sched_notrace(); */ - cpu = smp_processor_id(); - __get_cpu_var(ltt_nesting)++; - /* - * asm volatile and "memory" clobber prevent the compiler from moving - * instructions out of the ltt nesting count. This is required to ensure - * that probe side-effects which can cause recursion (e.g. unforeseen - * traps, divisions by 0, ...) are triggered within the incremented - * nesting count section. 
- */ - barrier(); - eID = event->id; - chan = event->chan; - session = chan->session; - - if (unlikely(!session->active)) - goto skip; - if (unlikely(!ltt_run_filter(session, chan, event))) - goto skip; -#ifdef LTT_DEBUG_EVENT_SIZE - rflags = LTT_RFLAG_ID_SIZE; -#else - if (unlikely(eID >= LTT_FREE_EVENTS)) - rflags = LTT_RFLAG_ID; - else - rflags = 0; -#endif - /* reserve space : header and data */ - ret = ltt_reserve_slot(chan, trace, data_size, largest_align, - cpu, &buf, &slot_size, &buf_offset, &tsc, - &rflags); - if (unlikely(ret < 0)) - goto skip; /* buffer full */ - - /* Out-of-order write : header and data */ - buf_offset = ltt_write_event_header(&buf->a, &chan->a, - buf_offset, eID, data_size, - tsc, rflags); - if (data_size) { - buf_offset += ltt_align(buf_offset, largest_align); - ltt_relay_write(&buf->a, &chan->a, buf_offset, - serialize_private, data_size); - buf_offset += data_size; - } - /* Out-of-order commit */ - ltt_commit_slot(buf, chan, buf_offset, data_size, slot_size); -skip: - /* - * asm volatile and "memory" clobber prevent the compiler from moving - * instructions out of the ltt nesting count. This is required to ensure - * that probe side-effects which can cause recursion (e.g. unforeseen - * traps, divisions by 0, ...) are triggered within the incremented - * nesting count section. - */ - barrier(); - __get_cpu_var(ltt_nesting)--; - /* disabled from tracepoints rcu_read_unlock_sched_notrace(); */ -} -EXPORT_SYMBOL_GPL(_ltt_specialized_trace); - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("LTT type serializer"); diff --git a/discard/ltt-type-serializer.h b/discard/ltt-type-serializer.h deleted file mode 100644 index fb870c8f..00000000 --- a/discard/ltt-type-serializer.h +++ /dev/null @@ -1,186 +0,0 @@ -#ifndef _LTT_TYPE_SERIALIZER_H -#define _LTT_TYPE_SERIALIZER_H - -#include /* For IFNAMSIZ */ - -#include "ltt-tracer.h" - -/* - * largest_align must be non-zero, equal to the minimum between the largest type - * and sizeof(void *). - */ -extern void _ltt_specialized_trace(void *probe_data, - void *serialize_private, unsigned int data_size, - unsigned int largest_align); - -/* - * Statically check that 0 < largest_align < sizeof(void *) to make sure it is - * dumb-proof. It will make sure 0 is changed into 1 and unsigned long long is - * changed into sizeof(void *) on 32-bit architectures. - */ -static inline void ltt_specialized_trace(void *probe_data, - void *serialize_private, unsigned int data_size, - unsigned int largest_align) -{ - largest_align = min_t(unsigned int, largest_align, sizeof(void *)); - largest_align = max_t(unsigned int, largest_align, 1); - _ltt_specialized_trace(probe_data, serialize_private, data_size, - largest_align); -} - -/* - * Type serializer definitions. - */ - -/* - * Return size of structure without end-of-structure padding. 
- */ -#define serialize_sizeof(type) offsetof(typeof(type), end_field) - -struct serialize_long_int { - unsigned long f1; - unsigned int f2; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_int_int_long { - unsigned int f1; - unsigned int f2; - unsigned long f3; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_int_int_short { - unsigned int f1; - unsigned int f2; - unsigned short f3; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_long_long_long { - unsigned long f1; - unsigned long f2; - unsigned long f3; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_long_long_int { - unsigned long f1; - unsigned long f2; - unsigned int f3; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_long_long_short_char { - unsigned long f1; - unsigned long f2; - unsigned short f3; - unsigned char f4; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_long_long_short { - unsigned long f1; - unsigned long f2; - unsigned short f3; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_long_short_char { - unsigned long f1; - unsigned short f2; - unsigned char f3; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_long_short { - unsigned long f1; - unsigned short f2; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_long_char { - unsigned long f1; - unsigned char f2; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_long_ifname { - unsigned long f1; - unsigned char f2[IFNAMSIZ]; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_sizet_int { - size_t f1; - unsigned int f2; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_long_long_sizet_int { - unsigned long f1; - unsigned long f2; - size_t f3; - unsigned int f4; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_long_long_sizet_int_int { - unsigned long f1; - unsigned long f2; - size_t f3; - unsigned int f4; - unsigned int f5; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_l4421224411111 { - unsigned long f1; - uint32_t f2; - uint32_t f3; - uint16_t f4; - uint8_t f5; - uint16_t f6; - uint16_t f7; - uint32_t f8; - uint32_t f9; - uint8_t f10; - uint8_t f11; - uint8_t f12; - uint8_t f13; - uint8_t f14; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_l214421224411111 { - unsigned long f1; - uint16_t f2; - uint8_t f3; - uint32_t f4; - uint32_t f5; - uint16_t f6; - uint8_t f7; - uint16_t f8; - uint16_t f9; - uint32_t f10; - uint32_t f11; - uint8_t f12; - uint8_t f13; - uint8_t f14; - uint8_t f15; - uint8_t f16; - uint8_t end_field[0]; -} RING_BUFFER_ALIGN_ATTR; - -struct serialize_l4412228 { - unsigned long f1; - uint32_t f2; - uint32_t f3; - uint8_t f4; - uint16_t f5; - uint16_t f6; - uint16_t f7; - uint64_t f8; - unsigned char end_field[0]; -} RING_BUFFER_ALIGN_ATTR; -#endif /* _LTT_TYPE_SERIALIZER_H */ diff --git a/discard/ltt-userspace-event.c b/discard/ltt-userspace-event.c deleted file mode 100644 index c716d724..00000000 --- a/discard/ltt-userspace-event.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (C) 2008 Mathieu Desnoyers - * - * Dual LGPL v2.1/GPL v2 license. 
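
serialize_sizeof() deliberately reports the offset of end_field rather than sizeof(), so tail padding never ends up in the trace. A small userspace illustration of the difference (RING_BUFFER_ALIGN_ATTR omitted; the numbers in the comment assume an LP64 target):

#include <stdio.h>
#include <stddef.h>

/* userspace mirror of serialize_sizeof(); relies on GCC's typeof */
#define serialize_sizeof(type) offsetof(typeof(type), end_field)

struct serialize_long_char {
	unsigned long f1;
	unsigned char f2;
	unsigned char end_field[0];
};

int main(void)
{
	struct serialize_long_char s;

	/* LP64: sizeof() is 16 because of tail padding, payload is 9 */
	printf("sizeof=%zu serialize_sizeof=%zu\n",
	       sizeof(s), serialize_sizeof(s));
	return 0;
}
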
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include "ltt-type-serializer.h" - -#define LTT_WRITE_EVENT_FILE "write_event" - -DEFINE_MARKER(userspace, event, "string %s"); -static struct dentry *ltt_event_file; - -/** - * write_event - write a userspace string into the trace system - * @file: file pointer - * @user_buf: user string - * @count: length to copy, including the final NULL - * @ppos: unused - * - * Copy a string into a trace event, in channel "userspace", event "event". - * Copies until either \n or \0 is reached. - * On success, returns the number of bytes copied from the source, including the - * \n or \0 character (if there was one in the count range). It cannot return - * more than count. - * Inspired from tracing_mark_write implementation from Steven Rostedt and - * Ingo Molnar. - */ -static -ssize_t write_event(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct marker *marker; - char *buf, *end; - long copycount; - ssize_t ret; - - buf = kmalloc(count + 1, GFP_KERNEL); - if (!buf) { - ret = -ENOMEM; - goto string_out; - } - copycount = strncpy_from_user(buf, user_buf, count); - if (copycount < 0) { - ret = -EFAULT; - goto string_err; - } - /* Cut from the first nil or newline. */ - buf[copycount] = '\0'; - end = strchr(buf, '\n'); - if (end) { - *end = '\0'; - copycount = end - buf; - } - /* Add final \0 to copycount */ - copycount++; - marker = &GET_MARKER(userspace, event); - ltt_specialized_trace(marker, marker->single.probe_private, buf, - copycount, sizeof(char)); - /* If there is no \0 nor \n in count, do not return a larger value */ - ret = min_t(size_t, copycount, count); -string_err: - kfree(buf); -string_out: - return ret; -} - -static const struct file_operations ltt_userspace_operations = { - .write = write_event, -}; - -static int __init ltt_userspace_init(void) -{ - struct dentry *ltt_root_dentry; - int err = 0; - - ltt_root_dentry = get_ltt_root(); - if (!ltt_root_dentry) { - err = -ENOENT; - goto err_no_root; - } - - ltt_event_file = debugfs_create_file(LTT_WRITE_EVENT_FILE, - S_IWUGO, - ltt_root_dentry, - NULL, - <t_userspace_operations); - if (IS_ERR(ltt_event_file) || !ltt_event_file) { - printk(KERN_ERR - "ltt_userspace_init: failed to create file %s\n", - LTT_WRITE_EVENT_FILE); - err = -EPERM; - goto err_no_file; - } - - return err; -err_no_file: - put_ltt_root(); -err_no_root: - return err; -} - -static void __exit ltt_userspace_exit(void) -{ - debugfs_remove(ltt_event_file); - put_ltt_root(); -} - -module_init(ltt_userspace_init); -module_exit(ltt_userspace_exit); - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers "); -MODULE_DESCRIPTION("Linux Trace Toolkit Userspace Event"); diff --git a/discard/probes/Makefile b/discard/probes/Makefile deleted file mode 100644 index d8f1c403..00000000 --- a/discard/probes/Makefile +++ /dev/null @@ -1,47 +0,0 @@ -# LTTng tracing probes - -ifdef CONFIG_FTRACE -CFLAGS_REMOVE_kernel-trace.o = -pg -CFLAGS_REMOVE_mm-trace.o = -pg -CFLAGS_REMOVE_fs-trace.o = -pg -CFLAGS_REMOVE_ipc-trace.o = -pg -CFLAGS_REMOVE_lockdep-trace.o = -pg -CFLAGS_REMOVE_rcu-trace.o = -pg -CFLAGS_REMOVE_syscall-trace.o = -pg -CFLAGS_REMOVE_trap-trace.o = -pg -CFLAGS_REMOVE_pm-trace.o = -pg -endif - -obj-m += kernel-trace.o mm-trace.o fs-trace.o ipc-trace.o lockdep-trace.o \ - rcu-trace.o syscall-trace.o trap-trace.o pm-trace.o - -ifeq ($(CONFIG_NET),y) -ifdef CONFIG_FTRACE -CFLAGS_REMOVE_net-trace.o = -pg -CFLAGS_REMOVE_net-extended-trace.o = 
-pg -endif -obj-m += net-trace.o net-extended-trace.o -endif - -ifdef CONFIG_JBD2 -ifdef CONFIG_FTRACE -CFLAGS_REMOVE_jbd2-trace.o = -pg -endif -obj-m += jbd2-trace.o -endif - -#ifdef CONFIG_EXT4_FS -#ifdef CONFIG_FTRACE -#CFLAGS_REMOVE_ext4-trace.o = -pg -#endif -#obj-$(CONFIG_LTT_TRACEPROBES) += ext4-trace.o -#endif - -ifdef CONFIG_BLOCK -ifdef CONFIG_FTRACE -CFLAGS_REMOVE_block-trace.o = -pg -endif -obj-m += block-trace.o -endif - - diff --git a/discard/probes/block-trace.c b/discard/probes/block-trace.c deleted file mode 100644 index 51ae2cdd..00000000 --- a/discard/probes/block-trace.c +++ /dev/null @@ -1,309 +0,0 @@ -/* - * ltt/probes/block-trace.c - * - * block layer tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. - */ - -#include - -#include - -/* - * Add rq cmd as a sequence. Needs new type. (size + binary blob) - */ - -void probe_block_rq_abort(void *data, struct request_queue *q, struct request *rq) -{ - int rw = rq->cmd_flags & 0x03; - - if (blk_discard_rq(rq)) - rw |= (1 << BIO_RW_DISCARD); - - if (blk_pc_request(rq)) { - trace_mark_tp(block, rq_abort_pc, block_rq_abort, - probe_block_rq_abort, - "data_len %u rw %d errors %d", - blk_rq_bytes(rq), rw, rq->errors); - } else { - /* - * FIXME Using a simple trace_mark for the second event - * possibility because tracepoints do not support multiple - * connections to the same probe yet. They should have some - * refcounting. Need to enable both rq_abort_pc and rq_abort_fs - * markers to have the rq_abort_fs marker enabled. - */ - trace_mark(block, rq_abort_fs, - "hard_sector %llu " - "rw %d errors %d", (unsigned long long)blk_rq_pos(rq), - rw, rq->errors); - } -} - -void probe_block_rq_insert(void *data, struct request_queue *q, struct request *rq) -{ - int rw = rq->cmd_flags & 0x03; - - if (blk_discard_rq(rq)) - rw |= (1 << BIO_RW_DISCARD); - - if (blk_pc_request(rq)) { - trace_mark_tp(block, rq_insert_pc, block_rq_insert, - probe_block_rq_insert, - "data_len %u rw %d errors %d", - blk_rq_bytes(rq), rw, rq->errors); - } else { - /* - * FIXME Using a simple trace_mark for the second event - * possibility because tracepoints do not support multiple - * connections to the same probe yet. They should have some - * refcounting. Need to enable both rq_insert_pc and - * rq_insert_fs markers to have the rq_insert_fs marker enabled. - */ - trace_mark(block, rq_insert_fs, - "hard_sector %llu " - "rw %d errors %d", (unsigned long long)blk_rq_pos(rq), - rw, rq->errors); - } -} - -void probe_block_rq_issue(void *data, struct request_queue *q, struct request *rq) -{ - int rw = rq->cmd_flags & 0x03; - - if (blk_discard_rq(rq)) - rw |= (1 << BIO_RW_DISCARD); - - if (blk_pc_request(rq)) { - trace_mark_tp(block, rq_issue_pc, block_rq_issue, - probe_block_rq_issue, - "data_len %u rw %d errors %d", - blk_rq_bytes(rq), rw, rq->errors); - } else { - /* - * FIXME Using a simple trace_mark for the second event - * possibility because tracepoints do not support multiple - * connections to the same probe yet. They should have some - * refcounting. Need to enable both rq_issue_pc and rq_issue_fs - * markers to have the rq_issue_fs marker enabled. 
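
For reference, a probe with this signature could also be attached by hand through the generic tracepoint API of that kernel generation, rather than through the trace_mark_tp() wiring the patch itself uses. This is a hedged sketch under the assumption that register_trace_block_rq_insert()/unregister_trace_block_rq_insert() are generated from the stock block tracepoint declarations and take the probe-plus-data form shown here.

#include <linux/module.h>
#include <linux/blkdev.h>
#include <trace/events/block.h>

/* probe_block_rq_insert() as defined in the hunk above */
extern void probe_block_rq_insert(void *data, struct request_queue *q,
				  struct request *rq);

static int __init demo_block_probe_init(void)
{
	/* NULL private data; the probe ignores its first argument */
	return register_trace_block_rq_insert(probe_block_rq_insert, NULL);
}

static void __exit demo_block_probe_exit(void)
{
	unregister_trace_block_rq_insert(probe_block_rq_insert, NULL);
	/* let in-flight probe callers drain before the module goes away */
	tracepoint_synchronize_unregister();
}

module_init(demo_block_probe_init);
module_exit(demo_block_probe_exit);
MODULE_LICENSE("GPL");
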
- */ - trace_mark(block, rq_issue_fs, - "hard_sector %llu " - "rw %d errors %d", (unsigned long long)blk_rq_pos(rq), - rw, rq->errors); - } -} - -void probe_block_rq_requeue(void *data, struct request_queue *q, struct request *rq) -{ - int rw = rq->cmd_flags & 0x03; - - if (blk_discard_rq(rq)) - rw |= (1 << BIO_RW_DISCARD); - - if (blk_pc_request(rq)) { - trace_mark_tp(block, rq_requeue_pc, block_rq_requeue, - probe_block_rq_requeue, - "data_len %u rw %d errors %d", - blk_rq_bytes(rq), rw, rq->errors); - } else { - /* - * FIXME Using a simple trace_mark for the second event - * possibility because tracepoints do not support multiple - * connections to the same probe yet. They should have some - * refcounting. Need to enable both rq_requeue_pc and - * rq_requeue_fs markers to have the rq_requeue_fs marker - * enabled. - */ - trace_mark(block, rq_requeue_fs, - "hard_sector %llu " - "rw %d errors %d", (unsigned long long)blk_rq_pos(rq), - rw, rq->errors); - } -} - -void probe_block_rq_complete(void *data, struct request_queue *q, struct request *rq) -{ - int rw = rq->cmd_flags & 0x03; - - if (blk_discard_rq(rq)) - rw |= (1 << BIO_RW_DISCARD); - - if (blk_pc_request(rq)) { - trace_mark_tp(block, rq_complete_pc, block_rq_complete, - probe_block_rq_complete, - "data_len %u rw %d errors %d", - blk_rq_bytes(rq), rw, rq->errors); - } else { - /* - * FIXME Using a simple trace_mark for the second event - * possibility because tracepoints do not support multiple - * connections to the same probe yet. They should have some - * refcounting. Need to enable both rq_complete_pc and - * rq_complete_fs markers to have the rq_complete_fs marker - * enabled. - */ - trace_mark(block, rq_complete_fs, - "hard_sector %llu " - "rw %d errors %d", (unsigned long long)blk_rq_pos(rq), - rw, rq->errors); - } -} - -void probe_block_bio_bounce(void *data, struct request_queue *q, struct bio *bio) -{ - trace_mark_tp(block, bio_bounce, block_bio_bounce, - probe_block_bio_bounce, - "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," - "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " - "not_uptodate #1u%d", - (unsigned long long)bio->bi_sector, bio->bi_size, - bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); -} - -void probe_block_bio_complete(void *data, struct request_queue *q, struct bio *bio) -{ - trace_mark_tp(block, bio_complete, block_bio_complete, - probe_block_bio_complete, - "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," - "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " - "not_uptodate #1u%d", - (unsigned long long)bio->bi_sector, bio->bi_size, - bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); -} - -void probe_block_bio_backmerge(void *data, struct request_queue *q, struct bio *bio) -{ - trace_mark_tp(block, bio_backmerge, block_bio_backmerge, - probe_block_bio_backmerge, - "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," - "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " - "not_uptodate #1u%d", - (unsigned long long)bio->bi_sector, bio->bi_size, - bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); -} - -void probe_block_bio_frontmerge(void *data, struct request_queue *q, struct bio *bio) -{ - trace_mark_tp(block, bio_frontmerge, block_bio_frontmerge, - probe_block_bio_frontmerge, - "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," - "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " - "not_uptodate #1u%d", - (unsigned long long)bio->bi_sector, bio->bi_size, - bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); -} - -void probe_block_bio_queue(void *data, 
struct request_queue *q, struct bio *bio) -{ - trace_mark_tp(block, bio_queue, block_bio_queue, - probe_block_bio_queue, - "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," - "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " - "not_uptodate #1u%d", - (unsigned long long)bio->bi_sector, bio->bi_size, - bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); -} - -void probe_block_getrq(void *data, struct request_queue *q, struct bio *bio, int rw) -{ - if (bio) { - trace_mark_tp(block, getrq_bio, block_getrq, - probe_block_getrq, - "sector %llu size %u " - "rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," - "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " - "not_uptodate #1u%d", - (unsigned long long)bio->bi_sector, bio->bi_size, - bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); - } else { - /* - * FIXME Using a simple trace_mark for the second event - * possibility because tracepoints do not support multiple - * connections to the same probe yet. They should have some - * refcounting. Need to enable both getrq_bio and getrq markers - * to have the getrq marker enabled. - */ - trace_mark(block, getrq, "rw %d", rw); - } -} - -void probe_block_sleeprq(void *data, struct request_queue *q, struct bio *bio, int rw) -{ - if (bio) { - trace_mark_tp(block, sleeprq_bio, block_sleeprq, - probe_block_sleeprq, - "sector %llu size %u " - "rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," - "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " - "not_uptodate #1u%d", - (unsigned long long)bio->bi_sector, bio->bi_size, - bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE)); - } else { - /* - * FIXME Using a simple trace_mark for the second event - * possibility because tracepoints do not support multiple - * connections to the same probe yet. They should have some - * refcounting. Need to enable both sleeprq_bio and sleeprq - * markers to have the sleeprq marker enabled. 
- */ - trace_mark(block, sleeprq, "rw %d", rw); - } -} - -void probe_block_plug(void *data, struct request_queue *q) -{ - trace_mark_tp(block, plug, block_plug, probe_block_plug, - MARK_NOARGS); -} - -void probe_block_unplug_io(void *data, struct request_queue *q) -{ - unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; - - trace_mark_tp(block, unplug_io, block_unplug_io, probe_block_unplug_io, - "pdu %u", pdu); -} - -void probe_block_unplug_timer(void *data, struct request_queue *q) -{ - unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; - - trace_mark_tp(block, unplug_timer, block_unplug_timer, - probe_block_unplug_timer, - "pdu %u", pdu); -} - -void probe_block_split(void *data, struct request_queue *q, struct bio *bio, - unsigned int pdu) -{ - trace_mark_tp(block, split, block_split, - probe_block_split, - "sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," - "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " - "not_uptodate #1u%d pdu %u", - (unsigned long long)bio->bi_sector, bio->bi_size, - bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE), pdu); -} - -void probe_block_remap(void *data, struct request_queue *q, struct bio *bio, - dev_t dev, sector_t from) -{ - trace_mark_tp(block, remap, block_remap, - probe_block_remap, - "device_from %lu sector_from %llu device_to %lu " - "size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT," - "FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX " - "not_uptodate #1u%d", - (unsigned long)bio->bi_bdev->bd_dev, - (unsigned long long)from, - (unsigned long)dev, - bio->bi_size, bio->bi_rw, - !bio_flagged(bio, BIO_UPTODATE)); -} - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Block Tracepoint Probes"); diff --git a/discard/probes/ext4-trace.c b/discard/probes/ext4-trace.c deleted file mode 100644 index 83683e70..00000000 --- a/discard/probes/ext4-trace.c +++ /dev/null @@ -1,611 +0,0 @@ -/* - * ltt/probes/ext4-trace.c - * - * ext4 tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include -#include -#include -#include -#include - -#include "../ltt-tracer.h" -#include "../../fs/ext4/mballoc.h" - -static struct dentry *ext4_filter_dentry, *ext4_filter_dev_dentry, - *ext4_filter_inode_dentry; -static DEFINE_MUTEX(ext4_filter_mutex); -/* Make sure we don't race between module exit and file write */ -static int module_exits; - -struct rcu_dev_filter { - struct rcu_head rcu; - char devname[NAME_MAX]; -}; - -static struct rcu_dev_filter *dev_filter; -/* ~0UL inode_filter enables all inodes */ -static unsigned long inode_filter = ~0UL; - -/* - * Probes are executed in rcu_sched read-side critical section. - */ - -static int do_dev_filter(const char *dev) -{ - struct rcu_dev_filter *ldev_filter = rcu_dereference(dev_filter); - - if (unlikely(ldev_filter)) - if (unlikely(strcmp(ldev_filter->devname, dev))) - return 0; - return 1; -} - -static int do_inode_filter(unsigned long ino) -{ - if (unlikely(inode_filter != ~0UL)) - if (unlikely(inode_filter != ino)) - return 0; - return 1; -} - -/* - * Logical AND between dev and inode filter. 
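
Stripped of the RCU plumbing, the filter semantics above are: a cleared device filter (written as "" or "*" through dev_filter_op_write() further down) matches every device, an inode filter of ~0UL matches every inode, and do_filter() is the logical AND of the two. A userspace rendering of that logic:

#include <stdio.h>
#include <string.h>

static const char *dev_filter;			/* NULL means "all devices" */
static unsigned long inode_filter = ~0UL;	/* ~0UL means "all inodes" */

static int do_dev_filter(const char *dev)
{
	return !dev_filter || !strcmp(dev_filter, dev);
}

static int do_inode_filter(unsigned long ino)
{
	return inode_filter == ~0UL || inode_filter == ino;
}

static int do_filter(const char *dev, unsigned long ino)
{
	return do_dev_filter(dev) && do_inode_filter(ino);
}

int main(void)
{
	printf("%d\n", do_filter("sda1", 42));	/* 1: everything passes */

	dev_filter = "sda1";
	inode_filter = 42;
	printf("%d %d\n", do_filter("sda1", 42),
	       do_filter("sdb1", 42));		/* 1 0 */
	return 0;
}
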
- */ -static int do_filter(const char *dev, unsigned long ino) -{ - if (unlikely(!do_dev_filter(dev))) - return 0; - if (unlikely(!do_inode_filter(ino))) - return 0; - return 1; -} - - -void probe_ext4_free_inode(void *data, struct inode *inode) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) - return; - trace_mark_tp(ext4, free_inode, ext4_free_inode, - probe_ext4_free_inode, - "dev %s ino %lu mode %d uid %lu gid %lu blocks %llu", - inode->i_sb->s_id, inode->i_ino, inode->i_mode, - (unsigned long) inode->i_uid, (unsigned long) inode->i_gid, - (unsigned long long) inode->i_blocks); -} - -void probe_ext4_request_inode(void *data, struct inode *dir, int mode) -{ - if (unlikely(!do_filter(dir->i_sb->s_id, dir->i_ino))) - return; - trace_mark_tp(ext4, request_inode, ext4_request_inode, - probe_ext4_request_inode, - "dev %s dir %lu mode %d", - dir->i_sb->s_id, dir->i_ino, mode); -} - -void probe_ext4_allocate_inode(void *data, struct inode *inode, struct inode *dir, int mode) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino) - && !do_filter(dir->i_sb->s_id, dir->i_ino))) - return; - trace_mark_tp(ext4, allocate_inode, ext4_allocate_inode, - probe_ext4_allocate_inode, - "dev %s ino %lu dir %lu mode %d", - dir->i_sb->s_id, inode->i_ino, dir->i_ino, mode); -} - -void probe_ext4_write_begin(void *data, struct inode *inode, loff_t pos, unsigned int len, - unsigned int flags) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) - return; - trace_mark_tp(ext4, write_begin, ext4_write_begin, - probe_ext4_write_begin, - "dev %s ino %lu pos %llu len %u flags %u", - inode->i_sb->s_id, inode->i_ino, - (unsigned long long) pos, len, flags); -} - -void probe_ext4_ordered_write_end(void *data, struct inode *inode, loff_t pos, - unsigned int len, unsigned int copied) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) - return; - trace_mark_tp(ext4, ordered_write_end, ext4_ordered_write_end, - probe_ext4_ordered_write_end, - "dev %s ino %lu pos %llu len %u copied %u", - inode->i_sb->s_id, inode->i_ino, - (unsigned long long) pos, len, copied); -} - -void probe_ext4_writeback_write_end(void *data, struct inode *inode, loff_t pos, - unsigned int len, unsigned int copied) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) - return; - trace_mark_tp(ext4, writeback_write_end, ext4_writeback_write_end, - probe_ext4_writeback_write_end, - "dev %s ino %lu pos %llu len %u copied %u", - inode->i_sb->s_id, inode->i_ino, - (unsigned long long) pos, len, copied); -} - -void probe_ext4_journalled_write_end(void *data, struct inode *inode, loff_t pos, - unsigned int len, unsigned int copied) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) - return; - trace_mark_tp(ext4, journalled_write_end, ext4_journalled_write_end, - probe_ext4_journalled_write_end, - "dev %s ino %lu pos %llu len %u copied %u", - inode->i_sb->s_id, inode->i_ino, - (unsigned long long) pos, len, copied); -} - -/* - * note : wbc_flags will have to be decoded by userspace. - * #1x uses a single byte in the trace. Limits to 8 bits. 
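
The "#1x" notation asks the marker layer to store the field in a single byte, and probe_ext4_da_writepages() below packs four writeback_control booleans into it. A userspace sketch of that packing and of how a viewer would unpack it:

#include <stdio.h>

int main(void)
{
	unsigned int nonblocking = 1, for_kupdate = 0,
		     for_reclaim = 1, range_cyclic = 0;
	unsigned int flags;

	/* same layout as the probe: bit 3..0 = nonblocking, for_kupdate,
	 * for_reclaim, range_cyclic */
	flags = (nonblocking << 3) | (for_kupdate << 2) |
		(for_reclaim << 1) | range_cyclic;
	printf("packed: %#x\n", flags);		/* 0xa */

	/* viewer-side unpacking */
	printf("nonblocking=%u for_reclaim=%u\n",
	       (flags >> 3) & 1, (flags >> 1) & 1);
	return 0;
}
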
- */ -void probe_ext4_da_writepages(void *data, struct inode *inode, - struct writeback_control *wbc) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) - return; - trace_mark_tp(ext4, da_writepages, ext4_da_writepages, - probe_ext4_da_writepages, - "dev %s ino %lu nr_to_write %ld " - "pages_skipped %ld range_start %llu range_end %llu " - "wbc_flags(nonblocking,for_kupdate," - "for_reclaim,range_cyclic) #1x%u", - inode->i_sb->s_id, inode->i_ino, wbc->nr_to_write, - wbc->pages_skipped, - (unsigned long long) wbc->range_start, - (unsigned long long) wbc->range_end, - (wbc->nonblocking << 3) - | (wbc->for_kupdate << 2) - | (wbc->for_reclaim << 1) - | wbc->range_cyclic); -} - -/* - * note : wbc_flags will have to be decoded by userspace. - * #1x uses a single byte in the trace. Limits to 8 bits. - */ -void probe_ext4_da_writepages_result(void *data, struct inode *inode, - struct writeback_control *wbc, - int ret, int pages_written) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) - return; - trace_mark_tp(ext4, da_writepages_result, ext4_da_writepages_result, - probe_ext4_da_writepages_result, - "dev %s ino %lu ret %d pages_written %d " - "pages_skipped %ld " - "wbc_flags(encountered_congestion," - "more_io,no_nrwrite_index_update) #1x%u", - inode->i_sb->s_id, inode->i_ino, ret, pages_written, - wbc->pages_skipped, - (wbc->encountered_congestion << 2) - | (wbc->more_io << 1) - | wbc->no_nrwrite_index_update); -} - -void probe_ext4_da_write_begin(void *data, struct inode *inode, loff_t pos, - unsigned int len, unsigned int flags) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) - return; - trace_mark_tp(ext4, da_write_begin, ext4_da_write_begin, - probe_ext4_da_write_begin, - "dev %s ino %lu pos %llu len %u flags %u", - inode->i_sb->s_id, inode->i_ino, - (unsigned long long) pos, len, flags); -} - -void probe_ext4_da_write_end(void *data, struct inode *inode, loff_t pos, - unsigned int len, unsigned int copied) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) - return; - trace_mark_tp(ext4, da_write_end, ext4_da_write_end, - probe_ext4_da_write_end, - "dev %s ino %lu pos %llu len %u copied %u", - inode->i_sb->s_id, inode->i_ino, - (unsigned long long) pos, len, copied); -} - -void probe_ext4_discard_blocks(void *data, struct super_block *sb, unsigned long long blk, - unsigned long long count) -{ - if (unlikely(!do_dev_filter(sb->s_id))) - return; - trace_mark_tp(ext4, discard_blocks, ext4_discard_blocks, - probe_ext4_discard_blocks, - "dev %s blk %llu count %llu", - sb->s_id, blk, count); -} - -void probe_ext4_mb_new_inode_pa(void *data, struct ext4_allocation_context *ac, - struct ext4_prealloc_space *pa) -{ - if (unlikely(!do_filter(ac->ac_sb->s_id, ac->ac_inode->i_ino))) - return; - trace_mark_tp(ext4, mb_new_inode_pa, ext4_mb_new_inode_pa, - probe_ext4_mb_new_inode_pa, - "dev %s ino %lu pstart %llu len %u lstart %u", - ac->ac_sb->s_id, ac->ac_inode->i_ino, pa->pa_pstart, - pa->pa_len, pa->pa_lstart); -} - -void probe_ext4_mb_new_group_pa(void *data, struct ext4_allocation_context *ac, - struct ext4_prealloc_space *pa) -{ - if (unlikely(!do_dev_filter(ac->ac_sb->s_id))) - return; - trace_mark_tp(ext4, mb_new_group_pa, ext4_mb_new_group_pa, - probe_ext4_mb_new_group_pa, - "dev %s pstart %llu len %u lstart %u", - ac->ac_sb->s_id, pa->pa_pstart, - pa->pa_len, pa->pa_lstart); -} - -void probe_ext4_mb_release_inode_pa(void *data, struct ext4_allocation_context *ac, - struct ext4_prealloc_space *pa, - unsigned long long block, - unsigned 
int count) -{ - if (unlikely(!do_filter(ac->ac_sb->s_id, ac->ac_inode->i_ino))) - return; - trace_mark_tp(ext4, mb_release_inode_pa, ext4_mb_release_inode_pa, - probe_ext4_mb_release_inode_pa, - "dev %s ino %lu block %llu count %u", - ac->ac_sb->s_id, pa->pa_inode->i_ino, block, count); -} - -void probe_ext4_mb_release_group_pa(void *data, struct ext4_allocation_context *ac, - struct ext4_prealloc_space *pa) -{ - if (unlikely(!do_dev_filter(ac->ac_sb->s_id))) - return; - trace_mark_tp(ext4, mb_release_group_pa, ext4_mb_release_group_pa, - probe_ext4_mb_release_group_pa, - "dev %s pstart %llu len %d", - ac->ac_sb->s_id, pa->pa_pstart, pa->pa_len); -} - -void probe_ext4_discard_preallocations(void *data, struct inode *inode) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) - return; - trace_mark_tp(ext4, discard_preallocations, - ext4_discard_preallocations, - probe_ext4_discard_preallocations, - "dev %s ino %lu", - inode->i_sb->s_id, inode->i_ino); -} - -void probe_ext4_mb_discard_preallocations(void *data, struct super_block *sb, int needed) -{ - if (unlikely(!do_dev_filter(sb->s_id))) - return; - trace_mark_tp(ext4, mb_discard_preallocations, - ext4_mb_discard_preallocations, - probe_ext4_mb_discard_preallocations, - "dev %s needed %d", - sb->s_id, needed); -} - -void probe_ext4_request_blocks(void *data, struct ext4_allocation_request *ar) -{ - if (ar->inode) { - if (unlikely(!do_filter(ar->inode->i_sb->s_id, - ar->inode->i_ino))) - return; - } else { - if (unlikely(!do_dev_filter(ar->inode->i_sb->s_id))) - return; - } - trace_mark_tp(ext4, request_blocks, ext4_request_blocks, - probe_ext4_request_blocks, - "dev %s flags %u len %u ino %lu " - "lblk %llu goal %llu lleft %llu lright %llu " - "pleft %llu pright %llu", - ar->inode->i_sb->s_id, ar->flags, ar->len, - ar->inode ? ar->inode->i_ino : 0, - (unsigned long long) ar->logical, - (unsigned long long) ar->goal, - (unsigned long long) ar->lleft, - (unsigned long long) ar->lright, - (unsigned long long) ar->pleft, - (unsigned long long) ar->pright); -} - -void probe_ext4_allocate_blocks(void *data, struct ext4_allocation_request *ar, - unsigned long long block) -{ - if (ar->inode) { - if (unlikely(!do_filter(ar->inode->i_sb->s_id, - ar->inode->i_ino))) - return; - } else { - if (unlikely(!do_dev_filter(ar->inode->i_sb->s_id))) - return; - } - trace_mark_tp(ext4, allocate_blocks, ext4_allocate_blocks, - probe_ext4_allocate_blocks, - "dev %s block %llu flags %u len %u ino %lu " - "logical %llu goal %llu lleft %llu lright %llu " - "pleft %llu pright %llu", - ar->inode->i_sb->s_id, (unsigned long long) block, - ar->flags, ar->len, ar->inode ? 
ar->inode->i_ino : 0, - (unsigned long long) ar->logical, - (unsigned long long) ar->goal, - (unsigned long long) ar->lleft, - (unsigned long long) ar->lright, - (unsigned long long) ar->pleft, - (unsigned long long) ar->pright); -} - -void probe_ext4_free_blocks(void *data, struct inode *inode, __u64 block, - unsigned long count, int metadata) -{ - if (unlikely(!do_filter(inode->i_sb->s_id, inode->i_ino))) - return; - trace_mark_tp(ext4, free_blocks, ext4_free_blocks, - probe_ext4_free_blocks, - "dev %s block %llu count %lu metadata %d ino %lu", - inode->i_sb->s_id, (unsigned long long)block, - count, metadata, inode->i_ino); -} - -void probe_ext4_sync_file(void *data, struct file *file, struct dentry *dentry, - int datasync) -{ - if (unlikely(!do_dev_filter(dentry->d_inode->i_sb->s_id))) - return; - if (unlikely(!do_inode_filter(dentry->d_inode->i_ino) - && !do_inode_filter(dentry->d_parent->d_inode->i_ino))) - return; - trace_mark_tp(ext4, sync_file, ext4_sync_file, - probe_ext4_sync_file, - "dev %s datasync %d ino %ld parent %ld", - dentry->d_inode->i_sb->s_id, datasync, dentry->d_inode->i_ino, - dentry->d_parent->d_inode->i_ino); -} - -void probe_ext4_sync_fs(void *data, struct super_block *sb, int wait) -{ - if (unlikely(!do_dev_filter(sb->s_id))) - return; - trace_mark_tp(ext4, sync_fs, ext4_sync_fs, - probe_ext4_sync_fs, - "dev %s wait %d", - sb->s_id, wait); -} - -static void free_dev_filter(struct rcu_head *head) -{ - kfree(container_of(head, struct rcu_dev_filter, rcu)); -} - -static ssize_t dev_filter_op_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - int err = 0; - char buf[NAME_MAX]; - int buf_size; - char name[NAME_MAX]; - struct rcu_dev_filter *new, *old; - - mutex_lock(&ext4_filter_mutex); - if (module_exits) { - err = -EPERM; - goto error; - } - buf_size = min(count, sizeof(buf) - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto error; - buf[buf_size] = 0; - - if (sscanf(buf, "%s", name) != 1) { - err = -EPERM; - goto error; - } - - old = dev_filter; - - /* Empty string or * means all active */ - if (name[0] == '\0' || (name[0] == '*' && name[1] == '\0')) { - new = NULL; - } else { - new = kmalloc(sizeof(*new), GFP_KERNEL); - strcpy(new->devname, name); - } - - rcu_assign_pointer(dev_filter, new); - if (old) - call_rcu_sched(&old->rcu, free_dev_filter); - - mutex_unlock(&ext4_filter_mutex); - return count; - -error: - mutex_unlock(&ext4_filter_mutex); - return err; -} - -static ssize_t dev_filter_op_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - ssize_t bcount; - const char *devname; - - mutex_lock(&ext4_filter_mutex); - if (!dev_filter) - devname = "*"; - else - devname = dev_filter->devname; - bcount = simple_read_from_buffer(buffer, count, ppos, - devname, strlen(devname)); - mutex_unlock(&ext4_filter_mutex); - return bcount; -} - -static struct file_operations ext4_dev_file_operations = { - .write = dev_filter_op_write, - .read = dev_filter_op_read, -}; - -static ssize_t inode_filter_op_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - int err = 0; - char buf[NAME_MAX]; - int buf_size; - char name[NAME_MAX]; - unsigned long inode_num; - - mutex_lock(&ext4_filter_mutex); - if (module_exits) { - err = -EPERM; - goto error; - } - buf_size = min(count, sizeof(buf) - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto error; - buf[buf_size] = 0; - - if (sscanf(buf, "%s", name) != 1) { - err = -EPERM; - goto error; - } - - 
/* Empty string or * means all active */ - if (name[0] == '\0' || (name[0] == '*' && name[1] == '\0')) { - inode_filter = ~0UL; - } else { - if (sscanf(buf, "%lu", &inode_num) != 1) { - err = -EPERM; - goto error; - } - inode_filter = inode_num; - } - - mutex_unlock(&ext4_filter_mutex); - return count; - -error: - mutex_unlock(&ext4_filter_mutex); - return err; -} - -static ssize_t inode_filter_op_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - ssize_t bcount; - char inode_str[NAME_MAX]; - - mutex_lock(&ext4_filter_mutex); - if (inode_filter == ~0UL) - strcpy(inode_str, "*"); - else { - bcount = snprintf(inode_str, sizeof(inode_str), "%lu", - inode_filter); - if (bcount == sizeof(inode_str)) - bcount = -ENOSPC; - if (bcount < 0) - goto end; - } - bcount = simple_read_from_buffer(buffer, count, ppos, - inode_str, strlen(inode_str)); -end: - mutex_unlock(&ext4_filter_mutex); - return bcount; -} - -static struct file_operations ext4_inode_file_operations = { - .write = inode_filter_op_write, - .read = inode_filter_op_read, -}; - -static void release_filter_dev(void) -{ - struct rcu_dev_filter *old; - - mutex_lock(&ext4_filter_mutex); - module_exits = 1; - old = dev_filter; - rcu_assign_pointer(dev_filter, NULL); - if (old) - call_rcu_sched(&old->rcu, free_dev_filter); - mutex_unlock(&ext4_filter_mutex); -} - -static int __init filter_init(void) -{ - struct dentry *filter_root_dentry; - int err = 0; - - filter_root_dentry = get_filter_root(); - if (!filter_root_dentry) { - err = -ENOENT; - goto end; - } - - ext4_filter_dentry = debugfs_create_dir("ext4", filter_root_dentry); - - if (IS_ERR(ext4_filter_dentry) || !ext4_filter_dentry) { - printk(KERN_ERR "Failed to create ext4 filter file\n"); - err = -ENOMEM; - goto end; - } - - ext4_filter_dev_dentry = debugfs_create_file("dev", S_IWUSR, - ext4_filter_dentry, NULL, &ext4_dev_file_operations); - if (IS_ERR(ext4_filter_dev_dentry) || !ext4_filter_dev_dentry) { - printk(KERN_ERR "Failed to create ext4 dev filter file\n"); - err = -ENOMEM; - goto release_filter_dentry; - } - - ext4_filter_inode_dentry = debugfs_create_file("inode", S_IWUSR, - ext4_filter_dentry, NULL, &ext4_inode_file_operations); - if (IS_ERR(ext4_filter_inode_dentry) || !ext4_filter_inode_dentry) { - printk(KERN_ERR "Failed to create ext4 inode filter file\n"); - err = -ENOMEM; - goto release_filter_dev_dentry; - } - - goto end; - -release_filter_dev_dentry: - debugfs_remove(ext4_filter_dev_dentry); -release_filter_dentry: - debugfs_remove(ext4_filter_dentry); - release_filter_dev(); -end: - return err; -} - -static void __exit filter_exit(void) -{ - debugfs_remove(ext4_filter_dev_dentry); - debugfs_remove(ext4_filter_inode_dentry); - debugfs_remove(ext4_filter_dentry); - release_filter_dev(); -} - -module_init(filter_init); -module_exit(filter_exit); - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("ext4 Tracepoint Probes"); diff --git a/discard/probes/fs-trace.c b/discard/probes/fs-trace.c deleted file mode 100644 index bca28275..00000000 --- a/discard/probes/fs-trace.c +++ /dev/null @@ -1,158 +0,0 @@ -/* - * ltt/probes/fs-trace.c - * - * FS tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. 
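For illustration, before moving on to the fs probes, here is how the ext4 filter files above are meant to be driven from user space. Only the "ext4" directory and the "dev"/"inode" file names come from the code above; the debugfs mount point and the "ltt/filter" root directory are assumptions, since get_filter_root() is not part of this patch. A hypothetical user-space helper:

/*
 * Hypothetical helper: select which block device the ext4 probes trace.
 * The path assumes debugfs mounted at /sys/kernel/debug and a filter root
 * named "ltt/filter" (get_filter_root() is not shown in this patch).
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_ext4_dev_filter(const char *devname)
{
	/* Writing "*" (or an empty string) means: trace all devices. */
	int fd = open("/sys/kernel/debug/ltt/filter/ext4/dev", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, devname, strlen(devname));
	close(fd);
	return ret < 0 ? -1 : 0;
}

Typical use would be set_ext4_dev_filter("sda1"); the "inode" file accepts a decimal inode number or "*" in the same way, as the write handler above shows.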
- */ - -#include -#include -#include - -#include "../ltt-type-serializer.h" - -void probe_fs_buffer_wait_start(void *_data, struct buffer_head *bh) -{ - trace_mark_tp(fs, buffer_wait_start, fs_buffer_wait_start, - probe_fs_buffer_wait_start, "bh %p", bh); -} - -void probe_fs_buffer_wait_end(void *_data, struct buffer_head *bh) -{ - trace_mark_tp(fs, buffer_wait_end, fs_buffer_wait_end, - probe_fs_buffer_wait_end, "bh %p", bh); -} - -void probe_fs_exec(void *_data, char *filename) -{ - trace_mark_tp(fs, exec, fs_exec, probe_fs_exec, "filename %s", - filename); -} - -void probe_fs_ioctl(void *_data, unsigned int fd, unsigned int cmd, unsigned long arg) -{ - trace_mark_tp(fs, ioctl, fs_ioctl, probe_fs_ioctl, - "fd %u cmd %u arg %lu", fd, cmd, arg); -} - -void probe_fs_open(void *_data, int fd, char *filename) -{ - trace_mark_tp(fs, open, fs_open, probe_fs_open, - "fd %d filename %s", fd, filename); -} - -void probe_fs_close(void *_data, unsigned int fd) -{ - trace_mark_tp(fs, close, fs_close, probe_fs_close, "fd %u", fd); -} - -void probe_fs_lseek(void *_data, unsigned int fd, long offset, unsigned int origin) -{ - trace_mark_tp(fs, lseek, fs_lseek, probe_fs_lseek, - "fd %u offset %ld origin %u", fd, offset, origin); -} - -void probe_fs_llseek(void *_data, unsigned int fd, loff_t offset, unsigned int origin) -{ - trace_mark_tp(fs, llseek, fs_llseek, probe_fs_llseek, - "fd %u offset %lld origin %u", fd, - (long long)offset, origin); -} - -void probe_fs_read(void *_data, unsigned int fd, char __user *buf, size_t count, - ssize_t ret); - -DEFINE_MARKER_TP(fs, read, fs_read, probe_fs_read, - "count %zu fd %u"); - -notrace void probe_fs_read(void *_data, unsigned int fd, char __user *buf, size_t count, - ssize_t ret) -{ - struct marker *marker; - struct serialize_sizet_int data; - - data.f1 = count; - data.f2 = fd; - - marker = &GET_MARKER(fs, read); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(size_t)); -} - -void probe_fs_write(void *_data, unsigned int fd, char __user *buf, size_t count, - ssize_t ret); - -DEFINE_MARKER_TP(fs, write, fs_write, probe_fs_write, - "count %zu fd %u"); - -notrace void probe_fs_write(void *_data, unsigned int fd, char __user *buf, size_t count, - ssize_t ret) -{ - struct marker *marker; - struct serialize_sizet_int data; - - data.f1 = count; - data.f2 = fd; - - marker = &GET_MARKER(fs, write); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(size_t)); -} - -void probe_fs_pread64(void *_data, unsigned int fd, char __user *buf, size_t count, - loff_t pos, ssize_t ret) -{ - trace_mark_tp(fs, pread64, fs_pread64, probe_fs_pread64, - "fd %u count %zu pos %llu", - fd, count, (unsigned long long)pos); -} - -void probe_fs_pwrite64(void *_data, unsigned int fd, const char __user *buf, - size_t count, loff_t pos, ssize_t ret) -{ - trace_mark_tp(fs, pwrite64, fs_pwrite64, probe_fs_pwrite64, - "fd %u count %zu pos %llu", - fd, count, (unsigned long long)pos); -} - -void probe_fs_readv(void *_data, unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, ssize_t ret) -{ - trace_mark_tp(fs, readv, fs_readv, probe_fs_readv, - "fd %lu vlen %lu", fd, vlen); -} - -void probe_fs_writev(void *_data, unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, ssize_t ret) -{ - trace_mark_tp(fs, writev, fs_writev, probe_fs_writev, - "fd %lu vlen %lu", fd, vlen); -} - -void probe_fs_select(void *_data, int fd, struct timespec *end_time) -{ - struct 
timespec tmptime; - - if (end_time) { - tmptime = *end_time; - } else { - tmptime.tv_sec = -1L; - tmptime.tv_nsec = -1L; - } - - trace_mark_tp(fs, select, fs_select, probe_fs_select, - "fd %d end_time_sec %ld end_time_nsec %ld", fd, - tmptime.tv_sec, tmptime.tv_nsec); -} - -void probe_fs_poll(void *_data, int fd) -{ - trace_mark_tp(fs, pollfd, fs_poll, probe_fs_poll, - "fd %d", fd); -} - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("FS Tracepoint Probes"); diff --git a/discard/probes/ipc-trace.c b/discard/probes/ipc-trace.c deleted file mode 100644 index 3a095252..00000000 --- a/discard/probes/ipc-trace.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * ltt/probes/ipc-trace.c - * - * IPC tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include - -void probe_ipc_msg_create(void *data, long id, int flags) -{ - trace_mark_tp(ipc, msg_create, ipc_msg_create, probe_ipc_msg_create, - "id %ld flags %d", id, flags); -} - -void probe_ipc_sem_create(void *data, long id, int flags) -{ - trace_mark_tp(ipc, sem_create, ipc_sem_create, probe_ipc_sem_create, - "id %ld flags %d", id, flags); -} - -void probe_ipc_shm_create(void *data, long id, int flags) -{ - trace_mark_tp(ipc, shm_create, ipc_shm_create, probe_ipc_shm_create, - "id %ld flags %d", id, flags); -} - -void probe_ipc_call(void *data, unsigned int call, unsigned int first) -{ - trace_mark_tp(ipc, call, ipc_call, probe_ipc_call, - "call %u first %d", call, first); -} - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("IPC Tracepoint Probes"); diff --git a/discard/probes/jbd2-trace.c b/discard/probes/jbd2-trace.c deleted file mode 100644 index 3da32cd4..00000000 --- a/discard/probes/jbd2-trace.c +++ /dev/null @@ -1,208 +0,0 @@ -/* - * ltt/probes/jbd2-trace.c - * - * JBD2 tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include -#include -#include -#include - -#include "../ltt-tracer.h" - -static struct dentry *jbd2_filter_dentry, *jbd2_filter_dev_dentry; -static DEFINE_MUTEX(jbd2_filter_mutex); -/* Make sure we don't race between module exit and file write */ -static int module_exits; - -struct rcu_dev_filter { - struct rcu_head rcu; - char devname[NAME_MAX]; -}; - -static struct rcu_dev_filter *dev_filter; - -/* - * Probes are executed in rcu_sched read-side critical section. 
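The comment above states the lifetime rule that both the jbd2 and ext4 device filters rely on: the probe body runs in an rcu_sched read-side critical section, so the writer may publish a new filter with rcu_assign_pointer() and must defer freeing the old one with call_rcu_sched(). A condensed sketch of that pairing follows; the names filter_match and filter_replace are illustrative only and reuse the struct rcu_dev_filter, dev_filter and free_dev_filter declarations from this file:

static int filter_match(const char *dev)
{
	/* Called from the probe, i.e. under rcu_sched read-side. */
	struct rcu_dev_filter *f = rcu_dereference(dev_filter);

	return !f || !strcmp(f->devname, dev);	/* NULL filter: match all */
}

static void filter_replace(struct rcu_dev_filter *new)
{
	/* Writer side; serialized by jbd2_filter_mutex in the code below. */
	struct rcu_dev_filter *old = dev_filter;

	rcu_assign_pointer(dev_filter, new);
	if (old)
		call_rcu_sched(&old->rcu, free_dev_filter);
}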
- */ -static int do_filter(const char *dev) -{ - struct rcu_dev_filter *ldev_filter = rcu_dereference(dev_filter); - - if (unlikely(ldev_filter)) - if (unlikely(strcmp(ldev_filter->devname, dev))) - return 0; - return 1; -} - -void probe_jbd2_checkpoint(void *data, journal_t *journal, int result) -{ - if (unlikely(!do_filter(journal->j_devname))) - return; - trace_mark_tp(jbd2, checkpoint, jbd2_checkpoint, - probe_jbd2_checkpoint, "dev %s need_checkpoint %d", - journal->j_devname, result); -} - -void probe_jbd2_start_commit(void *data, journal_t *journal, - transaction_t *commit_transaction) -{ - if (unlikely(!do_filter(journal->j_devname))) - return; - trace_mark_tp(jbd2, start_commit, jbd2_start_commit, - probe_jbd2_start_commit, "dev %s transaction %d", - journal->j_devname, commit_transaction->t_tid); -} - -void probe_jbd2_end_commit(void *data, journal_t *journal, - transaction_t *commit_transaction) -{ - if (unlikely(!do_filter(journal->j_devname))) - return; - trace_mark_tp(jbd2, end_commit, jbd2_end_commit, - probe_jbd2_end_commit, "dev %s transaction %d head %d", - journal->j_devname, commit_transaction->t_tid, - journal->j_tail_sequence); -} - -static void free_dev_filter(struct rcu_head *head) -{ - kfree(container_of(head, struct rcu_dev_filter, rcu)); -} - -static ssize_t filter_op_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - int err = 0; - char buf[NAME_MAX]; - int buf_size; - char name[NAME_MAX]; - struct rcu_dev_filter *new, *old; - - mutex_lock(&jbd2_filter_mutex); - if (module_exits) { - err = -EPERM; - goto error; - } - buf_size = min(count, sizeof(buf) - 1); - err = copy_from_user(buf, user_buf, buf_size); - if (err) - goto error; - buf[buf_size] = 0; - - if (sscanf(buf, "%s", name) != 1) { - err = -EPERM; - goto error; - } - - old = dev_filter; - - /* Empty string or * means all active */ - if (name[0] == '\0' || (name[0] == '*' && name[1] == '\0')) { - new = NULL; - } else { - new = kmalloc(sizeof(*new), GFP_KERNEL); - strcpy(new->devname, name); - } - - rcu_assign_pointer(dev_filter, new); - if (old) - call_rcu_sched(&old->rcu, free_dev_filter); - - mutex_unlock(&jbd2_filter_mutex); - return count; - -error: - mutex_unlock(&jbd2_filter_mutex); - return err; -} - -static ssize_t filter_op_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - ssize_t bcount; - const char *devname; - - mutex_lock(&jbd2_filter_mutex); - if (!dev_filter) - devname = "*"; - else - devname = dev_filter->devname; - bcount = simple_read_from_buffer(buffer, count, ppos, - devname, strlen(devname)); - mutex_unlock(&jbd2_filter_mutex); - return bcount; -} - -static struct file_operations jbd2_file_operations = { - .write = filter_op_write, - .read = filter_op_read, -}; - -static void release_filter_dev(void) -{ - struct rcu_dev_filter *old; - - mutex_lock(&jbd2_filter_mutex); - module_exits = 1; - old = dev_filter; - rcu_assign_pointer(dev_filter, NULL); - if (old) - call_rcu_sched(&old->rcu, free_dev_filter); - mutex_unlock(&jbd2_filter_mutex); -} - -static int __init filter_init(void) -{ - struct dentry *filter_root_dentry; - int err = 0; - - filter_root_dentry = get_filter_root(); - if (!filter_root_dentry) { - err = -ENOENT; - goto end; - } - - jbd2_filter_dentry = debugfs_create_dir("jbd2", filter_root_dentry); - - if (IS_ERR(jbd2_filter_dentry) || !jbd2_filter_dentry) { - printk(KERN_ERR "Failed to create jbd2 filter file\n"); - err = -ENOMEM; - goto end; - } - - jbd2_filter_dev_dentry = debugfs_create_file("dev", 
S_IWUSR, - jbd2_filter_dentry, NULL, &jbd2_file_operations); - if (IS_ERR(jbd2_filter_dentry) || !jbd2_filter_dentry) { - printk(KERN_ERR "Failed to create jbd2 filter file\n"); - err = -ENOMEM; - goto release_filter_dentry; - } - - goto end; - -release_filter_dentry: - debugfs_remove(jbd2_filter_dentry); - release_filter_dev(); -end: - return err; -} - -static void __exit filter_exit(void) -{ - debugfs_remove(jbd2_filter_dev_dentry); - debugfs_remove(jbd2_filter_dentry); - release_filter_dev(); -} - -module_init(filter_init); -module_exit(filter_exit); - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("JBD2 Tracepoint Probes"); diff --git a/discard/probes/kernel-trace.c b/discard/probes/kernel-trace.c deleted file mode 100644 index cabe60e1..00000000 --- a/discard/probes/kernel-trace.c +++ /dev/null @@ -1,581 +0,0 @@ -/* - * ltt/probes/kernel-trace.c - * - * kernel tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../ltt-tracer.h" -#include "../ltt-type-serializer.h" - -/* - * This should probably be added to s390. - */ -#ifdef CONFIG_S390 -static struct pt_regs *get_irq_regs(void) -{ - return task_pt_regs(current); -} -#endif - -/* - * FIXME : - * currently, the specialized tracepoint probes cannot call into other marker - * probes, such as ftrace enable/disable. Given we want them to be as fast as - * possible, it might not be so bad to lose this flexibility. But that means - * such probes would have to connect to tracepoints on their own. - */ - -/* kernel_irq_entry specialized tracepoint probe */ - -void probe_irq_entry(void *_data, unsigned int id, struct pt_regs *regs, - struct irqaction *action); - -DEFINE_MARKER_TP(kernel, irq_entry, irq_entry, probe_irq_entry, - "ip %lu handler %p irq_id #2u%u kernel_mode #1u%u"); - -notrace void probe_irq_entry(void *_data, unsigned int id, struct pt_regs *regs, - struct irqaction *action) -{ - struct marker *marker; - struct serialize_long_long_short_char data; - - if (unlikely(!regs)) - regs = get_irq_regs(); - if (likely(regs)) { - data.f1 = instruction_pointer(regs); - data.f4 = !user_mode(regs); - } else { - data.f1 = 0UL; - data.f4 = 1; - } - data.f2 = (unsigned long) (action ? action->handler : NULL); - data.f3 = id; - - marker = &GET_MARKER(kernel, irq_entry); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -void probe_irq_next_handler(void *_data, unsigned int id, struct irqaction *action, - irqreturn_t prev_ret); - -DEFINE_MARKER_TP(kernel, irq_next_handler, irq_next_handler, - probe_irq_next_handler, - "handler %p prev_ret #1u%u"); - -notrace void probe_irq_next_handler(void *_data, unsigned int id, struct irqaction *action, - irqreturn_t prev_ret) -{ - struct marker *marker; - struct serialize_long_char data; - - data.f1 = (unsigned long) (action ? 
action->handler : NULL); - data.f2 = prev_ret; - - marker = &GET_MARKER(kernel, irq_next_handler); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -/* kernel_irq_exit specialized tracepoint probe */ - -void probe_irq_exit(void *_data, irqreturn_t retval); - -DEFINE_MARKER_TP(kernel, irq_exit, irq_exit, probe_irq_exit, - "handled #1u%u"); - -notrace void probe_irq_exit(void *_data, irqreturn_t retval) -{ - struct marker *marker; - unsigned char data; - - data = IRQ_RETVAL(retval); - - marker = &GET_MARKER(kernel, irq_exit); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, sizeof(data), sizeof(data)); -} - -/* kernel_softirq_entry specialized tracepoint probe */ - -void probe_softirq_entry(void *_data, struct softirq_action *h, - struct softirq_action *softirq_vec); - -DEFINE_MARKER_TP(kernel, softirq_entry, softirq_entry, - probe_softirq_entry, "softirq_id #1u%lu"); - -notrace void probe_softirq_entry(void *_data, struct softirq_action *h, - struct softirq_action *softirq_vec) -{ - struct marker *marker; - unsigned char data; - - data = ((unsigned long)h - (unsigned long)softirq_vec) / sizeof(*h); - - marker = &GET_MARKER(kernel, softirq_entry); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, sizeof(data), sizeof(data)); -} - -/* kernel_softirq_exit specialized tracepoint probe */ - -void probe_softirq_exit(void *_data, struct softirq_action *h, - struct softirq_action *softirq_vec); - -DEFINE_MARKER_TP(kernel, softirq_exit, softirq_exit, - probe_softirq_exit, "softirq_id #1u%lu"); - -notrace void probe_softirq_exit(void *_data, struct softirq_action *h, - struct softirq_action *softirq_vec) -{ - struct marker *marker; - unsigned char data; - - data = ((unsigned long)h - (unsigned long)softirq_vec) / sizeof(*h); - - marker = &GET_MARKER(kernel, softirq_exit); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, sizeof(data), sizeof(data)); -} - -/* kernel_softirq_raise specialized tracepoint probe */ - -void probe_softirq_raise(void *_data, unsigned int nr); - -DEFINE_MARKER_TP(kernel, softirq_raise, softirq_raise, - probe_softirq_raise, "softirq_id #1u%u"); - -notrace void probe_softirq_raise(void *_data, unsigned int nr) -{ - struct marker *marker; - unsigned char data; - - data = nr; - - marker = &GET_MARKER(kernel, softirq_raise); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, sizeof(data), sizeof(data)); -} - -/* Standard probes */ -void probe_irq_tasklet_low_entry(void *_data, struct tasklet_struct *t) -{ - trace_mark_tp(kernel, tasklet_low_entry, irq_tasklet_low_entry, - probe_irq_tasklet_low_entry, "func %p data %lu", - t->func, t->data); -} - -void probe_irq_tasklet_low_exit(void *_data, struct tasklet_struct *t) -{ - trace_mark_tp(kernel, tasklet_low_exit, irq_tasklet_low_exit, - probe_irq_tasklet_low_exit, "func %p data %lu", - t->func, t->data); -} - -void probe_irq_tasklet_high_entry(void *_data, struct tasklet_struct *t) -{ - trace_mark_tp(kernel, tasklet_high_entry, irq_tasklet_high_entry, - probe_irq_tasklet_high_entry, "func %p data %lu", - t->func, t->data); -} - -void probe_irq_tasklet_high_exit(void *_data, struct tasklet_struct *t) -{ - trace_mark_tp(kernel, tasklet_high_exit, irq_tasklet_high_exit, - probe_irq_tasklet_high_exit, "func %p data %lu", - t->func, t->data); -} - -void probe_sched_kthread_stop(void *_data, struct task_struct *t) -{ - trace_mark_tp(kernel, kthread_stop, sched_kthread_stop, - 
probe_sched_kthread_stop, "pid %d", t->pid); -} - -void probe_sched_kthread_stop_ret(void *_data, int ret) -{ - trace_mark_tp(kernel, kthread_stop_ret, sched_kthread_stop_ret, - probe_sched_kthread_stop_ret, "ret %d", ret); -} - -void probe_sched_wait_task(void *_data, struct task_struct *p) -{ - trace_mark_tp(kernel, sched_wait_task, sched_wait_task, - probe_sched_wait_task, "pid %d state #2d%ld", - p->pid, p->state); -} - -/* kernel_sched_try_wakeup specialized tracepoint probe */ - -void probe_sched_wakeup(void *_data, struct task_struct *p, int success); - -DEFINE_MARKER_TP(kernel, sched_try_wakeup, sched_wakeup, - probe_sched_wakeup, "pid %d cpu_id %u state #2d%ld"); - -notrace void probe_sched_wakeup(void *_data, struct task_struct *p, int success) -{ - struct marker *marker; - struct serialize_int_int_short data; - - data.f1 = p->pid; - data.f2 = task_cpu(p); - data.f3 = p->state; - - marker = &GET_MARKER(kernel, sched_try_wakeup); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(int)); -} - -void probe_sched_wakeup_new(void *_data, struct task_struct *p, int success) -{ - trace_mark_tp(kernel, sched_wakeup_new_task, sched_wakeup_new, - probe_sched_wakeup_new, "pid %d state #2d%ld cpu_id %u", - p->pid, p->state, task_cpu(p)); -} - -/* kernel_sched_schedule specialized tracepoint probe */ - -void probe_sched_switch(void *_data, struct task_struct *prev, - struct task_struct *next); - -DEFINE_MARKER_TP(kernel, sched_schedule, sched_switch, probe_sched_switch, - "prev_pid %d next_pid %d prev_state #2d%ld"); - -notrace void probe_sched_switch(void *_data, struct task_struct *prev, - struct task_struct *next) -{ - struct marker *marker; - struct serialize_int_int_short data; - - data.f1 = prev->pid; - data.f2 = next->pid; - data.f3 = prev->state; - - marker = &GET_MARKER(kernel, sched_schedule); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(int)); -} - -void probe_sched_migrate_task(void *_data, struct task_struct *p, int dest_cpu) -{ - trace_mark_tp(kernel, sched_migrate_task, sched_migrate_task, - probe_sched_migrate_task, "pid %d state #2d%ld dest_cpu %d", - p->pid, p->state, dest_cpu); -} - -void probe_sched_signal_send(void *_data, int sig, struct siginfo *info, struct task_struct *t) -{ - trace_mark_tp(kernel, send_signal, signal_generate, - probe_sched_signal_send, "pid %d signal %d", t->pid, sig); -} - -void probe_sched_process_free(void *_data, struct task_struct *p) -{ - trace_mark_tp(kernel, process_free, sched_process_free, - probe_sched_process_free, "pid %d", p->pid); -} - -void probe_sched_process_exit(void *_data, struct task_struct *p) -{ - trace_mark_tp(kernel, process_exit, sched_process_exit, - probe_sched_process_exit, "pid %d", p->pid); -} - -void probe_sched_process_wait(void *_data, struct pid *pid) -{ - trace_mark_tp(kernel, process_wait, sched_process_wait, - probe_sched_process_wait, "pid %d", pid_nr(pid)); -} - -void probe_sched_process_fork(void *_data, struct task_struct *parent, - struct task_struct *child) -{ - trace_mark_tp(kernel, process_fork, sched_process_fork, - probe_sched_process_fork, - "parent_pid %d child_pid %d child_tgid %d", - parent->pid, child->pid, child->tgid); -} - -void probe_sched_kthread_create(void *_data, void *fn, int pid) -{ - trace_mark_tp(kernel, kthread_create, sched_kthread_create, - probe_sched_kthread_create, - "fn %p pid %d", fn, pid); -} - -void probe_timer_itimer_expired(void *_data, struct signal_struct *sig) -{ - 
trace_mark_tp(kernel, timer_itimer_expired, timer_itimer_expired, - probe_timer_itimer_expired, "pid %d", - pid_nr(sig->leader_pid)); -} - -void probe_timer_itimer_set(void *_data, int which, struct itimerval *value) -{ - trace_mark_tp(kernel, timer_itimer_set, - timer_itimer_set, probe_timer_itimer_set, - "which %d interval_sec %ld interval_usec %ld " - "value_sec %ld value_usec %ld", - which, - value->it_interval.tv_sec, - value->it_interval.tv_usec, - value->it_value.tv_sec, - value->it_value.tv_usec); -} - -/* kernel_timer_set specialized tracepoint probe */ - -void probe_timer_set(void *_data, struct timer_list *timer); - -DEFINE_MARKER_TP(kernel, timer_set, timer_set, probe_timer_set, - "expires %lu function %p data %lu"); - -notrace void probe_timer_set(void *_data, struct timer_list *timer) -{ - struct marker *marker; - struct serialize_long_long_long data; - - data.f1 = timer->expires; - data.f2 = (unsigned long)timer->function; - data.f3 = timer->data; - - marker = &GET_MARKER(kernel, timer_set); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -void probe_timer_update_time(void *_data, struct timespec *_xtime, - struct timespec *_wall_to_monotonic) -{ - trace_mark_tp(kernel, timer_update_time, timer_update_time, - probe_timer_update_time, - "jiffies #8u%llu xtime_sec %ld xtime_nsec %ld " - "walltomonotonic_sec %ld walltomonotonic_nsec %ld", - (unsigned long long)jiffies_64, _xtime->tv_sec, _xtime->tv_nsec, - _wall_to_monotonic->tv_sec, _wall_to_monotonic->tv_nsec); -} - -void probe_timer_timeout(void *_data, struct task_struct *p) -{ - trace_mark_tp(kernel, timer_timeout, timer_timeout, - probe_timer_timeout, "pid %d", p->pid); -} - -void probe_kernel_printk(void *_data, unsigned long retaddr) -{ - trace_mark_tp(kernel, printk, kernel_printk, - probe_kernel_printk, "ip 0x%lX", retaddr); -} - -void probe_kernel_vprintk(void *_data, unsigned long retaddr, char *buf, int len) -{ - if (len > 0) { - unsigned int loglevel; - int mark_len; - char *mark_buf; - char saved_char; - - if (buf[0] == '<' && buf[1] >= '0' && - buf[1] <= '7' && buf[2] == '>') { - loglevel = buf[1] - '0'; - mark_buf = &buf[3]; - mark_len = len - 3; - } else { - loglevel = default_message_loglevel; - mark_buf = buf; - mark_len = len; - } - if (mark_buf[mark_len - 1] == '\n') - mark_len--; - saved_char = mark_buf[mark_len]; - mark_buf[mark_len] = '\0'; - trace_mark_tp(kernel, vprintk, kernel_vprintk, - probe_kernel_vprintk, - "loglevel #1u%u string %s ip 0x%lX", - loglevel, mark_buf, retaddr); - mark_buf[mark_len] = saved_char; - } -} - -#ifdef CONFIG_MODULES -void probe_kernel_module_free(void *_data, struct module *mod) -{ - trace_mark_tp(kernel, module_free, kernel_module_free, - probe_kernel_module_free, "name %s", mod->name); -} - -void probe_kernel_module_load(void *_data, struct module *mod) -{ - trace_mark_tp(kernel, module_load, kernel_module_load, - probe_kernel_module_load, "name %s", mod->name); -} -#endif - -void probe_kernel_panic(void *_data, const char *fmt, va_list args) -{ - char info[64]; - vsnprintf(info, sizeof(info), fmt, args); - trace_mark_tp(kernel, panic, kernel_panic, probe_kernel_panic, - "info %s", info); -} - -void probe_kernel_kernel_kexec(void *_data, struct kimage *image) -{ - trace_mark_tp(kernel, kernel_kexec, kernel_kernel_kexec, - probe_kernel_kernel_kexec, "image %p", image); -} - -void probe_kernel_crash_kexec(void *_data, struct kimage *image, struct pt_regs *regs) -{ - trace_mark_tp(kernel, crash_kexec, 
kernel_crash_kexec, - probe_kernel_crash_kexec, "image %p ip %p", image, - regs ? (void *)instruction_pointer(regs) : NULL); -} - -/* kernel_page_fault_entry specialized tracepoint probe */ - -void probe_kernel_page_fault_entry(void *_data, struct pt_regs *regs, int trapnr, - struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, int write_access); - -DEFINE_MARKER_TP(kernel, page_fault_entry, page_fault_entry, - probe_kernel_page_fault_entry, - "ip #p%lu address #p%lu trap_id #2u%u write_access #1u%u"); - -notrace void probe_kernel_page_fault_entry(void *_data, struct pt_regs *regs, int trapnr, - struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, int write_access) -{ - struct marker *marker; - struct serialize_long_long_short_char data; - - if (likely(regs)) - data.f1 = instruction_pointer(regs); - else - data.f1 = 0UL; - data.f2 = address; - data.f3 = (unsigned short)trapnr; - data.f4 = (unsigned char)!!write_access; - - marker = &GET_MARKER(kernel, page_fault_entry); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -/* kernel_page_fault_exit specialized tracepoint probe */ - -void probe_kernel_page_fault_exit(void *_data, int res); - -DEFINE_MARKER_TP(kernel, page_fault_exit, page_fault_exit, - probe_kernel_page_fault_exit, - "res %d"); - -notrace void probe_kernel_page_fault_exit(void *_data, int res) -{ - struct marker *marker; - - marker = &GET_MARKER(kernel, page_fault_exit); - ltt_specialized_trace(marker, marker->single.probe_private, - &res, sizeof(res), sizeof(res)); -} - -/* kernel_page_fault_nosem_entry specialized tracepoint probe */ - -void probe_kernel_page_fault_nosem_entry(void *_data, struct pt_regs *regs, - int trapnr, unsigned long address); - -DEFINE_MARKER_TP(kernel, page_fault_nosem_entry, page_fault_nosem_entry, - probe_kernel_page_fault_nosem_entry, - "ip #p%lu address #p%lu trap_id #2u%u"); - -notrace void probe_kernel_page_fault_nosem_entry(void *_data, struct pt_regs *regs, - int trapnr, unsigned long address) -{ - struct marker *marker; - struct serialize_long_long_short data; - - if (likely(regs)) - data.f1 = instruction_pointer(regs); - else - data.f1 = 0UL; - data.f2 = address; - data.f3 = (unsigned short)trapnr; - - marker = &GET_MARKER(kernel, page_fault_nosem_entry); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -/* kernel_page_fault_nosem_exit specialized tracepoint probe */ - -void probe_kernel_page_fault_nosem_exit(void *_data, int res); - -DEFINE_MARKER_TP(kernel, page_fault_nosem_exit, page_fault_nosem_exit, - probe_kernel_page_fault_nosem_exit, - MARK_NOARGS); - -notrace void probe_kernel_page_fault_nosem_exit(void *_data, int res) -{ - struct marker *marker; - - marker = &GET_MARKER(kernel, page_fault_nosem_exit); - ltt_specialized_trace(marker, marker->single.probe_private, - NULL, 0, 0); -} - -/* kernel_page_fault_get_user_entry specialized tracepoint probe */ - -void probe_kernel_page_fault_get_user_entry(void *_data, struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long address, int write_access); - -DEFINE_MARKER_TP(kernel, page_fault_get_user_entry, page_fault_get_user_entry, - probe_kernel_page_fault_get_user_entry, - "address #p%lu write_access #1u%u"); - -notrace void probe_kernel_page_fault_get_user_entry(void *_data, struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long address, int write_access) -{ - struct marker *marker; - struct 
serialize_long_char data; - - data.f1 = address; - data.f2 = (unsigned char)!!write_access; - - marker = &GET_MARKER(kernel, page_fault_get_user_entry); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -/* kernel_page_fault_get_user_exit specialized tracepoint probe */ - -void probe_kernel_page_fault_get_user_exit(void *_data, int res); - -DEFINE_MARKER_TP(kernel, page_fault_get_user_exit, page_fault_get_user_exit, - probe_kernel_page_fault_get_user_exit, - "res %d"); - -notrace void probe_kernel_page_fault_get_user_exit(void *_data, int res) -{ - struct marker *marker; - - marker = &GET_MARKER(kernel, page_fault_get_user_exit); - ltt_specialized_trace(marker, marker->single.probe_private, - &res, sizeof(res), sizeof(res)); -} - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("kernel Tracepoint Probes"); diff --git a/discard/probes/lockdep-trace.c b/discard/probes/lockdep-trace.c deleted file mode 100644 index a9a77344..00000000 --- a/discard/probes/lockdep-trace.c +++ /dev/null @@ -1,60 +0,0 @@ -/* - * ltt/probes/lockdep-trace.c - * - * lockdep tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include -#include - -void probe_lockdep_hardirqs_on(void *data, unsigned long retaddr) -{ - trace_mark_tp(lockdep, hardirqs_on, lockdep_hardirqs_on, - probe_lockdep_hardirqs_on, "retaddr 0x%lX", retaddr); -} - -void probe_lockdep_hardirqs_off(void *data, unsigned long retaddr) -{ - trace_mark_tp(lockdep, hardirqs_off, lockdep_hardirqs_off, - probe_lockdep_hardirqs_off, "retaddr 0x%lX", retaddr); -} - -void probe_lockdep_softirqs_on(void *data, unsigned long retaddr) -{ - trace_mark_tp(lockdep, softirqs_on, lockdep_softirqs_on, - probe_lockdep_softirqs_on, "retaddr 0x%lX", retaddr); -} - -void probe_lockdep_softirqs_off(void *data, unsigned long retaddr) -{ - trace_mark_tp(lockdep, softirqs_off, lockdep_softirqs_off, - probe_lockdep_softirqs_off, "retaddr 0x%lX", retaddr); -} - -void probe_lockdep_lock_acquire(void *data, unsigned long retaddr, - unsigned int subclass, struct lockdep_map *lock, int trylock, - int read, int hardirqs_off) -{ - trace_mark_tp(lockdep, lock_acquire, lockdep_lock_acquire, - probe_lockdep_lock_acquire, - "retaddr 0x%lX subclass %u lock %p trylock %d read %d " - "hardirqs_off %d", - retaddr, subclass, lock, trylock, read, hardirqs_off); -} - -void probe_lockdep_lock_release(void *data, unsigned long retaddr, - struct lockdep_map *lock, int nested) -{ - trace_mark_tp(lockdep, lock_release, lockdep_lock_release, - probe_lockdep_lock_release, - "retaddr 0x%lX lock %p nested %d", - retaddr, lock, nested); -} - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("lockdep Tracepoint Probes"); diff --git a/discard/probes/mm-trace.c b/discard/probes/mm-trace.c deleted file mode 100644 index 935e366c..00000000 --- a/discard/probes/mm-trace.c +++ /dev/null @@ -1,146 +0,0 @@ -/* - * ltt/probes/mm-trace.c - * - * MM tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. 
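Before the mm probes, it is worth distilling the two probe styles that recur throughout these files. Plain probes call trace_mark_tp(), which serializes the arguments through the marker format string at trace time. Hot-path probes instead use DEFINE_MARKER_TP() plus a notrace function that fills a flat serialize_* struct and hands it to ltt_specialized_trace() along with its size and what the existing callers pass as the largest member alignment. A minimal sketch with invented names (foo, probe_foo), not taken from the patch:

/* Illustrative only: the "specialized" probe pattern used repeatedly above. */
void probe_foo(void *_data, unsigned long a, unsigned int b);

DEFINE_MARKER_TP(kernel, foo, foo, probe_foo, "a %lu b %u");

notrace void probe_foo(void *_data, unsigned long a, unsigned int b)
{
	struct marker *marker;
	struct serialize_long_int data;	/* flat payload matching the format */

	data.f1 = a;
	data.f2 = b;

	marker = &GET_MARKER(kernel, foo);
	ltt_specialized_trace(marker, marker->single.probe_private,
			      &data, serialize_sizeof(data), sizeof(long));
}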
- */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "../ltt-type-serializer.h" - -void probe_wait_on_page_start(void *_data, struct page *page, int bit_nr) -{ - trace_mark_tp(mm, wait_on_page_start, wait_on_page_start, - probe_wait_on_page_start, "pfn %lu bit_nr %d", - page_to_pfn(page), bit_nr); -} - -void probe_wait_on_page_end(void *_data, struct page *page, int bit_nr) -{ - trace_mark_tp(mm, wait_on_page_end, wait_on_page_end, - probe_wait_on_page_end, "pfn %lu bit_nr %d", - page_to_pfn(page), bit_nr); -} - -void probe_hugetlb_page_free(void *_data, struct page *page) -{ - trace_mark_tp(mm, huge_page_free, hugetlb_page_free, - probe_hugetlb_page_free, "pfn %lu", page_to_pfn(page)); -} - -void probe_hugetlb_page_alloc(void *_data, struct page *page) -{ - if (page) - trace_mark_tp(mm, huge_page_alloc, hugetlb_page_alloc, - probe_hugetlb_page_alloc, "pfn %lu", page_to_pfn(page)); -} - -/* mm_page_free specialized tracepoint probe */ - -void probe_page_free(void *_data, struct page *page, unsigned int order); - -DEFINE_MARKER_TP(mm, page_free, page_free, probe_page_free, - "pfn %lu order %u"); - -notrace void probe_page_free(void *_data, struct page *page, unsigned int order) -{ - struct marker *marker; - struct serialize_long_int data; - - data.f1 = page_to_pfn(page); - data.f2 = order; - - marker = &GET_MARKER(mm, page_free); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -/* mm_page_alloc specialized tracepoint probe */ - -void probe_page_alloc(void *_data, struct page *page, unsigned int order); - -DEFINE_MARKER_TP(mm, page_alloc, page_alloc, probe_page_alloc, - "pfn %lu order %u"); - -notrace void probe_page_alloc(void *_data, struct page *page, unsigned int order) -{ - struct marker *marker; - struct serialize_long_int data; - - if (unlikely(!page)) - return; - - data.f1 = page_to_pfn(page); - data.f2 = order; - - marker = &GET_MARKER(mm, page_alloc); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -#ifdef CONFIG_SWAP -void probe_swap_in(void *_data, struct page *page, swp_entry_t entry) -{ - trace_mark_tp(mm, swap_in, swap_in, probe_swap_in, - "pfn %lu filp %p offset %lu", - page_to_pfn(page), - get_swap_info_struct(swp_type(entry))->swap_file, - swp_offset(entry)); -} - -void probe_swap_out(void *_data, struct page *page) -{ - trace_mark_tp(mm, swap_out, swap_out, probe_swap_out, - "pfn %lu filp %p offset %lu", - page_to_pfn(page), - get_swap_info_struct(swp_type( - page_swp_entry(page)))->swap_file, - swp_offset(page_swp_entry(page))); -} - -void probe_swap_file_close(void *_data, struct file *file) -{ - trace_mark_tp(mm, swap_file_close, swap_file_close, - probe_swap_file_close, "filp %p", file); -} - -void probe_swap_file_open(void *_data, struct file *file, char *filename) -{ - trace_mark_tp(mm, swap_file_open, swap_file_open, - probe_swap_file_open, "filp %p filename %s", - file, filename); -} -#endif - -void probe_add_to_page_cache(void *_data, struct address_space *mapping, pgoff_t offset) -{ - trace_mark_tp(mm, add_to_page_cache, add_to_page_cache, - probe_add_to_page_cache, - "inode %lu sdev %u", - mapping->host->i_ino, mapping->host->i_sb->s_dev); -} - -void probe_remove_from_page_cache(void *_data, struct address_space *mapping) -{ - trace_mark_tp(mm, remove_from_page_cache, remove_from_page_cache, - probe_remove_from_page_cache, - "inode %lu sdev %u", - 
mapping->host->i_ino, mapping->host->i_sb->s_dev); -} - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("MM Tracepoint Probes"); diff --git a/discard/probes/net-extended-trace.c b/discard/probes/net-extended-trace.c deleted file mode 100644 index 15fc8109..00000000 --- a/discard/probes/net-extended-trace.c +++ /dev/null @@ -1,146 +0,0 @@ -/* - * ltt/probes/net-extended-trace.c - * - * Net tracepoint extended probes. - * - * These probes record many header fields from TCP and UDP messages. Here are - * the consequences of this: - * 1) it allows analyzing network traffic to provide some pcap-like - * functionality within LTTng - * 2) it allows offline synchronization of a group of concurrent traces - * recorded on different nodes - * 3) it increases tracing overhead - * - * You can leave out these probes or not activate them if you are not - * especially interested in the details of network traffic and do not wish to - * synchronize distributed traces. - * - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "../ltt-type-serializer.h" - -void probe_net_dev_xmit_extended(void *_data, struct sk_buff *skb); - -DEFINE_MARKER_TP(net, dev_xmit_extended, net_dev_xmit, - probe_net_dev_xmit_extended, "skb 0x%lX network_protocol #n2u%hu " - "transport_protocol #1u%u saddr #n4u%lu daddr #n4u%lu " - "tot_len #n2u%hu ihl #1u%u source #n2u%hu dest #n2u%hu seq #n4u%lu " - "ack_seq #n4u%lu doff #1u%u ack #1u%u rst #1u%u syn #1u%u fin #1u%u"); - -notrace void probe_net_dev_xmit_extended(void *_data, struct sk_buff *skb) -{ - struct marker *marker; - struct serialize_l214421224411111 data; - struct iphdr *iph = ip_hdr(skb); - struct tcphdr *th = tcp_hdr(skb); - - data.f1 = (unsigned long)skb; - data.f2 = skb->protocol; - - if (ntohs(skb->protocol) == ETH_P_IP) { - data.f3 = ip_hdr(skb)->protocol; - data.f4 = iph->saddr; - data.f5 = iph->daddr; - data.f6 = iph->tot_len; - data.f7 = iph->ihl; - - if (data.f3 == IPPROTO_TCP) { - data.f8 = th->source; - data.f9 = th->dest; - data.f10 = th->seq; - data.f11 = th->ack_seq; - data.f12 = th->doff; - data.f13 = th->ack; - data.f14 = th->rst; - data.f15 = th->syn; - data.f16 = th->fin; - } - } - - marker = &GET_MARKER(net, dev_xmit_extended); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -void probe_tcpv4_rcv_extended(void *_data, struct sk_buff *skb); - -DEFINE_MARKER_TP(net, tcpv4_rcv_extended, net_tcpv4_rcv, - probe_tcpv4_rcv_extended, "skb 0x%lX saddr #n4u%lu daddr #n4u%lu " - "tot_len #n2u%hu ihl #1u%u source #n2u%hu dest #n2u%hu seq #n4u%lu " - "ack_seq #n4u%lu doff #1u%u ack #1u%u rst #1u%u syn #1u%u fin #1u%u"); - -notrace void probe_tcpv4_rcv_extended(void *_data, struct sk_buff *skb) -{ - struct marker *marker; - struct serialize_l4421224411111 data; - struct iphdr *iph = ip_hdr(skb); - struct tcphdr *th = tcp_hdr(skb); - - data.f1 = (unsigned long)skb; - data.f2 = iph->saddr; - data.f3 = iph->daddr; - data.f4 = iph->tot_len; - data.f5 = iph->ihl; - data.f6 = th->source; - data.f7 = th->dest; - data.f8 = th->seq; - data.f9 = th->ack_seq; - data.f10 = th->doff; - data.f11 = th->ack; - data.f12 = th->rst; - data.f13 = th->syn; - data.f14 = th->fin; - - marker = &GET_MARKER(net, tcpv4_rcv_extended); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -void probe_udpv4_rcv_extended(void *_data, struct sk_buff 
*skb); - -DEFINE_MARKER_TP(net, udpv4_rcv_extended, net_udpv4_rcv, - probe_udpv4_rcv_extended, "skb 0x%lX saddr #n4u%lu daddr #n4u%lu " - "unicast #1u%u ulen #n2u%hu source #n2u%hu dest #n2u%hu " - "data_start #8u%lx"); - -notrace void probe_udpv4_rcv_extended(void *_data, struct sk_buff *skb) -{ - struct marker *marker; - struct serialize_l4412228 data; - struct iphdr *iph = ip_hdr(skb); - struct rtable *rt = skb_rtable(skb); - struct udphdr *uh = udp_hdr(skb); - - data.f1 = (unsigned long)skb; - data.f2 = iph->saddr; - data.f3 = iph->daddr; - data.f4 = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST) ? 0 : 1; - data.f5 = uh->len; - data.f6 = uh->source; - data.f7 = uh->dest; - /* UDP header has not been pulled from skb->data, read the first 8 - * bytes of UDP data if they are not in a fragment*/ - data.f8 = 0; - if (skb_headlen(skb) >= sizeof(struct udphdr) + 8) - data.f8 = *(unsigned long long *)(skb->data + sizeof(*uh)); - else if (skb_headlen(skb) >= sizeof(struct udphdr)) - memcpy(&data.f8, skb->data + sizeof(struct udphdr), - skb_headlen(skb) - sizeof(struct udphdr)); - - marker = &GET_MARKER(net, udpv4_rcv_extended); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(unsigned long long)); -} - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Benjamin Poirier"); -MODULE_DESCRIPTION("Net Tracepoint Extended Probes"); diff --git a/discard/probes/net-trace.c b/discard/probes/net-trace.c deleted file mode 100644 index 3124125d..00000000 --- a/discard/probes/net-trace.c +++ /dev/null @@ -1,406 +0,0 @@ -/* - * ltt/probes/net-trace.c - * - * Net tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "../ltt-type-serializer.h" - -void probe_net_dev_xmit(void *_data, struct sk_buff *skb); - -DEFINE_MARKER_TP(net, dev_xmit, net_dev_xmit, probe_net_dev_xmit, - "skb %p protocol #n2u%hu"); - -notrace void probe_net_dev_xmit(void *_data, struct sk_buff *skb) -{ - struct marker *marker; - struct serialize_long_short data; - - data.f1 = (unsigned long)skb; - data.f2 = skb->protocol; - - marker = &GET_MARKER(net, dev_xmit); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -void probe_net_dev_receive(void *_data, struct sk_buff *skb); - -DEFINE_MARKER_TP(net, dev_receive, net_dev_receive, probe_net_dev_receive, - "skb %p protocol #n2u%hu"); - -notrace void probe_net_dev_receive(void *_data, struct sk_buff *skb) -{ - struct marker *marker; - struct serialize_long_short data; - - data.f1 = (unsigned long)skb; - data.f2 = skb->protocol; - - marker = &GET_MARKER(net, dev_receive); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -void probe_ipv4_addr_add(void *_data, struct in_ifaddr *ifa) -{ - trace_mark_tp(netif_state, insert_ifa_ipv4, ipv4_addr_add, - probe_ipv4_addr_add, "label %s address #4u%u", - ifa->ifa_label, (unsigned int)ifa->ifa_address); -} - -void probe_ipv4_addr_del(void *_data, struct in_ifaddr *ifa) -{ - trace_mark_tp(netif_state, del_ifa_ipv4, ipv4_addr_del, - probe_ipv4_addr_del, "label %s address #4u%u", - ifa->ifa_label, (unsigned int)ifa->ifa_address); -} - -void probe_ipv6_addr_add(void *_data, struct inet6_ifaddr *ifa) -{ - __u8 *addr = ifa->addr.s6_addr; - - trace_mark_tp(netif_state, insert_ifa_ipv6, ipv6_addr_add, - probe_ipv6_addr_add, - "label %s " - 
"a15 #1x%c a14 #1x%c a13 #1x%c a12 #1x%c " - "a11 #1x%c a10 #1x%c a9 #1x%c a8 #1x%c " - "a7 #1x%c a6 #1x%c a5 #1x%c a4 #1x%c " - "a3 #1x%c a2 #1x%c a1 #1x%c a0 #1x%c", - ifa->idev->dev->name, - addr[15], addr[14], addr[13], addr[12], - addr[11], addr[10], addr[9], addr[8], - addr[7], addr[6], addr[5], addr[4], - addr[3], addr[2], addr[1], addr[0]); -} - -void probe_ipv6_addr_del(void *_data, struct inet6_ifaddr *ifa) -{ - __u8 *addr = ifa->addr.s6_addr; - - trace_mark_tp(netif_state, insert_ifa_ipv6, ipv6_addr_del, - probe_ipv6_addr_del, - "label %s " - "a15 #1x%c a14 #1x%c a13 #1x%c a12 #1x%c " - "a11 #1x%c a10 #1x%c a9 #1x%c a8 #1x%c " - "a7 #1x%c a6 #1x%c a5 #1x%c a4 #1x%c " - "a3 #1x%c a2 #1x%c a1 #1x%c a0 #1x%c", - ifa->idev->dev->name, - addr[15], addr[14], addr[13], addr[12], - addr[11], addr[10], addr[9], addr[8], - addr[7], addr[6], addr[5], addr[4], - addr[3], addr[2], addr[1], addr[0]); -} - -void probe_socket_create(void *_data, int family, int type, int protocol, - struct socket *sock, int ret) -{ - trace_mark_tp(net, socket_create, socket_create, probe_socket_create, - "family %d type %d protocol %d sock %p ret %d", - family, type, protocol, sock, ret); -} - -void probe_socket_bind(void *_data, int fd, struct sockaddr __user *umyaddr, int addrlen, - int ret) -{ - trace_mark_tp(net, socket_bind, socket_bind, probe_socket_bind, - "fd %d umyaddr %p addrlen %d ret %d", - fd, umyaddr, addrlen, ret); -} - -void probe_socket_connect(void *_data, int fd, struct sockaddr __user *uservaddr, - int addrlen, int ret) -{ - trace_mark_tp(net, socket_connect, socket_connect, probe_socket_connect, - "fd %d uservaddr %p addrlen %d ret %d", - fd, uservaddr, addrlen, ret); -} - -void probe_socket_listen(void *_data, int fd, int backlog, int ret) -{ - trace_mark_tp(net, socket_listen, socket_listen, probe_socket_listen, - "fd %d backlog %d ret %d", - fd, backlog, ret); -} - -void probe_socket_accept(void *_data, int fd, struct sockaddr __user *upeer_sockaddr, - int __user *upeer_addrlen, int flags, int ret) -{ - trace_mark_tp(net, socket_accept, socket_accept, probe_socket_accept, - "fd %d upeer_sockaddr %p upeer_addrlen %p flags %d ret %d", - fd, upeer_sockaddr, upeer_addrlen, flags, ret); -} - -void probe_socket_getsockname(void *_data, int fd, struct sockaddr __user *usockaddr, - int __user *usockaddr_len, int ret) -{ - trace_mark_tp(net, socket_getsockname, socket_getsockname, - probe_socket_getsockname, - "fd %d usockaddr %p usockaddr_len %p ret %d", - fd, usockaddr, usockaddr_len, ret); -} - -void probe_socket_getpeername(void *_data, int fd, struct sockaddr __user *usockaddr, - int __user *usockaddr_len, int ret) -{ - trace_mark_tp(net, socket_getpeername, socket_getpeername, - probe_socket_getpeername, - "fd %d usockaddr %p usockaddr_len %p ret %d", - fd, usockaddr, usockaddr_len, ret); -} - -void probe_socket_socketpair(void *_data, int family, int type, int protocol, - int __user *usockvec, int ret) -{ - trace_mark_tp(net, socket_socketpair, socket_socketpair, - probe_socket_socketpair, - "family %d type %d protocol %d usockvec %p ret %d", - family, type, protocol, usockvec, ret); -} - -void probe_socket_sendmsg(void *_data, struct socket *sock, struct msghdr *msg, size_t size, - int ret); - -DEFINE_MARKER_TP(net, socket_sendmsg, net_socket_sendmsg, - probe_socket_sendmsg, - "sock %p msg %p size %zu ret %d"); - -notrace void probe_socket_sendmsg(void *_data, struct socket *sock, struct msghdr *msg, - size_t size, int ret) -{ - struct marker *marker; - struct 
serialize_long_long_sizet_int data; - - data.f1 = (unsigned long)sock; - data.f2 = (unsigned long)msg; - data.f3 = size; - data.f4 = ret; - - marker = &GET_MARKER(net, socket_sendmsg); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(size_t)); -} - -void probe_socket_recvmsg(void *_data, struct socket *sock, struct msghdr *msg, size_t size, - int flags, int ret); - -DEFINE_MARKER_TP(net, socket_recvmsg, net_socket_recvmsg, - probe_socket_recvmsg, - "sock %p msg %p size %zu flags %d ret %d"); - -notrace void probe_socket_recvmsg(void *_data, struct socket *sock, struct msghdr *msg, - size_t size, int flags, int ret) -{ - struct marker *marker; - struct serialize_long_long_sizet_int_int data; - - data.f1 = (unsigned long)sock; - data.f2 = (unsigned long)msg; - data.f3 = size; - data.f4 = flags; - data.f5 = ret; - - marker = &GET_MARKER(net, socket_recvmsg); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(size_t)); -} - -void probe_socket_setsockopt(void *_data, int fd, int level, int optname, - char __user *optval, int optlen, int ret) -{ - trace_mark_tp(net, socket_setsockopt, socket_setsockopt, - probe_socket_setsockopt, - "fd %d level %d optname %d optval %p optlen %d ret %d", - fd, level, optname, optval, optlen, ret); -} - -void probe_socket_getsockopt(void *_data, int fd, int level, int optname, - char __user *optval, int __user *optlen, int ret) -{ - trace_mark_tp(net, socket_getsockopt, socket_getsockopt, - probe_socket_getsockopt, - "fd %d level %d optname %d optval %p optlen %p ret %d", - fd, level, optname, optval, optlen, ret); -} - -void probe_socket_shutdown(void *_data, int fd, int how, int ret) -{ - trace_mark_tp(net, socket_shutdown, socket_shutdown, - probe_socket_shutdown, - "fd %d how %d ret %d", - fd, how, ret); -} - -void probe_socket_call(void *_data, int call, unsigned long a0) -{ - trace_mark_tp(net, socket_call, socket_call, probe_socket_call, - "call %d a0 %lu", call, a0); -} - -void probe_tcpv4_rcv(void *_data, struct sk_buff *skb); - -DEFINE_MARKER_TP(net, tcpv4_rcv, net_tcpv4_rcv, probe_tcpv4_rcv, - "skb %p"); - -notrace void probe_tcpv4_rcv(void *_data, struct sk_buff *skb) -{ - struct marker *marker; - - marker = &GET_MARKER(net, tcpv4_rcv); - ltt_specialized_trace(marker, marker->single.probe_private, - &skb, sizeof(skb), sizeof(skb)); -} - -void probe_udpv4_rcv(void *_data, struct sk_buff *skb); - -DEFINE_MARKER_TP(net, udpv4_rcv, net_udpv4_rcv, probe_udpv4_rcv, - "skb %p"); - -notrace void probe_udpv4_rcv(void *_data, struct sk_buff *skb) -{ - struct marker *marker; - - marker = &GET_MARKER(net, udpv4_rcv); - ltt_specialized_trace(marker, marker->single.probe_private, - &skb, sizeof(skb), sizeof(skb)); -} - -#ifdef CONFIG_NETPOLL -void probe_net_napi_schedule(void *_data, struct napi_struct *n); - -DEFINE_MARKER_TP(net, napi_schedule, net_napi_schedule, - probe_net_napi_schedule, - "napi_struct %p name %s"); - -notrace void probe_net_napi_schedule(void *_data, struct napi_struct *n) -{ - struct marker *marker; - struct serialize_long_ifname data; - size_t data_len = 0; - - data.f1 = (unsigned long)n; - data_len += sizeof(data.f1); - /* No need to align for strings */ - strcpy(data.f2, n->dev ? 
n->dev->name : ""); - data_len += strlen(data.f2) + 1; - - marker = &GET_MARKER(net, napi_schedule); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, data_len, sizeof(long)); -} - -void probe_net_napi_poll(void *_data, struct napi_struct *n); - -DEFINE_MARKER_TP(net, napi_poll, net_napi_poll, - probe_net_napi_poll, - "napi_struct %p name %s"); - -notrace void probe_net_napi_poll(void *_data, struct napi_struct *n) -{ - struct marker *marker; - struct serialize_long_ifname data; - size_t data_len = 0; - - data.f1 = (unsigned long)n; - data_len += sizeof(data.f1); - /* No need to align for strings */ - strcpy(data.f2, n->dev ? n->dev->name : ""); - data_len += strlen(data.f2) + 1; - - marker = &GET_MARKER(net, napi_poll); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, data_len, sizeof(long)); -} - -void probe_net_napi_complete(void *_data, struct napi_struct *n); - -DEFINE_MARKER_TP(net, napi_complete, net_napi_complete, - probe_net_napi_complete, - "napi_struct %p name %s"); - -notrace void probe_net_napi_complete(void *_data, struct napi_struct *n) -{ - struct marker *marker; - struct serialize_long_ifname data; - size_t data_len = 0; - - data.f1 = (unsigned long)n; - data_len += sizeof(data.f1); - /* No need to align for strings */ - strcpy(data.f2, n->dev ? n->dev->name : ""); - data_len += strlen(data.f2) + 1; - - marker = &GET_MARKER(net, napi_complete); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, data_len, sizeof(long)); -} -#else /* !CONFIG_NETPOLL */ -void probe_net_napi_schedule(void *_data, struct napi_struct *n); - -DEFINE_MARKER_TP(net, napi_schedule, net_napi_schedule, - probe_net_napi_schedule, - "napi_struct %p"); - -notrace void probe_net_napi_schedule(void *_data, struct napi_struct *n) -{ - struct marker *marker; - unsigned long data; - - data = (unsigned long)n; - - marker = &GET_MARKER(net, napi_schedule); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, sizeof(data), sizeof(data)); -} - -void probe_net_napi_poll(void *_data, struct napi_struct *n); - -DEFINE_MARKER_TP(net, napi_poll, net_napi_poll, - probe_net_napi_poll, - "napi_struct %p"); - -notrace void probe_net_napi_poll(void *_data, struct napi_struct *n) -{ - struct marker *marker; - unsigned long data; - - data = (unsigned long)n; - - marker = &GET_MARKER(net, napi_poll); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, sizeof(data), sizeof(data)); -} - -void probe_net_napi_complete(void *_data, struct napi_struct *n); - -DEFINE_MARKER_TP(net, napi_complete, net_napi_complete, - probe_net_napi_complete, - "napi_struct %p"); - -notrace void probe_net_napi_complete(void *_data, struct napi_struct *n) -{ - struct marker *marker; - unsigned long data; - - data = (unsigned long)n; - - marker = &GET_MARKER(net, napi_complete); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, sizeof(data), sizeof(data)); -} -#endif - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Net Tracepoint Probes"); diff --git a/discard/probes/pm-trace.c b/discard/probes/pm-trace.c deleted file mode 100644 index 7abe8e37..00000000 --- a/discard/probes/pm-trace.c +++ /dev/null @@ -1,43 +0,0 @@ -/* - * ltt/probes/pm-trace.c - * - * Power Management tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. 
- */ - -#include -#include - -void probe_pm_idle_entry(void *_data) -{ - trace_mark_tp(pm, idle_entry, pm_idle_entry, - probe_pm_idle_entry, "irqstate #1%d", - irqs_disabled()); -} - -void probe_pm_idle_exit(void *_data) -{ - trace_mark_tp(pm, idle_exit, pm_idle_exit, - probe_pm_idle_exit, "irqstate #1%d", - irqs_disabled()); -} - -void probe_pm_suspend_entry(void *_data) -{ - trace_mark_tp(pm, suspend_entry, pm_suspend_entry, - probe_pm_suspend_entry, "irqstate #1%d", - irqs_disabled()); -} - -void probe_pm_suspend_exit(void *_data) -{ - trace_mark_tp(pm, suspend_exit, pm_suspend_exit, - probe_pm_suspend_exit, "irqstate #1%d", - irqs_disabled()); -} - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Power Management Tracepoint Probes"); diff --git a/discard/probes/rcu-trace.c b/discard/probes/rcu-trace.c deleted file mode 100644 index cc164546..00000000 --- a/discard/probes/rcu-trace.c +++ /dev/null @@ -1,36 +0,0 @@ -/* - * ltt/probes/rcu-trace.c - * - * RCU tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include - -#ifdef CONFIG_TREE_RCU -void probe_rcu_tree_callback(void *data, struct rcu_head *head) -{ - trace_mark_tp(rcu, tree_callback, rcu_tree_callback, - probe_rcu_tree_callback, "func %p", head->func); -} - -void probe_rcu_tree_call_rcu(void *data, struct rcu_head *head, unsigned long ip) -{ - trace_mark_tp(rcu, tree_call_rcu, rcu_tree_call_rcu, - probe_rcu_tree_call_rcu, "func %p ip 0x%lX", head->func, ip); -} - -void probe_rcu_tree_call_rcu_bh(void *data, struct rcu_head *head, unsigned long ip) -{ - trace_mark_tp(rcu, tree_call_rcu_bh, rcu_tree_call_rcu_bh, - probe_rcu_tree_call_rcu_bh, "func %p ip 0x%lX", - head->func, ip); -} -#endif - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("RCU Tracepoint Probes"); diff --git a/discard/probes/syscall-trace.c b/discard/probes/syscall-trace.c deleted file mode 100644 index 9ae419fc..00000000 --- a/discard/probes/syscall-trace.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * ltt/probes/syscall-trace.c - * - * System call tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. 
- */ - -#include -#include - -#include "../ltt-type-serializer.h" - - -/* kernel_syscall_entry specialized tracepoint probe */ - -void probe_syscall_entry(void *_data, struct pt_regs *regs, long id); - -DEFINE_MARKER_TP(kernel, syscall_entry, syscall_entry, - probe_syscall_entry, "ip #p%ld syscall_id #2u%u"); - -notrace void probe_syscall_entry(void *_data, struct pt_regs *regs, long id) -{ - struct marker *marker; - struct serialize_long_short data; - - data.f1 = instruction_pointer(regs); - data.f2 = (unsigned short)id; - - marker = &GET_MARKER(kernel, syscall_entry); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -/* kernel_syscall_exit specialized tracepoint probe */ - -void probe_syscall_exit(void *_data, long ret); - -DEFINE_MARKER_TP(kernel, syscall_exit, syscall_exit, - probe_syscall_exit, "ret %ld"); - -notrace void probe_syscall_exit(void *_data, long ret) -{ - struct marker *marker; - - marker = &GET_MARKER(kernel, syscall_exit); - ltt_specialized_trace(marker, marker->single.probe_private, - &ret, sizeof(ret), sizeof(ret)); -} - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("syscall Tracepoint Probes"); diff --git a/discard/probes/trap-trace.c b/discard/probes/trap-trace.c deleted file mode 100644 index 397254cd..00000000 --- a/discard/probes/trap-trace.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * ltt/probes/trap-trace.c - * - * Trap tracepoint probes. - * - * (C) Copyright 2009 - Mathieu Desnoyers - * Dual LGPL v2.1/GPL v2 license. - */ - -#include -#include - -#include "../ltt-type-serializer.h" - -/* kernel_trap_entry specialized tracepoint probe */ - -void probe_trap_entry(void *_data, struct pt_regs *regs, long id); - -DEFINE_MARKER_TP(kernel, trap_entry, trap_entry, - probe_trap_entry, "ip #p%ld trap_id #2u%u"); - -notrace void probe_trap_entry(void *_data, struct pt_regs *regs, long id) -{ - struct marker *marker; - struct serialize_long_short data; - - if (likely(regs)) - data.f1 = instruction_pointer(regs); - else - data.f1 = 0UL; - data.f2 = (unsigned short)id; - - marker = &GET_MARKER(kernel, trap_entry); - ltt_specialized_trace(marker, marker->single.probe_private, - &data, serialize_sizeof(data), sizeof(long)); -} - -/* kernel_syscall_exit specialized tracepoint probe */ - -void probe_trap_exit(void *_data); - -DEFINE_MARKER_TP(kernel, trap_exit, trap_exit, - probe_trap_exit, MARK_NOARGS); - -notrace void probe_trap_exit(void *_data) -{ - struct marker *marker; - - marker = &GET_MARKER(kernel, trap_exit); - ltt_specialized_trace(marker, marker->single.probe_private, - NULL, 0, 0); -} - -MODULE_LICENSE("GPL and additional rights"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Trap Tracepoint Probes");