-/*
- * Public API and common code for kernel->userspace relay file support.
- *
- * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
- * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
- * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * Moved to kernel/relay.c by Paul Mundt, 2006.
- * November 2006 - CPU hotplug support by Mathieu Desnoyers
- * (mathieu.desnoyers@polymtl.ca)
- *
- * This file is released under the GPL.
- */
-//ust// #include <linux/errno.h>
-//ust// #include <linux/stddef.h>
-//ust// #include <linux/slab.h>
-//ust// #include <linux/module.h>
-//ust// #include <linux/string.h>
-//ust// #include <linux/ltt-relay.h>
-//ust// #include <linux/vmalloc.h>
-//ust// #include <linux/mm.h>
-//ust// #include <linux/cpu.h>
-//ust// #include <linux/splice.h>
-//ust// #include <linux/bitops.h>
-#include <sys/mman.h>
-#include "kernelcompat.h"
-#include "list.h"
-#include "relay.h"
-#include "channels.h"
-#include "kref.h"
-
-/* list of open channels, for cpu hotplug */
-static DEFINE_MUTEX(relay_channels_mutex);
-static LIST_HEAD(relay_channels);
-
-/**
- * relay_alloc_buf - allocate a channel buffer
- * @buf: the buffer struct
- * @size: total size of the buffer
- */
-//ust// static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
-//ust//{
-//ust// unsigned int i, n_pages;
-//ust// struct buf_page *buf_page, *n;
-//ust//
-//ust// *size = PAGE_ALIGN(*size);
-//ust// n_pages = *size >> PAGE_SHIFT;
-//ust//
-//ust// INIT_LIST_HEAD(&buf->pages);
-//ust//
-//ust// for (i = 0; i < n_pages; i++) {
-//ust// buf_page = kmalloc_node(sizeof(*buf_page), GFP_KERNEL,
-//ust// cpu_to_node(buf->cpu));
-//ust// if (unlikely(!buf_page))
-//ust// goto depopulate;
-//ust// buf_page->page = alloc_pages_node(cpu_to_node(buf->cpu),
-//ust// GFP_KERNEL | __GFP_ZERO, 0);
-//ust// if (unlikely(!buf_page->page)) {
-//ust// kfree(buf_page);
-//ust// goto depopulate;
-//ust// }
-//ust// list_add_tail(&buf_page->list, &buf->pages);
-//ust// buf_page->offset = (size_t)i << PAGE_SHIFT;
-//ust// buf_page->buf = buf;
-//ust// set_page_private(buf_page->page, (unsigned long)buf_page);
-//ust// if (i == 0) {
-//ust// buf->wpage = buf_page;
-//ust// buf->hpage[0] = buf_page;
-//ust// buf->hpage[1] = buf_page;
-//ust// buf->rpage = buf_page;
-//ust// }
-//ust// }
-//ust// buf->page_count = n_pages;
-//ust// return 0;
-//ust//
-//ust//depopulate:
-//ust// list_for_each_entry_safe(buf_page, n, &buf->pages, list) {
-//ust// list_del_init(&buf_page->list);
-//ust// __free_page(buf_page->page);
-//ust// kfree(buf_page);
-//ust// }
-//ust// return -ENOMEM;
-//ust//}
-
-static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
-{
- unsigned int n_pages;
- struct buf_page *buf_page, *n;
-
- void *result;
-
- *size = PAGE_ALIGN(*size);
-
- /* Maybe do read-ahead */
- result = mmap(NULL, *size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS, -1, 0);
- if(result == MAP_FAILED) {
- PERROR("mmap");
- return -1;
- }
-
- buf->buf_data = result;
- buf->buf_size = *size;
-
- return 0;
-}
-
-/**
- * relay_create_buf - allocate and initialize a channel buffer
- * @chan: the relay channel
- * @cpu: cpu the buffer belongs to
- *
- * Returns channel buffer if successful, %NULL otherwise.
- */
-static struct rchan_buf *relay_create_buf(struct rchan *chan, int cpu)
-{
- int ret;
- struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
- if (!buf)
- return NULL;
-
-// buf->cpu = cpu;
- ret = relay_alloc_buf(buf, &chan->alloc_size);
- if (ret)
- goto free_buf;
-
- buf->chan = chan;
- kref_get(&buf->chan->kref);
- return buf;
-
-free_buf:
- kfree(buf);
- return NULL;
-}
-
-/**
- * relay_destroy_channel - free the channel struct
- * @kref: target kernel reference that contains the relay channel
- *
- * Should only be called from kref_put().
- */
-static void relay_destroy_channel(struct kref *kref)
-{
- struct rchan *chan = container_of(kref, struct rchan, kref);
- kfree(chan);
-}
-
-/**
- * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
- * @buf: the buffer struct
- */
-static void relay_destroy_buf(struct rchan_buf *buf)
-{
- struct rchan *chan = buf->chan;
- struct buf_page *buf_page, *n;
- int result;
-
- result = munmap(buf->buf_data, buf->buf_size);
- if(result == -1) {
- PERROR("munmap");
-
-//ust// chan->buf[buf->cpu] = NULL;
- kfree(buf);
- kref_put(&chan->kref, relay_destroy_channel);
-}
-
-/**
- * relay_remove_buf - remove a channel buffer
- * @kref: target kernel reference that contains the relay buffer
- *
- * Removes the file from the fileystem, which also frees the
- * rchan_buf_struct and the channel buffer. Should only be called from
- * kref_put().
- */
-static void relay_remove_buf(struct kref *kref)
-{
- struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
- buf->chan->cb->remove_buf_file(buf->dentry);
- relay_destroy_buf(buf);
-}
-
-/*
- * High-level relay kernel API and associated functions.
- */
-
-/*
- * rchan_callback implementations defining default channel behavior. Used
- * in place of corresponding NULL values in client callback struct.
- */
-
-/*
- * create_buf_file_create() default callback. Does nothing.
- */
-static struct dentry *create_buf_file_default_callback(const char *filename,
- struct dentry *parent,
- int mode,
- struct rchan_buf *buf)
-{
- return NULL;
-}
-
-/*
- * remove_buf_file() default callback. Does nothing.
- */
-static int remove_buf_file_default_callback(struct dentry *dentry)
-{
- return -EINVAL;
-}
-
-/* relay channel default callbacks */
-static struct rchan_callbacks default_channel_callbacks = {
- .create_buf_file = create_buf_file_default_callback,
- .remove_buf_file = remove_buf_file_default_callback,
-};
-
-/**
- * wakeup_readers - wake up readers waiting on a channel
- * @data: contains the channel buffer
- *
- * This is the timer function used to defer reader waking.
- */
-static void wakeup_readers(unsigned long data)
-{
- struct rchan_buf *buf = (struct rchan_buf *)data;
- wake_up_interruptible(&buf->read_wait);
-}
-
-/**
- * __relay_reset - reset a channel buffer
- * @buf: the channel buffer
- * @init: 1 if this is a first-time initialization
- *
- * See relay_reset() for description of effect.
- */
-static void __relay_reset(struct rchan_buf *buf, unsigned int init)
-{
- if (init) {
- init_waitqueue_head(&buf->read_wait);
- kref_init(&buf->kref);
- setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
- } else
- del_timer_sync(&buf->timer);
-
- buf->finalized = 0;
-}
-
-/*
- * relay_open_buf - create a new relay channel buffer
- *
- * used by relay_open() and CPU hotplug.
- */
-static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
-{
- struct rchan_buf *buf = NULL;
- struct dentry *dentry;
- char *tmpname;
-
- tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
- if (!tmpname)
- goto end;
- snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
-
- buf = relay_create_buf(chan, cpu);
- if (!buf)
- goto free_name;
-
- __relay_reset(buf, 1);
-
- /* Create file in fs */
-//ust// dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
-//ust// buf);
-//ust// if (!dentry)
-//ust// goto free_buf;
-//ust//
-//ust// buf->dentry = dentry;
-
- goto free_name;
-
-free_buf:
- relay_destroy_buf(buf);
- buf = NULL;
-free_name:
- kfree(tmpname);
-end:
- return buf;
-}
-
-/**
- * relay_close_buf - close a channel buffer
- * @buf: channel buffer
- *
- * Marks the buffer finalized and restores the default callbacks.
- * The channel buffer and channel buffer data structure are then freed
- * automatically when the last reference is given up.
- */
-static void relay_close_buf(struct rchan_buf *buf)
-{
- del_timer_sync(&buf->timer);
- kref_put(&buf->kref, relay_remove_buf);
-}
-
-static void setup_callbacks(struct rchan *chan,
- struct rchan_callbacks *cb)
-{
- if (!cb) {
- chan->cb = &default_channel_callbacks;
- return;
- }
-
- if (!cb->create_buf_file)
- cb->create_buf_file = create_buf_file_default_callback;
- if (!cb->remove_buf_file)
- cb->remove_buf_file = remove_buf_file_default_callback;
- chan->cb = cb;
-}
-
-/**
- * relay_hotcpu_callback - CPU hotplug callback
- * @nb: notifier block
- * @action: hotplug action to take
- * @hcpu: CPU number
- *
- * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-//ust// static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
-//ust// unsigned long action,
-//ust// void *hcpu)
-//ust// {
-//ust// unsigned int hotcpu = (unsigned long)hcpu;
-//ust// struct rchan *chan;
-//ust//
-//ust// switch (action) {
-//ust// case CPU_UP_PREPARE:
-//ust// case CPU_UP_PREPARE_FROZEN:
-//ust// mutex_lock(&relay_channels_mutex);
-//ust// list_for_each_entry(chan, &relay_channels, list) {
-//ust// if (chan->buf[hotcpu])
-//ust// continue;
-//ust// chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
-//ust// if (!chan->buf[hotcpu]) {
-//ust// printk(KERN_ERR
-//ust// "relay_hotcpu_callback: cpu %d buffer "
-//ust// "creation failed\n", hotcpu);
-//ust// mutex_unlock(&relay_channels_mutex);
-//ust// return NOTIFY_BAD;
-//ust// }
-//ust// }
-//ust// mutex_unlock(&relay_channels_mutex);
-//ust// break;
-//ust// case CPU_DEAD:
-//ust// case CPU_DEAD_FROZEN:
-//ust// /* No need to flush the cpu : will be flushed upon
-//ust// * final relay_flush() call. */
-//ust// break;
-//ust// }
-//ust// return NOTIFY_OK;
-//ust// }
-
-/**
- * ltt_relay_open - create a new relay channel
- * @base_filename: base name of files to create
- * @parent: dentry of parent directory, %NULL for root directory
- * @subbuf_size: size of sub-buffers
- * @n_subbufs: number of sub-buffers
- * @cb: client callback functions
- * @private_data: user-defined data
- *
- * Returns channel pointer if successful, %NULL otherwise.
- *
- * Creates a channel buffer for each cpu using the sizes and
- * attributes specified. The created channel buffer files
- * will be named base_filename0...base_filenameN-1. File
- * permissions will be %S_IRUSR.
- */
-struct rchan *ltt_relay_open(const char *base_filename,
- struct dentry *parent,
- size_t subbuf_size,
- size_t n_subbufs,
- struct rchan_callbacks *cb,
- void *private_data)
-{
- unsigned int i;
- struct rchan *chan;
- if (!base_filename)
- return NULL;
-
- if (!(subbuf_size && n_subbufs))
- return NULL;
-
- chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
- if (!chan)
- return NULL;
-
- chan->version = LTT_RELAY_CHANNEL_VERSION;
- chan->n_subbufs = n_subbufs;
- chan->subbuf_size = subbuf_size;
- chan->subbuf_size_order = get_count_order(subbuf_size);
- chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
- chan->parent = parent;
- chan->private_data = private_data;
- strlcpy(chan->base_filename, base_filename, NAME_MAX);
- setup_callbacks(chan, cb);
- kref_init(&chan->kref);
-
- mutex_lock(&relay_channels_mutex);
- for_each_online_cpu(i) {
- chan->buf[i] = relay_open_buf(chan, i);
- if (!chan->buf[i])
- goto free_bufs;
- }
- list_add(&chan->list, &relay_channels);
- mutex_unlock(&relay_channels_mutex);
-
- return chan;
-
-free_bufs:
- for_each_possible_cpu(i) {
- if (!chan->buf[i])
- break;
- relay_close_buf(chan->buf[i]);
- }
-
- kref_put(&chan->kref, relay_destroy_channel);
- mutex_unlock(&relay_channels_mutex);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(ltt_relay_open);
-
-/**
- * ltt_relay_close - close the channel
- * @chan: the channel
- *
- * Closes all channel buffers and frees the channel.
- */
-void ltt_relay_close(struct rchan *chan)
-{
- unsigned int i;
-
- if (!chan)
- return;
-
- mutex_lock(&relay_channels_mutex);
- for_each_possible_cpu(i)
- if (chan->buf[i])
- relay_close_buf(chan->buf[i]);
-
- list_del(&chan->list);
- kref_put(&chan->kref, relay_destroy_channel);
- mutex_unlock(&relay_channels_mutex);
-}
-EXPORT_SYMBOL_GPL(ltt_relay_close);
-
-/*
- * Start iteration at the previous element. Skip the real list head.
- */
-struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
- struct buf_page *page, size_t offset, ssize_t diff_offset)
-{
- struct buf_page *iter;
- size_t orig_iter_off;
- unsigned int i = 0;
-
- orig_iter_off = page->offset;
- list_for_each_entry_reverse(iter, &page->list, list) {
- /*
- * Skip the real list head.
- */
- if (&iter->list == &buf->pages)
- continue;
- i++;
- if (offset >= iter->offset
- && offset < iter->offset + PAGE_SIZE) {
-#ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
- if (i > 1) {
- printk(KERN_WARNING
- "Backward random access detected in "
- "ltt_relay. Iterations %u, "
- "offset %zu, orig iter->off %zu, "
- "iter->off %zu diff_offset %zd.\n", i,
- offset, orig_iter_off, iter->offset,
- diff_offset);
- WARN_ON(1);
- }
-#endif
- return iter;
- }
- }
- WARN_ON(1);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(ltt_relay_find_prev_page);
-
-/*
- * Start iteration at the next element. Skip the real list head.
- */
-struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
- struct buf_page *page, size_t offset, ssize_t diff_offset)
-{
- struct buf_page *iter;
- unsigned int i = 0;
- size_t orig_iter_off;
-
- orig_iter_off = page->offset;
- list_for_each_entry(iter, &page->list, list) {
- /*
- * Skip the real list head.
- */
- if (&iter->list == &buf->pages)
- continue;
- i++;
- if (offset >= iter->offset
- && offset < iter->offset + PAGE_SIZE) {
-#ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
- if (i > 1) {
- printk(KERN_WARNING
- "Forward random access detected in "
- "ltt_relay. Iterations %u, "
- "offset %zu, orig iter->off %zu, "
- "iter->off %zu diff_offset %zd.\n", i,
- offset, orig_iter_off, iter->offset,
- diff_offset);
- WARN_ON(1);
- }
-#endif
- return iter;
- }
- }
- WARN_ON(1);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(ltt_relay_find_next_page);
-
-/**
- * ltt_relay_write - write data to a ltt_relay buffer.
- * @buf : buffer
- * @offset : offset within the buffer
- * @src : source address
- * @len : length to write
- * @page : cached buffer page
- * @pagecpy : page size copied so far
- */
-void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
- const void *src, size_t len, struct buf_page *page, ssize_t pagecpy)
-{
- do {
- len -= pagecpy;
- src += pagecpy;
- offset += pagecpy;
- /*
- * Underlying layer should never ask for writes across
- * subbuffers.
- */
- WARN_ON(offset >= buf->chan->alloc_size);
-
- page = ltt_relay_cache_page(buf, &buf->wpage, page, offset);
- pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
- ltt_relay_do_copy(page_address(page->page)
- + (offset & ~PAGE_MASK), src, pagecpy);
- } while (unlikely(len != pagecpy));
-}
-EXPORT_SYMBOL_GPL(_ltt_relay_write);
-
-/**
- * ltt_relay_read - read data from ltt_relay_buffer.
- * @buf : buffer
- * @offset : offset within the buffer
- * @dest : destination address
- * @len : length to write
- */
-int ltt_relay_read(struct rchan_buf *buf, size_t offset,
- void *dest, size_t len)
-{
- struct buf_page *page;
- ssize_t pagecpy, orig_len;
-
- orig_len = len;
- offset &= buf->chan->alloc_size - 1;
- page = buf->rpage;
- if (unlikely(!len))
- return 0;
- for (;;) {
- page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
- pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
- memcpy(dest, page_address(page->page) + (offset & ~PAGE_MASK),
- pagecpy);
- len -= pagecpy;
- if (likely(!len))
- break;
- dest += pagecpy;
- offset += pagecpy;
- /*
- * Underlying layer should never ask for reads across
- * subbuffers.
- */
- WARN_ON(offset >= buf->chan->alloc_size);
- }
- return orig_len;
-}
-EXPORT_SYMBOL_GPL(ltt_relay_read);
-
-/**
- * ltt_relay_read_get_page - Get a whole page to read from
- * @buf : buffer
- * @offset : offset within the buffer
- */
-//ust// struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf, size_t offset)
-//ust// {
-//ust// struct buf_page *page;
-
-//ust// offset &= buf->chan->alloc_size - 1;
-//ust// page = buf->rpage;
-//ust// page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
-//ust// return page;
-//ust// }
-//ust// EXPORT_SYMBOL_GPL(ltt_relay_read_get_page);
-
-/**
- * ltt_relay_offset_address - get address of a location within the buffer
- * @buf : buffer
- * @offset : offset within the buffer.
- *
- * Return the address where a given offset is located.
- * Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to write directly to this address,
- * as long as the write is never bigger than a page size.
- */
-void *ltt_relay_offset_address(struct rchan_buf *buf, size_t offset)
-{
- struct buf_page *page;
- unsigned int odd;
-
- offset &= buf->chan->alloc_size - 1;
- odd = !!(offset & buf->chan->subbuf_size);
- page = buf->hpage[odd];
- if (offset < page->offset || offset >= page->offset + PAGE_SIZE)
- buf->hpage[odd] = page = buf->wpage;
- page = ltt_relay_cache_page(buf, &buf->hpage[odd], page, offset);
- return page_address(page->page) + (offset & ~PAGE_MASK);
-}
-//ust// EXPORT_SYMBOL_GPL(ltt_relay_offset_address);
-
-/**
- * relay_file_open - open file op for relay files
- * @inode: the inode
- * @filp: the file
- *
- * Increments the channel buffer refcount.
- */
-//ust// static int relay_file_open(struct inode *inode, struct file *filp)
-//ust// {
-//ust// struct rchan_buf *buf = inode->i_private;
-//ust// kref_get(&buf->kref);
-//ust// filp->private_data = buf;
-//ust//
-//ust// return nonseekable_open(inode, filp);
-//ust// }
-
-/**
- * relay_file_release - release file op for relay files
- * @inode: the inode
- * @filp: the file
- *
- * Decrements the channel refcount, as the filesystem is
- * no longer using it.
- */
-static int relay_file_release(struct inode *inode, struct file *filp)
-{
- struct rchan_buf *buf = filp->private_data;
- kref_put(&buf->kref, relay_remove_buf);
-
- return 0;
-}
-
-//ust// const struct file_operations ltt_relay_file_operations = {
-//ust// .open = relay_file_open,
-//ust// .release = relay_file_release,
-//ust// };
-//ust// EXPORT_SYMBOL_GPL(ltt_relay_file_operations);
-
-//ust// static __init int relay_init(void)
-//ust// {
-//ust// hotcpu_notifier(relay_hotcpu_callback, 5);
-//ust// return 0;
-//ust// }
-
-//ust// module_init(relay_init);
+/*
+ * Public API and common code for kernel->userspace relay file support.
+ *
+ * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
+ * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
+ * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * Moved to kernel/relay.c by Paul Mundt, 2006.
+ * November 2006 - CPU hotplug support by Mathieu Desnoyers
+ * (mathieu.desnoyers@polymtl.ca)
+ *
+ * This file is released under the GPL.
+ */
+//ust// #include <linux/errno.h>
+//ust// #include <linux/stddef.h>
+//ust// #include <linux/slab.h>
+//ust// #include <linux/module.h>
+//ust// #include <linux/string.h>
+//ust// #include <linux/ltt-relay.h>
+//ust// #include <linux/vmalloc.h>
+//ust// #include <linux/mm.h>
+//ust// #include <linux/cpu.h>
+//ust// #include <linux/splice.h>
+//ust// #include <linux/bitops.h>
+#include <sys/mman.h>
+#include "kernelcompat.h"
+#include "list.h"
+#include "relay.h"
+#include "channels.h"
+#include "kref.h"
+#include "tracer.h"
+#include "tracercore.h"
+#include "usterr.h"
+
+/* list of open channels, for cpu hotplug */
+static DEFINE_MUTEX(relay_channels_mutex);
+static LIST_HEAD(relay_channels);
+
+/**
+ * relay_alloc_buf - allocate a channel buffer
+ * @buf: the buffer struct
+ * @size: total size of the buffer
+ */
+//ust// static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
+//ust//{
+//ust// unsigned int i, n_pages;
+//ust// struct buf_page *buf_page, *n;
+//ust//
+//ust// *size = PAGE_ALIGN(*size);
+//ust// n_pages = *size >> PAGE_SHIFT;
+//ust//
+//ust// INIT_LIST_HEAD(&buf->pages);
+//ust//
+//ust// for (i = 0; i < n_pages; i++) {
+//ust// buf_page = kmalloc_node(sizeof(*buf_page), GFP_KERNEL,
+//ust// cpu_to_node(buf->cpu));
+//ust// if (unlikely(!buf_page))
+//ust// goto depopulate;
+//ust// buf_page->page = alloc_pages_node(cpu_to_node(buf->cpu),
+//ust// GFP_KERNEL | __GFP_ZERO, 0);
+//ust// if (unlikely(!buf_page->page)) {
+//ust// kfree(buf_page);
+//ust// goto depopulate;
+//ust// }
+//ust// list_add_tail(&buf_page->list, &buf->pages);
+//ust// buf_page->offset = (size_t)i << PAGE_SHIFT;
+//ust// buf_page->buf = buf;
+//ust// set_page_private(buf_page->page, (unsigned long)buf_page);
+//ust// if (i == 0) {
+//ust// buf->wpage = buf_page;
+//ust// buf->hpage[0] = buf_page;
+//ust// buf->hpage[1] = buf_page;
+//ust// buf->rpage = buf_page;
+//ust// }
+//ust// }
+//ust// buf->page_count = n_pages;
+//ust// return 0;
+//ust//
+//ust//depopulate:
+//ust// list_for_each_entry_safe(buf_page, n, &buf->pages, list) {
+//ust// list_del_init(&buf_page->list);
+//ust// __free_page(buf_page->page);
+//ust// kfree(buf_page);
+//ust// }
+//ust// return -ENOMEM;
+//ust//}
+
+static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
+{
+ void *result;
+
+ *size = PAGE_ALIGN(*size);
+
+ /* Maybe do read-ahead */
+ result = mmap(NULL, *size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (result == MAP_FAILED) {
+ PERROR("mmap");
+ return -1;
+ }
+
+ buf->buf_data = result;
+ buf->buf_size = *size;
+
+ return 0;
+}
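+
+/*
+ * Porting note: the kernel version above built the buffer from a list of
+ * individually allocated pages; this userspace port maps one contiguous
+ * anonymous region instead, so buffer access reduces to pointer
+ * arithmetic. A minimal sketch of a wrapped access (assuming buf_size is
+ * a power of two, which the FIX_SIZE/PAGE_ALIGN setup is expected to
+ * guarantee):
+ *
+ *	memcpy(buf->buf_data + (offset & (buf->buf_size - 1)), src, len);
+ */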
+
+/**
+ * relay_create_buf - allocate and initialize a channel buffer
+ * @chan: the relay channel
+ *
+ * Returns channel buffer if successful, %NULL otherwise.
+ */
+static struct rchan_buf *relay_create_buf(struct rchan *chan)
+{
+ int ret;
+ struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+//ust// buf->cpu = cpu;
+ ret = relay_alloc_buf(buf, &chan->alloc_size);
+ if (ret)
+ goto free_buf;
+
+ buf->chan = chan;
+ kref_get(&buf->chan->kref);
+ return buf;
+
+free_buf:
+ kfree(buf);
+ return NULL;
+}
+
+/**
+ * relay_destroy_channel - free the channel struct
+ * @kref: target kernel reference that contains the relay channel
+ *
+ * Should only be called from kref_put().
+ */
+static void relay_destroy_channel(struct kref *kref)
+{
+ struct rchan *chan = container_of(kref, struct rchan, kref);
+ kfree(chan);
+}
+
+/**
+ * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
+ * @buf: the buffer struct
+ */
+static void relay_destroy_buf(struct rchan_buf *buf)
+{
+ struct rchan *chan = buf->chan;
+//ust// struct buf_page *buf_page, *n;
+ int result;
+
+ result = munmap(buf->buf_data, buf->buf_size);
+ if (result == -1) {
+ PERROR("munmap");
+ }
+
+//ust// chan->buf[buf->cpu] = NULL;
+ kfree(buf);
+ kref_put(&chan->kref, relay_destroy_channel);
+}
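+
+/*
+ * Teardown mirrors relay_alloc_buf(): munmap() drops the mapping, and
+ * the channel kref is released last so the rchan struct is guaranteed to
+ * outlive every buffer that still references it.
+ */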
+
+/**
+ * relay_remove_buf - remove a channel buffer
+ * @kref: target kernel reference that contains the relay buffer
+ *
+ * Removes the file from the filesystem, which also frees the
+ * rchan_buf struct and the channel buffer. Should only be called from
+ * kref_put().
+ */
+static void relay_remove_buf(struct kref *kref)
+{
+ struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
+ buf->chan->cb->remove_buf_file(buf);
+ relay_destroy_buf(buf);
+}
+
+/*
+ * High-level relay kernel API and associated functions.
+ */
+
+/*
+ * rchan_callback implementations defining default channel behavior. Used
+ * in place of corresponding NULL values in client callback struct.
+ */
+
+/*
+ * create_buf_file() default callback. Does nothing.
+ */
+static struct dentry *create_buf_file_default_callback(const char *filename,
+ struct dentry *parent,
+ int mode,
+ struct rchan_buf *buf)
+{
+ return NULL;
+}
+
+/*
+ * remove_buf_file() default callback. Does nothing.
+ */
+static int remove_buf_file_default_callback(struct rchan_buf *buf)
+{
+ return -EINVAL;
+}
+
+/**
+ * wakeup_readers - wake up readers waiting on a channel
+ * @data: contains the channel buffer
+ *
+ * This is the timer function used to defer reader waking.
+ */
+//ust// static void wakeup_readers(unsigned long data)
+//ust// {
+//ust// struct rchan_buf *buf = (struct rchan_buf *)data;
+//ust// wake_up_interruptible(&buf->read_wait);
+//ust// }
+
+/**
+ * __relay_reset - reset a channel buffer
+ * @buf: the channel buffer
+ * @init: 1 if this is a first-time initialization
+ *
+ * See relay_reset() for description of effect.
+ */
+static void __relay_reset(struct rchan_buf *buf, unsigned int init)
+{
+ if (init) {
+//ust// init_waitqueue_head(&buf->read_wait);
+ kref_init(&buf->kref);
+//ust// setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
+ }
+//ust// else
+//ust// del_timer_sync(&buf->timer);
+
+ buf->finalized = 0;
+}
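+
+/*
+ * Note: with the kernel wait queue and the deferred-wakeup timer
+ * compiled out of this port, a reset only (re)initializes the buffer
+ * kref and clears the finalized flag; reader wakeup is handled
+ * elsewhere.
+ */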
+
+/*
+ * relay_open_buf - create a new relay channel buffer
+ *
+ * used by relay_open() and CPU hotplug.
+ */
+static struct rchan_buf *relay_open_buf(struct rchan *chan)
+{
+ struct rchan_buf *buf = NULL;
+//ust// struct dentry *dentry;
+//ust// char *tmpname;
+
+//ust// tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
+//ust// if (!tmpname)
+//ust// goto end;
+//ust// snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
+
+ buf = relay_create_buf(chan);
+ if (!buf)
+ goto free_name;
+
+ __relay_reset(buf, 1);
+
+ /* Create file in fs */
+//ust// dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
+//ust// buf);
+//ust// if (!dentry)
+//ust// goto free_buf;
+//ust//
+//ust// buf->dentry = dentry;
+
+ goto free_name;
+
+//ust//free_buf:
+//ust// relay_destroy_buf(buf);
+//ust// buf = NULL;
+free_name:
+//ust// kfree(tmpname);
+//ust//end:
+ return buf;
+}
+
+/**
+ * relay_close_buf - close a channel buffer
+ * @buf: channel buffer
+ *
+ * Marks the buffer finalized and restores the default callbacks.
+ * The channel buffer and channel buffer data structure are then freed
+ * automatically when the last reference is given up.
+ */
+static void relay_close_buf(struct rchan_buf *buf)
+{
+//ust// del_timer_sync(&buf->timer);
+ kref_put(&buf->kref, relay_remove_buf);
+}
+
+//ust// static void setup_callbacks(struct rchan *chan,
+//ust// struct rchan_callbacks *cb)
+//ust// {
+//ust// if (!cb) {
+//ust// chan->cb = &default_channel_callbacks;
+//ust// return;
+//ust// }
+//ust//
+//ust// if (!cb->create_buf_file)
+//ust// cb->create_buf_file = create_buf_file_default_callback;
+//ust// if (!cb->remove_buf_file)
+//ust// cb->remove_buf_file = remove_buf_file_default_callback;
+//ust// chan->cb = cb;
+//ust// }
+
+/**
+ * relay_hotcpu_callback - CPU hotplug callback
+ * @nb: notifier block
+ * @action: hotplug action to take
+ * @hcpu: CPU number
+ *
+ * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
+ */
+//ust// static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
+//ust// unsigned long action,
+//ust// void *hcpu)
+//ust// {
+//ust// unsigned int hotcpu = (unsigned long)hcpu;
+//ust// struct rchan *chan;
+//ust//
+//ust// switch (action) {
+//ust// case CPU_UP_PREPARE:
+//ust// case CPU_UP_PREPARE_FROZEN:
+//ust// mutex_lock(&relay_channels_mutex);
+//ust// list_for_each_entry(chan, &relay_channels, list) {
+//ust// if (chan->buf[hotcpu])
+//ust// continue;
+//ust// chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
+//ust// if (!chan->buf[hotcpu]) {
+//ust// printk(KERN_ERR
+//ust// "relay_hotcpu_callback: cpu %d buffer "
+//ust// "creation failed\n", hotcpu);
+//ust// mutex_unlock(&relay_channels_mutex);
+//ust// return NOTIFY_BAD;
+//ust// }
+//ust// }
+//ust// mutex_unlock(&relay_channels_mutex);
+//ust// break;
+//ust// case CPU_DEAD:
+//ust// case CPU_DEAD_FROZEN:
+//ust// /* No need to flush the cpu : will be flushed upon
+//ust// * final relay_flush() call. */
+//ust// break;
+//ust// }
+//ust// return NOTIFY_OK;
+//ust// }
+
+/**
+ * ltt_relay_open - create a new relay channel
+ * @base_filename: base name of files to create
+ * @parent: dentry of parent directory, %NULL for root directory
+ * @subbuf_size: size of sub-buffers
+ * @n_subbufs: number of sub-buffers
+ * @private_data: user-defined data
+ *
+ * Returns channel pointer if successful, %NULL otherwise.
+ *
+ * Creates the channel buffer using the sizes and attributes specified.
+ * In this userspace port a single buffer is created per channel, rather
+ * than one per cpu, and buffer file creation is currently disabled.
+ */
+struct rchan *ltt_relay_open(const char *base_filename,
+ struct dentry *parent,
+ size_t subbuf_size,
+ size_t n_subbufs,
+ void *private_data)
+{
+//ust// unsigned int i;
+ struct rchan *chan;
+//ust// if (!base_filename)
+//ust// return NULL;
+
+ if (!(subbuf_size && n_subbufs))
+ return NULL;
+
+ chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
+ if (!chan)
+ return NULL;
+
+ chan->version = LTT_RELAY_CHANNEL_VERSION;
+ chan->n_subbufs = n_subbufs;
+ chan->subbuf_size = subbuf_size;
+ chan->subbuf_size_order = get_count_order(subbuf_size);
+ chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
+ chan->parent = parent;
+ chan->private_data = private_data;
+//ust// strlcpy(chan->base_filename, base_filename, NAME_MAX);
+//ust// setup_callbacks(chan, cb);
+ kref_init(&chan->kref);
+
+ mutex_lock(&relay_channels_mutex);
+//ust// for_each_online_cpu(i) {
+ chan->buf = relay_open_buf(chan);
+ if (!chan->buf)
+ goto error;
+//ust// }
+ list_add(&chan->list, &relay_channels);
+ mutex_unlock(&relay_channels_mutex);
+
+ return chan;
+
+//ust//free_bufs:
+//ust// for_each_possible_cpu(i) {
+//ust// if (!chan->buf[i])
+//ust// break;
+//ust// relay_close_buf(chan->buf[i]);
+//ust// }
+
+error:
+ kref_put(&chan->kref, relay_destroy_channel);
+ mutex_unlock(&relay_channels_mutex);
+ return NULL;
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_open);
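+
+/*
+ * Usage sketch (hypothetical caller; the channel name and sizes below
+ * are illustrative, not taken from this file):
+ *
+ *	struct rchan *rchan;
+ *
+ *	rchan = ltt_relay_open("cpu", NULL, 4096, 8, ltt_chan);
+ *	if (!rchan)
+ *		return -ENOMEM;
+ *	...
+ *	ltt_relay_close(rchan);
+ *
+ * subbuf_size and n_subbufs should be powers of two to match the
+ * get_count_order()/FIX_SIZE arithmetic above.
+ */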
+
+/**
+ * ltt_relay_close - close the channel
+ * @chan: the channel
+ *
+ * Closes all channel buffers and frees the channel.
+ */
+void ltt_relay_close(struct rchan *chan)
+{
+//ust// unsigned int i;
+
+ if (!chan)
+ return;
+
+ mutex_lock(&relay_channels_mutex);
+//ust// for_each_possible_cpu(i)
+ if (chan->buf)
+ relay_close_buf(chan->buf);
+
+ list_del(&chan->list);
+ kref_put(&chan->kref, relay_destroy_channel);
+ mutex_unlock(&relay_channels_mutex);
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_close);
+
+/*
+ * Start iteration at the previous element. Skip the real list head.
+ */
+//ust// struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
+//ust// struct buf_page *page, size_t offset, ssize_t diff_offset)
+//ust// {
+//ust// struct buf_page *iter;
+//ust// size_t orig_iter_off;
+//ust// unsigned int i = 0;
+//ust//
+//ust// orig_iter_off = page->offset;
+//ust// list_for_each_entry_reverse(iter, &page->list, list) {
+//ust// /*
+//ust// * Skip the real list head.
+//ust// */
+//ust// if (&iter->list == &buf->pages)
+//ust// continue;
+//ust// i++;
+//ust// if (offset >= iter->offset
+//ust// && offset < iter->offset + PAGE_SIZE) {
+//ust// #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
+//ust// if (i > 1) {
+//ust// printk(KERN_WARNING
+//ust// "Backward random access detected in "
+//ust// "ltt_relay. Iterations %u, "
+//ust// "offset %zu, orig iter->off %zu, "
+//ust// "iter->off %zu diff_offset %zd.\n", i,
+//ust// offset, orig_iter_off, iter->offset,
+//ust// diff_offset);
+//ust// WARN_ON(1);
+//ust// }
+//ust// #endif
+//ust// return iter;
+//ust// }
+//ust// }
+//ust// WARN_ON(1);
+//ust// return NULL;
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_find_prev_page);
+
+/*
+ * Start iteration at the next element. Skip the real list head.
+ */
+//ust// struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
+//ust// struct buf_page *page, size_t offset, ssize_t diff_offset)
+//ust// {
+//ust// struct buf_page *iter;
+//ust// unsigned int i = 0;
+//ust// size_t orig_iter_off;
+//ust//
+//ust// orig_iter_off = page->offset;
+//ust// list_for_each_entry(iter, &page->list, list) {
+//ust// /*
+//ust// * Skip the real list head.
+//ust// */
+//ust// if (&iter->list == &buf->pages)
+//ust// continue;
+//ust// i++;
+//ust// if (offset >= iter->offset
+//ust// && offset < iter->offset + PAGE_SIZE) {
+//ust// #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
+//ust// if (i > 1) {
+//ust// printk(KERN_WARNING
+//ust// "Forward random access detected in "
+//ust// "ltt_relay. Iterations %u, "
+//ust// "offset %zu, orig iter->off %zu, "
+//ust// "iter->off %zu diff_offset %zd.\n", i,
+//ust// offset, orig_iter_off, iter->offset,
+//ust// diff_offset);
+//ust// WARN_ON(1);
+//ust// }
+//ust// #endif
+//ust// return iter;
+//ust// }
+//ust// }
+//ust// WARN_ON(1);
+//ust// return NULL;
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_find_next_page);
+
+/**
+ * ltt_relay_write - write data to a ltt_relay buffer.
+ * @buf : buffer
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @cpy : number of bytes already copied by the caller's fast path
+ */
+void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
+ const void *src, size_t len, ssize_t cpy)
+{
+ do {
+ len -= cpy;
+ src += cpy;
+ offset += cpy;
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ WARN_ON(offset >= buf->buf_size);
+
+ cpy = min_t(size_t, len, buf->buf_size - offset);
+ ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
+ } while (unlikely(len != cpy));
+}
+//ust// EXPORT_SYMBOL_GPL(_ltt_relay_write);
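+
+/*
+ * The loop above is the slow path: on entry, @cpy bytes are assumed to
+ * have already been copied by the caller's inline fast path, and the
+ * loop finishes the remainder inside the flat mapping. A sketch of the
+ * assumed calling convention:
+ *
+ *	cpy = min_t(size_t, len, buf->buf_size - offset);
+ *	ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
+ *	if (unlikely(len != cpy))
+ *		_ltt_relay_write(buf, offset, src, len, cpy);
+ */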
+
+/**
+ * ltt_relay_read - read data from ltt_relay_buffer.
+ * @buf : buffer
+ * @offset : offset within the buffer
+ * @dest : destination address
+ * @len : length to read
+ */
+//ust// int ltt_relay_read(struct rchan_buf *buf, size_t offset,
+//ust// void *dest, size_t len)
+//ust// {
+//ust// struct buf_page *page;
+//ust// ssize_t pagecpy, orig_len;
+//ust//
+//ust// orig_len = len;
+//ust// offset &= buf->chan->alloc_size - 1;
+//ust// page = buf->rpage;
+//ust// if (unlikely(!len))
+//ust// return 0;
+//ust// for (;;) {
+//ust// page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
+//ust// pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+//ust// memcpy(dest, page_address(page->page) + (offset & ~PAGE_MASK),
+//ust// pagecpy);
+//ust// len -= pagecpy;
+//ust// if (likely(!len))
+//ust// break;
+//ust// dest += pagecpy;
+//ust// offset += pagecpy;
+//ust// /*
+//ust// * Underlying layer should never ask for reads across
+//ust// * subbuffers.
+//ust// */
+//ust// WARN_ON(offset >= buf->chan->alloc_size);
+//ust// }
+//ust// return orig_len;
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_read);
+
+/**
+ * ltt_relay_read_get_page - Get a whole page to read from
+ * @buf : buffer
+ * @offset : offset within the buffer
+ */
+//ust// struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf, size_t offset)
+//ust// {
+//ust// struct buf_page *page;
+
+//ust// offset &= buf->chan->alloc_size - 1;
+//ust// page = buf->rpage;
+//ust// page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
+//ust// return page;
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_read_get_page);
+
+/**
+ * ltt_relay_offset_address - get address of a location within the buffer
+ * @buf : buffer
+ * @offset : offset within the buffer.
+ *
+ * Return the address where a given offset is located.
+ * Should be used to get the current subbuffer header pointer. Given we know
+ * it's never on a page boundary, it's safe to write directly to this address,
+ * as long as the write is never bigger than a page size.
+ */
+void *ltt_relay_offset_address(struct rchan_buf *buf, size_t offset)
+{
+//ust// struct buf_page *page;
+//ust// unsigned int odd;
+//ust//
+//ust// offset &= buf->chan->alloc_size - 1;
+//ust// odd = !!(offset & buf->chan->subbuf_size);
+//ust// page = buf->hpage[odd];
+//ust// if (offset < page->offset || offset >= page->offset + PAGE_SIZE)
+//ust// buf->hpage[odd] = page = buf->wpage;
+//ust// page = ltt_relay_cache_page(buf, &buf->hpage[odd], page, offset);
+//ust// return page_address(page->page) + (offset & ~PAGE_MASK);
+ /* Flat mapping: compute the address directly within the buffer. */
+ return buf->buf_data + (offset & (buf->buf_size - 1));
+}
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_offset_address);
+
+/**
+ * relay_file_open - open file op for relay files
+ * @inode: the inode
+ * @filp: the file
+ *
+ * Increments the channel buffer refcount.
+ */
+//ust// static int relay_file_open(struct inode *inode, struct file *filp)
+//ust// {
+//ust// struct rchan_buf *buf = inode->i_private;
+//ust// kref_get(&buf->kref);
+//ust// filp->private_data = buf;
+//ust//
+//ust// return nonseekable_open(inode, filp);
+//ust// }
+
+/**
+ * relay_file_release - release file op for relay files
+ * @inode: the inode
+ * @filp: the file
+ *
+ * Decrements the channel refcount, as the filesystem is
+ * no longer using it.
+ */
+//ust// static int relay_file_release(struct inode *inode, struct file *filp)
+//ust// {
+//ust// struct rchan_buf *buf = filp->private_data;
+//ust// kref_put(&buf->kref, relay_remove_buf);
+//ust//
+//ust// return 0;
+//ust// }
+
+//ust// const struct file_operations ltt_relay_file_operations = {
+//ust// .open = relay_file_open,
+//ust// .release = relay_file_release,
+//ust// };
+//ust// EXPORT_SYMBOL_GPL(ltt_relay_file_operations);
+
+//ust// static __init int relay_init(void)
+//ust// {
+//ust// hotcpu_notifier(relay_hotcpu_callback, 5);
+//ust// return 0;
+//ust// }
+
+//ust// module_init(relay_init);
/*
* ltt/ltt-relay.c
*
* }
*/
-#include <linux/time.h>
-#include <linux/ltt-tracer.h>
-#include <linux/ltt-relay.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/rcupdate.h>
-#include <linux/sched.h>
-#include <linux/bitops.h>
-#include <linux/fs.h>
-#include <linux/smp_lock.h>
-#include <linux/debugfs.h>
-#include <linux/stat.h>
-#include <linux/cpu.h>
-#include <linux/pipe_fs_i.h>
-#include <linux/splice.h>
-#include <asm/atomic.h>
-#include <asm/local.h>
+//ust// #include <linux/time.h>
+//ust// #include <linux/ltt-tracer.h>
+//ust// #include <linux/ltt-relay.h>
+//ust// #include <linux/module.h>
+//ust// #include <linux/string.h>
+//ust// #include <linux/slab.h>
+//ust// #include <linux/init.h>
+//ust// #include <linux/rcupdate.h>
+//ust// #include <linux/sched.h>
+//ust// #include <linux/bitops.h>
+//ust// #include <linux/fs.h>
+//ust// #include <linux/smp_lock.h>
+//ust// #include <linux/debugfs.h>
+//ust// #include <linux/stat.h>
+//ust// #include <linux/cpu.h>
+//ust// #include <linux/pipe_fs_i.h>
+//ust// #include <linux/splice.h>
+//ust// #include <asm/atomic.h>
+//ust// #include <asm/local.h>
#if 0
#define printk_dbg(fmt, args...) printk(fmt, args)
* for userspace tracing blocking mode
* synchronization with reader.
*/
- wait_queue_head_t write_wait; /*
- * Wait queue for blocking user space
- * writers
- */
+//ust// wait_queue_head_t write_wait; /*
+//ust// * Wait queue for blocking user space
+//ust// * writers
+//ust// */
atomic_t wakeup_readers; /* Boolean : wakeup readers waiting ? */
} ____cacheline_aligned;
static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
struct ltt_channel_struct *ltt_chan,
struct rchan_buf *buf,
- unsigned int cpu,
unsigned int n_subbufs);
-static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan,
- unsigned int cpu);
+static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan);
static void ltt_force_switch(struct rchan_buf *buf,
enum force_switch_mode mode);
{
struct ltt_channel_struct *channel =
(struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(channel->buf, buf->cpu);
+ struct ltt_channel_buf_struct *ltt_buf = channel->buf;
struct ltt_subbuffer_header *header =
(struct ltt_subbuffer_header *)
ltt_relay_offset_address(buf,
header->lost_size = SUBBUF_OFFSET((buf->chan->subbuf_size - offset),
buf->chan);
header->cycle_count_end = tsc;
- header->events_lost = local_read(&ltt_buf->events_lost);
- header->subbuf_corrupt = local_read(&ltt_buf->corrupted_subbuffers);
+ header->events_lost = ltt_buf->events_lost;
+ header->subbuf_corrupt = ltt_buf->corrupted_subbuffers;
}
static notrace void ltt_deliver(struct rchan_buf *buf, unsigned int subbuf_idx,
{
struct ltt_channel_struct *channel =
(struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(channel->buf, buf->cpu);
+ struct ltt_channel_buf_struct *ltt_buf = channel->buf;
atomic_set(&ltt_buf->wakeup_readers, 1);
}
//ust// struct dentry *dentry;
ltt_chan = buf->chan->private_data;
- err = ltt_relay_create_buffer(ltt_chan->trace, ltt_chan,
- buf, buf->cpu,
- buf->chan->n_subbufs);
+ err = ltt_relay_create_buffer(ltt_chan->trace, ltt_chan, buf, buf->chan->n_subbufs);
if (err)
return ERR_PTR(err);
//ust// goto error;
//ust// return dentry;
//ust//error:
- ltt_relay_destroy_buffer(ltt_chan, buf->cpu);
+ ltt_relay_destroy_buffer(ltt_chan);
return NULL;
}
-static int ltt_remove_buf_file_callback(struct dentry *dentry)
+static int ltt_remove_buf_file_callback(struct rchan_buf *buf)
{
- struct rchan_buf *buf = dentry->d_inode->i_private;
+//ust// struct rchan_buf *buf = dentry->d_inode->i_private;
struct ltt_channel_struct *ltt_chan = buf->chan->private_data;
//ust// debugfs_remove(dentry);
- ltt_relay_destroy_buffer(ltt_chan, buf->cpu);
+ ltt_relay_destroy_buffer(ltt_chan);
return 0;
}
* This must be done after the trace is removed from the RCU list so that there
* are no stalled writers.
*/
-static void ltt_relay_wake_writers(struct ltt_channel_buf_struct *ltt_buf)
-{
-
- if (waitqueue_active(&ltt_buf->write_wait))
- wake_up_interruptible(&ltt_buf->write_wait);
-}
+//ust// static void ltt_relay_wake_writers(struct ltt_channel_buf_struct *ltt_buf)
+//ust// {
+//ust//
+//ust// if (waitqueue_active(&ltt_buf->write_wait))
+//ust// wake_up_interruptible(&ltt_buf->write_wait);
+//ust// }
/*
* This function should not be called from NMI interrupt context
unsigned int subbuf_idx,
long offset)
{
- struct ltt_channel_struct *ltt_channel =
- (struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, buf->cpu);
-
- ltt_relay_wake_writers(ltt_buf);
+//ust// struct ltt_channel_struct *ltt_channel =
+//ust// (struct ltt_channel_struct *)buf->chan->private_data;
+//ust// struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
+//ust//
+//ust// ltt_relay_wake_writers(ltt_buf);
}
/**
* Open implementation. Makes sure only one open instance of a buffer is
* done at a given moment.
*/
-static int ltt_open(struct inode *inode, struct file *file)
-{
- struct rchan_buf *buf = inode->i_private;
- struct ltt_channel_struct *ltt_channel =
- (struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, buf->cpu);
-
- if (!atomic_long_add_unless(&ltt_buf->active_readers, 1, 1))
- return -EBUSY;
- return ltt_relay_file_operations.open(inode, file);
-}
+//ust// static int ltt_open(struct inode *inode, struct file *file)
+//ust// {
+//ust// struct rchan_buf *buf = inode->i_private;
+//ust// struct ltt_channel_struct *ltt_channel =
+//ust// (struct ltt_channel_struct *)buf->chan->private_data;
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
+//ust//
+//ust// if (!atomic_long_add_unless(&ltt_buf->active_readers, 1, 1))
+//ust// return -EBUSY;
+//ust// return ltt_relay_file_operations.open(inode, file);
+//ust// }
/**
* ltt_release - release file op for ltt files
*
* Release implementation.
*/
-static int ltt_release(struct inode *inode, struct file *file)
-{
- struct rchan_buf *buf = inode->i_private;
- struct ltt_channel_struct *ltt_channel =
- (struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, buf->cpu);
- int ret;
-
- WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
- atomic_long_dec(&ltt_buf->active_readers);
- ret = ltt_relay_file_operations.release(inode, file);
- WARN_ON(ret);
- return ret;
-}
+//ust// static int ltt_release(struct inode *inode, struct file *file)
+//ust// {
+//ust// struct rchan_buf *buf = inode->i_private;
+//ust// struct ltt_channel_struct *ltt_channel =
+//ust// (struct ltt_channel_struct *)buf->chan->private_data;
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
+//ust// int ret;
+//ust//
+//ust// WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
+//ust// atomic_long_dec(&ltt_buf->active_readers);
+//ust// ret = ltt_relay_file_operations.release(inode, file);
+//ust// WARN_ON(ret);
+//ust// return ret;
+//ust// }
/**
* ltt_poll - file op for ltt files
*
* Poll implementation.
*/
-static unsigned int ltt_poll(struct file *filp, poll_table *wait)
-{
- unsigned int mask = 0;
- struct inode *inode = filp->f_dentry->d_inode;
- struct rchan_buf *buf = inode->i_private;
- struct ltt_channel_struct *ltt_channel =
- (struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, buf->cpu);
-
- if (filp->f_mode & FMODE_READ) {
- poll_wait_set_exclusive(wait);
- poll_wait(filp, &buf->read_wait, wait);
-
- WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
- if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
- buf->chan)
- - SUBBUF_TRUNC(atomic_long_read(&ltt_buf->consumed),
- buf->chan)
- == 0) {
- if (buf->finalized)
- return POLLHUP;
- else
- return 0;
- } else {
- struct rchan *rchan =
- ltt_channel->trans_channel_data;
- if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
- buf->chan)
- - SUBBUF_TRUNC(atomic_long_read(
- &ltt_buf->consumed),
- buf->chan)
- >= rchan->alloc_size)
- return POLLPRI | POLLRDBAND;
- else
- return POLLIN | POLLRDNORM;
- }
- }
- return mask;
-}
+//ust// static unsigned int ltt_poll(struct file *filp, poll_table *wait)
+//ust// {
+//ust// unsigned int mask = 0;
+//ust// struct inode *inode = filp->f_dentry->d_inode;
+//ust// struct rchan_buf *buf = inode->i_private;
+//ust// struct ltt_channel_struct *ltt_channel =
+//ust// (struct ltt_channel_struct *)buf->chan->private_data;
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
+//ust//
+//ust// if (filp->f_mode & FMODE_READ) {
+//ust// poll_wait_set_exclusive(wait);
+//ust// poll_wait(filp, &buf->read_wait, wait);
+//ust//
+//ust// WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
+//ust// if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
+//ust// buf->chan)
+//ust// - SUBBUF_TRUNC(atomic_long_read(&ltt_buf->consumed),
+//ust// buf->chan)
+//ust// == 0) {
+//ust// if (buf->finalized)
+//ust// return POLLHUP;
+//ust// else
+//ust// return 0;
+//ust// } else {
+//ust// struct rchan *rchan =
+//ust// ltt_channel->trans_channel_data;
+//ust// if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
+//ust// buf->chan)
+//ust// - SUBBUF_TRUNC(atomic_long_read(
+//ust// &ltt_buf->consumed),
+//ust// buf->chan)
+//ust// >= rchan->alloc_size)
+//ust// return POLLPRI | POLLRDBAND;
+//ust// else
+//ust// return POLLIN | POLLRDNORM;
+//ust// }
+//ust// }
+//ust// return mask;
+//ust// }
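+
+/*
+ * Reader-side note: ltt_do_get_subbuf() below samples the consumed
+ * count, verifies via the matching sub-buffer's commit count that the
+ * writer has fully committed it, and hands the consumed offset back
+ * through *pconsumed_old so that the release step can detect whether
+ * the data was overwritten in flight.
+ */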
static int ltt_do_get_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, long *pconsumed_old)
{
+ struct ltt_channel_struct *ltt_channel = (struct ltt_channel_struct *)buf->chan->private_data;
long consumed_old, consumed_idx, commit_count, write_offset;
consumed_old = atomic_long_read(&ltt_buf->consumed);
consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
/*
* subbuf_splice_actor - splice up to one subbuf's worth of data
*/
-static int subbuf_splice_actor(struct file *in,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len,
- unsigned int flags)
-{
- struct rchan_buf *buf = in->private_data;
- struct ltt_channel_struct *ltt_channel =
- (struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, buf->cpu);
- unsigned int poff, subbuf_pages, nr_pages;
- struct page *pages[PIPE_BUFFERS];
- struct partial_page partial[PIPE_BUFFERS];
- struct splice_pipe_desc spd = {
- .pages = pages,
- .nr_pages = 0,
- .partial = partial,
- .flags = flags,
- .ops = &ltt_relay_pipe_buf_ops,
- .spd_release = ltt_relay_page_release,
- };
- long consumed_old, consumed_idx, roffset;
- unsigned long bytes_avail;
-
- /*
- * Check that a GET_SUBBUF ioctl has been done before.
- */
- WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
- consumed_old = atomic_long_read(&ltt_buf->consumed);
- consumed_old += *ppos;
- consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
-
- /*
- * Adjust read len, if longer than what is available
- */
- bytes_avail = SUBBUF_TRUNC(local_read(&ltt_buf->offset), buf->chan)
- - consumed_old;
- WARN_ON(bytes_avail > buf->chan->alloc_size);
- len = min_t(size_t, len, bytes_avail);
- subbuf_pages = bytes_avail >> PAGE_SHIFT;
- nr_pages = min_t(unsigned int, subbuf_pages, PIPE_BUFFERS);
- roffset = consumed_old & PAGE_MASK;
- poff = consumed_old & ~PAGE_MASK;
- printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
- len, (ssize_t)*ppos, local_read(&ltt_buf->offset));
-
- for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
- unsigned int this_len;
- struct buf_page *page;
-
- if (!len)
- break;
- printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
- len, roffset);
-
- this_len = PAGE_SIZE - poff;
- page = ltt_relay_read_get_page(buf, roffset);
- spd.pages[spd.nr_pages] = page->page;
- spd.partial[spd.nr_pages].offset = poff;
- spd.partial[spd.nr_pages].len = this_len;
-
- poff = 0;
- roffset += PAGE_SIZE;
- len -= this_len;
- }
-
- if (!spd.nr_pages)
- return 0;
-
- return splice_to_pipe(pipe, &spd);
-}
-
-static ssize_t ltt_relay_file_splice_read(struct file *in,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len,
- unsigned int flags)
-{
- ssize_t spliced;
- int ret;
-
- ret = 0;
- spliced = 0;
-
- printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n",
- len, (ssize_t)*ppos);
- while (len && !spliced) {
- ret = subbuf_splice_actor(in, ppos, pipe, len, flags);
- printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
- if (ret < 0)
- break;
- else if (!ret) {
- if (flags & SPLICE_F_NONBLOCK)
- ret = -EAGAIN;
- break;
- }
-
- *ppos += ret;
- if (ret > len)
- len = 0;
- else
- len -= ret;
- spliced += ret;
- }
-
- if (spliced)
- return spliced;
+//ust// static int subbuf_splice_actor(struct file *in,
+//ust// loff_t *ppos,
+//ust// struct pipe_inode_info *pipe,
+//ust// size_t len,
+//ust// unsigned int flags)
+//ust// {
+//ust// struct rchan_buf *buf = in->private_data;
+//ust// struct ltt_channel_struct *ltt_channel =
+//ust// (struct ltt_channel_struct *)buf->chan->private_data;
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_channel->buf, buf->cpu);
+//ust// unsigned int poff, subbuf_pages, nr_pages;
+//ust// struct page *pages[PIPE_BUFFERS];
+//ust// struct partial_page partial[PIPE_BUFFERS];
+//ust// struct splice_pipe_desc spd = {
+//ust// .pages = pages,
+//ust// .nr_pages = 0,
+//ust// .partial = partial,
+//ust// .flags = flags,
+//ust// .ops = &ltt_relay_pipe_buf_ops,
+//ust// .spd_release = ltt_relay_page_release,
+//ust// };
+//ust// long consumed_old, consumed_idx, roffset;
+//ust// unsigned long bytes_avail;
+//ust//
+//ust// /*
+//ust// * Check that a GET_SUBBUF ioctl has been done before.
+//ust// */
+//ust// WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
+//ust// consumed_old = atomic_long_read(&ltt_buf->consumed);
+//ust// consumed_old += *ppos;
+//ust// consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
+//ust//
+//ust// /*
+//ust// * Adjust read len, if longer than what is available
+//ust// */
+//ust// bytes_avail = SUBBUF_TRUNC(local_read(&ltt_buf->offset), buf->chan)
+//ust// - consumed_old;
+//ust// WARN_ON(bytes_avail > buf->chan->alloc_size);
+//ust// len = min_t(size_t, len, bytes_avail);
+//ust// subbuf_pages = bytes_avail >> PAGE_SHIFT;
+//ust// nr_pages = min_t(unsigned int, subbuf_pages, PIPE_BUFFERS);
+//ust// roffset = consumed_old & PAGE_MASK;
+//ust// poff = consumed_old & ~PAGE_MASK;
+//ust// printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
+//ust// len, (ssize_t)*ppos, local_read(&ltt_buf->offset));
+//ust//
+//ust// for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
+//ust// unsigned int this_len;
+//ust// struct buf_page *page;
+//ust//
+//ust// if (!len)
+//ust// break;
+//ust// printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
+//ust// len, roffset);
+//ust//
+//ust// this_len = PAGE_SIZE - poff;
+//ust// page = ltt_relay_read_get_page(buf, roffset);
+//ust// spd.pages[spd.nr_pages] = page->page;
+//ust// spd.partial[spd.nr_pages].offset = poff;
+//ust// spd.partial[spd.nr_pages].len = this_len;
+//ust//
+//ust// poff = 0;
+//ust// roffset += PAGE_SIZE;
+//ust// len -= this_len;
+//ust// }
+//ust//
+//ust// if (!spd.nr_pages)
+//ust// return 0;
+//ust//
+//ust// return splice_to_pipe(pipe, &spd);
+//ust// }
- return ret;
-}
+//ust// static ssize_t ltt_relay_file_splice_read(struct file *in,
+//ust// loff_t *ppos,
+//ust// struct pipe_inode_info *pipe,
+//ust// size_t len,
+//ust// unsigned int flags)
+//ust// {
+//ust// ssize_t spliced;
+//ust// int ret;
+//ust//
+//ust// ret = 0;
+//ust// spliced = 0;
+//ust//
+//ust// printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n",
+//ust// len, (ssize_t)*ppos);
+//ust// while (len && !spliced) {
+//ust// ret = subbuf_splice_actor(in, ppos, pipe, len, flags);
+//ust// printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
+//ust// if (ret < 0)
+//ust// break;
+//ust// else if (!ret) {
+//ust// if (flags & SPLICE_F_NONBLOCK)
+//ust// ret = -EAGAIN;
+//ust// break;
+//ust// }
+//ust//
+//ust// *ppos += ret;
+//ust// if (ret > len)
+//ust// len = 0;
+//ust// else
+//ust// len -= ret;
+//ust// spliced += ret;
+//ust// }
+//ust//
+//ust// if (spliced)
+//ust// return spliced;
+//ust//
+//ust// return ret;
+//ust// }
static void ltt_relay_print_subbuffer_errors(
struct ltt_channel_struct *ltt_chan,
- long cons_off, unsigned int cpu)
+ long cons_off)
{
struct rchan *rchan = ltt_chan->trans_channel_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_chan->buf, cpu);
+ struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
long cons_idx, commit_count, write_offset;
cons_idx = SUBBUF_INDEX(cons_off, rchan);
write_offset = local_read(&ltt_buf->offset);
printk(KERN_WARNING
"LTT : unread channel %s offset is %ld "
- "and cons_off : %ld (cpu %u)\n",
- ltt_chan->channel_name, write_offset, cons_off, cpu);
+ "and cons_off : %ld\n",
+ ltt_chan->channel_name, write_offset, cons_off);
/* Check each sub-buffer for non filled commit count */
if (((commit_count - rchan->subbuf_size) & ltt_chan->commit_count_mask)
- (BUFFER_TRUNC(cons_off, rchan) >> ltt_chan->n_subbufs_order)
}
static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
- struct ltt_channel_struct *ltt_chan, int cpu)
+ struct ltt_channel_struct *ltt_chan)
{
struct rchan *rchan = ltt_chan->trans_channel_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_chan->buf, cpu);
+ struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
long cons_off;
for (cons_off = atomic_long_read(&ltt_buf->consumed);
rchan)
- cons_off) > 0;
cons_off = SUBBUF_ALIGN(cons_off, rchan))
- ltt_relay_print_subbuffer_errors(ltt_chan, cons_off, cpu);
+ ltt_relay_print_subbuffer_errors(ltt_chan, cons_off);
}
-static void ltt_relay_print_buffer_errors(struct ltt_channel_struct *ltt_chan,
- unsigned int cpu)
+static void ltt_relay_print_buffer_errors(struct ltt_channel_struct *ltt_chan)
{
struct ltt_trace_struct *trace = ltt_chan->trace;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_chan->buf, cpu);
+ struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
if (local_read(&ltt_buf->events_lost))
printk(KERN_ALERT
"LTT : %s : %ld events lost "
- "in %s channel (cpu %u).\n",
+ "in %s channel.\n",
ltt_chan->channel_name,
local_read(&ltt_buf->events_lost),
- ltt_chan->channel_name, cpu);
+ ltt_chan->channel_name);
if (local_read(&ltt_buf->corrupted_subbuffers))
printk(KERN_ALERT
"LTT : %s : %ld corrupted subbuffers "
- "in %s channel (cpu %u).\n",
+ "in %s channel.\n",
ltt_chan->channel_name,
local_read(&ltt_buf->corrupted_subbuffers),
- ltt_chan->channel_name, cpu);
+ ltt_chan->channel_name);
- ltt_relay_print_errors(trace, ltt_chan, cpu);
+ ltt_relay_print_errors(trace, ltt_chan);
}
-//ust// static void ltt_relay_remove_dirs(struct ltt_trace_struct *trace)
-//ust// {
+static void ltt_relay_remove_dirs(struct ltt_trace_struct *trace)
+{
//ust// debugfs_remove(trace->dentry.trace_root);
-//ust// }
+}
static void ltt_relay_release_channel(struct kref *kref)
{
struct ltt_channel_struct *ltt_chan = container_of(kref,
struct ltt_channel_struct, kref);
- percpu_free(ltt_chan->buf);
+ free(ltt_chan->buf);
}
/*
static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
- unsigned int cpu, unsigned int n_subbufs)
+ unsigned int n_subbufs)
{
struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
unsigned int j;
return 0;
}
-static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan,
- unsigned int cpu)
+static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan)
{
struct ltt_trace_struct *trace = ltt_chan->trace;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_chan->buf, cpu);
+ struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
kref_put(&ltt_chan->trace->ltt_transport_kref,
ltt_release_transport);
- ltt_relay_print_buffer_errors(ltt_chan, cpu);
+ ltt_relay_print_buffer_errors(ltt_chan);
kfree(ltt_buf->commit_count);
ltt_buf->commit_count = NULL;
kref_put(&ltt_chan->kref, ltt_relay_release_channel);
kref_put(&trace->kref, ltt_release_trace);
- wake_up_interruptible(&trace->kref_wq);
+//ust// wake_up_interruptible(&trace->kref_wq);
}
/*
ltt_chan->overwrite = overwrite;
ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
- ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct),
- GFP_KERNEL, cpu_possible_map);
+//ust// ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
+ ltt_chan->buf = malloc(sizeof(struct ltt_channel_buf_struct));
if (!ltt_chan->buf)
- goto ltt_percpu_alloc_error;
+ goto alloc_error;
ltt_chan->trans_channel_data = ltt_relay_open(tmpname,
dir,
subbuf_size,
n_subbufs,
- &trace->callbacks,
ltt_chan);
tmpname_len = strlen(tmpname);
if (tmpname_len > 0) {
goto end;
relay_open_error:
- percpu_free(ltt_chan->buf);
-ltt_percpu_alloc_error:
+ free(ltt_chan->buf);
+alloc_error:
err = EPERM;
end:
kfree(tmpname);
return err;
}
-//ust// static int ltt_relay_create_dirs(struct ltt_trace_struct *new_trace)
-//ust// {
-//ust// new_trace->dentry.trace_root = debugfs_create_dir(new_trace->trace_name,
-//ust// get_ltt_root());
-//ust// if (new_trace->dentry.trace_root == NULL) {
-//ust// printk(KERN_ERR "LTT : Trace directory name %s already taken\n",
-//ust// new_trace->trace_name);
-//ust// return EEXIST;
-//ust// }
-//ust//
-//ust// new_trace->callbacks.create_buf_file = ltt_create_buf_file_callback;
-//ust// new_trace->callbacks.remove_buf_file = ltt_remove_buf_file_callback;
-//ust//
-//ust// return 0;
-//ust// }
+static int ltt_relay_create_dirs(struct ltt_trace_struct *new_trace)
+{
+//ust// new_trace->dentry.trace_root = debugfs_create_dir(new_trace->trace_name,
+//ust// get_ltt_root());
+//ust// if (new_trace->dentry.trace_root == NULL) {
+//ust// printk(KERN_ERR "LTT : Trace directory name %s already taken\n",
+//ust// new_trace->trace_name);
+//ust// return EEXIST;
+//ust// }
+
+//ust// new_trace->callbacks.create_buf_file = ltt_create_buf_file_callback;
+//ust// new_trace->callbacks.remove_buf_file = ltt_remove_buf_file_callback;
+
+ return 0;
+}
/*
* LTTng channel flush function.
static void ltt_relay_async_wakeup_chan(struct ltt_channel_struct *ltt_channel)
{
- unsigned int i;
- struct rchan *rchan = ltt_channel->trans_channel_data;
-
- for_each_possible_cpu(i) {
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, i);
-
- if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
- atomic_set(&ltt_buf->wakeup_readers, 0);
- wake_up_interruptible(&rchan->buf[i]->read_wait);
- }
- }
+//ust// unsigned int i;
+//ust// struct rchan *rchan = ltt_channel->trans_channel_data;
+//ust//
+//ust// for_each_possible_cpu(i) {
+//ust// struct ltt_channel_buf_struct *ltt_buf =
+//ust// percpu_ptr(ltt_channel->buf, i);
+//ust//
+//ust// if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
+//ust// atomic_set(&ltt_buf->wakeup_readers, 0);
+//ust// wake_up_interruptible(&rchan->buf[i]->read_wait);
+//ust// }
+//ust// }
}
-static void ltt_relay_finish_buffer(struct ltt_channel_struct *ltt_channel,
- unsigned int cpu)
+static void ltt_relay_finish_buffer(struct ltt_channel_struct *ltt_channel)
{
struct rchan *rchan = ltt_channel->trans_channel_data;
- if (rchan->buf[cpu]) {
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, cpu);
- ltt_relay_buffer_flush(rchan->buf[cpu]);
- ltt_relay_wake_writers(ltt_buf);
+ if (rchan->buf) {
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
+ ltt_relay_buffer_flush(rchan->buf);
+//ust// ltt_relay_wake_writers(ltt_buf);
}
}
{
- unsigned int i;
+//ust// unsigned int i;
- for_each_possible_cpu(i)
- ltt_relay_finish_buffer(ltt_channel, i);
+//ust// for_each_possible_cpu(i)
+ ltt_relay_finish_buffer(ltt_channel);
}
static void ltt_relay_remove_channel(struct ltt_channel_struct *channel)
unsigned int *rflags, int largest_align, int cpu)
{
struct rchan *rchan = ltt_channel->trans_channel_data;
- struct rchan_buf *buf = *transport_data =
- rchan->buf[cpu];
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, buf->cpu);
+ struct rchan_buf *buf = *transport_data = rchan->buf;
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
struct ltt_reserve_switch_offsets offsets;
offsets.reserve_commit_diff = 0;
/*
* Perform retryable operations.
*/
- if (__get_cpu_var(ltt_nesting) > 4) {
+ if (ltt_nesting > 4) {
local_inc(&ltt_buf->events_lost);
return -EPERM;
}
{
struct ltt_channel_struct *ltt_channel =
(struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, buf->cpu);
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
struct rchan *rchan = ltt_channel->trans_channel_data;
struct ltt_reserve_switch_offsets offsets;
u64 tsc;
void **transport_data, long buf_offset, size_t slot_size)
{
struct rchan_buf *buf = *transport_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, buf->cpu);
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
struct rchan *rchan = buf->chan;
long offset_end = buf_offset;
long endidx = SUBBUF_INDEX(offset_end - 1, rchan);
unsigned int chan_index, size_t data_size,
struct user_dbg_data *dbg)
{
- struct rchan *rchan;
- struct ltt_channel_buf_struct *ltt_buf;
- struct ltt_channel_struct *channel;
- struct rchan_buf *relay_buf;
- int cpu;
- DECLARE_WAITQUEUE(wait, current);
-
- channel = &trace->channels[chan_index];
- rchan = channel->trans_channel_data;
- cpu = smp_processor_id();
- relay_buf = rchan->buf[cpu];
- ltt_buf = percpu_ptr(channel->buf, cpu);
-
- /*
- * Check if data is too big for the channel : do not
- * block for it.
- */
- if (LTT_RESERVE_CRITICAL + data_size > relay_buf->chan->subbuf_size)
- return 0;
-
- /*
- * If free space too low, we block. We restart from the
- * beginning after we resume (cpu id may have changed
- * while preemption is active).
- */
- spin_lock(&ltt_buf->full_lock);
- if (!channel->overwrite) {
- dbg->write = local_read(&ltt_buf->offset);
- dbg->read = atomic_long_read(&ltt_buf->consumed);
- dbg->avail_size = dbg->write + LTT_RESERVE_CRITICAL + data_size
- - SUBBUF_TRUNC(dbg->read,
- relay_buf->chan);
- if (dbg->avail_size > rchan->alloc_size) {
- __set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&ltt_buf->write_wait, &wait);
- spin_unlock(&ltt_buf->full_lock);
- preempt_enable();
- schedule();
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&ltt_buf->write_wait, &wait);
- if (signal_pending(current))
- return -ERESTARTSYS;
- preempt_disable();
- return 1;
- }
- }
- spin_unlock(&ltt_buf->full_lock);
+//ust// struct rchan *rchan;
+//ust// struct ltt_channel_buf_struct *ltt_buf;
+//ust// struct ltt_channel_struct *channel;
+//ust// struct rchan_buf *relay_buf;
+//ust// int cpu;
+//ust// DECLARE_WAITQUEUE(wait, current);
+//ust//
+//ust// channel = &trace->channels[chan_index];
+//ust// rchan = channel->trans_channel_data;
+//ust// cpu = smp_processor_id();
+//ust// relay_buf = rchan->buf[cpu];
+//ust// ltt_buf = percpu_ptr(channel->buf, cpu);
+//ust//
+//ust// /*
+//ust// * Check if data is too big for the channel : do not
+//ust// * block for it.
+//ust// */
+//ust// if (LTT_RESERVE_CRITICAL + data_size > relay_buf->chan->subbuf_size)
+//ust// return 0;
+//ust//
+//ust// /*
+//ust// * If free space too low, we block. We restart from the
+//ust// * beginning after we resume (cpu id may have changed
+//ust// * while preemption is active).
+//ust// */
+//ust// spin_lock(&ltt_buf->full_lock);
+//ust// if (!channel->overwrite) {
+//ust// dbg->write = local_read(&ltt_buf->offset);
+//ust// dbg->read = atomic_long_read(&ltt_buf->consumed);
+//ust// dbg->avail_size = dbg->write + LTT_RESERVE_CRITICAL + data_size
+//ust// - SUBBUF_TRUNC(dbg->read,
+//ust// relay_buf->chan);
+//ust// if (dbg->avail_size > rchan->alloc_size) {
+//ust// __set_current_state(TASK_INTERRUPTIBLE);
+//ust// add_wait_queue(&ltt_buf->write_wait, &wait);
+//ust// spin_unlock(&ltt_buf->full_lock);
+//ust// preempt_enable();
+//ust// schedule();
+//ust// __set_current_state(TASK_RUNNING);
+//ust// remove_wait_queue(&ltt_buf->write_wait, &wait);
+//ust// if (signal_pending(current))
+//ust// return -ERESTARTSYS;
+//ust// preempt_disable();
+//ust// return 1;
+//ust// }
+//ust// }
+//ust// spin_unlock(&ltt_buf->full_lock);
return 0;
}
channel = &trace->channels[chan_index];
rchan = channel->trans_channel_data;
- relay_buf = rchan->buf[cpu];
- ltt_buf = percpu_ptr(channel->buf, cpu);
+ relay_buf = rchan->buf;
+ ltt_buf = channel->buf;
printk(KERN_ERR "Error in LTT usertrace : "
"buffer full : event lost in blocking "
"mode. Increase LTT_RESERVE_CRITICAL.\n");
- printk(KERN_ERR "LTT nesting level is %u.\n",
- per_cpu(ltt_nesting, cpu));
+ printk(KERN_ERR "LTT nesting level is %u.\n", ltt_nesting);
printk(KERN_ERR "LTT avail size %lu.\n",
dbg->avail_size);
printk(KERN_ERR "avai write : %lu, read : %lu\n",
static struct ltt_transport ust_relay_transport = {
.name = "ustrelay",
- .owner = THIS_MODULE,
+//ust// .owner = THIS_MODULE,
.ops = {
.create_dirs = ltt_relay_create_dirs,
.remove_dirs = ltt_relay_remove_dirs,
{
//ust// printk(KERN_INFO "LTT : ltt-relay exit\n");
- ltt_transport_unregister(&ltt_relay_transport);
+ ltt_transport_unregister(&ust_relay_transport);
}
//ust// module_init(ltt_relay_init);
--- /dev/null
+/*
+ * linux/include/linux/ltt-relay.h
+ *
+ * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
+ * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
+ * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * CONFIG_RELAY definitions and declarations
+ */
+
+#ifndef _LINUX_LTT_RELAY_H
+#define _LINUX_LTT_RELAY_H
+
+//ust// #include <linux/types.h>
+//ust// #include <linux/sched.h>
+//ust// #include <linux/timer.h>
+//ust// #include <linux/wait.h>
+//ust// #include <linux/list.h>
+//ust// #include <linux/fs.h>
+//ust// #include <linux/poll.h>
+//ust// #include <linux/kref.h>
+//ust// #include <linux/mm.h>
+//ust// #include <linux/ltt-core.h>
+#include "kref.h"
+#include "list.h"
+
+/* Needs a _much_ better name... */
+#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
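+/*
+ * For example, assuming 4 KiB pages (PAGE_SIZE == 4096, PAGE_MASK ==
+ * ~(PAGE_SIZE - 1)), FIX_SIZE rounds a non-zero size up to the next
+ * page multiple: FIX_SIZE(1) == 4096, FIX_SIZE(4096) == 4096 and
+ * FIX_SIZE(4097) == 8192. Only meaningful for x > 0.
+ */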
+
+/*
+ * Tracks changes to rchan/rchan_buf structs
+ */
+#define LTT_RELAY_CHANNEL_VERSION 8
+
+struct rchan_buf;
+
+struct buf_page {
+ struct page *page;
+ struct rchan_buf *buf; /* buffer the page belongs to */
+ size_t offset; /* page offset in the buffer */
+ struct list_head list; /* buffer linked list */
+};
+
+/*
+ * Per-cpu relay channel buffer
+ */
+struct rchan_buf {
+ struct rchan *chan; /* associated channel */
+//ust// wait_queue_head_t read_wait; /* reader wait queue */
+//ust// struct timer_list timer; /* reader wake-up timer */
+//ust// struct dentry *dentry; /* channel file dentry */
+ struct kref kref; /* channel buffer refcount */
+//ust// struct list_head pages; /* list of buffer pages */
+ void *buf_data; //ust//
+ size_t buf_size;
+//ust// struct buf_page *wpage; /* current write page (cache) */
+//ust// struct buf_page *hpage[2]; /* current subbuf header page (cache) */
+//ust// struct buf_page *rpage; /* current subbuf read page (cache) */
+//ust// unsigned int page_count; /* number of current buffer pages */
+ unsigned int finalized; /* buffer has been finalized */
+//ust// unsigned int cpu; /* this buf's cpu */
+} ____cacheline_aligned;
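+
+/*
+ * The userspace port replaces the page list with one flat mapping, so
+ * resolving a channel offset to an address is plain pointer math. A
+ * sketch, assuming offsets wrap on the power-of-two buffer size the
+ * way the old paged code masked with alloc_size:
+ *
+ *	void *addr = buf->buf_data + (offset & (buf->buf_size - 1));
+ */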
+
+/*
+ * Relay channel data structure
+ */
+struct rchan {
+ u32 version; /* the version of this struct */
+ size_t subbuf_size; /* sub-buffer size */
+ size_t n_subbufs; /* number of sub-buffers per buffer */
+ size_t alloc_size; /* total buffer size allocated */
+ struct rchan_callbacks *cb; /* client callbacks */
+ struct kref kref; /* channel refcount */
+ void *private_data; /* for user-defined data */
+//ust// struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
+ struct rchan_buf *buf;
+ struct list_head list; /* for channel list */
+ struct dentry *parent; /* parent dentry passed to open */
+ int subbuf_size_order; /* order of sub-buffer size */
+//ust// char base_filename[NAME_MAX]; /* saved base filename */
+};
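+
+/*
+ * The size fields are assumed to be related as alloc_size ==
+ * subbuf_size * n_subbufs, with both factors powers of two (hence
+ * subbuf_size_order), so sub-buffer arithmetic can use shifts and
+ * masks rather than divisions.
+ */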
+
+/*
+ * Relay channel client callbacks
+ */
+struct rchan_callbacks {
+ /*
+ * subbuf_start - called on buffer-switch to a new sub-buffer
+ * @buf: the channel buffer containing the new sub-buffer
+ * @subbuf: the start of the new sub-buffer
+ * @prev_subbuf: the start of the previous sub-buffer
+ * @prev_padding: unused space at the end of previous sub-buffer
+ *
+ * The client should return 1 to continue logging, 0 to stop
+ * logging.
+ *
+ * NOTE: subbuf_start will also be invoked when the buffer is
+ * created, so that the first sub-buffer can be initialized
+ * if necessary. In this case, prev_subbuf will be NULL.
+ *
+ * NOTE: the client can reserve bytes at the beginning of the new
+ * sub-buffer by calling subbuf_start_reserve() in this callback.
+ */
+ int (*subbuf_start) (struct rchan_buf *buf,
+ void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding);
+
+ /*
+ * create_buf_file - create file to represent a relay channel buffer
+ * @filename: the name of the file to create
+ * @parent: the parent of the file to create
+ * @mode: the mode of the file to create
+ * @buf: the channel buffer
+ *
+ * Called during relay_open(), once for each per-cpu buffer,
+ * to allow the client to create a file to be used to
+ * represent the corresponding channel buffer. If the file is
+ * created outside of relay, the parent must also exist in
+ * that filesystem.
+ *
+ * The callback should return the dentry of the file created
+ * to represent the relay buffer.
+ *
+ * Setting the is_global outparam to a non-zero value will
+ * cause relay_open() to create a single global buffer rather
+ * than the default set of per-cpu buffers.
+ *
+ * See Documentation/filesystems/relayfs.txt for more info.
+ */
+ struct dentry *(*create_buf_file)(const char *filename,
+ struct dentry *parent,
+ int mode,
+ struct rchan_buf *buf);
+
+ /*
+ * remove_buf_file - remove file representing a relay channel buffer
+ * @dentry: the dentry of the file to remove
+ *
+ * Called during relay_close(), once for each per-cpu buffer,
+ * to allow the client to remove a file used to represent a
+ * channel buffer.
+ *
+ * The callback should return 0 if successful, negative if not.
+ */
+ int (*remove_buf_file)(struct rchan_buf *buf);
+};
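+
+/*
+ * A minimal client sketch (hypothetical, for illustration only): a
+ * client that always keeps logging and reserves no header space could
+ * implement subbuf_start as:
+ *
+ *	static int example_subbuf_start(struct rchan_buf *buf,
+ *			void *subbuf, void *prev_subbuf,
+ *			size_t prev_padding)
+ *	{
+ *		return 1;	(return 0 instead to stop logging)
+ *	}
+ */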
+
+extern struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
+ struct buf_page *page, size_t offset, ssize_t diff_offset);
+
+extern struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
+ struct buf_page *page, size_t offset, ssize_t diff_offset);
+
+extern void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
+ const void *src, size_t len, ssize_t cpy);
+
+extern int ltt_relay_read(struct rchan_buf *buf, size_t offset,
+ void *dest, size_t len);
+
+extern struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf,
+ size_t offset);
+
+/*
+ * Return the address where a given offset is located.
+ * Should be used to get the current subbuffer header pointer. Given we know
+ * it's never on a page boundary, it's safe to write directly to this address,
+ * as long as the write is never bigger than a page size.
+ */
+extern void *ltt_relay_offset_address(struct rchan_buf *buf,
+ size_t offset);
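+
+/*
+ * Hypothetical use, assuming the tracer's SUBBUF_TRUNC() macro, which
+ * rounds an offset down to its sub-buffer boundary:
+ *
+ *	void *header = ltt_relay_offset_address(buf,
+ *			SUBBUF_TRUNC(offset, buf->chan));
+ */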
+
+/*
+ * Find the page containing "offset". Cache it if it is after the currently
+ * cached page.
+ */
+static inline struct buf_page *ltt_relay_cache_page(struct rchan_buf *buf,
+ struct buf_page **page_cache,
+ struct buf_page *page, size_t offset)
+{
+ ssize_t diff_offset;
+ ssize_t half_buf_size = buf->chan->alloc_size >> 1;
+
+ /*
+ * Make sure this is the page we want to write into. The current
+ * page is changed concurrently by other writers. [wrh]page are
+ * used as a cache remembering the last page written
+ * to/read/looked up for header address. No synchronization;
+ * could have to find the previous page if a nested write
+ * occurred. Finding the right page is done by comparing the
+ * dest_offset with the buf_page offsets.
+ * When at the exact opposite of the buffer, bias towards forward search
+ * because it will be cached.
+ */
+
+ diff_offset = (ssize_t)offset - (ssize_t)page->offset;
+ if (diff_offset <= -(ssize_t)half_buf_size)
+ diff_offset += buf->chan->alloc_size;
+ else if (diff_offset > half_buf_size)
+ diff_offset -= buf->chan->alloc_size;
+
+ if (unlikely(diff_offset >= (ssize_t)PAGE_SIZE)) {
+ page = ltt_relay_find_next_page(buf, page, offset, diff_offset);
+ *page_cache = page;
+ } else if (unlikely(diff_offset < 0)) {
+ page = ltt_relay_find_prev_page(buf, page, offset, diff_offset);
+ }
+ return page;
+}
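+
+/*
+ * Worked example of the wraparound bias above (illustrative numbers,
+ * assuming PAGE_SIZE == 4096 and alloc_size == 16384, so
+ * half_buf_size == 8192): with the cached page at offset 12288 and a
+ * target offset of 1024, the raw difference is -11264, which is
+ * <= -8192, so alloc_size is added back and diff_offset becomes 5120.
+ * That is >= PAGE_SIZE, so the lookup searches forward one page
+ * (wrapping to offset 0) instead of walking backward across most of
+ * the buffer.
+ */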
+
+//ust// #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
+{
+ switch (len) {
+ case 0: break;
+ case 1: *(u8 *)dest = *(const u8 *)src;
+ break;
+ case 2: *(u16 *)dest = *(const u16 *)src;
+ break;
+ case 4: *(u32 *)dest = *(const u32 *)src;
+ break;
+//ust// #if (BITS_PER_LONG == 64)
+ case 8: *(u64 *)dest = *(const u64 *)src;
+ break;
+//ust// #endif
+ default:
+ memcpy(dest, src, len);
+ }
+}
+//ust// #else
+//ust// /*
+//ust// * Returns whether the dest and src addresses are aligned on
+//ust// * min(sizeof(void *), len). Call this with statically known len for efficiency.
+//ust// */
+//ust// static inline int addr_aligned(const void *dest, const void *src, size_t len)
+//ust// {
+//ust// if (ltt_align((size_t)dest, len))
+//ust// return 0;
+//ust// if (ltt_align((size_t)src, len))
+//ust// return 0;
+//ust// return 1;
+//ust// }
+//ust//
+//ust// static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
+//ust// {
+//ust// switch (len) {
+//ust// case 0: break;
+//ust// case 1: *(u8 *)dest = *(const u8 *)src;
+//ust// break;
+//ust// case 2: if (unlikely(!addr_aligned(dest, src, 2)))
+//ust// goto memcpy_fallback;
+//ust// *(u16 *)dest = *(const u16 *)src;
+//ust// break;
+//ust// case 4: if (unlikely(!addr_aligned(dest, src, 4)))
+//ust// goto memcpy_fallback;
+//ust// *(u32 *)dest = *(const u32 *)src;
+//ust// break;
+//ust// #if (BITS_PER_LONG == 64)
+//ust// case 8: if (unlikely(!addr_aligned(dest, src, 8)))
+//ust// goto memcpy_fallback;
+//ust// *(u64 *)dest = *(const u64 *)src;
+//ust// break;
+//ust// #endif
+//ust// default:
+//ust// goto memcpy_fallback;
+//ust// }
+//ust// return;
+//ust// memcpy_fallback:
+//ust// memcpy(dest, src, len);
+//ust// }
+//ust// #endif
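+
+/*
+ * With a compile-time-constant length, such as a fixed-size event
+ * field, the switch in ltt_relay_do_copy() lets the compiler reduce
+ * the whole call to a single store, e.g.:
+ *
+ *	u32 id = 42;
+ *	ltt_relay_do_copy(dest, &id, sizeof(id));	(one 32-bit store)
+ */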
+
+static inline int ltt_relay_write(struct rchan_buf *buf, size_t offset,
+ const void *src, size_t len)
+{
+//ust// struct buf_page *page;
+//ust// ssize_t pagecpy;
+//ust//
+//ust// offset &= buf->chan->alloc_size - 1;
+//ust// page = buf->wpage;
+//ust//
+//ust// page = ltt_relay_cache_page(buf, &buf->wpage, page, offset);
+//ust// pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+//ust// ltt_relay_do_copy(page_address(page->page)
+//ust// + (offset & ~PAGE_MASK), src, pagecpy);
+//ust//
+//ust// if (unlikely(len != pagecpy))
+//ust// _ltt_relay_write(buf, offset, src, len, page, pagecpy);
+//ust// return len;
+
+ size_t cpy;
+ cpy = min_t(size_t, len, buf->buf_size - offset);
+ ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
+
+ if (unlikely(len != cpy))
+ _ltt_relay_write(buf, offset, src, len, cpy);
+ return len;
+}
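+
+/*
+ * Sketch of the wraparound path (hypothetical sizes): for a 16 byte
+ * write at offset buf_size - 4, cpy is 4, so 4 bytes land at the end
+ * of the flat mapping and the slow path _ltt_relay_write() is assumed
+ * to place the remaining 12 bytes back at the start of the buffer.
+ */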
+
+/*
+ * CONFIG_LTT_RELAY kernel API, ltt/ltt-relay-alloc.c
+ */
+
+struct rchan *ltt_relay_open(const char *base_filename,
+ struct dentry *parent,
+ size_t subbuf_size,
+ size_t n_subbufs,
+ void *private_data);
+extern void ltt_relay_close(struct rchan *chan);
+
+/*
+ * exported ltt_relay file operations, ltt/ltt-relay-alloc.c
+ */
+extern const struct file_operations ltt_relay_file_operations;
+
+#endif /* _LINUX_LTT_RELAY_H */
+