all: hello
hello: hello.c
- gcc -g -L../libtracectl -ltracectl -o hello hello.c
+ gcc -g -I../libmarkers -I../share -o hello hello.c -L../libmarkers -lmarkers -L../libtracectl -ltracectl
clean:
rm -rf hello *.o
+
+.PHONY: all clean
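+
+# Usage sketch (assuming the in-tree layout implied by the -L flags above):
+#   make && LD_LIBRARY_PATH=../libmarkers:../libtracectl ./hello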
#include <stdio.h>
#include <unistd.h>
+#include "../libmarkers/marker.h"
+
+void probe(const struct marker *mdata,
+ void *probe_private, void *call_private,
+ const char *fmt, va_list *args)
+{
+ printf("In probe\n");
+}
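+
+/*
+ * A probe receives the marker metadata, the registration- and call-time
+ * private pointers, and the marker's format string with its va_list.  As
+ * a sketch, a probe that wants the traced arguments could forward them
+ * with vprintf(fmt, *args) when the format string is non-empty.
+ */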
+
int main()
{
printf("Hello, World!\n");
+
+ marker_probe_register("abc", "testmark", "", probe, NULL);
+
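+ /* This hit reaches probe() because the registration above named the
+  * same channel/marker pair ("abc", "testmark"); the empty format and
+  * MARK_NOARGS mean the event records no arguments. */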
+ trace_mark(abc, testmark, "", MARK_NOARGS);
+
scanf("%*s");
return 0;
all: libmarkers.so
libmarkers.so: marker.c *.c *.h
- gcc -g -fPIC -I../share -I../libtracing -shared -o libmarkers.so marker.c
+ gcc -g -fPIC -I../share -I../libtracing -shared -o libmarkers.so marker.c ../share/kref.c
+
+.PHONY: all
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/jhash.h>
-#include <linux/list.h>
-#include <linux/rcupdate.h>
-#include <linux/marker.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/immediate.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <linux/user_marker.h>
-#include <linux/ltt-tracer.h>
+//ust// #include <linux/module.h>
+//ust// #include <linux/mutex.h>
+//ust// #include <linux/types.h>
+#include "jhash.h"
+#include "list.h"
+#include "rcupdate.h"
+//ust// #include <linux/marker.h>
+#include <errno.h>
+//ust// #include <linux/slab.h>
+//ust// #include <linux/immediate.h>
+//ust// #include <linux/sched.h>
+//ust// #include <linux/uaccess.h>
+//ust// #include <linux/user_marker.h>
+//ust// #include <linux/ltt-tracer.h>
+
+#include "marker.h"
+#include "kernelcompat.h"
+#include "usterr.h"
+#include "channels.h"
+#include "tracercore.h"
extern struct marker __start___markers[];
extern struct marker __stop___markers[];
void *probe_private, void *call_private, const char *fmt, va_list *args)
{
}
-EXPORT_SYMBOL_GPL(__mark_empty_function);
+//ust// EXPORT_SYMBOL_GPL(__mark_empty_function);
/*
* marker_probe_cb Callback that prepares the variable argument list for probes.
* sure the teardown of the callbacks can be done correctly when they
* are in modules and they insure RCU read coherency.
*/
- rcu_read_lock_sched_notrace();
+//ust// rcu_read_lock_sched_notrace();
ptype = mdata->ptype;
if (likely(!ptype)) {
marker_probe_func *func;
va_end(args);
}
}
- rcu_read_unlock_sched_notrace();
+//ust// rcu_read_unlock_sched_notrace();
}
-EXPORT_SYMBOL_GPL(marker_probe_cb);
+//ust// EXPORT_SYMBOL_GPL(marker_probe_cb);
/*
* marker_probe_cb_noarg Callback that does not prepare the variable argument list.
va_list args; /* not initialized */
char ptype;
- rcu_read_lock_sched_notrace();
+//ust// rcu_read_lock_sched_notrace();
ptype = mdata->ptype;
if (likely(!ptype)) {
marker_probe_func *func;
multi[i].func(mdata, multi[i].probe_private,
call_private, mdata->format, &args);
}
- rcu_read_unlock_sched_notrace();
+//ust// rcu_read_unlock_sched_notrace();
}
static void free_old_closure(struct rcu_head *head)
smp_wmb();
elem->ptype = entry->ptype;
- if (elem->tp_name && (active ^ _imv_read(elem->state))) {
- WARN_ON(!elem->tp_cb);
- /*
- * It is ok to directly call the probe registration because type
- * checking has been done in the __trace_mark_tp() macro.
- */
-
- if (active) {
- /*
- * try_module_get should always succeed because we hold
- * markers_mutex to get the tp_cb address.
- */
- ret = try_module_get(__module_text_address(
- (unsigned long)elem->tp_cb));
- BUG_ON(!ret);
- ret = tracepoint_probe_register_noupdate(
- elem->tp_name,
- elem->tp_cb);
- } else {
- ret = tracepoint_probe_unregister_noupdate(
- elem->tp_name,
- elem->tp_cb);
- /*
- * tracepoint_probe_update_all() must be called
- * before the module containing tp_cb is unloaded.
- */
- module_put(__module_text_address(
- (unsigned long)elem->tp_cb));
- }
- }
+//ust// if (elem->tp_name && (active ^ _imv_read(elem->state))) {
+//ust// WARN_ON(!elem->tp_cb);
+//ust// /*
+//ust// * It is ok to directly call the probe registration because type
+//ust// * checking has been done in the __trace_mark_tp() macro.
+//ust// */
+//ust//
+//ust// if (active) {
+//ust// /*
+//ust// * try_module_get should always succeed because we hold
+//ust// * markers_mutex to get the tp_cb address.
+//ust// */
+//ust// ret = try_module_get(__module_text_address(
+//ust// (unsigned long)elem->tp_cb));
+//ust// BUG_ON(!ret);
+//ust// ret = tracepoint_probe_register_noupdate(
+//ust// elem->tp_name,
+//ust// elem->tp_cb);
+//ust// } else {
+//ust// ret = tracepoint_probe_unregister_noupdate(
+//ust// elem->tp_name,
+//ust// elem->tp_cb);
+//ust// /*
+//ust// * tracepoint_probe_update_all() must be called
+//ust// * before the module containing tp_cb is unloaded.
+//ust// */
+//ust// module_put(__module_text_address(
+//ust// (unsigned long)elem->tp_cb));
+//ust// }
+//ust// }
elem->state__imv = active;
return ret;
int ret;
/* leave "call" as is. It is known statically. */
- if (elem->tp_name && _imv_read(elem->state)) {
- WARN_ON(!elem->tp_cb);
- /*
- * It is ok to directly call the probe registration because type
- * checking has been done in the __trace_mark_tp() macro.
- */
- ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
- elem->tp_cb);
- WARN_ON(ret);
- /*
- * tracepoint_probe_update_all() must be called
- * before the module containing tp_cb is unloaded.
- */
- module_put(__module_text_address((unsigned long)elem->tp_cb));
- }
+//ust// if (elem->tp_name && _imv_read(elem->state)) {
+//ust// WARN_ON(!elem->tp_cb);
+//ust// /*
+//ust// * It is ok to directly call the probe registration because type
+//ust// * checking has been done in the __trace_mark_tp() macro.
+//ust// */
+//ust// ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
+//ust// elem->tp_cb);
+//ust// WARN_ON(ret);
+//ust// /*
+//ust// * tracepoint_probe_update_all() must be called
+//ust// * before the module containing tp_cb is unloaded.
+//ust// */
+//ust// module_put(__module_text_address((unsigned long)elem->tp_cb));
+//ust// }
elem->state__imv = 0;
elem->single.func = __mark_empty_function;
/* Update the function before setting the ptype */
/* Core kernel markers */
marker_update_probe_range(__start___markers, __stop___markers);
/* Markers in modules. */
- module_update_markers();
+//ust// module_update_markers();
tracepoint_probe_update_all();
/* Update immediate values */
core_imv_update();
- module_imv_update();
+//ust// module_imv_update();
- marker_update_processes();
+//ust// marker_update_processes();
}
mutex_unlock(&markers_mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(marker_probe_register);
+//ust// EXPORT_SYMBOL_GPL(marker_probe_register);
/**
* marker_probe_unregister - Disconnect a probe from a marker
mutex_unlock(&markers_mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(marker_probe_unregister);
+//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister);
static struct marker_entry *
get_marker_from_private_data(marker_probe_func *probe, void *probe_private)
kfree(name);
return ret;
}
-EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
+//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
/**
* marker_get_private_data - Get a marker's probe private data
}
return ERR_PTR(-ENOENT);
}
-EXPORT_SYMBOL_GPL(marker_get_private_data);
+//ust// EXPORT_SYMBOL_GPL(marker_get_private_data);
/**
* markers_compact_event_ids - Compact markers event IDs and reassign channels
* Called when no channel users are active by the channel infrastructure.
* Called with lock_markers() and channel mutex held.
*/
-void markers_compact_event_ids(void)
-{
- struct marker_entry *entry;
- unsigned int i;
- struct hlist_head *head;
- struct hlist_node *node;
- int ret;
-
- for (i = 0; i < MARKER_TABLE_SIZE; i++) {
- head = &marker_table[i];
- hlist_for_each_entry(entry, node, head, hlist) {
- ret = ltt_channels_get_index_from_name(entry->channel);
- WARN_ON(ret < 0);
- entry->channel_id = ret;
- ret = _ltt_channels_get_event_id(entry->channel,
- entry->name);
- WARN_ON(ret < 0);
- entry->event_id = ret;
- }
- }
-}
+//ust// void markers_compact_event_ids(void)
+//ust// {
+//ust// struct marker_entry *entry;
+//ust// unsigned int i;
+//ust// struct hlist_head *head;
+//ust// struct hlist_node *node;
+//ust// int ret;
+//ust//
+//ust// for (i = 0; i < MARKER_TABLE_SIZE; i++) {
+//ust// head = &marker_table[i];
+//ust// hlist_for_each_entry(entry, node, head, hlist) {
+//ust// ret = ltt_channels_get_index_from_name(entry->channel);
+//ust// WARN_ON(ret < 0);
+//ust// entry->channel_id = ret;
+//ust// ret = _ltt_channels_get_event_id(entry->channel,
+//ust// entry->name);
+//ust// WARN_ON(ret < 0);
+//ust// entry->event_id = ret;
+//ust// }
+//ust// }
+//ust// }
#ifdef CONFIG_MODULES
return 1;
return 0;
}
-EXPORT_SYMBOL_GPL(marker_get_iter_range);
+//ust// EXPORT_SYMBOL_GPL(marker_get_iter_range);
static void marker_get_iter(struct marker_iter *iter)
{
{
marker_get_iter(iter);
}
-EXPORT_SYMBOL_GPL(marker_iter_start);
+//ust// EXPORT_SYMBOL_GPL(marker_iter_start);
void marker_iter_next(struct marker_iter *iter)
{
*/
marker_get_iter(iter);
}
-EXPORT_SYMBOL_GPL(marker_iter_next);
+//ust// EXPORT_SYMBOL_GPL(marker_iter_next);
void marker_iter_stop(struct marker_iter *iter)
{
}
-EXPORT_SYMBOL_GPL(marker_iter_stop);
+//ust// EXPORT_SYMBOL_GPL(marker_iter_stop);
void marker_iter_reset(struct marker_iter *iter)
{
iter->module = NULL;
iter->marker = NULL;
}
-EXPORT_SYMBOL_GPL(marker_iter_reset);
+//ust// EXPORT_SYMBOL_GPL(marker_iter_reset);
#ifdef CONFIG_MARKERS_USERSPACE
/*
}
}
-asmlinkage long sys_marker(char __user *name, char __user *format,
- char __user *state, int reg)
-{
- struct user_marker *umark;
- long len;
- struct marker_entry *entry;
- int ret = 0;
-
- printk(KERN_DEBUG "Program %s %s marker [%p, %p]\n",
- current->comm, reg ? "registers" : "unregisters",
- name, state);
- if (reg) {
- umark = kmalloc(sizeof(struct user_marker), GFP_KERNEL);
- umark->name[MAX_USER_MARKER_NAME_LEN - 1] = '\0';
- umark->format[MAX_USER_MARKER_FORMAT_LEN - 1] = '\0';
- umark->state = state;
- len = strncpy_from_user(umark->name, name,
- MAX_USER_MARKER_NAME_LEN - 1);
- if (len < 0) {
- ret = -EFAULT;
- goto error;
- }
- len = strncpy_from_user(umark->format, format,
- MAX_USER_MARKER_FORMAT_LEN - 1);
- if (len < 0) {
- ret = -EFAULT;
- goto error;
- }
- printk(KERN_DEBUG "Marker name : %s, format : %s", umark->name,
- umark->format);
- mutex_lock(&markers_mutex);
- entry = get_marker("userspace", umark->name);
- if (entry) {
- if (entry->format &&
- strcmp(entry->format, umark->format) != 0) {
- printk(" error, wrong format in process %s",
- current->comm);
- ret = -EPERM;
- goto error_unlock;
- }
- printk(" %s", !!entry->refcount
- ? "enabled" : "disabled");
- if (put_user(!!entry->refcount, state)) {
- ret = -EFAULT;
- goto error_unlock;
- }
- printk("\n");
- } else {
- printk(" disabled\n");
- if (put_user(0, umark->state)) {
- printk(KERN_WARNING
- "Marker in %s caused a fault\n",
- current->comm);
- goto error_unlock;
- }
- }
- mutex_lock(&current->group_leader->user_markers_mutex);
- hlist_add_head(&umark->hlist,
- &current->group_leader->user_markers);
- current->group_leader->user_markers_sequence++;
- mutex_unlock(&current->group_leader->user_markers_mutex);
- mutex_unlock(&markers_mutex);
- } else {
- mutex_lock(&current->group_leader->user_markers_mutex);
- free_user_marker(state,
- &current->group_leader->user_markers);
- current->group_leader->user_markers_sequence++;
- mutex_unlock(&current->group_leader->user_markers_mutex);
- }
- goto end;
-error_unlock:
- mutex_unlock(&markers_mutex);
-error:
- kfree(umark);
-end:
- return ret;
-}
-
-/*
- * Types :
- * string : 0
- */
-asmlinkage long sys_trace(int type, uint16_t id,
- char __user *ubuf)
-{
- long ret = -EPERM;
- char *page;
- int len;
-
- switch (type) {
- case 0: /* String */
- ret = -ENOMEM;
- page = (char *)__get_free_page(GFP_TEMPORARY);
- if (!page)
- goto string_out;
- len = strncpy_from_user(page, ubuf, PAGE_SIZE);
- if (len < 0) {
- ret = -EFAULT;
- goto string_err;
- }
- trace_mark(userspace, string, "string %s", page);
-string_err:
- free_page((unsigned long) page);
-string_out:
- break;
- default:
- break;
- }
- return ret;
-}
-
-static void marker_update_processes(void)
-{
- struct task_struct *g, *t;
-
- /*
- * markers_mutex is taken to protect the p->user_markers read.
- */
- mutex_lock(&markers_mutex);
- read_lock(&tasklist_lock);
- for_each_process(g) {
- WARN_ON(!thread_group_leader(g));
- if (hlist_empty(&g->user_markers))
- continue;
- if (strcmp(g->comm, "testprog") == 0)
- printk(KERN_DEBUG "set update pending for testprog\n");
- t = g;
- do {
- /* TODO : implement this thread flag in each arch. */
- set_tsk_thread_flag(t, TIF_MARKER_PENDING);
- } while ((t = next_thread(t)) != g);
- }
- read_unlock(&tasklist_lock);
- mutex_unlock(&markers_mutex);
-}
+//ust// asmlinkage long sys_marker(char __user *name, char __user *format,
+//ust// char __user *state, int reg)
+//ust// {
+//ust// struct user_marker *umark;
+//ust// long len;
+//ust// struct marker_entry *entry;
+//ust// int ret = 0;
+//ust//
+//ust// printk(KERN_DEBUG "Program %s %s marker [%p, %p]\n",
+//ust// current->comm, reg ? "registers" : "unregisters",
+//ust// name, state);
+//ust// if (reg) {
+//ust// umark = kmalloc(sizeof(struct user_marker), GFP_KERNEL);
+//ust// umark->name[MAX_USER_MARKER_NAME_LEN - 1] = '\0';
+//ust// umark->format[MAX_USER_MARKER_FORMAT_LEN - 1] = '\0';
+//ust// umark->state = state;
+//ust// len = strncpy_from_user(umark->name, name,
+//ust// MAX_USER_MARKER_NAME_LEN - 1);
+//ust// if (len < 0) {
+//ust// ret = -EFAULT;
+//ust// goto error;
+//ust// }
+//ust// len = strncpy_from_user(umark->format, format,
+//ust// MAX_USER_MARKER_FORMAT_LEN - 1);
+//ust// if (len < 0) {
+//ust// ret = -EFAULT;
+//ust// goto error;
+//ust// }
+//ust// printk(KERN_DEBUG "Marker name : %s, format : %s", umark->name,
+//ust// umark->format);
+//ust// mutex_lock(&markers_mutex);
+//ust// entry = get_marker("userspace", umark->name);
+//ust// if (entry) {
+//ust// if (entry->format &&
+//ust// strcmp(entry->format, umark->format) != 0) {
+//ust// printk(" error, wrong format in process %s",
+//ust// current->comm);
+//ust// ret = -EPERM;
+//ust// goto error_unlock;
+//ust// }
+//ust// printk(" %s", !!entry->refcount
+//ust// ? "enabled" : "disabled");
+//ust// if (put_user(!!entry->refcount, state)) {
+//ust// ret = -EFAULT;
+//ust// goto error_unlock;
+//ust// }
+//ust// printk("\n");
+//ust// } else {
+//ust// printk(" disabled\n");
+//ust// if (put_user(0, umark->state)) {
+//ust// printk(KERN_WARNING
+//ust// "Marker in %s caused a fault\n",
+//ust// current->comm);
+//ust// goto error_unlock;
+//ust// }
+//ust// }
+//ust// mutex_lock(&current->group_leader->user_markers_mutex);
+//ust// hlist_add_head(&umark->hlist,
+//ust// &current->group_leader->user_markers);
+//ust// current->group_leader->user_markers_sequence++;
+//ust// mutex_unlock(&current->group_leader->user_markers_mutex);
+//ust// mutex_unlock(&markers_mutex);
+//ust// } else {
+//ust// mutex_lock(&current->group_leader->user_markers_mutex);
+//ust// free_user_marker(state,
+//ust// &current->group_leader->user_markers);
+//ust// current->group_leader->user_markers_sequence++;
+//ust// mutex_unlock(&current->group_leader->user_markers_mutex);
+//ust// }
+//ust// goto end;
+//ust// error_unlock:
+//ust// mutex_unlock(&markers_mutex);
+//ust// error:
+//ust// kfree(umark);
+//ust// end:
+//ust// return ret;
+//ust// }
+//ust//
+//ust// /*
+//ust// * Types :
+//ust// * string : 0
+//ust// */
+//ust// asmlinkage long sys_trace(int type, uint16_t id,
+//ust// char __user *ubuf)
+//ust// {
+//ust// long ret = -EPERM;
+//ust// char *page;
+//ust// int len;
+//ust//
+//ust// switch (type) {
+//ust// case 0: /* String */
+//ust// ret = -ENOMEM;
+//ust// page = (char *)__get_free_page(GFP_TEMPORARY);
+//ust// if (!page)
+//ust// goto string_out;
+//ust// len = strncpy_from_user(page, ubuf, PAGE_SIZE);
+//ust// if (len < 0) {
+//ust// ret = -EFAULT;
+//ust// goto string_err;
+//ust// }
+//ust// trace_mark(userspace, string, "string %s", page);
+//ust// string_err:
+//ust// free_page((unsigned long) page);
+//ust// string_out:
+//ust// break;
+//ust// default:
+//ust// break;
+//ust// }
+//ust// return ret;
+//ust// }
+
+//ust// static void marker_update_processes(void)
+//ust// {
+//ust// struct task_struct *g, *t;
+//ust//
+//ust// /*
+//ust// * markers_mutex is taken to protect the p->user_markers read.
+//ust// */
+//ust// mutex_lock(&markers_mutex);
+//ust// read_lock(&tasklist_lock);
+//ust// for_each_process(g) {
+//ust// WARN_ON(!thread_group_leader(g));
+//ust// if (hlist_empty(&g->user_markers))
+//ust// continue;
+//ust// if (strcmp(g->comm, "testprog") == 0)
+//ust// printk(KERN_DEBUG "set update pending for testprog\n");
+//ust// t = g;
+//ust// do {
+//ust// /* TODO : implement this thread flag in each arch. */
+//ust// set_tsk_thread_flag(t, TIF_MARKER_PENDING);
+//ust// } while ((t = next_thread(t)) != g);
+//ust// }
+//ust// read_unlock(&tasklist_lock);
+//ust// mutex_unlock(&markers_mutex);
+//ust// }
/*
* Update current process.
.priority = 0,
};
-static int init_markers(void)
-{
- return register_module_notifier(&marker_module_nb);
-}
-__initcall(init_markers);
+//ust// static int init_markers(void)
+//ust// {
+//ust// return register_module_notifier(&marker_module_nb);
+//ust// }
+//ust// __initcall(init_markers);
+/* TODO: call marker_module_nb() when a library is linked at runtime (dlopen)? */
#endif /* CONFIG_MODULES */
-void ltt_dump_marker_state(struct ltt_trace_struct *trace)
-{
- struct marker_iter iter;
- struct ltt_probe_private_data call_data;
- const char *channel;
-
- call_data.trace = trace;
- call_data.serializer = NULL;
-
- marker_iter_reset(&iter);
- marker_iter_start(&iter);
- for (; iter.marker != NULL; marker_iter_next(&iter)) {
- if (!_imv_read(iter.marker->state))
- continue;
- channel = ltt_channels_get_name_from_index(
- iter.marker->channel_id);
- __trace_mark(0, metadata, core_marker_id,
- &call_data,
- "channel %s name %s event_id %hu "
- "int #1u%zu long #1u%zu pointer #1u%zu "
- "size_t #1u%zu alignment #1u%u",
- channel,
- iter.marker->name,
- iter.marker->event_id,
- sizeof(int), sizeof(long),
- sizeof(void *), sizeof(size_t),
- ltt_get_alignment());
- if (iter.marker->format)
- __trace_mark(0, metadata,
- core_marker_format,
- &call_data,
- "channel %s name %s format %s",
- channel,
- iter.marker->name,
- iter.marker->format);
- }
- marker_iter_stop(&iter);
-}
-EXPORT_SYMBOL_GPL(ltt_dump_marker_state);
+//ust// void ltt_dump_marker_state(struct ltt_trace_struct *trace)
+//ust// {
+//ust// struct marker_iter iter;
+//ust// struct ltt_probe_private_data call_data;
+//ust// const char *channel;
+//ust//
+//ust// call_data.trace = trace;
+//ust// call_data.serializer = NULL;
+//ust//
+//ust// marker_iter_reset(&iter);
+//ust// marker_iter_start(&iter);
+//ust// for (; iter.marker != NULL; marker_iter_next(&iter)) {
+//ust// if (!_imv_read(iter.marker->state))
+//ust// continue;
+//ust// channel = ltt_channels_get_name_from_index(
+//ust// iter.marker->channel_id);
+//ust// __trace_mark(0, metadata, core_marker_id,
+//ust// &call_data,
+//ust// "channel %s name %s event_id %hu "
+//ust// "int #1u%zu long #1u%zu pointer #1u%zu "
+//ust// "size_t #1u%zu alignment #1u%u",
+//ust// channel,
+//ust// iter.marker->name,
+//ust// iter.marker->event_id,
+//ust// sizeof(int), sizeof(long),
+//ust// sizeof(void *), sizeof(size_t),
+//ust// ltt_get_alignment());
+//ust// if (iter.marker->format)
+//ust// __trace_mark(0, metadata,
+//ust// core_marker_format,
+//ust// &call_data,
+//ust// "channel %s name %s format %s",
+//ust// channel,
+//ust// iter.marker->name,
+//ust// iter.marker->format);
+//ust// }
+//ust// marker_iter_stop(&iter);
+//ust// }
+//ust// EXPORT_SYMBOL_GPL(ltt_dump_marker_state);
* See Documentation/marker.txt.
*
* (C) Copyright 2006 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * (C) Copyright 2009 Pierre-Marc Fournier <pierre-marc dot fournier at polymtl dot ca>
*
* This file is released under the GPLv2.
* See the file COPYING for more details.
*/
#include <stdarg.h>
-#include <linux/types.h>
-#include <linux/immediate.h>
-#include <linux/ltt-channels.h>
-
-struct module;
-struct task_struct;
+//ust// #include <linux/types.h>
+#include "immediate.h"
+//ust// #include <linux/ltt-channels.h>
+#include "kernelcompat.h"
+#include "compiler.h"
+
+//ust// struct module;
+//ust// struct task_struct;
struct marker;
/**
const char *format; /* Marker format string, describing the
* variable argument list.
*/
- DEFINE_IMV(char, state);/* Immediate value state. */
+ DEFINE_IMV(char, state);/* Immediate value state. */
char ptype; /* probe type : 0 : single, 1 : multi */
/* Probe wrapper */
u16 channel_id; /* Numeric channel identifier, dynamic */
void *tp_cb; /* Optional tracepoint callback */
} __attribute__((aligned(8)));
-#ifdef CONFIG_MARKERS
+//ust// #ifdef CONFIG_MARKERS
#define _DEFINE_MARKER(channel, name, tp_name_str, tp_cb, format) \
static const char __mstrtab_##channel##_##name[] \
#define GET_MARKER(channel, name) (__mark_##channel##_##name)
-#else /* !CONFIG_MARKERS */
-#define DEFINE_MARKER(channel, name, tp_name, tp_cb, format)
-#define __trace_mark(generic, channel, name, call_private, format, args...) \
- __mark_check_format(format, ## args)
-#define __trace_mark_tp(channel, name, call_private, tp_name, tp_cb, \
- format, args...) \
- do { \
- void __check_tp_type(void) \
- { \
- register_trace_##tp_name(tp_cb); \
- } \
- __mark_check_format(format, ## args); \
- } while (0)
-static inline void marker_update_probe_range(struct marker *begin,
- struct marker *end)
-{ }
-#define GET_MARKER(channel, name)
-#endif /* CONFIG_MARKERS */
+//ust// #else /* !CONFIG_MARKERS */
+//ust// #define DEFINE_MARKER(channel, name, tp_name, tp_cb, format)
+//ust// #define __trace_mark(generic, channel, name, call_private, format, args...) \
+//ust// __mark_check_format(format, ## args)
+//ust// #define __trace_mark_tp(channel, name, call_private, tp_name, tp_cb, \
+//ust// format, args...) \
+//ust// do { \
+//ust// void __check_tp_type(void) \
+//ust// { \
+//ust// register_trace_##tp_name(tp_cb); \
+//ust// } \
+//ust// __mark_check_format(format, ## args); \
+//ust// } while (0)
+//ust// static inline void marker_update_probe_range(struct marker *begin,
+//ust// struct marker *end)
+//ust// { }
+//ust// #define GET_MARKER(channel, name)
+//ust// #endif /* CONFIG_MARKERS */
/**
* trace_mark - Marker using code patching
extern void marker_update_process(void);
extern int is_marker_enabled(const char *channel, const char *name);
-#ifdef CONFIG_MARKERS_USERSPACE
-extern void exit_user_markers(struct task_struct *p);
-#else
-static inline void exit_user_markers(struct task_struct *p)
-{
-}
-#endif
+//ust// #ifdef CONFIG_MARKERS_USERSPACE
+//ust// extern void exit_user_markers(struct task_struct *p);
+//ust// #else
+//ust// static inline void exit_user_markers(struct task_struct *p)
+//ust// {
+//ust// }
+//ust// #endif
#endif
#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/percpu.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-#include <linux/lockdep.h>
-#include <linux/completion.h>
+//ust// #include <linux/cache.h>
+//ust// #include <linux/spinlock.h>
+//ust// #include <linux/threads.h>
+//ust// #include <linux/percpu.h>
+//ust// #include <linux/cpumask.h>
+//ust// #include <linux/seqlock.h>
+//ust// #include <linux/lockdep.h>
+//ust// #include <linux/completion.h>
/**
* struct rcu_head - callback structure for use with RCU
void (*func)(struct rcu_head *head);
};
-#if defined(CONFIG_CLASSIC_RCU)
-#include <linux/rcuclassic.h>
-#elif defined(CONFIG_TREE_RCU)
-#include <linux/rcutree.h>
-#elif defined(CONFIG_PREEMPT_RCU)
-#include <linux/rcupreempt.h>
-#else
-#error "Unknown RCU implementation specified to kernel configuration"
-#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
-
-#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
-#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
-#define INIT_RCU_HEAD(ptr) do { \
- (ptr)->next = NULL; (ptr)->func = NULL; \
-} while (0)
-
-/**
- * rcu_read_lock - mark the beginning of an RCU read-side critical section.
- *
- * When synchronize_rcu() is invoked on one CPU while other CPUs
- * are within RCU read-side critical sections, then the
- * synchronize_rcu() is guaranteed to block until after all the other
- * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
- * on one CPU while other CPUs are within RCU read-side critical
- * sections, invocation of the corresponding RCU callback is deferred
- * until after the all the other CPUs exit their critical sections.
- *
- * Note, however, that RCU callbacks are permitted to run concurrently
- * with RCU read-side critical sections. One way that this can happen
- * is via the following sequence of events: (1) CPU 0 enters an RCU
- * read-side critical section, (2) CPU 1 invokes call_rcu() to register
- * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
- * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU
- * callback is invoked. This is legal, because the RCU read-side critical
- * section that was running concurrently with the call_rcu() (and which
- * therefore might be referencing something that the corresponding RCU
- * callback would free up) has completed before the corresponding
- * RCU callback is invoked.
- *
- * RCU read-side critical sections may be nested. Any deferred actions
- * will be deferred until the outermost RCU read-side critical section
- * completes.
- *
- * It is illegal to block while in an RCU read-side critical section.
- */
-#define rcu_read_lock() __rcu_read_lock()
-
-/**
- * rcu_read_unlock - marks the end of an RCU read-side critical section.
- *
- * See rcu_read_lock() for more information.
- */
-
-/*
- * So where is rcu_write_lock()? It does not exist, as there is no
- * way for writers to lock out RCU readers. This is a feature, not
- * a bug -- this property is what provides RCU's performance benefits.
- * Of course, writers must coordinate with each other. The normal
- * spinlock primitives work well for this, but any other technique may be
- * used as well. RCU does not care how the writers keep out of each
- * others' way, as long as they do so.
- */
-#define rcu_read_unlock() __rcu_read_unlock()
-
-/**
- * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
- *
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
- * consider completion of a softirq handler to be a quiescent state,
- * a process in RCU read-side critical section must be protected by
- * disabling softirqs. Read-side critical sections in interrupt context
- * can use just rcu_read_lock().
- *
- */
-#define rcu_read_lock_bh() __rcu_read_lock_bh()
-
-/*
- * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
- *
- * See rcu_read_lock_bh() for more information.
- */
-#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
-
-/**
- * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
- *
- * Should be used with either
- * - synchronize_sched()
- * or
- * - call_rcu_sched() and rcu_barrier_sched()
- * on the write-side to insure proper synchronization.
- */
-#define rcu_read_lock_sched() preempt_disable()
-#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
-
-/*
- * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
- *
- * See rcu_read_lock_sched for more information.
- */
-#define rcu_read_unlock_sched() preempt_enable()
-#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
-
-
-
-/**
- * rcu_dereference - fetch an RCU-protected pointer in an
- * RCU read-side critical section. This pointer may later
- * be safely dereferenced.
- *
- * Inserts memory barriers on architectures that require them
- * (currently only the Alpha), and, more importantly, documents
- * exactly which pointers are protected by RCU.
- */
-
-#define rcu_dereference(p) ({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- smp_read_barrier_depends(); \
- (_________p1); \
- })
-
-/**
- * rcu_assign_pointer - assign (publicize) a pointer to a newly
- * initialized structure that will be dereferenced by RCU read-side
- * critical sections. Returns the value assigned.
- *
- * Inserts memory barriers on architectures that require them
- * (pretty much all of them other than x86), and also prevents
- * the compiler from reordering the code that initializes the
- * structure after the pointer assignment. More importantly, this
- * call documents which pointers will be dereferenced by RCU read-side
- * code.
- */
-
-#define rcu_assign_pointer(p, v) \
- ({ \
- if (!__builtin_constant_p(v) || \
- ((v) != NULL)) \
- smp_wmb(); \
- (p) = (v); \
- })
-
-/* Infrastructure to implement the synchronize_() primitives. */
-
-struct rcu_synchronize {
- struct rcu_head head;
- struct completion completion;
-};
-
-extern void wakeme_after_rcu(struct rcu_head *head);
-
-/**
- * synchronize_sched - block until all CPUs have exited any non-preemptive
- * kernel code sequences.
- *
- * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns. However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
- *
- * This primitive provides the guarantees made by the (now removed)
- * synchronize_kernel() API. In contrast, synchronize_rcu() only
- * guarantees that rcu_read_lock() sections will have completed.
- * In "classic RCU", these two guarantees happen to be one and
- * the same, but can differ in realtime RCU implementations.
- */
-#define synchronize_sched() __synchronize_sched()
-
-/**
- * call_rcu - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-extern void call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-
-/**
- * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by :
- * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
- * OR
- * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- * These may be nested.
- */
-extern void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-
-/* Exported common interfaces */
-extern void synchronize_rcu(void);
-extern void rcu_barrier(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
-
-/* Internal to kernel */
-extern void rcu_init(void);
-extern int rcu_needs_cpu(int cpu);
-
+//ust// #if defined(CONFIG_CLASSIC_RCU)
+//ust// #include <linux/rcuclassic.h>
+//ust// #elif defined(CONFIG_TREE_RCU)
+//ust// #include <linux/rcutree.h>
+//ust// #elif defined(CONFIG_PREEMPT_RCU)
+//ust// #include <linux/rcupreempt.h>
+//ust// #else
+//ust// #error "Unknown RCU implementation specified to kernel configuration"
+//ust// #endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
+//ust//
+//ust// #define RCU_HEAD_INIT { .next = NULL, .func = NULL }
+//ust// #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
+//ust// #define INIT_RCU_HEAD(ptr) do { \
+//ust// (ptr)->next = NULL; (ptr)->func = NULL; \
+//ust// } while (0)
+//ust//
+//ust// /**
+//ust// * rcu_read_lock - mark the beginning of an RCU read-side critical section.
+//ust// *
+//ust// * When synchronize_rcu() is invoked on one CPU while other CPUs
+//ust// * are within RCU read-side critical sections, then the
+//ust// * synchronize_rcu() is guaranteed to block until after all the other
+//ust// * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
+//ust// * on one CPU while other CPUs are within RCU read-side critical
+//ust// * sections, invocation of the corresponding RCU callback is deferred
+//ust// * until after the all the other CPUs exit their critical sections.
+//ust// *
+//ust// * Note, however, that RCU callbacks are permitted to run concurrently
+//ust// * with RCU read-side critical sections. One way that this can happen
+//ust// * is via the following sequence of events: (1) CPU 0 enters an RCU
+//ust// * read-side critical section, (2) CPU 1 invokes call_rcu() to register
+//ust// * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
+//ust// * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU
+//ust// * callback is invoked. This is legal, because the RCU read-side critical
+//ust// * section that was running concurrently with the call_rcu() (and which
+//ust// * therefore might be referencing something that the corresponding RCU
+//ust// * callback would free up) has completed before the corresponding
+//ust// * RCU callback is invoked.
+//ust// *
+//ust// * RCU read-side critical sections may be nested. Any deferred actions
+//ust// * will be deferred until the outermost RCU read-side critical section
+//ust// * completes.
+//ust// *
+//ust// * It is illegal to block while in an RCU read-side critical section.
+//ust// */
+//ust// #define rcu_read_lock() __rcu_read_lock()
+//ust//
+//ust// /**
+//ust// * rcu_read_unlock - marks the end of an RCU read-side critical section.
+//ust// *
+//ust// * See rcu_read_lock() for more information.
+//ust// */
+//ust//
+//ust// /*
+//ust// * So where is rcu_write_lock()? It does not exist, as there is no
+//ust// * way for writers to lock out RCU readers. This is a feature, not
+//ust// * a bug -- this property is what provides RCU's performance benefits.
+//ust// * Of course, writers must coordinate with each other. The normal
+//ust// * spinlock primitives work well for this, but any other technique may be
+//ust// * used as well. RCU does not care how the writers keep out of each
+//ust// * others' way, as long as they do so.
+//ust// */
+//ust// #define rcu_read_unlock() __rcu_read_unlock()
+//ust//
+//ust// /**
+//ust// * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
+//ust// *
+//ust// * This is equivalent of rcu_read_lock(), but to be used when updates
+//ust// * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
+//ust// * consider completion of a softirq handler to be a quiescent state,
+//ust// * a process in RCU read-side critical section must be protected by
+//ust// * disabling softirqs. Read-side critical sections in interrupt context
+//ust// * can use just rcu_read_lock().
+//ust// *
+//ust// */
+//ust// #define rcu_read_lock_bh() __rcu_read_lock_bh()
+//ust//
+//ust// /*
+//ust// * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
+//ust// *
+//ust// * See rcu_read_lock_bh() for more information.
+//ust// */
+//ust// #define rcu_read_unlock_bh() __rcu_read_unlock_bh()
+//ust//
+//ust// /**
+//ust// * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
+//ust// *
+//ust// * Should be used with either
+//ust// * - synchronize_sched()
+//ust// * or
+//ust// * - call_rcu_sched() and rcu_barrier_sched()
+//ust// * on the write-side to insure proper synchronization.
+//ust// */
+//ust// #define rcu_read_lock_sched() preempt_disable()
+//ust// #define rcu_read_lock_sched_notrace() preempt_disable_notrace()
+//ust//
+//ust// /*
+//ust// * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
+//ust// *
+//ust// * See rcu_read_lock_sched for more information.
+//ust// */
+//ust// #define rcu_read_unlock_sched() preempt_enable()
+//ust// #define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
+//ust//
+//ust//
+//ust//
+//ust// /**
+//ust// * rcu_dereference - fetch an RCU-protected pointer in an
+//ust// * RCU read-side critical section. This pointer may later
+//ust// * be safely dereferenced.
+//ust// *
+//ust// * Inserts memory barriers on architectures that require them
+//ust// * (currently only the Alpha), and, more importantly, documents
+//ust// * exactly which pointers are protected by RCU.
+//ust// */
+//ust//
+//ust// #define rcu_dereference(p) ({ \
+//ust// typeof(p) _________p1 = ACCESS_ONCE(p); \
+//ust// smp_read_barrier_depends(); \
+//ust// (_________p1); \
+//ust// })
+//ust//
+//ust// /**
+//ust// * rcu_assign_pointer - assign (publicize) a pointer to a newly
+//ust// * initialized structure that will be dereferenced by RCU read-side
+//ust// * critical sections. Returns the value assigned.
+//ust// *
+//ust// * Inserts memory barriers on architectures that require them
+//ust// * (pretty much all of them other than x86), and also prevents
+//ust// * the compiler from reordering the code that initializes the
+//ust// * structure after the pointer assignment. More importantly, this
+//ust// * call documents which pointers will be dereferenced by RCU read-side
+//ust// * code.
+//ust// */
+//ust//
+//ust// #define rcu_assign_pointer(p, v) \
+//ust// ({ \
+//ust// if (!__builtin_constant_p(v) || \
+//ust// ((v) != NULL)) \
+//ust// smp_wmb(); \
+//ust// (p) = (v); \
+//ust// })
+//ust//
+//ust// /* Infrastructure to implement the synchronize_() primitives. */
+//ust//
+//ust// struct rcu_synchronize {
+//ust// struct rcu_head head;
+//ust// struct completion completion;
+//ust// };
+//ust//
+//ust// extern void wakeme_after_rcu(struct rcu_head *head);
+//ust//
+//ust// /**
+//ust// * synchronize_sched - block until all CPUs have exited any non-preemptive
+//ust// * kernel code sequences.
+//ust// *
+//ust// * This means that all preempt_disable code sequences, including NMI and
+//ust// * hardware-interrupt handlers, in progress on entry will have completed
+//ust// * before this primitive returns. However, this does not guarantee that
+//ust// * softirq handlers will have completed, since in some kernels, these
+//ust// * handlers can run in process context, and can block.
+//ust// *
+//ust// * This primitive provides the guarantees made by the (now removed)
+//ust// * synchronize_kernel() API. In contrast, synchronize_rcu() only
+//ust// * guarantees that rcu_read_lock() sections will have completed.
+//ust// * In "classic RCU", these two guarantees happen to be one and
+//ust// * the same, but can differ in realtime RCU implementations.
+//ust// */
+//ust// #define synchronize_sched() __synchronize_sched()
+//ust//
+//ust// /**
+//ust// * call_rcu - Queue an RCU callback for invocation after a grace period.
+//ust// * @head: structure to be used for queueing the RCU updates.
+//ust// * @func: actual update function to be invoked after the grace period
+//ust// *
+//ust// * The update function will be invoked some time after a full grace
+//ust// * period elapses, in other words after all currently executing RCU
+//ust// * read-side critical sections have completed. RCU read-side critical
+//ust// * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+//ust// * and may be nested.
+//ust// */
+//ust// extern void call_rcu(struct rcu_head *head,
+//ust// void (*func)(struct rcu_head *head));
+//ust//
+//ust// /**
+//ust// * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
+//ust// * @head: structure to be used for queueing the RCU updates.
+//ust// * @func: actual update function to be invoked after the grace period
+//ust// *
+//ust// * The update function will be invoked some time after a full grace
+//ust// * period elapses, in other words after all currently executing RCU
+//ust// * read-side critical sections have completed. call_rcu_bh() assumes
+//ust// * that the read-side critical sections end on completion of a softirq
+//ust// * handler. This means that read-side critical sections in process
+//ust// * context must not be interrupted by softirqs. This interface is to be
+//ust// * used when most of the read-side critical sections are in softirq context.
+//ust// * RCU read-side critical sections are delimited by :
+//ust// * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
+//ust// * OR
+//ust// * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+//ust// * These may be nested.
+//ust// */
+//ust// extern void call_rcu_bh(struct rcu_head *head,
+//ust// void (*func)(struct rcu_head *head));
+//ust//
+//ust// /* Exported common interfaces */
+//ust// extern void synchronize_rcu(void);
+//ust// extern void rcu_barrier(void);
+//ust// extern void rcu_barrier_bh(void);
+//ust// extern void rcu_barrier_sched(void);
+//ust//
+//ust// /* Internal to kernel */
+//ust// extern void rcu_init(void);
+//ust// extern int rcu_needs_cpu(int cpu);
+//ust//
#endif /* __LINUX_RCUPDATE_H */
*/
#include <linux/limits.h>
-#include <linux/kref.h>
-#include <linux/list.h>
+//ust// #include <linux/kref.h>
+//ust// #include <linux/list.h>
+#include <kernelcompat.h>
+
+#include <kref.h>
#define EVENTS_PER_CHANNEL 65536
#ifndef KERNELCOMPAT_H
#define KERNELCOMPAT_H
+#include "compiler.h"
+
#include <string.h>
#define container_of(ptr, type, member) ({ \
#define KERN_DEBUG
#define KERN_NOTICE
+/* ERROR OPS */
+
+#define MAX_ERRNO 4095
+
+#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+
static inline void *ERR_PTR(long error)
{
- return (void *) error;
+ return (void *) error;
}
+static inline long PTR_ERR(const void *ptr)
+{
+ return (long) ptr;
+}
+
+static inline long IS_ERR(const void *ptr)
+{
+ return IS_ERR_VALUE((unsigned long)ptr);
+}
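+
+/*
+ * Usage sketch: an errno value travels inside a pointer and is decoded
+ * at the call site, e.g.
+ *
+ *	void *p = ERR_PTR(-ENOMEM);
+ *	if (IS_ERR(p))
+ *		return (int) PTR_ERR(p);
+ */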
+
+
+/* FIXED SIZE INTEGERS */
#include <stdint.h>
+typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
+typedef uint64_t u64;
#include <pthread.h>
#define printk(fmt, args...) printf(fmt, ## args)
-#define smp_rmb()
-#define smp_wmb()
-#define read_barrier_depends()
-#define smp_read_barrier_depends()
-#define rcu_assign_pointer(a, b)
+/* MEMORY BARRIERS */
+
+/* NOTE: no-op placeholders for now; they provide no real ordering or
+ * publication guarantees. */
+#define smp_rmb() do {} while(0)
+#define smp_wmb() do {} while(0)
+#define smp_mb() do {} while(0)
+#define smp_mb__after_atomic_inc() do {} while(0)
+
+#define read_barrier_depends() do {} while(0)
+#define smp_read_barrier_depends() do {} while(0)
+
+/* RCU */
+#define rcu_assign_pointer(a, b) do {} while(0)
+/* ATOMICITY */
+#include <signal.h>
+
+/* NOTE: sig_atomic_t is only atomic with respect to signal handlers in
+ * the same thread; these helpers are not safe against concurrent threads. */
+typedef struct { sig_atomic_t counter; } atomic_t;
+
+static inline int atomic_dec_and_test(atomic_t *p)
+{
+ (p->counter)--;
+ return !p->counter;
+}
+
+static inline void atomic_set(atomic_t *p, int v)
+{
+ p->counter = v;
+}
+
+static inline void atomic_inc(atomic_t *p)
+{
+ p->counter++;
+}
+
+static inline int atomic_read(atomic_t *p)
+{
+ return p->counter;
+}
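+
+/*
+ * Usage sketch, mirroring how kref.c below drives these helpers:
+ * initialize with atomic_set(&cnt, 1), take references with
+ * atomic_inc(&cnt), and release with atomic_dec_and_test(&cnt),
+ * which returns true only once the count reaches zero.
+ */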
+/* CACHE */
+#define ____cacheline_aligned
#endif /* KERNELCOMPAT_H */
*
*/
-#include <linux/kref.h>
-#include <linux/module.h>
+// #include <kernelcompat.h> /* already pulled in via kref.h */
+#include <kref.h>
+//ust// #include <linux/module.h>
+#include "usterr.h"
+#include "compiler.h"
/**
* kref_set - initialize object and set refcount to requested number.
int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
WARN_ON(release == NULL);
- WARN_ON(release == (void (*)(struct kref *))kfree);
+//ust// WARN_ON(release == (void (*)(struct kref *))kfree);
if (atomic_dec_and_test(&kref->refcount)) {
release(kref);
return 0;
}
-EXPORT_SYMBOL(kref_set);
-EXPORT_SYMBOL(kref_init);
-EXPORT_SYMBOL(kref_get);
-EXPORT_SYMBOL(kref_put);
+//ust// EXPORT_SYMBOL(kref_set);
+//ust// EXPORT_SYMBOL(kref_init);
+//ust// EXPORT_SYMBOL(kref_get);
+//ust// EXPORT_SYMBOL(kref_put);
/*
- * kref.c - library routines for handling generic reference counted objects
+ * kref.h - library routines for handling generic reference counted objects
*
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Corp.
#ifndef _KREF_H_
#define _KREF_H_
-#include <linux/types.h>
-#include <asm/atomic.h>
+//ust// #include <linux/types.h>
+//ust// #include <asm/atomic.h>
+#include <kernelcompat.h>
struct kref {
atomic_t refcount;
+#ifndef USTERR_H
+#define USTERR_H
+
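+/* NOTE: these macros assume stdio.h (fprintf/stderr) and the unlikely()
+ * macro from compiler.h are in scope before this header is included. */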
#define DBG(fmt, args...) fprintf(stderr, fmt "\n", ## args)
#define WARN(fmt, args...) fprintf(stderr, "usertrace: WARNING: " fmt "\n", ## args)
#define ERR(fmt, args...) fprintf(stderr, "usertrace: ERROR: " fmt "\n", ## args)
#define BUG_ON(condition) do { if (unlikely(condition)) ERR("condition not respected (BUG)"); } while(0)
#define WARN_ON(condition) do { if (unlikely(condition)) WARN("condition not respected"); } while(0)
+#endif /* USTERR_H */