#define DEFINE_MUTEX(m) pthread_mutex_t (m) = PTHREAD_MUTEX_INITIALIZER;
#define DECLARE_MUTEX(m) extern pthread_mutex_t (m);
-#define mutex_lock(m) pthread_mutex_lock(m)
-
-#define mutex_unlock(m) pthread_mutex_unlock(m)
-
-
/* MALLOCATION */
#define zmalloc(s) calloc(1, s)
kref_init(&chan->kref);
- mutex_lock(&ust_buffers_channels_mutex);
+ pthread_mutex_lock(&ust_buffers_channels_mutex);
for(i=0; i<chan->n_cpus; i++) {
result = ust_buffers_open_buf(chan, i);
if (result == -1)
goto error;
}
list_add(&chan->list, &ust_buffers_channels);
- mutex_unlock(&ust_buffers_channels_mutex);
+ pthread_mutex_unlock(&ust_buffers_channels_mutex);
return 0;
}
kref_put(&chan->kref, ust_buffers_destroy_channel);
- mutex_unlock(&ust_buffers_channels_mutex);
+ pthread_mutex_unlock(&ust_buffers_channels_mutex);
return -1;
}
if(!chan)
return;
- mutex_lock(&ust_buffers_channels_mutex);
+ pthread_mutex_lock(&ust_buffers_channels_mutex);
for(i=0; i<chan->n_cpus; i++) {
/* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
* initialize to NULL so we cannot use this check. Should we? */
list_del(&chan->list);
kref_put(&chan->kref, ust_buffers_destroy_channel);
- mutex_unlock(&ust_buffers_channels_mutex);
+ pthread_mutex_unlock(&ust_buffers_channels_mutex);
}
/*
struct ltt_channel_setting *setting;
int ret = 0;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
if (setting) {
if (uatomic_read(&setting->kref.refcount) == 0)
init_kref:
kref_init(&setting->kref);
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_register);
struct ltt_channel_setting *setting;
int ret = 0;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
ret = -ENOENT;
}
kref_put(&setting->kref, release_channel_setting);
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);
struct ltt_channel_setting *setting;
int ret = 0;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
ret = -ENOENT;
setting->subbuf_size = subbuf_size;
setting->subbuf_cnt = subbuf_cnt;
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);
struct ust_channel *channel = NULL;
struct ltt_channel_setting *iter;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
if (!free_index) {
WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
goto end;
channel[iter->index].channel_name = iter->name;
}
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return channel;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
void ltt_channels_trace_free(struct ust_channel *channels)
{
lock_markers();
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
free(channels);
kref_put(&index_kref, release_trace_channel);
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
unlock_markers();
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
{
int ret;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
ret = _ltt_channels_get_event_id(channel, name);
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}
int comparison;
struct ltt_available_probe *iter;
- mutex_lock(&probes_mutex);
+ pthread_mutex_lock(&probes_mutex);
list_for_each_entry_reverse(iter, &probes_registered_list, node) {
comparison = strcmp(pdata->name, iter->name);
if (!comparison) {
/* Should be added at the head of the list */
list_add(&pdata->node, &probes_registered_list);
end:
- mutex_unlock(&probes_mutex);
+ pthread_mutex_unlock(&probes_mutex);
return ret;
}
int ret = 0;
struct ltt_active_marker *amark, *tmp;
- mutex_lock(&probes_mutex);
+ pthread_mutex_lock(&probes_mutex);
list_for_each_entry_safe(amark, tmp, &markers_loaded_list, node) {
if (amark->probe == pdata) {
ret = marker_probe_unregister_private_data(
}
list_del(&pdata->node);
end:
- mutex_unlock(&probes_mutex);
+ pthread_mutex_unlock(&probes_mutex);
return ret;
}
struct ltt_available_probe *probe;
ltt_lock_traces();
- mutex_lock(&probes_mutex);
+ pthread_mutex_lock(&probes_mutex);
probe = get_probe_from_name(pname);
if (!probe) {
ret = -ENOENT;
else
list_add(&pdata->node, &markers_loaded_list);
end:
- mutex_unlock(&probes_mutex);
+ pthread_mutex_unlock(&probes_mutex);
ltt_unlock_traces();
return ret;
}
struct ltt_available_probe *probe;
int ret = 0;
- mutex_lock(&probes_mutex);
+ pthread_mutex_lock(&probes_mutex);
probe = get_probe_from_name(pname);
if (!probe) {
ret = -ENOENT;
free(pdata);
}
end:
- mutex_unlock(&probes_mutex);
+ pthread_mutex_unlock(&probes_mutex);
return ret;
}
void lock_markers(void)
{
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
}
void unlock_markers(void)
{
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
}
/*
{
struct marker_entry *entry;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
return entry && !!entry->refcount;
}
struct marker *iter;
struct marker_entry *mark_entry;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
for (iter = begin; iter < end; iter++) {
mark_entry = get_marker(iter->channel, iter->name);
if (mark_entry) {
disable_marker(iter);
}
}
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
}
static void lib_update_markers(void)
struct lib *lib;
/* FIXME: we should probably take a mutex here on libs */
-//ust// mutex_lock(&module_mutex);
+//ust// pthread_mutex_lock(&module_mutex);
list_for_each_entry(lib, &libs, list)
marker_update_probe_range(lib->markers_start,
lib->markers_start + lib->markers_count);
-//ust// mutex_unlock(&module_mutex);
+//ust// pthread_mutex_unlock(&module_mutex);
}
/*
struct marker_probe_closure *old;
int first_probe = 0;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry) {
first_probe = 1;
else
goto end;
}
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
/* Activate marker if necessary */
marker_update_probes();
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
ret_err = remove_marker(channel, name);
WARN_ON(ret_err);
end:
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_register);
struct marker_probe_closure *old;
int ret = -ENOENT;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
//ust// rcu_barrier_sched();
old = marker_entry_remove_probe(entry, probe, probe_private);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
marker_update_probes();
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
remove_marker(channel, name); /* Ignore busy error message */
ret = 0;
end:
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister);
struct marker_probe_closure *old;
char *channel = NULL, *name = NULL;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker_from_private_data(probe, probe_private);
if (!entry) {
ret = -ENOENT;
old = marker_entry_remove_probe(entry, NULL, probe_private);
channel = strdup(entry->channel);
name = strdup(entry->name);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
marker_update_probes();
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
/* Ignore busy error message */
remove_marker(channel, name);
end:
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
free(channel);
free(name);
return ret;
struct lib *iter_lib;
int found = 0;
-//ust// mutex_lock(&module_mutex);
+//ust// pthread_mutex_lock(&module_mutex);
list_for_each_entry(iter_lib, &libs, list) {
if (iter_lib < iter->lib)
continue;
break;
}
}
-//ust// mutex_unlock(&module_mutex);
+//ust// pthread_mutex_unlock(&module_mutex);
return found;
}
//ust// struct hlist_node *pos;
//ust// struct marker_entry *entry;
//ust//
-//ust// mutex_lock(&markers_mutex);
-//ust// mutex_lock(&current->group_leader->user_markers_mutex);
+//ust// pthread_mutex_lock(&markers_mutex);
+//ust// pthread_mutex_lock(&current->group_leader->user_markers_mutex);
//ust// if (strcmp(current->comm, "testprog") == 0)
//ust// DBG("do update pending for testprog");
//ust// hlist_for_each_entry(umark, pos,
//ust// }
//ust// }
//ust// clear_thread_flag(TIF_MARKER_PENDING);
-//ust// mutex_unlock(&current->group_leader->user_markers_mutex);
-//ust// mutex_unlock(&markers_mutex);
+//ust// pthread_mutex_unlock(&current->group_leader->user_markers_mutex);
+//ust// pthread_mutex_unlock(&markers_mutex);
//ust// }
/*
struct hlist_node *pos, *n;
if (thread_group_leader(p)) {
- mutex_lock(&markers_mutex);
- mutex_lock(&p->user_markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&p->user_markers_mutex);
hlist_for_each_entry_safe(umark, pos, n, &p->user_markers,
hlist)
free(umark);
INIT_HLIST_HEAD(&p->user_markers);
p->user_markers_sequence++;
- mutex_unlock(&p->user_markers_mutex);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&p->user_markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
}
}
{
struct marker_entry *entry;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
return entry && !!entry->refcount;
}
struct hlist_node *node;
unsigned int i;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
call_data.trace = trace;
call_data.serializer = NULL;
entry->format);
}
}
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
}
//ust// EXPORT_SYMBOL_GPL(ltt_dump_marker_state);
lock_markers();
/* FIXME: we should probably take a mutex here on libs */
-//ust// mutex_lock(&module_mutex);
+//ust// pthread_mutex_lock(&module_mutex);
list_for_each_entry(lib, &libs, list) {
if(lib->markers_start == markers_start) {
struct lib *lib2free = lib;
struct tracepoint *iter;
struct tracepoint_entry *mark_entry;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
for (iter = begin; iter < end; iter++) {
mark_entry = get_tracepoint(iter->name);
if (mark_entry) {
disable_tracepoint(iter);
}
}
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
}
static void lib_update_tracepoints(void)
{
struct tracepoint_lib *lib;
-//ust// mutex_lock(&module_mutex);
+//ust// pthread_mutex_lock(&module_mutex);
list_for_each_entry(lib, &libs, list)
tracepoint_update_probe_range(lib->tracepoints_start,
lib->tracepoints_start + lib->tracepoints_count);
-//ust// mutex_unlock(&module_mutex);
+//ust// pthread_mutex_unlock(&module_mutex);
}
/*
{
void *old;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
old = tracepoint_add_probe(name, probe);
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
if (IS_ERR(old))
return PTR_ERR(old);
{
void *old;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
old = tracepoint_remove_probe(name, probe);
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
if (IS_ERR(old))
return PTR_ERR(old);
{
void *old;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
old = tracepoint_add_probe(name, probe);
if (IS_ERR(old)) {
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
return PTR_ERR(old);
}
tracepoint_add_old_probes(old);
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
{
void *old;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
old = tracepoint_remove_probe(name, probe);
if (IS_ERR(old)) {
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
return PTR_ERR(old);
}
tracepoint_add_old_probes(old);
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
LIST_HEAD(release_probes);
struct tp_probes *pos, *next;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
if (!need_update) {
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
return;
}
if (!list_empty(&old_probes))
list_replace_init(&old_probes, &release_probes);
need_update = 0;
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
tracepoint_update_probes();
list_for_each_entry_safe(pos, next, &release_probes, u.list) {
struct tracepoint_lib *iter_lib;
int found = 0;
-//ust// mutex_lock(&module_mutex);
+//ust// pthread_mutex_lock(&module_mutex);
list_for_each_entry(iter_lib, &libs, list) {
if (iter_lib < iter->lib)
continue;
break;
}
}
-//ust// mutex_unlock(&module_mutex);
+//ust// pthread_mutex_unlock(&module_mutex);
return found;
}
pl->tracepoints_count = tracepoints_count;
/* FIXME: maybe protect this with its own mutex? */
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
list_add(&pl->list, &libs);
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);
{
struct tracepoint_lib *lib;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
list_for_each_entry(lib, &libs, list) {
if(lib->tracepoints_start == tracepoints_start) {
}
}
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
return 0;
}
void ltt_lock_traces(void)
{
- mutex_lock(&ltt_traces_mutex);
+ pthread_mutex_lock(&ltt_traces_mutex);
}
void ltt_unlock_traces(void)
{
- mutex_unlock(&ltt_traces_mutex);
+ pthread_mutex_unlock(&ltt_traces_mutex);
}
//ust// DEFINE_PER_CPU(unsigned int, ltt_nesting);