struct marker_addr *markers_addr_start;
#endif
int markers_count;
- struct list_head list;
+ struct cds_list_head list;
};
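For readers unfamiliar with liburcu, a minimal illustrative sketch (not part of the patch; struct and list names are invented) of the urcu/list.h API these members are converted to:

/* Illustrative only -- not patch content. Shows the urcu/list.h
 * replacements for the kernel-style list API. */
#include <urcu/list.h>

struct example_lib {
	struct cds_list_head list;		/* was: struct list_head */
	int count;
};

static CDS_LIST_HEAD(example_libs);		/* was: LIST_HEAD() */

static void example_register(struct example_lib *lib)
{
	cds_list_add(&lib->list, &example_libs);	/* was: list_add() */
}

static void example_unregister(struct example_lib *lib)
{
	struct example_lib *iter, *tmp;

	/* was: list_for_each_entry_safe() */
	cds_list_for_each_entry_safe(iter, tmp, &example_libs, list) {
		if (iter == lib)
			cds_list_del(&iter->list);	/* was: list_del() */
	}
}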
extern int marker_register_lib(struct marker *markers_start, int markers_count);
const char *format;
marker_probe_func *probe_func;
ltt_serialize_cb callbacks[LTT_NR_CALLBACKS];
- struct list_head node; /* registered probes list */
+ struct cds_list_head node; /* registered probes list */
};
extern int ltt_probe_register(struct ltt_available_probe *pdata);
extern __thread long ust_reg_stack[500];
extern volatile __thread long *ust_reg_stack_ptr;
-#define ____cacheline_aligned __attribute__((aligned(CACHE_LINE_SIZE)))
+#define ____cacheline_aligned __attribute__((aligned(CAA_CACHE_LINE_SIZE)))
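As an aside (an assumption based on the liburcu headers, not stated in the patch): CAA_CACHE_LINE_SIZE is provided by urcu/arch.h, so the macro above only needs that header. A minimal sketch:

/* Illustrative only: aligning a structure on a cache line with the
 * liburcu-provided constant. */
#include <urcu/arch.h>

struct example_counter {
	long value;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));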
#ifdef __i386
struct tracepoint_lib {
struct tracepoint *tracepoints_start;
int tracepoints_count;
- struct list_head list;
+ struct cds_list_head list;
};
extern int tracepoint_register_lib(struct tracepoint *tracepoints_start,
struct trace_event_lib {
struct trace_event *trace_events_start;
int trace_events_count;
- struct list_head list;
+ struct cds_list_head list;
};
struct trace_event_iter {
struct libustd_callbacks *callbacks;
int quit_program;
int is_init;
- struct list_head connections;
+ struct cds_list_head connections;
int epoll_fd;
struct ustcomm_sock *listen_sock;
char *sock_path;
static DEFINE_MUTEX(ust_buffers_channels_mutex);
-static LIST_HEAD(ust_buffers_channels);
+static CDS_LIST_HEAD(ust_buffers_channels);
static int get_n_cpus(void)
{
if (result == -1)
goto error;
}
- list_add(&chan->list, &ust_buffers_channels);
+ cds_list_add(&chan->list, &ust_buffers_channels);
pthread_mutex_unlock(&ust_buffers_channels_mutex);
return 0;
ust_buffers_close_buf(chan->buf[i]);
}
- list_del(&chan->list);
+ cds_list_del(&chan->list);
kref_put(&chan->kref, ust_buffers_destroy_channel);
pthread_mutex_unlock(&ust_buffers_channels_mutex);
}
header->cycle_count_begin = tsc;
header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
/* FIXME: add memory barrier? */
ltt_write_trace_header(channel->trace, header);
}
}
/*
* Promote compiler barrier to a smp_mb().
* For the specific LTTng case, this IPI call should be removed if the
* architecture does not reorder writes. This should eventually be provided by
* a separate architecture-specific infrastructure.
* this is OK because then there is no wmb to execute there.
* If our thread is executing on the same CPU as the one the buffer
* belongs to, we don't have to synchronize it at all. If we are
* migrated, the scheduler will take care of the memory barriers.
* Normally, smp_call_function_single() should ensure program order when
* executing the remote function, which implies that it surrounds the
* function execution with :
* smp_mb()
*
* However, smp_call_function_single() does not seem to clearly execute
* such barriers. It depends on spinlock semantics to provide the barrier
* before executing the IPI and, when busy-looping, csd_lock_wait only
* executes smp_mb() when it has to wait for the other CPU.
*
* required ourself, even if duplicated. It has no performance impact
* anyway.
*
- * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
+ * smp_mb() is needed because cmm_smp_rmb() and cmm_smp_wmb() only order read vs
* read and write vs write. They do not ensure core synchronization. We
* really have to ensure total order between the 3 barriers running on
* the 2 CPUs.
*/
//ust// #ifdef LTT_NO_IPI_BARRIER
* Local rmb to match the remote wmb to read the commit count before the
* buffer data and the write offset.
*/
- smp_rmb();
+ cmm_smp_rmb();
//ust// #else
//ust// if (raw_smp_processor_id() != buf->cpu) {
//ust// smp_mb(); /* Total order with IPI handler smp_mb() */
/*
* Must write slot data before incrementing commit count.
- * This compiler barrier is upgraded into a smp_wmb() by the IPI
- * sent by get_subbuf() when it does its smp_rmb().
+ * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
+ * sent by get_subbuf() when it does its cmm_smp_rmb().
*/
- smp_wmb();
+ cmm_smp_wmb();
uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
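The comments around these hunks describe a publish/consume pairing; below is a minimal sketch of that pattern with the renamed primitives (all names other than the cmm_*/CMM_* calls are invented for illustration):

/* Illustrative only: the producer writes slot data, orders it with
 * cmm_smp_wmb(), then publishes the commit count; the consumer reads
 * the commit count, orders with cmm_smp_rmb(), then reads the data. */
#include <string.h>
#include <urcu/arch.h>
#include <urcu/system.h>

static char example_slot[64];
static unsigned long example_commit_count;

static void example_produce(const char *data, size_t len)
{
	if (len > sizeof(example_slot))
		len = sizeof(example_slot);
	memcpy(example_slot, data, len);
	cmm_smp_wmb();		/* slot data before commit count */
	CMM_STORE_SHARED(example_commit_count, example_commit_count + 1);
}

static int example_consume(char *out, unsigned long last_seen)
{
	if (CMM_LOAD_SHARED(example_commit_count) == last_seen)
		return 0;	/* nothing new to read */
	cmm_smp_rmb();		/* commit count before slot data */
	memcpy(out, example_slot, sizeof(example_slot));
	return 1;
}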
/*
* Must write slot data before incrementing commit count.
- * This compiler barrier is upgraded into a smp_wmb() by the IPI
- * sent by get_subbuf() when it does its smp_rmb().
+ * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
+ * sent by get_subbuf() when it does its cmm_smp_rmb().
*/
- smp_wmb();
+ cmm_smp_wmb();
uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
/* Check if the written buffer has to be delivered */
/*
* Must write slot data before incrementing commit count.
- * This compiler barrier is upgraded into a smp_wmb() by the IPI
- * sent by get_subbuf() when it does its smp_rmb().
+ * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
+ * sent by get_subbuf() when it does its cmm_smp_rmb().
*/
- smp_wmb();
+ cmm_smp_wmb();
uatomic_add(&buf->commit_count[endidx].cc, padding_size);
commit_count = uatomic_read(&buf->commit_count[endidx].cc);
ltt_check_deliver(chan, buf,
* List of buffers with an open pipe, used for fork and forced subbuffer
* switch.
*/
- struct list_head open_buffers_list;
+ struct cds_list_head open_buffers_list;
unsigned int finalized;
//ust// struct timer_list switch_timer; /* timer for periodical switch */
consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
/*
* No memory barrier here, since we are only interested
* in a statistically correct polling result. The next poll will
* get the data if we are racing. The mb() that ensures correct
* memory order is in get_subbuf.
* Perform retryable operations.
*/
/* FIXME: make this really per cpu? */
- if (unlikely(LOAD_SHARED(ltt_nesting) > 4)) {
+ if (unlikely(CMM_LOAD_SHARED(ltt_nesting) > 4)) {
DBG("Dropping event because nesting is too deep.");
uatomic_inc(&buf->events_lost);
return -EPERM;
long endidx = SUBBUF_INDEX(offset_end - 1, chan);
long commit_count;
- smp_wmb();
+ cmm_smp_wmb();
uatomic_add(&buf->commit_count[endidx].cc, slot_size);
/*
* don't have constants, so gcc generally uses a function call.
*/
for (; len > 0; len--) {
- *(u8 *)dest = LOAD_SHARED(*(const u8 *)src);
+ *(u8 *)dest = CMM_LOAD_SHARED(*(const u8 *)src);
/* Check with dest, because src may be modified concurrently */
if (*(const u8 *)dest == '\0') {
len--;
* ltt_channel_mutex mutex may be nested inside markers mutex.
*/
static DEFINE_MUTEX(ltt_channel_mutex);
-static LIST_HEAD(ltt_channels);
+static CDS_LIST_HEAD(ltt_channels);
/*
* Index of next channel in array. Makes sure that as long as a trace channel is
* allocated, no array index will be re-used when a channel is freed and then
{
struct ltt_channel_setting *iter;
- list_for_each_entry(iter, <t_channels, list)
+ cds_list_for_each_entry(iter, <t_channels, list)
if (strcmp(name, iter->name) == 0)
return iter;
return NULL;
if (uatomic_read(&index_kref.refcount) == 0
&& uatomic_read(&setting->kref.refcount) == 0) {
- list_del(&setting->list);
+ cds_list_del(&setting->list);
free(setting);
free_index = 0;
- list_for_each_entry(iter, <t_channels, list) {
+ cds_list_for_each_entry(iter, <t_channels, list) {
iter->index = free_index++;
iter->free_event_id = 0;
}
{
struct ltt_channel_setting *iter, *n;
- list_for_each_entry_safe(iter, n, <t_channels, list)
+ cds_list_for_each_entry_safe(iter, n, <t_channels, list)
release_channel_setting(&iter->kref);
}
ret = -ENOMEM;
goto end;
}
- list_add(&setting->list, <t_channels);
+ cds_list_add(&setting->list, <t_channels);
strncpy(setting->name, name, PATH_MAX-1);
setting->index = free_index++;
init_kref:
{
struct ltt_channel_setting *iter;
- list_for_each_entry(iter, <t_channels, list)
+ cds_list_for_each_entry(iter, <t_channels, list)
if (iter->index == index && uatomic_read(&iter->kref.refcount))
return iter->name;
return NULL;
{
struct ltt_channel_setting *iter;
- list_for_each_entry(iter, <t_channels, list)
+ cds_list_for_each_entry(iter, <t_channels, list)
if (!strcmp(iter->name, name)
&& uatomic_read(&iter->kref.refcount))
return iter;
WARN("ltt_channel_struct: channel null after alloc");
goto end;
}
- list_for_each_entry(iter, <t_channels, list) {
+ cds_list_for_each_entry(iter, <t_channels, list) {
if (!uatomic_read(&iter->kref.refcount))
continue;
channel[iter->index].subbuf_size = iter->subbuf_size;
u32 version;
size_t alloc_size;
- struct list_head list;
+ struct cds_list_head list;
} ____cacheline_aligned;
struct ltt_channel_setting {
unsigned int subbuf_size;
unsigned int subbuf_cnt;
struct kref kref; /* Number of references to structure content */
- struct list_head list;
+ struct cds_list_head list;
unsigned int index; /* index of channel in trace channel array */
u16 free_event_id; /* Next event ID to allocate */
char name[PATH_MAX];
#define DEFAULT_CHANNEL "cpu"
#define DEFAULT_PROBE "default"
-LIST_HEAD(probes_list);
+CDS_LIST_HEAD(probes_list);
/*
* Mutex protecting the probe slab cache.
};
//ust//static struct kmem_cache *markers_loaded_cachep;
-static LIST_HEAD(markers_loaded_list);
+static CDS_LIST_HEAD(markers_loaded_list);
/*
* List sorted by name strcmp order.
*/
-static LIST_HEAD(probes_registered_list);
+static CDS_LIST_HEAD(probes_registered_list);
//ust// static struct proc_dir_entry *pentry;
if (!pname)
pname = DEFAULT_PROBE;
- list_for_each_entry(iter, &probes_registered_list, node) {
+ cds_list_for_each_entry(iter, &probes_registered_list, node) {
comparison = strcmp(pname, iter->name);
if (!comparison)
found = 1;
struct ltt_available_probe *iter;
pthread_mutex_lock(&probes_mutex);
- list_for_each_entry_reverse(iter, &probes_registered_list, node) {
+ cds_list_for_each_entry_reverse(iter, &probes_registered_list, node) {
comparison = strcmp(pdata->name, iter->name);
if (!comparison) {
ret = -EBUSY;
goto end;
} else if (comparison > 0) {
/* We belong to the location right after iter. */
- list_add(&pdata->node, &iter->node);
+ cds_list_add(&pdata->node, &iter->node);
goto end;
}
}
/* Should be added at the head of the list */
- list_add(&pdata->node, &probes_registered_list);
+ cds_list_add(&pdata->node, &probes_registered_list);
end:
pthread_mutex_unlock(&probes_mutex);
return ret;
struct ltt_active_marker *amark, *tmp;
pthread_mutex_lock(&probes_mutex);
- list_for_each_entry_safe(amark, tmp, &markers_loaded_list, node) {
+ cds_list_for_each_entry_safe(amark, tmp, &markers_loaded_list, node) {
if (amark->probe == pdata) {
ret = marker_probe_unregister_private_data(
pdata->probe_func, amark);
if (ret)
goto end;
- list_del(&amark->node);
+ cds_list_del(&amark->node);
free(amark);
}
}
- list_del(&pdata->node);
+ cds_list_del(&pdata->node);
end:
pthread_mutex_unlock(&probes_mutex);
return ret;
if (ret)
free(pdata);
else
- list_add(&pdata->node, &markers_loaded_list);
+ cds_list_add(&pdata->node, &markers_loaded_list);
end:
pthread_mutex_unlock(&probes_mutex);
ltt_unlock_traces();
if (ret)
goto end;
else {
- list_del(&pdata->node);
+ cds_list_del(&pdata->node);
free(pdata);
}
end:
{
struct ltt_active_marker *pdata, *tmp;
- list_for_each_entry_safe(pdata, tmp, &markers_loaded_list, node) {
+ cds_list_for_each_entry_safe(pdata, tmp, &markers_loaded_list, node) {
marker_probe_unregister_private_data(pdata->probe->probe_func,
pdata);
- list_del(&pdata->node);
+ cds_list_del(&pdata->node);
free(pdata);
}
}
*/
static DEFINE_MUTEX(markers_mutex);
-static LIST_HEAD(libs);
+static CDS_LIST_HEAD(libs);
void lock_markers(void)
* @...: Variable argument list.
*
* Since we do not use "typical" pointer based RCU in the 1 argument case, we
- * need to put a full smp_rmb() in this branch. This is why we do not use
+ * need to put a full cmm_smp_rmb() in this branch. This is why we do not use
* rcu_dereference() for the pointer read.
*/
notrace void marker_probe_cb(const struct marker *mdata,
if (likely(!ptype)) {
marker_probe_func *func;
/* Must read the ptype before ptr. They are not data dependent,
- * so we put an explicit smp_rmb() here. */
- smp_rmb();
+ * so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
func = mdata->single.func;
/* Must read the ptr before private data. They are not data
- * dependant, so we put an explicit smp_rmb() here. */
- smp_rmb();
+ * dependent, so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
va_start(args, regs);
func(mdata, mdata->single.probe_private, regs, call_private,
mdata->format, &args);
/*
* Read mdata->ptype before mdata->multi.
*/
- smp_rmb();
+ cmm_smp_rmb();
multi = mdata->multi;
/*
* multi points to an array, therefore accessing the array
* depends on reading multi. However, even in this case,
* we must ensure that the pointer is read _before_ the array
- * data. Same as rcu_dereference, but we need a full smp_rmb()
- * in the fast path, so put the explicit barrier here.
+ * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
+ * in the fast path, so put the explicit barrier here.
*/
- smp_read_barrier_depends();
+ cmm_smp_read_barrier_depends();
for (i = 0; multi[i].func; i++) {
va_start(args, regs);
multi[i].func(mdata, multi[i].probe_private,
if (likely(!ptype)) {
marker_probe_func *func;
/* Must read the ptype before ptr. They are not data dependent,
- * so we put an explicit smp_rmb() here. */
- smp_rmb();
+ * so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
func = mdata->single.func;
/* Must read the ptr before private data. They are not data
- * dependant, so we put an explicit smp_rmb() here. */
- smp_rmb();
+ * dependent, so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
func(mdata, mdata->single.probe_private, regs, call_private,
mdata->format, &args);
} else {
/*
* Read mdata->ptype before mdata->multi.
*/
- smp_rmb();
+ cmm_smp_rmb();
multi = mdata->multi;
/*
* multi points to an array, therefore accessing the array
* depends on reading multi. However, even in this case,
* we must ensure that the pointer is read _before_ the array
- * data. Same as rcu_dereference, but we need a full smp_rmb()
- * in the fast path, so put the explicit barrier here.
+ * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
+ * in the fast path, so put the explicit barrier here.
*/
- smp_read_barrier_depends();
+ cmm_smp_read_barrier_depends();
for (i = 0; multi[i].func; i++)
multi[i].func(mdata, multi[i].probe_private, regs,
call_private, mdata->format, &args);
struct marker_entry, rcu);
free(entry->oldptr);
/* Make sure we free the data before setting the pending flag to 0 */
- smp_wmb();
+ cmm_smp_wmb();
entry->rcu_pending = 0;
}
WARN_ON(ret);
/* Make sure the call_rcu has been executed */
//ust// if (e->rcu_pending)
//ust// rcu_barrier_sched();
free(e);
return 0;
}
* Make sure the private data is valid when we update the
* single probe ptr.
*/
- smp_wmb();
+ cmm_smp_wmb();
elem->single.func = entry->single.func;
/*
* We also make sure that the new probe callbacks array is consistent
* Update the function or multi probe array pointer before setting the
* ptype.
*/
- smp_wmb();
+ cmm_smp_wmb();
elem->ptype = entry->ptype;
if (elem->tp_name && (active ^ _imv_read(elem->state))) {
elem->state__imv = 0;
elem->single.func = __mark_empty_function;
/* Update the function before setting the ptype */
- smp_wmb();
+ cmm_smp_wmb();
elem->ptype = 0; /* single probe */
/*
* Leave the private data and channel_id/event_id there, because removal
/* FIXME: we should probably take a mutex here on libs */
//ust// pthread_mutex_lock(&module_mutex);
- list_for_each_entry(lib, &libs, list)
+ cds_list_for_each_entry(lib, &libs, list)
marker_update_probe_range(lib->markers_start,
lib->markers_start + lib->markers_count);
//ust// pthread_mutex_unlock(&module_mutex);
* make sure it's executed now.
*/
//ust// if (entry->rcu_pending)
//ust// rcu_barrier_sched();
old = marker_entry_add_probe(entry, probe, probe_private);
if (IS_ERR(old)) {
ret = PTR_ERR(old);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
//ust// rcu_barrier_sched();
entry->oldptr = old;
entry->rcu_pending = 1;
/* write rcu_pending before calling the RCU callback */
- smp_wmb();
+ cmm_smp_wmb();
//ust// call_rcu_sched(&entry->rcu, free_old_closure);
synchronize_rcu(); free_old_closure(&entry->rcu);
goto end;
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
//ust// rcu_barrier_sched();
old = marker_entry_remove_probe(entry, probe, probe_private);
pthread_mutex_unlock(&markers_mutex);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
//ust// rcu_barrier_sched();
entry->oldptr = old;
entry->rcu_pending = 1;
/* write rcu_pending before calling the RCU callback */
- smp_wmb();
+ cmm_smp_wmb();
//ust// call_rcu_sched(&entry->rcu, free_old_closure);
synchronize_rcu(); free_old_closure(&entry->rcu);
remove_marker(channel, name); /* Ignore busy error message */
goto end;
}
//ust// if (entry->rcu_pending)
//ust// rcu_barrier_sched();
old = marker_entry_remove_probe(entry, NULL, probe_private);
channel = strdup(entry->channel);
name = strdup(entry->name);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
//ust// rcu_barrier_sched();
entry->oldptr = old;
entry->rcu_pending = 1;
/* write rcu_pending before calling the RCU callback */
- smp_wmb();
+ cmm_smp_wmb();
//ust// call_rcu_sched(&entry->rcu, free_old_closure);
synchronize_rcu(); free_old_closure(&entry->rcu);
/* Ignore busy error message */
int found = 0;
//ust// pthread_mutex_lock(&module_mutex);
- list_for_each_entry(iter_lib, &libs, list) {
+ cds_list_for_each_entry(iter_lib, &libs, list) {
if (iter_lib < iter->lib)
continue;
else if (iter_lib > iter->lib)
/* FIXME: maybe protect this with its own mutex? */
lock_markers();
- list_add(&pl->list, &libs);
+ cds_list_add(&pl->list, &libs);
unlock_markers();
new_markers(markers_start, markers_start + markers_count);
/* FIXME: we should probably take a mutex here on libs */
//ust// pthread_mutex_lock(&module_mutex);
- list_for_each_entry(lib, &libs, list) {
+ cds_list_for_each_entry(lib, &libs, list) {
if(lib->markers_start == markers_start) {
struct lib *lib2free = lib;
- list_del(&lib->list);
+ cds_list_del(&lib->list);
free(lib2free);
break;
}
*/
tracer_stack_pos++;
assert(tracer_stack_pos <= TRACER_STACK_LEN);
- barrier();
+ cmm_barrier();
tracer_stack[*stack_pos_ctx] =
strlen(tmp.v_string.s) + 1;
}
cpu = ust_get_cpu();
/* Force volatile access. */
- STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
stack_pos_ctx = tracer_stack_pos;
- barrier();
+ cmm_barrier();
pdata = (struct ltt_active_marker *)probe_data;
eID = mdata->event_id;
va_end(args_copy);
/* Iterate on each trace */
- list_for_each_entry_rcu(trace, <t_traces.head, list) {
+ cds_list_for_each_entry_rcu(trace, <t_traces.head, list) {
/*
* Expect the filter to filter out events. If we get here,
* we went through tracepoint activation as a first step.
DBG("just commited event (%s/%s) at offset %ld and size %zd", mdata->channel, mdata->name, buf_offset, slot_size);
}
- barrier();
+ cmm_barrier();
tracer_stack_pos = stack_pos_ctx;
- STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
rcu_read_unlock(); //ust// rcu_read_unlock_sched_notrace();
}
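A minimal sketch of the nesting-guard idiom used in this fast path (the names are invented; only cmm_barrier(), CMM_LOAD_SHARED() and CMM_STORE_SHARED() come from liburcu):

/* Illustrative only: the compiler barriers keep the traced work inside
 * the region delimited by the nesting increment/decrement, and the
 * CMM_*_SHARED accessors force the per-thread counter to memory. */
#include <urcu/compiler.h>
#include <urcu/system.h>

static __thread int example_nesting;

static void example_guarded(void (*body)(void))
{
	CMM_STORE_SHARED(example_nesting,
			 CMM_LOAD_SHARED(example_nesting) + 1);
	cmm_barrier();		/* keep body after the increment */
	body();
	cmm_barrier();		/* keep body before the decrement */
	CMM_STORE_SHARED(example_nesting,
			 CMM_LOAD_SHARED(example_nesting) - 1);
}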
#include <urcu-bp.h>
/* libraries that contain trace_events (struct trace_event_lib) */
-static LIST_HEAD(libs);
+static CDS_LIST_HEAD(libs);
static DEFINE_MUTEX(trace_events_mutex);
struct trace_event_lib *iter_lib;
int found = 0;
- list_for_each_entry(iter_lib, &libs, list) {
+ cds_list_for_each_entry(iter_lib, &libs, list) {
if (iter_lib < iter->lib)
continue;
else if (iter_lib > iter->lib)
/* FIXME: maybe protect this with its own mutex? */
pthread_mutex_lock(&trace_events_mutex);
- list_add(&pl->list, &libs);
+ cds_list_add(&pl->list, &libs);
pthread_mutex_unlock(&trace_events_mutex);
DBG("just registered a trace_events section from %p and having %d trace_events", trace_events_start, trace_events_count);
pthread_mutex_lock(&trace_events_mutex);
- list_for_each_entry(lib, &libs, list) {
+ cds_list_for_each_entry(lib, &libs, list) {
if(lib->trace_events_start == trace_events_start) {
struct trace_event_lib *lib2free = lib;
- list_del(&lib->list);
+ cds_list_del(&lib->list);
free(lib2free);
break;
}
extern struct chan_info_struct chan_infos[];
-static struct list_head open_buffers_list = LIST_HEAD_INIT(open_buffers_list);
+static struct cds_list_head open_buffers_list = CDS_LIST_HEAD_INIT(open_buffers_list);
-static struct list_head ust_socks = LIST_HEAD_INIT(ust_socks);
+static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);
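For reference, the three initialization forms used across this patch, all from urcu/list.h (identifiers below are illustrative):

/* Illustrative only. */
#include <urcu/list.h>

static CDS_LIST_HEAD(example_defined);		/* define and initialize */

static struct cds_list_head example_static =
	CDS_LIST_HEAD_INIT(example_static);	/* static initializer */

static void example_runtime_init(struct cds_list_head *head)
{
	CDS_INIT_LIST_HEAD(head);		/* run-time initializer */
}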
/* volatile because shared between the listener and the main thread */
int buffers_to_export = 0;
ch_name = trace->channels[i].channel_name;
request_buffer_consumer(sock, trace_name,
ch_name, j);
- STORE_SHARED(buffers_to_export,
- LOAD_SHARED(buffers_to_export)+1);
+ CMM_STORE_SHARED(buffers_to_export,
+ CMM_LOAD_SHARED(buffers_to_export)+1);
}
}
}
*/
if (uatomic_read(&buf->consumed) == 0) {
DBG("decrementing buffers_to_export");
- STORE_SHARED(buffers_to_export, LOAD_SHARED(buffers_to_export)-1);
+ CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
}
/* The buffer has been exported, ergo, we can add it to the
* list of open buffers
*/
- list_add(&buf->open_buffers_list, &open_buffers_list);
+ cds_list_add(&buf->open_buffers_list, &open_buffers_list);
unlock_traces:
ltt_unlock_traces();
{
struct ust_buffer *buf;
- list_for_each_entry(buf, &open_buffers_list,
+ cds_list_for_each_entry(buf, &open_buffers_list,
open_buffers_list) {
ltt_force_switch(buf, FORCE_FLUSH);
}
if (getenv("UST_OVERWRITE")) {
int val = atoi(getenv("UST_OVERWRITE"));
if (val == 0 || val == 1) {
- STORE_SHARED(ust_channels_overwrite_by_default, val);
+ CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
} else {
WARN("invalid value for UST_OVERWRITE");
}
if (getenv("UST_AUTOCOLLECT")) {
int val = atoi(getenv("UST_AUTOCOLLECT"));
if (val == 0 || val == 1) {
- STORE_SHARED(ust_channels_request_collection_by_default, val);
+ CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
} else {
WARN("invalid value for UST_AUTOCOLLECT");
}
ltt_lock_traces();
- list_for_each_entry(trace, <t_traces.head, list) {
+ cds_list_for_each_entry(trace, <t_traces.head, list) {
if (trace->active) {
retval = 1;
break;
return;
}
- if (trace_recording() && LOAD_SHARED(buffers_to_export)) {
+ if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
int total = 0;
DBG("Keeping process alive for consumer daemon...");
- while (LOAD_SHARED(buffers_to_export)) {
+ while (CMM_LOAD_SHARED(buffers_to_export)) {
const int interv = 200000;
restarting_usleep(interv);
total += interv;
ltt_trace_stop("auto");
ltt_trace_destroy("auto", 1);
/* Delete all active connections, but leave them in the epoll set */
- list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
+ cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
ustcomm_del_sock(sock, 1);
}
/* Delete all blocked consumers */
- list_for_each_entry_safe(buf, buf_tmp, &open_buffers_list,
+ cds_list_for_each_entry_safe(buf, buf_tmp, &open_buffers_list,
open_buffers_list) {
result = close(buf->data_ready_fd_read);
if (result == -1) {
if (result == -1) {
PERROR("close");
}
- list_del(&buf->open_buffers_list);
+ cds_list_del(&buf->open_buffers_list);
}
/* Clean up the listener socket and epoll, keeping the socket file */
close(epoll_fd);
/* Re-start the launch sequence */
- STORE_SHARED(buffers_to_export, 0);
+ CMM_STORE_SHARED(buffers_to_export, 0);
have_listener = 0;
/* Set up epoll */
static const int tracepoint_debug;
/* libraries that contain tracepoints (struct tracepoint_lib) */
-static LIST_HEAD(libs);
+static CDS_LIST_HEAD(libs);
/*
* tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
struct tp_probes {
union {
//ust// struct rcu_head rcu;
- struct list_head list;
+ struct cds_list_head list;
} u;
struct probe probes[0];
};
WARN_ON(strcmp((*entry)->name, elem->name) != 0);
/*
- * rcu_assign_pointer has a smp_wmb() which makes sure that the new
+ * rcu_assign_pointer has a cmm_smp_wmb() which makes sure that the new
* probe callbacks array is consistent before setting a pointer to it.
* This array is referenced by __DO_TRACE from
- * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
+ * include/linux/tracepoints.h. A matching cmm_smp_read_barrier_depends()
* is used.
*/
rcu_assign_pointer(elem->probes, (*entry)->probes);
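A minimal sketch of the rcu_assign_pointer()/rcu_dereference() pairing the comment above relies on, using the urcu-bp flavour included elsewhere in this patch (all other names are invented):

/* Illustrative only: the publisher fills the structure before
 * publishing the pointer; readers dereference it inside a read-side
 * critical section. */
#include <stdlib.h>
#include <urcu-bp.h>

struct example_probes {
	int count;
};

static struct example_probes *example_current;

static void example_publish(int count)
{
	struct example_probes *p = malloc(sizeof(*p));

	if (!p)
		return;
	p->count = count;			/* fill before publish */
	rcu_assign_pointer(example_current, p);	/* implies write barrier */
}

static int example_read_count(void)
{
	struct example_probes *p;
	int count = 0;

	rcu_read_lock();
	p = rcu_dereference(example_current);
	if (p)
		count = p->count;
	rcu_read_unlock();
	return count;
}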
struct tracepoint_lib *lib;
//ust// pthread_mutex_lock(&module_mutex);
- list_for_each_entry(lib, &libs, list)
+ cds_list_for_each_entry(lib, &libs, list)
tracepoint_update_probe_range(lib->tracepoints_start,
lib->tracepoints_start + lib->tracepoints_count);
//ust// pthread_mutex_unlock(&module_mutex);
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
-static LIST_HEAD(old_probes);
+static CDS_LIST_HEAD(old_probes);
static int need_update;
static void tracepoint_add_old_probes(void *old)
if (old) {
struct tp_probes *tp_probes = _ust_container_of(old,
struct tp_probes, probes[0]);
- list_add(&tp_probes->u.list, &old_probes);
+ cds_list_add(&tp_probes->u.list, &old_probes);
}
}
*/
void tracepoint_probe_update_all(void)
{
- LIST_HEAD(release_probes);
+ CDS_LIST_HEAD(release_probes);
struct tp_probes *pos, *next;
pthread_mutex_lock(&tracepoints_mutex);
pthread_mutex_unlock(&tracepoints_mutex);
return;
}
- if (!list_empty(&old_probes))
- list_replace_init(&old_probes, &release_probes);
+ if (!cds_list_empty(&old_probes))
+ cds_list_replace_init(&old_probes, &release_probes);
need_update = 0;
pthread_mutex_unlock(&tracepoints_mutex);
tracepoint_update_probes();
- list_for_each_entry_safe(pos, next, &release_probes, u.list) {
- list_del(&pos->u.list);
+ cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
+ cds_list_del(&pos->u.list);
//ust// call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
synchronize_rcu();
free(pos);
int found = 0;
//ust// pthread_mutex_lock(&module_mutex);
- list_for_each_entry(iter_lib, &libs, list) {
+ cds_list_for_each_entry(iter_lib, &libs, list) {
if (iter_lib < iter->lib)
continue;
else if (iter_lib > iter->lib)
/* FIXME: maybe protect this with its own mutex? */
pthread_mutex_lock(&tracepoints_mutex);
- list_add(&pl->list, &libs);
+ cds_list_add(&pl->list, &libs);
pthread_mutex_unlock(&tracepoints_mutex);
new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);
pthread_mutex_lock(&tracepoints_mutex);
- list_for_each_entry(lib, &libs, list) {
+ cds_list_for_each_entry(lib, &libs, list) {
if(lib->tracepoints_start == tracepoints_start) {
struct tracepoint_lib *lib2free = lib;
- list_del(&lib->list);
+ cds_list_del(&lib->list);
free(lib2free);
break;
}
//ust//
//ust// }
-static LIST_HEAD(ltt_transport_list);
+static CDS_LIST_HEAD(ltt_transport_list);
/**
* ltt_transport_register - LTT transport registration
//ust// vmalloc_sync_all();
ltt_lock_traces();
- list_add_tail(&transport->node, <t_transport_list);
+ cds_list_add_tail(&transport->node, <t_transport_list);
ltt_unlock_traces();
}
void ltt_transport_unregister(struct ltt_transport *transport)
{
ltt_lock_traces();
- list_del(&transport->node);
+ cds_list_del(&transport->node);
ltt_unlock_traces();
}
//ust// #else
//ust// ltt_lock_traces();
//ust// #endif
-//ust// list_for_each_entry_rcu(trace, <t_traces.head, list) {
+//ust// cds_list_for_each_entry_rcu(trace, <t_traces.head, list) {
//ust// trace_async_wakeup(trace);
//ust// }
//ust// #ifndef CONFIG_PREEMPT_RT
{
struct ust_trace *trace;
- list_for_each_entry(trace, <t_traces.head, list)
+ cds_list_for_each_entry(trace, <t_traces.head, list)
if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
return trace;
{
struct ust_trace *trace;
- list_for_each_entry(trace, <t_traces.setup_head, list)
+ cds_list_for_each_entry(trace, <t_traces.setup_head, list)
if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
return trace;
chan_infos[chantype].def_subbufcount;
}
- list_add(&new_trace->list, <t_traces.setup_head);
+ cds_list_add(&new_trace->list, <t_traces.setup_head);
return 0;
trace_free:
/* must be called from within a traces lock. */
static void _ltt_trace_free(struct ust_trace *trace)
{
- list_del(&trace->list);
+ cds_list_del(&trace->list);
free(trace);
}
goto traces_error;
}
- list_for_each_entry(tran_iter, <t_transport_list, node) {
+ cds_list_for_each_entry(tran_iter, <t_transport_list, node) {
if (!strcmp(tran_iter->name, trace_type)) {
transport = tran_iter;
break;
}
}
- list_del(&trace->list);
-//ust// if (list_empty(<t_traces.head)) {
+ cds_list_del(&trace->list);
+//ust// if (cds_list_empty(<t_traces.head)) {
//ust// mod_timer(<t_async_wakeup_timer,
//ust// jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust// set_kernel_trace_flag_all_tasks();
//ust// }
- list_add_rcu(&trace->list, <t_traces.head);
+ cds_list_add_rcu(&trace->list, <t_traces.head);
//ust// synchronize_sched();
ltt_unlock_traces();
goto active_error;
}
/* Everything went fine */
- list_del_rcu(&trace->list);
+ cds_list_del_rcu(&trace->list);
synchronize_rcu();
- if (list_empty(<t_traces.head)) {
+ if (cds_list_empty(<t_traces.head)) {
//ust// clear_kernel_trace_flag_all_tasks();
/*
* We stop the asynchronous delivery of reader wakeup, but
};
struct ltt_active_marker {
- struct list_head node; /* active markers list */
+ struct cds_list_head node; /* active markers list */
const char *channel;
const char *name;
const char *format;
struct ltt_transport {
char *name;
struct module *owner;
- struct list_head node;
+ struct cds_list_head node;
struct ltt_trace_ops ops;
};
/* Per-trace information - each trace/flight recorder represented by one */
struct ust_trace {
/* First 32 bytes cache-hot cacheline */
- struct list_head list;
+ struct cds_list_head list;
struct ltt_trace_ops *ops;
int active;
/* Second 32 bytes cache-hot cacheline */
/* Traces structures */
struct ltt_traces ltt_traces = {
- .setup_head = LIST_HEAD_INIT(ltt_traces.setup_head),
- .head = LIST_HEAD_INIT(ltt_traces.head),
+ .setup_head = CDS_LIST_HEAD_INIT(ltt_traces.setup_head),
+ .head = CDS_LIST_HEAD_INIT(ltt_traces.head),
};
/* Traces list writer locking */
* list.
*/
struct ltt_traces {
- struct list_head setup_head; /* Pre-allocated traces list */
- struct list_head head; /* Allocated Traces list */
+ struct cds_list_head setup_head; /* Pre-allocated traces list */
+ struct cds_list_head head; /* Allocated Traces list */
unsigned int num_active_traces; /* Number of active traces */
} ____cacheline_aligned;
cpu = ust_get_cpu();
/* Force volatile access. */
- STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
/*
* asm volatile and "memory" clobber prevent the compiler from moving
* traps, divisions by 0, ...) are triggered within the incremented
* nesting count section.
*/
- barrier();
+ cmm_barrier();
eID = mdata->event_id;
chan_index = mdata->channel_id;
* Iterate on each trace, typically small number of active traces,
* list iteration with prefetch is usually slower.
*/
- list_for_each_entry_rcu(trace, <t_traces.head, list) {
+ cds_list_for_each_entry_rcu(trace, <t_traces.head, list) {
if (unlikely(!trace->active))
continue;
//ust// if (unlikely(!ltt_run_filter(trace, eID)))
* traps, divisions by 0, ...) are triggered within the incremented
* nesting count section.
*/
- barrier();
- STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+ cmm_barrier();
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
rcu_read_unlock();
}
}
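A minimal sketch of the RCU-protected list pattern used above (cds_list_for_each_entry_rcu readers, cds_list_del_rcu plus a grace period on the update side); all names besides the liburcu calls are invented, and a real updater would also hold the traces mutex:

/* Illustrative only: readers traverse under rcu_read_lock(); the
 * updater unlinks and waits for a grace period before freeing. */
#include <stdlib.h>
#include <urcu-bp.h>
#include <urcu/list.h>
#include <urcu/rculist.h>

struct example_trace {
	int active;
	struct cds_list_head list;
};

static CDS_LIST_HEAD(example_traces);

static int example_count_active(void)
{
	struct example_trace *iter;
	int n = 0;

	rcu_read_lock();
	cds_list_for_each_entry_rcu(iter, &example_traces, list)
		if (iter->active)
			n++;
	rcu_read_unlock();
	return n;
}

static void example_remove(struct example_trace *trace)
{
	cds_list_del_rcu(&trace->list);
	synchronize_rcu();	/* wait for readers before freeing */
	free(trace);
}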
struct ustcomm_sock * ustcomm_init_sock(int fd, int epoll_fd,
- struct list_head *list)
+ struct cds_list_head *list)
{
struct epoll_event ev;
struct ustcomm_sock *sock;
sock->epoll_fd = epoll_fd;
if (list) {
- list_add(&sock->list, list);
+ cds_list_add(&sock->list, list);
} else {
- INIT_LIST_HEAD(&sock->list);
+ CDS_INIT_LIST_HEAD(&sock->list);
}
return sock;
void ustcomm_del_sock(struct ustcomm_sock *sock, int keep_in_epoll)
{
- list_del(&sock->list);
+ cds_list_del(&sock->list);
if (!keep_in_epoll) {
if (epoll_ctl(sock->epoll_fd, EPOLL_CTL_DEL, sock->fd, NULL) == -1) {
PERROR("epoll_ctl: failed to delete socket");
#define SOCK_DIR "/tmp/ust-app-socks"
struct ustcomm_sock {
- struct list_head list;
+ struct cds_list_head list;
int fd;
int epoll_fd;
};
/* Create and delete sockets */
extern struct ustcomm_sock * ustcomm_init_sock(int fd, int epoll_fd,
- struct list_head *list);
+ struct cds_list_head *list);
extern void ustcomm_del_sock(struct ustcomm_sock *sock, int keep_in_epoll);
/* Create and delete named sockets */
goto close_epoll;
}
- INIT_LIST_HEAD(&instance->connections);
+ CDS_INIT_LIST_HEAD(&instance->connections);
free(name);