This fixes an ltt_nesting corruption bug: the nesting counter becomes a per-thread (__thread) variable instead of a plain global, and its accesses are forced volatile with LOAD_SHARED()/STORE_SHARED() around compiler barriers, so concurrent threads no longer clobber each other's nesting count.
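
For context, here is a minimal standalone sketch of the pattern the patch moves to. The helper name (trace_event_sketch) and the local LOAD_SHARED()/STORE_SHARED()/barrier() definitions are assumptions standing in for the UST headers; this illustrates the idea, not the actual tree:

    #include <stdio.h>

    /* Volatile-access macros, redefined locally for this sketch; UST takes
     * them from its own headers. They force the compiler to emit a real
     * load/store instead of caching the value in a register. */
    #define LOAD_SHARED(x)     (*(volatile __typeof__(x) *)&(x))
    #define STORE_SHARED(x, v) do { LOAD_SHARED(x) = (v); } while (0)

    /* Compiler barrier: no memory accesses reordered across this point. */
    #define barrier() __asm__ __volatile__("" : : : "memory")

    /* Per-thread nesting counter: each thread gets its own copy, so no
     * cross-thread corruption is possible. */
    static __thread int ltt_nesting;

    /* Hypothetical event hook showing the enter/exit discipline. */
    static void trace_event_sketch(const char *name)
    {
        /* Drop the event if we are nested too deeply, e.g. tracing
         * code recursively triggering tracing. */
        if (LOAD_SHARED(ltt_nesting) > 4) {
            fprintf(stderr, "event lost: %s\n", name);
            return;
        }

        STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
        barrier();

        /* ... write the event into the ring buffer here ... */
        printf("traced %s at nesting %d\n", name, LOAD_SHARED(ltt_nesting));

        barrier();
        STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
    }

    int main(void)
    {
        trace_event_sketch("example");
        return 0;
    }

The key point is that __thread gives each thread a private counter, while the volatile accesses and barriers keep the compiler from caching the count or reordering the increment/decrement around the traced section.
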
* Perform retryable operations.
*/
 /* FIXME: make this really per cpu? */
- if (unlikely(__get_cpu_var(ltt_nesting) > 4)) {
+ if (unlikely(LOAD_SHARED(ltt_nesting) > 4)) {
local_inc(&buf->events_lost);
return -EPERM;
}
return;
rcu_read_lock(); //ust// rcu_read_lock_sched_notrace();
-//ust// cpu = smp_processor_id();
cpu = ust_get_cpu();
-//ust// __get_cpu_var(ltt_nesting)++;
- /* FIXME: should nesting be per-cpu? */
- ltt_nesting++;
+
+ /* Force volatile access. */
+ STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+ barrier();
pdata = (struct ltt_active_marker *)probe_data;
eID = mdata->event_id;
ltt_commit_slot(channel, buf, buf_offset, data_size, slot_size);
DBG("just commited event (%s/%s) at offset %ld and size %zd", mdata->channel, mdata->name, buf_offset, slot_size);
}
-//ust// __get_cpu_var(ltt_nesting)--;
- ltt_nesting--;
+
+ barrier();
+ STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+
rcu_read_unlock(); //ust// rcu_read_unlock_sched_notrace();
}
//ust// DEFINE_PER_CPU(unsigned int, ltt_nesting);
//ust// EXPORT_PER_CPU_SYMBOL(ltt_nesting);
-unsigned int ltt_nesting;
+__thread int ltt_nesting;
int ltt_run_filter_default(void *trace, uint16_t eID)
{
extern struct ltt_traces ltt_traces;
/* Keep track of trap nesting inside LTT */
-//ust// DECLARE_PER_CPU(unsigned int, ltt_nesting);
-extern unsigned int ltt_nesting;
+extern __thread int ltt_nesting;
typedef int (*ltt_run_filter_functor)(void *trace, uint16_t eID);
//typedef int (*ltt_run_filter_functor)(void *, __u16);
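
As a usage note on this typedef, a hypothetical filter could be plugged in as follows; run_filter_low_ids, should_record, and the "nonzero means record" convention are assumptions for illustration, not part of UST:

    #include <stdint.h>

    typedef int (*ltt_run_filter_functor)(void *trace, uint16_t eID);

    /* Hypothetical filter: pass only events whose ID is below a cutoff. */
    static int run_filter_low_ids(void *trace, uint16_t eID)
    {
        (void)trace;       /* trace unused in this sketch */
        return eID < 128;  /* assuming nonzero means "record the event" */
    }

    /* Installing a filter amounts to assigning the function pointer. */
    static ltt_run_filter_functor ltt_run_filter = run_filter_low_ids;

    static int should_record(void *trace, uint16_t eID)
    {
        return ltt_run_filter ? ltt_run_filter(trace, eID) : 1;
    }

    int main(void)
    {
        return !should_record((void *)0, 42); /* expect ID 42 to pass */
    }
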