convert ltt_nesting to a per-thread variable, and access it with volatile operations
author Pierre-Marc Fournier <pierre-marc.fournier@polymtl.ca>
Mon, 1 Mar 2010 21:19:38 +0000 (16:19 -0500)
committer Pierre-Marc Fournier <pierre-marc.fournier@polymtl.ca>
Mon, 1 Mar 2010 21:23:00 +0000 (16:23 -0500)
This fixes an ltt_nesting corruption bug: the counter was a plain global shared
by all threads (the per-CPU variant was commented out for userspace), so the
non-atomic ltt_nesting++ / ltt_nesting-- could lose updates when several
threads hit tracepoints concurrently. Making the counter per-thread removes the
sharing entirely; the volatile accesses keep it reliable against nested probes
on the same thread.
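For background, the sketch below (illustrative only, not from the commit; the
names and iteration counts are made up) shows the failure mode: a plain shared
counter incremented from several threads loses updates, because n++ is a
non-atomic read-modify-write, while a __thread counter is private to each
thread and the same increment is always exact.

/* Build with: gcc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>

static unsigned int shared_nesting;   /* the old, racy scheme */
static __thread int thread_nesting;   /* the new, per-thread scheme */

static void *worker(void *arg)
{
        for (int i = 0; i < 1000000; i++) {
                shared_nesting++;      /* may lose updates across threads */
                thread_nesting++;      /* always exact for this thread */
        }
        printf("thread-local count: %d\n", thread_nesting); /* 1000000 */
        return NULL;
}

int main(void)
{
        pthread_t t[2];
        for (int i = 0; i < 2; i++)
                pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 2; i++)
                pthread_join(t[i], NULL);
        /* On SMP this often prints less than 2000000. */
        printf("shared count: %u\n", shared_nesting);
        return 0;
}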

libust/buffers.h
libust/serialize.c
libust/tracercore.c
libust/tracercore.h

index 28faad79551ab8a0839c2be043875a7246a71cf8..db4b7089071e42358d18b4172da60ca2a6963ff6 100644
--- a/libust/buffers.h
+++ b/libust/buffers.h
@@ -356,7 +356,7 @@ static __inline__ int ltt_reserve_slot(struct ust_trace *trace,
         * Perform retryable operations.
         */
        /* FIXME: make this really per-cpu? */
-       if (unlikely(__get_cpu_var(ltt_nesting) > 4)) {
+       if (unlikely(LOAD_SHARED(ltt_nesting) > 4)) {
                local_inc(&buf->events_lost);
                return -EPERM;
        }
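LOAD_SHARED() comes from liburcu: on the read side it boils down to a
volatile access, forcing the compiler to re-read ltt_nesting from memory
instead of caching it in a register. A minimal sketch of that behaviour,
assuming a simplified ACCESS_ONCE-style definition (the real liburcu macro may
add cache-coherency hooks on some architectures), with a hypothetical helper
name:

/* Simplified, assumed definition in the spirit of liburcu's LOAD_SHARED(). */
#define ACCESS_ONCE(x)  (*(volatile __typeof__(x) *)&(x))
#define LOAD_SHARED(x)  ACCESS_ONCE(x)

extern __thread int ltt_nesting;

/* Guard against runaway recursion: if a probe fires while we are already
 * more than 4 levels deep inside the tracer, drop the event instead. */
static inline int tracer_nesting_too_deep(void)
{
        return LOAD_SHARED(ltt_nesting) > 4;
}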
index 011206afbe94dd7e0cdee20e9d2aae34e72627d6..4c23e8dd02451d46d6b8de34c418d40e872bc9f3 100644
--- a/libust/serialize.c
+++ b/libust/serialize.c
@@ -623,11 +623,11 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
                return;
 
        rcu_read_lock(); //ust// rcu_read_lock_sched_notrace();
-//ust//        cpu = smp_processor_id();
        cpu = ust_get_cpu();
-//ust//        __get_cpu_var(ltt_nesting)++;
-       /* FIXME: should nesting be per-cpu? */
-       ltt_nesting++;
+
+       /* Force volatile access. */
+       STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+       barrier();
 
        pdata = (struct ltt_active_marker *)probe_data;
        eID = mdata->event_id;
@@ -712,8 +712,10 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
                ltt_commit_slot(channel, buf, buf_offset, data_size, slot_size);
                DBG("just commited event (%s/%s) at offset %ld and size %zd", mdata->channel, mdata->name, buf_offset, slot_size);
        }
-//ust//        __get_cpu_var(ltt_nesting)--;
-       ltt_nesting--;
+
+       barrier();
+       STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+
        rcu_read_unlock(); //ust// rcu_read_unlock_sched_notrace();
 }
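The barrier() calls are compiler barriers. Together with the volatile
accesses they keep the increment of ltt_nesting ordered before the buffer
writes and the decrement after them, so a nested probe on the same thread
(for example, one fired from a signal handler) always observes an accurate
nesting count. A sketch of the overall shape, assuming the usual
empty-asm-with-memory-clobber definition of barrier() and the simplified
macros; probe_body() is a hypothetical stand-in for ltt_vtrace():

#define ACCESS_ONCE(x)     (*(volatile __typeof__(x) *)&(x))
#define LOAD_SHARED(x)     ACCESS_ONCE(x)
#define STORE_SHARED(x, v) (ACCESS_ONCE(x) = (v))
#define barrier()          __asm__ __volatile__("" ::: "memory")

__thread int ltt_nesting;

void probe_body(void)
{
        STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
        barrier();  /* buffer writes must not be hoisted above the ++ ... */

        /* ... reserve slot, serialize event, commit slot ... */

        barrier();  /* ... nor sunk below the -- */
        STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
}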
 
index 51f9a70877b07143692501b90cc01053a5867cf2..72c2887d8a1ece8b0ca479573d04b6172e4c5069 100644
--- a/libust/tracercore.c
+++ b/libust/tracercore.c
@@ -42,7 +42,7 @@ void ltt_unlock_traces(void)
 
 //ust// DEFINE_PER_CPU(unsigned int, ltt_nesting);
 //ust// EXPORT_PER_CPU_SYMBOL(ltt_nesting);
-unsigned int ltt_nesting;
+__thread int ltt_nesting;
 
 int ltt_run_filter_default(void *trace, uint16_t eID)
 {
index 3113383cfd69184e52cd06f1a81f3b47566f8472..2f3d7c4a3b9fc93c9a85e6eac544a889f00947c8 100644
--- a/libust/tracercore.h
+++ b/libust/tracercore.h
@@ -42,8 +42,7 @@ struct ltt_traces {
 extern struct ltt_traces ltt_traces;
 
 /* Keep track of trap nesting inside LTT */
-//ust// DECLARE_PER_CPU(unsigned int, ltt_nesting);
-extern unsigned int ltt_nesting;
+extern __thread int ltt_nesting;
 
 typedef int (*ltt_run_filter_functor)(void *trace, uint16_t eID);
 //typedef int (*ltt_run_filter_functor)(void *, __u16);
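With the extern __thread declaration in the header, every compilation unit
that references ltt_nesting transparently resolves it to the calling thread's
TLS copy; no locking or per-CPU indexing is needed at the use sites. A
hypothetical caller, as a sketch:

/* The declaration below is what libust/tracercore.h now provides. */
extern __thread int ltt_nesting;

/* Each thread resolves ltt_nesting to its own TLS copy, so this read
 * needs no synchronization and cannot see another thread's counter. */
int in_tracer(void)
{
        return ltt_nesting > 0;
}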