Prevent re-entrancy in callstack-user context
author		Francis Deslauriers <francis.deslauriers@efficios.com>
		Mon, 29 May 2017 19:32:04 +0000 (15:32 -0400)
committer	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
		Mon, 11 Jun 2018 18:39:17 +0000 (14:39 -0400)
Userspace callstack context often triggers kernel page faults that can be
traced by the kernel tracer, which might then attempt to gather the
userspace callstack again. This recursion is eventually stopped by the
RING_BUFFER_MAX_NESTING check, but it still pollutes the traces with
redundant information.

To prevent this, check whether the tracer is already gathering the
userspace callstack and, if so, do not record it again.
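
The guard amounts to a nesting counter incremented around the userspace
stack walk and checked before starting another one. A minimal user-space
sketch of the same pattern (per-thread instead of per-CPU, with
hypothetical names such as record_callstack(); not part of lttng-modules)
could look like:

#include <stdio.h>

/* Per-thread analogue of the per-CPU callstack_user_nesting counter. */
static __thread int callstack_user_nesting;

/* Hypothetical stand-in for the real userspace stack walk. */
static void save_user_callstack(void)
{
	printf("capturing userspace callstack\n");
	/*
	 * If walking the stack faulted and the fault handler traced an
	 * event carrying the same context, record_callstack() would be
	 * re-entered here; the guard below turns that second call into
	 * a no-op instead of recursing.
	 */
}

static void record_callstack(void)
{
	/* Re-entrancy guard: skip if a capture is already in progress. */
	if (callstack_user_nesting >= 1)
		return;

	callstack_user_nesting++;
	save_user_callstack();
	callstack_user_nesting--;
}

int main(void)
{
	record_callstack();
	return 0;
}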

Signed-off-by: Francis Deslauriers <francis.deslauriers@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
lttng-context-callstack.c

index fd730bbc164090bdaf9ef708a4265894de2154a7..2fa5f52dbf19929ff2686a1e594dc5d22cbd724a 100644
@@ -120,14 +120,26 @@ int init_type(enum lttng_cs_ctx_modes mode)
        return 0;
 }
 
+/* Keep track of nesting inside userspace callstack context code */
+DEFINE_PER_CPU(int, callstack_user_nesting);
+
 static
 struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
                                        struct lib_ring_buffer_ctx *ctx)
 {
-       int nesting;
+       int buffer_nesting, cs_user_nesting;
        struct lttng_cs *cs;
        struct field_data *fdata = field->priv;
 
+       /*
+        * Do not gather the userspace callstack context when the event was
+        * triggered by the userspace callstack context saving mechanism.
+        */
+       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+
+       if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
+               return NULL;
+
        /*
         * get_cpu() is not required, preemption is already
         * disabled while event is written.
@@ -136,11 +148,11 @@ struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
         * Check it again as a safety net.
         */
        cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
-       nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
-       if (nesting >= RING_BUFFER_MAX_NESTING) {
+       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+       if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
                return NULL;
-       }
-       return &cs->dispatch[nesting].stack_trace;
+
+       return &cs->dispatch[buffer_nesting].stack_trace;
 }
 
 /*
@@ -168,8 +180,15 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
        /* reset stack trace, no need to clear memory */
        trace->nr_entries = 0;
 
+       if (fdata->mode == CALLSTACK_USER)
+               ++per_cpu(callstack_user_nesting, ctx->cpu);
+
        /* do the real work and reserve space */
        cs_types[fdata->mode].save_func(trace);
+
+       if (fdata->mode == CALLSTACK_USER)
+               per_cpu(callstack_user_nesting, ctx->cpu)--;
+
        /*
         * Remove final ULONG_MAX delimiter. If we cannot find it, add
         * our own marker to show that the stack is incomplete. This is